author    Steve French <sfrench@us.ibm.com>  2008-05-06 13:55:32 -0400
committer Steve French <sfrench@us.ibm.com>  2008-05-06 13:55:32 -0400
commit    a815752ac0ffdb910e92958d41d28f4fb28e5296 (patch)
tree      a3aa16a282354da0debe8e3a3a7ed8aac6e54001 /drivers
parent    5ade9deaaa3e1f7291467d97b238648e43eae15e (diff)
parent    a15306365a16380f3bafee9e181ba01231d4acd7 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/accessibility/Kconfig23
-rw-r--r--drivers/accessibility/Makefile1
-rw-r--r--drivers/accessibility/braille/Makefile1
-rw-r--r--drivers/accessibility/braille/braille_console.c397
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/ac.c12
-rw-r--r--drivers/acpi/battery.c12
-rw-r--r--drivers/acpi/bay.c2
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/button.c24
-rw-r--r--drivers/acpi/dispatcher/dsfield.c173
-rw-r--r--drivers/acpi/dispatcher/dsinit.c2
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c57
-rw-r--r--drivers/acpi/dispatcher/dsmthdat.c2
-rw-r--r--drivers/acpi/dispatcher/dsobject.c101
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c260
-rw-r--r--drivers/acpi/dispatcher/dsutils.c167
-rw-r--r--drivers/acpi/dispatcher/dswexec.c78
-rw-r--r--drivers/acpi/dispatcher/dswload.c37
-rw-r--r--drivers/acpi/dispatcher/dswscope.c2
-rw-r--r--drivers/acpi/dispatcher/dswstate.c517
-rw-r--r--drivers/acpi/ec.c251
-rw-r--r--drivers/acpi/event.c8
-rw-r--r--drivers/acpi/events/evevent.c2
-rw-r--r--drivers/acpi/events/evgpe.c6
-rw-r--r--drivers/acpi/events/evgpeblk.c2
-rw-r--r--drivers/acpi/events/evmisc.c92
-rw-r--r--drivers/acpi/events/evregion.c4
-rw-r--r--drivers/acpi/events/evrgnini.c2
-rw-r--r--drivers/acpi/events/evsci.c2
-rw-r--r--drivers/acpi/events/evxface.c23
-rw-r--r--drivers/acpi/events/evxfevnt.c2
-rw-r--r--drivers/acpi/events/evxfregn.c2
-rw-r--r--drivers/acpi/executer/exconfig.c105
-rw-r--r--drivers/acpi/executer/exconvrt.c2
-rw-r--r--drivers/acpi/executer/excreate.c117
-rw-r--r--drivers/acpi/executer/exdump.c69
-rw-r--r--drivers/acpi/executer/exfield.c63
-rw-r--r--drivers/acpi/executer/exfldio.c46
-rw-r--r--drivers/acpi/executer/exmisc.c2
-rw-r--r--drivers/acpi/executer/exmutex.c237
-rw-r--r--drivers/acpi/executer/exnames.c2
-rw-r--r--drivers/acpi/executer/exoparg1.c25
-rw-r--r--drivers/acpi/executer/exoparg2.c21
-rw-r--r--drivers/acpi/executer/exoparg3.c3
-rw-r--r--drivers/acpi/executer/exoparg6.c10
-rw-r--r--drivers/acpi/executer/exprep.c17
-rw-r--r--drivers/acpi/executer/exregion.c10
-rw-r--r--drivers/acpi/executer/exresnte.c12
-rw-r--r--drivers/acpi/executer/exresolv.c55
-rw-r--r--drivers/acpi/executer/exresop.c13
-rw-r--r--drivers/acpi/executer/exstore.c119
-rw-r--r--drivers/acpi/executer/exstoren.c2
-rw-r--r--drivers/acpi/executer/exstorob.c2
-rw-r--r--drivers/acpi/executer/exsystem.c3
-rw-r--r--drivers/acpi/executer/exutils.c67
-rw-r--r--drivers/acpi/fan.c49
-rw-r--r--drivers/acpi/glue.c20
-rw-r--r--drivers/acpi/hardware/hwacpi.c2
-rw-r--r--drivers/acpi/hardware/hwgpe.c2
-rw-r--r--drivers/acpi/hardware/hwregs.c2
-rw-r--r--drivers/acpi/hardware/hwsleep.c16
-rw-r--r--drivers/acpi/hardware/hwtimer.c2
-rw-r--r--drivers/acpi/namespace/nsaccess.c101
-rw-r--r--drivers/acpi/namespace/nsalloc.c2
-rw-r--r--drivers/acpi/namespace/nsdump.c11
-rw-r--r--drivers/acpi/namespace/nsdumpdv.c2
-rw-r--r--drivers/acpi/namespace/nseval.c2
-rw-r--r--drivers/acpi/namespace/nsinit.c12
-rw-r--r--drivers/acpi/namespace/nsload.c6
-rw-r--r--drivers/acpi/namespace/nsnames.c8
-rw-r--r--drivers/acpi/namespace/nsobject.c2
-rw-r--r--drivers/acpi/namespace/nsparse.c33
-rw-r--r--drivers/acpi/namespace/nssearch.c2
-rw-r--r--drivers/acpi/namespace/nsutils.c2
-rw-r--r--drivers/acpi/namespace/nswalk.c6
-rw-r--r--drivers/acpi/namespace/nsxfeval.c15
-rw-r--r--drivers/acpi/namespace/nsxfname.c2
-rw-r--r--drivers/acpi/namespace/nsxfobj.c2
-rw-r--r--drivers/acpi/osl.c1
-rw-r--r--drivers/acpi/parser/psargs.c63
-rw-r--r--drivers/acpi/parser/psloop.c61
-rw-r--r--drivers/acpi/parser/psopcode.c38
-rw-r--r--drivers/acpi/parser/psparse.c45
-rw-r--r--drivers/acpi/parser/psscope.c2
-rw-r--r--drivers/acpi/parser/pstree.c4
-rw-r--r--drivers/acpi/parser/psutils.c2
-rw-r--r--drivers/acpi/parser/pswalk.c2
-rw-r--r--drivers/acpi/parser/psxface.c2
-rw-r--r--drivers/acpi/power.c13
-rw-r--r--drivers/acpi/processor_core.c79
-rw-r--r--drivers/acpi/processor_idle.c31
-rw-r--r--drivers/acpi/processor_perflib.c13
-rw-r--r--drivers/acpi/processor_thermal.c31
-rw-r--r--drivers/acpi/processor_throttling.c1
-rw-r--r--drivers/acpi/resources/rsaddr.c2
-rw-r--r--drivers/acpi/resources/rscalc.c26
-rw-r--r--drivers/acpi/resources/rscreate.c2
-rw-r--r--drivers/acpi/resources/rsdump.c10
-rw-r--r--drivers/acpi/resources/rsinfo.c2
-rw-r--r--drivers/acpi/resources/rsio.c41
-rw-r--r--drivers/acpi/resources/rsirq.c45
-rw-r--r--drivers/acpi/resources/rslist.c2
-rw-r--r--drivers/acpi/resources/rsmemory.c2
-rw-r--r--drivers/acpi/resources/rsmisc.c13
-rw-r--r--drivers/acpi/resources/rsutils.c8
-rw-r--r--drivers/acpi/resources/rsxface.c2
-rw-r--r--drivers/acpi/sbs.c35
-rw-r--r--drivers/acpi/scan.c63
-rw-r--r--drivers/acpi/sleep/main.c42
-rw-r--r--drivers/acpi/sleep/proc.c26
-rw-r--r--drivers/acpi/system.c27
-rw-r--r--drivers/acpi/tables/tbfadt.c2
-rw-r--r--drivers/acpi/tables/tbfind.c34
-rw-r--r--drivers/acpi/tables/tbinstal.c24
-rw-r--r--drivers/acpi/tables/tbutils.c4
-rw-r--r--drivers/acpi/tables/tbxface.c91
-rw-r--r--drivers/acpi/tables/tbxfroot.c2
-rw-r--r--drivers/acpi/thermal.c87
-rw-r--r--drivers/acpi/utilities/utalloc.c4
-rw-r--r--drivers/acpi/utilities/utcache.c2
-rw-r--r--drivers/acpi/utilities/utcopy.c61
-rw-r--r--drivers/acpi/utilities/utdebug.c19
-rw-r--r--drivers/acpi/utilities/utdelete.c23
-rw-r--r--drivers/acpi/utilities/uteval.c2
-rw-r--r--drivers/acpi/utilities/utglobal.c49
-rw-r--r--drivers/acpi/utilities/utinit.c5
-rw-r--r--drivers/acpi/utilities/utmath.c4
-rw-r--r--drivers/acpi/utilities/utmisc.c6
-rw-r--r--drivers/acpi/utilities/utmutex.c2
-rw-r--r--drivers/acpi/utilities/utobject.c8
-rw-r--r--drivers/acpi/utilities/utresrc.c2
-rw-r--r--drivers/acpi/utilities/utstate.c2
-rw-r--r--drivers/acpi/utilities/utxface.c41
-rw-r--r--drivers/acpi/utils.c2
-rw-r--r--drivers/acpi/video.c307
-rw-r--r--drivers/ata/Kconfig8
-rw-r--r--drivers/ata/Makefile2
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/libata-scsi.c507
-rw-r--r--drivers/ata/libata.h28
-rw-r--r--drivers/ata/pata_atiixp.c4
-rw-r--r--drivers/ata/pata_bf54x.c124
-rw-r--r--drivers/ata/pata_rb532_cf.c (renamed from drivers/ata/pata_rb500_cf.c)78
-rw-r--r--drivers/ata/pata_via.c11
-rw-r--r--drivers/ata/sata_mv.c77
-rw-r--r--drivers/atm/ambassador.c19
-rw-r--r--drivers/atm/ambassador.h2
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/base/base.h11
-rw-r--r--drivers/base/class.c638
-rw-r--r--drivers/base/core.c14
-rw-r--r--drivers/base/cpu.c10
-rw-r--r--drivers/base/driver.c10
-rw-r--r--drivers/base/firmware_class.c2
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/block/aoe/aoe.h1
-rw-r--r--drivers/block/aoe/aoecmd.c24
-rw-r--r--drivers/block/aoe/aoedev.c18
-rw-r--r--drivers/block/aoe/aoenet.c4
-rw-r--r--drivers/block/brd.c19
-rw-r--r--drivers/block/cciss.c12
-rw-r--r--drivers/block/cpqarray.c4
-rw-r--r--drivers/block/floppy.c6
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/nbd.c172
-rw-r--r--drivers/block/paride/pd.c4
-rw-r--r--drivers/block/pktcdvd.c13
-rw-r--r--drivers/block/ps3disk.c4
-rw-r--r--drivers/block/ub.c65
-rw-r--r--drivers/block/virtio_blk.c44
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c13
-rw-r--r--drivers/bluetooth/hci_usb.h21
-rw-r--r--drivers/cdrom/cdrom.c1
-rw-r--r--drivers/cdrom/viocd.c10
-rw-r--r--drivers/char/Kconfig9
-rw-r--r--drivers/char/agp/agp.h2
-rw-r--r--drivers/char/amiserial.c30
-rw-r--r--drivers/char/apm-emulation.c23
-rw-r--r--drivers/char/applicom.c4
-rw-r--r--drivers/char/consolemap.c1
-rw-r--r--drivers/char/cs5535_gpio.c2
-rw-r--r--drivers/char/cyclades.c432
-rw-r--r--drivers/char/drm/drmP.h8
-rw-r--r--drivers/char/drm/drm_sysfs.c2
-rw-r--r--drivers/char/drm/i830_dma.c18
-rw-r--r--drivers/char/drm/i830_drv.h2
-rw-r--r--drivers/char/drm/i830_irq.c8
-rw-r--r--drivers/char/drm/i915_dma.c4
-rw-r--r--drivers/char/drm/i915_drv.h2
-rw-r--r--drivers/char/drm/r128_cce.c2
-rw-r--r--drivers/char/drm/radeon_cp.c2
-rw-r--r--drivers/char/ds1286.c3
-rw-r--r--drivers/char/epca.c315
-rw-r--r--drivers/char/esp.c611
-rw-r--r--drivers/char/generic_serial.c18
-rw-r--r--drivers/char/hpet.c10
-rw-r--r--drivers/char/hvsi.c52
-rw-r--r--drivers/char/i8k.c12
-rw-r--r--drivers/char/ip2/i2ellis.c194
-rw-r--r--drivers/char/ip2/i2ellis.h58
-rw-r--r--drivers/char/ip2/i2hw.h6
-rw-r--r--drivers/char/ip2/i2lib.c141
-rw-r--r--drivers/char/ip2/i2os.h127
-rw-r--r--drivers/char/ip2/ip2main.c144
-rw-r--r--drivers/char/ipmi/Makefile4
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c153
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c153
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c1508
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c206
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c698
-rw-r--r--drivers/char/ipmi/ipmi_si_sm.h89
-rw-r--r--drivers/char/ipmi/ipmi_smic_sm.c149
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c244
-rw-r--r--drivers/char/isicom.c171
-rw-r--r--drivers/char/istallion.c22
-rw-r--r--drivers/char/keyboard.c4
-rw-r--r--drivers/char/mem.c10
-rw-r--r--drivers/char/misc.c27
-rw-r--r--drivers/char/mmtimer.c424
-rw-r--r--drivers/char/moxa.c2999
-rw-r--r--drivers/char/moxa.h304
-rw-r--r--drivers/char/mspec.c12
-rw-r--r--drivers/char/mxser.c346
-rw-r--r--drivers/char/mxser.h137
-rw-r--r--drivers/char/n_hdlc.c37
-rw-r--r--drivers/char/n_r3964.c33
-rw-r--r--drivers/char/n_tty.c160
-rw-r--r--drivers/char/nozomi.c17
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c2
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c2
-rw-r--r--drivers/char/pcmcia/ipwireless/hardware.c26
-rw-r--r--drivers/char/pcmcia/ipwireless/hardware.h2
-rw-r--r--drivers/char/pcmcia/ipwireless/network.c15
-rw-r--r--drivers/char/pcmcia/ipwireless/network.h3
-rw-r--r--drivers/char/pcmcia/synclink_cs.c23
-rw-r--r--drivers/char/pty.c32
-rw-r--r--drivers/char/random.c297
-rw-r--r--drivers/char/rio/cirrus.h210
-rw-r--r--drivers/char/rio/rio_linux.c10
-rw-r--r--drivers/char/rio/rio_linux.h6
-rw-r--r--drivers/char/rio/riocmd.c19
-rw-r--r--drivers/char/rio/rioctrl.c37
-rw-r--r--drivers/char/rio/riointr.c5
-rw-r--r--drivers/char/rio/rioparam.c70
-rw-r--r--drivers/char/rio/rioroute.c2
-rw-r--r--drivers/char/rio/riotty.c25
-rw-r--r--drivers/char/riscom8.c706
-rw-r--r--drivers/char/rocket.c43
-rw-r--r--drivers/char/rocket_int.h2
-rw-r--r--drivers/char/rtc.c6
-rw-r--r--drivers/char/serial167.c27
-rw-r--r--drivers/char/snsc.c18
-rw-r--r--drivers/char/snsc_event.c16
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/char/specialix.c111
-rw-r--r--drivers/char/stallion.c17
-rw-r--r--drivers/char/sx.c35
-rw-r--r--drivers/char/synclink.c45
-rw-r--r--drivers/char/synclink_gt.c80
-rw-r--r--drivers/char/synclinkmp.c41
-rw-r--r--drivers/char/sysrq.c46
-rw-r--r--drivers/char/toshiba.c5
-rw-r--r--drivers/char/tpm/Kconfig5
-rw-r--r--drivers/char/tpm/tpm_nsc.c2
-rw-r--r--drivers/char/tty_audit.c64
-rw-r--r--drivers/char/tty_io.c380
-rw-r--r--drivers/char/tty_ioctl.c126
-rw-r--r--drivers/char/viocons.c12
-rw-r--r--drivers/char/viotape.c9
-rw-r--r--drivers/char/vt.c28
-rw-r--r--drivers/char/vt_ioctl.c452
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c6
-rw-r--r--drivers/cpufreq/Kconfig9
-rw-r--r--drivers/cpufreq/cpufreq.c156
-rw-r--r--drivers/cpufreq/cpufreq_powersave.c8
-rw-r--r--drivers/cpufreq/cpufreq_stats.c8
-rw-r--r--drivers/edac/Kconfig2
-rw-r--r--drivers/edac/amd76x_edac.c7
-rw-r--r--drivers/edac/e752x_edac.c220
-rw-r--r--drivers/edac/e7xxx_edac.c13
-rw-r--r--drivers/edac/edac_core.h2
-rw-r--r--drivers/edac/edac_device.c39
-rw-r--r--drivers/edac/edac_mc.c29
-rw-r--r--drivers/edac/edac_module.h1
-rw-r--r--drivers/edac/edac_pci.c14
-rw-r--r--drivers/edac/edac_pci_sysfs.c11
-rw-r--r--drivers/edac/i3000_edac.c13
-rw-r--r--drivers/edac/i5000_edac.c14
-rw-r--r--drivers/edac/i82443bxgx_edac.c7
-rw-r--r--drivers/edac/i82860_edac.c7
-rw-r--r--drivers/edac/i82875p_edac.c9
-rw-r--r--drivers/edac/i82975x_edac.c8
-rw-r--r--drivers/edac/pasemi_edac.c7
-rw-r--r--drivers/edac/r82600_edac.c7
-rw-r--r--drivers/firewire/fw-sbp2.c4
-rw-r--r--drivers/firmware/Kconfig9
-rw-r--r--drivers/firmware/dcdbas.c16
-rw-r--r--drivers/firmware/dell_rbu.c12
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpio/pca953x.c28
-rw-r--r--drivers/gpio/pcf857x.c36
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hwmon/ads7828.c2
-rw-r--r--drivers/hwmon/adt7473.c45
-rw-r--r--drivers/hwmon/asb100.c4
-rw-r--r--drivers/hwmon/f75375s.c29
-rw-r--r--drivers/hwmon/lm75.c5
-rw-r--r--drivers/hwmon/smsc47b397.c17
-rw-r--r--drivers/hwmon/w83793.c26
-rw-r--r--drivers/hwmon/w83l785ts.c4
-rw-r--r--drivers/i2c/busses/i2c-amd756-s4882.c5
-rw-r--r--drivers/i2c/busses/i2c-piix4.c10
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c14
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-stub.c2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c3
-rw-r--r--drivers/i2c/chips/ds1682.c10
-rw-r--r--drivers/i2c/chips/menelaus.c10
-rw-r--r--drivers/i2c/chips/tps65010.c34
-rw-r--r--drivers/i2c/chips/tsl2550.c10
-rw-r--r--drivers/i2c/i2c-core.c51
-rw-r--r--drivers/ide/Kconfig34
-rw-r--r--drivers/ide/Makefile2
-rw-r--r--drivers/ide/arm/icside.c2
-rw-r--r--drivers/ide/arm/palm_bk3710.c1
-rw-r--r--drivers/ide/arm/rapide.c1
-rw-r--r--drivers/ide/cris/Makefile3
-rw-r--r--drivers/ide/cris/ide-cris.c1086
-rw-r--r--drivers/ide/h8300/ide-h8300.c108
-rw-r--r--drivers/ide/ide-cd.c36
-rw-r--r--drivers/ide/ide-cd_verbose.c2
-rw-r--r--drivers/ide/ide-dma.c11
-rw-r--r--drivers/ide/ide-floppy.c27
-rw-r--r--drivers/ide/ide-io.c71
-rw-r--r--drivers/ide/ide-iops.c344
-rw-r--r--drivers/ide/ide-lib.c2
-rw-r--r--drivers/ide/ide-probe.c23
-rw-r--r--drivers/ide/ide-proc.c7
-rw-r--r--drivers/ide/ide-tape.c21
-rw-r--r--drivers/ide/ide-taskfile.c73
-rw-r--r--drivers/ide/ide.c4
-rw-r--r--drivers/ide/legacy/falconide.c26
-rw-r--r--drivers/ide/legacy/ide_platform.c4
-rw-r--r--drivers/ide/legacy/q40ide.c24
-rw-r--r--drivers/ide/mips/au1xxx-ide.c17
-rw-r--r--drivers/ide/mips/swarm.c1
-rw-r--r--drivers/ide/pci/alim15x3.c10
-rw-r--r--drivers/ide/pci/ns87415.c44
-rw-r--r--drivers/ide/pci/pdc202xx_new.c8
-rw-r--r--drivers/ide/pci/piix.c1
-rw-r--r--drivers/ide/pci/scc_pata.c196
-rw-r--r--drivers/ide/pci/sgiioc4.c1
-rw-r--r--drivers/ide/pci/siimage.c548
-rw-r--r--drivers/ide/ppc/pmac.c1
-rw-r--r--drivers/ieee1394/nodemgr.c5
-rw-r--r--drivers/infiniband/core/umem.c17
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c31
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h5
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h21
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c173
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h3
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c73
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h5
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c11
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c35
-rw-r--r--drivers/infiniband/hw/ehca/ehca_hca.c7
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c36
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c26
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c3
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c8
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c20
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_user.h10
-rw-r--r--drivers/infiniband/hw/nes/Kconfig1
-rw-r--r--drivers/infiniband/hw/nes/nes.c4
-rw-r--r--drivers/infiniband/hw/nes/nes.h5
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c371
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h19
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c180
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c90
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c40
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c3
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c4
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h7
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c9
-rw-r--r--drivers/input/input.c18
-rw-r--r--drivers/input/serio/hp_sdc.c1
-rw-r--r--drivers/input/serio/serport.c2
-rw-r--r--drivers/input/tablet/aiptek.c16
-rw-r--r--drivers/input/tablet/gtco.c14
-rw-r--r--drivers/input/tablet/kbtab.c4
-rw-r--r--drivers/isdn/capi/capi.c9
-rw-r--r--drivers/isdn/capi/kcapi_proc.c24
-rw-r--r--drivers/isdn/divert/divert_procfs.c5
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c15
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c8
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c10
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c8
-rw-r--r--drivers/isdn/i4l/isdn_tty.c30
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/lguest/lguest_device.c68
-rw-r--r--drivers/lguest/lguest_user.c4
-rw-r--r--drivers/macintosh/Kconfig8
-rw-r--r--drivers/macintosh/Makefile5
-rw-r--r--drivers/macintosh/adb.c30
-rw-r--r--drivers/macintosh/therm_pm72.c31
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c6
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c20
-rw-r--r--drivers/macintosh/windfarm_pm121.c1040
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c10
-rw-r--r--drivers/mca/mca-legacy.c18
-rw-r--r--drivers/mca/mca-proc.c2
-rw-r--r--drivers/md/dm-emc.c2
-rw-r--r--drivers/md/dm-mpath-hp-sw.c1
-rw-r--r--drivers/md/dm-mpath-rdac.c1
-rw-r--r--drivers/md/dm-table.c5
-rw-r--r--drivers/md/md.c121
-rw-r--r--drivers/md/raid1.c27
-rw-r--r--drivers/md/raid10.c29
-rw-r--r--drivers/md/raid5.c33
-rw-r--r--drivers/media/Kconfig172
-rw-r--r--drivers/media/Makefile10
-rw-r--r--drivers/media/common/Makefile1
-rw-r--r--drivers/media/common/tuners/Kconfig151
-rw-r--r--drivers/media/common/tuners/Makefile25
-rw-r--r--drivers/media/common/tuners/mt2060.c (renamed from drivers/media/dvb/frontends/mt2060.c)0
-rw-r--r--drivers/media/common/tuners/mt2060.h (renamed from drivers/media/dvb/frontends/mt2060.h)4
-rw-r--r--drivers/media/common/tuners/mt2060_priv.h (renamed from drivers/media/dvb/frontends/mt2060_priv.h)0
-rw-r--r--drivers/media/common/tuners/mt20xx.c (renamed from drivers/media/video/mt20xx.c)0
-rw-r--r--drivers/media/common/tuners/mt20xx.h (renamed from drivers/media/video/mt20xx.h)2
-rw-r--r--drivers/media/common/tuners/mt2131.c (renamed from drivers/media/dvb/frontends/mt2131.c)0
-rw-r--r--drivers/media/common/tuners/mt2131.h (renamed from drivers/media/dvb/frontends/mt2131.h)4
-rw-r--r--drivers/media/common/tuners/mt2131_priv.h (renamed from drivers/media/dvb/frontends/mt2131_priv.h)0
-rw-r--r--drivers/media/common/tuners/mt2266.c (renamed from drivers/media/dvb/frontends/mt2266.c)0
-rw-r--r--drivers/media/common/tuners/mt2266.h (renamed from drivers/media/dvb/frontends/mt2266.h)4
-rw-r--r--drivers/media/common/tuners/qt1010.c (renamed from drivers/media/dvb/frontends/qt1010.c)0
-rw-r--r--drivers/media/common/tuners/qt1010.h (renamed from drivers/media/dvb/frontends/qt1010.h)4
-rw-r--r--drivers/media/common/tuners/qt1010_priv.h (renamed from drivers/media/dvb/frontends/qt1010_priv.h)0
-rw-r--r--drivers/media/common/tuners/tda18271-common.c (renamed from drivers/media/dvb/frontends/tda18271-common.c)0
-rw-r--r--drivers/media/common/tuners/tda18271-fe.c (renamed from drivers/media/dvb/frontends/tda18271-fe.c)0
-rw-r--r--drivers/media/common/tuners/tda18271-maps.c (renamed from drivers/media/dvb/frontends/tda18271-tables.c)0
-rw-r--r--drivers/media/common/tuners/tda18271-priv.h (renamed from drivers/media/dvb/frontends/tda18271-priv.h)0
-rw-r--r--drivers/media/common/tuners/tda18271.h (renamed from drivers/media/dvb/frontends/tda18271.h)2
-rw-r--r--drivers/media/common/tuners/tda827x.c (renamed from drivers/media/dvb/frontends/tda827x.c)0
-rw-r--r--drivers/media/common/tuners/tda827x.h (renamed from drivers/media/dvb/frontends/tda827x.h)4
-rw-r--r--drivers/media/common/tuners/tda8290.c (renamed from drivers/media/video/tda8290.c)8
-rw-r--r--drivers/media/common/tuners/tda8290.h (renamed from drivers/media/video/tda8290.h)2
-rw-r--r--drivers/media/common/tuners/tda9887.c (renamed from drivers/media/video/tda9887.c)0
-rw-r--r--drivers/media/common/tuners/tda9887.h (renamed from drivers/media/video/tda9887.h)2
-rw-r--r--drivers/media/common/tuners/tea5761.c (renamed from drivers/media/video/tea5761.c)0
-rw-r--r--drivers/media/common/tuners/tea5761.h (renamed from drivers/media/video/tea5761.h)2
-rw-r--r--drivers/media/common/tuners/tea5767.c (renamed from drivers/media/video/tea5767.c)0
-rw-r--r--drivers/media/common/tuners/tea5767.h (renamed from drivers/media/video/tea5767.h)2
-rw-r--r--drivers/media/common/tuners/tuner-i2c.h (renamed from drivers/media/video/tuner-i2c.h)0
-rw-r--r--drivers/media/common/tuners/tuner-simple.c (renamed from drivers/media/video/tuner-simple.c)0
-rw-r--r--drivers/media/common/tuners/tuner-simple.h (renamed from drivers/media/video/tuner-simple.h)2
-rw-r--r--drivers/media/common/tuners/tuner-types.c (renamed from drivers/media/video/tuner-types.c)0
-rw-r--r--drivers/media/common/tuners/tuner-xc2028-types.h (renamed from drivers/media/video/tuner-xc2028-types.h)0
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c (renamed from drivers/media/video/tuner-xc2028.c)0
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.h (renamed from drivers/media/video/tuner-xc2028.h)2
-rw-r--r--drivers/media/common/tuners/xc5000.c (renamed from drivers/media/dvb/frontends/xc5000.c)0
-rw-r--r--drivers/media/common/tuners/xc5000.h (renamed from drivers/media/dvb/frontends/xc5000.h)6
-rw-r--r--drivers/media/common/tuners/xc5000_priv.h (renamed from drivers/media/dvb/frontends/xc5000_priv.h)0
-rw-r--r--drivers/media/dvb/Kconfig4
-rw-r--r--drivers/media/dvb/b2c2/Kconfig2
-rw-r--r--drivers/media/dvb/b2c2/Makefile2
-rw-r--r--drivers/media/dvb/bt8xx/Kconfig2
-rw-r--r--drivers/media/dvb/bt8xx/Makefile2
-rw-r--r--drivers/media/dvb/bt8xx/dst.c2
-rw-r--r--drivers/media/dvb/dvb-core/Kconfig34
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.h2
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig26
-rw-r--r--drivers/media/dvb/dvb-usb/Makefile2
-rw-r--r--drivers/media/dvb/frontends/Kconfig121
-rw-r--r--drivers/media/dvb/frontends/Makefile11
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c2
-rw-r--r--drivers/media/video/Kconfig50
-rw-r--r--drivers/media/video/Makefile14
-rw-r--r--drivers/media/video/au0828/Kconfig2
-rw-r--r--drivers/media/video/au0828/Makefile2
-rw-r--r--drivers/media/video/au0828/au0828-dvb.c2
-rw-r--r--drivers/media/video/bt8xx/Kconfig2
-rw-r--r--drivers/media/video/bt8xx/Makefile1
-rw-r--r--drivers/media/video/bt8xx/bttvp.h2
-rw-r--r--drivers/media/video/cs5345.c3
-rw-r--r--drivers/media/video/cs53l32a.c3
-rw-r--r--drivers/media/video/cx18/Kconfig20
-rw-r--r--drivers/media/video/cx18/Makefile11
-rw-r--r--drivers/media/video/cx18/cx18-audio.c73
-rw-r--r--drivers/media/video/cx18/cx18-audio.h26
-rw-r--r--drivers/media/video/cx18/cx18-av-audio.c361
-rw-r--r--drivers/media/video/cx18/cx18-av-core.c879
-rw-r--r--drivers/media/video/cx18/cx18-av-core.h318
-rw-r--r--drivers/media/video/cx18/cx18-av-firmware.c120
-rw-r--r--drivers/media/video/cx18/cx18-av-vbi.c413
-rw-r--r--drivers/media/video/cx18/cx18-cards.c277
-rw-r--r--drivers/media/video/cx18/cx18-cards.h170
-rw-r--r--drivers/media/video/cx18/cx18-controls.c306
-rw-r--r--drivers/media/video/cx18/cx18-controls.h24
-rw-r--r--drivers/media/video/cx18/cx18-driver.c971
-rw-r--r--drivers/media/video/cx18/cx18-driver.h500
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c288
-rw-r--r--drivers/media/video/cx18/cx18-dvb.h25
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c711
-rw-r--r--drivers/media/video/cx18/cx18-fileops.h45
-rw-r--r--drivers/media/video/cx18/cx18-firmware.c373
-rw-r--r--drivers/media/video/cx18/cx18-firmware.h25
-rw-r--r--drivers/media/video/cx18/cx18-gpio.c74
-rw-r--r--drivers/media/video/cx18/cx18-gpio.h24
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c431
-rw-r--r--drivers/media/video/cx18/cx18-i2c.h33
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c851
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.h30
-rw-r--r--drivers/media/video/cx18/cx18-irq.c179
-rw-r--r--drivers/media/video/cx18/cx18-irq.h37
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c372
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.h73
-rw-r--r--drivers/media/video/cx18/cx18-queue.c282
-rw-r--r--drivers/media/video/cx18/cx18-queue.h59
-rw-r--r--drivers/media/video/cx18/cx18-scb.c121
-rw-r--r--drivers/media/video/cx18/cx18-scb.h285
-rw-r--r--drivers/media/video/cx18/cx18-streams.c566
-rw-r--r--drivers/media/video/cx18/cx18-streams.h33
-rw-r--r--drivers/media/video/cx18/cx18-vbi.c208
-rw-r--r--drivers/media/video/cx18/cx18-vbi.h26
-rw-r--r--drivers/media/video/cx18/cx18-version.h34
-rw-r--r--drivers/media/video/cx18/cx18-video.c45
-rw-r--r--drivers/media/video/cx18/cx18-video.h22
-rw-r--r--drivers/media/video/cx18/cx23418.h458
-rw-r--r--drivers/media/video/cx23885/Kconfig12
-rw-r--r--drivers/media/video/cx23885/Makefile1
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c3
-rw-r--r--drivers/media/video/cx88/Kconfig4
-rw-r--r--drivers/media/video/cx88/Makefile1
-rw-r--r--drivers/media/video/cx88/cx88-cards.c50
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c31
-rw-r--r--drivers/media/video/em28xx/Kconfig2
-rw-r--r--drivers/media/video/em28xx/Makefile1
-rw-r--r--drivers/media/video/ivtv/Kconfig2
-rw-r--r--drivers/media/video/ivtv/Makefile1
-rw-r--r--drivers/media/video/ivtv/ivtv-cards.c98
-rw-r--r--drivers/media/video/ivtv/ivtv-cards.h5
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c49
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-gpio.c9
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c3
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c44
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-version.h2
-rw-r--r--drivers/media/video/ivtv/ivtv-yuv.c4
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/m52790.c3
-rw-r--r--drivers/media/video/msp3400-driver.c2
-rw-r--r--drivers/media/video/mt9m001.c12
-rw-r--r--drivers/media/video/mt9v022.c12
-rw-r--r--drivers/media/video/pvrusb2/Kconfig55
-rw-r--r--drivers/media/video/pvrusb2/Makefile1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-audio.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-context.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debug.h1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-devattr.c8
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-dvb.c48
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-video-v4l.c2
-rw-r--r--drivers/media/video/saa7115.c3
-rw-r--r--drivers/media/video/saa7127.c3
-rw-r--r--drivers/media/video/saa7134/Kconfig6
-rw-r--r--drivers/media/video/saa7134/Makefile1
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c260
-rw-r--r--drivers/media/video/saa7134/saa7134-i2c.c42
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c1
-rw-r--r--drivers/media/video/saa7134/saa7134.h1
-rw-r--r--drivers/media/video/saa717x.c3
-rw-r--r--drivers/media/video/tcm825x.c3
-rw-r--r--drivers/media/video/tlv320aic23b.c3
-rw-r--r--drivers/media/video/tuner-core.c127
-rw-r--r--drivers/media/video/tvaudio.c2
-rw-r--r--drivers/media/video/upd64031a.c3
-rw-r--r--drivers/media/video/upd64083.c3
-rw-r--r--drivers/media/video/usbvideo/vicam.c6
-rw-r--r--drivers/media/video/usbvision/Kconfig2
-rw-r--r--drivers/media/video/usbvision/Makefile1
-rw-r--r--drivers/media/video/v4l2-common.c7
-rw-r--r--drivers/media/video/videobuf-core.c5
-rw-r--r--drivers/media/video/vp27smpx.c3
-rw-r--r--drivers/media/video/wm8739.c3
-rw-r--r--drivers/media/video/wm8775.c3
-rw-r--r--drivers/media/video/zoran_procfs.c7
-rw-r--r--drivers/message/i2o/i2o_block.c2
-rw-r--r--drivers/message/i2o/i2o_proc.c6
-rw-r--r--drivers/mfd/asic3.c6
-rw-r--r--drivers/mfd/htc-pasic3.c9
-rw-r--r--drivers/mfd/sm501.c4
-rw-r--r--drivers/misc/Kconfig21
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/eeepc-laptop.c666
-rw-r--r--drivers/misc/hdpuftrs/hdpu_cpustate.c5
-rw-r--r--drivers/misc/hdpuftrs/hdpu_nexus.c17
-rw-r--r--drivers/misc/ibmasm/command.c6
-rw-r--r--drivers/misc/ibmasm/heartbeat.c6
-rw-r--r--drivers/misc/intel_menlow.c24
-rw-r--r--drivers/misc/ioc4.c20
-rw-r--r--drivers/misc/kgdbts.c77
-rw-r--r--drivers/misc/phantom.c34
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c4
-rw-r--r--drivers/misc/sony-laptop.c4
-rw-r--r--drivers/misc/thinkpad_acpi.c765
-rw-r--r--drivers/mmc/host/mmc_spi.c2
-rw-r--r--drivers/mmc/host/mmci.c4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c14
-rw-r--r--drivers/mtd/devices/mtdram.c11
-rw-r--r--drivers/mtd/devices/phram.c13
-rw-r--r--drivers/mtd/devices/pmc551.c27
-rw-r--r--drivers/mtd/devices/slram.c15
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/maps/uclinux.c6
-rw-r--r--drivers/mtd/mtdpart.c8
-rw-r--r--drivers/mtd/nand/at91_nand.c42
-rw-r--r--drivers/net/3c505.c30
-rw-r--r--drivers/net/3c505.h1
-rw-r--r--drivers/net/3c509.c47
-rw-r--r--drivers/net/3c515.c64
-rw-r--r--drivers/net/8390.c2
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/arm/Kconfig8
-rw-r--r--drivers/net/arm/Makefile1
-rw-r--r--drivers/net/arm/am79c961a.c10
-rw-r--r--drivers/net/arm/ixp4xx_eth.c1265
-rw-r--r--drivers/net/bfin_mac.c296
-rw-r--r--drivers/net/bfin_mac.h2
-rw-r--r--drivers/net/bnx2.c43
-rw-r--r--drivers/net/bnx2_fw2.h502
-rw-r--r--drivers/net/bonding/bond_main.c9
-rw-r--r--drivers/net/cxgb3/version.h2
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/eepro.c2
-rw-r--r--drivers/net/fec.c125
-rw-r--r--drivers/net/fec.h4
-rw-r--r--drivers/net/fec_mpc52xx.c120
-rw-r--r--drivers/net/fec_mpc52xx.h19
-rw-r--r--drivers/net/gianfar.c27
-rw-r--r--drivers/net/gianfar.h1
-rw-r--r--drivers/net/gianfar_mii.c38
-rw-r--r--drivers/net/gianfar_mii.h3
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/hamradio/6pack.c36
-rw-r--r--drivers/net/hamradio/mkiss.c15
-rw-r--r--drivers/net/ibmveth.c9
-rw-r--r--drivers/net/irda/irtty-sir.c95
-rw-r--r--drivers/net/irda/mcs7780.c2
-rw-r--r--drivers/net/irda/stir4200.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c5
-rw-r--r--drivers/net/mlx4/cq.c4
-rw-r--r--drivers/net/mlx4/mr.c8
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/smsc.c83
-rw-r--r--drivers/net/ppp_async.c9
-rw-r--r--drivers/net/ppp_synctty.c9
-rw-r--r--drivers/net/pppoe.c4
-rw-r--r--drivers/net/pppol2tp.c4
-rw-r--r--drivers/net/r8169.c8
-rw-r--r--drivers/net/rionet.c16
-rw-r--r--drivers/net/s2io.c337
-rw-r--r--drivers/net/s2io.h82
-rw-r--r--drivers/net/sfc/Kconfig12
-rw-r--r--drivers/net/sfc/Makefile5
-rw-r--r--drivers/net/sfc/bitfield.h508
-rw-r--r--drivers/net/sfc/boards.c167
-rw-r--r--drivers/net/sfc/boards.h26
-rw-r--r--drivers/net/sfc/efx.c2208
-rw-r--r--drivers/net/sfc/efx.h67
-rw-r--r--drivers/net/sfc/enum.h50
-rw-r--r--drivers/net/sfc/ethtool.c460
-rw-r--r--drivers/net/sfc/ethtool.h27
-rw-r--r--drivers/net/sfc/falcon.c2722
-rw-r--r--drivers/net/sfc/falcon.h130
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h1135
-rw-r--r--drivers/net/sfc/falcon_io.h243
-rw-r--r--drivers/net/sfc/falcon_xmac.c585
-rw-r--r--drivers/net/sfc/gmii.h195
-rw-r--r--drivers/net/sfc/i2c-direct.c381
-rw-r--r--drivers/net/sfc/i2c-direct.h91
-rw-r--r--drivers/net/sfc/mac.h33
-rw-r--r--drivers/net/sfc/mdio_10g.c282
-rw-r--r--drivers/net/sfc/mdio_10g.h232
-rw-r--r--drivers/net/sfc/net_driver.h883
-rw-r--r--drivers/net/sfc/phy.h48
-rw-r--r--drivers/net/sfc/rx.c875
-rw-r--r--drivers/net/sfc/rx.h29
-rw-r--r--drivers/net/sfc/sfe4001.c252
-rw-r--r--drivers/net/sfc/spi.h71
-rw-r--r--drivers/net/sfc/tenxpress.c434
-rw-r--r--drivers/net/sfc/tx.c452
-rw-r--r--drivers/net/sfc/tx.h24
-rw-r--r--drivers/net/sfc/workarounds.h56
-rw-r--r--drivers/net/sfc/xenpack.h62
-rw-r--r--drivers/net/sfc/xfp_phy.c132
-rw-r--r--drivers/net/sis190.c136
-rw-r--r--drivers/net/slip.c13
-rw-r--r--drivers/net/tehuti.c2
-rw-r--r--drivers/net/tg3.c151
-rw-r--r--drivers/net/tg3.h15
-rw-r--r--drivers/net/tulip/de4x5.c35
-rw-r--r--drivers/net/tulip/de4x5.h2
-rw-r--r--drivers/net/tulip/tulip.h7
-rw-r--r--drivers/net/tulip/tulip_core.c10
-rw-r--r--drivers/net/virtio_net.c96
-rw-r--r--drivers/net/wan/pc300_tty.c24
-rw-r--r--drivers/net/wan/x25_asy.c279
-rw-r--r--drivers/net/wireless/airo.c94
-rw-r--r--drivers/net/wireless/ath5k/base.c8
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/main.c53
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl4965-base.c26
-rw-r--r--drivers/net/wireless/libertas/scan.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/strip.c66
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c4
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/net/yellowfin.c2
-rw-r--r--drivers/nubus/proc.c44
-rw-r--r--drivers/parisc/ccio-dma.c14
-rw-r--r--drivers/parisc/sba_iommu.c14
-rw-r--r--drivers/parport/ieee1284.c4
-rw-r--r--drivers/parport/parport_gsc.c4
-rw-r--r--drivers/parport/parport_pc.c12
-rw-r--r--drivers/pci/hotplug/pciehp.h17
-rw-r--r--drivers/pci/hotplug/pciehp_core.c19
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c46
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c565
-rw-r--r--drivers/pci/hotplug/shpchp_core.c11
-rw-r--r--drivers/pci/msi.c56
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c2
-rw-r--r--drivers/pci/probe.c32
-rw-r--r--drivers/pci/proc.c15
-rw-r--r--drivers/pcmcia/Kconfig1
-rw-r--r--drivers/pcmcia/au1000_db1x00.c6
-rw-r--r--drivers/pcmcia/au1000_generic.c11
-rw-r--r--drivers/pcmcia/au1000_pb1x00.c14
-rw-r--r--drivers/pcmcia/au1000_xxs1500.c2
-rw-r--r--drivers/pcmcia/cardbus.c2
-rw-r--r--drivers/pcmcia/cistpl.c39
-rw-r--r--drivers/pcmcia/cs.c13
-rw-r--r--drivers/pcmcia/cs_internal.h3
-rw-r--r--drivers/pcmcia/ds.c2
-rw-r--r--drivers/pcmcia/i82092.c6
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c4
-rw-r--r--drivers/pcmcia/pd6729.c6
-rw-r--r--drivers/pcmcia/pxa2xx_lubbock.c8
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c4
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c2
-rw-r--r--drivers/pcmcia/sa1100_assabet.c4
-rw-r--r--drivers/pcmcia/sa1100_badge4.c8
-rw-r--r--drivers/pcmcia/sa1100_cerf.c2
-rw-r--r--drivers/pcmcia/sa1100_jornada720.c4
-rw-r--r--drivers/pcmcia/sa1100_neponset.c4
-rw-r--r--drivers/pcmcia/sa1100_shannon.c8
-rw-r--r--drivers/pcmcia/sa1100_simpad.c2
-rw-r--r--drivers/pcmcia/soc_common.c17
-rw-r--r--drivers/pcmcia/soc_common.h1
-rw-r--r--drivers/pcmcia/socket_sysfs.c52
-rw-r--r--drivers/pnp/base.h74
-rw-r--r--drivers/pnp/card.c55
-rw-r--r--drivers/pnp/core.c46
-rw-r--r--drivers/pnp/driver.c28
-rw-r--r--drivers/pnp/interface.c111
-rw-r--r--drivers/pnp/isapnp/Makefile4
-rw-r--r--drivers/pnp/isapnp/core.c340
-rw-r--r--drivers/pnp/isapnp/proc.c9
-rw-r--r--drivers/pnp/manager.c356
-rw-r--r--drivers/pnp/pnpacpi/Makefile4
-rw-r--r--drivers/pnp/pnpacpi/core.c92
-rw-r--r--drivers/pnp/pnpacpi/pnpacpi.h8
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c589
-rw-r--r--drivers/pnp/pnpbios/Makefile4
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c1
-rw-r--r--drivers/pnp/pnpbios/core.c31
-rw-r--r--drivers/pnp/pnpbios/pnpbios.h140
-rw-r--r--drivers/pnp/pnpbios/proc.c6
-rw-r--r--drivers/pnp/pnpbios/rsparser.c328
-rw-r--r--drivers/pnp/quirks.c15
-rw-r--r--drivers/pnp/resource.c361
-rw-r--r--drivers/pnp/support.c63
-rw-r--r--drivers/pnp/system.c21
-rw-r--r--drivers/power/ds2760_battery.c4
-rw-r--r--drivers/power/olpc_battery.c2
-rw-r--r--drivers/power/pda_power.c11
-rw-r--r--drivers/power/pmu_battery.c2
-rw-r--r--drivers/power/power_supply_core.c6
-rw-r--r--drivers/power/power_supply_leds.c4
-rw-r--r--drivers/ps3/ps3-lpm.c1
-rw-r--r--drivers/ps3/ps3-sys-manager.c7
-rw-r--r--drivers/rapidio/Kconfig8
-rw-r--r--drivers/rapidio/rio-access.c10
-rw-r--r--drivers/rapidio/rio-scan.c55
-rw-r--r--drivers/rapidio/rio-sysfs.c3
-rw-r--r--drivers/rapidio/rio.c2
-rw-r--r--drivers/rapidio/rio.h9
-rw-r--r--drivers/rtc/rtc-bfin.c2
-rw-r--r--drivers/rtc/rtc-cmos.c7
-rw-r--r--drivers/rtc/rtc-ds1307.c66
-rw-r--r--drivers/rtc/rtc-ds1374.c10
-rw-r--r--drivers/rtc/rtc-isl1208.c9
-rw-r--r--drivers/rtc/rtc-m41t80.c81
-rw-r--r--drivers/rtc/rtc-pcf8563.c10
-rw-r--r--drivers/rtc/rtc-proc.c8
-rw-r--r--drivers/rtc/rtc-rs5c372.c27
-rw-r--r--drivers/rtc/rtc-s35390a.c10
-rw-r--r--drivers/rtc/rtc-x1205.c10
-rw-r--r--drivers/s390/block/dasd_proc.c16
-rw-r--r--drivers/s390/char/con3215.c5
-rw-r--r--drivers/s390/char/sclp_config.c17
-rw-r--r--drivers/s390/char/sclp_tty.c4
-rw-r--r--drivers/s390/char/sclp_vt220.c6
-rw-r--r--drivers/s390/char/tape_proc.c9
-rw-r--r--drivers/s390/char/tty3270.c3
-rw-r--r--drivers/s390/cio/blacklist.c7
-rw-r--r--drivers/s390/cio/ccwgroup.c103
-rw-r--r--drivers/s390/cio/cio.c9
-rw-r--r--drivers/s390/cio/cio.h3
-rw-r--r--drivers/s390/cio/cmf.c11
-rw-r--r--drivers/s390/cio/css.c10
-rw-r--r--drivers/s390/cio/device.c17
-rw-r--r--drivers/s390/cio/device_fsm.c10
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/qdio.c12
-rw-r--r--drivers/s390/kvm/kvm_virtio.c23
-rw-r--r--drivers/s390/net/cu3088.c20
-rw-r--r--drivers/s390/net/lcs.c3
-rw-r--r--drivers/s390/net/netiucv.c3
-rw-r--r--drivers/s390/net/qeth_core.h50
-rw-r--r--drivers/s390/net/qeth_core_main.c200
-rw-r--r--drivers/s390/net/qeth_l2_main.c30
-rw-r--r--drivers/s390/net/qeth_l3.h3
-rw-r--r--drivers/s390/net/qeth_l3_main.c30
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/sbus/char/cpwatchdog.c2
-rw-r--r--drivers/sbus/char/uctrl.c4
-rw-r--r--drivers/scsi/53c700.c6
-rw-r--r--drivers/scsi/Kconfig4
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/aacraid/aachba.c133
-rw-r--r--drivers/scsi/aacraid/aacraid.h28
-rw-r--r--drivers/scsi/aacraid/comminit.c2
-rw-r--r--drivers/scsi/aacraid/commsup.c34
-rw-r--r--drivers/scsi/aacraid/linit.c22
-rw-r--r--drivers/scsi/aha152x.c6
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c6
-rw-r--r--drivers/scsi/constants.c10
-rw-r--r--drivers/scsi/dpt/dpti_ioctl.h16
-rw-r--r--drivers/scsi/dpt/dptsig.h8
-rw-r--r--drivers/scsi/dpt/sys_info.h4
-rw-r--r--drivers/scsi/dpt_i2o.c642
-rw-r--r--drivers/scsi/dpti.h15
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/hptiop.c6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c7
-rw-r--r--drivers/scsi/ibmvscsi/viosrp.h9
-rw-r--r--drivers/scsi/ide-scsi.c38
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/megaraid.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c17
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.h1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c13
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/mvsas.c4
-rw-r--r--drivers/scsi/ncr53c8xx.c4
-rw-r--r--drivers/scsi/qla1280.c4
-rw-r--r--drivers/scsi/scsi.c23
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c77
-rw-r--r--drivers/scsi/scsi_error.c16
-rw-r--r--drivers/scsi/scsi_lib.c38
-rw-r--r--drivers/scsi/scsi_proc.c4
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_tgt_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c3
-rw-r--r--drivers/scsi/sd.c1
-rw-r--r--drivers/scsi/sg.c12
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c6
-rw-r--r--drivers/scsi/u14-34f.c6
-rw-r--r--drivers/scsi/ultrastor.c4
-rw-r--r--drivers/serial/68328serial.c21
-rw-r--r--drivers/serial/68360serial.c28
-rw-r--r--drivers/serial/8250.c3
-rw-r--r--drivers/serial/8250_early.c4
-rw-r--r--drivers/serial/8250_pci.c14
-rw-r--r--drivers/serial/Kconfig43
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/bfin_5xx.c4
-rw-r--r--drivers/serial/bfin_sport_uart.c614
-rw-r--r--drivers/serial/bfin_sport_uart.h63
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/serial/crisv10.c31
-rw-r--r--drivers/serial/ioc3_serial.c36
-rw-r--r--drivers/serial/ioc4_serial.c32
-rw-r--r--drivers/serial/jsm/jsm.h1
-rw-r--r--drivers/serial/jsm/jsm_driver.c6
-rw-r--r--drivers/serial/kgdboc.c6
-rw-r--r--drivers/serial/mcfserial.c22
-rw-r--r--drivers/serial/mpc52xx_uart.c4
-rw-r--r--drivers/serial/netx-serial.c1
-rw-r--r--drivers/serial/s3c2410.c13
-rw-r--r--drivers/serial/sa1100.c4
-rw-r--r--drivers/serial/serial_core.c54
-rw-r--r--drivers/serial/sh-sci.c2
-rw-r--r--drivers/serial/sn_console.c2
-rw-r--r--drivers/serial/sunhv.c2
-rw-r--r--drivers/serial/sunsab.c2
-rw-r--r--drivers/serial/sunsu.c2
-rw-r--r--drivers/serial/sunzilog.c2
-rw-r--r--drivers/serial/uartlite.c2
-rw-r--r--drivers/serial/ucc_uart.c4
-rw-r--r--drivers/spi/atmel_spi.c29
-rw-r--r--drivers/spi/spi_bfin5xx.c7
-rw-r--r--drivers/spi/spi_s3c24xx.c6
-rw-r--r--drivers/thermal/Kconfig4
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/thermal_sys.c (renamed from drivers/thermal/thermal.c)165
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/atm/Kconfig4
-rw-r--r--drivers/usb/atm/ueagle-atm.c48
-rw-r--r--drivers/usb/c67x00/Makefile9
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c243
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c412
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h133
-rw-r--r--drivers/usb/c67x00/c67x00-ll-hpi.c480
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c1170
-rw-r--r--drivers/usb/c67x00/c67x00.h294
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/core/inode.c4
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/gadget/Kconfig20
-rw-r--r--drivers/usb/gadget/Makefile1
-rw-r--r--drivers/usb/gadget/at91_udc.c11
-rw-r--r--drivers/usb/gadget/ether.c8
-rw-r--r--drivers/usb/gadget/file_storage.c25
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c7
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2404
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h487
-rw-r--r--drivers/usb/gadget/rndis.c40
-rw-r--r--drivers/usb/gadget/serial.c100
-rw-r--r--drivers/usb/gadget/usbstring.c2
-rw-r--r--drivers/usb/gadget/zero.c370
-rw-r--r--drivers/usb/host/Kconfig39
-rw-r--r--drivers/usb/host/Makefile4
-rw-r--r--drivers/usb/host/ehci-hub.c2
-rw-r--r--drivers/usb/host/isp1760-hcd.c2231
-rw-r--r--drivers/usb/host/isp1760-hcd.h206
-rw-r--r--drivers/usb/host/isp1760-if.c298
-rw-r--r--drivers/usb/host/ohci-hub.c6
-rw-r--r--drivers/usb/host/sl811-hcd.c10
-rw-r--r--drivers/usb/host/uhci-hcd.c74
-rw-r--r--drivers/usb/host/uhci-hcd.h5
-rw-r--r--drivers/usb/misc/ldusb.c28
-rw-r--r--drivers/usb/misc/usbtest.c276
-rw-r--r--drivers/usb/serial/aircable.c98
-rw-r--r--drivers/usb/serial/airprime.c63
-rw-r--r--drivers/usb/serial/ark3116.c54
-rw-r--r--drivers/usb/serial/ch341.c2
-rw-r--r--drivers/usb/serial/digi_acceleport.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c8
-rw-r--r--drivers/usb/serial/ftdi_sio.h11
-rw-r--r--drivers/usb/serial/mos7840.c5
-rw-r--r--drivers/usb/serial/usb-serial.c129
-rw-r--r--drivers/usb/serial/whiteheat.c4
-rw-r--r--drivers/usb/storage/Kconfig3
-rw-r--r--drivers/usb/storage/cypress_atacb.c2
-rw-r--r--drivers/usb/storage/isd200.c2
-rw-r--r--drivers/usb/storage/libusual.c2
-rw-r--r--drivers/usb/storage/onetouch.c4
-rw-r--r--drivers/usb/storage/unusual_devs.h28
-rw-r--r--drivers/usb/storage/usb.c3
-rw-r--r--drivers/video/Kconfig5
-rw-r--r--drivers/video/clps711xfb.c2
-rw-r--r--drivers/video/console/fbcon.c8
-rw-r--r--drivers/video/console/mdacon.c2
-rw-r--r--drivers/video/console/sticon.c4
-rw-r--r--drivers/video/console/vgacon.c4
-rw-r--r--drivers/video/matrox/matroxfb_misc.c28
-rw-r--r--drivers/video/metronomefb.c9
-rw-r--r--drivers/video/pxafb.c1297
-rw-r--r--drivers/video/pxafb.h70
-rw-r--r--drivers/virtio/virtio.c38
-rw-r--r--drivers/virtio/virtio_balloon.c12
-rw-r--r--drivers/virtio/virtio_pci.c34
-rw-r--r--drivers/virtio/virtio_ring.c5
-rw-r--r--drivers/w1/w1_log.h2
-rw-r--r--drivers/zorro/proc.c81
1041 files changed, 57672 insertions, 19107 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 80f0ec91e2cf..59f33fa6af3e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -84,6 +84,8 @@ source "drivers/memstick/Kconfig"
 
 source "drivers/leds/Kconfig"
 
+source "drivers/accessibility/Kconfig"
+
 source "drivers/infiniband/Kconfig"
 
 source "drivers/edac/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index e5e394a7e6c0..f65deda72d61 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_WATCHDOG) += watchdog/
 obj-$(CONFIG_PHONE) += telephony/
 obj-$(CONFIG_MD) += md/
 obj-$(CONFIG_BT) += bluetooth/
+obj-$(CONFIG_ACCESSIBILITY) += accessibility/
 obj-$(CONFIG_ISDN) += isdn/
 obj-$(CONFIG_EDAC) += edac/
 obj-$(CONFIG_MCA) += mca/
diff --git a/drivers/accessibility/Kconfig b/drivers/accessibility/Kconfig
new file mode 100644
index 000000000000..1264c4b98094
--- /dev/null
+++ b/drivers/accessibility/Kconfig
@@ -0,0 +1,23 @@
+menuconfig ACCESSIBILITY
+	bool "Accessibility support"
+	---help---
+	  Enable a submenu where accessibility items may be enabled.
+
+	  If unsure, say N.
+
+if ACCESSIBILITY
+config A11Y_BRAILLE_CONSOLE
+	bool "Console on braille device"
+	depends on VT
+	depends on SERIAL_CORE_CONSOLE
+	---help---
+	  Enables console output on a braille device connected to a 8250
+	  serial port. For now only the VisioBraille device is supported.
+
+	  To actually enable it, you need to pass option
+		console=brl,ttyS0
+	  to the kernel. Options are the same as for serial console.
+
+	  If unsure, say N.
+
+endif # ACCESSIBILITY
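A minimal boot-line sketch of what the help text above describes, assuming a typical setup that also keeps a console on the local VT (the kernel image name and root device below are placeholders; only console=brl,ttyS0 comes from the Kconfig help itself):

    vmlinuz root=/dev/sda1 console=brl,ttyS0 console=tty0

Options after the device name follow the usual serial-console syntax, as the help text notes.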
diff --git a/drivers/accessibility/Makefile b/drivers/accessibility/Makefile
new file mode 100644
index 000000000000..72b01a46546f
--- /dev/null
+++ b/drivers/accessibility/Makefile
@@ -0,0 +1 @@
+obj-y += braille/
diff --git a/drivers/accessibility/braille/Makefile b/drivers/accessibility/braille/Makefile
new file mode 100644
index 000000000000..2e9f16c91347
--- /dev/null
+++ b/drivers/accessibility/braille/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille_console.o
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
new file mode 100644
index 000000000000..0a5f6b2114c5
--- /dev/null
+++ b/drivers/accessibility/braille/braille_console.c
@@ -0,0 +1,397 @@
1/*
2 * Minimalistic braille device kernel support.
3 *
4 * By default, shows console messages on the braille device.
5 * Pressing Insert switches to VC browsing.
6 *
7 * Copyright (C) Samuel Thibault <samuel.thibault@ens-lyon.org>
8 *
9 * This program is free software ; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation ; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY ; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with the program ; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/autoconf.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/console.h>
29#include <linux/notifier.h>
30
31#include <linux/selection.h>
32#include <linux/vt_kern.h>
33#include <linux/consolemap.h>
34
35#include <linux/keyboard.h>
36#include <linux/kbd_kern.h>
37#include <linux/input.h>
38
39MODULE_AUTHOR("samuel.thibault@ens-lyon.org");
40MODULE_DESCRIPTION("braille device");
41MODULE_LICENSE("GPL");
42
43/*
44 * Braille device support part.
45 */
46
47/* Emit various sounds */
48static int sound;
49module_param(sound, bool, 0);
50MODULE_PARM_DESC(sound, "emit sounds");
51
52static void beep(unsigned int freq)
53{
54 if (sound)
55 kd_mksound(freq, HZ/10);
56}
57
58/* mini console */
59#define WIDTH 40
60#define BRAILLE_KEY KEY_INSERT
61static u16 console_buf[WIDTH];
62static int console_cursor;
63
64/* mini view of VC */
65static int vc_x, vc_y, lastvc_x, lastvc_y;
66
67/* show console ? (or show VC) */
68static int console_show = 1;
69/* pending newline ? */
70static int console_newline = 1;
71static int lastVC = -1;
72
73static struct console *braille_co;
74
75/* Very VisioBraille-specific */
76static void braille_write(u16 *buf)
77{
78 static u16 lastwrite[WIDTH];
79 unsigned char data[1 + 1 + 2*WIDTH + 2 + 1], csum = 0, *c;
80 u16 out;
81 int i;
82
83 if (!braille_co)
84 return;
85
86 if (!memcmp(lastwrite, buf, WIDTH * sizeof(*buf)))
87 return;
88 memcpy(lastwrite, buf, WIDTH * sizeof(*buf));
89
90#define SOH 1
91#define STX 2
92#define ETX 2
93#define EOT 4
94#define ENQ 5
95 data[0] = STX;
96 data[1] = '>';
97 csum ^= '>';
98 c = &data[2];
99 for (i = 0; i < WIDTH; i++) {
100 out = buf[i];
101 if (out >= 0x100)
102 out = '?';
103 else if (out == 0x00)
104 out = ' ';
105 csum ^= out;
106 if (out <= 0x05) {
107 *c++ = SOH;
108 out |= 0x40;
109 }
110 *c++ = out;
111 }
112
113 if (csum <= 0x05) {
114 *c++ = SOH;
115 csum |= 0x40;
116 }
117 *c++ = csum;
118 *c++ = ETX;
119
120 braille_co->write(braille_co, data, c - data);
121}
122
123/* Follow the VC cursor*/
124static void vc_follow_cursor(struct vc_data *vc)
125{
126 vc_x = vc->vc_x - (vc->vc_x % WIDTH);
127 vc_y = vc->vc_y;
128 lastvc_x = vc->vc_x;
129 lastvc_y = vc->vc_y;
130}
131
132/* Maybe the VC cursor moved, if so follow it */
133static void vc_maybe_cursor_moved(struct vc_data *vc)
134{
135 if (vc->vc_x != lastvc_x || vc->vc_y != lastvc_y)
136 vc_follow_cursor(vc);
137}
138
139/* Show portion of VC at vc_x, vc_y */
140static void vc_refresh(struct vc_data *vc)
141{
142 u16 buf[WIDTH];
143 int i;
144
145 for (i = 0; i < WIDTH; i++) {
146 u16 glyph = screen_glyph(vc,
147 2 * (vc_x + i) + vc_y * vc->vc_size_row);
148 buf[i] = inverse_translate(vc, glyph, 1);
149 }
150 braille_write(buf);
151}
152
153/*
154 * Link to keyboard
155 */
156
157static int keyboard_notifier_call(struct notifier_block *blk,
158 unsigned long code, void *_param)
159{
160 struct keyboard_notifier_param *param = _param;
161 struct vc_data *vc = param->vc;
162 int ret = NOTIFY_OK;
163
164 if (!param->down)
165 return ret;
166
167 switch (code) {
168 case KBD_KEYCODE:
169 if (console_show) {
170 if (param->value == BRAILLE_KEY) {
171 console_show = 0;
172 beep(880);
173 vc_maybe_cursor_moved(vc);
174 vc_refresh(vc);
175 ret = NOTIFY_STOP;
176 }
177 } else {
178 ret = NOTIFY_STOP;
179 switch (param->value) {
180 case KEY_INSERT:
181 beep(440);
182 console_show = 1;
183 lastVC = -1;
184 braille_write(console_buf);
185 break;
186 case KEY_LEFT:
187 if (vc_x > 0) {
188 vc_x -= WIDTH;
189 if (vc_x < 0)
190 vc_x = 0;
191 } else if (vc_y >= 1) {
192 beep(880);
193 vc_y--;
194 vc_x = vc->vc_cols-WIDTH;
195 } else
196 beep(220);
197 break;
198 case KEY_RIGHT:
199 if (vc_x + WIDTH < vc->vc_cols) {
200 vc_x += WIDTH;
201 } else if (vc_y + 1 < vc->vc_rows) {
202 beep(880);
203 vc_y++;
204 vc_x = 0;
205 } else
206 beep(220);
207 break;
208 case KEY_DOWN:
209 if (vc_y + 1 < vc->vc_rows)
210 vc_y++;
211 else
212 beep(220);
213 break;
214 case KEY_UP:
215 if (vc_y >= 1)
216 vc_y--;
217 else
218 beep(220);
219 break;
220 case KEY_HOME:
221 vc_follow_cursor(vc);
222 break;
223 case KEY_PAGEUP:
224 vc_x = 0;
225 vc_y = 0;
226 break;
227 case KEY_PAGEDOWN:
228 vc_x = 0;
229 vc_y = vc->vc_rows-1;
230 break;
231 default:
232 ret = NOTIFY_OK;
233 break;
234 }
235 if (ret == NOTIFY_STOP)
236 vc_refresh(vc);
237 }
238 break;
239 case KBD_POST_KEYSYM:
240 {
241 unsigned char type = KTYP(param->value) - 0xf0;
242 if (type == KT_SPEC) {
243 unsigned char val = KVAL(param->value);
244 int on_off = -1;
245
246 switch (val) {
247 case KVAL(K_CAPS):
248 on_off = vc_kbd_led(kbd_table + fg_console,
249 VC_CAPSLOCK);
250 break;
251 case KVAL(K_NUM):
252 on_off = vc_kbd_led(kbd_table + fg_console,
253 VC_NUMLOCK);
254 break;
255 case KVAL(K_HOLD):
256 on_off = vc_kbd_led(kbd_table + fg_console,
257 VC_SCROLLOCK);
258 break;
259 }
260 if (on_off == 1)
261 beep(880);
262 else if (on_off == 0)
263 beep(440);
264 }
265 }
266 case KBD_UNBOUND_KEYCODE:
267 case KBD_UNICODE:
268 case KBD_KEYSYM:
269 /* Unused */
270 break;
271 }
272 return ret;
273}
274
275static struct notifier_block keyboard_notifier_block = {
276 .notifier_call = keyboard_notifier_call,
277};
278
279static int vt_notifier_call(struct notifier_block *blk,
280 unsigned long code, void *_param)
281{
282 struct vt_notifier_param *param = _param;
283 struct vc_data *vc = param->vc;
284 switch (code) {
285 case VT_ALLOCATE:
286 break;
287 case VT_DEALLOCATE:
288 break;
289 case VT_WRITE:
290 {
291 unsigned char c = param->c;
292 if (vc->vc_num != fg_console)
293 break;
294 switch (c) {
295 case '\b':
296 case 127:
297 if (console_cursor > 0) {
298 console_cursor--;
299 console_buf[console_cursor] = ' ';
300 }
301 break;
302 case '\n':
303 case '\v':
304 case '\f':
305 case '\r':
306 console_newline = 1;
307 break;
308 case '\t':
309 c = ' ';
310 /* Fallthrough */
311 default:
312 if (c < 32)
313 /* Ignore other control sequences */
314 break;
315 if (console_newline) {
316 memset(console_buf, 0, sizeof(console_buf));
317 console_cursor = 0;
318 console_newline = 0;
319 }
320 if (console_cursor == WIDTH)
321 memmove(console_buf, &console_buf[1],
322 (WIDTH-1) * sizeof(*console_buf));
323 else
324 console_cursor++;
325 console_buf[console_cursor-1] = c;
326 break;
327 }
328 if (console_show)
329 braille_write(console_buf);
330 else {
331 vc_maybe_cursor_moved(vc);
332 vc_refresh(vc);
333 }
334 break;
335 }
336 case VT_UPDATE:
337 /* Maybe a VT switch, flush */
338 if (console_show) {
339 if (vc->vc_num != lastVC) {
340 lastVC = vc->vc_num;
341 memset(console_buf, 0, sizeof(console_buf));
342 console_cursor = 0;
343 braille_write(console_buf);
344 }
345 } else {
346 vc_maybe_cursor_moved(vc);
347 vc_refresh(vc);
348 }
349 break;
350 }
351 return NOTIFY_OK;
352}
353
354static struct notifier_block vt_notifier_block = {
355 .notifier_call = vt_notifier_call,
356};
357
358/*
359 * Called from printk.c when console=brl is given
360 */
361
362int braille_register_console(struct console *console, int index,
363 char *console_options, char *braille_options)
364{
365 int ret;
366 if (!console_options)
367 /* Only support VisioBraille for now */
368 console_options = "57600o8";
369 if (braille_co)
370 return -ENODEV;
371 if (console->setup) {
372 ret = console->setup(console, console_options);
373 if (ret != 0)
374 return ret;
375 }
376 console->flags |= CON_ENABLED;
377 console->index = index;
378 braille_co = console;
379 return 0;
380}
381
382int braille_unregister_console(struct console *console)
383{
384 if (braille_co != console)
385 return -EINVAL;
386 braille_co = NULL;
387 return 0;
388}
389
390static int __init braille_init(void)
391{
392 register_keyboard_notifier(&keyboard_notifier_block);
393 register_vt_notifier(&vt_notifier_block);
394 return 0;
395}
396
397console_initcall(braille_init);
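
The braille_console.c file above hooks into the kernel purely through the keyboard and VT notifier chains registered in braille_init(), and only becomes the active output path once braille_register_console() has been called from the console= boot parameter handling. As a quick orientation aid, here is a hedged sketch, not part of the patch, of how a console setup path could hand a serial console over to the braille layer when the user boots with console=brl,ttyS0; every example_ identifier is invented for illustration.

#include <linux/console.h>

/* Prototype copied from braille_console.c above; the real caller sits in
 * kernel/printk.c behind the console= parameter parsing. */
extern int braille_register_console(struct console *console, int index,
				    char *console_options, char *braille_options);

/* Stand-in for the struct console of the port the braille device hangs off. */
extern struct console example_serial_console;

static int __init example_enable_braille(char *braille_options)
{
	/*
	 * Passing NULL console_options makes braille_register_console()
	 * fall back to the VisioBraille default "57600o8"; index 0 picks
	 * the first port of that driver.
	 */
	return braille_register_console(&example_serial_console, 0,
					NULL, braille_options);
}

The matching braille_unregister_console() call only clears braille_co, so after unregistration the port keeps working as an ordinary text console.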
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b4f5e8542829..c52fca833268 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -140,6 +140,7 @@ config ACPI_VIDEO
140 tristate "Video" 140 tristate "Video"
141 depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL 141 depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
142 depends on INPUT 142 depends on INPUT
143 select THERMAL
143 help 144 help
144 This driver implement the ACPI Extensions For Display Adapters 145 This driver implement the ACPI Extensions For Display Adapters
145 for integrated graphics devices on motherboard, as specified in 146 for integrated graphics devices on motherboard, as specified in
@@ -151,6 +152,7 @@ config ACPI_VIDEO
151 152
152config ACPI_FAN 153config ACPI_FAN
153 tristate "Fan" 154 tristate "Fan"
155 select THERMAL
154 default y 156 default y
155 help 157 help
156 This driver adds support for ACPI fan devices, allowing user-mode 158 This driver adds support for ACPI fan devices, allowing user-mode
@@ -172,6 +174,7 @@ config ACPI_BAY
172 174
173config ACPI_PROCESSOR 175config ACPI_PROCESSOR
174 tristate "Processor" 176 tristate "Processor"
177 select THERMAL
175 default y 178 default y
176 help 179 help
177 This driver installs ACPI as the idle handler for Linux, and uses 180 This driver installs ACPI as the idle handler for Linux, and uses
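
The three select THERMAL lines are added because the ACPI video, fan and processor drivers in this merge start registering cooling devices with the generic thermal layer (the fan.c changes in the diffstat are part of the same series), so CONFIG_THERMAL must be built whenever those drivers are. A hedged sketch of the call that creates the dependency is below; the callback table is deliberately left empty because the real one lives in drivers/acpi/fan.c, and the example_ names are invented.

#include <linux/thermal.h>

/* Callbacks omitted here; see the real fan_cooling_ops in drivers/acpi/fan.c. */
static struct thermal_cooling_device_ops example_fan_cooling_ops;

static struct thermal_cooling_device *example_register_fan_cooling(void *acpi_device)
{
	/*
	 * thermal_cooling_device_register() is provided by CONFIG_THERMAL;
	 * without the new 'select THERMAL' this symbol would be missing
	 * whenever ACPI_FAN was enabled but THERMAL was left unset.
	 */
	return thermal_cooling_device_register("Fan", acpi_device,
					       &example_fan_cooling_ops);
}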
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 43a95e5640de..5b73f6a2cd86 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -92,6 +92,7 @@ struct acpi_ac {
92 92
93#ifdef CONFIG_ACPI_PROCFS_POWER 93#ifdef CONFIG_ACPI_PROCFS_POWER
94static const struct file_operations acpi_ac_fops = { 94static const struct file_operations acpi_ac_fops = {
95 .owner = THIS_MODULE,
95 .open = acpi_ac_open_fs, 96 .open = acpi_ac_open_fs,
96 .read = seq_read, 97 .read = seq_read,
97 .llseek = seq_lseek, 98 .llseek = seq_lseek,
@@ -195,16 +196,11 @@ static int acpi_ac_add_fs(struct acpi_device *device)
195 } 196 }
196 197
197 /* 'state' [R] */ 198 /* 'state' [R] */
198 entry = create_proc_entry(ACPI_AC_FILE_STATE, 199 entry = proc_create_data(ACPI_AC_FILE_STATE,
199 S_IRUGO, acpi_device_dir(device)); 200 S_IRUGO, acpi_device_dir(device),
201 &acpi_ac_fops, acpi_driver_data(device));
200 if (!entry) 202 if (!entry)
201 return -ENODEV; 203 return -ENODEV;
202 else {
203 entry->proc_fops = &acpi_ac_fops;
204 entry->data = acpi_driver_data(device);
205 entry->owner = THIS_MODULE;
206 }
207
208 return 0; 204 return 0;
209} 205}
210 206
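
The ac.c hunk above, and the matching battery.c and button.c hunks below, are a mechanical conversion from create_proc_entry() followed by hand-filling proc_fops, data and owner, to proc_create_data(), which publishes the /proc entry only after its file_operations and private data are in place (the module reference moves into the .owner field of the file_operations, as the first hunk shows). A hedged side-by-side sketch of the two idioms, with example_fops and example_data standing in for the per-driver operations and the acpi_driver_data() pointer:

#include <linux/module.h>
#include <linux/proc_fs.h>

static const struct file_operations example_fops;	/* placeholder */

static int example_add_proc_entry(struct proc_dir_entry *parent, void *example_data)
{
	struct proc_dir_entry *entry;

	/* Old idiom: the entry is visible in /proc before fops/data are set. */
	entry = create_proc_entry("state_old", S_IRUGO, parent);
	if (!entry)
		return -ENODEV;
	entry->proc_fops = &example_fops;
	entry->data = example_data;
	entry->owner = THIS_MODULE;

	/* New idiom used by this patch: one call, nothing half-initialized. */
	entry = proc_create_data("state", S_IRUGO, parent,
				 &example_fops, example_data);
	return entry ? 0 : -ENODEV;
}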
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index d5729d5dc190..b1c723f9f58d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -741,15 +741,13 @@ static int acpi_battery_add_fs(struct acpi_device *device)
741 } 741 }
742 742
743 for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) { 743 for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
744 entry = create_proc_entry(acpi_battery_file[i].name, 744 entry = proc_create_data(acpi_battery_file[i].name,
745 acpi_battery_file[i].mode, acpi_device_dir(device)); 745 acpi_battery_file[i].mode,
746 acpi_device_dir(device),
747 &acpi_battery_file[i].ops,
748 acpi_driver_data(device));
746 if (!entry) 749 if (!entry)
747 return -ENODEV; 750 return -ENODEV;
748 else {
749 entry->proc_fops = &acpi_battery_file[i].ops;
750 entry->data = acpi_driver_data(device);
751 entry->owner = THIS_MODULE;
752 }
753 } 751 }
754 return 0; 752 return 0;
755} 753}
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
index 1fa86811b8ee..d2fc94161848 100644
--- a/drivers/acpi/bay.c
+++ b/drivers/acpi/bay.c
@@ -201,6 +201,7 @@ static int is_ejectable_bay(acpi_handle handle)
201 return 0; 201 return 0;
202} 202}
203 203
204#if 0
204/** 205/**
205 * eject_removable_drive - try to eject this drive 206 * eject_removable_drive - try to eject this drive
206 * @dev : the device structure of the drive 207 * @dev : the device structure of the drive
@@ -225,6 +226,7 @@ int eject_removable_drive(struct device *dev)
225 return 0; 226 return 0;
226} 227}
227EXPORT_SYMBOL_GPL(eject_removable_drive); 228EXPORT_SYMBOL_GPL(eject_removable_drive);
229#endif /* 0 */
228 230
229static int acpi_bay_add_fs(struct bay *bay) 231static int acpi_bay_add_fs(struct bay *bay)
230{ 232{
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 2d1955c11833..a6dbcf4d9ef5 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -35,6 +35,7 @@
35#ifdef CONFIG_X86 35#ifdef CONFIG_X86
36#include <asm/mpspec.h> 36#include <asm/mpspec.h>
37#endif 37#endif
38#include <linux/pci.h>
38#include <acpi/acpi_bus.h> 39#include <acpi/acpi_bus.h>
39#include <acpi/acpi_drivers.h> 40#include <acpi/acpi_drivers.h>
40 41
@@ -784,6 +785,7 @@ static int __init acpi_init(void)
784 result = acpi_bus_init(); 785 result = acpi_bus_init();
785 786
786 if (!result) { 787 if (!result) {
788 pci_mmcfg_late_init();
787 if (!(pm_flags & PM_APM)) 789 if (!(pm_flags & PM_APM))
788 pm_flags |= PM_ACPI; 790 pm_flags |= PM_ACPI;
789 else { 791 else {
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 6c5da83cdb68..1dfec413588c 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -102,6 +102,7 @@ struct acpi_button {
102}; 102};
103 103
104static const struct file_operations acpi_button_info_fops = { 104static const struct file_operations acpi_button_info_fops = {
105 .owner = THIS_MODULE,
105 .open = acpi_button_info_open_fs, 106 .open = acpi_button_info_open_fs,
106 .read = seq_read, 107 .read = seq_read,
107 .llseek = seq_lseek, 108 .llseek = seq_lseek,
@@ -109,6 +110,7 @@ static const struct file_operations acpi_button_info_fops = {
109}; 110};
110 111
111static const struct file_operations acpi_button_state_fops = { 112static const struct file_operations acpi_button_state_fops = {
113 .owner = THIS_MODULE,
112 .open = acpi_button_state_open_fs, 114 .open = acpi_button_state_open_fs,
113 .read = seq_read, 115 .read = seq_read,
114 .llseek = seq_lseek, 116 .llseek = seq_lseek,
@@ -207,27 +209,21 @@ static int acpi_button_add_fs(struct acpi_device *device)
207 acpi_device_dir(device)->owner = THIS_MODULE; 209 acpi_device_dir(device)->owner = THIS_MODULE;
208 210
209 /* 'info' [R] */ 211 /* 'info' [R] */
210 entry = create_proc_entry(ACPI_BUTTON_FILE_INFO, 212 entry = proc_create_data(ACPI_BUTTON_FILE_INFO,
211 S_IRUGO, acpi_device_dir(device)); 213 S_IRUGO, acpi_device_dir(device),
214 &acpi_button_info_fops,
215 acpi_driver_data(device));
212 if (!entry) 216 if (!entry)
213 return -ENODEV; 217 return -ENODEV;
214 else {
215 entry->proc_fops = &acpi_button_info_fops;
216 entry->data = acpi_driver_data(device);
217 entry->owner = THIS_MODULE;
218 }
219 218
220 /* show lid state [R] */ 219 /* show lid state [R] */
221 if (button->type == ACPI_BUTTON_TYPE_LID) { 220 if (button->type == ACPI_BUTTON_TYPE_LID) {
222 entry = create_proc_entry(ACPI_BUTTON_FILE_STATE, 221 entry = proc_create_data(ACPI_BUTTON_FILE_STATE,
223 S_IRUGO, acpi_device_dir(device)); 222 S_IRUGO, acpi_device_dir(device),
223 &acpi_button_state_fops,
224 acpi_driver_data(device));
224 if (!entry) 225 if (!entry)
225 return -ENODEV; 226 return -ENODEV;
226 else {
227 entry->proc_fops = &acpi_button_state_fops;
228 entry->data = acpi_driver_data(device);
229 entry->owner = THIS_MODULE;
230 }
231 } 227 }
232 228
233 return 0; 229 return 0;
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
index f049639bac35..c78078315be9 100644
--- a/drivers/acpi/dispatcher/dsfield.c
+++ b/drivers/acpi/dispatcher/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -89,12 +89,16 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
89 89
90 ACPI_FUNCTION_TRACE(ds_create_buffer_field); 90 ACPI_FUNCTION_TRACE(ds_create_buffer_field);
91 91
92 /* Get the name_string argument */ 92 /*
93 93 * Get the name_string argument (name of the new buffer_field)
94 */
94 if (op->common.aml_opcode == AML_CREATE_FIELD_OP) { 95 if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
96
97 /* For create_field, name is the 4th argument */
98
95 arg = acpi_ps_get_arg(op, 3); 99 arg = acpi_ps_get_arg(op, 3);
96 } else { 100 } else {
97 /* Create Bit/Byte/Word/Dword field */ 101 /* For all other create_xXXField operators, name is the 3rd argument */
98 102
99 arg = acpi_ps_get_arg(op, 2); 103 arg = acpi_ps_get_arg(op, 2);
100 } 104 }
@@ -107,26 +111,30 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
107 node = walk_state->deferred_node; 111 node = walk_state->deferred_node;
108 status = AE_OK; 112 status = AE_OK;
109 } else { 113 } else {
110 /* 114 /* Execute flag should always be set when this function is entered */
111 * During the load phase, we want to enter the name of the field into 115
112 * the namespace. During the execute phase (when we evaluate the size 116 if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
113 * operand), we want to lookup the name 117 return_ACPI_STATUS(AE_AML_INTERNAL);
114 */
115 if (walk_state->parse_flags & ACPI_PARSE_EXECUTE) {
116 flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE;
117 } else {
118 flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
119 ACPI_NS_ERROR_IF_FOUND;
120 } 118 }
121 119
122 /* 120 /* Creating new namespace node, should not already exist */
123 * Enter the name_string into the namespace 121
124 */ 122 flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
123 ACPI_NS_ERROR_IF_FOUND;
124
125 /* Mark node temporary if we are executing a method */
126
127 if (walk_state->method_node) {
128 flags |= ACPI_NS_TEMPORARY;
129 }
130
131 /* Enter the name_string into the namespace */
132
125 status = 133 status =
126 acpi_ns_lookup(walk_state->scope_info, 134 acpi_ns_lookup(walk_state->scope_info,
127 arg->common.value.string, ACPI_TYPE_ANY, 135 arg->common.value.string, ACPI_TYPE_ANY,
128 ACPI_IMODE_LOAD_PASS1, flags, walk_state, 136 ACPI_IMODE_LOAD_PASS1, flags, walk_state,
129 &(node)); 137 &node);
130 if (ACPI_FAILURE(status)) { 138 if (ACPI_FAILURE(status)) {
131 ACPI_ERROR_NAMESPACE(arg->common.value.string, status); 139 ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
132 return_ACPI_STATUS(status); 140 return_ACPI_STATUS(status);
@@ -136,13 +144,13 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
136 /* 144 /*
137 * We could put the returned object (Node) on the object stack for later, 145 * We could put the returned object (Node) on the object stack for later,
138 * but for now, we will put it in the "op" object that the parser uses, 146 * but for now, we will put it in the "op" object that the parser uses,
139 * so we can get it again at the end of this scope 147 * so we can get it again at the end of this scope.
140 */ 148 */
141 op->common.node = node; 149 op->common.node = node;
142 150
143 /* 151 /*
144 * If there is no object attached to the node, this node was just created 152 * If there is no object attached to the node, this node was just created
145 * and we need to create the field object. Otherwise, this was a lookup 153 * and we need to create the field object. Otherwise, this was a lookup
146 * of an existing node and we don't want to create the field object again. 154 * of an existing node and we don't want to create the field object again.
147 */ 155 */
148 obj_desc = acpi_ns_get_attached_object(node); 156 obj_desc = acpi_ns_get_attached_object(node);
@@ -164,9 +172,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
164 } 172 }
165 173
166 /* 174 /*
167 * Remember location in AML stream of the field unit 175 * Remember location in AML stream of the field unit opcode and operands --
168 * opcode and operands -- since the buffer and index 176 * since the buffer and index operands must be evaluated.
169 * operands must be evaluated.
170 */ 177 */
171 second_desc = obj_desc->common.next_object; 178 second_desc = obj_desc->common.next_object;
172 second_desc->extra.aml_start = op->named.data; 179 second_desc->extra.aml_start = op->named.data;
@@ -261,7 +268,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
261 268
262 case AML_INT_NAMEDFIELD_OP: 269 case AML_INT_NAMEDFIELD_OP:
263 270
264 /* Lookup the name */ 271 /* Lookup the name, it should already exist */
265 272
266 status = acpi_ns_lookup(walk_state->scope_info, 273 status = acpi_ns_lookup(walk_state->scope_info,
267 (char *)&arg->named.name, 274 (char *)&arg->named.name,
@@ -272,20 +279,23 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
272 if (ACPI_FAILURE(status)) { 279 if (ACPI_FAILURE(status)) {
273 ACPI_ERROR_NAMESPACE((char *)&arg->named.name, 280 ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
274 status); 281 status);
275 if (status != AE_ALREADY_EXISTS) { 282 return_ACPI_STATUS(status);
276 return_ACPI_STATUS(status);
277 }
278
279 /* Already exists, ignore error */
280 } else { 283 } else {
281 arg->common.node = info->field_node; 284 arg->common.node = info->field_node;
282 info->field_bit_length = arg->common.value.size; 285 info->field_bit_length = arg->common.value.size;
283 286
284 /* Create and initialize an object for the new Field Node */ 287 /*
285 288 * If there is no object attached to the node, this node was
286 status = acpi_ex_prep_field_value(info); 289 * just created and we need to create the field object.
287 if (ACPI_FAILURE(status)) { 290 * Otherwise, this was a lookup of an existing node and we
288 return_ACPI_STATUS(status); 291 * don't want to create the field object again.
292 */
293 if (!acpi_ns_get_attached_object
294 (info->field_node)) {
295 status = acpi_ex_prep_field_value(info);
296 if (ACPI_FAILURE(status)) {
297 return_ACPI_STATUS(status);
298 }
289 } 299 }
290 } 300 }
291 301
@@ -399,9 +409,27 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
399 union acpi_parse_object *arg = NULL; 409 union acpi_parse_object *arg = NULL;
400 struct acpi_namespace_node *node; 410 struct acpi_namespace_node *node;
401 u8 type = 0; 411 u8 type = 0;
412 u32 flags;
402 413
403 ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op); 414 ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op);
404 415
416 /* Execute flag should always be set when this function is entered */
417
418 if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
419 if (walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP) {
420
421 /* bank_field Op is deferred, just return OK */
422
423 return_ACPI_STATUS(AE_OK);
424 }
425
426 return_ACPI_STATUS(AE_AML_INTERNAL);
427 }
428
429 /*
430 * Get the field_list argument for this opcode. This is the start of the
431 * list of field elements.
432 */
405 switch (walk_state->opcode) { 433 switch (walk_state->opcode) {
406 case AML_FIELD_OP: 434 case AML_FIELD_OP:
407 arg = acpi_ps_get_arg(op, 2); 435 arg = acpi_ps_get_arg(op, 2);
@@ -422,20 +450,33 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
422 return_ACPI_STATUS(AE_BAD_PARAMETER); 450 return_ACPI_STATUS(AE_BAD_PARAMETER);
423 } 451 }
424 452
453 if (!arg) {
454 return_ACPI_STATUS(AE_AML_NO_OPERAND);
455 }
456
457 /* Creating new namespace node(s), should not already exist */
458
459 flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
460 ACPI_NS_ERROR_IF_FOUND;
461
462 /* Mark node(s) temporary if we are executing a method */
463
464 if (walk_state->method_node) {
465 flags |= ACPI_NS_TEMPORARY;
466 }
467
425 /* 468 /*
426 * Walk the list of entries in the field_list 469 * Walk the list of entries in the field_list
427 */ 470 */
428 while (arg) { 471 while (arg) {
429 472 /*
430 /* Ignore OFFSET and ACCESSAS terms here */ 473 * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
431 474 * field names in order to enter them into the namespace.
475 */
432 if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) { 476 if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
433 status = acpi_ns_lookup(walk_state->scope_info, 477 status = acpi_ns_lookup(walk_state->scope_info,
434 (char *)&arg->named.name, 478 (char *)&arg->named.name, type,
435 type, ACPI_IMODE_LOAD_PASS1, 479 ACPI_IMODE_LOAD_PASS1, flags,
436 ACPI_NS_NO_UPSEARCH |
437 ACPI_NS_DONT_OPEN_SCOPE |
438 ACPI_NS_ERROR_IF_FOUND,
439 walk_state, &node); 480 walk_state, &node);
440 if (ACPI_FAILURE(status)) { 481 if (ACPI_FAILURE(status)) {
441 ACPI_ERROR_NAMESPACE((char *)&arg->named.name, 482 ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
@@ -452,7 +493,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
452 arg->common.node = node; 493 arg->common.node = node;
453 } 494 }
454 495
455 /* Move to next field in the list */ 496 /* Get the next field element in the list */
456 497
457 arg = arg->common.next; 498 arg = arg->common.next;
458 } 499 }
@@ -466,7 +507,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
466 * 507 *
467 * PARAMETERS: Op - Op containing the Field definition and args 508 * PARAMETERS: Op - Op containing the Field definition and args
468 * region_node - Object for the containing Operation Region 509 * region_node - Object for the containing Operation Region
469 * ` walk_state - Current method state 510 * walk_state - Current method state
470 * 511 *
471 * RETURN: Status 512 * RETURN: Status
472 * 513 *
@@ -513,36 +554,13 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
513 return_ACPI_STATUS(status); 554 return_ACPI_STATUS(status);
514 } 555 }
515 556
516 /* Third arg is the bank_value */ 557 /*
517 558 * Third arg is the bank_value
518 /* TBD: This arg is a term_arg, not a constant, and must be evaluated */ 559 * This arg is a term_arg, not a constant
519 560 * It will be evaluated later, by acpi_ds_eval_bank_field_operands
561 */
520 arg = arg->common.next; 562 arg = arg->common.next;
521 563
522 /* Currently, only the following constants are supported */
523
524 switch (arg->common.aml_opcode) {
525 case AML_ZERO_OP:
526 info.bank_value = 0;
527 break;
528
529 case AML_ONE_OP:
530 info.bank_value = 1;
531 break;
532
533 case AML_BYTE_OP:
534 case AML_WORD_OP:
535 case AML_DWORD_OP:
536 case AML_QWORD_OP:
537 info.bank_value = (u32) arg->common.value.integer;
538 break;
539
540 default:
541 info.bank_value = 0;
542 ACPI_ERROR((AE_INFO,
543 "Non-constant BankValue for BankField is not implemented"));
544 }
545
546 /* Fourth arg is the field flags */ 564 /* Fourth arg is the field flags */
547 565
548 arg = arg->common.next; 566 arg = arg->common.next;
@@ -553,8 +571,17 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
553 info.field_type = ACPI_TYPE_LOCAL_BANK_FIELD; 571 info.field_type = ACPI_TYPE_LOCAL_BANK_FIELD;
554 info.region_node = region_node; 572 info.region_node = region_node;
555 573
556 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); 574 /*
575 * Use Info.data_register_node to store bank_field Op
576 * It's safe because data_register_node will never be used when create bank field
577 * We store aml_start and aml_length in the bank_field Op for late evaluation
578 * Used in acpi_ex_prep_field_value(Info)
579 *
580 * TBD: Or, should we add a field in struct acpi_create_field_info, like "void *ParentOp"?
581 */
582 info.data_register_node = (struct acpi_namespace_node *)op;
557 583
584 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
558 return_ACPI_STATUS(status); 585 return_ACPI_STATUS(status);
559} 586}
560 587
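
Across dsfield.c the same three-part pattern repeats: refuse to run outside the execute pass, treat every field name as the creation of a brand-new namespace node (AE_ALREADY_EXISTS is no longer silently ignored), and mark nodes created from within a control method as temporary so they disappear when the method completes. A condensed, hedged restatement of that lookup pattern, wrapped in an invented helper with a placeholder path argument:

/* Assumes the usual ACPICA environment (<acpi/acpi.h>, acnamesp.h). */
static acpi_status example_enter_field_name(struct acpi_walk_state *walk_state,
					    char *path,
					    struct acpi_namespace_node **node)
{
	u32 flags;

	/* Field creation now only happens in the execute pass. */
	if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE))
		return AE_AML_INTERNAL;

	/* Creating a new node -- an existing name is a hard error. */
	flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
		ACPI_NS_ERROR_IF_FOUND;

	/* Nodes created while a method runs are deleted on method exit. */
	if (walk_state->method_node)
		flags |= ACPI_NS_TEMPORARY;

	return acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_ANY,
			      ACPI_IMODE_LOAD_PASS1, flags, walk_state, node);
}

The BankField-specific part of the hunk (stashing the Op in info.data_register_node) feeds the late BankValue evaluation added in dsopcode.c further down.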
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
index af923c388520..610b1ee102b0 100644
--- a/drivers/acpi/dispatcher/dsinit.c
+++ b/drivers/acpi/dispatcher/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index 1cbe61905824..e48a3ea03117 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,6 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acparser.h>
46#include <acpi/amlcode.h> 45#include <acpi/amlcode.h>
47#include <acpi/acdispat.h> 46#include <acpi/acdispat.h>
48#include <acpi/acinterp.h> 47#include <acpi/acinterp.h>
@@ -102,7 +101,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
102 walk_state->opcode, 101 walk_state->opcode,
103 walk_state->aml_offset, 102 walk_state->aml_offset,
104 NULL); 103 NULL);
105 (void)acpi_ex_enter_interpreter(); 104 acpi_ex_enter_interpreter();
106 } 105 }
107#ifdef ACPI_DISASSEMBLER 106#ifdef ACPI_DISASSEMBLER
108 if (ACPI_FAILURE(status)) { 107 if (ACPI_FAILURE(status)) {
@@ -232,9 +231,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
232 * recursive call. 231 * recursive call.
233 */ 232 */
234 if (!walk_state || 233 if (!walk_state ||
235 !obj_desc->method.mutex->mutex.owner_thread || 234 !obj_desc->method.mutex->mutex.thread_id ||
236 (walk_state->thread != 235 (walk_state->thread->thread_id !=
237 obj_desc->method.mutex->mutex.owner_thread)) { 236 obj_desc->method.mutex->mutex.thread_id)) {
238 /* 237 /*
239 * Acquire the method mutex. This releases the interpreter if we 238 * Acquire the method mutex. This releases the interpreter if we
240 * block (and reacquires it before it returns) 239 * block (and reacquires it before it returns)
@@ -254,8 +253,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
254 original_sync_level = 253 original_sync_level =
255 walk_state->thread->current_sync_level; 254 walk_state->thread->current_sync_level;
256 255
257 obj_desc->method.mutex->mutex.owner_thread = 256 obj_desc->method.mutex->mutex.thread_id =
258 walk_state->thread; 257 walk_state->thread->thread_id;
259 walk_state->thread->current_sync_level = 258 walk_state->thread->current_sync_level =
260 obj_desc->method.sync_level; 259 obj_desc->method.sync_level;
261 } else { 260 } else {
@@ -535,8 +534,6 @@ void
535acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, 534acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
536 struct acpi_walk_state *walk_state) 535 struct acpi_walk_state *walk_state)
537{ 536{
538 struct acpi_namespace_node *method_node;
539 acpi_status status;
540 537
541 ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state); 538 ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
542 539
@@ -551,34 +548,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
551 /* Delete all arguments and locals */ 548 /* Delete all arguments and locals */
552 549
553 acpi_ds_method_data_delete_all(walk_state); 550 acpi_ds_method_data_delete_all(walk_state);
554 }
555 551
556 /* 552 /*
557 * If method is serialized, release the mutex and restore the 553 * If method is serialized, release the mutex and restore the
558 * current sync level for this thread 554 * current sync level for this thread
559 */ 555 */
560 if (method_desc->method.mutex) { 556 if (method_desc->method.mutex) {
561 557
562 /* Acquisition Depth handles recursive calls */ 558 /* Acquisition Depth handles recursive calls */
563 559
564 method_desc->method.mutex->mutex.acquisition_depth--; 560 method_desc->method.mutex->mutex.acquisition_depth--;
565 if (!method_desc->method.mutex->mutex.acquisition_depth) { 561 if (!method_desc->method.mutex->mutex.acquisition_depth) {
566 walk_state->thread->current_sync_level = 562 walk_state->thread->current_sync_level =
567 method_desc->method.mutex->mutex. 563 method_desc->method.mutex->mutex.
568 original_sync_level; 564 original_sync_level;
569 565
570 acpi_os_release_mutex(method_desc->method.mutex->mutex. 566 acpi_os_release_mutex(method_desc->method.
571 os_mutex); 567 mutex->mutex.os_mutex);
572 method_desc->method.mutex->mutex.owner_thread = NULL; 568 method_desc->method.mutex->mutex.thread_id = 0;
569 }
573 } 570 }
574 }
575
576 if (walk_state) {
577 /*
578 * Delete any objects created by this method during execution.
579 * The method Node is stored in the walk state
580 */
581 method_node = walk_state->method_node;
582 571
583 /* 572 /*
584 * Delete any namespace objects created anywhere within 573 * Delete any namespace objects created anywhere within
@@ -620,7 +609,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
620 */ 609 */
621 if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED) 610 if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
622 && (!method_desc->method.mutex)) { 611 && (!method_desc->method.mutex)) {
623 status = acpi_ds_create_method_mutex(method_desc); 612 (void)acpi_ds_create_method_mutex(method_desc);
624 } 613 }
625 614
626 /* No more threads, we can free the owner_id */ 615 /* No more threads, we can free the owner_id */
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
index ba4626e06a5e..13c43eac35db 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 954ac8ce958a..1022e38994c2 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -157,7 +157,9 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
157 * will remain as named references. This behavior is not described 157 * will remain as named references. This behavior is not described
158 * in the ACPI spec, but it appears to be an oversight. 158 * in the ACPI spec, but it appears to be an oversight.
159 */ 159 */
160 obj_desc = (union acpi_operand_object *)op->common.node; 160 obj_desc =
161 ACPI_CAST_PTR(union acpi_operand_object,
162 op->common.node);
161 163
162 status = 164 status =
163 acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR 165 acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
@@ -172,7 +174,19 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
172 switch (op->common.node->type) { 174 switch (op->common.node->type) {
173 /* 175 /*
174 * For these types, we need the actual node, not the subobject. 176 * For these types, we need the actual node, not the subobject.
175 * However, the subobject got an extra reference count above. 177 * However, the subobject did not get an extra reference count above.
178 *
179 * TBD: should ex_resolve_node_to_value be changed to fix this?
180 */
181 case ACPI_TYPE_DEVICE:
182 case ACPI_TYPE_THERMAL:
183
184 acpi_ut_add_reference(op->common.node->object);
185
186 /*lint -fallthrough */
187 /*
188 * For these types, we need the actual node, not the subobject.
189 * The subobject got an extra reference count in ex_resolve_node_to_value.
176 */ 190 */
177 case ACPI_TYPE_MUTEX: 191 case ACPI_TYPE_MUTEX:
178 case ACPI_TYPE_METHOD: 192 case ACPI_TYPE_METHOD:
@@ -180,25 +194,15 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
180 case ACPI_TYPE_PROCESSOR: 194 case ACPI_TYPE_PROCESSOR:
181 case ACPI_TYPE_EVENT: 195 case ACPI_TYPE_EVENT:
182 case ACPI_TYPE_REGION: 196 case ACPI_TYPE_REGION:
183 case ACPI_TYPE_DEVICE:
184 case ACPI_TYPE_THERMAL:
185 197
186 obj_desc = 198 /* We will create a reference object for these types below */
187 (union acpi_operand_object *)op->common.
188 node;
189 break; 199 break;
190 200
191 default: 201 default:
192 break; 202 /*
193 } 203 * All other types - the node was resolved to an actual
194 204 * object, we are done.
195 /* 205 */
196 * If above resolved to an operand object, we are done. Otherwise,
197 * we have a NS node, we must create the package entry as a named
198 * reference.
199 */
200 if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
201 ACPI_DESC_TYPE_NAMED) {
202 goto exit; 206 goto exit;
203 } 207 }
204 } 208 }
@@ -223,7 +227,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
223 227
224 exit: 228 exit:
225 *obj_desc_ptr = obj_desc; 229 *obj_desc_ptr = obj_desc;
226 return_ACPI_STATUS(AE_OK); 230 return_ACPI_STATUS(status);
227} 231}
228 232
229/******************************************************************************* 233/*******************************************************************************
@@ -369,7 +373,9 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
369 union acpi_parse_object *parent; 373 union acpi_parse_object *parent;
370 union acpi_operand_object *obj_desc = NULL; 374 union acpi_operand_object *obj_desc = NULL;
371 acpi_status status = AE_OK; 375 acpi_status status = AE_OK;
372 acpi_native_uint i; 376 unsigned i;
377 u16 index;
378 u16 reference_count;
373 379
374 ACPI_FUNCTION_TRACE(ds_build_internal_package_obj); 380 ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
375 381
@@ -447,13 +453,60 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
447 package. 453 package.
448 elements[i]); 454 elements[i]);
449 } 455 }
456
457 if (*obj_desc_ptr) {
458
459 /* Existing package, get existing reference count */
460
461 reference_count =
462 (*obj_desc_ptr)->common.reference_count;
463 if (reference_count > 1) {
464
465 /* Make new element ref count match original ref count */
466
467 for (index = 0; index < (reference_count - 1);
468 index++) {
469 acpi_ut_add_reference((obj_desc->
470 package.
471 elements[i]));
472 }
473 }
474 }
475
450 arg = arg->common.next; 476 arg = arg->common.next;
451 } 477 }
452 478
453 if (!arg) { 479 /* Check for match between num_elements and actual length of package_list */
480
481 if (arg) {
482 /*
483 * num_elements was exhausted, but there are remaining elements in the
484 * package_list.
485 *
486 * Note: technically, this is an error, from ACPI spec: "It is an error
487 * for NumElements to be less than the number of elements in the
488 * PackageList". However, for now, we just print an error message and
489 * no exception is returned.
490 */
491 while (arg) {
492
493 /* Find out how many elements there really are */
494
495 i++;
496 arg = arg->common.next;
497 }
498
499 ACPI_ERROR((AE_INFO,
500 "Package List length (%X) larger than NumElements count (%X), truncated\n",
501 i, element_count));
502 } else if (i < element_count) {
503 /*
504 * Arg list (elements) was exhausted, but we did not reach num_elements count.
505 * Note: this is not an error, the package is padded out with NULLs.
506 */
454 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 507 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
455 "Package List length larger than NumElements count (%X), truncated\n", 508 "Package List length (%X) smaller than NumElements count (%X), padded with null elements\n",
456 element_count)); 509 i, element_count));
457 } 510 }
458 511
459 obj_desc->package.flags |= AOPOBJ_DATA_VALID; 512 obj_desc->package.flags |= AOPOBJ_DATA_VALID;
@@ -721,6 +774,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
721 /* Node was saved in Op */ 774 /* Node was saved in Op */
722 775
723 obj_desc->reference.node = op->common.node; 776 obj_desc->reference.node = op->common.node;
777 obj_desc->reference.object =
778 op->common.node->object;
724 } 779 }
725 780
726 obj_desc->reference.opcode = opcode; 781 obj_desc->reference.opcode = opcode;
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index f501e083aac7..a818e0ddb996 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,7 @@
49#include <acpi/acinterp.h> 49#include <acpi/acinterp.h>
50#include <acpi/acnamesp.h> 50#include <acpi/acnamesp.h>
51#include <acpi/acevents.h> 51#include <acpi/acevents.h>
52#include <acpi/actables.h>
52 53
53#define _COMPONENT ACPI_DISPATCHER 54#define _COMPONENT ACPI_DISPATCHER
54ACPI_MODULE_NAME("dsopcode") 55ACPI_MODULE_NAME("dsopcode")
@@ -219,6 +220,50 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
219 220
220/******************************************************************************* 221/*******************************************************************************
221 * 222 *
223 * FUNCTION: acpi_ds_get_bank_field_arguments
224 *
225 * PARAMETERS: obj_desc - A valid bank_field object
226 *
227 * RETURN: Status.
228 *
229 * DESCRIPTION: Get bank_field bank_value. This implements the late
230 * evaluation of these field attributes.
231 *
232 ******************************************************************************/
233
234acpi_status
235acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
236{
237 union acpi_operand_object *extra_desc;
238 struct acpi_namespace_node *node;
239 acpi_status status;
240
241 ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
242
243 if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
244 return_ACPI_STATUS(AE_OK);
245 }
246
247 /* Get the AML pointer (method object) and bank_field node */
248
249 extra_desc = acpi_ns_get_secondary_object(obj_desc);
250 node = obj_desc->bank_field.node;
251
252 ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
253 (ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
254 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
255 acpi_ut_get_node_name(node)));
256
257 /* Execute the AML code for the term_arg arguments */
258
259 status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
260 extra_desc->extra.aml_length,
261 extra_desc->extra.aml_start);
262 return_ACPI_STATUS(status);
263}
264
265/*******************************************************************************
266 *
222 * FUNCTION: acpi_ds_get_buffer_arguments 267 * FUNCTION: acpi_ds_get_buffer_arguments
223 * 268 *
224 * PARAMETERS: obj_desc - A valid Buffer object 269 * PARAMETERS: obj_desc - A valid Buffer object
@@ -770,7 +815,109 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
770 815
771 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n", 816 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
772 obj_desc, 817 obj_desc,
773 ACPI_FORMAT_UINT64(obj_desc->region.address), 818 ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
819 obj_desc->region.length));
820
821 /* Now the address and length are valid for this opregion */
822
823 obj_desc->region.flags |= AOPOBJ_DATA_VALID;
824
825 return_ACPI_STATUS(status);
826}
827
828/*******************************************************************************
829 *
830 * FUNCTION: acpi_ds_eval_table_region_operands
831 *
832 * PARAMETERS: walk_state - Current walk
833 * Op - A valid region Op object
834 *
835 * RETURN: Status
836 *
837 * DESCRIPTION: Get region address and length
838 * Called from acpi_ds_exec_end_op during data_table_region parse tree walk
839 *
840 ******************************************************************************/
841
842acpi_status
843acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
844 union acpi_parse_object *op)
845{
846 acpi_status status;
847 union acpi_operand_object *obj_desc;
848 union acpi_operand_object **operand;
849 struct acpi_namespace_node *node;
850 union acpi_parse_object *next_op;
851 acpi_native_uint table_index;
852 struct acpi_table_header *table;
853
854 ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
855
856 /*
857 * This is where we evaluate the signature_string and oem_iDString
858 * and oem_table_iDString of the data_table_region declaration
859 */
860 node = op->common.node;
861
862 /* next_op points to signature_string op */
863
864 next_op = op->common.value.arg;
865
866 /*
867 * Evaluate/create the signature_string and oem_iDString
868 * and oem_table_iDString operands
869 */
870 status = acpi_ds_create_operands(walk_state, next_op);
871 if (ACPI_FAILURE(status)) {
872 return_ACPI_STATUS(status);
873 }
874
875 /*
876 * Resolve the signature_string and oem_iDString
877 * and oem_table_iDString operands
878 */
879 status = acpi_ex_resolve_operands(op->common.aml_opcode,
880 ACPI_WALK_OPERANDS, walk_state);
881 if (ACPI_FAILURE(status)) {
882 return_ACPI_STATUS(status);
883 }
884
885 ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
886 acpi_ps_get_opcode_name(op->common.aml_opcode),
887 1, "after AcpiExResolveOperands");
888
889 operand = &walk_state->operands[0];
890
891 /* Find the ACPI table */
892
893 status = acpi_tb_find_table(operand[0]->string.pointer,
894 operand[1]->string.pointer,
895 operand[2]->string.pointer, &table_index);
896 if (ACPI_FAILURE(status)) {
897 return_ACPI_STATUS(status);
898 }
899
900 acpi_ut_remove_reference(operand[0]);
901 acpi_ut_remove_reference(operand[1]);
902 acpi_ut_remove_reference(operand[2]);
903
904 status = acpi_get_table_by_index(table_index, &table);
905 if (ACPI_FAILURE(status)) {
906 return_ACPI_STATUS(status);
907 }
908
909 obj_desc = acpi_ns_get_attached_object(node);
910 if (!obj_desc) {
911 return_ACPI_STATUS(AE_NOT_EXIST);
912 }
913
914 obj_desc->region.address =
915 (acpi_physical_address) ACPI_TO_INTEGER(table);
916 obj_desc->region.length = table->length;
917
918 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
919 obj_desc,
920 ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
774 obj_desc->region.length)); 921 obj_desc->region.length));
775 922
776 /* Now the address and length are valid for this opregion */ 923 /* Now the address and length are valid for this opregion */
@@ -808,6 +955,12 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
808 955
809 /* The first operand (for all of these data objects) is the length */ 956 /* The first operand (for all of these data objects) is the length */
810 957
958 /*
959 * Set proper index into operand stack for acpi_ds_obj_stack_push
960 * invoked inside acpi_ds_create_operand.
961 */
962 walk_state->operand_index = walk_state->num_operands;
963
811 status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1); 964 status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1);
812 if (ACPI_FAILURE(status)) { 965 if (ACPI_FAILURE(status)) {
813 return_ACPI_STATUS(status); 966 return_ACPI_STATUS(status);
@@ -878,6 +1031,106 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
878 1031
879/******************************************************************************* 1032/*******************************************************************************
880 * 1033 *
1034 * FUNCTION: acpi_ds_eval_bank_field_operands
1035 *
1036 * PARAMETERS: walk_state - Current walk
1037 * Op - A valid bank_field Op object
1038 *
1039 * RETURN: Status
1040 *
1041 * DESCRIPTION: Get bank_field bank_value
1042 * Called from acpi_ds_exec_end_op during bank_field parse tree walk
1043 *
1044 ******************************************************************************/
1045
1046acpi_status
1047acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
1048 union acpi_parse_object *op)
1049{
1050 acpi_status status;
1051 union acpi_operand_object *obj_desc;
1052 union acpi_operand_object *operand_desc;
1053 struct acpi_namespace_node *node;
1054 union acpi_parse_object *next_op;
1055 union acpi_parse_object *arg;
1056
1057 ACPI_FUNCTION_TRACE_PTR(ds_eval_bank_field_operands, op);
1058
1059 /*
1060 * This is where we evaluate the bank_value field of the
1061 * bank_field declaration
1062 */
1063
1064 /* next_op points to the op that holds the Region */
1065
1066 next_op = op->common.value.arg;
1067
1068 /* next_op points to the op that holds the Bank Register */
1069
1070 next_op = next_op->common.next;
1071
1072 /* next_op points to the op that holds the Bank Value */
1073
1074 next_op = next_op->common.next;
1075
1076 /*
1077 * Set proper index into operand stack for acpi_ds_obj_stack_push
1078 * invoked inside acpi_ds_create_operand.
1079 *
1080 * We use walk_state->Operands[0] to store the evaluated bank_value
1081 */
1082 walk_state->operand_index = 0;
1083
1084 status = acpi_ds_create_operand(walk_state, next_op, 0);
1085 if (ACPI_FAILURE(status)) {
1086 return_ACPI_STATUS(status);
1087 }
1088
1089 status = acpi_ex_resolve_to_value(&walk_state->operands[0], walk_state);
1090 if (ACPI_FAILURE(status)) {
1091 return_ACPI_STATUS(status);
1092 }
1093
1094 ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
1095 acpi_ps_get_opcode_name(op->common.aml_opcode),
1096 1, "after AcpiExResolveOperands");
1097
1098 /*
1099 * Get the bank_value operand and save it
1100 * (at Top of stack)
1101 */
1102 operand_desc = walk_state->operands[0];
1103
1104 /* Arg points to the start Bank Field */
1105
1106 arg = acpi_ps_get_arg(op, 4);
1107 while (arg) {
1108
1109 /* Ignore OFFSET and ACCESSAS terms here */
1110
1111 if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
1112 node = arg->common.node;
1113
1114 obj_desc = acpi_ns_get_attached_object(node);
1115 if (!obj_desc) {
1116 return_ACPI_STATUS(AE_NOT_EXIST);
1117 }
1118
1119 obj_desc->bank_field.value =
1120 (u32) operand_desc->integer.value;
1121 }
1122
1123 /* Move to next field in the list */
1124
1125 arg = arg->common.next;
1126 }
1127
1128 acpi_ut_remove_reference(operand_desc);
1129 return_ACPI_STATUS(status);
1130}
1131
1132/*******************************************************************************
1133 *
881 * FUNCTION: acpi_ds_exec_begin_control_op 1134 * FUNCTION: acpi_ds_exec_begin_control_op
882 * 1135 *
883 * PARAMETERS: walk_list - The list that owns the walk stack 1136 * PARAMETERS: walk_list - The list that owns the walk stack
@@ -1070,8 +1323,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
1070 * is set to anything other than zero! 1323 * is set to anything other than zero!
1071 */ 1324 */
1072 walk_state->return_desc = walk_state->operands[0]; 1325 walk_state->return_desc = walk_state->operands[0];
1073 } else if ((walk_state->results) && 1326 } else if (walk_state->result_count) {
1074 (walk_state->results->results.num_results > 0)) {
1075 1327
1076 /* Since we have a real Return(), delete any implicit return */ 1328 /* Since we have a real Return(), delete any implicit return */
1077 1329
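
The two new evaluators above close the loop opened in dsfield.c: a BankField's BankValue TermArg and a DataTableRegion's three name strings are no longer folded to constants at load time but executed when acpi_ds_exec_end_op reaches the corresponding Op (the dswexec.c hunk below adds those call sites). On the consumer side the deferred-argument pattern looks the same as it already does for buffer fields; a hedged sketch with an invented wrapper name:

/* Make sure a bank field's BankValue has been evaluated before use. */
static acpi_status example_ensure_bank_value(union acpi_operand_object *obj_desc)
{
	/* Already evaluated once -- the value is cached in the object. */
	if (obj_desc->common.flags & AOPOBJ_DATA_VALID)
		return AE_OK;

	/* Runs the stashed TermArg AML, which ends up setting
	 * obj_desc->bank_field.value via the deferred parse walk. */
	return acpi_ds_get_bank_field_arguments(obj_desc);
}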
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index 71503c036f7c..b398982f0d8b 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -278,7 +278,9 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
278 AML_VAR_PACKAGE_OP) 278 AML_VAR_PACKAGE_OP)
279 || (op->common.parent->common.aml_opcode == AML_BUFFER_OP) 279 || (op->common.parent->common.aml_opcode == AML_BUFFER_OP)
280 || (op->common.parent->common.aml_opcode == 280 || (op->common.parent->common.aml_opcode ==
281 AML_INT_EVAL_SUBTREE_OP)) { 281 AML_INT_EVAL_SUBTREE_OP)
282 || (op->common.parent->common.aml_opcode ==
283 AML_BANK_FIELD_OP)) {
282 /* 284 /*
283 * These opcodes allow term_arg(s) as operands and therefore 285 * These opcodes allow term_arg(s) as operands and therefore
284 * the operands can be method calls. The result is used. 286 * the operands can be method calls. The result is used.
@@ -472,7 +474,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
472 /* A valid name must be looked up in the namespace */ 474 /* A valid name must be looked up in the namespace */
473 475
474 if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) && 476 if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
475 (arg->common.value.string)) { 477 (arg->common.value.string) &&
478 !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
476 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Getting a name: Arg=%p\n", 479 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Getting a name: Arg=%p\n",
477 arg)); 480 arg));
478 481
@@ -595,7 +598,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
595 } else { 598 } else {
596 /* Check for null name case */ 599 /* Check for null name case */
597 600
598 if (arg->common.aml_opcode == AML_INT_NAMEPATH_OP) { 601 if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
602 !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
599 /* 603 /*
600 * If the name is null, this means that this is an 604 * If the name is null, this means that this is an
601 * optional result parameter that was not specified 605 * optional result parameter that was not specified
@@ -617,7 +621,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
617 return_ACPI_STATUS(AE_NOT_IMPLEMENTED); 621 return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
618 } 622 }
619 623
620 if (op_info->flags & AML_HAS_RETVAL) { 624 if ((op_info->flags & AML_HAS_RETVAL)
625 || (arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
621 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 626 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
622 "Argument previously created, already stacked\n")); 627 "Argument previously created, already stacked\n"));
623 628
@@ -630,9 +635,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
630 * Use value that was already previously returned 635 * Use value that was already previously returned
631 * by the evaluation of this argument 636 * by the evaluation of this argument
632 */ 637 */
633 status = 638 status = acpi_ds_result_pop(&obj_desc, walk_state);
634 acpi_ds_result_pop_from_bottom(&obj_desc,
635 walk_state);
636 if (ACPI_FAILURE(status)) { 639 if (ACPI_FAILURE(status)) {
637 /* 640 /*
638 * Only error is underflow, and this indicates 641 * Only error is underflow, and this indicates
@@ -698,27 +701,52 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
698{ 701{
699 acpi_status status = AE_OK; 702 acpi_status status = AE_OK;
700 union acpi_parse_object *arg; 703 union acpi_parse_object *arg;
704 union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
701 u32 arg_count = 0; 705 u32 arg_count = 0;
706 u32 index = walk_state->num_operands;
707 u32 i;
702 708
703 ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg); 709 ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
704 710
705 /* For all arguments in the list... */ 711 /* Get all arguments in the list */
706 712
707 arg = first_arg; 713 arg = first_arg;
708 while (arg) { 714 while (arg) {
709 status = acpi_ds_create_operand(walk_state, arg, arg_count); 715 if (index >= ACPI_OBJ_NUM_OPERANDS) {
710 if (ACPI_FAILURE(status)) { 716 return_ACPI_STATUS(AE_BAD_DATA);
711 goto cleanup;
712 } 717 }
713 718
714 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 719 arguments[index] = arg;
715 "Arg #%d (%p) done, Arg1=%p\n", arg_count, 720 walk_state->operands[index] = NULL;
716 arg, first_arg));
717 721
718 /* Move on to next argument, if any */ 722 /* Move on to next argument, if any */
719 723
720 arg = arg->common.next; 724 arg = arg->common.next;
721 arg_count++; 725 arg_count++;
726 index++;
727 }
728
729 index--;
730
731 /* It is the appropriate order to get objects from the Result stack */
732
733 for (i = 0; i < arg_count; i++) {
734 arg = arguments[index];
735
736 /* Force the filling of the operand stack in inverse order */
737
738 walk_state->operand_index = (u8) index;
739
740 status = acpi_ds_create_operand(walk_state, arg, index);
741 if (ACPI_FAILURE(status)) {
742 goto cleanup;
743 }
744
745 index--;
746
747 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
748 "Arg #%d (%p) done, Arg1=%p\n", index, arg,
749 first_arg));
722 } 750 }
723 751
724 return_ACPI_STATUS(status); 752 return_ACPI_STATUS(status);
@@ -729,9 +757,112 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
729 * pop everything off of the operand stack and delete those 757 * pop everything off of the operand stack and delete those
730 * objects 758 * objects
731 */ 759 */
732 (void)acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state); 760 acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
761
762 ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d", index));
763 return_ACPI_STATUS(status);
764}
765
766/*****************************************************************************
767 *
768 * FUNCTION: acpi_ds_evaluate_name_path
769 *
770 * PARAMETERS: walk_state - Current state of the parse tree walk,
771 * the opcode of current operation should be
772 * AML_INT_NAMEPATH_OP
773 *
774 * RETURN: Status
775 *
776 * DESCRIPTION: Translate the -name_path- parse tree object to the equivalent
777 * interpreter object, convert it to value, if needed, duplicate
778 * it, if needed, and push it onto the current result stack.
779 *
780 ****************************************************************************/
781
782acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
783{
784 acpi_status status = AE_OK;
785 union acpi_parse_object *op = walk_state->op;
786 union acpi_operand_object **operand = &walk_state->operands[0];
787 union acpi_operand_object *new_obj_desc;
788 u8 type;
789
790 ACPI_FUNCTION_TRACE_PTR(ds_evaluate_name_path, walk_state);
791
792 if (!op->common.parent) {
793
794 /* This happens after certain exception processing */
795
796 goto exit;
797 }
798
799 if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
800 (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) ||
801 (op->common.parent->common.aml_opcode == AML_REF_OF_OP)) {
802
803 /* TBD: Should we specify this feature as a bit of op_info->Flags of these opcodes? */
804
805 goto exit;
806 }
807
808 status = acpi_ds_create_operand(walk_state, op, 0);
809 if (ACPI_FAILURE(status)) {
810 goto exit;
811 }
812
813 if (op->common.flags & ACPI_PARSEOP_TARGET) {
814 new_obj_desc = *operand;
815 goto push_result;
816 }
817
818 type = ACPI_GET_OBJECT_TYPE(*operand);
819
820 status = acpi_ex_resolve_to_value(operand, walk_state);
821 if (ACPI_FAILURE(status)) {
822 goto exit;
823 }
824
825 if (type == ACPI_TYPE_INTEGER) {
826
827 /* It was incremented by acpi_ex_resolve_to_value */
828
829 acpi_ut_remove_reference(*operand);
830
831 status =
832 acpi_ut_copy_iobject_to_iobject(*operand, &new_obj_desc,
833 walk_state);
834 if (ACPI_FAILURE(status)) {
835 goto exit;
836 }
837 } else {
838 /*
839 * The object either was anew created or is
840 * a Namespace node - don't decrement it.
841 */
842 new_obj_desc = *operand;
843 }
844
845 /* Cleanup for name-path operand */
846
847 status = acpi_ds_obj_stack_pop(1, walk_state);
848 if (ACPI_FAILURE(status)) {
849 walk_state->result_obj = new_obj_desc;
850 goto exit;
851 }
852
853 push_result:
854
855 walk_state->result_obj = new_obj_desc;
856
857 status = acpi_ds_result_push(walk_state->result_obj, walk_state);
858 if (ACPI_SUCCESS(status)) {
859
860 /* Force to take it from stack */
861
862 op->common.flags |= ACPI_PARSEOP_IN_STACK;
863 }
864
865 exit:
733 866
734 ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d",
735 (arg_count + 1)));
736 return_ACPI_STATUS(status); 867 return_ACPI_STATUS(status);
737} 868}
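
acpi_ds_create_operands() now gathers the argument Ops into a local array first and then materializes them from the highest operand index downwards, so that values popped off the result stack land in the slot matching their evaluation order; the new acpi_ds_evaluate_name_path() is what pushes those results and marks the Op with ACPI_PARSEOP_IN_STACK so a later acpi_ds_create_operand() knows to pop rather than re-resolve the name. The ordering idea, stripped of ACPICA types into a hedged toy sketch:

#include <stddef.h>

/* Results were pushed in source order, so popping them (LIFO) while filling
 * the operand array from the top index down restores the original order. */
static void example_fill_operands(int *operands, size_t count,
				  int (*pop_result)(void))
{
	size_t index = count;

	while (index--)
		operands[index] = pop_result();	/* last pushed -> highest slot */
}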
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index 69693fa07224..b246b9657ead 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -285,11 +285,6 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
285 switch (opcode_class) { 285 switch (opcode_class) {
286 case AML_CLASS_CONTROL: 286 case AML_CLASS_CONTROL:
287 287
288 status = acpi_ds_result_stack_push(walk_state);
289 if (ACPI_FAILURE(status)) {
290 goto error_exit;
291 }
292
293 status = acpi_ds_exec_begin_control_op(walk_state, op); 288 status = acpi_ds_exec_begin_control_op(walk_state, op);
294 break; 289 break;
295 290
@@ -305,20 +300,11 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
305 status = acpi_ds_load2_begin_op(walk_state, NULL); 300 status = acpi_ds_load2_begin_op(walk_state, NULL);
306 } 301 }
307 302
308 if (op->common.aml_opcode == AML_REGION_OP) {
309 status = acpi_ds_result_stack_push(walk_state);
310 }
311 break; 303 break;
312 304
313 case AML_CLASS_EXECUTE: 305 case AML_CLASS_EXECUTE:
314 case AML_CLASS_CREATE: 306 case AML_CLASS_CREATE:
315 /* 307
316 * Most operators with arguments (except create_xxx_field operators)
317 * Start a new result/operand state
318 */
319 if (walk_state->op_info->object_type != ACPI_TYPE_BUFFER_FIELD) {
320 status = acpi_ds_result_stack_push(walk_state);
321 }
322 break; 308 break;
323 309
324 default: 310 default:
@@ -374,6 +360,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
374 /* Init the walk state */ 360 /* Init the walk state */
375 361
376 walk_state->num_operands = 0; 362 walk_state->num_operands = 0;
363 walk_state->operand_index = 0;
377 walk_state->return_desc = NULL; 364 walk_state->return_desc = NULL;
378 walk_state->result_obj = NULL; 365 walk_state->result_obj = NULL;
379 366
@@ -388,10 +375,17 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
388 /* Decode the Opcode Class */ 375 /* Decode the Opcode Class */
389 376
390 switch (op_class) { 377 switch (op_class) {
391 case AML_CLASS_ARGUMENT: /* constants, literals, etc. - do nothing */ 378 case AML_CLASS_ARGUMENT: /* Constants, literals, etc. */
379
380 if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
381 status = acpi_ds_evaluate_name_path(walk_state);
382 if (ACPI_FAILURE(status)) {
383 goto cleanup;
384 }
385 }
392 break; 386 break;
393 387
394 case AML_CLASS_EXECUTE: /* most operators with arguments */ 388 case AML_CLASS_EXECUTE: /* Most operators with arguments */
395 389
396 /* Build resolved operand stack */ 390 /* Build resolved operand stack */
397 391
@@ -400,13 +394,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
400 goto cleanup; 394 goto cleanup;
401 } 395 }
402 396
403 /* Done with this result state (Now that operand stack is built) */
404
405 status = acpi_ds_result_stack_pop(walk_state);
406 if (ACPI_FAILURE(status)) {
407 goto cleanup;
408 }
409
410 /* 397 /*
411 * All opcodes require operand resolution, with the only exceptions 398 * All opcodes require operand resolution, with the only exceptions
412 * being the object_type and size_of operators. 399 * being the object_type and size_of operators.
@@ -487,16 +474,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
487 474
488 status = acpi_ds_exec_end_control_op(walk_state, op); 475 status = acpi_ds_exec_end_control_op(walk_state, op);
489 476
490 /* Make sure to properly pop the result stack */
491
492 if (ACPI_SUCCESS(status)) {
493 status = acpi_ds_result_stack_pop(walk_state);
494 } else if (status == AE_CTRL_PENDING) {
495 status = acpi_ds_result_stack_pop(walk_state);
496 if (ACPI_SUCCESS(status)) {
497 status = AE_CTRL_PENDING;
498 }
499 }
500 break; 477 break;
501 478
502 case AML_TYPE_METHOD_CALL: 479 case AML_TYPE_METHOD_CALL:
@@ -516,7 +493,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
516 493
517 op->common.node = 494 op->common.node =
518 (struct acpi_namespace_node *)op->asl.value. 495 (struct acpi_namespace_node *)op->asl.value.
519 arg->asl.node->object; 496 arg->asl.node;
520 acpi_ut_add_reference(op->asl.value.arg->asl. 497 acpi_ut_add_reference(op->asl.value.arg->asl.
521 node->object); 498 node->object);
522 return_ACPI_STATUS(AE_OK); 499 return_ACPI_STATUS(AE_OK);
@@ -632,13 +609,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
632 break; 609 break;
633 } 610 }
634 611
635 /* Done with result state (Now that operand stack is built) */
636
637 status = acpi_ds_result_stack_pop(walk_state);
638 if (ACPI_FAILURE(status)) {
639 goto cleanup;
640 }
641
642 /* 612 /*
643 * If a result object was returned from above, push it on the 613 * If a result object was returned from above, push it on the
644 * current result stack 614 * current result stack
@@ -671,8 +641,28 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
671 if (ACPI_FAILURE(status)) { 641 if (ACPI_FAILURE(status)) {
672 break; 642 break;
673 } 643 }
644 } else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
645 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
646 "Executing DataTableRegion Strings Op=%p\n",
647 op));
648
649 status =
650 acpi_ds_eval_table_region_operands
651 (walk_state, op);
652 if (ACPI_FAILURE(status)) {
653 break;
654 }
655 } else if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
656 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
657 "Executing BankField Op=%p\n",
658 op));
674 659
675 status = acpi_ds_result_stack_pop(walk_state); 660 status =
661 acpi_ds_eval_bank_field_operands(walk_state,
662 op);
663 if (ACPI_FAILURE(status)) {
664 break;
665 }
676 } 666 }
677 break; 667 break;
678 668
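
The dswexec.c hunks above drop the manual result-stack push/pop calls (the stack now grows and shrinks on demand; see the sketch after the dswstate.c diff below) and extend the deferred-execution path so that DataTableRegion and BankField operands are evaluated alongside OpRegion. A minimal standalone C sketch of that dispatch follows; the opcode constants and stub evaluators are invented stand-ins for the acpi_ds_eval_*_operands() calls, not the ACPICA implementation.

/*
 * Minimal standalone sketch: invented opcode constants, stub evaluators in
 * place of the acpi_ds_eval_*_operands() calls.  It only illustrates the
 * new dispatch shape for deferred create-ops.
 */
#include <stdio.h>

enum aml_op { AML_REGION_OP, AML_DATA_REGION_OP, AML_BANK_FIELD_OP, AML_OTHER_OP };

static int eval_region_operands(const char *op)
{
	printf("evaluating OpRegion operands (%s)\n", op);
	return 0;
}

static int eval_table_region_operands(const char *op)
{
	printf("evaluating DataTableRegion operands (%s)\n", op);
	return 0;
}

static int eval_bank_field_operands(const char *op)
{
	printf("evaluating BankField operands (%s)\n", op);
	return 0;
}

/* Mirrors the if/else chain added to the deferred-op path above */
static int end_deferred_op(enum aml_op opcode, const char *op)
{
	if (opcode == AML_REGION_OP)
		return eval_region_operands(op);
	else if (opcode == AML_DATA_REGION_OP)
		return eval_table_region_operands(op);
	else if (opcode == AML_BANK_FIELD_OP)
		return eval_bank_field_operands(op);
	return 0;	/* nothing deferred for other opcodes */
}

int main(void)
{
	end_deferred_op(AML_DATA_REGION_OP, "Op=0xA");
	end_deferred_op(AML_BANK_FIELD_OP, "Op=0xB");
	return 0;
}
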
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index 8ab9d1b29a4c..dff7a3e445a8 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -443,6 +443,15 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
443 if (ACPI_FAILURE(status)) { 443 if (ACPI_FAILURE(status)) {
444 return_ACPI_STATUS(status); 444 return_ACPI_STATUS(status);
445 } 445 }
446 } else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
447 status =
448 acpi_ex_create_region(op->named.data,
449 op->named.length,
450 REGION_DATA_TABLE,
451 walk_state);
452 if (ACPI_FAILURE(status)) {
453 return_ACPI_STATUS(status);
454 }
446 } 455 }
447 } 456 }
448#endif 457#endif
@@ -767,6 +776,12 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
767 acpi_ns_lookup(walk_state->scope_info, buffer_ptr, 776 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
768 object_type, ACPI_IMODE_LOAD_PASS2, flags, 777 object_type, ACPI_IMODE_LOAD_PASS2, flags,
769 walk_state, &node); 778 walk_state, &node);
779
780 if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
781 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
782 "***New Node [%4.4s] %p is temporary\n",
783 acpi_ut_get_node_name(node), node));
784 }
770 break; 785 break;
771 } 786 }
772 787
@@ -823,6 +838,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
823 struct acpi_namespace_node *new_node; 838 struct acpi_namespace_node *new_node;
824#ifndef ACPI_NO_METHOD_EXECUTION 839#ifndef ACPI_NO_METHOD_EXECUTION
825 u32 i; 840 u32 i;
841 u8 region_space;
826#endif 842#endif
827 843
828 ACPI_FUNCTION_TRACE(ds_load2_end_op); 844 ACPI_FUNCTION_TRACE(ds_load2_end_op);
@@ -1003,11 +1019,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
1003 status = acpi_ex_create_event(walk_state); 1019 status = acpi_ex_create_event(walk_state);
1004 break; 1020 break;
1005 1021
1006 case AML_DATA_REGION_OP:
1007
1008 status = acpi_ex_create_table_region(walk_state);
1009 break;
1010
1011 case AML_ALIAS_OP: 1022 case AML_ALIAS_OP:
1012 1023
1013 status = acpi_ex_create_alias(walk_state); 1024 status = acpi_ex_create_alias(walk_state);
@@ -1035,6 +1046,15 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
1035 switch (op->common.aml_opcode) { 1046 switch (op->common.aml_opcode) {
1036#ifndef ACPI_NO_METHOD_EXECUTION 1047#ifndef ACPI_NO_METHOD_EXECUTION
1037 case AML_REGION_OP: 1048 case AML_REGION_OP:
1049 case AML_DATA_REGION_OP:
1050
1051 if (op->common.aml_opcode == AML_REGION_OP) {
1052 region_space = (acpi_adr_space_type)
1053 ((op->common.value.arg)->common.value.
1054 integer);
1055 } else {
1056 region_space = REGION_DATA_TABLE;
1057 }
1038 1058
1039 /* 1059 /*
1040 * If we are executing a method, initialize the region 1060 * If we are executing a method, initialize the region
@@ -1043,10 +1063,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
1043 status = 1063 status =
1044 acpi_ex_create_region(op->named.data, 1064 acpi_ex_create_region(op->named.data,
1045 op->named.length, 1065 op->named.length,
1046 (acpi_adr_space_type) 1066 region_space,
1047 ((op->common.value.
1048 arg)->common.value.
1049 integer),
1050 walk_state); 1067 walk_state);
1051 if (ACPI_FAILURE(status)) { 1068 if (ACPI_FAILURE(status)) {
1052 return (status); 1069 return (status);
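
The dswload.c change above folds DataTableRegion into the OperationRegion path of acpi_ds_load2_end_op(): a plain OpRegion takes its address-space ID from its first package argument, a DataTableRegion is pinned to the data-table space, and both then share the same acpi_ex_create_region() call. A small standalone sketch of that selection; the parse_op type and the REGION_DATA_TABLE value are stand-ins for illustration, not the ACPICA definitions.

/*
 * Standalone sketch; parse_op and REGION_DATA_TABLE's value are stand-ins,
 * not the ACPICA definitions.
 */
#include <stdio.h>

#define REGION_DATA_TABLE 0x7E	/* illustrative value only */

enum aml_op { AML_REGION_OP, AML_DATA_REGION_OP };

struct parse_op {
	enum aml_op opcode;
	unsigned char arg_space_id;	/* first argument of an OpRegion */
};

/* OpRegion carries its space ID; DataTableRegion always uses the data table */
static unsigned char region_space_for(const struct parse_op *op)
{
	if (op->opcode == AML_REGION_OP)
		return op->arg_space_id;
	return REGION_DATA_TABLE;
}

int main(void)
{
	struct parse_op opregion = { AML_REGION_OP, 0x00 /* e.g. SystemMemory */ };
	struct parse_op datatable = { AML_DATA_REGION_OP, 0 };

	printf("OpRegion space        = 0x%02X\n", (unsigned)region_space_for(&opregion));
	printf("DataTableRegion space = 0x%02X\n", (unsigned)region_space_for(&datatable));
	return 0;
}
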
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/dispatcher/dswscope.c
index 3927c495e4bf..9e6073265873 100644
--- a/drivers/acpi/dispatcher/dswscope.c
+++ b/drivers/acpi/dispatcher/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index 5afcdd9c7449..1386ced332ec 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -49,85 +49,9 @@
49#define _COMPONENT ACPI_DISPATCHER 49#define _COMPONENT ACPI_DISPATCHER
50ACPI_MODULE_NAME("dswstate") 50ACPI_MODULE_NAME("dswstate")
51 51
52/* Local prototypes */ 52 /* Local prototypes */
53#ifdef ACPI_OBSOLETE_FUNCTIONS 53static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws);
54acpi_status 54static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
55acpi_ds_result_insert(void *object,
56 u32 index, struct acpi_walk_state *walk_state);
57
58acpi_status acpi_ds_obj_stack_delete_all(struct acpi_walk_state *walk_state);
59
60acpi_status
61acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
62 struct acpi_walk_state *walk_state);
63
64void *acpi_ds_obj_stack_get_value(u32 index,
65 struct acpi_walk_state *walk_state);
66#endif
67
68#ifdef ACPI_FUTURE_USAGE
69/*******************************************************************************
70 *
71 * FUNCTION: acpi_ds_result_remove
72 *
73 * PARAMETERS: Object - Where to return the popped object
74 * Index - Where to extract the object
75 * walk_state - Current Walk state
76 *
77 * RETURN: Status
78 *
79 * DESCRIPTION: Pop an object off the bottom of this walk's result stack. In
80 * other words, this is a FIFO.
81 *
82 ******************************************************************************/
83
84acpi_status
85acpi_ds_result_remove(union acpi_operand_object **object,
86 u32 index, struct acpi_walk_state *walk_state)
87{
88 union acpi_generic_state *state;
89
90 ACPI_FUNCTION_NAME(ds_result_remove);
91
92 state = walk_state->results;
93 if (!state) {
94 ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
95 walk_state));
96 return (AE_NOT_EXIST);
97 }
98
99 if (index >= ACPI_OBJ_MAX_OPERAND) {
100 ACPI_ERROR((AE_INFO,
101 "Index out of range: %X State=%p Num=%X",
102 index, walk_state, state->results.num_results));
103 }
104
105 /* Check for a valid result object */
106
107 if (!state->results.obj_desc[index]) {
108 ACPI_ERROR((AE_INFO,
109 "Null operand! State=%p #Ops=%X, Index=%X",
110 walk_state, state->results.num_results, index));
111 return (AE_AML_NO_RETURN_VALUE);
112 }
113
114 /* Remove the object */
115
116 state->results.num_results--;
117
118 *object = state->results.obj_desc[index];
119 state->results.obj_desc[index] = NULL;
120
121 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
122 "Obj=%p [%s] Index=%X State=%p Num=%X\n",
123 *object,
124 (*object) ? acpi_ut_get_object_type_name(*object) :
125 "NULL", index, walk_state,
126 state->results.num_results));
127
128 return (AE_OK);
129}
130#endif /* ACPI_FUTURE_USAGE */
131 55
132/******************************************************************************* 56/*******************************************************************************
133 * 57 *
@@ -138,122 +62,67 @@ acpi_ds_result_remove(union acpi_operand_object **object,
138 * 62 *
139 * RETURN: Status 63 * RETURN: Status
140 * 64 *
141 * DESCRIPTION: Pop an object off the bottom of this walk's result stack. In 65 * DESCRIPTION: Pop an object off the top of this walk's result stack
142 * other words, this is a FIFO.
143 * 66 *
144 ******************************************************************************/ 67 ******************************************************************************/
145 68
146acpi_status 69acpi_status
147acpi_ds_result_pop(union acpi_operand_object ** object, 70acpi_ds_result_pop(union acpi_operand_object **object,
148 struct acpi_walk_state * walk_state) 71 struct acpi_walk_state *walk_state)
149{ 72{
150 acpi_native_uint index; 73 acpi_native_uint index;
151 union acpi_generic_state *state; 74 union acpi_generic_state *state;
75 acpi_status status;
152 76
153 ACPI_FUNCTION_NAME(ds_result_pop); 77 ACPI_FUNCTION_NAME(ds_result_pop);
154 78
155 state = walk_state->results; 79 state = walk_state->results;
156 if (!state) {
157 return (AE_OK);
158 }
159
160 if (!state->results.num_results) {
161 ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
162 walk_state));
163 return (AE_AML_NO_RETURN_VALUE);
164 }
165 80
166 /* Remove top element */ 81 /* Incorrect state of result stack */
167 82
168 state->results.num_results--; 83 if (state && !walk_state->result_count) {
169 84 ACPI_ERROR((AE_INFO, "No results on result stack"));
170 for (index = ACPI_OBJ_NUM_OPERANDS; index; index--) { 85 return (AE_AML_INTERNAL);
171
172 /* Check for a valid result object */
173
174 if (state->results.obj_desc[index - 1]) {
175 *object = state->results.obj_desc[index - 1];
176 state->results.obj_desc[index - 1] = NULL;
177
178 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
179 "Obj=%p [%s] Index=%X State=%p Num=%X\n",
180 *object,
181 (*object) ?
182 acpi_ut_get_object_type_name(*object)
183 : "NULL", (u32) index - 1, walk_state,
184 state->results.num_results));
185
186 return (AE_OK);
187 }
188 } 86 }
189 87
190 ACPI_ERROR((AE_INFO, "No result objects! State=%p", walk_state)); 88 if (!state && walk_state->result_count) {
191 return (AE_AML_NO_RETURN_VALUE); 89 ACPI_ERROR((AE_INFO, "No result state for result stack"));
192} 90 return (AE_AML_INTERNAL);
193 91 }
194/*******************************************************************************
195 *
196 * FUNCTION: acpi_ds_result_pop_from_bottom
197 *
198 * PARAMETERS: Object - Where to return the popped object
199 * walk_state - Current Walk state
200 *
201 * RETURN: Status
202 *
203 * DESCRIPTION: Pop an object off the bottom of this walk's result stack. In
204 * other words, this is a FIFO.
205 *
206 ******************************************************************************/
207
208acpi_status
209acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
210 struct acpi_walk_state * walk_state)
211{
212 acpi_native_uint index;
213 union acpi_generic_state *state;
214 92
215 ACPI_FUNCTION_NAME(ds_result_pop_from_bottom); 93 /* Empty result stack */
216 94
217 state = walk_state->results;
218 if (!state) { 95 if (!state) {
219 ACPI_ERROR((AE_INFO, 96 ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
220 "No result object pushed! State=%p", walk_state));
221 return (AE_NOT_EXIST);
222 }
223
224 if (!state->results.num_results) {
225 ACPI_ERROR((AE_INFO, "No result objects! State=%p",
226 walk_state)); 97 walk_state));
227 return (AE_AML_NO_RETURN_VALUE); 98 return (AE_AML_NO_RETURN_VALUE);
228 } 99 }
229 100
230 /* Remove Bottom element */ 101 /* Return object of the top element and clean that top element result stack */
231
232 *object = state->results.obj_desc[0];
233
234 /* Push entire stack down one element */
235
236 for (index = 0; index < state->results.num_results; index++) {
237 state->results.obj_desc[index] =
238 state->results.obj_desc[index + 1];
239 }
240 102
241 state->results.num_results--; 103 walk_state->result_count--;
242 104 index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
243 /* Check for a valid result object */
244 105
106 *object = state->results.obj_desc[index];
245 if (!*object) { 107 if (!*object) {
246 ACPI_ERROR((AE_INFO, 108 ACPI_ERROR((AE_INFO,
247 "Null operand! State=%p #Ops=%X Index=%X", 109 "No result objects on result stack, State=%p",
248 walk_state, state->results.num_results, 110 walk_state));
249 (u32) index));
250 return (AE_AML_NO_RETURN_VALUE); 111 return (AE_AML_NO_RETURN_VALUE);
251 } 112 }
252 113
253 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] Results=%p State=%p\n", 114 state->results.obj_desc[index] = NULL;
254 *object, 115 if (index == 0) {
255 (*object) ? acpi_ut_get_object_type_name(*object) : 116 status = acpi_ds_result_stack_pop(walk_state);
256 "NULL", state, walk_state)); 117 if (ACPI_FAILURE(status)) {
118 return (status);
119 }
120 }
121
122 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
123 "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object,
124 acpi_ut_get_object_type_name(*object),
125 (u32) index, walk_state, walk_state->result_count));
257 126
258 return (AE_OK); 127 return (AE_OK);
259} 128}
@@ -276,39 +145,56 @@ acpi_ds_result_push(union acpi_operand_object * object,
276 struct acpi_walk_state * walk_state) 145 struct acpi_walk_state * walk_state)
277{ 146{
278 union acpi_generic_state *state; 147 union acpi_generic_state *state;
148 acpi_status status;
149 acpi_native_uint index;
279 150
280 ACPI_FUNCTION_NAME(ds_result_push); 151 ACPI_FUNCTION_NAME(ds_result_push);
281 152
153 if (walk_state->result_count > walk_state->result_size) {
154 ACPI_ERROR((AE_INFO, "Result stack is full"));
155 return (AE_AML_INTERNAL);
156 } else if (walk_state->result_count == walk_state->result_size) {
157
158 /* Extend the result stack */
159
160 status = acpi_ds_result_stack_push(walk_state);
161 if (ACPI_FAILURE(status)) {
162 ACPI_ERROR((AE_INFO,
163 "Failed to extend the result stack"));
164 return (status);
165 }
166 }
167
168 if (!(walk_state->result_count < walk_state->result_size)) {
169 ACPI_ERROR((AE_INFO, "No free elements in result stack"));
170 return (AE_AML_INTERNAL);
171 }
172
282 state = walk_state->results; 173 state = walk_state->results;
283 if (!state) { 174 if (!state) {
284 ACPI_ERROR((AE_INFO, "No result stack frame during push")); 175 ACPI_ERROR((AE_INFO, "No result stack frame during push"));
285 return (AE_AML_INTERNAL); 176 return (AE_AML_INTERNAL);
286 } 177 }
287 178
288 if (state->results.num_results == ACPI_OBJ_NUM_OPERANDS) {
289 ACPI_ERROR((AE_INFO,
290 "Result stack overflow: Obj=%p State=%p Num=%X",
291 object, walk_state, state->results.num_results));
292 return (AE_STACK_OVERFLOW);
293 }
294
295 if (!object) { 179 if (!object) {
296 ACPI_ERROR((AE_INFO, 180 ACPI_ERROR((AE_INFO,
297 "Null Object! Obj=%p State=%p Num=%X", 181 "Null Object! Obj=%p State=%p Num=%X",
298 object, walk_state, state->results.num_results)); 182 object, walk_state, walk_state->result_count));
299 return (AE_BAD_PARAMETER); 183 return (AE_BAD_PARAMETER);
300 } 184 }
301 185
302 state->results.obj_desc[state->results.num_results] = object; 186 /* Assign the address of object to the top free element of result stack */
303 state->results.num_results++; 187
188 index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
189 state->results.obj_desc[index] = object;
190 walk_state->result_count++;
304 191
305 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p Num=%X Cur=%X\n", 192 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p Num=%X Cur=%X\n",
306 object, 193 object,
307 object ?
308 acpi_ut_get_object_type_name((union 194 acpi_ut_get_object_type_name((union
309 acpi_operand_object *) 195 acpi_operand_object *)
310 object) : "NULL", 196 object), walk_state,
311 walk_state, state->results.num_results, 197 walk_state->result_count,
312 walk_state->current_result)); 198 walk_state->current_result));
313 199
314 return (AE_OK); 200 return (AE_OK);
@@ -322,16 +208,25 @@ acpi_ds_result_push(union acpi_operand_object * object,
322 * 208 *
323 * RETURN: Status 209 * RETURN: Status
324 * 210 *
325 * DESCRIPTION: Push an object onto the walk_state result stack. 211 * DESCRIPTION: Push an object onto the walk_state result stack
326 * 212 *
327 ******************************************************************************/ 213 ******************************************************************************/
328 214
329acpi_status acpi_ds_result_stack_push(struct acpi_walk_state * walk_state) 215static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
330{ 216{
331 union acpi_generic_state *state; 217 union acpi_generic_state *state;
332 218
333 ACPI_FUNCTION_NAME(ds_result_stack_push); 219 ACPI_FUNCTION_NAME(ds_result_stack_push);
334 220
221 /* Check for stack overflow */
222
223 if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
224 ACPI_RESULTS_OBJ_NUM_MAX) {
225 ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
226 walk_state, walk_state->result_size));
227 return (AE_STACK_OVERFLOW);
228 }
229
335 state = acpi_ut_create_generic_state(); 230 state = acpi_ut_create_generic_state();
336 if (!state) { 231 if (!state) {
337 return (AE_NO_MEMORY); 232 return (AE_NO_MEMORY);
@@ -340,6 +235,10 @@ acpi_status acpi_ds_result_stack_push(struct acpi_walk_state * walk_state)
340 state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT; 235 state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT;
341 acpi_ut_push_generic_state(&walk_state->results, state); 236 acpi_ut_push_generic_state(&walk_state->results, state);
342 237
238 /* Increase the length of the result stack by the length of frame */
239
240 walk_state->result_size += ACPI_RESULTS_FRAME_OBJ_NUM;
241
343 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n", 242 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n",
344 state, walk_state)); 243 state, walk_state));
345 244
@@ -354,11 +253,11 @@ acpi_status acpi_ds_result_stack_push(struct acpi_walk_state * walk_state)
354 * 253 *
355 * RETURN: Status 254 * RETURN: Status
356 * 255 *
357 * DESCRIPTION: Pop an object off of the walk_state result stack. 256 * DESCRIPTION: Pop an object off of the walk_state result stack
358 * 257 *
359 ******************************************************************************/ 258 ******************************************************************************/
360 259
361acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state) 260static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
362{ 261{
363 union acpi_generic_state *state; 262 union acpi_generic_state *state;
364 263
@@ -367,18 +266,27 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
367 /* Check for stack underflow */ 266 /* Check for stack underflow */
368 267
369 if (walk_state->results == NULL) { 268 if (walk_state->results == NULL) {
370 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Underflow - State=%p\n", 269 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
270 "Result stack underflow - State=%p\n",
371 walk_state)); 271 walk_state));
372 return (AE_AML_NO_OPERAND); 272 return (AE_AML_NO_OPERAND);
373 } 273 }
374 274
275 if (walk_state->result_size < ACPI_RESULTS_FRAME_OBJ_NUM) {
276 ACPI_ERROR((AE_INFO, "Insufficient result stack size"));
277 return (AE_AML_INTERNAL);
278 }
279
375 state = acpi_ut_pop_generic_state(&walk_state->results); 280 state = acpi_ut_pop_generic_state(&walk_state->results);
281 acpi_ut_delete_generic_state(state);
282
283 /* Decrease the length of result stack by the length of frame */
284
285 walk_state->result_size -= ACPI_RESULTS_FRAME_OBJ_NUM;
376 286
377 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 287 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
378 "Result=%p RemainingResults=%X State=%p\n", 288 "Result=%p RemainingResults=%X State=%p\n",
379 state, state->results.num_results, walk_state)); 289 state, walk_state->result_count, walk_state));
380
381 acpi_ut_delete_generic_state(state);
382 290
383 return (AE_OK); 291 return (AE_OK);
384} 292}
@@ -412,9 +320,13 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
412 320
413 /* Put the object onto the stack */ 321 /* Put the object onto the stack */
414 322
415 walk_state->operands[walk_state->num_operands] = object; 323 walk_state->operands[walk_state->operand_index] = object;
416 walk_state->num_operands++; 324 walk_state->num_operands++;
417 325
326 /* For the usual order of filling the operand stack */
327
328 walk_state->operand_index++;
329
418 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n", 330 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n",
419 object, 331 object,
420 acpi_ut_get_object_type_name((union 332 acpi_ut_get_object_type_name((union
@@ -484,43 +396,36 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
484 * 396 *
485 ******************************************************************************/ 397 ******************************************************************************/
486 398
487acpi_status 399void
488acpi_ds_obj_stack_pop_and_delete(u32 pop_count, 400acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
489 struct acpi_walk_state * walk_state) 401 struct acpi_walk_state *walk_state)
490{ 402{
491 u32 i; 403 acpi_native_int i;
492 union acpi_operand_object *obj_desc; 404 union acpi_operand_object *obj_desc;
493 405
494 ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete); 406 ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);
495 407
496 for (i = 0; i < pop_count; i++) { 408 if (pop_count == 0) {
497 409 return;
498 /* Check for stack underflow */ 410 }
499 411
412 for (i = (acpi_native_int) (pop_count - 1); i >= 0; i--) {
500 if (walk_state->num_operands == 0) { 413 if (walk_state->num_operands == 0) {
501 ACPI_ERROR((AE_INFO, 414 return;
502 "Object stack underflow! Count=%X State=%p #Ops=%X",
503 pop_count, walk_state,
504 walk_state->num_operands));
505 return (AE_STACK_UNDERFLOW);
506 } 415 }
507 416
508 /* Pop the stack and delete an object if present in this stack entry */ 417 /* Pop the stack and delete an object if present in this stack entry */
509 418
510 walk_state->num_operands--; 419 walk_state->num_operands--;
511 obj_desc = walk_state->operands[walk_state->num_operands]; 420 obj_desc = walk_state->operands[i];
512 if (obj_desc) { 421 if (obj_desc) {
513 acpi_ut_remove_reference(walk_state-> 422 acpi_ut_remove_reference(walk_state->operands[i]);
514 operands[walk_state-> 423 walk_state->operands[i] = NULL;
515 num_operands]);
516 walk_state->operands[walk_state->num_operands] = NULL;
517 } 424 }
518 } 425 }
519 426
520 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n", 427 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
521 pop_count, walk_state, walk_state->num_operands)); 428 pop_count, walk_state, walk_state->num_operands));
522
523 return (AE_OK);
524} 429}
525 430
526/******************************************************************************* 431/*******************************************************************************
@@ -560,7 +465,7 @@ struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
560 * 465 *
561 * RETURN: None 466 * RETURN: None
562 * 467 *
563 * DESCRIPTION: Place the Thread state at the head of the state list. 468 * DESCRIPTION: Place the Thread state at the head of the state list
564 * 469 *
565 ******************************************************************************/ 470 ******************************************************************************/
566 471
@@ -636,7 +541,6 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union
636 *thread) 541 *thread)
637{ 542{
638 struct acpi_walk_state *walk_state; 543 struct acpi_walk_state *walk_state;
639 acpi_status status;
640 544
641 ACPI_FUNCTION_TRACE(ds_create_walk_state); 545 ACPI_FUNCTION_TRACE(ds_create_walk_state);
642 546
@@ -659,14 +563,6 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union
659 acpi_ds_method_data_init(walk_state); 563 acpi_ds_method_data_init(walk_state);
660#endif 564#endif
661 565
662 /* Create an initial result stack entry */
663
664 status = acpi_ds_result_stack_push(walk_state);
665 if (ACPI_FAILURE(status)) {
666 ACPI_FREE(walk_state);
667 return_PTR(NULL);
668 }
669
670 /* Put the new state at the head of the walk list */ 566 /* Put the new state at the head of the walk list */
671 567
672 if (thread) { 568 if (thread) {
@@ -860,190 +756,3 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
860 ACPI_FREE(walk_state); 756 ACPI_FREE(walk_state);
861 return_VOID; 757 return_VOID;
862} 758}
863
864#ifdef ACPI_OBSOLETE_FUNCTIONS
865/*******************************************************************************
866 *
867 * FUNCTION: acpi_ds_result_insert
868 *
869 * PARAMETERS: Object - Object to push
870 * Index - Where to insert the object
871 * walk_state - Current Walk state
872 *
873 * RETURN: Status
874 *
875 * DESCRIPTION: Insert an object onto this walk's result stack
876 *
877 ******************************************************************************/
878
879acpi_status
880acpi_ds_result_insert(void *object,
881 u32 index, struct acpi_walk_state *walk_state)
882{
883 union acpi_generic_state *state;
884
885 ACPI_FUNCTION_NAME(ds_result_insert);
886
887 state = walk_state->results;
888 if (!state) {
889 ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
890 walk_state));
891 return (AE_NOT_EXIST);
892 }
893
894 if (index >= ACPI_OBJ_NUM_OPERANDS) {
895 ACPI_ERROR((AE_INFO,
896 "Index out of range: %X Obj=%p State=%p Num=%X",
897 index, object, walk_state,
898 state->results.num_results));
899 return (AE_BAD_PARAMETER);
900 }
901
902 if (!object) {
903 ACPI_ERROR((AE_INFO,
904 "Null Object! Index=%X Obj=%p State=%p Num=%X",
905 index, object, walk_state,
906 state->results.num_results));
907 return (AE_BAD_PARAMETER);
908 }
909
910 state->results.obj_desc[index] = object;
911 state->results.num_results++;
912
913 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
914 "Obj=%p [%s] State=%p Num=%X Cur=%X\n",
915 object,
916 object ?
917 acpi_ut_get_object_type_name((union
918 acpi_operand_object *)
919 object) : "NULL",
920 walk_state, state->results.num_results,
921 walk_state->current_result));
922
923 return (AE_OK);
924}
925
926/*******************************************************************************
927 *
928 * FUNCTION: acpi_ds_obj_stack_delete_all
929 *
930 * PARAMETERS: walk_state - Current Walk state
931 *
932 * RETURN: Status
933 *
934 * DESCRIPTION: Clear the object stack by deleting all objects that are on it.
935 * Should be used with great care, if at all!
936 *
937 ******************************************************************************/
938
939acpi_status acpi_ds_obj_stack_delete_all(struct acpi_walk_state * walk_state)
940{
941 u32 i;
942
943 ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_delete_all, walk_state);
944
945 /* The stack size is configurable, but fixed */
946
947 for (i = 0; i < ACPI_OBJ_NUM_OPERANDS; i++) {
948 if (walk_state->operands[i]) {
949 acpi_ut_remove_reference(walk_state->operands[i]);
950 walk_state->operands[i] = NULL;
951 }
952 }
953
954 return_ACPI_STATUS(AE_OK);
955}
956
957/*******************************************************************************
958 *
959 * FUNCTION: acpi_ds_obj_stack_pop_object
960 *
961 * PARAMETERS: Object - Where to return the popped object
962 * walk_state - Current Walk state
963 *
964 * RETURN: Status
965 *
966 * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT
967 * deleted by this routine.
968 *
969 ******************************************************************************/
970
971acpi_status
972acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
973 struct acpi_walk_state *walk_state)
974{
975 ACPI_FUNCTION_NAME(ds_obj_stack_pop_object);
976
977 /* Check for stack underflow */
978
979 if (walk_state->num_operands == 0) {
980 ACPI_ERROR((AE_INFO,
981 "Missing operand/stack empty! State=%p #Ops=%X",
982 walk_state, walk_state->num_operands));
983 *object = NULL;
984 return (AE_AML_NO_OPERAND);
985 }
986
987 /* Pop the stack */
988
989 walk_state->num_operands--;
990
991 /* Check for a valid operand */
992
993 if (!walk_state->operands[walk_state->num_operands]) {
994 ACPI_ERROR((AE_INFO,
995 "Null operand! State=%p #Ops=%X",
996 walk_state, walk_state->num_operands));
997 *object = NULL;
998 return (AE_AML_NO_OPERAND);
999 }
1000
1001 /* Get operand and set stack entry to null */
1002
1003 *object = walk_state->operands[walk_state->num_operands];
1004 walk_state->operands[walk_state->num_operands] = NULL;
1005
1006 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n",
1007 *object, acpi_ut_get_object_type_name(*object),
1008 walk_state, walk_state->num_operands));
1009
1010 return (AE_OK);
1011}
1012
1013/*******************************************************************************
1014 *
1015 * FUNCTION: acpi_ds_obj_stack_get_value
1016 *
1017 * PARAMETERS: Index - Stack index whose value is desired. Based
1018 * on the top of the stack (index=0 == top)
1019 * walk_state - Current Walk state
1020 *
1021 * RETURN: Pointer to the requested operand
1022 *
1023 * DESCRIPTION: Retrieve an object from this walk's operand stack. Index must
1024 * be within the range of the current stack pointer.
1025 *
1026 ******************************************************************************/
1027
1028void *acpi_ds_obj_stack_get_value(u32 index, struct acpi_walk_state *walk_state)
1029{
1030
1031 ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_get_value, walk_state);
1032
1033 /* Can't do it if the stack is empty */
1034
1035 if (walk_state->num_operands == 0) {
1036 return_PTR(NULL);
1037 }
1038
1039 /* or if the index is past the top of the stack */
1040
1041 if (index > (walk_state->num_operands - (u32) 1)) {
1042 return_PTR(NULL);
1043 }
1044
1045 return_PTR(walk_state->
1046 operands[(acpi_native_uint) (walk_state->num_operands - 1) -
1047 index]);
1048}
1049#endif
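
The dswstate.c rework above replaces the single fixed-size result frame with a linked list of frames: acpi_ds_result_push() extends the stack by ACPI_RESULTS_FRAME_OBJ_NUM slots whenever the current frame is full, acpi_ds_result_pop() indexes the top frame with result_count modulo the frame size and releases a frame once its slot 0 is cleared, and the old FIFO/insert/remove helpers become obsolete. Below is a standalone userspace model of that bookkeeping, with shortened names, malloc/free in place of the generic-state cache, and most error reporting trimmed; it is a sketch of the idea, not the ACPICA code.

/*
 * Standalone userspace model: shortened names, malloc/free in place of the
 * ACPI generic-state cache, and most error reporting trimmed.
 */
#include <stdio.h>
#include <stdlib.h>

#define FRAME_OBJ_NUM 8			/* stand-in for ACPI_RESULTS_FRAME_OBJ_NUM */

struct result_frame {
	void *obj[FRAME_OBJ_NUM];
	struct result_frame *prev;
};

struct walk_state {
	struct result_frame *results;	/* top frame of the result stack */
	unsigned int result_count;	/* objects currently on the stack */
	unsigned int result_size;	/* slots currently allocated */
};

/* acpi_ds_result_stack_push(): grow the stack by one frame */
static int result_stack_push(struct walk_state *ws)
{
	struct result_frame *frame = calloc(1, sizeof(*frame));

	if (!frame)
		return -1;
	frame->prev = ws->results;
	ws->results = frame;
	ws->result_size += FRAME_OBJ_NUM;
	return 0;
}

/* acpi_ds_result_stack_pop(): release the top frame */
static void result_stack_pop(struct walk_state *ws)
{
	struct result_frame *frame = ws->results;

	ws->results = frame->prev;
	ws->result_size -= FRAME_OBJ_NUM;
	free(frame);
}

/* acpi_ds_result_push(): extend on demand, then store at count % frame size */
static int result_push(struct walk_state *ws, void *object)
{
	if (ws->result_count == ws->result_size && result_stack_push(ws))
		return -1;

	ws->results->obj[ws->result_count % FRAME_OBJ_NUM] = object;
	ws->result_count++;
	return 0;
}

/* acpi_ds_result_pop(): take the top object, free the frame once it drains */
static void *result_pop(struct walk_state *ws)
{
	unsigned int index;
	void *object;

	if (!ws->result_count)
		return NULL;

	ws->result_count--;
	index = ws->result_count % FRAME_OBJ_NUM;
	object = ws->results->obj[index];
	ws->results->obj[index] = NULL;
	if (index == 0)
		result_stack_pop(ws);
	return object;
}

int main(void)
{
	struct walk_state ws = { 0 };
	int values[20];
	int i;

	for (i = 0; i < 20; i++)	/* grows the stack to three frames */
		if (result_push(&ws, &values[i]))
			return 1;

	while (ws.result_count)		/* drains and frees every frame again */
		if (result_pop(&ws) == NULL)
			return 1;

	printf("count=%u size=%u\n", ws.result_count, ws.result_size);
	return 0;
}

This is also why the explicit acpi_ds_result_stack_push()/pop() calls disappear from dswexec.c and acpi_ds_create_walk_state() above: the first push into a frame allocates it on demand, and draining a frame releases it.
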
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 7222a18a0319..0924992187e8 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -73,38 +73,14 @@ enum ec_event {
73 73
74#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ 74#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
75#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 75#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
76#define ACPI_EC_UDELAY 100 /* Wait 100us before polling EC again */
76 77
77enum { 78enum {
78 EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */ 79 EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */
79 EC_FLAGS_QUERY_PENDING, /* Query is pending */ 80 EC_FLAGS_QUERY_PENDING, /* Query is pending */
80 EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */ 81 EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */
81 EC_FLAGS_NO_ADDRESS_GPE, /* Expect GPE only for non-address event */ 82 EC_FLAGS_NO_GPE, /* Don't use GPE mode */
82 EC_FLAGS_ADDRESS, /* Address is being written */ 83 EC_FLAGS_RESCHEDULE_POLL /* Re-schedule poll */
83 EC_FLAGS_NO_WDATA_GPE, /* Don't expect WDATA GPE event */
84 EC_FLAGS_WDATA, /* Data is being written */
85 EC_FLAGS_NO_OBF1_GPE, /* Don't expect GPE before read */
86};
87
88static int acpi_ec_remove(struct acpi_device *device, int type);
89static int acpi_ec_start(struct acpi_device *device);
90static int acpi_ec_stop(struct acpi_device *device, int type);
91static int acpi_ec_add(struct acpi_device *device);
92
93static const struct acpi_device_id ec_device_ids[] = {
94 {"PNP0C09", 0},
95 {"", 0},
96};
97
98static struct acpi_driver acpi_ec_driver = {
99 .name = "ec",
100 .class = ACPI_EC_CLASS,
101 .ids = ec_device_ids,
102 .ops = {
103 .add = acpi_ec_add,
104 .remove = acpi_ec_remove,
105 .start = acpi_ec_start,
106 .stop = acpi_ec_stop,
107 },
108}; 84};
109 85
110/* If we find an EC via the ECDT, we need to keep a ptr to its context */ 86/* If we find an EC via the ECDT, we need to keep a ptr to its context */
@@ -129,6 +105,8 @@ static struct acpi_ec {
129 struct mutex lock; 105 struct mutex lock;
130 wait_queue_head_t wait; 106 wait_queue_head_t wait;
131 struct list_head list; 107 struct list_head list;
108 struct delayed_work work;
109 atomic_t irq_count;
132 u8 handlers_installed; 110 u8 handlers_installed;
133} *boot_ec, *first_ec; 111} *boot_ec, *first_ec;
134 112
@@ -177,65 +155,52 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
177 return 0; 155 return 0;
178} 156}
179 157
180static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll) 158static void ec_schedule_ec_poll(struct acpi_ec *ec)
181{ 159{
182 int ret = 0; 160 if (test_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags))
161 schedule_delayed_work(&ec->work,
162 msecs_to_jiffies(ACPI_EC_DELAY));
163}
164
165static void ec_switch_to_poll_mode(struct acpi_ec *ec)
166{
167 set_bit(EC_FLAGS_NO_GPE, &ec->flags);
168 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
169 acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
170 set_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
171}
183 172
184 if (unlikely(event == ACPI_EC_EVENT_OBF_1 && 173static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
185 test_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags))) 174{
186 force_poll = 1; 175 atomic_set(&ec->irq_count, 0);
187 if (unlikely(test_bit(EC_FLAGS_ADDRESS, &ec->flags) &&
188 test_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags)))
189 force_poll = 1;
190 if (unlikely(test_bit(EC_FLAGS_WDATA, &ec->flags) &&
191 test_bit(EC_FLAGS_NO_WDATA_GPE, &ec->flags)))
192 force_poll = 1;
193 if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) && 176 if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
194 likely(!force_poll)) { 177 likely(!force_poll)) {
195 if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event), 178 if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
196 msecs_to_jiffies(ACPI_EC_DELAY))) 179 msecs_to_jiffies(ACPI_EC_DELAY)))
197 goto end; 180 return 0;
198 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 181 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
199 if (acpi_ec_check_status(ec, event)) { 182 if (acpi_ec_check_status(ec, event)) {
200 if (event == ACPI_EC_EVENT_OBF_1) { 183 /* missing GPEs, switch back to poll mode */
201 /* miss OBF_1 GPE, don't expect it */ 184 if (printk_ratelimit())
202 pr_info(PREFIX "missing OBF confirmation, " 185 pr_info(PREFIX "missing confirmations, "
203 "don't expect it any longer.\n");
204 set_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags);
205 } else if (test_bit(EC_FLAGS_ADDRESS, &ec->flags)) {
206 /* miss address GPE, don't expect it anymore */
207 pr_info(PREFIX "missing address confirmation, "
208 "don't expect it any longer.\n");
209 set_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags);
210 } else if (test_bit(EC_FLAGS_WDATA, &ec->flags)) {
211 /* miss write data GPE, don't expect it */
212 pr_info(PREFIX "missing write data confirmation, "
213 "don't expect it any longer.\n");
214 set_bit(EC_FLAGS_NO_WDATA_GPE, &ec->flags);
215 } else {
216 /* missing GPEs, switch back to poll mode */
217 if (printk_ratelimit())
218 pr_info(PREFIX "missing confirmations, "
219 "switch off interrupt mode.\n"); 186 "switch off interrupt mode.\n");
220 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags); 187 ec_switch_to_poll_mode(ec);
221 } 188 ec_schedule_ec_poll(ec);
222 goto end; 189 return 0;
223 } 190 }
224 } else { 191 } else {
225 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY); 192 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
226 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 193 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
227 while (time_before(jiffies, delay)) { 194 while (time_before(jiffies, delay)) {
228 if (acpi_ec_check_status(ec, event)) 195 if (acpi_ec_check_status(ec, event))
229 goto end; 196 return 0;
197 udelay(ACPI_EC_UDELAY);
230 } 198 }
231 } 199 }
232 pr_err(PREFIX "acpi_ec_wait timeout," 200 pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
233 " status = %d, expect_event = %d\n", 201 acpi_ec_read_status(ec),
234 acpi_ec_read_status(ec), event); 202 (event == ACPI_EC_EVENT_OBF_1) ? "\"b0=1\"" : "\"b1=0\"");
235 ret = -ETIME; 203 return -ETIME;
236 end:
237 clear_bit(EC_FLAGS_ADDRESS, &ec->flags);
238 return ret;
239} 204}
240 205
241static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, 206static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
@@ -245,8 +210,8 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
245{ 210{
246 int result = 0; 211 int result = 0;
247 set_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 212 set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
248 acpi_ec_write_cmd(ec, command);
249 pr_debug(PREFIX "transaction start\n"); 213 pr_debug(PREFIX "transaction start\n");
214 acpi_ec_write_cmd(ec, command);
250 for (; wdata_len > 0; --wdata_len) { 215 for (; wdata_len > 0; --wdata_len) {
251 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll); 216 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
252 if (result) { 217 if (result) {
@@ -254,15 +219,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
254 "write_cmd timeout, command = %d\n", command); 219 "write_cmd timeout, command = %d\n", command);
255 goto end; 220 goto end;
256 } 221 }
257 /* mark the address byte written to EC */
258 if (rdata_len + wdata_len > 1)
259 set_bit(EC_FLAGS_ADDRESS, &ec->flags);
260 set_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 222 set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
261 acpi_ec_write_data(ec, *(wdata++)); 223 acpi_ec_write_data(ec, *(wdata++));
262 } 224 }
263 225
264 if (!rdata_len) { 226 if (!rdata_len) {
265 set_bit(EC_FLAGS_WDATA, &ec->flags);
266 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll); 227 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
267 if (result) { 228 if (result) {
268 pr_err(PREFIX 229 pr_err(PREFIX
@@ -527,47 +488,51 @@ static u32 acpi_ec_gpe_handler(void *data)
527{ 488{
528 acpi_status status = AE_OK; 489 acpi_status status = AE_OK;
529 struct acpi_ec *ec = data; 490 struct acpi_ec *ec = data;
491 u8 state = acpi_ec_read_status(ec);
530 492
531 pr_debug(PREFIX "~~~> interrupt\n"); 493 pr_debug(PREFIX "~~~> interrupt\n");
494 atomic_inc(&ec->irq_count);
495 if (atomic_read(&ec->irq_count) > 5) {
496 pr_err(PREFIX "GPE storm detected, disabling EC GPE\n");
497 ec_switch_to_poll_mode(ec);
498 goto end;
499 }
532 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 500 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
533 if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) 501 if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
534 wake_up(&ec->wait); 502 wake_up(&ec->wait);
535 503
536 if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_SCI) { 504 if (state & ACPI_EC_FLAG_SCI) {
537 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) 505 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
538 status = acpi_os_execute(OSL_EC_BURST_HANDLER, 506 status = acpi_os_execute(OSL_EC_BURST_HANDLER,
539 acpi_ec_gpe_query, ec); 507 acpi_ec_gpe_query, ec);
540 } else if (unlikely(!test_bit(EC_FLAGS_GPE_MODE, &ec->flags))) { 508 } else if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
509 !test_bit(EC_FLAGS_NO_GPE, &ec->flags) &&
510 in_interrupt()) {
541 /* this is non-query, must be confirmation */ 511 /* this is non-query, must be confirmation */
542 if (printk_ratelimit()) 512 if (printk_ratelimit())
543 pr_info(PREFIX "non-query interrupt received," 513 pr_info(PREFIX "non-query interrupt received,"
544 " switching to interrupt mode\n"); 514 " switching to interrupt mode\n");
545 set_bit(EC_FLAGS_GPE_MODE, &ec->flags); 515 set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
516 clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
546 } 517 }
547 518end:
519 ec_schedule_ec_poll(ec);
548 return ACPI_SUCCESS(status) ? 520 return ACPI_SUCCESS(status) ?
549 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; 521 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
550} 522}
551 523
524static void do_ec_poll(struct work_struct *work)
525{
526 struct acpi_ec *ec = container_of(work, struct acpi_ec, work.work);
527 atomic_set(&ec->irq_count, 0);
528 (void)acpi_ec_gpe_handler(ec);
529}
530
552/* -------------------------------------------------------------------------- 531/* --------------------------------------------------------------------------
553 Address Space Management 532 Address Space Management
554 -------------------------------------------------------------------------- */ 533 -------------------------------------------------------------------------- */
555 534
556static acpi_status 535static acpi_status
557acpi_ec_space_setup(acpi_handle region_handle,
558 u32 function, void *handler_context, void **return_context)
559{
560 /*
561 * The EC object is in the handler context and is needed
562 * when calling the acpi_ec_space_handler.
563 */
564 *return_context = (function != ACPI_REGION_DEACTIVATE) ?
565 handler_context : NULL;
566
567 return AE_OK;
568}
569
570static acpi_status
571acpi_ec_space_handler(u32 function, acpi_physical_address address, 536acpi_ec_space_handler(u32 function, acpi_physical_address address,
572 u32 bits, acpi_integer *value, 537 u32 bits, acpi_integer *value,
573 void *handler_context, void *region_context) 538 void *handler_context, void *region_context)
@@ -669,16 +634,11 @@ static int acpi_ec_add_fs(struct acpi_device *device)
669 return -ENODEV; 634 return -ENODEV;
670 } 635 }
671 636
672 entry = create_proc_entry(ACPI_EC_FILE_INFO, S_IRUGO, 637 entry = proc_create_data(ACPI_EC_FILE_INFO, S_IRUGO,
673 acpi_device_dir(device)); 638 acpi_device_dir(device),
639 &acpi_ec_info_ops, acpi_driver_data(device));
674 if (!entry) 640 if (!entry)
675 return -ENODEV; 641 return -ENODEV;
676 else {
677 entry->proc_fops = &acpi_ec_info_ops;
678 entry->data = acpi_driver_data(device);
679 entry->owner = THIS_MODULE;
680 }
681
682 return 0; 642 return 0;
683} 643}
684 644
@@ -709,6 +669,8 @@ static struct acpi_ec *make_acpi_ec(void)
709 mutex_init(&ec->lock); 669 mutex_init(&ec->lock);
710 init_waitqueue_head(&ec->wait); 670 init_waitqueue_head(&ec->wait);
711 INIT_LIST_HEAD(&ec->list); 671 INIT_LIST_HEAD(&ec->list);
672 INIT_DELAYED_WORK_DEFERRABLE(&ec->work, do_ec_poll);
673 atomic_set(&ec->irq_count, 0);
712 return ec; 674 return ec;
713} 675}
714 676
@@ -741,17 +703,21 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
741 status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec->gpe); 703 status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec->gpe);
742 if (ACPI_FAILURE(status)) 704 if (ACPI_FAILURE(status))
743 return status; 705 return status;
744 /* Find and register all query methods */
745 acpi_walk_namespace(ACPI_TYPE_METHOD, handle, 1,
746 acpi_ec_register_query_methods, ec, NULL);
747 /* Use the global lock for all EC transactions? */ 706 /* Use the global lock for all EC transactions? */
748 acpi_evaluate_integer(handle, "_GLK", NULL, &ec->global_lock); 707 acpi_evaluate_integer(handle, "_GLK", NULL, &ec->global_lock);
749 ec->handle = handle; 708 ec->handle = handle;
750 return AE_CTRL_TERMINATE; 709 return AE_CTRL_TERMINATE;
751} 710}
752 711
712static void ec_poll_stop(struct acpi_ec *ec)
713{
714 clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
715 cancel_delayed_work(&ec->work);
716}
717
753static void ec_remove_handlers(struct acpi_ec *ec) 718static void ec_remove_handlers(struct acpi_ec *ec)
754{ 719{
720 ec_poll_stop(ec);
755 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, 721 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
756 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) 722 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
757 pr_err(PREFIX "failed to remove space handler\n"); 723 pr_err(PREFIX "failed to remove space handler\n");
@@ -771,31 +737,28 @@ static int acpi_ec_add(struct acpi_device *device)
771 strcpy(acpi_device_class(device), ACPI_EC_CLASS); 737 strcpy(acpi_device_class(device), ACPI_EC_CLASS);
772 738
773 /* Check for boot EC */ 739 /* Check for boot EC */
774 if (boot_ec) { 740 if (boot_ec &&
775 if (boot_ec->handle == device->handle) { 741 (boot_ec->handle == device->handle ||
776 /* Pre-loaded EC from DSDT, just move pointer */ 742 boot_ec->handle == ACPI_ROOT_OBJECT)) {
777 ec = boot_ec; 743 ec = boot_ec;
778 boot_ec = NULL; 744 boot_ec = NULL;
779 goto end; 745 } else {
780 } else if (boot_ec->handle == ACPI_ROOT_OBJECT) { 746 ec = make_acpi_ec();
781 /* ECDT-based EC, time to shut it down */ 747 if (!ec)
782 ec_remove_handlers(boot_ec); 748 return -ENOMEM;
783 kfree(boot_ec); 749 if (ec_parse_device(device->handle, 0, ec, NULL) !=
784 first_ec = boot_ec = NULL; 750 AE_CTRL_TERMINATE) {
751 kfree(ec);
752 return -EINVAL;
785 } 753 }
786 } 754 }
787 755
788 ec = make_acpi_ec();
789 if (!ec)
790 return -ENOMEM;
791
792 if (ec_parse_device(device->handle, 0, ec, NULL) !=
793 AE_CTRL_TERMINATE) {
794 kfree(ec);
795 return -EINVAL;
796 }
797 ec->handle = device->handle; 756 ec->handle = device->handle;
798 end: 757
758 /* Find and register all query methods */
759 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
760 acpi_ec_register_query_methods, ec, NULL);
761
799 if (!first_ec) 762 if (!first_ec)
800 first_ec = ec; 763 first_ec = ec;
801 acpi_driver_data(device) = ec; 764 acpi_driver_data(device) = ec;
@@ -870,7 +833,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
870 status = acpi_install_address_space_handler(ec->handle, 833 status = acpi_install_address_space_handler(ec->handle,
871 ACPI_ADR_SPACE_EC, 834 ACPI_ADR_SPACE_EC,
872 &acpi_ec_space_handler, 835 &acpi_ec_space_handler,
873 &acpi_ec_space_setup, ec); 836 NULL, ec);
874 if (ACPI_FAILURE(status)) { 837 if (ACPI_FAILURE(status)) {
875 acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler); 838 acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler);
876 return -ENODEV; 839 return -ENODEV;
@@ -897,6 +860,7 @@ static int acpi_ec_start(struct acpi_device *device)
897 860
898 /* EC is fully operational, allow queries */ 861 /* EC is fully operational, allow queries */
899 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 862 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
863 ec_schedule_ec_poll(ec);
900 return ret; 864 return ret;
901} 865}
902 866
@@ -924,6 +888,11 @@ int __init acpi_boot_ec_enable(void)
924 return -EFAULT; 888 return -EFAULT;
925} 889}
926 890
891static const struct acpi_device_id ec_device_ids[] = {
892 {"PNP0C09", 0},
893 {"", 0},
894};
895
927int __init acpi_ec_ecdt_probe(void) 896int __init acpi_ec_ecdt_probe(void)
928{ 897{
929 int ret; 898 int ret;
@@ -944,6 +913,7 @@ int __init acpi_ec_ecdt_probe(void)
944 boot_ec->data_addr = ecdt_ptr->data.address; 913 boot_ec->data_addr = ecdt_ptr->data.address;
945 boot_ec->gpe = ecdt_ptr->gpe; 914 boot_ec->gpe = ecdt_ptr->gpe;
946 boot_ec->handle = ACPI_ROOT_OBJECT; 915 boot_ec->handle = ACPI_ROOT_OBJECT;
916 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
947 } else { 917 } else {
948 /* This workaround is needed only on some broken machines, 918 /* This workaround is needed only on some broken machines,
949 * which require early EC, but fail to provide ECDT */ 919 * which require early EC, but fail to provide ECDT */
@@ -973,6 +943,39 @@ int __init acpi_ec_ecdt_probe(void)
973 return -ENODEV; 943 return -ENODEV;
974} 944}
975 945
946static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state)
947{
948 struct acpi_ec *ec = acpi_driver_data(device);
949 /* Stop using GPE */
950 set_bit(EC_FLAGS_NO_GPE, &ec->flags);
951 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
952 acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
953 return 0;
954}
955
956static int acpi_ec_resume(struct acpi_device *device)
957{
958 struct acpi_ec *ec = acpi_driver_data(device);
959 /* Enable use of GPE back */
960 clear_bit(EC_FLAGS_NO_GPE, &ec->flags);
961 acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
962 return 0;
963}
964
965static struct acpi_driver acpi_ec_driver = {
966 .name = "ec",
967 .class = ACPI_EC_CLASS,
968 .ids = ec_device_ids,
969 .ops = {
970 .add = acpi_ec_add,
971 .remove = acpi_ec_remove,
972 .start = acpi_ec_start,
973 .stop = acpi_ec_stop,
974 .suspend = acpi_ec_suspend,
975 .resume = acpi_ec_resume,
976 },
977};
978
976static int __init acpi_ec_init(void) 979static int __init acpi_ec_init(void)
977{ 980{
978 int result = 0; 981 int result = 0;
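
The ec.c changes above add a polling fallback driven by delayed work plus a simple GPE-storm detector: the GPE handler counts interrupts, the poll worker resets the counter, and more than five interrupts between polls switches the EC to poll mode and disables its GPE. The following userspace model of that decision logic uses plain ints instead of atomic_t, a flag instead of schedule_delayed_work(), and a gpe_mode guard standing in for the real driver actually disabling the GPE line; the threshold is taken from the "> 5" test in the diff.

/*
 * Userspace model only: plain ints replace atomic_t, a flag replaces
 * schedule_delayed_work(), and the gpe_mode guard stands in for the real
 * driver disabling the GPE line once it gives up on interrupt mode.
 */
#include <stdio.h>

#define EC_STORM_THRESHOLD 5	/* the "> 5" test in acpi_ec_gpe_handler() */

struct ec_model {
	int irq_count;		/* GPEs seen since the last poll */
	int gpe_mode;		/* 1 = interrupt driven, 0 = polled */
	int poll_scheduled;	/* stand-in for the delayed work item */
};

static void ec_switch_to_poll_mode(struct ec_model *ec)
{
	ec->gpe_mode = 0;
	ec->poll_scheduled = 1;	/* ec_schedule_ec_poll() equivalent */
	printf("GPE storm detected, disabling EC GPE\n");
}

static void ec_gpe_handler(struct ec_model *ec)
{
	ec->irq_count++;
	if (ec->irq_count > EC_STORM_THRESHOLD && ec->gpe_mode)
		ec_switch_to_poll_mode(ec);
}

static void do_ec_poll(struct ec_model *ec)
{
	ec->irq_count = 0;	/* a quiet poll interval rearms the detector */
}

int main(void)
{
	struct ec_model ec = { 0, 1, 0 };
	int i;

	for (i = 0; i < 8; i++)	/* interrupt burst with no poll in between */
		ec_gpe_handler(&ec);
	do_ec_poll(&ec);

	printf("gpe_mode=%d poll_scheduled=%d\n", ec.gpe_mode, ec.poll_scheduled);
	return 0;
}
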
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index abec1ca94cf4..0c24bd4d6562 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -102,6 +102,7 @@ static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait)
102} 102}
103 103
104static const struct file_operations acpi_system_event_ops = { 104static const struct file_operations acpi_system_event_ops = {
105 .owner = THIS_MODULE,
105 .open = acpi_system_open_event, 106 .open = acpi_system_open_event,
106 .read = acpi_system_read_event, 107 .read = acpi_system_read_event,
107 .release = acpi_system_close_event, 108 .release = acpi_system_close_event,
@@ -294,10 +295,9 @@ static int __init acpi_event_init(void)
294 295
295#ifdef CONFIG_ACPI_PROC_EVENT 296#ifdef CONFIG_ACPI_PROC_EVENT
296 /* 'event' [R] */ 297 /* 'event' [R] */
297 entry = create_proc_entry("event", S_IRUSR, acpi_root_dir); 298 entry = proc_create("event", S_IRUSR, acpi_root_dir,
298 if (entry) 299 &acpi_system_event_ops);
299 entry->proc_fops = &acpi_system_event_ops; 300 if (!entry)
300 else
301 return -ENODEV; 301 return -ENODEV;
302#endif 302#endif
303 303
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index 3048801a37b5..5d30e5be1b1c 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index 0dadd2adc800..5354be44f876 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -248,10 +248,6 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
248 248
249 ACPI_FUNCTION_TRACE(ev_disable_gpe); 249 ACPI_FUNCTION_TRACE(ev_disable_gpe);
250 250
251 if (!(gpe_event_info->flags & ACPI_GPE_ENABLE_MASK)) {
252 return_ACPI_STATUS(AE_OK);
253 }
254
255 /* Make sure HW enable masks are updated */ 251 /* Make sure HW enable masks are updated */
256 252
257 status = 253 status =
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index 361ebe6c4a6f..e6c4d4c49e79 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index 21cb749d0c75..2113e58e2221 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -49,22 +49,7 @@
49#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evmisc") 50ACPI_MODULE_NAME("evmisc")
51 51
52/* Names for Notify() values, used for debug output */
53#ifdef ACPI_DEBUG_OUTPUT
54static const char *acpi_notify_value_names[] = {
55 "Bus Check",
56 "Device Check",
57 "Device Wake",
58 "Eject Request",
59 "Device Check Light",
60 "Frequency Mismatch",
61 "Bus Mode Mismatch",
62 "Power Fault"
63};
64#endif
65
66/* Pointer to FACS needed for the Global Lock */ 52/* Pointer to FACS needed for the Global Lock */
67
68static struct acpi_table_facs *facs = NULL; 53static struct acpi_table_facs *facs = NULL;
69 54
70/* Local prototypes */ 55/* Local prototypes */
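
The hunk above deletes the file-local acpi_notify_value_names[] table; the debug output in acpi_ev_queue_notify_request() now goes through a shared acpi_ut_get_notify_name() helper instead. A standalone sketch of that kind of lookup is shown below, reusing the names from the removed table; the helper body is an illustration, not the ACPICA implementation.

/*
 * Standalone sketch of an acpi_ut_get_notify_name()-style lookup.  The
 * table contents come from the removed code above; values past the
 * architected range are reported as device specific.
 */
#include <stdio.h>

static const char *notify_value_names[] = {
	"Bus Check",
	"Device Check",
	"Device Wake",
	"Eject Request",
	"Device Check Light",
	"Frequency Mismatch",
	"Bus Mode Mismatch",
	"Power Fault",
};

static const char *get_notify_name(unsigned int notify_value)
{
	if (notify_value < sizeof(notify_value_names) / sizeof(notify_value_names[0]))
		return notify_value_names[notify_value];
	return "**Device Specific**";
}

int main(void)
{
	printf("Notify 0x01 -> %s\n", get_notify_name(0x01));
	printf("Notify 0x80 -> %s\n", get_notify_name(0x80));
	return 0;
}
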
@@ -94,7 +79,6 @@ u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
94 switch (node->type) { 79 switch (node->type) {
95 case ACPI_TYPE_DEVICE: 80 case ACPI_TYPE_DEVICE:
96 case ACPI_TYPE_PROCESSOR: 81 case ACPI_TYPE_PROCESSOR:
97 case ACPI_TYPE_POWER:
98 case ACPI_TYPE_THERMAL: 82 case ACPI_TYPE_THERMAL:
99 /* 83 /*
100 * These are the ONLY objects that can receive ACPI notifications 84 * These are the ONLY objects that can receive ACPI notifications
@@ -139,17 +123,9 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
139 * initiate soft-off or sleep operation? 123 * initiate soft-off or sleep operation?
140 */ 124 */
141 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 125 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
142 "Dispatching Notify(%X) on node %p\n", notify_value, 126 "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n",
143 node)); 127 acpi_ut_get_node_name(node), node, notify_value,
144 128 acpi_ut_get_notify_name(notify_value)));
145 if (notify_value <= 7) {
146 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notify value: %s\n",
147 acpi_notify_value_names[notify_value]));
148 } else {
149 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
150 "Notify value: 0x%2.2X **Device Specific**\n",
151 notify_value));
152 }
153 129
154 /* Get the notify object attached to the NS Node */ 130 /* Get the notify object attached to the NS Node */
155 131
@@ -159,10 +135,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
159 /* We have the notify object, Get the right handler */ 135 /* We have the notify object, Get the right handler */
160 136
161 switch (node->type) { 137 switch (node->type) {
138
139 /* Notify allowed only on these types */
140
162 case ACPI_TYPE_DEVICE: 141 case ACPI_TYPE_DEVICE:
163 case ACPI_TYPE_THERMAL: 142 case ACPI_TYPE_THERMAL:
164 case ACPI_TYPE_PROCESSOR: 143 case ACPI_TYPE_PROCESSOR:
165 case ACPI_TYPE_POWER:
166 144
167 if (notify_value <= ACPI_MAX_SYS_NOTIFY) { 145 if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
168 handler_obj = 146 handler_obj =
@@ -179,8 +157,13 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
179 } 157 }
180 } 158 }
181 159
182 /* If there is any handler to run, schedule the dispatcher */ 160 /*
183 161 * If there is any handler to run, schedule the dispatcher.
162 * Check for:
163 * 1) Global system notify handler
164 * 2) Global device notify handler
165 * 3) Per-device notify handler
166 */
184 if ((acpi_gbl_system_notify.handler 167 if ((acpi_gbl_system_notify.handler
185 && (notify_value <= ACPI_MAX_SYS_NOTIFY)) 168 && (notify_value <= ACPI_MAX_SYS_NOTIFY))
186 || (acpi_gbl_device_notify.handler 169 || (acpi_gbl_device_notify.handler
@@ -190,6 +173,13 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
190 return (AE_NO_MEMORY); 173 return (AE_NO_MEMORY);
191 } 174 }
192 175
176 if (!handler_obj) {
177 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
178 "Executing system notify handler for Notify (%4.4s, %X) node %p\n",
179 acpi_ut_get_node_name(node),
180 notify_value, node));
181 }
182
193 notify_info->common.descriptor_type = 183 notify_info->common.descriptor_type =
194 ACPI_DESC_TYPE_STATE_NOTIFY; 184 ACPI_DESC_TYPE_STATE_NOTIFY;
195 notify_info->notify.node = node; 185 notify_info->notify.node = node;
@@ -202,15 +192,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
202 if (ACPI_FAILURE(status)) { 192 if (ACPI_FAILURE(status)) {
203 acpi_ut_delete_generic_state(notify_info); 193 acpi_ut_delete_generic_state(notify_info);
204 } 194 }
205 } 195 } else {
206
207 if (!handler_obj) {
208 /* 196 /*
209 * There is no per-device notify handler for this device. 197 * There is no notify handler (per-device or system) for this device.
210 * This may or may not be a problem.
211 */ 198 */
212 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 199 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
213 "No notify handler for Notify(%4.4s, %X) node %p\n", 200 "No notify handler for Notify (%4.4s, %X) node %p\n",
214 acpi_ut_get_node_name(node), notify_value, 201 acpi_ut_get_node_name(node), notify_value,
215 node)); 202 node));
216 } 203 }
@@ -349,9 +336,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
349 336
350 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); 337 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
351 338
352 status = 339 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
353 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, 340 ACPI_CAST_INDIRECT_PTR(struct
354 (struct acpi_table_header **)&facs); 341 acpi_table_header,
342 &facs));
355 if (ACPI_FAILURE(status)) { 343 if (ACPI_FAILURE(status)) {
356 return_ACPI_STATUS(status); 344 return_ACPI_STATUS(status);
357 } 345 }
@@ -439,7 +427,8 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
439 * Only one thread can acquire the GL at a time, the global_lock_mutex 427 * Only one thread can acquire the GL at a time, the global_lock_mutex
440 * enforces this. This interface releases the interpreter if we must wait. 428 * enforces this. This interface releases the interpreter if we must wait.
441 */ 429 */
442 status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, 0); 430 status = acpi_ex_system_wait_mutex(
431 acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
443 if (status == AE_TIME) { 432 if (status == AE_TIME) {
444 if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) { 433 if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
445 acpi_ev_global_lock_acquired++; 434 acpi_ev_global_lock_acquired++;
@@ -448,9 +437,9 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
448 } 437 }
449 438
450 if (ACPI_FAILURE(status)) { 439 if (ACPI_FAILURE(status)) {
451 status = 440 status = acpi_ex_system_wait_mutex(
452 acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, 441 acpi_gbl_global_lock_mutex->mutex.os_mutex,
453 timeout); 442 timeout);
454 } 443 }
455 if (ACPI_FAILURE(status)) { 444 if (ACPI_FAILURE(status)) {
456 return_ACPI_STATUS(status); 445 return_ACPI_STATUS(status);
@@ -460,6 +449,19 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
460 acpi_ev_global_lock_acquired++; 449 acpi_ev_global_lock_acquired++;
461 450
462 /* 451 /*
452 * Update the global lock handle and check for wraparound. The handle is
453 * only used for the external global lock interfaces, but it is updated
454 * here to properly handle the case where a single thread may acquire the
455 * lock via both the AML and the acpi_acquire_global_lock interfaces. The
456 * handle is therefore updated on the first acquire from a given thread
457 * regardless of where the acquisition request originated.
458 */
459 acpi_gbl_global_lock_handle++;
460 if (acpi_gbl_global_lock_handle == 0) {
461 acpi_gbl_global_lock_handle = 1;
462 }
463
464 /*
463 * Make sure that a global lock actually exists. If not, just treat 465 * Make sure that a global lock actually exists. If not, just treat
464 * the lock as a standard mutex. 466 * the lock as a standard mutex.
465 */ 467 */
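The block added above advances acpi_gbl_global_lock_handle on the first acquire by a thread and skips zero on wraparound, so that zero can later be rejected as an invalid handle by acpi_release_global_lock() (see the evxface.c hunk further down). A standalone sketch of that non-zero counter pattern:

#include <stdint.h>

/* Advance a handle counter, reserving 0 as "no handle / invalid" */
static uint32_t example_next_handle(uint32_t current)
{
	current++;
	if (current == 0) {	/* wrapped around: skip the reserved value */
		current = 1;
	}
	return current;
}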
@@ -555,7 +557,7 @@ acpi_status acpi_ev_release_global_lock(void)
555 /* Release the local GL mutex */ 557 /* Release the local GL mutex */
556 acpi_ev_global_lock_thread_id = NULL; 558 acpi_ev_global_lock_thread_id = NULL;
557 acpi_ev_global_lock_acquired = 0; 559 acpi_ev_global_lock_acquired = 0;
558 acpi_os_release_mutex(acpi_gbl_global_lock_mutex); 560 acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
559 return_ACPI_STATUS(status); 561 return_ACPI_STATUS(status);
560} 562}
561 563
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 58ad09725dd2..1628f5934752 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
394 ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, 394 ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
395 "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", 395 "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
396 &region_obj->region.handler->address_space, handler, 396 &region_obj->region.handler->address_space, handler,
397 ACPI_FORMAT_UINT64(address), 397 ACPI_FORMAT_NATIVE_UINT(address),
398 acpi_ut_get_region_name(region_obj->region. 398 acpi_ut_get_region_name(region_obj->region.
399 space_id))); 399 space_id)));
400 400
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index b1aaa0e84588..2e3d2c5e4f4d 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
index 7e5d15ce2395..2a8b77877610 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/events/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index 6d866a01f5f4..94a6efe020be 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -758,6 +758,12 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
758 * 758 *
759 * DESCRIPTION: Acquire the ACPI Global Lock 759 * DESCRIPTION: Acquire the ACPI Global Lock
760 * 760 *
761 * Note: Allows callers with the same thread ID to acquire the global lock
762 * multiple times. In other words, externally, the behavior of the global lock
763 * is identical to an AML mutex. On the first acquire, a new handle is
764 * returned. On any subsequent calls to acquire by the same thread, the same
765 * handle is returned.
766 *
761 ******************************************************************************/ 767 ******************************************************************************/
762acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle) 768acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
763{ 769{
@@ -770,14 +776,19 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
770 /* Must lock interpreter to prevent race conditions */ 776 /* Must lock interpreter to prevent race conditions */
771 777
772 acpi_ex_enter_interpreter(); 778 acpi_ex_enter_interpreter();
773 status = acpi_ev_acquire_global_lock(timeout); 779
774 acpi_ex_exit_interpreter(); 780 status = acpi_ex_acquire_mutex_object(timeout,
781 acpi_gbl_global_lock_mutex,
782 acpi_os_get_thread_id());
775 783
776 if (ACPI_SUCCESS(status)) { 784 if (ACPI_SUCCESS(status)) {
777 acpi_gbl_global_lock_handle++; 785
786 /* Return the global lock handle (updated in acpi_ev_acquire_global_lock) */
787
778 *handle = acpi_gbl_global_lock_handle; 788 *handle = acpi_gbl_global_lock_handle;
779 } 789 }
780 790
791 acpi_ex_exit_interpreter();
781 return (status); 792 return (status);
782} 793}
783 794
@@ -798,11 +809,11 @@ acpi_status acpi_release_global_lock(u32 handle)
798{ 809{
799 acpi_status status; 810 acpi_status status;
800 811
801 if (handle != acpi_gbl_global_lock_handle) { 812 if (!handle || (handle != acpi_gbl_global_lock_handle)) {
802 return (AE_NOT_ACQUIRED); 813 return (AE_NOT_ACQUIRED);
803 } 814 }
804 815
805 status = acpi_ev_release_global_lock(); 816 status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
806 return (status); 817 return (status);
807} 818}
808 819
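With the evxface.c changes above, acpi_acquire_global_lock() goes through acpi_ex_acquire_mutex_object(), so repeated acquisitions by the same thread are depth counted and each call returns the handle updated in acpi_ev_acquire_global_lock(); acpi_release_global_lock() now also rejects a zero handle. A minimal caller-side sketch using only the two external interfaces shown in this patch; the 100 ms timeout is an arbitrary illustrative value.

#include <acpi/acpi.h>

static acpi_status example_use_global_lock(void)
{
	u32 handle;
	acpi_status status;

	/* Wait up to 100 ms for the ACPI Global Lock */
	status = acpi_acquire_global_lock(100, &handle);
	if (ACPI_FAILURE(status)) {
		return status;
	}

	/* ... access hardware shared with the firmware here ... */

	/* Release with the handle from the matching acquire; a zero or
	 * stale handle now fails with AE_NOT_ACQUIRED. */
	return acpi_release_global_lock(handle);
}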
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 9cbd3414a574..99a7502e6a87 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
index 7bf09c5fb242..e8750807e57d 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/events/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 25802f302ffe..24da921d13e3 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,6 @@
45#include <acpi/acinterp.h> 45#include <acpi/acinterp.h>
46#include <acpi/amlcode.h> 46#include <acpi/amlcode.h>
47#include <acpi/acnamesp.h> 47#include <acpi/acnamesp.h>
48#include <acpi/acevents.h>
49#include <acpi/actables.h> 48#include <acpi/actables.h>
50#include <acpi/acdispat.h> 49#include <acpi/acdispat.h>
51 50
@@ -138,6 +137,14 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
138 137
139 ACPI_FUNCTION_TRACE(ex_load_table_op); 138 ACPI_FUNCTION_TRACE(ex_load_table_op);
140 139
140 /* Validate lengths for the signature_string, OEMIDString, OEMtable_iD */
141
142 if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
143 (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
144 (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
145 return_ACPI_STATUS(AE_BAD_PARAMETER);
146 }
147
141 /* Find the ACPI table in the RSDT/XSDT */ 148 /* Find the ACPI table in the RSDT/XSDT */
142 149
143 status = acpi_tb_find_table(operand[0]->string.pointer, 150 status = acpi_tb_find_table(operand[0]->string.pointer,
@@ -229,11 +236,18 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
229 status = acpi_get_table_by_index(table_index, &table); 236 status = acpi_get_table_by_index(table_index, &table);
230 if (ACPI_SUCCESS(status)) { 237 if (ACPI_SUCCESS(status)) {
231 ACPI_INFO((AE_INFO, 238 ACPI_INFO((AE_INFO,
232 "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]", 239 "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
233 table->signature, table->oem_id, 240 table->signature, table->oem_id,
234 table->oem_table_id)); 241 table->oem_table_id));
235 } 242 }
236 243
244 /* Invoke table handler if present */
245
246 if (acpi_gbl_table_handler) {
247 (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
248 acpi_gbl_table_handler_context);
249 }
250
237 *return_desc = ddb_handle; 251 *return_desc = ddb_handle;
238 return_ACPI_STATUS(status); 252 return_ACPI_STATUS(status);
239} 253}
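The hunk above makes a successful LoadTable invoke the global table handler with ACPI_TABLE_EVENT_LOAD (the ex_load_op and unload paths further down get matching LOAD/UNLOAD notifications). As an illustration of the consumer side, the sketch below defines a handler with the (event, table, context) shape used by these calls; the handler name is a placeholder, and the registration through acpi_install_table_handler() is an assumption about ACPICA's external interface rather than something added by this patch.

#include <acpi/acpi.h>

/* Placeholder handler matching the (event, table, context) invocation above */
static acpi_status example_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_header *header = table;

	if (event == ACPI_TABLE_EVENT_LOAD) {
		acpi_os_printf("Table [%4.4s] dynamically loaded\n",
			       header->signature);
	} else if (event == ACPI_TABLE_EVENT_UNLOAD) {
		acpi_os_printf("Table [%4.4s] unloaded\n", header->signature);
	}
	return AE_OK;
}

/* Assumed registration through ACPICA's table handler interface */
static acpi_status example_install_table_handler(void)
{
	return acpi_install_table_handler(example_table_handler, NULL);
}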
@@ -268,6 +282,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
268 struct acpi_table_desc table_desc; 282 struct acpi_table_desc table_desc;
269 acpi_native_uint table_index; 283 acpi_native_uint table_index;
270 acpi_status status; 284 acpi_status status;
285 u32 length;
271 286
272 ACPI_FUNCTION_TRACE(ex_load_op); 287 ACPI_FUNCTION_TRACE(ex_load_op);
273 288
@@ -278,16 +293,16 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
278 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 293 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
279 case ACPI_TYPE_REGION: 294 case ACPI_TYPE_REGION:
280 295
296 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n",
297 obj_desc,
298 acpi_ut_get_object_type_name(obj_desc)));
299
281 /* Region must be system_memory (from ACPI spec) */ 300 /* Region must be system_memory (from ACPI spec) */
282 301
283 if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) { 302 if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
284 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 303 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
285 } 304 }
286 305
287 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n",
288 obj_desc,
289 acpi_ut_get_object_type_name(obj_desc)));
290
291 /* 306 /*
292 * If the Region Address and Length have not been previously evaluated, 307 * If the Region Address and Length have not been previously evaluated,
293 * evaluate them now and save the results. 308 * evaluate them now and save the results.
@@ -299,6 +314,11 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
299 } 314 }
300 } 315 }
301 316
317 /*
318 * We will simply map the memory region for the table. However, the
319 * memory region is technically not guaranteed to remain stable and
320 * we may eventually have to copy the table to a local buffer.
321 */
302 table_desc.address = obj_desc->region.address; 322 table_desc.address = obj_desc->region.address;
303 table_desc.length = obj_desc->region.length; 323 table_desc.length = obj_desc->region.length;
304 table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED; 324 table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED;
@@ -306,18 +326,41 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
306 326
307 case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */ 327 case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */
308 328
309 /* Simply extract the buffer from the buffer object */
310
311 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 329 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
312 "Load from Buffer or Field %p %s\n", obj_desc, 330 "Load from Buffer or Field %p %s\n", obj_desc,
313 acpi_ut_get_object_type_name(obj_desc))); 331 acpi_ut_get_object_type_name(obj_desc)));
314 332
315 table_desc.pointer = ACPI_CAST_PTR(struct acpi_table_header, 333 length = obj_desc->buffer.length;
316 obj_desc->buffer.pointer); 334
317 table_desc.length = table_desc.pointer->length; 335 /* Must have at least an ACPI table header */
318 table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED; 336
337 if (length < sizeof(struct acpi_table_header)) {
338 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
339 }
340
341 /* Validate checksum here. It won't get validated in tb_add_table */
319 342
320 obj_desc->buffer.pointer = NULL; 343 status =
344 acpi_tb_verify_checksum(ACPI_CAST_PTR
345 (struct acpi_table_header,
346 obj_desc->buffer.pointer), length);
347 if (ACPI_FAILURE(status)) {
348 return_ACPI_STATUS(status);
349 }
350
351 /*
352 * We need to copy the buffer since the original buffer could be
353 * changed or deleted in the future
354 */
355 table_desc.pointer = ACPI_ALLOCATE(length);
356 if (!table_desc.pointer) {
357 return_ACPI_STATUS(AE_NO_MEMORY);
358 }
359
360 ACPI_MEMCPY(table_desc.pointer, obj_desc->buffer.pointer,
361 length);
362 table_desc.length = length;
363 table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
321 break; 364 break;
322 365
323 default: 366 default:
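In the Load-from-buffer path above, the buffer must now hold at least a full struct acpi_table_header, its checksum is verified up front (it would not be re-checked in tb_add_table), and the AML buffer is copied so later modification or deletion of the original cannot corrupt the installed table. For reference, the ACPI checksum rule is simply that every byte of the table, including the checksum field itself, sums to zero modulo 256; a standalone sketch of that check (illustrative, not the internal acpi_tb_verify_checksum implementation):

#include <stdint.h>
#include <stddef.h>

/* Returns nonzero if the table checksums correctly (byte sum == 0 mod 256) */
static int example_table_checksum_ok(const uint8_t *table, size_t length)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < length; i++) {
		sum = (uint8_t)(sum + table[i]);
	}
	return sum == 0;
}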
@@ -333,7 +376,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
333 } 376 }
334 377
335 status = 378 status =
336 acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle); 379 acpi_ex_add_table(table_index, walk_state->scope_info->scope.node,
380 &ddb_handle);
337 if (ACPI_FAILURE(status)) { 381 if (ACPI_FAILURE(status)) {
338 382
339 /* On error, table_ptr was deallocated above */ 383 /* On error, table_ptr was deallocated above */
@@ -349,11 +393,23 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
349 393
350 /* table_ptr was deallocated above */ 394 /* table_ptr was deallocated above */
351 395
396 acpi_ut_remove_reference(ddb_handle);
352 return_ACPI_STATUS(status); 397 return_ACPI_STATUS(status);
353 } 398 }
354 399
400 /* Invoke table handler if present */
401
402 if (acpi_gbl_table_handler) {
403 (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD,
404 table_desc.pointer,
405 acpi_gbl_table_handler_context);
406 }
407
355 cleanup: 408 cleanup:
356 if (ACPI_FAILURE(status)) { 409 if (ACPI_FAILURE(status)) {
410
411 /* Delete allocated buffer or mapping */
412
357 acpi_tb_delete_table(&table_desc); 413 acpi_tb_delete_table(&table_desc);
358 } 414 }
359 return_ACPI_STATUS(status); 415 return_ACPI_STATUS(status);
@@ -376,6 +432,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
376 acpi_status status = AE_OK; 432 acpi_status status = AE_OK;
377 union acpi_operand_object *table_desc = ddb_handle; 433 union acpi_operand_object *table_desc = ddb_handle;
378 acpi_native_uint table_index; 434 acpi_native_uint table_index;
435 struct acpi_table_header *table;
379 436
380 ACPI_FUNCTION_TRACE(ex_unload_table); 437 ACPI_FUNCTION_TRACE(ex_unload_table);
381 438
@@ -395,17 +452,25 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
395 452
396 table_index = (acpi_native_uint) table_desc->reference.object; 453 table_index = (acpi_native_uint) table_desc->reference.object;
397 454
455 /* Invoke table handler if present */
456
457 if (acpi_gbl_table_handler) {
458 status = acpi_get_table_by_index(table_index, &table);
459 if (ACPI_SUCCESS(status)) {
460 (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
461 table,
462 acpi_gbl_table_handler_context);
463 }
464 }
465
398 /* 466 /*
399 * Delete the entire namespace under this table Node 467 * Delete the entire namespace under this table Node
400 * (Offset contains the table_id) 468 * (Offset contains the table_id)
401 */ 469 */
402 acpi_tb_delete_namespace_by_owner(table_index); 470 acpi_tb_delete_namespace_by_owner(table_index);
403 acpi_tb_release_owner_id(table_index); 471 (void)acpi_tb_release_owner_id(table_index);
404 472
405 acpi_tb_set_table_loaded_flag(table_index, FALSE); 473 acpi_tb_set_table_loaded_flag(table_index, FALSE);
406 474
407 /* Delete the table descriptor (ddb_handle) */ 475 return_ACPI_STATUS(AE_OK);
408
409 acpi_ut_remove_reference(table_desc);
410 return_ACPI_STATUS(status);
411} 476}
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index 79f2c0d42c06..fd954b4ed83d 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index 6e9a23e47fef..60e62c4f0577 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -96,6 +96,9 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
96 * to the original Node. 96 * to the original Node.
97 */ 97 */
98 switch (target_node->type) { 98 switch (target_node->type) {
99
100 /* For these types, the sub-object can change dynamically via a Store */
101
99 case ACPI_TYPE_INTEGER: 102 case ACPI_TYPE_INTEGER:
100 case ACPI_TYPE_STRING: 103 case ACPI_TYPE_STRING:
101 case ACPI_TYPE_BUFFER: 104 case ACPI_TYPE_BUFFER:
@@ -103,9 +106,18 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
103 case ACPI_TYPE_BUFFER_FIELD: 106 case ACPI_TYPE_BUFFER_FIELD:
104 107
105 /* 108 /*
109 * These types open a new scope, so we need the NS node in order to access
110 * any children.
111 */
112 case ACPI_TYPE_DEVICE:
113 case ACPI_TYPE_POWER:
114 case ACPI_TYPE_PROCESSOR:
115 case ACPI_TYPE_THERMAL:
116 case ACPI_TYPE_LOCAL_SCOPE:
117
118 /*
106 * The new alias has the type ALIAS and points to the original 119 * The new alias has the type ALIAS and points to the original
107 * NS node, not the object itself. This is because for these 120 * NS node, not the object itself.
108 * types, the object can change dynamically via a Store.
109 */ 121 */
110 alias_node->type = ACPI_TYPE_LOCAL_ALIAS; 122 alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
111 alias_node->object = 123 alias_node->object =
@@ -115,9 +127,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
115 case ACPI_TYPE_METHOD: 127 case ACPI_TYPE_METHOD:
116 128
117 /* 129 /*
118 * The new alias has the type ALIAS and points to the original 130 * Control method aliases need to be differentiated
119 * NS node, not the object itself. This is because for these
120 * types, the object can change dynamically via a Store.
121 */ 131 */
122 alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS; 132 alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS;
123 alias_node->object = 133 alias_node->object =
@@ -342,101 +352,6 @@ acpi_ex_create_region(u8 * aml_start,
342 352
343/******************************************************************************* 353/*******************************************************************************
344 * 354 *
345 * FUNCTION: acpi_ex_create_table_region
346 *
347 * PARAMETERS: walk_state - Current state
348 *
349 * RETURN: Status
350 *
351 * DESCRIPTION: Create a new data_table_region object
352 *
353 ******************************************************************************/
354
355acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
356{
357 acpi_status status;
358 union acpi_operand_object **operand = &walk_state->operands[0];
359 union acpi_operand_object *obj_desc;
360 struct acpi_namespace_node *node;
361 union acpi_operand_object *region_obj2;
362 acpi_native_uint table_index;
363 struct acpi_table_header *table;
364
365 ACPI_FUNCTION_TRACE(ex_create_table_region);
366
367 /* Get the Node from the object stack */
368
369 node = walk_state->op->common.node;
370
371 /*
372 * If the region object is already attached to this node,
373 * just return
374 */
375 if (acpi_ns_get_attached_object(node)) {
376 return_ACPI_STATUS(AE_OK);
377 }
378
379 /* Find the ACPI table */
380
381 status = acpi_tb_find_table(operand[1]->string.pointer,
382 operand[2]->string.pointer,
383 operand[3]->string.pointer, &table_index);
384 if (ACPI_FAILURE(status)) {
385 return_ACPI_STATUS(status);
386 }
387
388 /* Create the region descriptor */
389
390 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_REGION);
391 if (!obj_desc) {
392 return_ACPI_STATUS(AE_NO_MEMORY);
393 }
394
395 region_obj2 = obj_desc->common.next_object;
396 region_obj2->extra.region_context = NULL;
397
398 status = acpi_get_table_by_index(table_index, &table);
399 if (ACPI_FAILURE(status)) {
400 return_ACPI_STATUS(status);
401 }
402
403 /* Init the region from the operands */
404
405 obj_desc->region.space_id = REGION_DATA_TABLE;
406 obj_desc->region.address =
407 (acpi_physical_address) ACPI_TO_INTEGER(table);
408 obj_desc->region.length = table->length;
409 obj_desc->region.node = node;
410 obj_desc->region.flags = AOPOBJ_DATA_VALID;
411
412 /* Install the new region object in the parent Node */
413
414 status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
415 if (ACPI_FAILURE(status)) {
416 goto cleanup;
417 }
418
419 status = acpi_ev_initialize_region(obj_desc, FALSE);
420 if (ACPI_FAILURE(status)) {
421 if (status == AE_NOT_EXIST) {
422 status = AE_OK;
423 } else {
424 goto cleanup;
425 }
426 }
427
428 obj_desc->region.flags |= AOPOBJ_SETUP_COMPLETE;
429
430 cleanup:
431
432 /* Remove local reference to the object */
433
434 acpi_ut_remove_reference(obj_desc);
435 return_ACPI_STATUS(status);
436}
437
438/*******************************************************************************
439 *
440 * FUNCTION: acpi_ex_create_processor 355 * FUNCTION: acpi_ex_create_processor
441 * 356 *
442 * PARAMETERS: walk_state - Current state 357 * PARAMETERS: walk_state - Current state
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 51c9c29987c3..74f1b22601b3 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -500,25 +500,28 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
500 acpi_os_printf("Reference: Debug\n"); 500 acpi_os_printf("Reference: Debug\n");
501 break; 501 break;
502 502
503 case AML_NAME_OP: 503 case AML_INDEX_OP:
504 504
505 ACPI_DUMP_PATHNAME(obj_desc->reference.object, 505 acpi_os_printf("Reference: Index %p\n",
506 "Reference: Name: ", ACPI_LV_INFO, 506 obj_desc->reference.object);
507 _COMPONENT);
508 ACPI_DUMP_ENTRY(obj_desc->reference.object,
509 ACPI_LV_INFO);
510 break; 507 break;
511 508
512 case AML_INDEX_OP: 509 case AML_LOAD_OP:
513 510
514 acpi_os_printf("Reference: Index %p\n", 511 acpi_os_printf("Reference: [DdbHandle] TableIndex %p\n",
515 obj_desc->reference.object); 512 obj_desc->reference.object);
516 break; 513 break;
517 514
518 case AML_REF_OF_OP: 515 case AML_REF_OF_OP:
519 516
520 acpi_os_printf("Reference: (RefOf) %p\n", 517 acpi_os_printf("Reference: (RefOf) %p [%s]\n",
521 obj_desc->reference.object); 518 obj_desc->reference.object,
519 acpi_ut_get_type_name(((union
520 acpi_operand_object
521 *)obj_desc->
522 reference.
523 object)->common.
524 type));
522 break; 525 break;
523 526
524 case AML_ARG_OP: 527 case AML_ARG_OP:
@@ -559,8 +562,9 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
559 562
560 case AML_INT_NAMEPATH_OP: 563 case AML_INT_NAMEPATH_OP:
561 564
562 acpi_os_printf("Reference.Node->Name %X\n", 565 acpi_os_printf("Reference: Namepath %X [%4.4s]\n",
563 obj_desc->reference.node->name.integer); 566 obj_desc->reference.node->name.integer,
567 obj_desc->reference.node->name.ascii);
564 break; 568 break;
565 569
566 default: 570 default:
@@ -640,8 +644,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
640 acpi_os_printf("\n"); 644 acpi_os_printf("\n");
641 } else { 645 } else {
642 acpi_os_printf(" base %8.8X%8.8X Length %X\n", 646 acpi_os_printf(" base %8.8X%8.8X Length %X\n",
643 ACPI_FORMAT_UINT64(obj_desc->region. 647 ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
644 address), 648 address),
645 obj_desc->region.length); 649 obj_desc->region.length);
646 } 650 }
647 break; 651 break;
@@ -877,20 +881,43 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
877 ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER; 881 ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER;
878 882
879 if (obj_desc->reference.opcode == AML_INT_NAMEPATH_OP) { 883 if (obj_desc->reference.opcode == AML_INT_NAMEPATH_OP) {
880 acpi_os_printf("Named Object %p ", obj_desc->reference.node); 884 acpi_os_printf(" Named Object %p ", obj_desc->reference.node);
881 885
882 status = 886 status =
883 acpi_ns_handle_to_pathname(obj_desc->reference.node, 887 acpi_ns_handle_to_pathname(obj_desc->reference.node,
884 &ret_buf); 888 &ret_buf);
885 if (ACPI_FAILURE(status)) { 889 if (ACPI_FAILURE(status)) {
886 acpi_os_printf("Could not convert name to pathname\n"); 890 acpi_os_printf(" Could not convert name to pathname\n");
887 } else { 891 } else {
888 acpi_os_printf("%s\n", (char *)ret_buf.pointer); 892 acpi_os_printf("%s\n", (char *)ret_buf.pointer);
889 ACPI_FREE(ret_buf.pointer); 893 ACPI_FREE(ret_buf.pointer);
890 } 894 }
891 } else if (obj_desc->reference.object) { 895 } else if (obj_desc->reference.object) {
892 acpi_os_printf("\nReferenced Object: %p\n", 896 if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
893 obj_desc->reference.object); 897 ACPI_DESC_TYPE_OPERAND) {
898 acpi_os_printf(" Target: %p",
899 obj_desc->reference.object);
900 if (obj_desc->reference.opcode == AML_LOAD_OP) {
901 /*
902 * For DDBHandle reference,
903 * obj_desc->Reference.Object is the table index
904 */
905 acpi_os_printf(" [DDBHandle]\n");
906 } else {
907 acpi_os_printf(" [%s]\n",
908 acpi_ut_get_type_name(((union
909 acpi_operand_object
910 *)
911 obj_desc->
912 reference.
913 object)->
914 common.
915 type));
916 }
917 } else {
918 acpi_os_printf(" Target: %p\n",
919 obj_desc->reference.object);
920 }
894 } 921 }
895} 922}
896 923
@@ -976,7 +1003,9 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
976 1003
977 case ACPI_TYPE_LOCAL_REFERENCE: 1004 case ACPI_TYPE_LOCAL_REFERENCE:
978 1005
979 acpi_os_printf("[Object Reference] "); 1006 acpi_os_printf("[Object Reference] %s",
1007 (acpi_ps_get_opcode_info
1008 (obj_desc->reference.opcode))->name);
980 acpi_ex_dump_reference_obj(obj_desc); 1009 acpi_ex_dump_reference_obj(obj_desc);
981 break; 1010 break;
982 1011
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/executer/exfield.c
index 2d88a3d8d1ad..3e440d84226a 100644
--- a/drivers/acpi/executer/exfield.c
+++ b/drivers/acpi/executer/exfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -71,7 +71,6 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
71 union acpi_operand_object *buffer_desc; 71 union acpi_operand_object *buffer_desc;
72 acpi_size length; 72 acpi_size length;
73 void *buffer; 73 void *buffer;
74 u8 locked;
75 74
76 ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc); 75 ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
77 76
@@ -111,9 +110,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
111 110
112 /* Lock entire transaction if requested */ 111 /* Lock entire transaction if requested */
113 112
114 locked = 113 acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
115 acpi_ex_acquire_global_lock(obj_desc->common_field.
116 field_flags);
117 114
118 /* 115 /*
119 * Perform the read. 116 * Perform the read.
@@ -125,7 +122,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
125 buffer.pointer), 122 buffer.pointer),
126 ACPI_READ | (obj_desc->field. 123 ACPI_READ | (obj_desc->field.
127 attribute << 16)); 124 attribute << 16));
128 acpi_ex_release_global_lock(locked); 125 acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
129 goto exit; 126 goto exit;
130 } 127 }
131 128
@@ -175,13 +172,12 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
175 172
176 /* Lock entire transaction if requested */ 173 /* Lock entire transaction if requested */
177 174
178 locked = 175 acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
179 acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
180 176
181 /* Read from the field */ 177 /* Read from the field */
182 178
183 status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length); 179 status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
184 acpi_ex_release_global_lock(locked); 180 acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
185 181
186 exit: 182 exit:
187 if (ACPI_FAILURE(status)) { 183 if (ACPI_FAILURE(status)) {
@@ -214,10 +210,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
214{ 210{
215 acpi_status status; 211 acpi_status status;
216 u32 length; 212 u32 length;
217 u32 required_length;
218 void *buffer; 213 void *buffer;
219 void *new_buffer;
220 u8 locked;
221 union acpi_operand_object *buffer_desc; 214 union acpi_operand_object *buffer_desc;
222 215
223 ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc); 216 ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
@@ -278,9 +271,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
278 271
279 /* Lock entire transaction if requested */ 272 /* Lock entire transaction if requested */
280 273
281 locked = 274 acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
282 acpi_ex_acquire_global_lock(obj_desc->common_field.
283 field_flags);
284 275
285 /* 276 /*
286 * Perform the write (returns status and perhaps data in the 277 * Perform the write (returns status and perhaps data in the
@@ -291,7 +282,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
291 (acpi_integer *) buffer, 282 (acpi_integer *) buffer,
292 ACPI_WRITE | (obj_desc->field. 283 ACPI_WRITE | (obj_desc->field.
293 attribute << 16)); 284 attribute << 16));
294 acpi_ex_release_global_lock(locked); 285 acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
295 286
296 *result_desc = buffer_desc; 287 *result_desc = buffer_desc;
297 return_ACPI_STATUS(status); 288 return_ACPI_STATUS(status);
@@ -319,35 +310,6 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
319 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 310 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
320 } 311 }
321 312
322 /*
323 * We must have a buffer that is at least as long as the field
324 * we are writing to. This is because individual fields are
325 * indivisible and partial writes are not supported -- as per
326 * the ACPI specification.
327 */
328 new_buffer = NULL;
329 required_length =
330 ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
331
332 if (length < required_length) {
333
334 /* We need to create a new buffer */
335
336 new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
337 if (!new_buffer) {
338 return_ACPI_STATUS(AE_NO_MEMORY);
339 }
340
341 /*
342 * Copy the original data to the new buffer, starting
343 * at Byte zero. All unused (upper) bytes of the
344 * buffer will be 0.
345 */
346 ACPI_MEMCPY((char *)new_buffer, (char *)buffer, length);
347 buffer = new_buffer;
348 length = required_length;
349 }
350
351 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, 313 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
352 "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n", 314 "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
353 source_desc, 315 source_desc,
@@ -366,19 +328,12 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
366 328
367 /* Lock entire transaction if requested */ 329 /* Lock entire transaction if requested */
368 330
369 locked = 331 acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
370 acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
371 332
372 /* Write to the field */ 333 /* Write to the field */
373 334
374 status = acpi_ex_insert_into_field(obj_desc, buffer, length); 335 status = acpi_ex_insert_into_field(obj_desc, buffer, length);
375 acpi_ex_release_global_lock(locked); 336 acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
376
377 /* Free temporary buffer if we used one */
378
379 if (new_buffer) {
380 ACPI_FREE(new_buffer);
381 }
382 337
383 return_ACPI_STATUS(status); 338 return_ACPI_STATUS(status);
384} 339}
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
index 65a48b6170ee..e336b5dc7a50 100644
--- a/drivers/acpi/executer/exfldio.c
+++ b/drivers/acpi/executer/exfldio.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -263,7 +263,8 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
263 rgn_desc->region.space_id, 263 rgn_desc->region.space_id,
264 obj_desc->common_field.access_byte_width, 264 obj_desc->common_field.access_byte_width,
265 obj_desc->common_field.base_byte_offset, 265 obj_desc->common_field.base_byte_offset,
266 field_datum_byte_offset, (void *)address)); 266 field_datum_byte_offset, ACPI_CAST_PTR(void,
267 address)));
267 268
268 /* Invoke the appropriate address_space/op_region handler */ 269 /* Invoke the appropriate address_space/op_region handler */
269 270
@@ -805,18 +806,39 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
805 u32 datum_count; 806 u32 datum_count;
806 u32 field_datum_count; 807 u32 field_datum_count;
807 u32 i; 808 u32 i;
809 u32 required_length;
810 void *new_buffer;
808 811
809 ACPI_FUNCTION_TRACE(ex_insert_into_field); 812 ACPI_FUNCTION_TRACE(ex_insert_into_field);
810 813
811 /* Validate input buffer */ 814 /* Validate input buffer */
812 815
813 if (buffer_length < 816 new_buffer = NULL;
814 ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) { 817 required_length =
815 ACPI_ERROR((AE_INFO, 818 ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
816 "Field size %X (bits) is too large for buffer (%X)", 819 /*
817 obj_desc->common_field.bit_length, buffer_length)); 820 * We must have a buffer that is at least as long as the field
821 * we are writing to. This is because individual fields are
822 * indivisible and partial writes are not supported -- as per
823 * the ACPI specification.
824 */
825 if (buffer_length < required_length) {
818 826
819 return_ACPI_STATUS(AE_BUFFER_OVERFLOW); 827 /* We need to create a new buffer */
828
829 new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
830 if (!new_buffer) {
831 return_ACPI_STATUS(AE_NO_MEMORY);
832 }
833
834 /*
835 * Copy the original data to the new buffer, starting
836 * at Byte zero. All unused (upper) bytes of the
837 * buffer will be 0.
838 */
839 ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length);
840 buffer = new_buffer;
841 buffer_length = required_length;
820 } 842 }
821 843
822 /* 844 /*
@@ -866,7 +888,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
866 merged_datum, 888 merged_datum,
867 field_offset); 889 field_offset);
868 if (ACPI_FAILURE(status)) { 890 if (ACPI_FAILURE(status)) {
869 return_ACPI_STATUS(status); 891 goto exit;
870 } 892 }
871 893
872 field_offset += obj_desc->common_field.access_byte_width; 894 field_offset += obj_desc->common_field.access_byte_width;
@@ -924,5 +946,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
924 mask, merged_datum, 946 mask, merged_datum,
925 field_offset); 947 field_offset);
926 948
949 exit:
950 /* Free temporary buffer if we used one */
951
952 if (new_buffer) {
953 ACPI_FREE(new_buffer);
954 }
927 return_ACPI_STATUS(status); 955 return_ACPI_STATUS(status);
928} 956}
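The exfldio.c change above moves the short-buffer handling from exfield.c into acpi_ex_insert_into_field(): rather than rejecting a source buffer shorter than the field, it allocates a zero-filled buffer of the required length, copies the caller's bytes into it, and frees the temporary copy on exit, since fields are indivisible and partial writes are not allowed by the ACPI specification. A standalone sketch of that padding step, assuming ACPI_ROUND_BITS_UP_TO_BYTES simply rounds a bit count up to whole bytes; the names here are illustrative only:

#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Assumed behavior of ACPI_ROUND_BITS_UP_TO_BYTES */
static size_t bits_to_bytes(size_t bit_length)
{
	return (bit_length + 7) / 8;
}

/* Called only when src_len is smaller than the field: return a zero-padded
 * copy sized to the full field width. Caller frees the result. */
static uint8_t *example_pad_field_buffer(const uint8_t *src, size_t src_len,
					 size_t field_bit_length)
{
	size_t required = bits_to_bytes(field_bit_length);
	uint8_t *padded;

	padded = calloc(1, required);	/* unused upper bytes stay 0 */
	if (!padded) {
		return NULL;
	}
	memcpy(padded, src, src_len);	/* original data starts at byte 0 */
	return padded;
}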
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
index f13d1cec2d6d..cc956a5b5267 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/executer/exmisc.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index 6748e3ef0997..c873ab40cd0e 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -126,6 +126,79 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
126 126
127/******************************************************************************* 127/*******************************************************************************
128 * 128 *
129 * FUNCTION: acpi_ex_acquire_mutex_object
130 *
131 * PARAMETERS: time_desc - Timeout in milliseconds
132 * obj_desc - Mutex object
133 * Thread - Current thread state
134 *
135 * RETURN: Status
136 *
137 * DESCRIPTION: Acquire an AML mutex, low-level interface. Provides a common
138 * path that supports multiple acquires by the same thread.
139 *
140 * MUTEX: Interpreter must be locked
141 *
142 * NOTE: This interface is called from three places:
143 * 1) From acpi_ex_acquire_mutex, via an AML Acquire() operator
144 * 2) From acpi_ex_acquire_global_lock when an AML Field access requires the
145 * global lock
146 * 3) From the external interface, acpi_acquire_global_lock
147 *
148 ******************************************************************************/
149
150acpi_status
151acpi_ex_acquire_mutex_object(u16 timeout,
152 union acpi_operand_object *obj_desc,
153 acpi_thread_id thread_id)
154{
155 acpi_status status;
156
157 ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex_object, obj_desc);
158
159 if (!obj_desc) {
160 return_ACPI_STATUS(AE_BAD_PARAMETER);
161 }
162
163 /* Support for multiple acquires by the owning thread */
164
165 if (obj_desc->mutex.thread_id == thread_id) {
166 /*
167 * The mutex is already owned by this thread, just increment the
168 * acquisition depth
169 */
170 obj_desc->mutex.acquisition_depth++;
171 return_ACPI_STATUS(AE_OK);
172 }
173
174 /* Acquire the mutex, wait if necessary. Special case for Global Lock */
175
176 if (obj_desc == acpi_gbl_global_lock_mutex) {
177 status = acpi_ev_acquire_global_lock(timeout);
178 } else {
179 status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
180 timeout);
181 }
182
183 if (ACPI_FAILURE(status)) {
184
185 /* Includes failure from a timeout on time_desc */
186
187 return_ACPI_STATUS(status);
188 }
189
190 /* Acquired the mutex: update mutex object */
191
192 obj_desc->mutex.thread_id = thread_id;
193 obj_desc->mutex.acquisition_depth = 1;
194 obj_desc->mutex.original_sync_level = 0;
195 obj_desc->mutex.owner_thread = NULL; /* Used only for AML Acquire() */
196
197 return_ACPI_STATUS(AE_OK);
198}
199
200/*******************************************************************************
201 *
129 * FUNCTION: acpi_ex_acquire_mutex 202 * FUNCTION: acpi_ex_acquire_mutex
130 * 203 *
131 * PARAMETERS: time_desc - Timeout integer 204 * PARAMETERS: time_desc - Timeout integer
@@ -151,7 +224,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
151 return_ACPI_STATUS(AE_BAD_PARAMETER); 224 return_ACPI_STATUS(AE_BAD_PARAMETER);
152 } 225 }
153 226
154 /* Sanity check: we must have a valid thread ID */ 227 /* Must have a valid thread ID */
155 228
156 if (!walk_state->thread) { 229 if (!walk_state->thread) {
157 ACPI_ERROR((AE_INFO, 230 ACPI_ERROR((AE_INFO,
@@ -161,7 +234,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
161 } 234 }
162 235
163 /* 236 /*
164 * Current Sync must be less than or equal to the sync level of the 237 * Current sync level must be less than or equal to the sync level of the
165 * mutex. This mechanism provides some deadlock prevention 238 * mutex. This mechanism provides some deadlock prevention
166 */ 239 */
167 if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { 240 if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
@@ -172,51 +245,89 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
172 return_ACPI_STATUS(AE_AML_MUTEX_ORDER); 245 return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
173 } 246 }
174 247
175 /* Support for multiple acquires by the owning thread */ 248 status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value,
249 obj_desc,
250 walk_state->thread->thread_id);
251 if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) {
176 252
177 if (obj_desc->mutex.owner_thread) { 253 /* Save Thread object, original/current sync levels */
178 if (obj_desc->mutex.owner_thread->thread_id == 254
179 walk_state->thread->thread_id) { 255 obj_desc->mutex.owner_thread = walk_state->thread;
180 /* 256 obj_desc->mutex.original_sync_level =
181 * The mutex is already owned by this thread, just increment the 257 walk_state->thread->current_sync_level;
182 * acquisition depth 258 walk_state->thread->current_sync_level =
183 */ 259 obj_desc->mutex.sync_level;
184 obj_desc->mutex.acquisition_depth++; 260
185 return_ACPI_STATUS(AE_OK); 261 /* Link the mutex to the current thread for force-unlock at method exit */
186 } 262
263 acpi_ex_link_mutex(obj_desc, walk_state->thread);
187 } 264 }
188 265
189 /* Acquire the mutex, wait if necessary. Special case for Global Lock */ 266 return_ACPI_STATUS(status);
267}
190 268
191 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) { 269/*******************************************************************************
192 status = 270 *
193 acpi_ev_acquire_global_lock((u16) time_desc->integer.value); 271 * FUNCTION: acpi_ex_release_mutex_object
194 } else { 272 *
195 status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex, 273 * PARAMETERS: obj_desc - The object descriptor for this op
196 (u16) time_desc->integer. 274 *
197 value); 275 * RETURN: Status
276 *
277 * DESCRIPTION: Release a previously acquired Mutex, low level interface.
278 * Provides a common path that supports multiple releases (after
279 * previous multiple acquires) by the same thread.
280 *
281 * MUTEX: Interpreter must be locked
282 *
283 * NOTE: This interface is called from three places:
284 * 1) From acpi_ex_release_mutex, via an AML Acquire() operator
285 * 2) From acpi_ex_release_global_lock when an AML Field access requires the
286 * global lock
287 * 3) From the external interface, acpi_release_global_lock
288 *
289 ******************************************************************************/
290
291acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
292{
293 acpi_status status = AE_OK;
294
295 ACPI_FUNCTION_TRACE(ex_release_mutex_object);
296
297 if (obj_desc->mutex.acquisition_depth == 0) {
298 return (AE_NOT_ACQUIRED);
198 } 299 }
199 300
200 if (ACPI_FAILURE(status)) { 301 /* Match multiple Acquires with multiple Releases */
201 302
202 /* Includes failure from a timeout on time_desc */ 303 obj_desc->mutex.acquisition_depth--;
304 if (obj_desc->mutex.acquisition_depth != 0) {
203 305
204 return_ACPI_STATUS(status); 306 /* Just decrement the depth and return */
307
308 return_ACPI_STATUS(AE_OK);
205 } 309 }
206 310
207 /* Have the mutex: update mutex and walk info and save the sync_level */ 311 if (obj_desc->mutex.owner_thread) {
208 312
209 obj_desc->mutex.owner_thread = walk_state->thread; 313 /* Unlink the mutex from the owner's list */
210 obj_desc->mutex.acquisition_depth = 1;
211 obj_desc->mutex.original_sync_level =
212 walk_state->thread->current_sync_level;
213 314
214 walk_state->thread->current_sync_level = obj_desc->mutex.sync_level; 315 acpi_ex_unlink_mutex(obj_desc);
316 obj_desc->mutex.owner_thread = NULL;
317 }
215 318
216 /* Link the mutex to the current thread for force-unlock at method exit */ 319 /* Release the mutex, special case for Global Lock */
217 320
218 acpi_ex_link_mutex(obj_desc, walk_state->thread); 321 if (obj_desc == acpi_gbl_global_lock_mutex) {
219 return_ACPI_STATUS(AE_OK); 322 status = acpi_ev_release_global_lock();
323 } else {
324 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
325 }
326
327 /* Clear mutex info */
328
329 obj_desc->mutex.thread_id = 0;
330 return_ACPI_STATUS(status);
220} 331}
221 332
222/******************************************************************************* 333/*******************************************************************************
@@ -253,22 +364,13 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
253 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED); 364 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
254 } 365 }
255 366
256 /* Sanity check: we must have a valid thread ID */
257
258 if (!walk_state->thread) {
259 ACPI_ERROR((AE_INFO,
260 "Cannot release Mutex [%4.4s], null thread info",
261 acpi_ut_get_node_name(obj_desc->mutex.node)));
262 return_ACPI_STATUS(AE_AML_INTERNAL);
263 }
264
265 /* 367 /*
266 * The Mutex is owned, but this thread must be the owner. 368 * The Mutex is owned, but this thread must be the owner.
267 * Special case for Global Lock, any thread can release 369 * Special case for Global Lock, any thread can release
268 */ 370 */
269 if ((obj_desc->mutex.owner_thread->thread_id != 371 if ((obj_desc->mutex.owner_thread->thread_id !=
270 walk_state->thread->thread_id) 372 walk_state->thread->thread_id)
271 && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) { 373 && (obj_desc != acpi_gbl_global_lock_mutex)) {
272 ACPI_ERROR((AE_INFO, 374 ACPI_ERROR((AE_INFO,
273 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX", 375 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
274 (unsigned long)walk_state->thread->thread_id, 376 (unsigned long)walk_state->thread->thread_id,
@@ -278,45 +380,37 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
278 return_ACPI_STATUS(AE_AML_NOT_OWNER); 380 return_ACPI_STATUS(AE_AML_NOT_OWNER);
279 } 381 }
280 382
383 /* Must have a valid thread ID */
384
385 if (!walk_state->thread) {
386 ACPI_ERROR((AE_INFO,
387 "Cannot release Mutex [%4.4s], null thread info",
388 acpi_ut_get_node_name(obj_desc->mutex.node)));
389 return_ACPI_STATUS(AE_AML_INTERNAL);
390 }
391
281 /* 392 /*
282 * The sync level of the mutex must be less than or equal to the current 393 * The sync level of the mutex must be less than or equal to the current
283 * sync level 394 * sync level
284 */ 395 */
285 if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) { 396 if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
286 ACPI_ERROR((AE_INFO, 397 ACPI_ERROR((AE_INFO,
287 "Cannot release Mutex [%4.4s], incorrect SyncLevel", 398 "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
288 acpi_ut_get_node_name(obj_desc->mutex.node))); 399 acpi_ut_get_node_name(obj_desc->mutex.node),
400 obj_desc->mutex.sync_level,
401 walk_state->thread->current_sync_level));
289 return_ACPI_STATUS(AE_AML_MUTEX_ORDER); 402 return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
290 } 403 }
291 404
292 /* Match multiple Acquires with multiple Releases */ 405 status = acpi_ex_release_mutex_object(obj_desc);
293
294 obj_desc->mutex.acquisition_depth--;
295 if (obj_desc->mutex.acquisition_depth != 0) {
296
297 /* Just decrement the depth and return */
298
299 return_ACPI_STATUS(AE_OK);
300 }
301
302 /* Unlink the mutex from the owner's list */
303 406
304 acpi_ex_unlink_mutex(obj_desc); 407 if (obj_desc->mutex.acquisition_depth == 0) {
305 408
306 /* Release the mutex, special case for Global Lock */ 409 /* Restore the original sync_level */
307 410
308 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) { 411 walk_state->thread->current_sync_level =
309 status = acpi_ev_release_global_lock(); 412 obj_desc->mutex.original_sync_level;
310 } else {
311 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
312 } 413 }
313
314 /* Update the mutex and restore sync_level */
315
316 obj_desc->mutex.owner_thread = NULL;
317 walk_state->thread->current_sync_level =
318 obj_desc->mutex.original_sync_level;
319
320 return_ACPI_STATUS(status); 414 return_ACPI_STATUS(status);
321} 415}
322 416
@@ -357,7 +451,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
357 451
358 /* Release the mutex, special case for Global Lock */ 452 /* Release the mutex, special case for Global Lock */
359 453
360 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) { 454 if (obj_desc == acpi_gbl_global_lock_mutex) {
361 455
362 /* Ignore errors */ 456 /* Ignore errors */
363 457
@@ -369,6 +463,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
369 /* Mark mutex unowned */ 463 /* Mark mutex unowned */
370 464
371 obj_desc->mutex.owner_thread = NULL; 465 obj_desc->mutex.owner_thread = NULL;
466 obj_desc->mutex.thread_id = 0;
372 467
373 /* Update Thread sync_level (Last mutex is the important one) */ 468 /* Update Thread sync_level (Last mutex is the important one) */
374 469
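The release path above now matches each Acquire with a Release inside acpi_ex_release_mutex_object() and restores the caller's sync level only once the acquisition depth drops back to zero. A standalone sketch of that matched-release pattern, using illustrative types rather than the real ACPICA structures:

    #include <stdio.h>

    struct demo_mutex {
        unsigned int acquisition_depth;   /* matched Acquire/Release count */
        unsigned int original_sync_level; /* level saved on first acquire */
    };

    struct demo_thread {
        unsigned int current_sync_level;
    };

    /* Release one level; restore the sync level only on the last release. */
    static int demo_release(struct demo_mutex *m, struct demo_thread *t)
    {
        if (m->acquisition_depth == 0)
            return -1;                    /* not acquired by anyone */

        m->acquisition_depth--;
        if (m->acquisition_depth == 0)
            t->current_sync_level = m->original_sync_level;

        return 0;
    }

    int main(void)
    {
        struct demo_thread t = { .current_sync_level = 7 };
        struct demo_mutex m = { .acquisition_depth = 2, .original_sync_level = 3 };

        demo_release(&m, &t);   /* depth 2 -> 1, sync level still 7 */
        demo_release(&m, &t);   /* depth 1 -> 0, sync level restored to 3 */
        printf("sync level = %u\n", t.current_sync_level);
        return 0;
    }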
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
index 308eae52dc05..817e67be3697 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/executer/exnames.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
index 252f10acbbcc..7c3bea575e02 100644
--- a/drivers/acpi/executer/exoparg1.c
+++ b/drivers/acpi/executer/exoparg1.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -121,6 +121,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
121 121
122 if ((ACPI_FAILURE(status)) || walk_state->result_obj) { 122 if ((ACPI_FAILURE(status)) || walk_state->result_obj) {
123 acpi_ut_remove_reference(return_desc); 123 acpi_ut_remove_reference(return_desc);
124 walk_state->result_obj = NULL;
124 } else { 125 } else {
125 /* Save the return value */ 126 /* Save the return value */
126 127
@@ -739,26 +740,38 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
739 value = acpi_gbl_integer_byte_width; 740 value = acpi_gbl_integer_byte_width;
740 break; 741 break;
741 742
742 case ACPI_TYPE_BUFFER:
743 value = temp_desc->buffer.length;
744 break;
745
746 case ACPI_TYPE_STRING: 743 case ACPI_TYPE_STRING:
747 value = temp_desc->string.length; 744 value = temp_desc->string.length;
748 break; 745 break;
749 746
747 case ACPI_TYPE_BUFFER:
748
749 /* Buffer arguments may not be evaluated at this point */
750
751 status = acpi_ds_get_buffer_arguments(temp_desc);
752 value = temp_desc->buffer.length;
753 break;
754
750 case ACPI_TYPE_PACKAGE: 755 case ACPI_TYPE_PACKAGE:
756
757 /* Package arguments may not be evaluated at this point */
758
759 status = acpi_ds_get_package_arguments(temp_desc);
751 value = temp_desc->package.count; 760 value = temp_desc->package.count;
752 break; 761 break;
753 762
754 default: 763 default:
755 ACPI_ERROR((AE_INFO, 764 ACPI_ERROR((AE_INFO,
756 "Operand is not Buf/Int/Str/Pkg - found type %s", 765 "Operand must be Buffer/Integer/String/Package - found type %s",
757 acpi_ut_get_type_name(type))); 766 acpi_ut_get_type_name(type)));
758 status = AE_AML_OPERAND_TYPE; 767 status = AE_AML_OPERAND_TYPE;
759 goto cleanup; 768 goto cleanup;
760 } 769 }
761 770
771 if (ACPI_FAILURE(status)) {
772 goto cleanup;
773 }
774
762 /* 775 /*
763 * Now that we have the size of the object, create a result 776 * Now that we have the size of the object, create a result
764 * object to hold the value 777 * object to hold the value
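For SizeOf, Buffer and Package operands may still be unevaluated at this point, so the new code evaluates them first and funnels any failure through a single status check after the switch. A small sketch of that shape, with a made-up helper standing in for acpi_ds_get_buffer_arguments()/acpi_ds_get_package_arguments():

    #include <stdio.h>

    enum obj_type { OBJ_INT, OBJ_STRING, OBJ_BUFFER, OBJ_PACKAGE };

    struct obj {
        enum obj_type type;
        int           deferred;   /* operands not evaluated yet */
        unsigned int  length;     /* valid once evaluated */
    };

    static int evaluate_deferred(struct obj *o)
    {
        o->deferred = 0;          /* stand-in for the deferred-argument call */
        return 0;                 /* 0 == success */
    }

    static int object_size(struct obj *o, unsigned int *value)
    {
        int status = 0;

        switch (o->type) {
        case OBJ_INT:
            *value = 8;           /* fixed integer width */
            break;
        case OBJ_STRING:
            *value = o->length;
            break;
        case OBJ_BUFFER:
        case OBJ_PACKAGE:
            if (o->deferred)
                status = evaluate_deferred(o);
            *value = o->length;
            break;
        default:
            return -1;            /* operand type not allowed */
        }

        if (status)               /* single failure check after the switch */
            return status;
        return 0;
    }

    int main(void)
    {
        struct obj buf = { .type = OBJ_BUFFER, .deferred = 1, .length = 16 };
        unsigned int size;

        if (object_size(&buf, &size) == 0)
            printf("size = %u\n", size);
        return 0;
    }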
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
index 17e652e65379..8e8bbb6ccebd 100644
--- a/drivers/acpi/executer/exoparg2.c
+++ b/drivers/acpi/executer/exoparg2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -241,10 +241,6 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
241 goto cleanup; 241 goto cleanup;
242 } 242 }
243 243
244 /* Return the remainder */
245
246 walk_state->result_obj = return_desc1;
247
248 cleanup: 244 cleanup:
249 /* 245 /*
250 * Since the remainder is not returned indirectly, remove a reference to 246 * Since the remainder is not returned indirectly, remove a reference to
@@ -259,6 +255,12 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
259 acpi_ut_remove_reference(return_desc1); 255 acpi_ut_remove_reference(return_desc1);
260 } 256 }
261 257
258 /* Save return object (the remainder) on success */
259
260 else {
261 walk_state->result_obj = return_desc1;
262 }
263
262 return_ACPI_STATUS(status); 264 return_ACPI_STATUS(status);
263} 265}
264 266
@@ -490,6 +492,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
490 492
491 if (ACPI_FAILURE(status)) { 493 if (ACPI_FAILURE(status)) {
492 acpi_ut_remove_reference(return_desc); 494 acpi_ut_remove_reference(return_desc);
495 walk_state->result_obj = NULL;
493 } 496 }
494 497
495 return_ACPI_STATUS(status); 498 return_ACPI_STATUS(status);
@@ -583,8 +586,6 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
583 return_desc->integer.value = ACPI_INTEGER_MAX; 586 return_desc->integer.value = ACPI_INTEGER_MAX;
584 } 587 }
585 588
586 walk_state->result_obj = return_desc;
587
588 cleanup: 589 cleanup:
589 590
590 /* Delete return object on error */ 591 /* Delete return object on error */
@@ -593,5 +594,11 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
593 acpi_ut_remove_reference(return_desc); 594 acpi_ut_remove_reference(return_desc);
594 } 595 }
595 596
597 /* Save return object on success */
598
599 else {
600 walk_state->result_obj = return_desc;
601 }
602
596 return_ACPI_STATUS(status); 603 return_ACPI_STATUS(status);
597} 604}
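Both hunks above move the result assignment past the cleanup label, so the return object is published only when the operation succeeded and is freed otherwise. The same pattern in a self-contained sketch (names are illustrative, not the ACPICA ones):

    #include <stdio.h>
    #include <stdlib.h>

    struct result { int value; };

    struct walk_state { struct result *result_obj; };

    static int do_op(struct walk_state *ws, int fail)
    {
        int status = 0;
        struct result *ret = malloc(sizeof(*ret));

        if (!ret)
            return -1;
        ret->value = 42;

        if (fail)
            status = -1;

        /* cleanup: delete on error, publish only on success */
        if (status) {
            free(ret);
            ws->result_obj = NULL;
        } else {
            ws->result_obj = ret;
        }
        return status;
    }

    int main(void)
    {
        struct walk_state ws = { 0 };

        do_op(&ws, 0);
        printf("result %s\n", ws.result_obj ? "published" : "dropped");
        free(ws.result_obj);
        return 0;
    }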
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/executer/exoparg3.c
index 7fe67cf82cee..9cb4197681af 100644
--- a/drivers/acpi/executer/exoparg3.c
+++ b/drivers/acpi/executer/exoparg3.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -260,6 +260,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
260 260
261 if (ACPI_FAILURE(status) || walk_state->result_obj) { 261 if (ACPI_FAILURE(status) || walk_state->result_obj) {
262 acpi_ut_remove_reference(return_desc); 262 acpi_ut_remove_reference(return_desc);
263 walk_state->result_obj = NULL;
263 } 264 }
264 265
265 /* Set the return object and exit */ 266 /* Set the return object and exit */
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/executer/exoparg6.c
index bd80a9cb3d65..67d48737af53 100644
--- a/drivers/acpi/executer/exoparg6.c
+++ b/drivers/acpi/executer/exoparg6.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -322,8 +322,6 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
322 goto cleanup; 322 goto cleanup;
323 } 323 }
324 324
325 walk_state->result_obj = return_desc;
326
327 cleanup: 325 cleanup:
328 326
329 /* Delete return object on error */ 327 /* Delete return object on error */
@@ -332,5 +330,11 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
332 acpi_ut_remove_reference(return_desc); 330 acpi_ut_remove_reference(return_desc);
333 } 331 }
334 332
333 /* Save return object on success */
334
335 else {
336 walk_state->result_obj = return_desc;
337 }
338
335 return_ACPI_STATUS(status); 339 return_ACPI_STATUS(status);
336} 340}
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index efe5d4b461a4..3a2f8cd4c62a 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -412,6 +412,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
412acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) 412acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
413{ 413{
414 union acpi_operand_object *obj_desc; 414 union acpi_operand_object *obj_desc;
415 union acpi_operand_object *second_desc = NULL;
415 u32 type; 416 u32 type;
416 acpi_status status; 417 acpi_status status;
417 418
@@ -494,6 +495,20 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
494 obj_desc->field.access_byte_width, 495 obj_desc->field.access_byte_width,
495 obj_desc->bank_field.region_obj, 496 obj_desc->bank_field.region_obj,
496 obj_desc->bank_field.bank_obj)); 497 obj_desc->bank_field.bank_obj));
498
499 /*
500 * Remember location in AML stream of the field unit
501 * opcode and operands -- since the bank_value
502 * operands must be evaluated.
503 */
504 second_desc = obj_desc->common.next_object;
505 second_desc->extra.aml_start =
506 ((union acpi_parse_object *)(info->data_register_node))->
507 named.data;
508 second_desc->extra.aml_length =
509 ((union acpi_parse_object *)(info->data_register_node))->
510 named.length;
511
497 break; 512 break;
498 513
499 case ACPI_TYPE_LOCAL_INDEX_FIELD: 514 case ACPI_TYPE_LOCAL_INDEX_FIELD:
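For bank fields the bank_value operand cannot be evaluated yet, so the code above records where the operand lives in the AML stream (start and length) for later evaluation. A tiny sketch of that record-now, evaluate-later bookkeeping, with invented names and fake bytes:

    #include <stdio.h>
    #include <stddef.h>

    /* Deferred operand: remember where it sits in the byte stream so it
     * can be evaluated later instead of at parse time. */
    struct deferred_operand {
        const unsigned char *aml_start;
        size_t               aml_length;
    };

    static void defer(struct deferred_operand *d,
                      const unsigned char *stream, size_t offset, size_t length)
    {
        d->aml_start  = stream + offset;
        d->aml_length = length;
    }

    int main(void)
    {
        static const unsigned char aml[] = { 0x70, 0x0A, 0x05, 0x60 }; /* fake bytes */
        struct deferred_operand op;

        defer(&op, aml, 1, 2);   /* remember bytes 1..2 for later evaluation */
        printf("deferred %zu byte(s) at offset %td\n",
               op.aml_length, op.aml_start - aml);
        return 0;
    }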
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
index 3f51b7e84a17..7cd8bb54fa01 100644
--- a/drivers/acpi/executer/exregion.c
+++ b/drivers/acpi/executer/exregion.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -160,7 +160,7 @@ acpi_ex_system_memory_space_handler(u32 function,
160 if (!mem_info->mapped_logical_address) { 160 if (!mem_info->mapped_logical_address) {
161 ACPI_ERROR((AE_INFO, 161 ACPI_ERROR((AE_INFO,
162 "Could not map memory at %8.8X%8.8X, size %X", 162 "Could not map memory at %8.8X%8.8X, size %X",
163 ACPI_FORMAT_UINT64(address), 163 ACPI_FORMAT_NATIVE_UINT(address),
164 (u32) window_size)); 164 (u32) window_size));
165 mem_info->mapped_length = 0; 165 mem_info->mapped_length = 0;
166 return_ACPI_STATUS(AE_NO_MEMORY); 166 return_ACPI_STATUS(AE_NO_MEMORY);
@@ -182,7 +182,8 @@ acpi_ex_system_memory_space_handler(u32 function,
182 182
183 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 183 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
184 "System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n", 184 "System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
185 bit_width, function, ACPI_FORMAT_UINT64(address))); 185 bit_width, function,
186 ACPI_FORMAT_NATIVE_UINT(address)));
186 187
187 /* 188 /*
188 * Perform the memory read or write 189 * Perform the memory read or write
@@ -284,7 +285,8 @@ acpi_ex_system_io_space_handler(u32 function,
284 285
285 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 286 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
286 "System-IO (width %d) R/W %d Address=%8.8X%8.8X\n", 287 "System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
287 bit_width, function, ACPI_FORMAT_UINT64(address))); 288 bit_width, function,
289 ACPI_FORMAT_NATIVE_UINT(address)));
288 290
289 /* Decode the function parameter */ 291 /* Decode the function parameter */
290 292
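Only the formatting macro changes in these hunks; the address is still printed as two 32-bit halves through the existing "%8.8X%8.8X" format. A plain-C illustration of that split (not the ACPI_FORMAT_NATIVE_UINT/ACPI_FORMAT_UINT64 macros themselves):

    #include <stdio.h>
    #include <stdint.h>

    /* Print a 64-bit physical address as two 32-bit halves, high then low,
     * to match a "%8.8X%8.8X" format string. */
    static void print_address(uint64_t address)
    {
        printf("Address=%8.8X%8.8X\n",
               (unsigned int)(address >> 32),
               (unsigned int)(address & 0xFFFFFFFFu));
    }

    int main(void)
    {
        print_address(0x00000000FEE00000ULL);   /* -> Address=00000000FEE00000 */
        return 0;
    }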
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
index 2b3a01cc4929..5596f42c9676 100644
--- a/drivers/acpi/executer/exresnte.c
+++ b/drivers/acpi/executer/exresnte.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -116,9 +116,11 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
116 * Several object types require no further processing: 116 * Several object types require no further processing:
117 * 1) Device/Thermal objects don't have a "real" subobject, return the Node 117 * 1) Device/Thermal objects don't have a "real" subobject, return the Node
118 * 2) Method locals and arguments have a pseudo-Node 118 * 2) Method locals and arguments have a pseudo-Node
119 * 3) 10/2007: Added method type to assist with Package construction.
119 */ 120 */
120 if ((entry_type == ACPI_TYPE_DEVICE) || 121 if ((entry_type == ACPI_TYPE_DEVICE) ||
121 (entry_type == ACPI_TYPE_THERMAL) || 122 (entry_type == ACPI_TYPE_THERMAL) ||
123 (entry_type == ACPI_TYPE_METHOD) ||
122 (node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) { 124 (node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
123 return_ACPI_STATUS(AE_OK); 125 return_ACPI_STATUS(AE_OK);
124 } 126 }
@@ -214,7 +216,6 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
214 /* For these objects, just return the object attached to the Node */ 216 /* For these objects, just return the object attached to the Node */
215 217
216 case ACPI_TYPE_MUTEX: 218 case ACPI_TYPE_MUTEX:
217 case ACPI_TYPE_METHOD:
218 case ACPI_TYPE_POWER: 219 case ACPI_TYPE_POWER:
219 case ACPI_TYPE_PROCESSOR: 220 case ACPI_TYPE_PROCESSOR:
220 case ACPI_TYPE_EVENT: 221 case ACPI_TYPE_EVENT:
@@ -238,13 +239,12 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
238 case ACPI_TYPE_LOCAL_REFERENCE: 239 case ACPI_TYPE_LOCAL_REFERENCE:
239 240
240 switch (source_desc->reference.opcode) { 241 switch (source_desc->reference.opcode) {
241 case AML_LOAD_OP: 242 case AML_LOAD_OP: /* This is a ddb_handle */
243 case AML_REF_OF_OP:
244 case AML_INDEX_OP:
242 245
243 /* This is a ddb_handle */
244 /* Return an additional reference to the object */ 246 /* Return an additional reference to the object */
245 247
246 case AML_REF_OF_OP:
247
248 obj_desc = source_desc; 248 obj_desc = source_desc;
249 acpi_ut_add_reference(obj_desc); 249 acpi_ut_add_reference(obj_desc);
250 break; 250 break;
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
index 6c64e55dab0e..b35f7c817acf 100644
--- a/drivers/acpi/executer/exresolv.c
+++ b/drivers/acpi/executer/exresolv.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -140,7 +140,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
140{ 140{
141 acpi_status status = AE_OK; 141 acpi_status status = AE_OK;
142 union acpi_operand_object *stack_desc; 142 union acpi_operand_object *stack_desc;
143 void *temp_node;
144 union acpi_operand_object *obj_desc = NULL; 143 union acpi_operand_object *obj_desc = NULL;
145 u16 opcode; 144 u16 opcode;
146 145
@@ -156,23 +155,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
156 opcode = stack_desc->reference.opcode; 155 opcode = stack_desc->reference.opcode;
157 156
158 switch (opcode) { 157 switch (opcode) {
159 case AML_NAME_OP:
160
161 /*
162 * Convert name reference to a namespace node
163 * Then, acpi_ex_resolve_node_to_value can be used to get the value
164 */
165 temp_node = stack_desc->reference.object;
166
167 /* Delete the Reference Object */
168
169 acpi_ut_remove_reference(stack_desc);
170
171 /* Return the namespace node */
172
173 (*stack_ptr) = temp_node;
174 break;
175
176 case AML_LOCAL_OP: 158 case AML_LOCAL_OP:
177 case AML_ARG_OP: 159 case AML_ARG_OP:
178 160
@@ -207,15 +189,25 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
207 switch (stack_desc->reference.target_type) { 189 switch (stack_desc->reference.target_type) {
208 case ACPI_TYPE_BUFFER_FIELD: 190 case ACPI_TYPE_BUFFER_FIELD:
209 191
210 /* Just return - leave the Reference on the stack */ 192 /* Just return - do not dereference */
211 break; 193 break;
212 194
213 case ACPI_TYPE_PACKAGE: 195 case ACPI_TYPE_PACKAGE:
214 196
197 /* If method call or copy_object - do not dereference */
198
199 if ((walk_state->opcode ==
200 AML_INT_METHODCALL_OP)
201 || (walk_state->opcode == AML_COPY_OP)) {
202 break;
203 }
204
205 /* Otherwise, dereference the package_index to a package element */
206
215 obj_desc = *stack_desc->reference.where; 207 obj_desc = *stack_desc->reference.where;
216 if (obj_desc) { 208 if (obj_desc) {
217 /* 209 /*
218 * Valid obj descriptor, copy pointer to return value 210 * Valid object descriptor, copy pointer to return value
219 * (i.e., dereference the package index) 211 * (i.e., dereference the package index)
220 * Delete the ref object, increment the returned object 212 * Delete the ref object, increment the returned object
221 */ 213 */
@@ -224,11 +216,11 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
224 *stack_ptr = obj_desc; 216 *stack_ptr = obj_desc;
225 } else { 217 } else {
226 /* 218 /*
227 * A NULL object descriptor means an unitialized element of 219 * A NULL object descriptor means an uninitialized element of
228 * the package, can't dereference it 220 * the package, can't dereference it
229 */ 221 */
230 ACPI_ERROR((AE_INFO, 222 ACPI_ERROR((AE_INFO,
231 "Attempt to deref an Index to NULL pkg element Idx=%p", 223 "Attempt to dereference an Index to NULL package element Idx=%p",
232 stack_desc)); 224 stack_desc));
233 status = AE_AML_UNINITIALIZED_ELEMENT; 225 status = AE_AML_UNINITIALIZED_ELEMENT;
234 } 226 }
@@ -239,7 +231,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
239 /* Invalid reference object */ 231 /* Invalid reference object */
240 232
241 ACPI_ERROR((AE_INFO, 233 ACPI_ERROR((AE_INFO,
242 "Unknown TargetType %X in Index/Reference obj %p", 234 "Unknown TargetType %X in Index/Reference object %p",
243 stack_desc->reference.target_type, 235 stack_desc->reference.target_type,
244 stack_desc)); 236 stack_desc));
245 status = AE_AML_INTERNAL; 237 status = AE_AML_INTERNAL;
@@ -251,7 +243,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
251 case AML_DEBUG_OP: 243 case AML_DEBUG_OP:
252 case AML_LOAD_OP: 244 case AML_LOAD_OP:
253 245
254 /* Just leave the object as-is */ 246 /* Just leave the object as-is, do not dereference */
255 247
256 break; 248 break;
257 249
@@ -390,10 +382,10 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
390 } 382 }
391 383
392 /* 384 /*
393 * For reference objects created via the ref_of or Index operators, 385 * For reference objects created via the ref_of, Index, or Load/load_table
394 * we need to get to the base object (as per the ACPI specification 386 * operators, we need to get to the base object (as per the ACPI
395 * of the object_type and size_of operators). This means traversing 387 * specification of the object_type and size_of operators). This means
396 * the list of possibly many nested references. 388 * traversing the list of possibly many nested references.
397 */ 389 */
398 while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) { 390 while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
399 switch (obj_desc->reference.opcode) { 391 switch (obj_desc->reference.opcode) {
@@ -463,6 +455,11 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
463 } 455 }
464 break; 456 break;
465 457
458 case AML_LOAD_OP:
459
460 type = ACPI_TYPE_DDB_HANDLE;
461 goto exit;
462
466 case AML_LOCAL_OP: 463 case AML_LOCAL_OP:
467 case AML_ARG_OP: 464 case AML_ARG_OP:
468 465
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index 09d897b3f6d5..73e29e566a70 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -137,7 +137,6 @@ acpi_ex_resolve_operands(u16 opcode,
137 union acpi_operand_object *obj_desc; 137 union acpi_operand_object *obj_desc;
138 acpi_status status = AE_OK; 138 acpi_status status = AE_OK;
139 u8 object_type; 139 u8 object_type;
140 void *temp_node;
141 u32 arg_types; 140 u32 arg_types;
142 const struct acpi_opcode_info *op_info; 141 const struct acpi_opcode_info *op_info;
143 u32 this_arg_type; 142 u32 this_arg_type;
@@ -239,7 +238,6 @@ acpi_ex_resolve_operands(u16 opcode,
239 238
240 /*lint -fallthrough */ 239 /*lint -fallthrough */
241 240
242 case AML_NAME_OP:
243 case AML_INDEX_OP: 241 case AML_INDEX_OP:
244 case AML_REF_OF_OP: 242 case AML_REF_OF_OP:
245 case AML_ARG_OP: 243 case AML_ARG_OP:
@@ -332,15 +330,6 @@ acpi_ex_resolve_operands(u16 opcode,
332 if (ACPI_FAILURE(status)) { 330 if (ACPI_FAILURE(status)) {
333 return_ACPI_STATUS(status); 331 return_ACPI_STATUS(status);
334 } 332 }
335
336 if (obj_desc->reference.opcode == AML_NAME_OP) {
337
338 /* Convert a named reference to the actual named object */
339
340 temp_node = obj_desc->reference.object;
341 acpi_ut_remove_reference(obj_desc);
342 (*stack_ptr) = temp_node;
343 }
344 goto next_operand; 333 goto next_operand;
345 334
346 case ARGI_DATAREFOBJ: /* Store operator only */ 335 case ARGI_DATAREFOBJ: /* Store operator only */
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
index f4b69a637820..76c875bc3154 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/executer/exstore.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -84,8 +84,12 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
84 84
85 ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc); 85 ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
86 86
87 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s", 87 /* Print line header as long as we are not in the middle of an object display */
88 level, " ")); 88
89 if (!((level > 0) && index == 0)) {
90 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
91 level, " "));
92 }
89 93
90 /* Display index for package output only */ 94 /* Display index for package output only */
91 95
@@ -95,12 +99,12 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
95 } 99 }
96 100
97 if (!source_desc) { 101 if (!source_desc) {
98 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "<Null Object>\n")); 102 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
99 return_VOID; 103 return_VOID;
100 } 104 }
101 105
102 if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) { 106 if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
103 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: ", 107 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
104 acpi_ut_get_object_type_name 108 acpi_ut_get_object_type_name
105 (source_desc))); 109 (source_desc)));
106 110
@@ -123,6 +127,8 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
123 return_VOID; 127 return_VOID;
124 } 128 }
125 129
130 /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
131
126 switch (ACPI_GET_OBJECT_TYPE(source_desc)) { 132 switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
127 case ACPI_TYPE_INTEGER: 133 case ACPI_TYPE_INTEGER:
128 134
@@ -147,7 +153,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
147 (u32) source_desc->buffer.length)); 153 (u32) source_desc->buffer.length));
148 ACPI_DUMP_BUFFER(source_desc->buffer.pointer, 154 ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
149 (source_desc->buffer.length < 155 (source_desc->buffer.length <
150 32) ? source_desc->buffer.length : 32); 156 256) ? source_desc->buffer.length : 256);
151 break; 157 break;
152 158
153 case ACPI_TYPE_STRING: 159 case ACPI_TYPE_STRING:
@@ -160,7 +166,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
160 case ACPI_TYPE_PACKAGE: 166 case ACPI_TYPE_PACKAGE:
161 167
162 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, 168 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
163 "[0x%.2X Elements]\n", 169 "[Contains 0x%.2X Elements]\n",
164 source_desc->package.count)); 170 source_desc->package.count));
165 171
166 /* Output the entire contents of the package */ 172 /* Output the entire contents of the package */
@@ -180,12 +186,59 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
180 (source_desc->reference.opcode), 186 (source_desc->reference.opcode),
181 source_desc->reference.offset)); 187 source_desc->reference.offset));
182 } else { 188 } else {
183 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s]\n", 189 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s]",
184 acpi_ps_get_opcode_name 190 acpi_ps_get_opcode_name
185 (source_desc->reference.opcode))); 191 (source_desc->reference.opcode)));
186 } 192 }
187 193
188 if (source_desc->reference.object) { 194 if (source_desc->reference.opcode == AML_LOAD_OP) { /* Load and load_table */
195 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
196 " Table OwnerId %p\n",
197 source_desc->reference.object));
198 break;
199 }
200
201 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " "));
202
203 /* Check for valid node first, then valid object */
204
205 if (source_desc->reference.node) {
206 if (ACPI_GET_DESCRIPTOR_TYPE
207 (source_desc->reference.node) !=
208 ACPI_DESC_TYPE_NAMED) {
209 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
210 " %p - Not a valid namespace node\n",
211 source_desc->reference.
212 node));
213 } else {
214 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
215 "Node %p [%4.4s] ",
216 source_desc->reference.
217 node,
218 (source_desc->reference.
219 node)->name.ascii));
220
221 switch ((source_desc->reference.node)->type) {
222
223 /* These types have no attached object */
224
225 case ACPI_TYPE_DEVICE:
226 acpi_os_printf("Device\n");
227 break;
228
229 case ACPI_TYPE_THERMAL:
230 acpi_os_printf("Thermal Zone\n");
231 break;
232
233 default:
234 acpi_ex_do_debug_object((source_desc->
235 reference.
236 node)->object,
237 level + 4, 0);
238 break;
239 }
240 }
241 } else if (source_desc->reference.object) {
189 if (ACPI_GET_DESCRIPTOR_TYPE 242 if (ACPI_GET_DESCRIPTOR_TYPE
190 (source_desc->reference.object) == 243 (source_desc->reference.object) ==
191 ACPI_DESC_TYPE_NAMED) { 244 ACPI_DESC_TYPE_NAMED) {
@@ -198,18 +251,13 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
198 acpi_ex_do_debug_object(source_desc->reference. 251 acpi_ex_do_debug_object(source_desc->reference.
199 object, level + 4, 0); 252 object, level + 4, 0);
200 } 253 }
201 } else if (source_desc->reference.node) {
202 acpi_ex_do_debug_object((source_desc->reference.node)->
203 object, level + 4, 0);
204 } 254 }
205 break; 255 break;
206 256
207 default: 257 default:
208 258
209 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p %s\n", 259 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
210 source_desc, 260 source_desc));
211 acpi_ut_get_object_type_name
212 (source_desc)));
213 break; 261 break;
214 } 262 }
215 263
@@ -313,7 +361,6 @@ acpi_ex_store(union acpi_operand_object *source_desc,
313 * 4) Store to the debug object 361 * 4) Store to the debug object
314 */ 362 */
315 switch (ref_desc->reference.opcode) { 363 switch (ref_desc->reference.opcode) {
316 case AML_NAME_OP:
317 case AML_REF_OF_OP: 364 case AML_REF_OF_OP:
318 365
319 /* Storing an object into a Name "container" */ 366 /* Storing an object into a Name "container" */
@@ -415,11 +462,24 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
415 */ 462 */
416 obj_desc = *(index_desc->reference.where); 463 obj_desc = *(index_desc->reference.where);
417 464
418 status = 465 if (ACPI_GET_OBJECT_TYPE(source_desc) ==
419 acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc, 466 ACPI_TYPE_LOCAL_REFERENCE
420 walk_state); 467 && source_desc->reference.opcode == AML_LOAD_OP) {
421 if (ACPI_FAILURE(status)) { 468
422 return_ACPI_STATUS(status); 469 /* This is a DDBHandle, just add a reference to it */
470
471 acpi_ut_add_reference(source_desc);
472 new_desc = source_desc;
473 } else {
474 /* Normal object, copy it */
475
476 status =
477 acpi_ut_copy_iobject_to_iobject(source_desc,
478 &new_desc,
479 walk_state);
480 if (ACPI_FAILURE(status)) {
481 return_ACPI_STATUS(status);
482 }
423 } 483 }
424 484
425 if (obj_desc) { 485 if (obj_desc) {
@@ -571,10 +631,17 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
571 631
572 /* If no implicit conversion, drop into the default case below */ 632 /* If no implicit conversion, drop into the default case below */
573 633
574 if ((!implicit_conversion) || (walk_state->opcode == AML_COPY_OP)) { 634 if ((!implicit_conversion) ||
575 635 ((walk_state->opcode == AML_COPY_OP) &&
576 /* Force execution of default (no implicit conversion) */ 636 (target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
577 637 (target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
638 (target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
639 /*
640 * Force execution of default (no implicit conversion). Note:
641 * copy_object does not perform an implicit conversion, as per the ACPI
642 * spec -- except in case of region/bank/index fields -- because these
643 * objects must retain their original type permanently.
644 */
578 target_type = ACPI_TYPE_ANY; 645 target_type = ACPI_TYPE_ANY;
579 } 646 }
580 647
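The store-to-Index change treats a DDBHandle reference as a shared object (just add a reference) while everything else is still deep-copied. A generic sketch of that share-versus-copy decision, with illustrative types rather than acpi_operand_object:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct obj {
        int  is_handle;       /* handle-like objects are shared, not copied */
        int  refcount;
        char data[16];
    };

    /* Store 'src' into a slot: bump the refcount for handle objects,
     * otherwise store an independent copy. */
    static struct obj *store(struct obj *src)
    {
        if (src->is_handle) {
            src->refcount++;
            return src;
        }

        struct obj *copy = malloc(sizeof(*copy));
        if (!copy)
            return NULL;
        memcpy(copy, src, sizeof(*copy));
        copy->refcount = 1;
        return copy;
    }

    int main(void)
    {
        struct obj handle = { .is_handle = 1, .refcount = 1 };
        struct obj plain  = { .is_handle = 0, .refcount = 1, .data = "payload" };

        struct obj *a = store(&handle);   /* same object, refcount now 2 */
        struct obj *b = store(&plain);    /* independent copy */

        printf("handle refcount=%d, copy distinct=%d\n",
               handle.refcount, b != &plain);
        if (b && b != &plain)
            free(b);
        (void)a;
        return 0;
    }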
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
index 1d622c625c64..a6d2168b81f9 100644
--- a/drivers/acpi/executer/exstoren.c
+++ b/drivers/acpi/executer/exstoren.c
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2007, R. Byron Moore 10 * Copyright (C) 2000 - 2008, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/executer/exstorob.c
index 8233d40178ee..9a75ff09fb0c 100644
--- a/drivers/acpi/executer/exstorob.c
+++ b/drivers/acpi/executer/exstorob.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index 9460baff3032..68990f1df371 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,6 @@
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acinterp.h> 46#include <acpi/acinterp.h>
47#include <acpi/acevents.h>
48 47
49#define _COMPONENT ACPI_EXECUTER 48#define _COMPONENT ACPI_EXECUTER
50ACPI_MODULE_NAME("exsystem") 49ACPI_MODULE_NAME("exsystem")
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
index 6b0aeccbb69b..86c03880b523 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/executer/exutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -61,7 +61,6 @@
61#include <acpi/acpi.h> 61#include <acpi/acpi.h>
62#include <acpi/acinterp.h> 62#include <acpi/acinterp.h>
63#include <acpi/amlcode.h> 63#include <acpi/amlcode.h>
64#include <acpi/acevents.h>
65 64
66#define _COMPONENT ACPI_EXECUTER 65#define _COMPONENT ACPI_EXECUTER
67ACPI_MODULE_NAME("exutils") 66ACPI_MODULE_NAME("exutils")
@@ -217,9 +216,10 @@ void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
217 216
218 /* 217 /*
219 * Object must be a valid number and we must be executing 218 * Object must be a valid number and we must be executing
220 * a control method 219 * a control method. NS node could be there for AML_INT_NAMEPATH_OP.
221 */ 220 */
222 if ((!obj_desc) || 221 if ((!obj_desc) ||
222 (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) ||
223 (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) { 223 (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) {
224 return; 224 return;
225 } 225 }
@@ -240,72 +240,73 @@ void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
240 * PARAMETERS: field_flags - Flags with Lock rule: 240 * PARAMETERS: field_flags - Flags with Lock rule:
241 * always_lock or never_lock 241 * always_lock or never_lock
242 * 242 *
243 * RETURN: TRUE/FALSE indicating whether the lock was actually acquired 243 * RETURN: None
244 * 244 *
245 * DESCRIPTION: Obtain the global lock and keep track of this fact via two 245 * DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field
246 * methods. A global variable keeps the state of the lock, and 246 * flags specifiy that it is to be obtained before field access.
246 * methods. A global variable keeps the state of the lock, and 246 * flags specify that it is to be obtained before field access.
247 * the state is returned to the caller.
248 * 247 *
249 ******************************************************************************/ 248 ******************************************************************************/
250 249
251u8 acpi_ex_acquire_global_lock(u32 field_flags) 250void acpi_ex_acquire_global_lock(u32 field_flags)
252{ 251{
253 u8 locked = FALSE;
254 acpi_status status; 252 acpi_status status;
255 253
256 ACPI_FUNCTION_TRACE(ex_acquire_global_lock); 254 ACPI_FUNCTION_TRACE(ex_acquire_global_lock);
257 255
258 /* Only attempt lock if the always_lock bit is set */ 256 /* Only use the lock if the always_lock bit is set */
257
258 if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
259 return_VOID;
260 }
259 261
260 if (field_flags & AML_FIELD_LOCK_RULE_MASK) { 262 /* Attempt to get the global lock, wait forever */
261 263
262 /* We should attempt to get the lock, wait forever */ 264 status = acpi_ex_acquire_mutex_object(ACPI_WAIT_FOREVER,
265 acpi_gbl_global_lock_mutex,
266 acpi_os_get_thread_id());
263 267
264 status = acpi_ev_acquire_global_lock(ACPI_WAIT_FOREVER); 268 if (ACPI_FAILURE(status)) {
265 if (ACPI_SUCCESS(status)) { 269 ACPI_EXCEPTION((AE_INFO, status,
266 locked = TRUE; 270 "Could not acquire Global Lock"));
267 } else {
268 ACPI_EXCEPTION((AE_INFO, status,
269 "Could not acquire Global Lock"));
270 }
271 } 271 }
272 272
273 return_UINT8(locked); 273 return_VOID;
274} 274}
275 275
276/******************************************************************************* 276/*******************************************************************************
277 * 277 *
278 * FUNCTION: acpi_ex_release_global_lock 278 * FUNCTION: acpi_ex_release_global_lock
279 * 279 *
280 * PARAMETERS: locked_by_me - Return value from corresponding call to 280 * PARAMETERS: field_flags - Flags with Lock rule:
281 * acquire_global_lock. 281 * always_lock or never_lock
282 * 282 *
283 * RETURN: None 283 * RETURN: None
284 * 284 *
285 * DESCRIPTION: Release the global lock if it is locked. 285 * DESCRIPTION: Release the ACPI hardware Global Lock
286 * 286 *
287 ******************************************************************************/ 287 ******************************************************************************/
288 288
289void acpi_ex_release_global_lock(u8 locked_by_me) 289void acpi_ex_release_global_lock(u32 field_flags)
290{ 290{
291 acpi_status status; 291 acpi_status status;
292 292
293 ACPI_FUNCTION_TRACE(ex_release_global_lock); 293 ACPI_FUNCTION_TRACE(ex_release_global_lock);
294 294
295 /* Only attempt unlock if the caller locked it */ 295 /* Only use the lock if the always_lock bit is set */
296 296
297 if (locked_by_me) { 297 if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
298 return_VOID;
299 }
298 300
299 /* OK, now release the lock */ 301 /* Release the global lock */
300 302
301 status = acpi_ev_release_global_lock(); 303 status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
302 if (ACPI_FAILURE(status)) { 304 if (ACPI_FAILURE(status)) {
303 305
304 /* Report the error, but there isn't much else we can do */ 306 /* Report the error, but there isn't much else we can do */
305 307
306 ACPI_EXCEPTION((AE_INFO, status, 308 ACPI_EXCEPTION((AE_INFO, status,
307 "Could not release ACPI Global Lock")); 309 "Could not release Global Lock"));
308 }
309 } 310 }
310 311
311 return_VOID; 312 return_VOID;
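With this change the callers no longer track a "locked" flag; both helpers decide from the field's lock rule alone and return void. A compact userspace sketch of that flag-gated pairing (the flag value and helpers are made up, not the ACPICA API):

    #include <stdio.h>

    #define FIELD_LOCK_RULE_MASK 0x10   /* illustrative flag bit */

    static int global_lock_depth;       /* stand-in for the real lock */

    static void acquire_global_lock(unsigned int field_flags)
    {
        if (!(field_flags & FIELD_LOCK_RULE_MASK))
            return;                     /* never_lock: nothing to do */
        global_lock_depth++;            /* always_lock: take the lock */
    }

    static void release_global_lock(unsigned int field_flags)
    {
        if (!(field_flags & FIELD_LOCK_RULE_MASK))
            return;
        global_lock_depth--;
    }

    int main(void)
    {
        unsigned int flags = FIELD_LOCK_RULE_MASK;

        acquire_global_lock(flags);
        /* ... field access would happen here ... */
        release_global_lock(flags);
        printf("lock depth = %d\n", global_lock_depth);
        return 0;
    }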
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index c8e3cba423ef..6cf10cbc1eee 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -192,17 +192,13 @@ static int acpi_fan_add_fs(struct acpi_device *device)
192 } 192 }
193 193
194 /* 'status' [R/W] */ 194 /* 'status' [R/W] */
195 entry = create_proc_entry(ACPI_FAN_FILE_STATE, 195 entry = proc_create_data(ACPI_FAN_FILE_STATE,
196 S_IFREG | S_IRUGO | S_IWUSR, 196 S_IFREG | S_IRUGO | S_IWUSR,
197 acpi_device_dir(device)); 197 acpi_device_dir(device),
198 &acpi_fan_state_ops,
199 device);
198 if (!entry) 200 if (!entry)
199 return -ENODEV; 201 return -ENODEV;
200 else {
201 entry->proc_fops = &acpi_fan_state_ops;
202 entry->data = device;
203 entry->owner = THIS_MODULE;
204 }
205
206 return 0; 202 return 0;
207} 203}
208 204
@@ -260,24 +256,23 @@ static int acpi_fan_add(struct acpi_device *device)
260 result = PTR_ERR(cdev); 256 result = PTR_ERR(cdev);
261 goto end; 257 goto end;
262 } 258 }
263 if (cdev) { 259
264 printk(KERN_INFO PREFIX 260 printk(KERN_INFO PREFIX
265 "%s is registered as cooling_device%d\n", 261 "%s is registered as cooling_device%d\n",
266 device->dev.bus_id, cdev->id); 262 device->dev.bus_id, cdev->id);
267 263
268 acpi_driver_data(device) = cdev; 264 acpi_driver_data(device) = cdev;
269 result = sysfs_create_link(&device->dev.kobj, 265 result = sysfs_create_link(&device->dev.kobj,
270 &cdev->device.kobj, 266 &cdev->device.kobj,
271 "thermal_cooling"); 267 "thermal_cooling");
272 if (result) 268 if (result)
273 return result; 269 printk(KERN_ERR PREFIX "Create sysfs link\n");
274 270
275 result = sysfs_create_link(&cdev->device.kobj, 271 result = sysfs_create_link(&cdev->device.kobj,
276 &device->dev.kobj, 272 &device->dev.kobj,
277 "device"); 273 "device");
278 if (result) 274 if (result)
279 return result; 275 printk(KERN_ERR PREFIX "Create sysfs link\n");
280 }
281 276
282 result = acpi_fan_add_fs(device); 277 result = acpi_fan_add_fs(device);
283 if (result) 278 if (result)
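The fan driver now registers its proc entry, file operations, and private data in a single proc_create_data() call instead of filling in the fields after create_proc_entry(). A minimal module sketch of that call as it looked in this kernel generation (file_operations based; later kernels switched to struct proc_ops), with illustrative names such as demo_state:

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "state: %s\n", (char *)m->private);
        return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
        /* PDE(inode)->data is the pointer passed to proc_create_data() */
        return single_open(file, demo_show, PDE(inode)->data);
    }

    static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

    static int __init demo_init(void)
    {
        /* Entry, fops, and private data are registered in one call */
        if (!proc_create_data("demo_state", S_IFREG | S_IRUGO, NULL,
                              &demo_fops, "ok"))
            return -ENODEV;
        return 0;
    }

    static void __exit demo_exit(void)
    {
        remove_proc_entry("demo_state", NULL);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");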
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index eda0978b57c6..06f8634fe58b 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -142,6 +142,7 @@ EXPORT_SYMBOL(acpi_get_physical_device);
142 142
143static int acpi_bind_one(struct device *dev, acpi_handle handle) 143static int acpi_bind_one(struct device *dev, acpi_handle handle)
144{ 144{
145 struct acpi_device *acpi_dev;
145 acpi_status status; 146 acpi_status status;
146 147
147 if (dev->archdata.acpi_handle) { 148 if (dev->archdata.acpi_handle) {
@@ -157,6 +158,16 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
157 } 158 }
158 dev->archdata.acpi_handle = handle; 159 dev->archdata.acpi_handle = handle;
159 160
161 status = acpi_bus_get_device(handle, &acpi_dev);
162 if (!ACPI_FAILURE(status)) {
163 int ret;
164
165 ret = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
166 "firmware_node");
167 ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
168 "physical_node");
169 }
170
160 return 0; 171 return 0;
161} 172}
162 173
@@ -165,8 +176,17 @@ static int acpi_unbind_one(struct device *dev)
165 if (!dev->archdata.acpi_handle) 176 if (!dev->archdata.acpi_handle)
166 return 0; 177 return 0;
167 if (dev == acpi_get_physical_device(dev->archdata.acpi_handle)) { 178 if (dev == acpi_get_physical_device(dev->archdata.acpi_handle)) {
179 struct acpi_device *acpi_dev;
180
168 /* acpi_get_physical_device increase refcnt by one */ 181 /* acpi_get_physical_device increase refcnt by one */
169 put_device(dev); 182 put_device(dev);
183
184 if (!acpi_bus_get_device(dev->archdata.acpi_handle,
185 &acpi_dev)) {
186 sysfs_remove_link(&dev->kobj, "firmware_node");
187 sysfs_remove_link(&acpi_dev->dev.kobj, "physical_node");
188 }
189
170 acpi_detach_data(dev->archdata.acpi_handle, 190 acpi_detach_data(dev->archdata.acpi_handle,
171 acpi_glue_data_handler); 191 acpi_glue_data_handler);
172 dev->archdata.acpi_handle = NULL; 192 dev->archdata.acpi_handle = NULL;
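acpi_bind_one()/acpi_unbind_one() now tie the physical device and its ACPI companion together with a pair of sysfs symlinks. A sketch of that bidirectional linking with hypothetical device pointers (the sysfs calls are the real kernel API); unlike the hunk above, the sketch also undoes the first link if the second one fails:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* Link two already-registered devices both ways; undo on failure. */
    static int link_firmware_node(struct device *phys, struct device *fw)
    {
        int ret;

        ret = sysfs_create_link(&phys->kobj, &fw->kobj, "firmware_node");
        if (ret)
            return ret;

        ret = sysfs_create_link(&fw->kobj, &phys->kobj, "physical_node");
        if (ret)
            sysfs_remove_link(&phys->kobj, "firmware_node");

        return ret;
    }

    static void unlink_firmware_node(struct device *phys, struct device *fw)
    {
        sysfs_remove_link(&phys->kobj, "firmware_node");
        sysfs_remove_link(&fw->kobj, "physical_node");
    }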
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/hardware/hwacpi.c
index 6031ca13dd2f..816894ea839e 100644
--- a/drivers/acpi/hardware/hwacpi.c
+++ b/drivers/acpi/hardware/hwacpi.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
index 117a05cadaaa..14bc4f456ae8 100644
--- a/drivers/acpi/hardware/hwgpe.c
+++ b/drivers/acpi/hardware/hwgpe.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c
index 73f9c5fb1ba7..ddf792adcf96 100644
--- a/drivers/acpi/hardware/hwregs.c
+++ b/drivers/acpi/hardware/hwregs.c
@@ -7,7 +7,7 @@
7 ******************************************************************************/ 7 ******************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2007, R. Byron Moore 10 * Copyright (C) 2000 - 2008, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index 4290e0193097..d9937e05ec6a 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -70,9 +70,10 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
70 70
71 /* Get the FACS */ 71 /* Get the FACS */
72 72
73 status = 73 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
74 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, 74 ACPI_CAST_INDIRECT_PTR(struct
75 (struct acpi_table_header **)&facs); 75 acpi_table_header,
76 &facs));
76 if (ACPI_FAILURE(status)) { 77 if (ACPI_FAILURE(status)) {
77 return_ACPI_STATUS(status); 78 return_ACPI_STATUS(status);
78 } 79 }
@@ -124,9 +125,10 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
124 125
125 /* Get the FACS */ 126 /* Get the FACS */
126 127
127 status = 128 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
128 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, 129 ACPI_CAST_INDIRECT_PTR(struct
129 (struct acpi_table_header **)&facs); 130 acpi_table_header,
131 &facs));
130 if (ACPI_FAILURE(status)) { 132 if (ACPI_FAILURE(status)) {
131 return_ACPI_STATUS(status); 133 return_ACPI_STATUS(status);
132 } 134 }
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/hardware/hwtimer.c
index c32eab696acd..b53d575491b9 100644
--- a/drivers/acpi/hardware/hwtimer.c
+++ b/drivers/acpi/hardware/hwtimer.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c
index 57faf598bad8..c39a7f68b889 100644
--- a/drivers/acpi/namespace/nsaccess.c
+++ b/drivers/acpi/namespace/nsaccess.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -208,8 +208,7 @@ acpi_status acpi_ns_root_initialize(void)
208 /* Special case for ACPI Global Lock */ 208 /* Special case for ACPI Global Lock */
209 209
210 if (ACPI_STRCMP(init_val->name, "_GL_") == 0) { 210 if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
211 acpi_gbl_global_lock_mutex = 211 acpi_gbl_global_lock_mutex = obj_desc;
212 obj_desc->mutex.os_mutex;
213 212
214 /* Create additional counting semaphore for global lock */ 213 /* Create additional counting semaphore for global lock */
215 214
@@ -582,44 +581,68 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
582 return_ACPI_STATUS(status); 581 return_ACPI_STATUS(status);
583 } 582 }
584 583
585 /* 584 /* More segments to follow? */
586 * Sanity typecheck of the target object: 585
587 * 586 if (num_segments > 0) {
588 * If 1) This is the last segment (num_segments == 0) 587 /*
589 * 2) And we are looking for a specific type 588 * If we have an alias to an object that opens a scope (such as a
590 * (Not checking for TYPE_ANY) 589 * device or processor), we need to dereference the alias here so that
591 * 3) Which is not an alias 590 * we can access any children of the original node (via the remaining
592 * 4) Which is not a local type (TYPE_SCOPE) 591 * segments).
593 * 5) And the type of target object is known (not TYPE_ANY) 592 */
594 * 6) And target object does not match what we are looking for 593 if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) {
595 * 594 if (acpi_ns_opens_scope
596 * Then we have a type mismatch. Just warn and ignore it. 595 (((struct acpi_namespace_node *)this_node->
597 */ 596 object)->type)) {
598 if ((num_segments == 0) && 597 this_node =
599 (type_to_check_for != ACPI_TYPE_ANY) && 598 (struct acpi_namespace_node *)
600 (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) && 599 this_node->object;
601 (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS) && 600 }
602 (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE) && 601 }
603 (this_node->type != ACPI_TYPE_ANY) &&
604 (this_node->type != type_to_check_for)) {
605
606 /* Complain about a type mismatch */
607
608 ACPI_WARNING((AE_INFO,
609 "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
610 ACPI_CAST_PTR(char, &simple_name),
611 acpi_ut_get_type_name(this_node->type),
612 acpi_ut_get_type_name
613 (type_to_check_for)));
614 } 602 }
615 603
616 /* 604 /* Special handling for the last segment (num_segments == 0) */
617 * If this is the last name segment and we are not looking for a 605
618 * specific type, but the type of found object is known, use that type 606 else {
619 * to see if it opens a scope. 607 /*
620 */ 608 * Sanity typecheck of the target object:
621 if ((num_segments == 0) && (type == ACPI_TYPE_ANY)) { 609 *
622 type = this_node->type; 610 * If 1) This is the last segment (num_segments == 0)
611 * 2) And we are looking for a specific type
612 * (Not checking for TYPE_ANY)
613 * 3) Which is not an alias
614 * 4) Which is not a local type (TYPE_SCOPE)
615 * 5) And the type of target object is known (not TYPE_ANY)
616 * 6) And target object does not match what we are looking for
617 *
618 * Then we have a type mismatch. Just warn and ignore it.
619 */
620 if ((type_to_check_for != ACPI_TYPE_ANY) &&
621 (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) &&
622 (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS)
623 && (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE)
624 && (this_node->type != ACPI_TYPE_ANY)
625 && (this_node->type != type_to_check_for)) {
626
627 /* Complain about a type mismatch */
628
629 ACPI_WARNING((AE_INFO,
630 "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
631 ACPI_CAST_PTR(char, &simple_name),
632 acpi_ut_get_type_name(this_node->
633 type),
634 acpi_ut_get_type_name
635 (type_to_check_for)));
636 }
637
638 /*
639 * If this is the last name segment and we are not looking for a
640 * specific type, but the type of found object is known, use that type
641 * to (later) see if it opens a scope.
642 */
643 if (type == ACPI_TYPE_ANY) {
644 type = this_node->type;
645 }
623 } 646 }
624 647
625 /* Point to next name segment and make this node current */ 648 /* Point to next name segment and make this node current */
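When more name segments follow, an alias to a scope-opening object (Device, Processor, and so on) is dereferenced so the lookup can reach the original node's children. A toy path-walk sketch of that rule (structures and names are invented for illustration, not the real namespace node layout):

    #include <stdio.h>
    #include <string.h>

    struct node {
        const char  *name;
        int          is_alias;
        struct node *target;   /* alias target (an object that opens a scope) */
        struct node *child;    /* a single child is enough for the sketch */
    };

    /* Resolve one path segment. When more segments follow and the current
     * node is an alias, dereference it first so its children are reachable. */
    static struct node *lookup_child(struct node *current, const char *segment,
                                     int more_segments)
    {
        if (more_segments && current->is_alias)
            current = current->target;

        if (current->child && strcmp(current->child->name, segment) == 0)
            return current->child;
        return NULL;
    }

    int main(void)
    {
        struct node cpu   = { .name = "CPU0" };
        struct node scope = { .name = "_PR",  .child = &cpu };
        struct node alias = { .name = "PRAL", .is_alias = 1, .target = &scope };

        struct node *hit = lookup_child(&alias, "CPU0", 1);
        printf("found: %s\n", hit ? hit->name : "(none)");
        return 0;
    }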
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/namespace/nsalloc.c
index 1d693d8ad2d8..3a1740ac2edc 100644
--- a/drivers/acpi/namespace/nsalloc.c
+++ b/drivers/acpi/namespace/nsalloc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index 1fc4f86676e1..5445751b8a3e 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -249,7 +249,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
249 acpi_os_printf("ID %X Len %.4X Addr %p\n", 249 acpi_os_printf("ID %X Len %.4X Addr %p\n",
250 obj_desc->processor.proc_id, 250 obj_desc->processor.proc_id,
251 obj_desc->processor.length, 251 obj_desc->processor.length,
252 (char *)obj_desc->processor.address); 252 ACPI_CAST_PTR(void,
253 obj_desc->processor.
254 address));
253 break; 255 break;
254 256
255 case ACPI_TYPE_DEVICE: 257 case ACPI_TYPE_DEVICE:
@@ -320,9 +322,8 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
320 space_id)); 322 space_id));
321 if (obj_desc->region.flags & AOPOBJ_DATA_VALID) { 323 if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
322 acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n", 324 acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
323 ACPI_FORMAT_UINT64(obj_desc-> 325 ACPI_FORMAT_NATIVE_UINT
324 region. 326 (obj_desc->region.address),
325 address),
326 obj_desc->region.length); 327 obj_desc->region.length);
327 } else { 328 } else {
328 acpi_os_printf 329 acpi_os_printf
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/namespace/nsdumpdv.c
index 5097e167939e..428f50fde11a 100644
--- a/drivers/acpi/namespace/nsdumpdv.c
+++ b/drivers/acpi/namespace/nsdumpdv.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 97b2ac57c16b..14bdfa92bea0 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index 33db2241044e..6d6d930c8e18 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -244,6 +244,10 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
         info->field_count++;
         break;
 
+    case ACPI_TYPE_LOCAL_BANK_FIELD:
+        info->field_count++;
+        break;
+
     case ACPI_TYPE_BUFFER:
         info->buffer_count++;
         break;
@@ -287,6 +291,12 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
         status = acpi_ds_get_buffer_field_arguments(obj_desc);
         break;
 
+    case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+        info->field_init++;
+        status = acpi_ds_get_bank_field_arguments(obj_desc);
+        break;
+
     case ACPI_TYPE_BUFFER:
 
         info->buffer_init++;
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
index d4f9654fd20f..2c92f6cf5ce1 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/namespace/nsload.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -107,11 +107,11 @@ acpi_ns_load_table(acpi_native_uint table_index,
         goto unlock;
     }
 
-    status = acpi_ns_parse_table(table_index, node->child);
+    status = acpi_ns_parse_table(table_index, node);
     if (ACPI_SUCCESS(status)) {
         acpi_tb_set_table_loaded_flag(table_index, TRUE);
     } else {
-        acpi_tb_release_owner_id(table_index);
+        (void)acpi_tb_release_owner_id(table_index);
     }
 
       unlock:
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index cbd94af08cc5..cffef1bcbdbc 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -180,6 +180,12 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
     next_node = node;
 
     while (next_node && (next_node != acpi_gbl_root_node)) {
+        if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
+            ACPI_ERROR((AE_INFO,
+                        "Invalid NS Node (%p) while traversing path",
+                        next_node));
+            return 0;
+        }
         size += ACPI_PATH_SEGMENT_LENGTH;
         next_node = acpi_ns_get_parent_node(next_node);
     }
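The nsnames.c hunk above adds a defensive check: the walk up the parent chain now refuses to dereference anything that is not a named namespace descriptor and reports a zero path length instead. The same idea in isolation, as a minimal sketch with invented types (these are not the ACPICA structures or constants):

  struct ns_node {
          unsigned char descriptor_type;   /* illustrative tag */
          struct ns_node *parent;
  };

  #define NS_DESC_NAMED        0x01        /* stands in for ACPI_DESC_TYPE_NAMED */
  #define PATH_SEGMENT_LENGTH  5           /* "XXXX." */

  static unsigned int path_length(struct ns_node *node, struct ns_node *root)
  {
          unsigned int size = 0;

          while (node && node != root) {
                  if (node->descriptor_type != NS_DESC_NAMED)
                          return 0;        /* corrupted chain: fail closed */
                  size += PATH_SEGMENT_LENGTH;
                  node = node->parent;
          }
          return size;
  }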
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/namespace/nsobject.c
index d9d7377bc6e6..15fe09e24f71 100644
--- a/drivers/acpi/namespace/nsobject.c
+++ b/drivers/acpi/namespace/nsobject.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c
index e696aa847990..46a79b0103b6 100644
--- a/drivers/acpi/namespace/nsparse.c
+++ b/drivers/acpi/namespace/nsparse.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -64,7 +64,8 @@ ACPI_MODULE_NAME("nsparse")
  ******************************************************************************/
 acpi_status
 acpi_ns_one_complete_parse(acpi_native_uint pass_number,
-                           acpi_native_uint table_index)
+                           acpi_native_uint table_index,
+                           struct acpi_namespace_node * start_node)
 {
     union acpi_parse_object *parse_root;
     acpi_status status;
@@ -111,14 +112,25 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number,
         aml_start = (u8 *) table + sizeof(struct acpi_table_header);
         aml_length = table->length - sizeof(struct acpi_table_header);
         status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
-                                       aml_start, aml_length, NULL,
-                                       (u8) pass_number);
+                                       aml_start, (u32) aml_length,
+                                       NULL, (u8) pass_number);
     }
 
     if (ACPI_FAILURE(status)) {
         acpi_ds_delete_walk_state(walk_state);
-        acpi_ps_delete_parse_tree(parse_root);
-        return_ACPI_STATUS(status);
+        goto cleanup;
+    }
+
+    /* start_node is the default location to load the table */
+
+    if (start_node && start_node != acpi_gbl_root_node) {
+        status =
+            acpi_ds_scope_stack_push(start_node, ACPI_TYPE_METHOD,
+                                     walk_state);
+        if (ACPI_FAILURE(status)) {
+            acpi_ds_delete_walk_state(walk_state);
+            goto cleanup;
+        }
     }
 
     /* Parse the AML */
@@ -127,6 +139,7 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number,
                       (unsigned)pass_number));
     status = acpi_ps_parse_aml(walk_state);
 
+      cleanup:
     acpi_ps_delete_parse_tree(parse_root);
     return_ACPI_STATUS(status);
 }
@@ -163,7 +176,9 @@ acpi_ns_parse_table(acpi_native_uint table_index,
      * performs another complete parse of the AML.
      */
     ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
-    status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index);
+    status =
+        acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index,
+                                   start_node);
     if (ACPI_FAILURE(status)) {
         return_ACPI_STATUS(status);
     }
@@ -178,7 +193,9 @@ acpi_ns_parse_table(acpi_native_uint table_index,
      * parse objects are all cached.
      */
     ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
-    status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index);
+    status =
+        acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index,
+                                   start_node);
     if (ACPI_FAILURE(status)) {
         return_ACPI_STATUS(status);
     }
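The nsparse.c changes above thread a new start_node argument through both parse passes: when a caller supplies one, it is pushed onto the scope stack before the AML walk so the table is loaded under that node rather than the namespace root. A toy, self-contained sketch of that "push the starting scope, then parse" pattern — the structure and names below are invented for illustration, not the ACPICA scope-stack API:

  #include <stdio.h>

  #define MAX_SCOPES 8

  struct scope_stack {
          const char *node[MAX_SCOPES];
          int depth;
  };

  static int push_scope(struct scope_stack *s, const char *node)
  {
          if (s->depth >= MAX_SCOPES)
                  return -1;
          s->node[s->depth++] = node;
          return 0;
  }

  int main(void)
  {
          struct scope_stack s = { .depth = 0 };
          const char *start_node = "\\_SB_.PCI0";  /* would otherwise default to the root */

          if (push_scope(&s, start_node) == 0)
                  printf("parsing table under %s\n", s.node[s.depth - 1]);
          return 0;
  }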
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
index e863be665ce8..8399276cba1e 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/namespace/nssearch.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
index 90fd059615ff..64c039843ed2 100644
--- a/drivers/acpi/namespace/nsutils.c
+++ b/drivers/acpi/namespace/nsutils.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index 280b8357c46c..3c905ce26d7d 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -77,9 +77,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct
 
         /* It's really the parent's _scope_ that we want */
 
-        if (parent_node->child) {
-            next_node = parent_node->child;
-        }
+        next_node = parent_node->child;
     }
 
     else {
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index b92133faf5b7..a8d549187c84 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -467,10 +467,13 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
         return (AE_CTRL_DEPTH);
     }
 
-    if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
-
-        /* Don't examine children of the device if not present */
-
+    if (!(flags & ACPI_STA_DEVICE_PRESENT) &&
+        !(flags & ACPI_STA_DEVICE_FUNCTIONING)) {
+        /*
+         * Don't examine the children of the device only when the
+         * device is neither present nor functional. See ACPI spec,
+         * description of _STA for more information.
+         */
         return (AE_CTRL_DEPTH);
     }
 
@@ -539,7 +542,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
  * value is returned to the caller.
  *
  * This is a wrapper for walk_namespace, but the callback performs
- * additional filtering. Please see acpi_get_device_callback.
+ * additional filtering. Please see acpi_ns_get_device_callback.
  *
  ******************************************************************************/
 
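The widened test in the first nsxfeval.c hunk descends into a device's children when _STA reports the device as either present (bit 0) or functioning (bit 3). A standalone illustration of that bit test, using local macros that mirror the _STA bit positions rather than the kernel's ACPI_STA_* names:

  #include <stdio.h>

  #define STA_DEVICE_PRESENT      0x01    /* _STA bit 0 */
  #define STA_DEVICE_FUNCTIONING  0x08    /* _STA bit 3 */

  /* Returns nonzero when the subtree should still be examined. */
  static int examine_children(unsigned int sta)
  {
          return (sta & (STA_DEVICE_PRESENT | STA_DEVICE_FUNCTIONING)) != 0;
  }

  int main(void)
  {
          printf("%d %d %d\n",
                 examine_children(0x0F),   /* present and functioning -> 1 */
                 examine_children(0x08),   /* functioning only        -> 1 */
                 examine_children(0x00));  /* neither                 -> 0 */
          return 0;
  }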
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
index b489781b22a8..a287ed550f54 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/namespace/nsxfobj.c
index faa375887201..2b375ee80cef 100644
--- a/drivers/acpi/namespace/nsxfobj.c
+++ b/drivers/acpi/namespace/nsxfobj.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index a498a6cc68fe..235a1386888a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -742,6 +742,7 @@ EXPORT_SYMBOL(acpi_os_execute);
 void acpi_os_wait_events_complete(void *context)
 {
     flush_workqueue(kacpid_wq);
+    flush_workqueue(kacpi_notify_wq);
 }
 
 EXPORT_SYMBOL(acpi_os_wait_events_complete);
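The osl.c hunk reflects that notify handlers run on their own workqueue, so "wait for all events" must flush every queue that can still hold work. A minimal sketch of the same pattern, with placeholder workqueue pointers (assumed to have been created elsewhere with create_workqueue()):

  #include <linux/workqueue.h>

  static struct workqueue_struct *cmd_wq;     /* placeholder, not the driver's kacpid_wq */
  static struct workqueue_struct *notify_wq;  /* placeholder, not kacpi_notify_wq */

  static void example_wait_events_complete(void)
  {
          /* Flushing only one queue would leave handlers on the other in flight. */
          flush_workqueue(cmd_wq);
          flush_workqueue(notify_wq);
  }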
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
index c2b9835c890b..f1e8bf65e24e 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/parser/psargs.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -230,12 +230,12 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
                           struct acpi_parse_state *parser_state,
                           union acpi_parse_object *arg, u8 possible_method_call)
 {
+    acpi_status status;
     char *path;
     union acpi_parse_object *name_op;
-    acpi_status status;
     union acpi_operand_object *method_desc;
     struct acpi_namespace_node *node;
-    union acpi_generic_state scope_info;
+    u8 *start = parser_state->aml;
 
     ACPI_FUNCTION_TRACE(ps_get_next_namepath);
 
@@ -249,25 +249,18 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
         return_ACPI_STATUS(AE_OK);
     }
 
-    /* Setup search scope info */
-
-    scope_info.scope.node = NULL;
-    node = parser_state->start_node;
-    if (node) {
-        scope_info.scope.node = node;
-    }
-
     /*
-     * Lookup the name in the internal namespace. We don't want to add
-     * anything new to the namespace here, however, so we use MODE_EXECUTE.
+     * Lookup the name in the internal namespace, starting with the current
+     * scope. We don't want to add anything new to the namespace here,
+     * however, so we use MODE_EXECUTE.
      * Allow searching of the parent tree, but don't open a new scope -
      * we just want to lookup the object (must be mode EXECUTE to perform
      * the upsearch)
      */
-    status =
-        acpi_ns_lookup(&scope_info, path, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+    status = acpi_ns_lookup(walk_state->scope_info, path,
+                            ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
                             ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
                             NULL, &node);
 
     /*
     * If this name is a control method invocation, we must
@@ -275,6 +268,16 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
      */
     if (ACPI_SUCCESS(status) &&
         possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
+        if (walk_state->op->common.aml_opcode == AML_UNLOAD_OP) {
+            /*
+             * acpi_ps_get_next_namestring has increased the AML pointer,
+             * so we need to restore the saved AML pointer for method call.
+             */
+            walk_state->parser_state.aml = start;
+            walk_state->arg_count = 1;
+            acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
+            return_ACPI_STATUS(AE_OK);
+        }
 
         /* This name is actually a control method invocation */
 
@@ -686,9 +689,29 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
                 return_ACPI_STATUS(AE_NO_MEMORY);
             }
 
-            status =
-                acpi_ps_get_next_namepath(walk_state, parser_state,
-                                          arg, 0);
+            /* To support super_name arg of Unload */
+
+            if (walk_state->op->common.aml_opcode == AML_UNLOAD_OP) {
+                status =
+                    acpi_ps_get_next_namepath(walk_state,
+                                              parser_state, arg,
+                                              1);
+
+                /*
+                 * If the super_name arg of Unload is a method call,
+                 * we have restored the AML pointer, just free this Arg
+                 */
+                if (arg->common.aml_opcode ==
+                    AML_INT_METHODCALL_OP) {
+                    acpi_ps_free_op(arg);
+                    arg = NULL;
+                }
+            } else {
+                status =
+                    acpi_ps_get_next_namepath(walk_state,
+                                              parser_state, arg,
+                                              0);
+            }
         } else {
             /* Single complex argument, nothing returned */
 
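The Unload handling in psargs.c boils down to a save/rewind of the parse cursor: remember where the namestring began, and restore the pointer when the target turns out to be a method invocation so it can be re-parsed as a call. A generic, self-contained sketch of that pattern (names and the toy "namestring" rule are invented):

  #include <stdio.h>

  struct parser {
          const char *aml;            /* current read position */
  };

  static void consume_name(struct parser *p)
  {
          while (*p->aml && *p->aml != ' ')
                  p->aml++;           /* pretend-namestring: read up to a blank */
  }

  /* Returns 1 when the cursor was rewound for re-parsing as a method call. */
  static int get_target(struct parser *p, int is_method_call)
  {
          const char *start = p->aml;

          consume_name(p);
          if (is_method_call) {
                  p->aml = start;     /* rewind; caller re-parses this as a call */
                  return 1;
          }
          return 0;
  }

  int main(void)
  {
          struct parser p = { .aml = "MTH0 rest-of-aml" };

          get_target(&p, 1);
          printf("cursor restored to: %s\n", p.aml);
          return 0;
  }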
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
index 773aee82fbb8..c06238e55d98 100644
--- a/drivers/acpi/parser/psloop.c
+++ b/drivers/acpi/parser/psloop.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -182,6 +182,7 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
182 ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state); 182 ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
183 183
184 unnamed_op->common.value.arg = NULL; 184 unnamed_op->common.value.arg = NULL;
185 unnamed_op->common.arg_list_length = 0;
185 unnamed_op->common.aml_opcode = walk_state->opcode; 186 unnamed_op->common.aml_opcode = walk_state->opcode;
186 187
187 /* 188 /*
@@ -241,7 +242,8 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
241 acpi_ps_append_arg(*op, unnamed_op->common.value.arg); 242 acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
242 acpi_gbl_depth++; 243 acpi_gbl_depth++;
243 244
244 if ((*op)->common.aml_opcode == AML_REGION_OP) { 245 if ((*op)->common.aml_opcode == AML_REGION_OP ||
246 (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
245 /* 247 /*
246 * Defer final parsing of an operation_region body, because we don't 248 * Defer final parsing of an operation_region body, because we don't
247 * have enough info in the first pass to parse it correctly (i.e., 249 * have enough info in the first pass to parse it correctly (i.e.,
@@ -280,6 +282,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
280 acpi_status status = AE_OK; 282 acpi_status status = AE_OK;
281 union acpi_parse_object *op; 283 union acpi_parse_object *op;
282 union acpi_parse_object *named_op = NULL; 284 union acpi_parse_object *named_op = NULL;
285 union acpi_parse_object *parent_scope;
286 u8 argument_count;
287 const struct acpi_opcode_info *op_info;
283 288
284 ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state); 289 ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
285 290
@@ -320,8 +325,32 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
320 op->named.length = 0; 325 op->named.length = 0;
321 } 326 }
322 327
323 acpi_ps_append_arg(acpi_ps_get_parent_scope 328 if (walk_state->opcode == AML_BANK_FIELD_OP) {
324 (&(walk_state->parser_state)), op); 329 /*
330 * Backup to beginning of bank_field declaration
331 * body_length is unknown until we parse the body
332 */
333 op->named.data = aml_op_start;
334 op->named.length = 0;
335 }
336
337 parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
338 acpi_ps_append_arg(parent_scope, op);
339
340 if (parent_scope) {
341 op_info =
342 acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
343 if (op_info->flags & AML_HAS_TARGET) {
344 argument_count =
345 acpi_ps_get_argument_count(op_info->type);
346 if (parent_scope->common.arg_list_length >
347 argument_count) {
348 op->common.flags |= ACPI_PARSEOP_TARGET;
349 }
350 } else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
351 op->common.flags |= ACPI_PARSEOP_TARGET;
352 }
353 }
325 354
326 if (walk_state->descending_callback != NULL) { 355 if (walk_state->descending_callback != NULL) {
327 /* 356 /*
@@ -603,13 +632,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
603 acpi_ps_pop_scope(&(walk_state->parser_state), op, 632 acpi_ps_pop_scope(&(walk_state->parser_state), op,
604 &walk_state->arg_types, 633 &walk_state->arg_types,
605 &walk_state->arg_count); 634 &walk_state->arg_count);
606
607 if ((*op)->common.aml_opcode != AML_WHILE_OP) {
608 status2 = acpi_ds_result_stack_pop(walk_state);
609 if (ACPI_FAILURE(status2)) {
610 return_ACPI_STATUS(status2);
611 }
612 }
613 } 635 }
614 636
615 /* Close this iteration of the While loop */ 637 /* Close this iteration of the While loop */
@@ -640,10 +662,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
640 if (ACPI_FAILURE(status2)) { 662 if (ACPI_FAILURE(status2)) {
641 return_ACPI_STATUS(status2); 663 return_ACPI_STATUS(status2);
642 } 664 }
643 status2 = acpi_ds_result_stack_pop(walk_state);
644 if (ACPI_FAILURE(status2)) {
645 return_ACPI_STATUS(status2);
646 }
647 665
648 acpi_ut_delete_generic_state 666 acpi_ut_delete_generic_state
649 (acpi_ut_pop_generic_state 667 (acpi_ut_pop_generic_state
@@ -1005,7 +1023,8 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
1005 acpi_gbl_depth--; 1023 acpi_gbl_depth--;
1006 } 1024 }
1007 1025
1008 if (op->common.aml_opcode == AML_REGION_OP) { 1026 if (op->common.aml_opcode == AML_REGION_OP ||
1027 op->common.aml_opcode == AML_DATA_REGION_OP) {
1009 /* 1028 /*
1010 * Skip parsing of control method or opregion body, 1029 * Skip parsing of control method or opregion body,
1011 * because we don't have enough info in the first pass 1030 * because we don't have enough info in the first pass
@@ -1030,6 +1049,16 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
1030 (u32) (parser_state->aml - op->named.data); 1049 (u32) (parser_state->aml - op->named.data);
1031 } 1050 }
1032 1051
1052 if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
1053 /*
1054 * Backup to beginning of bank_field declaration
1055 *
1056 * body_length is unknown until we parse the body
1057 */
1058 op->named.length =
1059 (u32) (parser_state->aml - op->named.data);
1060 }
1061
1033 /* This op complete, notify the dispatcher */ 1062 /* This op complete, notify the dispatcher */
1034 1063
1035 if (walk_state->ascending_callback != NULL) { 1064 if (walk_state->ascending_callback != NULL) {
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index 9296e86761d7..f425ab30eae8 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,9 @@
49#define _COMPONENT ACPI_PARSER 49#define _COMPONENT ACPI_PARSER
50ACPI_MODULE_NAME("psopcode") 50ACPI_MODULE_NAME("psopcode")
51 51
52static const u8 acpi_gbl_argument_count[] =
53 { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
54
52/******************************************************************************* 55/*******************************************************************************
53 * 56 *
54 * NAME: acpi_gbl_aml_op_info 57 * NAME: acpi_gbl_aml_op_info
@@ -59,6 +62,7 @@ ACPI_MODULE_NAME("psopcode")
59 * the operand type. 62 * the operand type.
60 * 63 *
61 ******************************************************************************/ 64 ******************************************************************************/
65
62/* 66/*
63 * Summary of opcode types/flags 67 * Summary of opcode types/flags
64 * 68 *
@@ -176,6 +180,7 @@ ACPI_MODULE_NAME("psopcode")
176 AML_CREATE_QWORD_FIELD_OP 180 AML_CREATE_QWORD_FIELD_OP
177 181
178 ******************************************************************************/ 182 ******************************************************************************/
183
179/* 184/*
180 * Master Opcode information table. A summary of everything we know about each 185 * Master Opcode information table. A summary of everything we know about each
181 * opcode, all in one place. 186 * opcode, all in one place.
@@ -515,9 +520,10 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
515 AML_TYPE_NAMED_FIELD, 520 AML_TYPE_NAMED_FIELD,
516 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), 521 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
517/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, 522/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
518 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, 523 ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT,
519 AML_TYPE_NAMED_FIELD, 524 AML_TYPE_NAMED_FIELD,
520 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), 525 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD |
526 AML_DEFER),
521 527
522/* Internal opcodes that map to invalid AML opcodes */ 528/* Internal opcodes that map to invalid AML opcodes */
523 529
@@ -619,9 +625,9 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
619 AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R), 625 AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
620/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP, 626/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP,
621 ARGI_DATA_REGION_OP, ACPI_TYPE_REGION, 627 ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
622 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, 628 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
623 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 629 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
624 AML_NSNODE | AML_NAMED), 630 AML_NSNODE | AML_NAMED | AML_DEFER),
625/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, 631/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
626 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, 632 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
627 AML_TYPE_NAMED_NO_OBJ, 633 AML_TYPE_NAMED_NO_OBJ,
@@ -779,3 +785,25 @@ char *acpi_ps_get_opcode_name(u16 opcode)
779 785
780#endif 786#endif
781} 787}
788
789/*******************************************************************************
790 *
791 * FUNCTION: acpi_ps_get_argument_count
792 *
793 * PARAMETERS: op_type - Type associated with the AML opcode
794 *
795 * RETURN: Argument count
796 *
797 * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
798 *
799 ******************************************************************************/
800
801u8 acpi_ps_get_argument_count(u32 op_type)
802{
803
804 if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
805 return (acpi_gbl_argument_count[op_type]);
806 }
807
808 return (0);
809}
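The new acpi_ps_get_argument_count() above is a bounds-checked table lookup: an opcode "type" indexes a fixed table of argument counts, and anything out of range yields zero. A generic restatement (table contents copied from the hunk; the bound here is the table size rather than the AML_TYPE_EXEC_6A_0T_1R constant, and the function name is local to the sketch):

  #include <stdio.h>

  static const unsigned char argument_count[] = { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };

  static unsigned char get_argument_count(unsigned int op_type)
  {
          if (op_type < sizeof(argument_count))
                  return argument_count[op_type];
          return 0;                  /* unknown type: no fixed arguments */
  }

  int main(void)
  {
          printf("%u %u\n", get_argument_count(11), get_argument_count(99));  /* 6 0 */
          return 0;
  }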
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
index 5d63f48e56b5..15e1702e48d6 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/parser/psparse.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -205,6 +205,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
205 || (op->common.parent->common.aml_opcode == 205 || (op->common.parent->common.aml_opcode ==
206 AML_PACKAGE_OP) 206 AML_PACKAGE_OP)
207 || (op->common.parent->common.aml_opcode == 207 || (op->common.parent->common.aml_opcode ==
208 AML_BANK_FIELD_OP)
209 || (op->common.parent->common.aml_opcode ==
208 AML_VAR_PACKAGE_OP)) { 210 AML_VAR_PACKAGE_OP)) {
209 replacement_op = 211 replacement_op =
210 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP); 212 acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
@@ -349,19 +351,13 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
349 351
350 parser_state->aml = walk_state->aml_last_while; 352 parser_state->aml = walk_state->aml_last_while;
351 walk_state->control_state->common.value = FALSE; 353 walk_state->control_state->common.value = FALSE;
352 status = acpi_ds_result_stack_pop(walk_state); 354 status = AE_CTRL_BREAK;
353 if (ACPI_SUCCESS(status)) {
354 status = AE_CTRL_BREAK;
355 }
356 break; 355 break;
357 356
358 case AE_CTRL_CONTINUE: 357 case AE_CTRL_CONTINUE:
359 358
360 parser_state->aml = walk_state->aml_last_while; 359 parser_state->aml = walk_state->aml_last_while;
361 status = acpi_ds_result_stack_pop(walk_state); 360 status = AE_CTRL_CONTINUE;
362 if (ACPI_SUCCESS(status)) {
363 status = AE_CTRL_CONTINUE;
364 }
365 break; 361 break;
366 362
367 case AE_CTRL_PENDING: 363 case AE_CTRL_PENDING:
@@ -383,10 +379,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
383 * Just close out this package 379 * Just close out this package
384 */ 380 */
385 parser_state->aml = acpi_ps_get_next_package_end(parser_state); 381 parser_state->aml = acpi_ps_get_next_package_end(parser_state);
386 status = acpi_ds_result_stack_pop(walk_state); 382 status = AE_CTRL_PENDING;
387 if (ACPI_SUCCESS(status)) {
388 status = AE_CTRL_PENDING;
389 }
390 break; 383 break;
391 384
392 case AE_CTRL_FALSE: 385 case AE_CTRL_FALSE:
@@ -541,7 +534,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
             if ((status == AE_ALREADY_EXISTS) &&
                 (!walk_state->method_desc->method.mutex)) {
                 ACPI_INFO((AE_INFO,
-                           "Marking method %4.4s as Serialized",
+                           "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
                            walk_state->method_node->name.
                            ascii));
 
@@ -601,6 +594,30 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
601 * The object is deleted 594 * The object is deleted
602 */ 595 */
603 if (!previous_walk_state->return_desc) { 596 if (!previous_walk_state->return_desc) {
597 /*
598 * In slack mode execution, if there is no return value
599 * we should implicitly return zero (0) as a default value.
600 */
601 if (acpi_gbl_enable_interpreter_slack &&
602 !previous_walk_state->
603 implicit_return_obj) {
604 previous_walk_state->
605 implicit_return_obj =
606 acpi_ut_create_internal_object
607 (ACPI_TYPE_INTEGER);
608 if (!previous_walk_state->
609 implicit_return_obj) {
610 return_ACPI_STATUS
611 (AE_NO_MEMORY);
612 }
613
614 previous_walk_state->
615 implicit_return_obj->
616 integer.value = 0;
617 }
618
619 /* Restart the calling control method */
620
604 status = 621 status =
605 acpi_ds_restart_control_method 622 acpi_ds_restart_control_method
606 (walk_state, 623 (walk_state,
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/parser/psscope.c
index 77cfa4ed0cfe..ee50e67c9443 100644
--- a/drivers/acpi/parser/psscope.c
+++ b/drivers/acpi/parser/psscope.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/parser/pstree.c
index 966e7ea2a0c4..1dd355ddd182 100644
--- a/drivers/acpi/parser/pstree.c
+++ b/drivers/acpi/parser/pstree.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -171,6 +171,8 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
     while (arg) {
         arg->common.parent = op;
         arg = arg->common.next;
+
+        op->common.arg_list_length++;
     }
 }
 
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/parser/psutils.c
index 8ca52002db55..7cf1f65cd5bb 100644
--- a/drivers/acpi/parser/psutils.c
+++ b/drivers/acpi/parser/psutils.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/parser/pswalk.c
index 49f9757434e4..8b86ad5a3201 100644
--- a/drivers/acpi/parser/pswalk.c
+++ b/drivers/acpi/parser/pswalk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
index 94103bced75e..52581454c47c 100644
--- a/drivers/acpi/parser/psxface.c
+++ b/drivers/acpi/parser/psxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 76bf6d90c700..81e4f081a4ae 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -93,6 +93,7 @@ struct acpi_power_resource {
 static struct list_head acpi_power_resource_list;
 
 static const struct file_operations acpi_power_fops = {
+    .owner = THIS_MODULE,
     .open = acpi_power_open_fs,
     .read = seq_read,
     .llseek = seq_lseek,
@@ -121,7 +122,7 @@ acpi_power_get_context(acpi_handle handle,
     }
 
     *resource = acpi_driver_data(device);
-    if (!resource)
+    if (!*resource)
         return -ENODEV;
 
     return 0;
@@ -543,15 +544,11 @@ static int acpi_power_add_fs(struct acpi_device *device)
     }
 
     /* 'status' [R] */
-    entry = create_proc_entry(ACPI_POWER_FILE_STATUS,
-                              S_IRUGO, acpi_device_dir(device));
+    entry = proc_create_data(ACPI_POWER_FILE_STATUS,
+                             S_IRUGO, acpi_device_dir(device),
+                             &acpi_power_fops, acpi_driver_data(device));
     if (!entry)
         return -EIO;
-    else {
-        entry->proc_fops = &acpi_power_fops;
-        entry->data = acpi_driver_data(device);
-    }
-
     return 0;
 }
 
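The power.c hunk above (and the processor hunks that follow) all apply the same conversion: the three-step create_proc_entry() / entry->proc_fops / entry->data sequence collapses into a single proc_create_data() call, which registers the entry together with its file_operations and private data. A hedged sketch of the new shape, with placeholder identifiers rather than this driver's names:

  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  #include <linux/stat.h>

  static const struct file_operations example_fops;  /* assumed to be defined elsewhere */

  static int example_add_proc(struct proc_dir_entry *dir, void *driver_data)
  {
          struct proc_dir_entry *entry;

          /* One call replaces create_proc_entry() plus the proc_fops/data assignments. */
          entry = proc_create_data("status", S_IRUGO, dir, &example_fops, driver_data);
          if (!entry)
                  return -EIO;
          return 0;
  }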
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index a825b431b64f..386e5aa48834 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -112,6 +112,7 @@ static struct acpi_driver acpi_processor_driver = {
 #define UNINSTALL_NOTIFY_HANDLER    2
 
 static const struct file_operations acpi_processor_info_fops = {
+    .owner = THIS_MODULE,
     .open = acpi_processor_info_open_fs,
     .read = seq_read,
     .llseek = seq_lseek,
@@ -326,40 +327,30 @@ static int acpi_processor_add_fs(struct acpi_device *device)
326 acpi_device_dir(device)->owner = THIS_MODULE; 327 acpi_device_dir(device)->owner = THIS_MODULE;
327 328
328 /* 'info' [R] */ 329 /* 'info' [R] */
329 entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO, 330 entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
330 S_IRUGO, acpi_device_dir(device)); 331 S_IRUGO, acpi_device_dir(device),
332 &acpi_processor_info_fops,
333 acpi_driver_data(device));
331 if (!entry) 334 if (!entry)
332 return -EIO; 335 return -EIO;
333 else {
334 entry->proc_fops = &acpi_processor_info_fops;
335 entry->data = acpi_driver_data(device);
336 entry->owner = THIS_MODULE;
337 }
338 336
339 /* 'throttling' [R/W] */ 337 /* 'throttling' [R/W] */
340 entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, 338 entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
341 S_IFREG | S_IRUGO | S_IWUSR, 339 S_IFREG | S_IRUGO | S_IWUSR,
342 acpi_device_dir(device)); 340 acpi_device_dir(device),
341 &acpi_processor_throttling_fops,
342 acpi_driver_data(device));
343 if (!entry) 343 if (!entry)
344 return -EIO; 344 return -EIO;
345 else {
346 entry->proc_fops = &acpi_processor_throttling_fops;
347 entry->data = acpi_driver_data(device);
348 entry->owner = THIS_MODULE;
349 }
350 345
351 /* 'limit' [R/W] */ 346 /* 'limit' [R/W] */
352 entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, 347 entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
353 S_IFREG | S_IRUGO | S_IWUSR, 348 S_IFREG | S_IRUGO | S_IWUSR,
354 acpi_device_dir(device)); 349 acpi_device_dir(device),
350 &acpi_processor_limit_fops,
351 acpi_driver_data(device));
355 if (!entry) 352 if (!entry)
356 return -EIO; 353 return -EIO;
357 else {
358 entry->proc_fops = &acpi_processor_limit_fops;
359 entry->data = acpi_driver_data(device);
360 entry->owner = THIS_MODULE;
361 }
362
363 return 0; 354 return 0;
364} 355}
365 356
@@ -612,6 +603,15 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
612 request_region(pr->throttling.address, 6, "ACPI CPU throttle"); 603 request_region(pr->throttling.address, 6, "ACPI CPU throttle");
613 } 604 }
614 605
606 /*
607 * If ACPI describes a slot number for this CPU, we can use it
608 * ensure we get the right value in the "physical id" field
609 * of /proc/cpuinfo
610 */
611 status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
612 if (ACPI_SUCCESS(status))
613 arch_fix_phys_package_id(pr->id, object.integer.value);
614
615 return 0; 615 return 0;
616} 616}
617 617
@@ -674,22 +674,21 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
674 result = PTR_ERR(pr->cdev); 674 result = PTR_ERR(pr->cdev);
675 goto end; 675 goto end;
676 } 676 }
677 if (pr->cdev) { 677
678 printk(KERN_INFO PREFIX 678 printk(KERN_INFO PREFIX
679 "%s is registered as cooling_device%d\n", 679 "%s is registered as cooling_device%d\n",
680 device->dev.bus_id, pr->cdev->id); 680 device->dev.bus_id, pr->cdev->id);
681 681
682 result = sysfs_create_link(&device->dev.kobj, 682 result = sysfs_create_link(&device->dev.kobj,
683 &pr->cdev->device.kobj, 683 &pr->cdev->device.kobj,
684 "thermal_cooling"); 684 "thermal_cooling");
685 if (result) 685 if (result)
686 return result; 686 printk(KERN_ERR PREFIX "Create sysfs link\n");
687 result = sysfs_create_link(&pr->cdev->device.kobj, 687 result = sysfs_create_link(&pr->cdev->device.kobj,
688 &device->dev.kobj, 688 &device->dev.kobj,
689 "device"); 689 "device");
690 if (result) 690 if (result)
691 return result; 691 printk(KERN_ERR PREFIX "Create sysfs link\n");
692 }
693 692
694 if (pr->flags.throttling) { 693 if (pr->flags.throttling) {
695 printk(KERN_INFO PREFIX "%s [%s] (supports", 694 printk(KERN_INFO PREFIX "%s [%s] (supports",
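Among the processor_core.c changes, the new _SUN evaluation reads the ACPI-described slot number so the "physical id" reported in /proc/cpuinfo can be fixed up. The usual way to read such a single-integer object from a driver is the acpi_evaluate_integer() helper; the sketch below uses it with placeholder behaviour (the out-parameter's exact type has varied across kernel versions, so treat the signature as an assumption):

  #include <linux/kernel.h>
  #include <acpi/acpi_bus.h>

  static void example_read_slot_number(acpi_handle handle)
  {
          unsigned long long sun;   /* older kernels used unsigned long here */
          acpi_status status;

          status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
          if (ACPI_SUCCESS(status))
                  printk(KERN_INFO "ACPI slot number (_SUN): %llu\n", sun);
  }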
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0d90ff5fd117..2dd2c1f3a01c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -847,6 +847,7 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
847 /* all processors need to support C1 */ 847 /* all processors need to support C1 */
848 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 848 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
849 pr->power.states[ACPI_STATE_C1].valid = 1; 849 pr->power.states[ACPI_STATE_C1].valid = 1;
850 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
850 } 851 }
851 /* the C0 state only exists as a filler in our array */ 852 /* the C0 state only exists as a filler in our array */
852 pr->power.states[ACPI_STATE_C0].valid = 1; 853 pr->power.states[ACPI_STATE_C0].valid = 1;
@@ -959,6 +960,9 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
959 cx.address); 960 cx.address);
960 } 961 }
961 962
963 if (cx.type == ACPI_STATE_C1) {
964 cx.valid = 1;
965 }
962 966
963 obj = &(element->package.elements[2]); 967 obj = &(element->package.elements[2]);
964 if (obj->type != ACPI_TYPE_INTEGER) 968 if (obj->type != ACPI_TYPE_INTEGER)
@@ -1282,6 +1286,7 @@ static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations acpi_processor_power_fops = {
+    .owner = THIS_MODULE,
     .open = acpi_processor_power_open_fs,
     .read = seq_read,
     .llseek = seq_lseek,
@@ -1294,6 +1299,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1294{ 1299{
1295 int result = 0; 1300 int result = 0;
1296 1301
1302 if (boot_option_idle_override)
1303 return 0;
1297 1304
1298 if (!pr) 1305 if (!pr)
1299 return -EINVAL; 1306 return -EINVAL;
@@ -1733,6 +1740,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1733{ 1740{
1734 int ret; 1741 int ret;
1735 1742
1743 if (boot_option_idle_override)
1744 return 0;
1745
1736 if (!pr) 1746 if (!pr)
1737 return -EINVAL; 1747 return -EINVAL;
1738 1748
@@ -1763,6 +1773,8 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1763 struct proc_dir_entry *entry = NULL; 1773 struct proc_dir_entry *entry = NULL;
1764 unsigned int i; 1774 unsigned int i;
1765 1775
1776 if (boot_option_idle_override)
1777 return 0;
1766 1778
1767 if (!first_run) { 1779 if (!first_run) {
1768 dmi_check_system(processor_power_dmi_table); 1780 dmi_check_system(processor_power_dmi_table);
@@ -1798,7 +1810,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1798 * Note that we use previously set idle handler will be used on 1810 * Note that we use previously set idle handler will be used on
1799 * platforms that only support C1. 1811 * platforms that only support C1.
1800 */ 1812 */
1801 if ((pr->flags.power) && (!boot_option_idle_override)) { 1813 if (pr->flags.power) {
1802#ifdef CONFIG_CPU_IDLE 1814#ifdef CONFIG_CPU_IDLE
1803 acpi_processor_setup_cpuidle(pr); 1815 acpi_processor_setup_cpuidle(pr);
1804 pr->power.dev.cpu = pr->id; 1816 pr->power.dev.cpu = pr->id;
@@ -1822,24 +1834,23 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1822 } 1834 }
1823 1835
1824 /* 'power' [R] */ 1836 /* 'power' [R] */
1825 entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER, 1837 entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
1826 S_IRUGO, acpi_device_dir(device)); 1838 S_IRUGO, acpi_device_dir(device),
1839 &acpi_processor_power_fops,
1840 acpi_driver_data(device));
1827 if (!entry) 1841 if (!entry)
1828 return -EIO; 1842 return -EIO;
1829 else {
1830 entry->proc_fops = &acpi_processor_power_fops;
1831 entry->data = acpi_driver_data(device);
1832 entry->owner = THIS_MODULE;
1833 }
1834
1835 return 0; 1843 return 0;
1836} 1844}
1837 1845
1838int acpi_processor_power_exit(struct acpi_processor *pr, 1846int acpi_processor_power_exit(struct acpi_processor *pr,
1839 struct acpi_device *device) 1847 struct acpi_device *device)
1840{ 1848{
1849 if (boot_option_idle_override)
1850 return 0;
1851
1841#ifdef CONFIG_CPU_IDLE 1852#ifdef CONFIG_CPU_IDLE
1842 if ((pr->flags.power) && (!boot_option_idle_override)) 1853 if (pr->flags.power)
1843 cpuidle_unregister_device(&pr->power.dev); 1854 cpuidle_unregister_device(&pr->power.dev);
1844#endif 1855#endif
1845 pr->flags.power_setup_done = 0; 1856 pr->flags.power_setup_done = 0;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index b477a4be8a69..d80b2d1441af 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -411,6 +411,7 @@ EXPORT_SYMBOL(acpi_processor_notify_smm);
 
 static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_processor_perf_fops = {
+    .owner = THIS_MODULE,
     .open = acpi_processor_perf_open_fs,
     .read = seq_read,
     .llseek = seq_lseek,
@@ -456,7 +457,6 @@ static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
456 457
457static void acpi_cpufreq_add_file(struct acpi_processor *pr) 458static void acpi_cpufreq_add_file(struct acpi_processor *pr)
458{ 459{
459 struct proc_dir_entry *entry = NULL;
460 struct acpi_device *device = NULL; 460 struct acpi_device *device = NULL;
461 461
462 462
@@ -464,14 +464,9 @@ static void acpi_cpufreq_add_file(struct acpi_processor *pr)
464 return; 464 return;
465 465
466 /* add file 'performance' [R/W] */ 466 /* add file 'performance' [R/W] */
467 entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE, 467 proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO,
468 S_IFREG | S_IRUGO, 468 acpi_device_dir(device),
469 acpi_device_dir(device)); 469 &acpi_processor_perf_fops, acpi_driver_data(device));
470 if (entry){
471 entry->proc_fops = &acpi_processor_perf_fops;
472 entry->data = acpi_driver_data(device);
473 entry->owner = THIS_MODULE;
474 }
475 return; 470 return;
476} 471}
477 472
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 9cb43f52f7b6..ef34b18f95ca 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -97,7 +97,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
97#define CPUFREQ_THERMAL_MIN_STEP 0 97#define CPUFREQ_THERMAL_MIN_STEP 0
98#define CPUFREQ_THERMAL_MAX_STEP 3 98#define CPUFREQ_THERMAL_MAX_STEP 3
99 99
100static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS]; 100static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
101static unsigned int acpi_thermal_cpufreq_is_init = 0; 101static unsigned int acpi_thermal_cpufreq_is_init = 0;
102 102
103static int cpu_has_cpufreq(unsigned int cpu) 103static int cpu_has_cpufreq(unsigned int cpu)
@@ -113,9 +113,9 @@ static int acpi_thermal_cpufreq_increase(unsigned int cpu)
113 if (!cpu_has_cpufreq(cpu)) 113 if (!cpu_has_cpufreq(cpu))
114 return -ENODEV; 114 return -ENODEV;
115 115
116 if (cpufreq_thermal_reduction_pctg[cpu] < 116 if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
117 CPUFREQ_THERMAL_MAX_STEP) { 117 CPUFREQ_THERMAL_MAX_STEP) {
118 cpufreq_thermal_reduction_pctg[cpu]++; 118 per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
119 cpufreq_update_policy(cpu); 119 cpufreq_update_policy(cpu);
120 return 0; 120 return 0;
121 } 121 }
@@ -128,14 +128,14 @@ static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
128 if (!cpu_has_cpufreq(cpu)) 128 if (!cpu_has_cpufreq(cpu))
129 return -ENODEV; 129 return -ENODEV;
130 130
131 if (cpufreq_thermal_reduction_pctg[cpu] > 131 if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
132 (CPUFREQ_THERMAL_MIN_STEP + 1)) 132 (CPUFREQ_THERMAL_MIN_STEP + 1))
133 cpufreq_thermal_reduction_pctg[cpu]--; 133 per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
134 else 134 else
135 cpufreq_thermal_reduction_pctg[cpu] = 0; 135 per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
136 cpufreq_update_policy(cpu); 136 cpufreq_update_policy(cpu);
137 /* We reached max freq again and can leave passive mode */ 137 /* We reached max freq again and can leave passive mode */
138 return !cpufreq_thermal_reduction_pctg[cpu]; 138 return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
139} 139}
140 140
141static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb, 141static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
@@ -147,9 +147,10 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
147 if (event != CPUFREQ_ADJUST) 147 if (event != CPUFREQ_ADJUST)
148 goto out; 148 goto out;
149 149
150 max_freq = 150 max_freq = (
151 (policy->cpuinfo.max_freq * 151 policy->cpuinfo.max_freq *
152 (100 - cpufreq_thermal_reduction_pctg[policy->cpu] * 20)) / 100; 152 (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
153 ) / 100;
153 154
154 cpufreq_verify_within_limits(policy, 0, max_freq); 155 cpufreq_verify_within_limits(policy, 0, max_freq);
155 156
@@ -174,7 +175,7 @@ static int cpufreq_get_cur_state(unsigned int cpu)
174 if (!cpu_has_cpufreq(cpu)) 175 if (!cpu_has_cpufreq(cpu))
175 return 0; 176 return 0;
176 177
177 return cpufreq_thermal_reduction_pctg[cpu]; 178 return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
178} 179}
179 180
180static int cpufreq_set_cur_state(unsigned int cpu, int state) 181static int cpufreq_set_cur_state(unsigned int cpu, int state)
@@ -182,7 +183,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
182 if (!cpu_has_cpufreq(cpu)) 183 if (!cpu_has_cpufreq(cpu))
183 return 0; 184 return 0;
184 185
185 cpufreq_thermal_reduction_pctg[cpu] = state; 186 per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
186 cpufreq_update_policy(cpu); 187 cpufreq_update_policy(cpu);
187 return 0; 188 return 0;
188} 189}
@@ -191,8 +192,9 @@ void acpi_thermal_cpufreq_init(void)
191{ 192{
192 int i; 193 int i;
193 194
194 for (i = 0; i < NR_CPUS; i++) 195 for (i = 0; i < nr_cpu_ids; i++)
195 cpufreq_thermal_reduction_pctg[i] = 0; 196 if (cpu_present(i))
197 per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
196 198
197 i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, 199 i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
198 CPUFREQ_POLICY_NOTIFIER); 200 CPUFREQ_POLICY_NOTIFIER);
@@ -507,6 +509,7 @@ static ssize_t acpi_processor_write_limit(struct file * file,
507} 509}
508 510
509struct file_operations acpi_processor_limit_fops = { 511struct file_operations acpi_processor_limit_fops = {
512 .owner = THIS_MODULE,
510 .open = acpi_processor_limit_open_fs, 513 .open = acpi_processor_limit_open_fs,
511 .read = seq_read, 514 .read = seq_read,
512 .write = acpi_processor_write_limit, 515 .write = acpi_processor_write_limit,
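The processor_thermal.c changes replace an NR_CPUS-sized array with a per-CPU variable, so the storage scales with the CPUs that actually exist and is accessed through per_cpu(). The generic shape of that conversion, with placeholder names rather than this file's cpufreq_thermal_reduction_pctg:

  #include <linux/percpu.h>

  /* was: static unsigned int reduction_pctg[NR_CPUS]; */
  static DEFINE_PER_CPU(unsigned int, reduction_pctg);

  static void bump_reduction(unsigned int cpu)
  {
          /* was: reduction_pctg[cpu]++ */
          per_cpu(reduction_pctg, cpu)++;
  }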
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0bba3a914e86..bb06738860c4 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -1252,6 +1252,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
 }
 
 struct file_operations acpi_processor_throttling_fops = {
+    .owner = THIS_MODULE,
     .open = acpi_processor_throttling_open_fs,
     .read = seq_read,
     .write = acpi_processor_write_throttling,
diff --git a/drivers/acpi/resources/rsaddr.c b/drivers/acpi/resources/rsaddr.c
index 271e61509eeb..7f96332822bf 100644
--- a/drivers/acpi/resources/rsaddr.c
+++ b/drivers/acpi/resources/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 0dd2ce8a3475..8a112d11d491 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -73,7 +73,7 @@ acpi_rs_stream_option_length(u32 resource_length, u32 minimum_total_length);
73 73
74static u8 acpi_rs_count_set_bits(u16 bit_field) 74static u8 acpi_rs_count_set_bits(u16 bit_field)
75{ 75{
76 u8 bits_set; 76 acpi_native_uint bits_set;
77 77
78 ACPI_FUNCTION_ENTRY(); 78 ACPI_FUNCTION_ENTRY();
79 79
@@ -81,10 +81,10 @@ static u8 acpi_rs_count_set_bits(u16 bit_field)
81 81
82 /* Zero the least significant bit that is set */ 82 /* Zero the least significant bit that is set */
83 83
84 bit_field &= (bit_field - 1); 84 bit_field &= (u16) (bit_field - 1);
85 } 85 }
86 86
87 return (bits_set); 87 return ((u8) bits_set);
88} 88}
89 89
90/******************************************************************************* 90/*******************************************************************************
@@ -211,6 +211,24 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
211 * variable-length fields 211 * variable-length fields
212 */ 212 */
213 switch (resource->type) { 213 switch (resource->type) {
214 case ACPI_RESOURCE_TYPE_IRQ:
215
216 /* Length can be 3 or 2 */
217
218 if (resource->data.irq.descriptor_length == 2) {
219 total_size--;
220 }
221 break;
222
223 case ACPI_RESOURCE_TYPE_START_DEPENDENT:
224
225 /* Length can be 1 or 0 */
226
227 if (resource->data.irq.descriptor_length == 0) {
228 total_size--;
229 }
230 break;
231
214 case ACPI_RESOURCE_TYPE_VENDOR: 232 case ACPI_RESOURCE_TYPE_VENDOR:
215 /* 233 /*
216 * Vendor Defined Resource: 234 * Vendor Defined Resource:
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index 50da494c3ee2..faddaee1bc07 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index 46da116a4030..6bbbb7b8941a 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -87,8 +87,10 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
87 * 87 *
88 ******************************************************************************/ 88 ******************************************************************************/
89 89
90struct acpi_rsdump_info acpi_rs_dump_irq[6] = { 90struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
91 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL}, 91 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
92 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
93 "Descriptor Length", NULL},
92 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering", 94 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
93 acpi_gbl_he_decode}, 95 acpi_gbl_he_decode},
94 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity", 96 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
@@ -115,9 +117,11 @@ struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
115 NULL} 117 NULL}
116}; 118};
117 119
118struct acpi_rsdump_info acpi_rs_dump_start_dpf[3] = { 120struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
119 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf), 121 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
120 "Start-Dependent-Functions", NULL}, 122 "Start-Dependent-Functions", NULL},
123 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
124 "Descriptor Length", NULL},
121 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority), 125 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
122 "Compatibility Priority", acpi_gbl_config_decode}, 126 "Compatibility Priority", acpi_gbl_config_decode},
123 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness), 127 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
index 2c2adb6292c1..3f0a1fedbe0e 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/resources/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsio.c b/drivers/acpi/resources/rsio.c
index b297bc3e4419..b66d42e7402e 100644
--- a/drivers/acpi/resources/rsio.c
+++ b/drivers/acpi/resources/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -185,7 +185,7 @@ struct acpi_rsconvert_info acpi_rs_convert_end_tag[2] = {
185 * 185 *
186 ******************************************************************************/ 186 ******************************************************************************/
187 187
188struct acpi_rsconvert_info acpi_rs_get_start_dpf[5] = { 188struct acpi_rsconvert_info acpi_rs_get_start_dpf[6] = {
189 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_START_DEPENDENT, 189 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_START_DEPENDENT,
190 ACPI_RS_SIZE(struct acpi_resource_start_dependent), 190 ACPI_RS_SIZE(struct acpi_resource_start_dependent),
191 ACPI_RSC_TABLE_SIZE(acpi_rs_get_start_dpf)}, 191 ACPI_RSC_TABLE_SIZE(acpi_rs_get_start_dpf)},
@@ -196,6 +196,12 @@ struct acpi_rsconvert_info acpi_rs_get_start_dpf[5] = {
196 ACPI_ACCEPTABLE_CONFIGURATION, 196 ACPI_ACCEPTABLE_CONFIGURATION,
197 2}, 197 2},
198 198
199 /* Get the descriptor length (0 or 1 for Start Dpf descriptor) */
200
201 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
202 AML_OFFSET(start_dpf.descriptor_type),
203 0},
204
199 /* All done if there is no flag byte present in the descriptor */ 205 /* All done if there is no flag byte present in the descriptor */
200 206
201 {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 1}, 207 {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 1},
@@ -219,7 +225,9 @@ struct acpi_rsconvert_info acpi_rs_get_start_dpf[5] = {
219 * 225 *
220 ******************************************************************************/ 226 ******************************************************************************/
221 227
222struct acpi_rsconvert_info acpi_rs_set_start_dpf[6] = { 228struct acpi_rsconvert_info acpi_rs_set_start_dpf[10] = {
229 /* Start with a default descriptor of length 1 */
230
223 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_START_DEPENDENT, 231 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_START_DEPENDENT,
224 sizeof(struct aml_resource_start_dependent), 232 sizeof(struct aml_resource_start_dependent),
225 ACPI_RSC_TABLE_SIZE(acpi_rs_set_start_dpf)}, 233 ACPI_RSC_TABLE_SIZE(acpi_rs_set_start_dpf)},
@@ -236,6 +244,33 @@ struct acpi_rsconvert_info acpi_rs_set_start_dpf[6] = {
236 AML_OFFSET(start_dpf.flags), 244 AML_OFFSET(start_dpf.flags),
237 2}, 245 2},
238 /* 246 /*
247 * All done if the output descriptor length is required to be 1
248 * (i.e., optimization to 0 bytes cannot be attempted)
249 */
250 {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
251 ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
252 1},
253
254 /* Set length to 0 bytes (no flags byte) */
255
256 {ACPI_RSC_LENGTH, 0, 0,
257 sizeof(struct aml_resource_start_dependent_noprio)},
258
259 /*
260 * All done if the output descriptor length is required to be 0.
261 *
262 * TBD: Perhaps we should check for error if input flags are not
263 * compatible with a 0-byte descriptor.
264 */
265 {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
266 ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
267 0},
268
269 /* Reset length to 1 byte (descriptor with flags byte) */
270
271 {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_start_dependent)},
272
273 /*
239 * All done if flags byte is necessary -- if either priority value 274 * All done if flags byte is necessary -- if either priority value
240 * is not ACPI_ACCEPTABLE_CONFIGURATION 275 * is not ACPI_ACCEPTABLE_CONFIGURATION
241 */ 276 */
diff --git a/drivers/acpi/resources/rsirq.c b/drivers/acpi/resources/rsirq.c
index 5657f7b95039..a8805efc0366 100644
--- a/drivers/acpi/resources/rsirq.c
+++ b/drivers/acpi/resources/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("rsirq")
52 * acpi_rs_get_irq 52 * acpi_rs_get_irq
53 * 53 *
54 ******************************************************************************/ 54 ******************************************************************************/
55struct acpi_rsconvert_info acpi_rs_get_irq[7] = { 55struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
56 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ, 56 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ,
57 ACPI_RS_SIZE(struct acpi_resource_irq), 57 ACPI_RS_SIZE(struct acpi_resource_irq),
58 ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)}, 58 ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)},
@@ -69,6 +69,12 @@ struct acpi_rsconvert_info acpi_rs_get_irq[7] = {
69 ACPI_EDGE_SENSITIVE, 69 ACPI_EDGE_SENSITIVE,
70 1}, 70 1},
71 71
72 /* Get the descriptor length (2 or 3 for IRQ descriptor) */
73
74 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.irq.descriptor_length),
75 AML_OFFSET(irq.descriptor_type),
76 0},
77
72 /* All done if no flag byte present in descriptor */ 78 /* All done if no flag byte present in descriptor */
73 79
74 {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3}, 80 {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3},
@@ -94,7 +100,9 @@ struct acpi_rsconvert_info acpi_rs_get_irq[7] = {
94 * 100 *
95 ******************************************************************************/ 101 ******************************************************************************/
96 102
97struct acpi_rsconvert_info acpi_rs_set_irq[9] = { 103struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
104 /* Start with a default descriptor of length 3 */
105
98 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ, 106 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ,
99 sizeof(struct aml_resource_irq), 107 sizeof(struct aml_resource_irq),
100 ACPI_RSC_TABLE_SIZE(acpi_rs_set_irq)}, 108 ACPI_RSC_TABLE_SIZE(acpi_rs_set_irq)},
@@ -105,7 +113,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[9] = {
105 AML_OFFSET(irq.irq_mask), 113 AML_OFFSET(irq.irq_mask),
106 ACPI_RS_OFFSET(data.irq.interrupt_count)}, 114 ACPI_RS_OFFSET(data.irq.interrupt_count)},
107 115
108 /* Set the flags byte by default */ 116 /* Set the flags byte */
109 117
110 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering), 118 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
111 AML_OFFSET(irq.flags), 119 AML_OFFSET(irq.flags),
@@ -118,6 +126,33 @@ struct acpi_rsconvert_info acpi_rs_set_irq[9] = {
118 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable), 126 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
119 AML_OFFSET(irq.flags), 127 AML_OFFSET(irq.flags),
120 4}, 128 4},
129
130 /*
131 * All done if the output descriptor length is required to be 3
132 * (i.e., optimization to 2 bytes cannot be attempted)
133 */
134 {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
135 ACPI_RS_OFFSET(data.irq.descriptor_length),
136 3},
137
138 /* Set length to 2 bytes (no flags byte) */
139
140 {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)},
141
142 /*
143 * All done if the output descriptor length is required to be 2.
144 *
145 * TBD: Perhaps we should check for error if input flags are not
146 * compatible with a 2-byte descriptor.
147 */
148 {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
149 ACPI_RS_OFFSET(data.irq.descriptor_length),
150 2},
151
152 /* Reset length to 3 bytes (descriptor with flags byte) */
153
154 {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq)},
155
121 /* 156 /*
122 * Check if the flags byte is necessary. Not needed if the flags are: 157 * Check if the flags byte is necessary. Not needed if the flags are:
123 * ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH, ACPI_EXCLUSIVE 158 * ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH, ACPI_EXCLUSIVE
@@ -134,7 +169,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[9] = {
134 ACPI_RS_OFFSET(data.irq.sharable), 169 ACPI_RS_OFFSET(data.irq.sharable),
135 ACPI_EXCLUSIVE}, 170 ACPI_EXCLUSIVE},
136 171
137 /* irq_no_flags() descriptor can be used */ 172 /* We can optimize to a 2-byte irq_no_flags() descriptor */
138 173
139 {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)} 174 {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)}
140}; 175};
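For background on the 2- versus 3-byte forms handled above: the AML small IRQ descriptor always carries a 2-byte interrupt mask and may carry one optional flags byte, and the new descriptor_length field remembers which form the original AML used so the set-conversion table can reproduce it. A standalone illustration of the two encodings (not ACPICA code; byte layout per the ACPI small-resource format):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Small IRQ descriptor: tag = (item type 0x04 << 3) | data length,
 * then the 2-byte IRQ mask, then an optional flags byte. */
static size_t encode_irq_descriptor(uint8_t *out, uint16_t irq_mask,
				    uint8_t flags, int include_flags)
{
	size_t data_len = include_flags ? 3 : 2;

	out[0] = (uint8_t)(0x20 | data_len);	/* 0x22 or 0x23 */
	out[1] = (uint8_t)(irq_mask & 0xff);
	out[2] = (uint8_t)(irq_mask >> 8);
	if (include_flags)
		out[3] = flags;
	return data_len + 1;			/* total bytes, including the tag */
}

int main(void)
{
	uint8_t buf[4];
	size_t i, n = encode_irq_descriptor(buf, 1u << 9, 0, 0);	/* IRQ 9, short form */

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}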
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index ca21e4660c79..b78c7e797a19 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsmemory.c b/drivers/acpi/resources/rsmemory.c
index 521eab7dd8df..63b21abd90bb 100644
--- a/drivers/acpi/resources/rsmemory.c
+++ b/drivers/acpi/resources/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index c7081afa893a..de1ac3881b22 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -497,6 +497,17 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
497 } 497 }
498 break; 498 break;
499 499
500 case ACPI_RSC_EXIT_EQ:
501 /*
502 * Control - Exit conversion if equal
503 */
504 if (*ACPI_ADD_PTR(u8, resource,
505 COMPARE_TARGET(info)) ==
506 COMPARE_VALUE(info)) {
507 goto exit;
508 }
509 break;
510
500 default: 511 default:
501 512
502 ACPI_ERROR((AE_INFO, "Invalid conversion opcode")); 513 ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
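The rsmisc.c hunk teaches the table-driven resource-to-AML converter a new control opcode, ACPI_RSC_EXIT_EQ, which aborts the walk when a byte in the source resource equals a given value. A toy interpreter showing that control-flow pattern (types and names are illustrative, not ACPICA's):

#include <stddef.h>
#include <stdint.h>

enum op { OP_SET_BYTE, OP_EXIT_EQ, OP_END };

struct step {
	enum op	opcode;
	size_t	offset;	/* offset into source (for compares) or destination (for stores) */
	uint8_t	value;	/* value to store, or to compare against */
};

static void run_steps(const struct step *steps, const uint8_t *src, uint8_t *dst)
{
	for (; steps->opcode != OP_END; steps++) {
		switch (steps->opcode) {
		case OP_SET_BYTE:
			dst[steps->offset] = steps->value;
			break;
		case OP_EXIT_EQ:
			/* Stop the walk early when the source field matches */
			if (src[steps->offset] == steps->value)
				return;
			break;
		default:
			return;
		}
	}
}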
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index 11c0bd7b9cfd..befe2302f41b 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -97,17 +97,17 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
97u16 acpi_rs_encode_bitmask(u8 * list, u8 count) 97u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
98{ 98{
99 acpi_native_uint i; 99 acpi_native_uint i;
100 u16 mask; 100 acpi_native_uint mask;
101 101
102 ACPI_FUNCTION_ENTRY(); 102 ACPI_FUNCTION_ENTRY();
103 103
104 /* Encode the list into a single bitmask */ 104 /* Encode the list into a single bitmask */
105 105
106 for (i = 0, mask = 0; i < count; i++) { 106 for (i = 0, mask = 0; i < count; i++) {
107 mask |= (0x0001 << list[i]); 107 mask |= (0x1 << list[i]);
108 } 108 }
109 109
110 return (mask); 110 return ((u16) mask);
111} 111}
112 112
113/******************************************************************************* 113/*******************************************************************************
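The rsutils.c hunk widens the accumulator used by acpi_rs_encode_bitmask and casts back to u16 on return. A standalone restatement of that bitmask encoding (names and widths chosen for illustration):

#include <stdint.h>
#include <stdio.h>

/* Encode a list of IRQ numbers (each 0-15) into a 16-bit mask. */
static uint16_t encode_bitmask(const uint8_t *list, uint8_t count)
{
	unsigned int mask = 0;	/* widened accumulator, narrowed on return */

	for (uint8_t i = 0; i < count; i++)
		mask |= 1u << list[i];
	return (uint16_t)mask;
}

int main(void)
{
	uint8_t irqs[] = { 3, 4, 9 };

	printf("mask = 0x%04x\n", encode_bitmask(irqs, 3));	/* prints 0x0218 */
	return 0;
}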
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 4c3fd4cdaf73..f59f4c4e034c 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 585ae3c9c8ea..10a36512647c 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -483,8 +483,6 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
483 struct file_operations *state_fops, 483 struct file_operations *state_fops,
484 struct file_operations *alarm_fops, void *data) 484 struct file_operations *alarm_fops, void *data)
485{ 485{
486 struct proc_dir_entry *entry = NULL;
487
488 if (!*dir) { 486 if (!*dir) {
489 *dir = proc_mkdir(dir_name, parent_dir); 487 *dir = proc_mkdir(dir_name, parent_dir);
490 if (!*dir) { 488 if (!*dir) {
@@ -494,34 +492,19 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
494 } 492 }
495 493
496 /* 'info' [R] */ 494 /* 'info' [R] */
497 if (info_fops) { 495 if (info_fops)
498 entry = create_proc_entry(ACPI_SBS_FILE_INFO, S_IRUGO, *dir); 496 proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
499 if (entry) { 497 info_fops, data);
500 entry->proc_fops = info_fops;
501 entry->data = data;
502 entry->owner = THIS_MODULE;
503 }
504 }
505 498
506 /* 'state' [R] */ 499 /* 'state' [R] */
507 if (state_fops) { 500 if (state_fops)
508 entry = create_proc_entry(ACPI_SBS_FILE_STATE, S_IRUGO, *dir); 501 proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
509 if (entry) { 502 state_fops, data);
510 entry->proc_fops = state_fops;
511 entry->data = data;
512 entry->owner = THIS_MODULE;
513 }
514 }
515 503
516 /* 'alarm' [R/W] */ 504 /* 'alarm' [R/W] */
517 if (alarm_fops) { 505 if (alarm_fops)
518 entry = create_proc_entry(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir); 506 proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
519 if (entry) { 507 alarm_fops, data);
520 entry->proc_fops = alarm_fops;
521 entry->data = data;
522 entry->owner = THIS_MODULE;
523 }
524 }
525 return 0; 508 return 0;
526} 509}
527 510
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e6ce262b5d44..6d85289f1c12 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -677,9 +677,8 @@ acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
677 device->wakeup.resources.count = package->package.count - 2; 677 device->wakeup.resources.count = package->package.count - 2;
678 for (i = 0; i < device->wakeup.resources.count; i++) { 678 for (i = 0; i < device->wakeup.resources.count; i++) {
679 element = &(package->package.elements[i + 2]); 679 element = &(package->package.elements[i + 2]);
680 if (element->type != ACPI_TYPE_ANY) { 680 if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
681 return AE_BAD_DATA; 681 return AE_BAD_DATA;
682 }
683 682
684 device->wakeup.resources.handles[i] = element->reference.handle; 683 device->wakeup.resources.handles[i] = element->reference.handle;
685 } 684 }
@@ -692,6 +691,9 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
692 acpi_status status = 0; 691 acpi_status status = 0;
693 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 692 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
694 union acpi_object *package = NULL; 693 union acpi_object *package = NULL;
694 union acpi_object in_arg[3];
695 struct acpi_object_list arg_list = { 3, in_arg };
696 acpi_status psw_status = AE_OK;
695 697
696 struct acpi_device_id button_device_ids[] = { 698 struct acpi_device_id button_device_ids[] = {
697 {"PNP0C0D", 0}, 699 {"PNP0C0D", 0},
@@ -700,7 +702,6 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
700 {"", 0}, 702 {"", 0},
701 }; 703 };
702 704
703
704 /* _PRW */ 705 /* _PRW */
705 status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer); 706 status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer);
706 if (ACPI_FAILURE(status)) { 707 if (ACPI_FAILURE(status)) {
@@ -718,6 +719,45 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
718 kfree(buffer.pointer); 719 kfree(buffer.pointer);
719 720
720 device->wakeup.flags.valid = 1; 721 device->wakeup.flags.valid = 1;
722 /* Call _PSW/_DSW object to disable its ability to wake the sleeping
723 * system for the ACPI device with the _PRW object.
 724 * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
 725 * So it is necessary to call the _DSW object first. Only when it is not
 726 * present will the _PSW object be used.
727 */
728 /*
 729 * Three arguments are needed for the _DSW object.
 730 * Argument 0: enable/disable the wake capabilities
 731 * When the _DSW object is called to disable the wake capabilities, only
 732 * the first argument is relevant. The values of the other two arguments
 733 * are meaningless.
734 */
735 in_arg[0].type = ACPI_TYPE_INTEGER;
736 in_arg[0].integer.value = 0;
737 in_arg[1].type = ACPI_TYPE_INTEGER;
738 in_arg[1].integer.value = 0;
739 in_arg[2].type = ACPI_TYPE_INTEGER;
740 in_arg[2].integer.value = 0;
741 psw_status = acpi_evaluate_object(device->handle, "_DSW",
742 &arg_list, NULL);
743 if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
744 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in evaluate _DSW\n"));
745 /*
 746 * When the _DSW object is not present, OSPM will call the _PSW object.
747 */
748 if (psw_status == AE_NOT_FOUND) {
749 /*
 750 * Only one argument is required for the _PSW object.
 751 * Argument 0: enable/disable the wake capabilities
752 */
753 arg_list.count = 1;
754 in_arg[0].integer.value = 0;
755 psw_status = acpi_evaluate_object(device->handle, "_PSW",
756 &arg_list, NULL);
757 if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
758 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in "
759 "evaluate _PSW\n"));
760 }
721 /* Power button, Lid switch always enable wakeup */ 761 /* Power button, Lid switch always enable wakeup */
722 if (!acpi_match_device_ids(device, button_device_ids)) 762 if (!acpi_match_device_ids(device, button_device_ids))
723 device->wakeup.flags.run_wake = 1; 763 device->wakeup.flags.run_wake = 1;
@@ -882,10 +922,7 @@ static void acpi_device_get_busid(struct acpi_device *device,
882static int 922static int
883acpi_video_bus_match(struct acpi_device *device) 923acpi_video_bus_match(struct acpi_device *device)
884{ 924{
885 acpi_handle h_dummy1; 925 acpi_handle h_dummy;
886 acpi_handle h_dummy2;
887 acpi_handle h_dummy3;
888
889 926
890 if (!device) 927 if (!device)
891 return -EINVAL; 928 return -EINVAL;
@@ -895,18 +932,18 @@ acpi_video_bus_match(struct acpi_device *device)
895 */ 932 */
896 933
 897 /* Is this device able to support video switching? */ 934 /* Is this device able to support video switching? */
898 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy1)) && 935 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) &&
899 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy2))) 936 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
900 return 0; 937 return 0;
901 938
 902 /* Is this device able to retrieve a video ROM? */ 939 /* Is this device able to retrieve a video ROM? */
903 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy1))) 940 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
904 return 0; 941 return 0;
905 942
 906 /* Is this device able to configure which video head to be POSTed? */ 943 /* Is this device able to configure which video head to be POSTed? */
907 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy1)) && 944 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy)) &&
908 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy2)) && 945 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy)) &&
909 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy3))) 946 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy)))
910 return 0; 947 return 0;
911 948
912 return -ENODEV; 949 return -ENODEV;
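The scan.c additions evaluate _DSW first and fall back to _PSW only when _DSW is absent, disabling the device's wake capability at enumeration time. A condensed restatement of that pattern (helper name is hypothetical; the ACPI calls are the ones used in the hunk):

#include <acpi/acpi_bus.h>

static void example_disable_wakeup(acpi_handle handle)
{
	union acpi_object args[3] = {
		{ .integer = { .type = ACPI_TYPE_INTEGER, .value = 0 } },
		{ .integer = { .type = ACPI_TYPE_INTEGER, .value = 0 } },
		{ .integer = { .type = ACPI_TYPE_INTEGER, .value = 0 } },
	};
	struct acpi_object_list arg_list = { 3, args };
	acpi_status status;

	/* ACPI 3.0 prefers _DSW; argument 0 == 0 means "disable wake" */
	status = acpi_evaluate_object(handle, "_DSW", &arg_list, NULL);
	if (status != AE_NOT_FOUND)
		return;

	/* No _DSW on this device: fall back to the single-argument _PSW */
	arg_list.count = 1;
	acpi_evaluate_object(handle, "_PSW", &arg_list, NULL);
}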
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 71183eea7906..c3b0cd88d09f 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -51,7 +51,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
51} 51}
52 52
53#ifdef CONFIG_SUSPEND 53#ifdef CONFIG_SUSPEND
54static struct platform_suspend_ops acpi_pm_ops; 54static struct platform_suspend_ops acpi_suspend_ops;
55 55
56extern void do_suspend_lowlevel(void); 56extern void do_suspend_lowlevel(void);
57 57
@@ -65,11 +65,11 @@ static u32 acpi_suspend_states[] = {
65static int init_8259A_after_S1; 65static int init_8259A_after_S1;
66 66
67/** 67/**
68 * acpi_pm_begin - Set the target system sleep state to the state 68 * acpi_suspend_begin - Set the target system sleep state to the state
69 * associated with given @pm_state, if supported. 69 * associated with given @pm_state, if supported.
70 */ 70 */
71 71
72static int acpi_pm_begin(suspend_state_t pm_state) 72static int acpi_suspend_begin(suspend_state_t pm_state)
73{ 73{
74 u32 acpi_state = acpi_suspend_states[pm_state]; 74 u32 acpi_state = acpi_suspend_states[pm_state];
75 int error = 0; 75 int error = 0;
@@ -85,13 +85,13 @@ static int acpi_pm_begin(suspend_state_t pm_state)
85} 85}
86 86
87/** 87/**
88 * acpi_pm_prepare - Do preliminary suspend work. 88 * acpi_suspend_prepare - Do preliminary suspend work.
89 * 89 *
90 * If necessary, set the firmware waking vector and do arch-specific 90 * If necessary, set the firmware waking vector and do arch-specific
91 * nastiness to get the wakeup code to the waking vector. 91 * nastiness to get the wakeup code to the waking vector.
92 */ 92 */
93 93
94static int acpi_pm_prepare(void) 94static int acpi_suspend_prepare(void)
95{ 95{
96 int error = acpi_sleep_prepare(acpi_target_sleep_state); 96 int error = acpi_sleep_prepare(acpi_target_sleep_state);
97 97
@@ -104,7 +104,7 @@ static int acpi_pm_prepare(void)
104} 104}
105 105
106/** 106/**
107 * acpi_pm_enter - Actually enter a sleep state. 107 * acpi_suspend_enter - Actually enter a sleep state.
108 * @pm_state: ignored 108 * @pm_state: ignored
109 * 109 *
110 * Flush caches and go to sleep. For STR we have to call arch-specific 110 * Flush caches and go to sleep. For STR we have to call arch-specific
@@ -112,7 +112,7 @@ static int acpi_pm_prepare(void)
112 * It's unfortunate, but it works. Please fix if you're feeling frisky. 112 * It's unfortunate, but it works. Please fix if you're feeling frisky.
113 */ 113 */
114 114
115static int acpi_pm_enter(suspend_state_t pm_state) 115static int acpi_suspend_enter(suspend_state_t pm_state)
116{ 116{
117 acpi_status status = AE_OK; 117 acpi_status status = AE_OK;
118 unsigned long flags = 0; 118 unsigned long flags = 0;
@@ -169,13 +169,13 @@ static int acpi_pm_enter(suspend_state_t pm_state)
169} 169}
170 170
171/** 171/**
172 * acpi_pm_finish - Instruct the platform to leave a sleep state. 172 * acpi_suspend_finish - Instruct the platform to leave a sleep state.
173 * 173 *
174 * This is called after we wake back up (or if entering the sleep state 174 * This is called after we wake back up (or if entering the sleep state
175 * failed). 175 * failed).
176 */ 176 */
177 177
178static void acpi_pm_finish(void) 178static void acpi_suspend_finish(void)
179{ 179{
180 u32 acpi_state = acpi_target_sleep_state; 180 u32 acpi_state = acpi_target_sleep_state;
181 181
@@ -196,19 +196,19 @@ static void acpi_pm_finish(void)
196} 196}
197 197
198/** 198/**
199 * acpi_pm_end - Finish up suspend sequence. 199 * acpi_suspend_end - Finish up suspend sequence.
200 */ 200 */
201 201
202static void acpi_pm_end(void) 202static void acpi_suspend_end(void)
203{ 203{
204 /* 204 /*
205 * This is necessary in case acpi_pm_finish() is not called during a 205 * This is necessary in case acpi_suspend_finish() is not called during a
206 * failing transition to a sleep state. 206 * failing transition to a sleep state.
207 */ 207 */
208 acpi_target_sleep_state = ACPI_STATE_S0; 208 acpi_target_sleep_state = ACPI_STATE_S0;
209} 209}
210 210
211static int acpi_pm_state_valid(suspend_state_t pm_state) 211static int acpi_suspend_state_valid(suspend_state_t pm_state)
212{ 212{
213 u32 acpi_state; 213 u32 acpi_state;
214 214
@@ -224,13 +224,13 @@ static int acpi_pm_state_valid(suspend_state_t pm_state)
224 } 224 }
225} 225}
226 226
227static struct platform_suspend_ops acpi_pm_ops = { 227static struct platform_suspend_ops acpi_suspend_ops = {
228 .valid = acpi_pm_state_valid, 228 .valid = acpi_suspend_state_valid,
229 .begin = acpi_pm_begin, 229 .begin = acpi_suspend_begin,
230 .prepare = acpi_pm_prepare, 230 .prepare = acpi_suspend_prepare,
231 .enter = acpi_pm_enter, 231 .enter = acpi_suspend_enter,
232 .finish = acpi_pm_finish, 232 .finish = acpi_suspend_finish,
233 .end = acpi_pm_end, 233 .end = acpi_suspend_end,
234}; 234};
235 235
236/* 236/*
@@ -492,7 +492,7 @@ int __init acpi_sleep_init(void)
492 } 492 }
493 } 493 }
494 494
495 suspend_set_ops(&acpi_pm_ops); 495 suspend_set_ops(&acpi_suspend_ops);
496#endif 496#endif
497 497
498#ifdef CONFIG_HIBERNATION 498#ifdef CONFIG_HIBERNATION
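The sleep/main.c changes are a straight rename of the suspend callbacks from acpi_pm_* to acpi_suspend_*, leaving the platform_suspend_ops wiring intact. For reference, the general shape of that wiring (placeholder callbacks, hypothetical names, not the ACPI implementation):

#include <linux/init.h>
#include <linux/suspend.h>

static int example_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}

static int example_enter(suspend_state_t state)
{
	return 0;	/* a real platform would enter the sleep state here */
}

static struct platform_suspend_ops example_suspend_ops = {
	.valid = example_valid,
	.enter = example_enter,
};

static int __init example_init(void)
{
	suspend_set_ops(&example_suspend_ops);
	return 0;
}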
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index f8df5217d477..8a5fe8710513 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -440,6 +440,7 @@ acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
440} 440}
441 441
442static const struct file_operations acpi_system_wakeup_device_fops = { 442static const struct file_operations acpi_system_wakeup_device_fops = {
443 .owner = THIS_MODULE,
443 .open = acpi_system_wakeup_device_open_fs, 444 .open = acpi_system_wakeup_device_open_fs,
444 .read = seq_read, 445 .read = seq_read,
445 .write = acpi_system_write_wakeup_device, 446 .write = acpi_system_write_wakeup_device,
@@ -449,6 +450,7 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
449 450
450#ifdef CONFIG_ACPI_PROCFS 451#ifdef CONFIG_ACPI_PROCFS
451static const struct file_operations acpi_system_sleep_fops = { 452static const struct file_operations acpi_system_sleep_fops = {
453 .owner = THIS_MODULE,
452 .open = acpi_system_sleep_open_fs, 454 .open = acpi_system_sleep_open_fs,
453 .read = seq_read, 455 .read = seq_read,
454 .write = acpi_system_write_sleep, 456 .write = acpi_system_write_sleep,
@@ -459,6 +461,7 @@ static const struct file_operations acpi_system_sleep_fops = {
459 461
460#ifdef HAVE_ACPI_LEGACY_ALARM 462#ifdef HAVE_ACPI_LEGACY_ALARM
461static const struct file_operations acpi_system_alarm_fops = { 463static const struct file_operations acpi_system_alarm_fops = {
464 .owner = THIS_MODULE,
462 .open = acpi_system_alarm_open_fs, 465 .open = acpi_system_alarm_open_fs,
463 .read = seq_read, 466 .read = seq_read,
464 .write = acpi_system_write_alarm, 467 .write = acpi_system_write_alarm,
@@ -477,37 +480,26 @@ static u32 rtc_handler(void *context)
477 480
478static int __init acpi_sleep_proc_init(void) 481static int __init acpi_sleep_proc_init(void)
479{ 482{
480 struct proc_dir_entry *entry = NULL;
481
482 if (acpi_disabled) 483 if (acpi_disabled)
483 return 0; 484 return 0;
484 485
485#ifdef CONFIG_ACPI_PROCFS 486#ifdef CONFIG_ACPI_PROCFS
486 /* 'sleep' [R/W] */ 487 /* 'sleep' [R/W] */
487 entry = 488 proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR,
488 create_proc_entry("sleep", S_IFREG | S_IRUGO | S_IWUSR, 489 acpi_root_dir, &acpi_system_sleep_fops);
489 acpi_root_dir);
490 if (entry)
491 entry->proc_fops = &acpi_system_sleep_fops;
492#endif /* CONFIG_ACPI_PROCFS */ 490#endif /* CONFIG_ACPI_PROCFS */
493 491
494#ifdef HAVE_ACPI_LEGACY_ALARM 492#ifdef HAVE_ACPI_LEGACY_ALARM
495 /* 'alarm' [R/W] */ 493 /* 'alarm' [R/W] */
496 entry = 494 proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
497 create_proc_entry("alarm", S_IFREG | S_IRUGO | S_IWUSR, 495 acpi_root_dir, &acpi_system_alarm_fops);
498 acpi_root_dir);
499 if (entry)
500 entry->proc_fops = &acpi_system_alarm_fops;
501 496
502 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); 497 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
503#endif /* HAVE_ACPI_LEGACY_ALARM */ 498#endif /* HAVE_ACPI_LEGACY_ALARM */
504 499
505 /* 'wakeup device' [R/W] */ 500 /* 'wakeup device' [R/W] */
506 entry = 501 proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
507 create_proc_entry("wakeup", S_IFREG | S_IRUGO | S_IWUSR, 502 acpi_root_dir, &acpi_system_wakeup_device_fops);
508 acpi_root_dir);
509 if (entry)
510 entry->proc_fops = &acpi_system_wakeup_device_fops;
511 503
512 return 0; 504 return 0;
513} 505}
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 4749f379a915..769f24855eb6 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -396,6 +396,7 @@ static int acpi_system_info_open_fs(struct inode *inode, struct file *file)
396} 396}
397 397
398static const struct file_operations acpi_system_info_ops = { 398static const struct file_operations acpi_system_info_ops = {
399 .owner = THIS_MODULE,
399 .open = acpi_system_info_open_fs, 400 .open = acpi_system_info_open_fs,
400 .read = seq_read, 401 .read = seq_read,
401 .llseek = seq_lseek, 402 .llseek = seq_lseek,
@@ -406,6 +407,7 @@ static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
406 loff_t *); 407 loff_t *);
407 408
408static const struct file_operations acpi_system_dsdt_ops = { 409static const struct file_operations acpi_system_dsdt_ops = {
410 .owner = THIS_MODULE,
409 .read = acpi_system_read_dsdt, 411 .read = acpi_system_read_dsdt,
410}; 412};
411 413
@@ -430,6 +432,7 @@ static ssize_t acpi_system_read_fadt(struct file *, char __user *, size_t,
430 loff_t *); 432 loff_t *);
431 433
432static const struct file_operations acpi_system_fadt_ops = { 434static const struct file_operations acpi_system_fadt_ops = {
435 .owner = THIS_MODULE,
433 .read = acpi_system_read_fadt, 436 .read = acpi_system_read_fadt,
434}; 437};
435 438
@@ -454,31 +457,23 @@ static int acpi_system_procfs_init(void)
454{ 457{
455 struct proc_dir_entry *entry; 458 struct proc_dir_entry *entry;
456 int error = 0; 459 int error = 0;
457 char *name;
458 460
459 /* 'info' [R] */ 461 /* 'info' [R] */
460 name = ACPI_SYSTEM_FILE_INFO; 462 entry = proc_create(ACPI_SYSTEM_FILE_INFO, S_IRUGO, acpi_root_dir,
461 entry = create_proc_entry(name, S_IRUGO, acpi_root_dir); 463 &acpi_system_info_ops);
462 if (!entry) 464 if (!entry)
463 goto Error; 465 goto Error;
464 else {
465 entry->proc_fops = &acpi_system_info_ops;
466 }
467 466
468 /* 'dsdt' [R] */ 467 /* 'dsdt' [R] */
469 name = ACPI_SYSTEM_FILE_DSDT; 468 entry = proc_create(ACPI_SYSTEM_FILE_DSDT, S_IRUSR, acpi_root_dir,
470 entry = create_proc_entry(name, S_IRUSR, acpi_root_dir); 469 &acpi_system_dsdt_ops);
471 if (entry) 470 if (!entry)
472 entry->proc_fops = &acpi_system_dsdt_ops;
473 else
474 goto Error; 471 goto Error;
475 472
476 /* 'fadt' [R] */ 473 /* 'fadt' [R] */
477 name = ACPI_SYSTEM_FILE_FADT; 474 entry = proc_create(ACPI_SYSTEM_FILE_FADT, S_IRUSR, acpi_root_dir,
478 entry = create_proc_entry(name, S_IRUSR, acpi_root_dir); 475 &acpi_system_fadt_ops);
479 if (entry) 476 if (!entry)
480 entry->proc_fops = &acpi_system_fadt_ops;
481 else
482 goto Error; 477 goto Error;
483 478
484 Done: 479 Done:
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
index 002bb33003af..949d4114eb9f 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/tables/tbfadt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/tables/tbfind.c
index 058c064948e1..9ca3afc98c80 100644
--- a/drivers/acpi/tables/tbfind.c
+++ b/drivers/acpi/tables/tbfind.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -70,12 +70,22 @@ acpi_tb_find_table(char *signature,
70{ 70{
71 acpi_native_uint i; 71 acpi_native_uint i;
72 acpi_status status; 72 acpi_status status;
73 struct acpi_table_header header;
73 74
74 ACPI_FUNCTION_TRACE(tb_find_table); 75 ACPI_FUNCTION_TRACE(tb_find_table);
75 76
77 /* Normalize the input strings */
78
79 ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
80 ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE);
81 ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
82 ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
83
84 /* Search for the table */
85
76 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) { 86 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
77 if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature), 87 if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
78 signature, ACPI_NAME_SIZE)) { 88 header.signature, ACPI_NAME_SIZE)) {
79 89
80 /* Not the requested table */ 90 /* Not the requested table */
81 91
@@ -104,20 +114,24 @@ acpi_tb_find_table(char *signature,
104 114
105 if (!ACPI_MEMCMP 115 if (!ACPI_MEMCMP
106 (acpi_gbl_root_table_list.tables[i].pointer->signature, 116 (acpi_gbl_root_table_list.tables[i].pointer->signature,
107 signature, ACPI_NAME_SIZE) && (!oem_id[0] 117 header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
108 || 118 ||
109 !ACPI_MEMCMP 119 !ACPI_MEMCMP
110 (acpi_gbl_root_table_list. 120 (acpi_gbl_root_table_list.
111 tables[i].pointer->oem_id, 121 tables[i].pointer->
112 oem_id, ACPI_OEM_ID_SIZE)) 122 oem_id,
123 header.oem_id,
124 ACPI_OEM_ID_SIZE))
113 && (!oem_table_id[0] 125 && (!oem_table_id[0]
114 || !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i]. 126 || !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
115 pointer->oem_table_id, oem_table_id, 127 pointer->oem_table_id,
128 header.oem_table_id,
116 ACPI_OEM_TABLE_ID_SIZE))) { 129 ACPI_OEM_TABLE_ID_SIZE))) {
117 *table_index = i; 130 *table_index = i;
118 131
119 ACPI_DEBUG_PRINT((ACPI_DB_TABLES, 132 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
120 "Found table [%4.4s]\n", signature)); 133 "Found table [%4.4s]\n",
134 header.signature));
121 return_ACPI_STATUS(AE_OK); 135 return_ACPI_STATUS(AE_OK);
122 } 136 }
123 } 137 }
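tbfind.c now copies the caller's signature/OEM strings into a zero-filled acpi_table_header before comparing, so short inputs match the fixed-width, unterminated fields stored in the root table list. A plain-C sketch of that normalization step (structure and sizes simplified, not ACPICA's):

#include <stdio.h>
#include <string.h>

#define NAME_SIZE	4
#define OEM_ID_SIZE	6

struct header_key {
	char signature[NAME_SIZE];
	char oem_id[OEM_ID_SIZE];
};

/* Copy caller-supplied strings into zero-filled fixed-width fields so they
 * can be compared with memcmp against fixed-width table header fields. */
static void normalize_key(struct header_key *key,
			  const char *sig, const char *oem)
{
	memset(key, 0, sizeof(*key));
	strncpy(key->signature, sig, NAME_SIZE);
	strncpy(key->oem_id, oem, OEM_ID_SIZE);
}

int main(void)
{
	struct header_key key;

	normalize_key(&key, "SSDT", "LENOVO");
	printf("%.4s / %.6s\n", key.signature, key.oem_id);
	return 0;
}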
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index 3bc0c67a9283..402f93e1ff20 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -125,13 +125,20 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc,
125 125
126 /* The table must be either an SSDT or a PSDT or an OEMx */ 126 /* The table must be either an SSDT or a PSDT or an OEMx */
127 127
128 if ((!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_PSDT)) 128 if (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_PSDT)&&
129 && 129 !ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)&&
130 (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)) 130 strncmp(table_desc->pointer->signature, "OEM", 3)) {
131 && (strncmp(table_desc->pointer->signature, "OEM", 3))) { 131 /* Check for a printable name */
132 ACPI_ERROR((AE_INFO, 132 if (acpi_ut_valid_acpi_name(
133 "Table has invalid signature [%4.4s], must be SSDT, PSDT or OEMx", 133 *(u32 *) table_desc->pointer->signature)) {
134 table_desc->pointer->signature)); 134 ACPI_ERROR((AE_INFO, "Table has invalid signature "
135 "[%4.4s], must be SSDT or PSDT",
136 table_desc->pointer->signature));
137 } else {
138 ACPI_ERROR((AE_INFO, "Table has invalid signature "
139 "(0x%8.8X), must be SSDT or PSDT",
140 *(u32 *) table_desc->pointer->signature));
141 }
135 return_ACPI_STATUS(AE_BAD_SIGNATURE); 142 return_ACPI_STATUS(AE_BAD_SIGNATURE);
136 } 143 }
137 144
@@ -162,6 +169,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc,
162 169
163 acpi_tb_delete_table(table_desc); 170 acpi_tb_delete_table(table_desc);
164 *table_index = i; 171 *table_index = i;
172 status = AE_ALREADY_EXISTS;
165 goto release; 173 goto release;
166 } 174 }
167 175
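tbinstal.c now distinguishes between a wrong-but-printable signature and garbage bytes when rejecting a table, using acpi_ut_valid_acpi_name() to pick the error message. A rough standalone approximation of that printability test (the real ACPICA rule also constrains the first character to a letter or underscore):

#include <stdio.h>

static int valid_acpi_name(const char sig[4])
{
	int i;

	for (i = 0; i < 4; i++) {
		char c = sig[i];

		/* ACPI name characters: uppercase letters, digits, underscore */
		if (!((c >= 'A' && c <= 'Z') ||
		      (c >= '0' && c <= '9') || c == '_'))
			return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d %d\n", valid_acpi_name("OEM1"), valid_acpi_name("\x01XYZ"));
	return 0;
}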
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
index 010f19652f80..bc019b9b6a68 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/tables/tbutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -212,7 +212,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
212 212
213 if (checksum) { 213 if (checksum) {
214 ACPI_WARNING((AE_INFO, 214 ACPI_WARNING((AE_INFO,
215 "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X", 215 "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
216 table->signature, table->checksum, 216 table->signature, table->checksum,
217 (u8) (table->checksum - checksum))); 217 (u8) (table->checksum - checksum)));
218 218
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index a9e3331fee5d..fb57b93c2495 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2007, R. Byron Moore 9 * Copyright (C) 2000 - 2008, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -635,6 +635,95 @@ acpi_status acpi_load_tables(void)
635ACPI_EXPORT_SYMBOL(acpi_load_tables) 635ACPI_EXPORT_SYMBOL(acpi_load_tables)
636 636
637 637
638/*******************************************************************************
639 *
640 * FUNCTION: acpi_install_table_handler
641 *
642 * PARAMETERS: Handler - Table event handler
643 * Context - Value passed to the handler on each event
644 *
645 * RETURN: Status
646 *
647 * DESCRIPTION: Install table event handler
648 *
649 ******************************************************************************/
650acpi_status
651acpi_install_table_handler(acpi_tbl_handler handler, void *context)
652{
653 acpi_status status;
654
655 ACPI_FUNCTION_TRACE(acpi_install_table_handler);
656
657 if (!handler) {
658 return_ACPI_STATUS(AE_BAD_PARAMETER);
659 }
660
661 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
662 if (ACPI_FAILURE(status)) {
663 return_ACPI_STATUS(status);
664 }
665
666 /* Don't allow more than one handler */
667
668 if (acpi_gbl_table_handler) {
669 status = AE_ALREADY_EXISTS;
670 goto cleanup;
671 }
672
673 /* Install the handler */
674
675 acpi_gbl_table_handler = handler;
676 acpi_gbl_table_handler_context = context;
677
678 cleanup:
679 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
680 return_ACPI_STATUS(status);
681}
682
683ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
684
685/*******************************************************************************
686 *
687 * FUNCTION: acpi_remove_table_handler
688 *
689 * PARAMETERS: Handler - Table event handler that was installed
690 * previously.
691 *
692 * RETURN: Status
693 *
694 * DESCRIPTION: Remove table event handler
695 *
696 ******************************************************************************/
697acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
698{
699 acpi_status status;
700
701 ACPI_FUNCTION_TRACE(acpi_remove_table_handler);
702
703 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
704 if (ACPI_FAILURE(status)) {
705 return_ACPI_STATUS(status);
706 }
707
708 /* Make sure that the installed handler is the same */
709
710 if (!handler || handler != acpi_gbl_table_handler) {
711 status = AE_BAD_PARAMETER;
712 goto cleanup;
713 }
714
715 /* Remove the handler */
716
717 acpi_gbl_table_handler = NULL;
718
719 cleanup:
720 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
721 return_ACPI_STATUS(status);
722}
723
724ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)
725
726
638static int __init acpi_no_auto_ssdt_setup(char *s) { 727static int __init acpi_no_auto_ssdt_setup(char *s) {
639 728
640 printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n"); 729 printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
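The tbxface.c additions export acpi_install_table_handler()/acpi_remove_table_handler() for a single global table-event callback. A hypothetical client of that API (the handler signature follows the acpi_tbl_handler type used above; ACPI_TABLE_EVENT_LOAD is assumed to be the ACPICA "table loaded" event constant):

#include <linux/kernel.h>
#include <linux/module.h>
#include <acpi/acpi.h>

static acpi_status example_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_header *header = table;

	if (event == ACPI_TABLE_EVENT_LOAD)
		printk(KERN_INFO "example: table [%4.4s] loaded\n",
		       header->signature);
	return AE_OK;
}

static int __init example_handler_init(void)
{
	if (ACPI_FAILURE(acpi_install_table_handler(example_table_handler, NULL)))
		return -ENODEV;
	return 0;
}

static void __exit example_handler_exit(void)
{
	acpi_remove_table_handler(example_table_handler);
}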
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index 9ecb4b6c1e7d..b8c0dfa084f6 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 766bd25d3376..504385b1f211 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -198,6 +198,7 @@ struct acpi_thermal {
198}; 198};
199 199
200static const struct file_operations acpi_thermal_state_fops = { 200static const struct file_operations acpi_thermal_state_fops = {
201 .owner = THIS_MODULE,
201 .open = acpi_thermal_state_open_fs, 202 .open = acpi_thermal_state_open_fs,
202 .read = seq_read, 203 .read = seq_read,
203 .llseek = seq_lseek, 204 .llseek = seq_lseek,
@@ -205,6 +206,7 @@ static const struct file_operations acpi_thermal_state_fops = {
205}; 206};
206 207
207static const struct file_operations acpi_thermal_temp_fops = { 208static const struct file_operations acpi_thermal_temp_fops = {
209 .owner = THIS_MODULE,
208 .open = acpi_thermal_temp_open_fs, 210 .open = acpi_thermal_temp_open_fs,
209 .read = seq_read, 211 .read = seq_read,
210 .llseek = seq_lseek, 212 .llseek = seq_lseek,
@@ -212,6 +214,7 @@ static const struct file_operations acpi_thermal_temp_fops = {
212}; 214};
213 215
214static const struct file_operations acpi_thermal_trip_fops = { 216static const struct file_operations acpi_thermal_trip_fops = {
217 .owner = THIS_MODULE,
215 .open = acpi_thermal_trip_open_fs, 218 .open = acpi_thermal_trip_open_fs,
216 .read = seq_read, 219 .read = seq_read,
217 .llseek = seq_lseek, 220 .llseek = seq_lseek,
@@ -219,6 +222,7 @@ static const struct file_operations acpi_thermal_trip_fops = {
219}; 222};
220 223
221static const struct file_operations acpi_thermal_cooling_fops = { 224static const struct file_operations acpi_thermal_cooling_fops = {
225 .owner = THIS_MODULE,
222 .open = acpi_thermal_cooling_open_fs, 226 .open = acpi_thermal_cooling_open_fs,
223 .read = seq_read, 227 .read = seq_read,
224 .write = acpi_thermal_write_cooling_mode, 228 .write = acpi_thermal_write_cooling_mode,
@@ -227,6 +231,7 @@ static const struct file_operations acpi_thermal_cooling_fops = {
227}; 231};
228 232
229static const struct file_operations acpi_thermal_polling_fops = { 233static const struct file_operations acpi_thermal_polling_fops = {
234 .owner = THIS_MODULE,
230 .open = acpi_thermal_polling_open_fs, 235 .open = acpi_thermal_polling_open_fs,
231 .read = seq_read, 236 .read = seq_read,
232 .write = acpi_thermal_write_polling, 237 .write = acpi_thermal_write_polling,
@@ -884,10 +889,15 @@ static void acpi_thermal_check(void *data)
884static int thermal_get_temp(struct thermal_zone_device *thermal, char *buf) 889static int thermal_get_temp(struct thermal_zone_device *thermal, char *buf)
885{ 890{
886 struct acpi_thermal *tz = thermal->devdata; 891 struct acpi_thermal *tz = thermal->devdata;
892 int result;
887 893
888 if (!tz) 894 if (!tz)
889 return -EINVAL; 895 return -EINVAL;
890 896
897 result = acpi_thermal_get_temperature(tz);
898 if (result)
899 return result;
900
891 return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS(tz->temperature)); 901 return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS(tz->temperature));
892} 902}
893 903
@@ -1012,6 +1022,18 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
1012 return -EINVAL; 1022 return -EINVAL;
1013} 1023}
1014 1024
1025static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
1026 unsigned long *temperature) {
1027 struct acpi_thermal *tz = thermal->devdata;
1028
1029 if (tz->trips.critical.flags.valid) {
1030 *temperature = KELVIN_TO_MILLICELSIUS(
1031 tz->trips.critical.temperature);
1032 return 0;
1033 } else
1034 return -EINVAL;
1035}
1036
1015typedef int (*cb)(struct thermal_zone_device *, int, 1037typedef int (*cb)(struct thermal_zone_device *, int,
1016 struct thermal_cooling_device *); 1038 struct thermal_cooling_device *);
1017static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, 1039static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
@@ -1103,6 +1125,7 @@ static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
1103 .set_mode = thermal_set_mode, 1125 .set_mode = thermal_set_mode,
1104 .get_trip_type = thermal_get_trip_type, 1126 .get_trip_type = thermal_get_trip_type,
1105 .get_trip_temp = thermal_get_trip_temp, 1127 .get_trip_temp = thermal_get_trip_temp,
1128 .get_crit_temp = thermal_get_crit_temp,
1106}; 1129};
1107 1130
1108static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz) 1131static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
@@ -1123,7 +1146,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
1123 1146
1124 for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && 1147 for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
1125 tz->trips.active[i].flags.valid; i++, trips++); 1148 tz->trips.active[i].flags.valid; i++, trips++);
1126 tz->thermal_zone = thermal_zone_device_register("ACPI thermal zone", 1149 tz->thermal_zone = thermal_zone_device_register("acpitz",
1127 trips, tz, &acpi_thermal_zone_ops); 1150 trips, tz, &acpi_thermal_zone_ops);
1128 if (IS_ERR(tz->thermal_zone)) 1151 if (IS_ERR(tz->thermal_zone))
1129 return -ENODEV; 1152 return -ENODEV;
@@ -1419,63 +1442,47 @@ static int acpi_thermal_add_fs(struct acpi_device *device)
1419 } 1442 }
1420 1443
1421 /* 'state' [R] */ 1444 /* 'state' [R] */
1422 entry = create_proc_entry(ACPI_THERMAL_FILE_STATE, 1445 entry = proc_create_data(ACPI_THERMAL_FILE_STATE,
1423 S_IRUGO, acpi_device_dir(device)); 1446 S_IRUGO, acpi_device_dir(device),
1447 &acpi_thermal_state_fops,
1448 acpi_driver_data(device));
1424 if (!entry) 1449 if (!entry)
1425 return -ENODEV; 1450 return -ENODEV;
1426 else {
1427 entry->proc_fops = &acpi_thermal_state_fops;
1428 entry->data = acpi_driver_data(device);
1429 entry->owner = THIS_MODULE;
1430 }
1431 1451
1432 /* 'temperature' [R] */ 1452 /* 'temperature' [R] */
1433 entry = create_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE, 1453 entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE,
1434 S_IRUGO, acpi_device_dir(device)); 1454 S_IRUGO, acpi_device_dir(device),
1455 &acpi_thermal_temp_fops,
1456 acpi_driver_data(device));
1435 if (!entry) 1457 if (!entry)
1436 return -ENODEV; 1458 return -ENODEV;
1437 else {
1438 entry->proc_fops = &acpi_thermal_temp_fops;
1439 entry->data = acpi_driver_data(device);
1440 entry->owner = THIS_MODULE;
1441 }
1442 1459
1443 /* 'trip_points' [R] */ 1460 /* 'trip_points' [R] */
1444 entry = create_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS, 1461 entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS,
1445 S_IRUGO, 1462 S_IRUGO,
1446 acpi_device_dir(device)); 1463 acpi_device_dir(device),
1464 &acpi_thermal_trip_fops,
1465 acpi_driver_data(device));
1447 if (!entry) 1466 if (!entry)
1448 return -ENODEV; 1467 return -ENODEV;
1449 else {
1450 entry->proc_fops = &acpi_thermal_trip_fops;
1451 entry->data = acpi_driver_data(device);
1452 entry->owner = THIS_MODULE;
1453 }
1454 1468
1455 /* 'cooling_mode' [R/W] */ 1469 /* 'cooling_mode' [R/W] */
1456 entry = create_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE, 1470 entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE,
1457 S_IFREG | S_IRUGO | S_IWUSR, 1471 S_IFREG | S_IRUGO | S_IWUSR,
1458 acpi_device_dir(device)); 1472 acpi_device_dir(device),
1473 &acpi_thermal_cooling_fops,
1474 acpi_driver_data(device));
1459 if (!entry) 1475 if (!entry)
1460 return -ENODEV; 1476 return -ENODEV;
1461 else {
1462 entry->proc_fops = &acpi_thermal_cooling_fops;
1463 entry->data = acpi_driver_data(device);
1464 entry->owner = THIS_MODULE;
1465 }
1466 1477
1467 /* 'polling_frequency' [R/W] */ 1478 /* 'polling_frequency' [R/W] */
1468 entry = create_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ, 1479 entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ,
1469 S_IFREG | S_IRUGO | S_IWUSR, 1480 S_IFREG | S_IRUGO | S_IWUSR,
1470 acpi_device_dir(device)); 1481 acpi_device_dir(device),
1482 &acpi_thermal_polling_fops,
1483 acpi_driver_data(device));
1471 if (!entry) 1484 if (!entry)
1472 return -ENODEV; 1485 return -ENODEV;
1473 else {
1474 entry->proc_fops = &acpi_thermal_polling_fops;
1475 entry->data = acpi_driver_data(device);
1476 entry->owner = THIS_MODULE;
1477 }
1478
1479 return 0; 1486 return 0;
1480} 1487}
1481 1488
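All five /proc files above follow the same mechanical conversion: create_proc_entry() plus hand-patched proc_fops/data/owner fields becomes a single proc_create_data() call, which publishes an entry that is fully initialized before it becomes visible. A minimal sketch of the pattern with a hypothetical "foo" entry (not taken from this driver):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

/* Old style: create the entry, then patch its fields by hand. */
static int foo_add_old(struct proc_dir_entry *dir, void *data,
		       const struct file_operations *fops)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("foo", S_IRUGO, dir);
	if (!entry)
		return -ENODEV;
	entry->proc_fops = fops;	/* entry is already live at this point */
	entry->data = data;
	entry->owner = THIS_MODULE;
	return 0;
}

/* New style: one call sets fops and data before the entry goes live. */
static int foo_add_new(struct proc_dir_entry *dir, void *data,
		       const struct file_operations *fops)
{
	if (!proc_create_data("foo", S_IRUGO, dir, fops, data))
		return -ENODEV;
	return 0;
}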
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 6e56d5f7c43a..ede084829a70 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,7 @@ acpi_status acpi_ut_delete_caches(void)
147 147
148 if (acpi_gbl_display_final_mem_stats) { 148 if (acpi_gbl_display_final_mem_stats) {
149 ACPI_STRCPY(buffer, "MEMORY"); 149 ACPI_STRCPY(buffer, "MEMORY");
150 acpi_db_display_statistics(buffer); 150 (void)acpi_db_display_statistics(buffer);
151 } 151 }
152#endif 152#endif
153 153
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 285a0f531760..245fa80cf600 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 879eaa10d3ae..655c290aca7b 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -43,6 +43,8 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/amlcode.h> 45#include <acpi/amlcode.h>
46#include <acpi/acnamesp.h>
47
46 48
47#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
48ACPI_MODULE_NAME("utcopy") 50ACPI_MODULE_NAME("utcopy")
@@ -172,22 +174,21 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
172 174
173 case ACPI_TYPE_LOCAL_REFERENCE: 175 case ACPI_TYPE_LOCAL_REFERENCE:
174 176
175 /* 177 /* This is an object reference. */
176 * This is an object reference. Attempt to dereference it. 178
177 */
178 switch (internal_object->reference.opcode) { 179 switch (internal_object->reference.opcode) {
179 case AML_INT_NAMEPATH_OP: 180 case AML_INT_NAMEPATH_OP:
180 181
181 /* For namepath, return the object handle ("reference") */ 182 /* For namepath, return the object handle ("reference") */
182 183
183 default: 184 default:
184 /* 185
185 * Use the object type of "Any" to indicate a reference 186 /* We are referring to the namespace node */
186 * to object containing a handle to an ACPI named object. 187
187 */
188 external_object->type = ACPI_TYPE_ANY;
189 external_object->reference.handle = 188 external_object->reference.handle =
190 internal_object->reference.node; 189 internal_object->reference.node;
190 external_object->reference.actual_type =
191 acpi_ns_get_type(internal_object->reference.node);
191 break; 192 break;
192 } 193 }
193 break; 194 break;
@@ -215,6 +216,11 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
215 /* 216 /*
216 * There is no corresponding external object type 217 * There is no corresponding external object type
217 */ 218 */
219 ACPI_ERROR((AE_INFO,
220 "Unsupported object type, cannot convert to external object: %s",
221 acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
222 (internal_object))));
223
218 return_ACPI_STATUS(AE_SUPPORT); 224 return_ACPI_STATUS(AE_SUPPORT);
219 } 225 }
220 226
@@ -455,6 +461,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
455 case ACPI_TYPE_STRING: 461 case ACPI_TYPE_STRING:
456 case ACPI_TYPE_BUFFER: 462 case ACPI_TYPE_BUFFER:
457 case ACPI_TYPE_INTEGER: 463 case ACPI_TYPE_INTEGER:
464 case ACPI_TYPE_LOCAL_REFERENCE:
458 465
459 internal_object = acpi_ut_create_internal_object((u8) 466 internal_object = acpi_ut_create_internal_object((u8)
460 external_object-> 467 external_object->
@@ -464,9 +471,18 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
464 } 471 }
465 break; 472 break;
466 473
474 case ACPI_TYPE_ANY: /* This is the case for a NULL object */
475
476 *ret_internal_object = NULL;
477 return_ACPI_STATUS(AE_OK);
478
467 default: 479 default:
468 /* All other types are not supported */ 480 /* All other types are not supported */
469 481
482 ACPI_ERROR((AE_INFO,
483 "Unsupported object type, cannot convert to internal object: %s",
484 acpi_ut_get_type_name(external_object->type)));
485
470 return_ACPI_STATUS(AE_SUPPORT); 486 return_ACPI_STATUS(AE_SUPPORT);
471 } 487 }
472 488
@@ -502,6 +518,10 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
502 external_object->buffer.length); 518 external_object->buffer.length);
503 519
504 internal_object->buffer.length = external_object->buffer.length; 520 internal_object->buffer.length = external_object->buffer.length;
521
522 /* Mark buffer data valid */
523
524 internal_object->buffer.flags |= AOPOBJ_DATA_VALID;
505 break; 525 break;
506 526
507 case ACPI_TYPE_INTEGER: 527 case ACPI_TYPE_INTEGER:
@@ -509,6 +529,15 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
509 internal_object->integer.value = external_object->integer.value; 529 internal_object->integer.value = external_object->integer.value;
510 break; 530 break;
511 531
532 case ACPI_TYPE_LOCAL_REFERENCE:
533
534 /* TBD: should validate incoming handle */
535
536 internal_object->reference.opcode = AML_INT_NAMEPATH_OP;
537 internal_object->reference.node =
538 external_object->reference.handle;
539 break;
540
512 default: 541 default:
513 /* Other types can't get here */ 542 /* Other types can't get here */
514 break; 543 break;
@@ -570,13 +599,17 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
570 599
571 /* Truncate package and delete it */ 600 /* Truncate package and delete it */
572 601
573 package_object->package.count = i; 602 package_object->package.count = (u32) i;
574 package_elements[i] = NULL; 603 package_elements[i] = NULL;
575 acpi_ut_remove_reference(package_object); 604 acpi_ut_remove_reference(package_object);
576 return_ACPI_STATUS(status); 605 return_ACPI_STATUS(status);
577 } 606 }
578 } 607 }
579 608
609 /* Mark package data valid */
610
611 package_object->package.flags |= AOPOBJ_DATA_VALID;
612
580 *internal_object = package_object; 613 *internal_object = package_object;
581 return_ACPI_STATUS(status); 614 return_ACPI_STATUS(status);
582} 615}
@@ -709,7 +742,15 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
709 /* 742 /*
710 * We copied the reference object, so we now must add a reference 743 * We copied the reference object, so we now must add a reference
711 * to the object pointed to by the reference 744 * to the object pointed to by the reference
745 *
 746 * A DDBHandle reference (from Load/load_table) is a special reference:
 747 * its Reference.Object is the table index, so it does not need to
 748 * increase the reference count
712 */ 749 */
750 if (source_desc->reference.opcode == AML_LOAD_OP) {
751 break;
752 }
753
713 acpi_ut_add_reference(source_desc->reference.object); 754 acpi_ut_add_reference(source_desc->reference.object);
714 break; 755 break;
715 756
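Taken together, these utcopy hunks let object references round-trip between internal and external form: a reference element handed back to a caller is now typed ACPI_TYPE_LOCAL_REFERENCE and carries the referenced node's real type, instead of the old ACPI_TYPE_ANY placeholder. A hedged sketch of how a caller might consume such an element; it assumes the reference member of union acpi_object gains the actual_type field added in this series:

#include <acpi/acpi.h>

/* Sketch only: consuming a reference element from an evaluated package. */
static acpi_status use_reference_element(union acpi_object *element)
{
	acpi_handle target;

	if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
		return AE_BAD_DATA;	/* before this series it arrived as ACPI_TYPE_ANY */

	/* The handle names the namespace node behind the reference. */
	target = element->reference.handle;
	acpi_os_printf("reference to %p (actual type %u)\n",
		       target, element->reference.actual_type);
	return AE_OK;
}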
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index 7361204b1eef..f938f465efa4 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -68,9 +68,9 @@ static const char *acpi_ut_trim_function_name(const char *function_name);
68 68
69void acpi_ut_init_stack_ptr_trace(void) 69void acpi_ut_init_stack_ptr_trace(void)
70{ 70{
71 u32 current_sp; 71 acpi_size current_sp;
72 72
73 acpi_gbl_entry_stack_pointer = ACPI_PTR_DIFF(&current_sp, NULL); 73 acpi_gbl_entry_stack_pointer = &current_sp;
74} 74}
75 75
76/******************************************************************************* 76/*******************************************************************************
@@ -89,10 +89,8 @@ void acpi_ut_track_stack_ptr(void)
89{ 89{
90 acpi_size current_sp; 90 acpi_size current_sp;
91 91
92 current_sp = ACPI_PTR_DIFF(&current_sp, NULL); 92 if (&current_sp < acpi_gbl_lowest_stack_pointer) {
93 93 acpi_gbl_lowest_stack_pointer = &current_sp;
94 if (current_sp < acpi_gbl_lowest_stack_pointer) {
95 acpi_gbl_lowest_stack_pointer = current_sp;
96 } 94 }
97 95
98 if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) { 96 if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) {
@@ -203,6 +201,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
203 201
204 va_start(args, format); 202 va_start(args, format);
205 acpi_os_vprintf(format, args); 203 acpi_os_vprintf(format, args);
204 va_end(args);
206} 205}
207 206
208ACPI_EXPORT_SYMBOL(acpi_ut_debug_print) 207ACPI_EXPORT_SYMBOL(acpi_ut_debug_print)
@@ -240,6 +239,7 @@ acpi_ut_debug_print_raw(u32 requested_debug_level,
240 239
241 va_start(args, format); 240 va_start(args, format);
242 acpi_os_vprintf(format, args); 241 acpi_os_vprintf(format, args);
242 va_end(args);
243} 243}
244 244
245ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw) 245ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw)
@@ -524,6 +524,11 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
524 u32 temp32; 524 u32 temp32;
525 u8 buf_char; 525 u8 buf_char;
526 526
527 if (!buffer) {
528 acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n");
529 return;
530 }
531
527 if ((count < 4) || (count & 0x01)) { 532 if ((count < 4) || (count & 0x01)) {
528 display = DB_BYTE_DISPLAY; 533 display = DB_BYTE_DISPLAY;
529 } 534 }
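The stack-tracking hunks stop folding stack addresses into a u32 via ACPI_PTR_DIFF and instead store and compare real pointers, which matters on 64-bit hosts where the high address bits were previously discarded. A generic userspace illustration of the truncation (not ACPICA code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int local;
	uintptr_t full = (uintptr_t)&local;	/* 64-bit address on x86_64 */
	uint32_t truncated = (uint32_t)full;	/* old scheme: high bits lost */

	/* Comparing truncated values can misorder two stack addresses. */
	printf("full=%#lx truncated=%#x\n", (unsigned long)full, truncated);
	return 0;
}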
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index f777cebdc46d..1fbc35139e84 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -158,7 +158,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
158 "***** Mutex %p, OS Mutex %p\n", 158 "***** Mutex %p, OS Mutex %p\n",
159 object, object->mutex.os_mutex)); 159 object, object->mutex.os_mutex));
160 160
161 if (object->mutex.os_mutex == acpi_gbl_global_lock_mutex) { 161 if (object == acpi_gbl_global_lock_mutex) {
162 162
163 /* Global Lock has extra semaphore */ 163 /* Global Lock has extra semaphore */
164 164
@@ -252,6 +252,17 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
252 } 252 }
253 break; 253 break;
254 254
255 case ACPI_TYPE_LOCAL_BANK_FIELD:
256
257 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
258 "***** Bank Field %p\n", object));
259
260 second_desc = acpi_ns_get_secondary_object(object);
261 if (second_desc) {
262 acpi_ut_delete_object_desc(second_desc);
263 }
264 break;
265
255 default: 266 default:
256 break; 267 break;
257 } 268 }
@@ -524,10 +535,12 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
524 535
525 case ACPI_TYPE_LOCAL_REFERENCE: 536 case ACPI_TYPE_LOCAL_REFERENCE:
526 /* 537 /*
527 * The target of an Index (a package, string, or buffer) must track 538 * The target of an Index (a package, string, or buffer) or a named
528 * changes to the ref count of the index. 539 * reference must track changes to the ref count of the index or
540 * target object.
529 */ 541 */
530 if (object->reference.opcode == AML_INDEX_OP) { 542 if ((object->reference.opcode == AML_INDEX_OP) ||
543 (object->reference.opcode == AML_INT_NAMEPATH_OP)) {
531 next_object = object->reference.object; 544 next_object = object->reference.object;
532 } 545 }
533 break; 546 break;
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
index 0042b7e78b26..05e61be267d5 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/utilities/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index 630c9a2c5b7b..a6e71b801d2d 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -602,6 +602,48 @@ char *acpi_ut_get_mutex_name(u32 mutex_id)
602 602
603 return (acpi_gbl_mutex_names[mutex_id]); 603 return (acpi_gbl_mutex_names[mutex_id]);
604} 604}
605
606/*******************************************************************************
607 *
608 * FUNCTION: acpi_ut_get_notify_name
609 *
610 * PARAMETERS: notify_value - Value from the Notify() request
611 *
612 * RETURN: String corresponding to the Notify Value.
613 *
614 * DESCRIPTION: Translate a Notify Value to a notify namestring.
615 *
616 ******************************************************************************/
617
618/* Names for Notify() values, used for debug output */
619
620static const char *acpi_gbl_notify_value_names[] = {
621 "Bus Check",
622 "Device Check",
623 "Device Wake",
624 "Eject Request",
625 "Device Check Light",
626 "Frequency Mismatch",
627 "Bus Mode Mismatch",
628 "Power Fault",
629 "Capabilities Check",
630 "Device PLD Check",
631 "Reserved",
632 "System Locality Update"
633};
634
635const char *acpi_ut_get_notify_name(u32 notify_value)
636{
637
638 if (notify_value <= ACPI_NOTIFY_MAX) {
639 return (acpi_gbl_notify_value_names[notify_value]);
640 } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
641 return ("Reserved");
642 } else { /* Greater or equal to 0x80 */
643
644 return ("**Device Specific**");
645 }
646}
605#endif 647#endif
606 648
607/******************************************************************************* 649/*******************************************************************************
@@ -675,12 +717,13 @@ void acpi_ut_init_globals(void)
675 acpi_gbl_gpe_fadt_blocks[0] = NULL; 717 acpi_gbl_gpe_fadt_blocks[0] = NULL;
676 acpi_gbl_gpe_fadt_blocks[1] = NULL; 718 acpi_gbl_gpe_fadt_blocks[1] = NULL;
677 719
678 /* Global notify handlers */ 720 /* Global handlers */
679 721
680 acpi_gbl_system_notify.handler = NULL; 722 acpi_gbl_system_notify.handler = NULL;
681 acpi_gbl_device_notify.handler = NULL; 723 acpi_gbl_device_notify.handler = NULL;
682 acpi_gbl_exception_handler = NULL; 724 acpi_gbl_exception_handler = NULL;
683 acpi_gbl_init_handler = NULL; 725 acpi_gbl_init_handler = NULL;
726 acpi_gbl_table_handler = NULL;
684 727
685 /* Global Lock support */ 728 /* Global Lock support */
686 729
@@ -722,7 +765,7 @@ void acpi_ut_init_globals(void)
722 acpi_gbl_root_node_struct.flags = ANOBJ_END_OF_PEER_LIST; 765 acpi_gbl_root_node_struct.flags = ANOBJ_END_OF_PEER_LIST;
723 766
724#ifdef ACPI_DEBUG_OUTPUT 767#ifdef ACPI_DEBUG_OUTPUT
725 acpi_gbl_lowest_stack_pointer = ACPI_SIZE_MAX; 768 acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
726#endif 769#endif
727 770
728#ifdef ACPI_DBG_TRACK_ALLOCATIONS 771#ifdef ACPI_DBG_TRACK_ALLOCATIONS
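The new acpi_ut_get_notify_name() is a bounds-checked lookup: values through ACPI_NOTIFY_MAX index the table, the rest of the 0x00–0x7F range is reserved, and 0x80 and above are device specific. A standalone sketch of the same logic; the MY_* limits are assumptions standing in for the ACPICA constants:

#include <stdio.h>

#define MY_NOTIFY_MAX		0x0B	/* assumption: last entry in the table */
#define MY_MAX_SYS_NOTIFY	0x7F	/* assumption: top of the reserved range */

static const char *notify_names[MY_NOTIFY_MAX + 1] = {
	"Bus Check", "Device Check", "Device Wake", "Eject Request",
	"Device Check Light", "Frequency Mismatch", "Bus Mode Mismatch",
	"Power Fault", "Capabilities Check", "Device PLD Check",
	"Reserved", "System Locality Update",
};

static const char *notify_name(unsigned int value)
{
	if (value <= MY_NOTIFY_MAX)
		return notify_names[value];
	if (value <= MY_MAX_SYS_NOTIFY)
		return "Reserved";
	return "**Device Specific**";	/* 0x80 and above */
}

int main(void)
{
	printf("0x02 -> %s\n", notify_name(0x02));	/* Device Wake */
	printf("0x81 -> %s\n", notify_name(0x81));	/* device specific */
	return 0;
}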
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/utilities/utinit.c
index ad3c0d0a5cf8..cae515fc02d3 100644
--- a/drivers/acpi/utilities/utinit.c
+++ b/drivers/acpi/utilities/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -125,9 +125,12 @@ void acpi_ut_subsystem_shutdown(void)
125 acpi_gbl_startup_flags = 0; 125 acpi_gbl_startup_flags = 0;
126 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n")); 126 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
127 127
128#ifndef ACPI_ASL_COMPILER
129
128 /* Close the acpi_event Handling */ 130 /* Close the acpi_event Handling */
129 131
130 acpi_ev_terminate(); 132 acpi_ev_terminate();
133#endif
131 134
132 /* Close the Namespace */ 135 /* Close the Namespace */
133 136
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/utilities/utmath.c
index 0c56a0d20b29..c927324fdd26 100644
--- a/drivers/acpi/utilities/utmath.c
+++ b/drivers/acpi/utilities/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -276,7 +276,7 @@ acpi_ut_short_divide(acpi_integer in_dividend,
276 *out_quotient = in_dividend / divisor; 276 *out_quotient = in_dividend / divisor;
277 } 277 }
278 if (out_remainder) { 278 if (out_remainder) {
279 *out_remainder = (u32) in_dividend % divisor; 279 *out_remainder = (u32) (in_dividend % divisor);
280 } 280 }
281 281
282 return_ACPI_STATUS(AE_OK); 282 return_ACPI_STATUS(AE_OK);
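The utmath change is purely about precedence: a cast binds tighter than %, so the old expression computed ((u32)in_dividend) % divisor rather than truncating the 64-bit remainder. A small userspace illustration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dividend = 0x100000005ULL;	/* larger than 32 bits */
	uint32_t divisor = 7;
	uint32_t wrong = (uint32_t)dividend % divisor;	 /* truncate, then mod: 5 % 7 = 5 */
	uint32_t right = (uint32_t)(dividend % divisor); /* 64-bit mod first: result 2 */

	printf("wrong=%u right=%u\n", wrong, right);
	return 0;
}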
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 2d19f71e9cfa..e4ba7192cd15 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -1033,6 +1033,7 @@ acpi_ut_error(char *module_name, u32 line_number, char *format, ...)
1033 va_start(args, format); 1033 va_start(args, format);
1034 acpi_os_vprintf(format, args); 1034 acpi_os_vprintf(format, args);
1035 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); 1035 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
1036 va_end(args);
1036} 1037}
1037 1038
1038void ACPI_INTERNAL_VAR_XFACE 1039void ACPI_INTERNAL_VAR_XFACE
@@ -1061,6 +1062,8 @@ acpi_ut_warning(char *module_name, u32 line_number, char *format, ...)
1061 va_start(args, format); 1062 va_start(args, format);
1062 acpi_os_vprintf(format, args); 1063 acpi_os_vprintf(format, args);
1063 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); 1064 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
1065 va_end(args);
1066 va_end(args);
1064} 1067}
1065 1068
1066void ACPI_INTERNAL_VAR_XFACE 1069void ACPI_INTERNAL_VAR_XFACE
@@ -1077,4 +1080,5 @@ acpi_ut_info(char *module_name, u32 line_number, char *format, ...)
1077 va_start(args, format); 1080 va_start(args, format);
1078 acpi_os_vprintf(format, args); 1081 acpi_os_vprintf(format, args);
1079 acpi_os_printf("\n"); 1082 acpi_os_printf("\n");
1083 va_end(args);
1080} 1084}
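These hunks (like the utdebug ones above) close each va_start() with a matching va_end(); C requires exactly one va_end per va_start on a given va_list, so the doubled call added in the warning path above looks like an oversight rather than the intended pattern. A minimal sketch of the correct pairing, not ACPICA code:

#include <stdarg.h>
#include <stdio.h>

/* Variadic wrapper: every va_start is closed by exactly one va_end. */
static void log_msg(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);		/* required before the function returns */
}

int main(void)
{
	log_msg("value=%d\n", 42);
	return 0;
}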
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index 4820bc86d1f5..f7d602b1a894 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index e08b3fa6639f..e68466de8044 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -107,6 +107,7 @@ union acpi_operand_object *acpi_ut_create_internal_object_dbg(char *module_name,
107 switch (type) { 107 switch (type) {
108 case ACPI_TYPE_REGION: 108 case ACPI_TYPE_REGION:
109 case ACPI_TYPE_BUFFER_FIELD: 109 case ACPI_TYPE_BUFFER_FIELD:
110 case ACPI_TYPE_LOCAL_BANK_FIELD:
110 111
111 /* These types require a secondary object */ 112 /* These types require a secondary object */
112 113
@@ -469,9 +470,8 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
469 case ACPI_TYPE_PROCESSOR: 470 case ACPI_TYPE_PROCESSOR:
470 case ACPI_TYPE_POWER: 471 case ACPI_TYPE_POWER:
471 472
472 /* 473 /* No extra data for these types */
473 * No extra data for these types 474
474 */
475 break; 475 break;
476 476
477 case ACPI_TYPE_LOCAL_REFERENCE: 477 case ACPI_TYPE_LOCAL_REFERENCE:
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index b630ee137ee1..c3e3e1308edc 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/utilities/utstate.c
index edcaafad0a31..63a6d3d77d88 100644
--- a/drivers/acpi/utilities/utstate.c
+++ b/drivers/acpi/utilities/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index 2d496918b3cd..f8bdadf3c32f 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore 8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,7 @@
49#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utxface") 50ACPI_MODULE_NAME("utxface")
51 51
52#ifndef ACPI_ASL_COMPILER
52/******************************************************************************* 53/*******************************************************************************
53 * 54 *
54 * FUNCTION: acpi_initialize_subsystem 55 * FUNCTION: acpi_initialize_subsystem
@@ -192,24 +193,6 @@ acpi_status acpi_enable_subsystem(u32 flags)
192 } 193 }
193 } 194 }
194 195
195 /*
196 * Complete the GPE initialization for the GPE blocks defined in the FADT
197 * (GPE block 0 and 1).
198 *
199 * Note1: This is where the _PRW methods are executed for the GPEs. These
200 * methods can only be executed after the SCI and Global Lock handlers are
201 * installed and initialized.
202 *
203 * Note2: Currently, there seems to be no need to run the _REG methods
204 * before execution of the _PRW methods and enabling of the GPEs.
205 */
206 if (!(flags & ACPI_NO_EVENT_INIT)) {
207 status = acpi_ev_install_fadt_gpes();
208 if (ACPI_FAILURE(status)) {
209 return (status);
210 }
211 }
212
213 return_ACPI_STATUS(status); 196 return_ACPI_STATUS(status);
214} 197}
215 198
@@ -280,6 +263,23 @@ acpi_status acpi_initialize_objects(u32 flags)
280 } 263 }
281 264
282 /* 265 /*
266 * Complete the GPE initialization for the GPE blocks defined in the FADT
267 * (GPE block 0 and 1).
268 *
269 * Note1: This is where the _PRW methods are executed for the GPEs. These
270 * methods can only be executed after the SCI and Global Lock handlers are
271 * installed and initialized.
272 *
273 * Note2: Currently, there seems to be no need to run the _REG methods
274 * before execution of the _PRW methods and enabling of the GPEs.
275 */
276 if (!(flags & ACPI_NO_EVENT_INIT)) {
277 status = acpi_ev_install_fadt_gpes();
278 if (ACPI_FAILURE(status))
279 return (status);
280 }
281
282 /*
283 * Empty the caches (delete the cached objects) on the assumption that 283 * Empty the caches (delete the cached objects) on the assumption that
284 * the table load filled them up more than they will be at runtime -- 284 * the table load filled them up more than they will be at runtime --
285 * thus wasting non-paged memory. 285 * thus wasting non-paged memory.
@@ -292,6 +292,7 @@ acpi_status acpi_initialize_objects(u32 flags)
292 292
293ACPI_EXPORT_SYMBOL(acpi_initialize_objects) 293ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
294 294
295#endif
295/******************************************************************************* 296/*******************************************************************************
296 * 297 *
297 * FUNCTION: acpi_terminate 298 * FUNCTION: acpi_terminate
@@ -335,6 +336,7 @@ acpi_status acpi_terminate(void)
335} 336}
336 337
337ACPI_EXPORT_SYMBOL(acpi_terminate) 338ACPI_EXPORT_SYMBOL(acpi_terminate)
339#ifndef ACPI_ASL_COMPILER
338#ifdef ACPI_FUTURE_USAGE 340#ifdef ACPI_FUTURE_USAGE
339/******************************************************************************* 341/*******************************************************************************
340 * 342 *
@@ -490,3 +492,4 @@ acpi_status acpi_purge_cached_objects(void)
490} 492}
491 493
492ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects) 494ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
495#endif
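The net effect of the utxface hunks is that FADT GPE block initialization (and the _PRW evaluation it implies) now runs from acpi_initialize_objects() rather than acpi_enable_subsystem(). A hedged sketch of the host bring-up order this assumes; the wrapper name is hypothetical and error handling is trimmed:

#include <acpi/acpi.h>

/* Sketch of the usual ACPICA bring-up order; acpi_initialize_tables() is
 * assumed to have run earlier, and most error handling is omitted. */
static acpi_status example_acpi_bring_up(void)
{
	acpi_status status;

	status = acpi_initialize_subsystem();		/* core + OSL init */
	if (ACPI_FAILURE(status))
		return status;

	status = acpi_load_tables();			/* DSDT/SSDTs into the namespace */
	if (ACPI_FAILURE(status))
		return status;

	status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);	/* SCI, events */
	if (ACPI_FAILURE(status))
		return status;

	/* After this change, _PRW evaluation and FADT GPE enabling happen here. */
	return acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
}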
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 44ea60cf21c0..100926143818 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -398,7 +398,7 @@ acpi_evaluate_reference(acpi_handle handle,
398 398
399 element = &(package->package.elements[i]); 399 element = &(package->package.elements[i]);
400 400
401 if (element->type != ACPI_TYPE_ANY) { 401 if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
402 status = AE_BAD_DATA; 402 status = AE_BAD_DATA;
403 printk(KERN_ERR PREFIX 403 printk(KERN_ERR PREFIX
404 "Expecting a [Reference] package element, found type %X\n", 404 "Expecting a [Reference] package element, found type %X\n",
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 980a74188781..5e5dda3a3027 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -57,8 +57,6 @@
57#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88 57#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS 0x88
58#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89 58#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF 0x89
59 59
60#define ACPI_VIDEO_HEAD_INVALID (~0u - 1)
61#define ACPI_VIDEO_HEAD_END (~0u)
62#define MAX_NAME_LEN 20 60#define MAX_NAME_LEN 20
63 61
64#define ACPI_VIDEO_DISPLAY_CRT 1 62#define ACPI_VIDEO_DISPLAY_CRT 1
@@ -192,6 +190,7 @@ struct acpi_video_device {
192/* bus */ 190/* bus */
193static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file); 191static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file);
194static struct file_operations acpi_video_bus_info_fops = { 192static struct file_operations acpi_video_bus_info_fops = {
193 .owner = THIS_MODULE,
195 .open = acpi_video_bus_info_open_fs, 194 .open = acpi_video_bus_info_open_fs,
196 .read = seq_read, 195 .read = seq_read,
197 .llseek = seq_lseek, 196 .llseek = seq_lseek,
@@ -200,6 +199,7 @@ static struct file_operations acpi_video_bus_info_fops = {
200 199
201static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file); 200static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file);
202static struct file_operations acpi_video_bus_ROM_fops = { 201static struct file_operations acpi_video_bus_ROM_fops = {
202 .owner = THIS_MODULE,
203 .open = acpi_video_bus_ROM_open_fs, 203 .open = acpi_video_bus_ROM_open_fs,
204 .read = seq_read, 204 .read = seq_read,
205 .llseek = seq_lseek, 205 .llseek = seq_lseek,
@@ -209,6 +209,7 @@ static struct file_operations acpi_video_bus_ROM_fops = {
209static int acpi_video_bus_POST_info_open_fs(struct inode *inode, 209static int acpi_video_bus_POST_info_open_fs(struct inode *inode,
210 struct file *file); 210 struct file *file);
211static struct file_operations acpi_video_bus_POST_info_fops = { 211static struct file_operations acpi_video_bus_POST_info_fops = {
212 .owner = THIS_MODULE,
212 .open = acpi_video_bus_POST_info_open_fs, 213 .open = acpi_video_bus_POST_info_open_fs,
213 .read = seq_read, 214 .read = seq_read,
214 .llseek = seq_lseek, 215 .llseek = seq_lseek,
@@ -217,6 +218,7 @@ static struct file_operations acpi_video_bus_POST_info_fops = {
217 218
218static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file); 219static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file);
219static struct file_operations acpi_video_bus_POST_fops = { 220static struct file_operations acpi_video_bus_POST_fops = {
221 .owner = THIS_MODULE,
220 .open = acpi_video_bus_POST_open_fs, 222 .open = acpi_video_bus_POST_open_fs,
221 .read = seq_read, 223 .read = seq_read,
222 .llseek = seq_lseek, 224 .llseek = seq_lseek,
@@ -225,6 +227,7 @@ static struct file_operations acpi_video_bus_POST_fops = {
225 227
226static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file); 228static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file);
227static struct file_operations acpi_video_bus_DOS_fops = { 229static struct file_operations acpi_video_bus_DOS_fops = {
230 .owner = THIS_MODULE,
228 .open = acpi_video_bus_DOS_open_fs, 231 .open = acpi_video_bus_DOS_open_fs,
229 .read = seq_read, 232 .read = seq_read,
230 .llseek = seq_lseek, 233 .llseek = seq_lseek,
@@ -235,6 +238,7 @@ static struct file_operations acpi_video_bus_DOS_fops = {
235static int acpi_video_device_info_open_fs(struct inode *inode, 238static int acpi_video_device_info_open_fs(struct inode *inode,
236 struct file *file); 239 struct file *file);
237static struct file_operations acpi_video_device_info_fops = { 240static struct file_operations acpi_video_device_info_fops = {
241 .owner = THIS_MODULE,
238 .open = acpi_video_device_info_open_fs, 242 .open = acpi_video_device_info_open_fs,
239 .read = seq_read, 243 .read = seq_read,
240 .llseek = seq_lseek, 244 .llseek = seq_lseek,
@@ -244,6 +248,7 @@ static struct file_operations acpi_video_device_info_fops = {
244static int acpi_video_device_state_open_fs(struct inode *inode, 248static int acpi_video_device_state_open_fs(struct inode *inode,
245 struct file *file); 249 struct file *file);
246static struct file_operations acpi_video_device_state_fops = { 250static struct file_operations acpi_video_device_state_fops = {
251 .owner = THIS_MODULE,
247 .open = acpi_video_device_state_open_fs, 252 .open = acpi_video_device_state_open_fs,
248 .read = seq_read, 253 .read = seq_read,
249 .llseek = seq_lseek, 254 .llseek = seq_lseek,
@@ -253,6 +258,7 @@ static struct file_operations acpi_video_device_state_fops = {
253static int acpi_video_device_brightness_open_fs(struct inode *inode, 258static int acpi_video_device_brightness_open_fs(struct inode *inode,
254 struct file *file); 259 struct file *file);
255static struct file_operations acpi_video_device_brightness_fops = { 260static struct file_operations acpi_video_device_brightness_fops = {
261 .owner = THIS_MODULE,
256 .open = acpi_video_device_brightness_open_fs, 262 .open = acpi_video_device_brightness_open_fs,
257 .read = seq_read, 263 .read = seq_read,
258 .llseek = seq_lseek, 264 .llseek = seq_lseek,
@@ -262,6 +268,7 @@ static struct file_operations acpi_video_device_brightness_fops = {
262static int acpi_video_device_EDID_open_fs(struct inode *inode, 268static int acpi_video_device_EDID_open_fs(struct inode *inode,
263 struct file *file); 269 struct file *file);
264static struct file_operations acpi_video_device_EDID_fops = { 270static struct file_operations acpi_video_device_EDID_fops = {
271 .owner = THIS_MODULE,
265 .open = acpi_video_device_EDID_open_fs, 272 .open = acpi_video_device_EDID_open_fs,
266 .read = seq_read, 273 .read = seq_read,
267 .llseek = seq_lseek, 274 .llseek = seq_lseek,
@@ -734,21 +741,19 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
734 if (IS_ERR(device->cdev)) 741 if (IS_ERR(device->cdev))
735 return; 742 return;
736 743
737 if (device->cdev) { 744 printk(KERN_INFO PREFIX
738 printk(KERN_INFO PREFIX 745 "%s is registered as cooling_device%d\n",
739 "%s is registered as cooling_device%d\n", 746 device->dev->dev.bus_id, device->cdev->id);
740 device->dev->dev.bus_id, device->cdev->id); 747 result = sysfs_create_link(&device->dev->dev.kobj,
741 result = sysfs_create_link(&device->dev->dev.kobj, 748 &device->cdev->device.kobj,
742 &device->cdev->device.kobj, 749 "thermal_cooling");
743 "thermal_cooling"); 750 if (result)
744 if (result) 751 printk(KERN_ERR PREFIX "Create sysfs link\n");
745 printk(KERN_ERR PREFIX "Create sysfs link\n"); 752 result = sysfs_create_link(&device->cdev->device.kobj,
746 result = sysfs_create_link(&device->cdev->device.kobj, 753 &device->dev->dev.kobj, "device");
747 &device->dev->dev.kobj, 754 if (result)
748 "device"); 755 printk(KERN_ERR PREFIX "Create sysfs link\n");
749 if (result) 756
750 printk(KERN_ERR PREFIX "Create sysfs link\n");
751 }
752 } 757 }
753 if (device->cap._DCS && device->cap._DSS){ 758 if (device->cap._DCS && device->cap._DSS){
754 static int count = 0; 759 static int count = 0;
@@ -1050,87 +1055,82 @@ acpi_video_device_EDID_open_fs(struct inode *inode, struct file *file)
1050 1055
1051static int acpi_video_device_add_fs(struct acpi_device *device) 1056static int acpi_video_device_add_fs(struct acpi_device *device)
1052{ 1057{
1053 struct proc_dir_entry *entry = NULL; 1058 struct proc_dir_entry *entry, *device_dir;
1054 struct acpi_video_device *vid_dev; 1059 struct acpi_video_device *vid_dev;
1055 1060
1056
1057 if (!device)
1058 return -ENODEV;
1059
1060 vid_dev = acpi_driver_data(device); 1061 vid_dev = acpi_driver_data(device);
1061 if (!vid_dev) 1062 if (!vid_dev)
1062 return -ENODEV; 1063 return -ENODEV;
1063 1064
1064 if (!acpi_device_dir(device)) { 1065 device_dir = proc_mkdir(acpi_device_bid(device),
1065 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), 1066 vid_dev->video->dir);
1066 vid_dev->video->dir); 1067 if (!device_dir)
1067 if (!acpi_device_dir(device)) 1068 return -ENOMEM;
1068 return -ENODEV; 1069
1069 acpi_device_dir(device)->owner = THIS_MODULE; 1070 device_dir->owner = THIS_MODULE;
1070 }
1071 1071
1072 /* 'info' [R] */ 1072 /* 'info' [R] */
1073 entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device)); 1073 entry = proc_create_data("info", S_IRUGO, device_dir,
1074 &acpi_video_device_info_fops, acpi_driver_data(device));
1074 if (!entry) 1075 if (!entry)
1075 return -ENODEV; 1076 goto err_remove_dir;
1076 else {
1077 entry->proc_fops = &acpi_video_device_info_fops;
1078 entry->data = acpi_driver_data(device);
1079 entry->owner = THIS_MODULE;
1080 }
1081 1077
1082 /* 'state' [R/W] */ 1078 /* 'state' [R/W] */
1083 entry = 1079 acpi_video_device_state_fops.write = acpi_video_device_write_state;
1084 create_proc_entry("state", S_IFREG | S_IRUGO | S_IWUSR, 1080 entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR,
1085 acpi_device_dir(device)); 1081 device_dir,
1082 &acpi_video_device_state_fops,
1083 acpi_driver_data(device));
1086 if (!entry) 1084 if (!entry)
1087 return -ENODEV; 1085 goto err_remove_info;
1088 else {
1089 acpi_video_device_state_fops.write = acpi_video_device_write_state;
1090 entry->proc_fops = &acpi_video_device_state_fops;
1091 entry->data = acpi_driver_data(device);
1092 entry->owner = THIS_MODULE;
1093 }
1094 1086
1095 /* 'brightness' [R/W] */ 1087 /* 'brightness' [R/W] */
1096 entry = 1088 acpi_video_device_brightness_fops.write =
1097 create_proc_entry("brightness", S_IFREG | S_IRUGO | S_IWUSR, 1089 acpi_video_device_write_brightness;
1098 acpi_device_dir(device)); 1090 entry = proc_create_data("brightness", S_IFREG | S_IRUGO | S_IWUSR,
1091 device_dir,
1092 &acpi_video_device_brightness_fops,
1093 acpi_driver_data(device));
1099 if (!entry) 1094 if (!entry)
1100 return -ENODEV; 1095 goto err_remove_state;
1101 else {
1102 acpi_video_device_brightness_fops.write = acpi_video_device_write_brightness;
1103 entry->proc_fops = &acpi_video_device_brightness_fops;
1104 entry->data = acpi_driver_data(device);
1105 entry->owner = THIS_MODULE;
1106 }
1107 1096
1108 /* 'EDID' [R] */ 1097 /* 'EDID' [R] */
1109 entry = create_proc_entry("EDID", S_IRUGO, acpi_device_dir(device)); 1098 entry = proc_create_data("EDID", S_IRUGO, device_dir,
1099 &acpi_video_device_EDID_fops,
1100 acpi_driver_data(device));
1110 if (!entry) 1101 if (!entry)
1111 return -ENODEV; 1102 goto err_remove_brightness;
1112 else { 1103
1113 entry->proc_fops = &acpi_video_device_EDID_fops; 1104 acpi_device_dir(device) = device_dir;
1114 entry->data = acpi_driver_data(device);
1115 entry->owner = THIS_MODULE;
1116 }
1117 1105
1118 return 0; 1106 return 0;
1107
1108 err_remove_brightness:
1109 remove_proc_entry("brightness", device_dir);
1110 err_remove_state:
1111 remove_proc_entry("state", device_dir);
1112 err_remove_info:
1113 remove_proc_entry("info", device_dir);
1114 err_remove_dir:
1115 remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
1116 return -ENOMEM;
1119} 1117}
1120 1118
1121static int acpi_video_device_remove_fs(struct acpi_device *device) 1119static int acpi_video_device_remove_fs(struct acpi_device *device)
1122{ 1120{
1123 struct acpi_video_device *vid_dev; 1121 struct acpi_video_device *vid_dev;
1122 struct proc_dir_entry *device_dir;
1124 1123
1125 vid_dev = acpi_driver_data(device); 1124 vid_dev = acpi_driver_data(device);
1126 if (!vid_dev || !vid_dev->video || !vid_dev->video->dir) 1125 if (!vid_dev || !vid_dev->video || !vid_dev->video->dir)
1127 return -ENODEV; 1126 return -ENODEV;
1128 1127
1129 if (acpi_device_dir(device)) { 1128 device_dir = acpi_device_dir(device);
1130 remove_proc_entry("info", acpi_device_dir(device)); 1129 if (device_dir) {
1131 remove_proc_entry("state", acpi_device_dir(device)); 1130 remove_proc_entry("info", device_dir);
1132 remove_proc_entry("brightness", acpi_device_dir(device)); 1131 remove_proc_entry("state", device_dir);
1133 remove_proc_entry("EDID", acpi_device_dir(device)); 1132 remove_proc_entry("brightness", device_dir);
1133 remove_proc_entry("EDID", device_dir);
1134 remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir); 1134 remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
1135 acpi_device_dir(device) = NULL; 1135 acpi_device_dir(device) = NULL;
1136 } 1136 }
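The rewritten acpi_video_device_add_fs() also switches from bailing out with -ENODEV to the usual kernel unwind idiom, where each failure label removes everything created before it. A condensed sketch of that idiom with two hypothetical entries:

#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/errno.h>

/* Sketch of the goto-unwind idiom with two hypothetical proc entries. */
static int example_add_entries(struct proc_dir_entry *parent, void *data,
			       const struct file_operations *a_fops,
			       const struct file_operations *b_fops)
{
	if (!proc_create_data("a", S_IRUGO, parent, a_fops, data))
		goto err;
	if (!proc_create_data("b", S_IRUGO, parent, b_fops, data))
		goto err_remove_a;
	return 0;

err_remove_a:
	remove_proc_entry("a", parent);
err:
	return -ENOMEM;
}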
@@ -1337,94 +1337,81 @@ acpi_video_bus_write_DOS(struct file *file,
1337 1337
1338static int acpi_video_bus_add_fs(struct acpi_device *device) 1338static int acpi_video_bus_add_fs(struct acpi_device *device)
1339{ 1339{
1340 struct proc_dir_entry *entry = NULL; 1340 struct acpi_video_bus *video = acpi_driver_data(device);
1341 struct acpi_video_bus *video; 1341 struct proc_dir_entry *device_dir;
1342 struct proc_dir_entry *entry;
1342 1343
1344 device_dir = proc_mkdir(acpi_device_bid(device), acpi_video_dir);
1345 if (!device_dir)
1346 return -ENOMEM;
1343 1347
1344 video = acpi_driver_data(device); 1348 device_dir->owner = THIS_MODULE;
1345
1346 if (!acpi_device_dir(device)) {
1347 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
1348 acpi_video_dir);
1349 if (!acpi_device_dir(device))
1350 return -ENODEV;
1351 video->dir = acpi_device_dir(device);
1352 acpi_device_dir(device)->owner = THIS_MODULE;
1353 }
1354 1349
1355 /* 'info' [R] */ 1350 /* 'info' [R] */
1356 entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device)); 1351 entry = proc_create_data("info", S_IRUGO, device_dir,
1352 &acpi_video_bus_info_fops,
1353 acpi_driver_data(device));
1357 if (!entry) 1354 if (!entry)
1358 return -ENODEV; 1355 goto err_remove_dir;
1359 else {
1360 entry->proc_fops = &acpi_video_bus_info_fops;
1361 entry->data = acpi_driver_data(device);
1362 entry->owner = THIS_MODULE;
1363 }
1364 1356
1365 /* 'ROM' [R] */ 1357 /* 'ROM' [R] */
1366 entry = create_proc_entry("ROM", S_IRUGO, acpi_device_dir(device)); 1358 entry = proc_create_data("ROM", S_IRUGO, device_dir,
1359 &acpi_video_bus_ROM_fops,
1360 acpi_driver_data(device));
1367 if (!entry) 1361 if (!entry)
1368 return -ENODEV; 1362 goto err_remove_info;
1369 else {
1370 entry->proc_fops = &acpi_video_bus_ROM_fops;
1371 entry->data = acpi_driver_data(device);
1372 entry->owner = THIS_MODULE;
1373 }
1374 1363
1375 /* 'POST_info' [R] */ 1364 /* 'POST_info' [R] */
1376 entry = 1365 entry = proc_create_data("POST_info", S_IRUGO, device_dir,
1377 create_proc_entry("POST_info", S_IRUGO, acpi_device_dir(device)); 1366 &acpi_video_bus_POST_info_fops,
1367 acpi_driver_data(device));
1378 if (!entry) 1368 if (!entry)
1379 return -ENODEV; 1369 goto err_remove_rom;
1380 else {
1381 entry->proc_fops = &acpi_video_bus_POST_info_fops;
1382 entry->data = acpi_driver_data(device);
1383 entry->owner = THIS_MODULE;
1384 }
1385 1370
1386 /* 'POST' [R/W] */ 1371 /* 'POST' [R/W] */
1387 entry = 1372 acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST;
1388 create_proc_entry("POST", S_IFREG | S_IRUGO | S_IRUSR, 1373 entry = proc_create_data("POST", S_IFREG | S_IRUGO | S_IWUSR,
1389 acpi_device_dir(device)); 1374 device_dir,
1375 &acpi_video_bus_POST_fops,
1376 acpi_driver_data(device));
1390 if (!entry) 1377 if (!entry)
1391 return -ENODEV; 1378 goto err_remove_post_info;
1392 else {
1393 acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST;
1394 entry->proc_fops = &acpi_video_bus_POST_fops;
1395 entry->data = acpi_driver_data(device);
1396 entry->owner = THIS_MODULE;
1397 }
1398 1379
1399 /* 'DOS' [R/W] */ 1380 /* 'DOS' [R/W] */
1400 entry = 1381 acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS;
1401 create_proc_entry("DOS", S_IFREG | S_IRUGO | S_IRUSR, 1382 entry = proc_create_data("DOS", S_IFREG | S_IRUGO | S_IWUSR,
1402 acpi_device_dir(device)); 1383 device_dir,
1384 &acpi_video_bus_DOS_fops,
1385 acpi_driver_data(device));
1403 if (!entry) 1386 if (!entry)
1404 return -ENODEV; 1387 goto err_remove_post;
1405 else {
1406 acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS;
1407 entry->proc_fops = &acpi_video_bus_DOS_fops;
1408 entry->data = acpi_driver_data(device);
1409 entry->owner = THIS_MODULE;
1410 }
1411 1388
1389 video->dir = acpi_device_dir(device) = device_dir;
1412 return 0; 1390 return 0;
1391
1392 err_remove_post:
1393 remove_proc_entry("POST", device_dir);
1394 err_remove_post_info:
1395 remove_proc_entry("POST_info", device_dir);
1396 err_remove_rom:
1397 remove_proc_entry("ROM", device_dir);
1398 err_remove_info:
1399 remove_proc_entry("info", device_dir);
1400 err_remove_dir:
1401 remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
1402 return -ENOMEM;
1413} 1403}
1414 1404
1415static int acpi_video_bus_remove_fs(struct acpi_device *device) 1405static int acpi_video_bus_remove_fs(struct acpi_device *device)
1416{ 1406{
1417 struct acpi_video_bus *video; 1407 struct proc_dir_entry *device_dir = acpi_device_dir(device);
1418 1408
1419 1409 if (device_dir) {
1420 video = acpi_driver_data(device); 1410 remove_proc_entry("info", device_dir);
1421 1411 remove_proc_entry("ROM", device_dir);
1422 if (acpi_device_dir(device)) { 1412 remove_proc_entry("POST_info", device_dir);
1423 remove_proc_entry("info", acpi_device_dir(device)); 1413 remove_proc_entry("POST", device_dir);
1424 remove_proc_entry("ROM", acpi_device_dir(device)); 1414 remove_proc_entry("DOS", device_dir);
1425 remove_proc_entry("POST_info", acpi_device_dir(device));
1426 remove_proc_entry("POST", acpi_device_dir(device));
1427 remove_proc_entry("DOS", acpi_device_dir(device));
1428 remove_proc_entry(acpi_device_bid(device), acpi_video_dir); 1415 remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
1429 acpi_device_dir(device) = NULL; 1416 acpi_device_dir(device) = NULL;
1430 } 1417 }
@@ -1440,11 +1427,15 @@ static int acpi_video_bus_remove_fs(struct acpi_device *device)
1440static struct acpi_video_device_attrib* 1427static struct acpi_video_device_attrib*
1441acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id) 1428acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
1442{ 1429{
1443 int count; 1430 struct acpi_video_enumerated_device *ids;
1431 int i;
1432
1433 for (i = 0; i < video->attached_count; i++) {
1434 ids = &video->attached_array[i];
1435 if ((ids->value.int_val & 0xffff) == device_id)
1436 return &ids->value.attrib;
1437 }
1444 1438
1445 for(count = 0; count < video->attached_count; count++)
1446 if((video->attached_array[count].value.int_val & 0xffff) == device_id)
1447 return &(video->attached_array[count].value.attrib);
1448 return NULL; 1439 return NULL;
1449} 1440}
1450 1441
@@ -1571,20 +1562,16 @@ static void
1571acpi_video_device_bind(struct acpi_video_bus *video, 1562acpi_video_device_bind(struct acpi_video_bus *video,
1572 struct acpi_video_device *device) 1563 struct acpi_video_device *device)
1573{ 1564{
1565 struct acpi_video_enumerated_device *ids;
1574 int i; 1566 int i;
1575 1567
1576#define IDS_VAL(i) video->attached_array[i].value.int_val 1568 for (i = 0; i < video->attached_count; i++) {
1577#define IDS_BIND(i) video->attached_array[i].bind_info 1569 ids = &video->attached_array[i];
1578 1570 if (device->device_id == (ids->value.int_val & 0xffff)) {
1579 for (i = 0; IDS_VAL(i) != ACPI_VIDEO_HEAD_INVALID && 1571 ids->bind_info = device;
1580 i < video->attached_count; i++) {
1581 if (device->device_id == (IDS_VAL(i) & 0xffff)) {
1582 IDS_BIND(i) = device;
1583 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "device_bind %d\n", i)); 1572 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "device_bind %d\n", i));
1584 } 1573 }
1585 } 1574 }
1586#undef IDS_VAL
1587#undef IDS_BIND
1588} 1575}
1589 1576
1590/* 1577/*
@@ -1603,7 +1590,7 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
1603 int status; 1590 int status;
1604 int count; 1591 int count;
1605 int i; 1592 int i;
1606 struct acpi_video_enumerated_device *active_device_list; 1593 struct acpi_video_enumerated_device *active_list;
1607 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 1594 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1608 union acpi_object *dod = NULL; 1595 union acpi_object *dod = NULL;
1609 union acpi_object *obj; 1596 union acpi_object *obj;
@@ -1624,13 +1611,10 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
1624 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d video heads in _DOD\n", 1611 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d video heads in _DOD\n",
1625 dod->package.count)); 1612 dod->package.count));
1626 1613
1627 active_device_list = kmalloc((1 + 1614 active_list = kcalloc(1 + dod->package.count,
1628 dod->package.count) * 1615 sizeof(struct acpi_video_enumerated_device),
1629 sizeof(struct 1616 GFP_KERNEL);
1630 acpi_video_enumerated_device), 1617 if (!active_list) {
1631 GFP_KERNEL);
1632
1633 if (!active_device_list) {
1634 status = -ENOMEM; 1618 status = -ENOMEM;
1635 goto out; 1619 goto out;
1636 } 1620 }
@@ -1640,23 +1624,24 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
1640 obj = &dod->package.elements[i]; 1624 obj = &dod->package.elements[i];
1641 1625
1642 if (obj->type != ACPI_TYPE_INTEGER) { 1626 if (obj->type != ACPI_TYPE_INTEGER) {
1643 printk(KERN_ERR PREFIX "Invalid _DOD data\n"); 1627 printk(KERN_ERR PREFIX
1644 active_device_list[i].value.int_val = 1628 "Invalid _DOD data in element %d\n", i);
1645 ACPI_VIDEO_HEAD_INVALID; 1629 continue;
1646 } 1630 }
1647 active_device_list[i].value.int_val = obj->integer.value; 1631
1648 active_device_list[i].bind_info = NULL; 1632 active_list[count].value.int_val = obj->integer.value;
1633 active_list[count].bind_info = NULL;
1649 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "dod element[%d] = %d\n", i, 1634 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "dod element[%d] = %d\n", i,
1650 (int)obj->integer.value)); 1635 (int)obj->integer.value));
1651 count++; 1636 count++;
1652 } 1637 }
1653 active_device_list[count].value.int_val = ACPI_VIDEO_HEAD_END;
1654 1638
1655 kfree(video->attached_array); 1639 kfree(video->attached_array);
1656 1640
1657 video->attached_array = active_device_list; 1641 video->attached_array = active_list;
1658 video->attached_count = count; 1642 video->attached_count = count;
1659 out: 1643
1644 out:
1660 kfree(buffer.pointer); 1645 kfree(buffer.pointer);
1661 return status; 1646 return status;
1662} 1647}
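In the _DOD enumeration rework, kcalloc() replaces an open-coded kmalloc() of (1 + count) elements, and invalid elements are now skipped instead of being flagged with the removed ACPI_VIDEO_HEAD_* sentinels. A small sketch of the allocation side; the struct is a stand-in for acpi_video_enumerated_device:

#include <linux/slab.h>

struct example_head {		/* stand-in for acpi_video_enumerated_device */
	unsigned long value;
	void *bind_info;
};

/* kcalloc() zeroes the memory and guards the count * size multiplication
 * against overflow, unlike open-coded kmalloc((1 + count) * size, ...). */
static struct example_head *alloc_heads(unsigned int count)
{
	return kcalloc(1 + count, sizeof(struct example_head), GFP_KERNEL);
}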
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 292aa9a0f02f..1c11df9a5f32 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -566,11 +566,11 @@ config PATA_RADISYS
566 566
567 If unsure, say N. 567 If unsure, say N.
568 568
569config PATA_RB500 569config PATA_RB532
570 tristate "RouterBoard 500 PATA CompactFlash support" 570 tristate "RouterBoard 532 PATA CompactFlash support"
571 depends on MIKROTIK_RB500 571 depends on MIKROTIK_RB532
572 help 572 help
573 This option enables support for the RouterBoard 500 573 This option enables support for the RouterBoard 532
574 PATA CompactFlash controller. 574 PATA CompactFlash controller.
575 575
576 If unsure, say N. 576 If unsure, say N.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 1fbc2aa648b7..b693d829383a 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -55,7 +55,7 @@ obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
55obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o 55obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
56obj-$(CONFIG_PATA_QDI) += pata_qdi.o 56obj-$(CONFIG_PATA_QDI) += pata_qdi.o
57obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o 57obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
58obj-$(CONFIG_PATA_RB500) += pata_rb500_cf.o 58obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
59obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o 59obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
60obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o 60obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
61obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o 61obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 7c4f886f1f16..8cace9aa9c03 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -358,7 +358,7 @@ static const struct ata_port_info ahci_port_info[] = {
358 /* board_ahci_sb600 */ 358 /* board_ahci_sb600 */
359 { 359 {
360 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | 360 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
361 AHCI_HFLAG_32BIT_ONLY | 361 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
362 AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP), 362 AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
363 .flags = AHCI_FLAG_COMMON, 363 .flags = AHCI_FLAG_COMMON,
364 .pio_mask = 0x1f, /* pio0-4 */ 364 .pio_mask = 0x1f, /* pio0-4 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 51b7d2fad36a..3bc488538204 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3933,6 +3933,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
 	/* Devices which get the IVB wrong */
 	{ "QUANTUM FIREBALLlct10 05", "A03.0900",	ATA_HORKAGE_IVB, },
+	/* Maybe we should just blacklist TSSTcorp... */
+	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
+	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
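Note: the two SB00/SB01 firmware revisions of the SH-S202H join the existing SH-S202J/N entries, so the IVB workaround now covers them as well. For readers unfamiliar with how such a quirk table is consumed, a self-contained illustration of the lookup pattern follows (plain C, deliberately simplified; it is not libata-core code, whose real matcher handles fixed-width IDENTIFY strings and patterns):

	#include <string.h>

	struct quirk {
		const char *model;
		const char *rev;	/* NULL matches any firmware revision */
		unsigned long horkage;
	};

	/* OR together the horkage bits of every entry matching this device */
	static unsigned long find_horkage(const struct quirk *tbl,
					  const char *model, const char *rev)
	{
		unsigned long h = 0;

		for (; tbl->model; tbl++)
			if (!strcmp(tbl->model, model) &&
			    (!tbl->rev || !strcmp(tbl->rev, rev)))
				h |= tbl->horkage;
		return h;
	}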
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a34f32442edf..3ce43920e459 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -49,7 +49,11 @@
 
 #include "libata.h"
 
 #define SECTOR_SIZE		512
+#define ATA_SCSI_RBUF_SIZE	4096
+
+static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
+static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
 
 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
 
@@ -179,6 +183,13 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
 	    ata_scsi_lpm_show, ata_scsi_lpm_put);
 EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
 
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+{
+	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
+}
+
 static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
 				   void (*done)(struct scsi_cmnd *))
 {
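Note: ata_scsi_set_sense() becomes a static helper here; its old exported definition and the libata.h prototype are removed further down in this patch. A minimal usage sketch, assuming the usual ILLEGAL REQUEST / "invalid field in CDB" sense codes (the wrapper name is illustrative; the real ata_scsi_invalid_field() body is not shown in this hunk):

	/* illustrative caller: reject a command with "Invalid field in CDB" */
	static void reject_cdb(struct scsi_cmnd *cmd,
			       void (*done)(struct scsi_cmnd *))
	{
		/* 0x24/0x00: INVALID FIELD IN CDB, sense key ILLEGAL_REQUEST */
		ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
		done(cmd);
	}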
@@ -1632,53 +1643,48 @@ defer:
1632 1643
1633/** 1644/**
1634 * ata_scsi_rbuf_get - Map response buffer. 1645 * ata_scsi_rbuf_get - Map response buffer.
1635 * @cmd: SCSI command containing buffer to be mapped. 1646 * @flags: unsigned long variable to store irq enable status
1636 * @buf_out: Pointer to mapped area. 1647 * @copy_in: copy in from user buffer
1637 * 1648 *
1638 * Maps buffer contained within SCSI command @cmd. 1649 * Prepare buffer for simulated SCSI commands.
1639 * 1650 *
1640 * LOCKING: 1651 * LOCKING:
1641 * spin_lock_irqsave(host lock) 1652 * spin_lock_irqsave(ata_scsi_rbuf_lock) on success
1642 * 1653 *
1643 * RETURNS: 1654 * RETURNS:
1644 * Length of response buffer. 1655 * Pointer to response buffer.
1645 */ 1656 */
1646 1657static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
1647static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out) 1658 unsigned long *flags)
1648{ 1659{
1649 u8 *buf; 1660 spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);
1650 unsigned int buflen;
1651
1652 struct scatterlist *sg = scsi_sglist(cmd);
1653 1661
1654 if (sg) { 1662 memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
1655 buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; 1663 if (copy_in)
1656 buflen = sg->length; 1664 sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
1657 } else { 1665 ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
1658 buf = NULL; 1666 return ata_scsi_rbuf;
1659 buflen = 0;
1660 }
1661
1662 *buf_out = buf;
1663 return buflen;
1664} 1667}
1665 1668
1666/** 1669/**
1667 * ata_scsi_rbuf_put - Unmap response buffer. 1670 * ata_scsi_rbuf_put - Unmap response buffer.
1668 * @cmd: SCSI command containing buffer to be unmapped. 1671 * @cmd: SCSI command containing buffer to be unmapped.
1669 * @buf: buffer to unmap 1672 * @copy_out: copy out result
1673 * @flags: @flags passed to ata_scsi_rbuf_get()
1670 * 1674 *
1671 * Unmaps response buffer contained within @cmd. 1675 * Returns rbuf buffer. The result is copied to @cmd's buffer if
1676 * @copy_back is true.
1672 * 1677 *
1673 * LOCKING: 1678 * LOCKING:
1674 * spin_lock_irqsave(host lock) 1679 * Unlocks ata_scsi_rbuf_lock.
1675 */ 1680 */
1676 1681static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
1677static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) 1682 unsigned long *flags)
1678{ 1683{
1679 struct scatterlist *sg = scsi_sglist(cmd); 1684 if (copy_out)
1680 if (sg) 1685 sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
1681 kunmap_atomic(buf - sg->offset, KM_IRQ0); 1686 ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
1687 spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
1682} 1688}
1683 1689
1684/** 1690/**
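Note: the per-command kmap of the scatterlist gives way to a single static bounce buffer. ata_scsi_rbuf_get() now takes ata_scsi_rbuf_lock, zeroes the 4 KB ata_scsi_rbuf and optionally copies the command's SG data into it; ata_scsi_rbuf_put() copies the result back to the SG list and drops the lock. A minimal sketch of the resulting discipline, built only from the interfaces introduced above (not an excerpt of the patch):

	/* sketch: how a simulator path would use the helpers defined above */
	static void fill_simple_response(struct scsi_cmnd *cmd)
	{
		unsigned long flags;
		u8 *buf;

		buf = ata_scsi_rbuf_get(cmd, false, &flags);	/* lock + zeroed buffer */
		buf[0] = TYPE_DISK;				/* build the response here */
		ata_scsi_rbuf_put(cmd, true, &flags);		/* copy out via SG, unlock */
	}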
@@ -1696,24 +1702,17 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
1696 * LOCKING: 1702 * LOCKING:
1697 * spin_lock_irqsave(host lock) 1703 * spin_lock_irqsave(host lock)
1698 */ 1704 */
1699 1705static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1700void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 1706 unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
1701 unsigned int (*actor) (struct ata_scsi_args *args,
1702 u8 *rbuf, unsigned int buflen))
1703{ 1707{
1704 u8 *rbuf; 1708 u8 *rbuf;
1705 unsigned int buflen, rc; 1709 unsigned int rc;
1706 struct scsi_cmnd *cmd = args->cmd; 1710 struct scsi_cmnd *cmd = args->cmd;
1707 unsigned long flags; 1711 unsigned long flags;
1708 1712
1709 local_irq_save(flags); 1713 rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
1710 1714 rc = actor(args, rbuf);
1711 buflen = ata_scsi_rbuf_get(cmd, &rbuf); 1715 ata_scsi_rbuf_put(cmd, rc == 0, &flags);
1712 memset(rbuf, 0, buflen);
1713 rc = actor(args, rbuf, buflen);
1714 ata_scsi_rbuf_put(cmd, rbuf);
1715
1716 local_irq_restore(flags);
1717 1716
1718 if (rc == 0) 1717 if (rc == 0)
1719 cmd->result = SAM_STAT_GOOD; 1718 cmd->result = SAM_STAT_GOOD;
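Note: every simulator actor now shares the two-argument signature, and ata_scsi_rbuf_fill() handles the locking, zeroing and copy-out around it. A short dispatch sketch mirroring how the INQUIRY path elsewhere in this file would call it (the dispatch site is not in this hunk, and the ata_scsi_args member names should be read as an assumption rather than an excerpt):

	/* sketch: dispatching a simulated INQUIRY through the new helper */
	static void simulate_inquiry(struct ata_device *dev, struct scsi_cmnd *cmd,
				     void (*done)(struct scsi_cmnd *))
	{
		struct ata_scsi_args args = {
			.dev	= dev,		/* device being simulated */
			.id	= dev->id,	/* cached IDENTIFY data */
			.cmd	= cmd,		/* SCSI command to answer */
			.done	= done,		/* SCSI completion callback */
		};

		ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
	}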
@@ -1721,26 +1720,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1721} 1720}
1722 1721
1723/** 1722/**
1724 * ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer
1725 * @idx: byte index into SCSI response buffer
1726 * @val: value to set
1727 *
1728 * To be used by SCSI command simulator functions. This macros
1729 * expects two local variables, u8 *rbuf and unsigned int buflen,
1730 * are in scope.
1731 *
1732 * LOCKING:
1733 * None.
1734 */
1735#define ATA_SCSI_RBUF_SET(idx, val) do { \
1736 if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \
1737 } while (0)
1738
1739/**
1740 * ata_scsiop_inq_std - Simulate INQUIRY command 1723 * ata_scsiop_inq_std - Simulate INQUIRY command
1741 * @args: device IDENTIFY data / SCSI command of interest. 1724 * @args: device IDENTIFY data / SCSI command of interest.
1742 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1725 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1743 * @buflen: Response buffer length.
1744 * 1726 *
1745 * Returns standard device identification data associated 1727 * Returns standard device identification data associated
1746 * with non-VPD INQUIRY command output. 1728 * with non-VPD INQUIRY command output.
@@ -1748,10 +1730,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1748 * LOCKING: 1730 * LOCKING:
1749 * spin_lock_irqsave(host lock) 1731 * spin_lock_irqsave(host lock)
1750 */ 1732 */
1751 1733static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1752unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1753 unsigned int buflen)
1754{ 1734{
1735 const u8 versions[] = {
1736 0x60, /* SAM-3 (no version claimed) */
1737
1738 0x03,
1739 0x20, /* SBC-2 (no version claimed) */
1740
1741 0x02,
1742 0x60 /* SPC-3 (no version claimed) */
1743 };
1755 u8 hdr[] = { 1744 u8 hdr[] = {
1756 TYPE_DISK, 1745 TYPE_DISK,
1757 0, 1746 0,
@@ -1760,35 +1749,21 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1760 95 - 4 1749 95 - 4
1761 }; 1750 };
1762 1751
1752 VPRINTK("ENTER\n");
1753
1763 /* set scsi removeable (RMB) bit per ata bit */ 1754 /* set scsi removeable (RMB) bit per ata bit */
1764 if (ata_id_removeable(args->id)) 1755 if (ata_id_removeable(args->id))
1765 hdr[1] |= (1 << 7); 1756 hdr[1] |= (1 << 7);
1766 1757
1767 VPRINTK("ENTER\n");
1768
1769 memcpy(rbuf, hdr, sizeof(hdr)); 1758 memcpy(rbuf, hdr, sizeof(hdr));
1759 memcpy(&rbuf[8], "ATA ", 8);
1760 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
1761 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
1770 1762
1771 if (buflen > 35) { 1763 if (rbuf[32] == 0 || rbuf[32] == ' ')
1772 memcpy(&rbuf[8], "ATA ", 8); 1764 memcpy(&rbuf[32], "n/a ", 4);
1773 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
1774 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
1775 if (rbuf[32] == 0 || rbuf[32] == ' ')
1776 memcpy(&rbuf[32], "n/a ", 4);
1777 }
1778
1779 if (buflen > 63) {
1780 const u8 versions[] = {
1781 0x60, /* SAM-3 (no version claimed) */
1782
1783 0x03,
1784 0x20, /* SBC-2 (no version claimed) */
1785 1765
1786 0x02, 1766 memcpy(rbuf + 59, versions, sizeof(versions));
1787 0x60 /* SPC-3 (no version claimed) */
1788 };
1789
1790 memcpy(rbuf + 59, versions, sizeof(versions));
1791 }
1792 1767
1793 return 0; 1768 return 0;
1794} 1769}
@@ -1797,27 +1772,22 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1797 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages 1772 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1798 * @args: device IDENTIFY data / SCSI command of interest. 1773 * @args: device IDENTIFY data / SCSI command of interest.
1799 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1774 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1800 * @buflen: Response buffer length.
1801 * 1775 *
1802 * Returns list of inquiry VPD pages available. 1776 * Returns list of inquiry VPD pages available.
1803 * 1777 *
1804 * LOCKING: 1778 * LOCKING:
1805 * spin_lock_irqsave(host lock) 1779 * spin_lock_irqsave(host lock)
1806 */ 1780 */
1807 1781static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
1808unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
1809 unsigned int buflen)
1810{ 1782{
1811 const u8 pages[] = { 1783 const u8 pages[] = {
1812 0x00, /* page 0x00, this page */ 1784 0x00, /* page 0x00, this page */
1813 0x80, /* page 0x80, unit serial no page */ 1785 0x80, /* page 0x80, unit serial no page */
1814 0x83 /* page 0x83, device ident page */ 1786 0x83 /* page 0x83, device ident page */
1815 }; 1787 };
1816 rbuf[3] = sizeof(pages); /* number of supported VPD pages */
1817
1818 if (buflen > 6)
1819 memcpy(rbuf + 4, pages, sizeof(pages));
1820 1788
1789 rbuf[3] = sizeof(pages); /* number of supported VPD pages */
1790 memcpy(rbuf + 4, pages, sizeof(pages));
1821 return 0; 1791 return 0;
1822} 1792}
1823 1793
@@ -1825,16 +1795,13 @@ unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
1825 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number 1795 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1826 * @args: device IDENTIFY data / SCSI command of interest. 1796 * @args: device IDENTIFY data / SCSI command of interest.
1827 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1797 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1828 * @buflen: Response buffer length.
1829 * 1798 *
1830 * Returns ATA device serial number. 1799 * Returns ATA device serial number.
1831 * 1800 *
1832 * LOCKING: 1801 * LOCKING:
1833 * spin_lock_irqsave(host lock) 1802 * spin_lock_irqsave(host lock)
1834 */ 1803 */
1835 1804static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
1836unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1837 unsigned int buflen)
1838{ 1805{
1839 const u8 hdr[] = { 1806 const u8 hdr[] = {
1840 0, 1807 0,
@@ -1842,12 +1809,10 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1842 0, 1809 0,
1843 ATA_ID_SERNO_LEN, /* page len */ 1810 ATA_ID_SERNO_LEN, /* page len */
1844 }; 1811 };
1845 memcpy(rbuf, hdr, sizeof(hdr));
1846
1847 if (buflen > (ATA_ID_SERNO_LEN + 4 - 1))
1848 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1849 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
1850 1812
1813 memcpy(rbuf, hdr, sizeof(hdr));
1814 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1815 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
1851 return 0; 1816 return 0;
1852} 1817}
1853 1818
@@ -1855,7 +1820,6 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1855 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity 1820 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1856 * @args: device IDENTIFY data / SCSI command of interest. 1821 * @args: device IDENTIFY data / SCSI command of interest.
1857 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1822 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1858 * @buflen: Response buffer length.
1859 * 1823 *
1860 * Yields two logical unit device identification designators: 1824 * Yields two logical unit device identification designators:
1861 * - vendor specific ASCII containing the ATA serial number 1825 * - vendor specific ASCII containing the ATA serial number
@@ -1865,41 +1829,37 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1865 * LOCKING: 1829 * LOCKING:
1866 * spin_lock_irqsave(host lock) 1830 * spin_lock_irqsave(host lock)
1867 */ 1831 */
1868 1832static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
1869unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1870 unsigned int buflen)
1871{ 1833{
1872 int num;
1873 const int sat_model_serial_desc_len = 68; 1834 const int sat_model_serial_desc_len = 68;
1835 int num;
1874 1836
1875 rbuf[1] = 0x83; /* this page code */ 1837 rbuf[1] = 0x83; /* this page code */
1876 num = 4; 1838 num = 4;
1877 1839
1878 if (buflen > (ATA_ID_SERNO_LEN + num + 3)) { 1840 /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */
1879 /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */ 1841 rbuf[num + 0] = 2;
1880 rbuf[num + 0] = 2; 1842 rbuf[num + 3] = ATA_ID_SERNO_LEN;
1881 rbuf[num + 3] = ATA_ID_SERNO_LEN; 1843 num += 4;
1882 num += 4; 1844 ata_id_string(args->id, (unsigned char *) rbuf + num,
1883 ata_id_string(args->id, (unsigned char *) rbuf + num, 1845 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
1884 ATA_ID_SERNO, ATA_ID_SERNO_LEN); 1846 num += ATA_ID_SERNO_LEN;
1885 num += ATA_ID_SERNO_LEN; 1847
1886 } 1848 /* SAT defined lu model and serial numbers descriptor */
1887 if (buflen > (sat_model_serial_desc_len + num + 3)) { 1849 /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */
1888 /* SAT defined lu model and serial numbers descriptor */ 1850 rbuf[num + 0] = 2;
1889 /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */ 1851 rbuf[num + 1] = 1;
1890 rbuf[num + 0] = 2; 1852 rbuf[num + 3] = sat_model_serial_desc_len;
1891 rbuf[num + 1] = 1; 1853 num += 4;
1892 rbuf[num + 3] = sat_model_serial_desc_len; 1854 memcpy(rbuf + num, "ATA ", 8);
1893 num += 4; 1855 num += 8;
1894 memcpy(rbuf + num, "ATA ", 8); 1856 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
1895 num += 8; 1857 ATA_ID_PROD_LEN);
1896 ata_id_string(args->id, (unsigned char *) rbuf + num, 1858 num += ATA_ID_PROD_LEN;
1897 ATA_ID_PROD, ATA_ID_PROD_LEN); 1859 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
1898 num += ATA_ID_PROD_LEN; 1860 ATA_ID_SERNO_LEN);
1899 ata_id_string(args->id, (unsigned char *) rbuf + num, 1861 num += ATA_ID_SERNO_LEN;
1900 ATA_ID_SERNO, ATA_ID_SERNO_LEN); 1862
1901 num += ATA_ID_SERNO_LEN;
1902 }
1903 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ 1863 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
1904 return 0; 1864 return 0;
1905} 1865}
@@ -1908,35 +1868,26 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1908 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info 1868 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
1909 * @args: device IDENTIFY data / SCSI command of interest. 1869 * @args: device IDENTIFY data / SCSI command of interest.
1910 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1870 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1911 * @buflen: Response buffer length.
1912 * 1871 *
1913 * Yields SAT-specified ATA VPD page. 1872 * Yields SAT-specified ATA VPD page.
1914 * 1873 *
1915 * LOCKING: 1874 * LOCKING:
1916 * spin_lock_irqsave(host lock) 1875 * spin_lock_irqsave(host lock)
1917 */ 1876 */
1918 1877static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
1919static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
1920 unsigned int buflen)
1921{ 1878{
1922 u8 pbuf[60];
1923 struct ata_taskfile tf; 1879 struct ata_taskfile tf;
1924 unsigned int i;
1925 1880
1926 if (!buflen)
1927 return 0;
1928
1929 memset(&pbuf, 0, sizeof(pbuf));
1930 memset(&tf, 0, sizeof(tf)); 1881 memset(&tf, 0, sizeof(tf));
1931 1882
1932 pbuf[1] = 0x89; /* our page code */ 1883 rbuf[1] = 0x89; /* our page code */
1933 pbuf[2] = (0x238 >> 8); /* page size fixed at 238h */ 1884 rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
1934 pbuf[3] = (0x238 & 0xff); 1885 rbuf[3] = (0x238 & 0xff);
1935 1886
1936 memcpy(&pbuf[8], "linux ", 8); 1887 memcpy(&rbuf[8], "linux ", 8);
1937 memcpy(&pbuf[16], "libata ", 16); 1888 memcpy(&rbuf[16], "libata ", 16);
1938 memcpy(&pbuf[32], DRV_VERSION, 4); 1889 memcpy(&rbuf[32], DRV_VERSION, 4);
1939 ata_id_string(args->id, &pbuf[32], ATA_ID_FW_REV, 4); 1890 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
1940 1891
1941 /* we don't store the ATA device signature, so we fake it */ 1892 /* we don't store the ATA device signature, so we fake it */
1942 1893
@@ -1944,19 +1895,12 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
1944 tf.lbal = 0x1; 1895 tf.lbal = 0x1;
1945 tf.nsect = 0x1; 1896 tf.nsect = 0x1;
1946 1897
1947 ata_tf_to_fis(&tf, 0, 1, &pbuf[36]); /* TODO: PMP? */ 1898 ata_tf_to_fis(&tf, 0, 1, &rbuf[36]); /* TODO: PMP? */
1948 pbuf[36] = 0x34; /* force D2H Reg FIS (34h) */ 1899 rbuf[36] = 0x34; /* force D2H Reg FIS (34h) */
1949 1900
1950 pbuf[56] = ATA_CMD_ID_ATA; 1901 rbuf[56] = ATA_CMD_ID_ATA;
1951 1902
1952 i = min(buflen, 60U); 1903 memcpy(&rbuf[60], &args->id[0], 512);
1953 memcpy(rbuf, &pbuf[0], i);
1954 buflen -= i;
1955
1956 if (!buflen)
1957 return 0;
1958
1959 memcpy(&rbuf[60], &args->id[0], min(buflen, 512U));
1960 return 0; 1904 return 0;
1961} 1905}
1962 1906
@@ -1964,7 +1908,6 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
1964 * ata_scsiop_noop - Command handler that simply returns success. 1908 * ata_scsiop_noop - Command handler that simply returns success.
1965 * @args: device IDENTIFY data / SCSI command of interest. 1909 * @args: device IDENTIFY data / SCSI command of interest.
1966 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1910 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1967 * @buflen: Response buffer length.
1968 * 1911 *
1969 * No operation. Simply returns success to caller, to indicate 1912 * No operation. Simply returns success to caller, to indicate
1970 * that the caller should successfully complete this SCSI command. 1913 * that the caller should successfully complete this SCSI command.
@@ -1972,47 +1915,16 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
1972 * LOCKING: 1915 * LOCKING:
1973 * spin_lock_irqsave(host lock) 1916 * spin_lock_irqsave(host lock)
1974 */ 1917 */
1975 1918static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
1976unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
1977 unsigned int buflen)
1978{ 1919{
1979 VPRINTK("ENTER\n"); 1920 VPRINTK("ENTER\n");
1980 return 0; 1921 return 0;
1981} 1922}
1982 1923
1983/** 1924/**
1984 * ata_msense_push - Push data onto MODE SENSE data output buffer
1985 * @ptr_io: (input/output) Location to store more output data
1986 * @last: End of output data buffer
1987 * @buf: Pointer to BLOB being added to output buffer
1988 * @buflen: Length of BLOB
1989 *
1990 * Store MODE SENSE data on an output buffer.
1991 *
1992 * LOCKING:
1993 * None.
1994 */
1995
1996static void ata_msense_push(u8 **ptr_io, const u8 *last,
1997 const u8 *buf, unsigned int buflen)
1998{
1999 u8 *ptr = *ptr_io;
2000
2001 if ((ptr + buflen - 1) > last)
2002 return;
2003
2004 memcpy(ptr, buf, buflen);
2005
2006 ptr += buflen;
2007
2008 *ptr_io = ptr;
2009}
2010
2011/**
2012 * ata_msense_caching - Simulate MODE SENSE caching info page 1925 * ata_msense_caching - Simulate MODE SENSE caching info page
2013 * @id: device IDENTIFY data 1926 * @id: device IDENTIFY data
2014 * @ptr_io: (input/output) Location to store more output data 1927 * @buf: output buffer
2015 * @last: End of output data buffer
2016 * 1928 *
2017 * Generate a caching info page, which conditionally indicates 1929 * Generate a caching info page, which conditionally indicates
2018 * write caching to the SCSI layer, depending on device 1930 * write caching to the SCSI layer, depending on device
@@ -2021,58 +1933,43 @@ static void ata_msense_push(u8 **ptr_io, const u8 *last,
2021 * LOCKING: 1933 * LOCKING:
2022 * None. 1934 * None.
2023 */ 1935 */
2024 1936static unsigned int ata_msense_caching(u16 *id, u8 *buf)
2025static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
2026 const u8 *last)
2027{ 1937{
2028 u8 page[CACHE_MPAGE_LEN]; 1938 memcpy(buf, def_cache_mpage, sizeof(def_cache_mpage));
2029
2030 memcpy(page, def_cache_mpage, sizeof(page));
2031 if (ata_id_wcache_enabled(id)) 1939 if (ata_id_wcache_enabled(id))
2032 page[2] |= (1 << 2); /* write cache enable */ 1940 buf[2] |= (1 << 2); /* write cache enable */
2033 if (!ata_id_rahead_enabled(id)) 1941 if (!ata_id_rahead_enabled(id))
2034 page[12] |= (1 << 5); /* disable read ahead */ 1942 buf[12] |= (1 << 5); /* disable read ahead */
2035 1943 return sizeof(def_cache_mpage);
2036 ata_msense_push(ptr_io, last, page, sizeof(page));
2037 return sizeof(page);
2038} 1944}
2039 1945
2040/** 1946/**
2041 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page 1947 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
2042 * @dev: Device associated with this MODE SENSE command 1948 * @buf: output buffer
2043 * @ptr_io: (input/output) Location to store more output data
2044 * @last: End of output data buffer
2045 * 1949 *
2046 * Generate a generic MODE SENSE control mode page. 1950 * Generate a generic MODE SENSE control mode page.
2047 * 1951 *
2048 * LOCKING: 1952 * LOCKING:
2049 * None. 1953 * None.
2050 */ 1954 */
2051 1955static unsigned int ata_msense_ctl_mode(u8 *buf)
2052static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
2053{ 1956{
2054 ata_msense_push(ptr_io, last, def_control_mpage, 1957 memcpy(buf, def_control_mpage, sizeof(def_control_mpage));
2055 sizeof(def_control_mpage));
2056 return sizeof(def_control_mpage); 1958 return sizeof(def_control_mpage);
2057} 1959}
2058 1960
2059/** 1961/**
2060 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page 1962 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
2061 * @dev: Device associated with this MODE SENSE command 1963 * @bufp: output buffer
2062 * @ptr_io: (input/output) Location to store more output data
2063 * @last: End of output data buffer
2064 * 1964 *
2065 * Generate a generic MODE SENSE r/w error recovery page. 1965 * Generate a generic MODE SENSE r/w error recovery page.
2066 * 1966 *
2067 * LOCKING: 1967 * LOCKING:
2068 * None. 1968 * None.
2069 */ 1969 */
2070 1970static unsigned int ata_msense_rw_recovery(u8 *buf)
2071static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
2072{ 1971{
2073 1972 memcpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage));
2074 ata_msense_push(ptr_io, last, def_rw_recovery_mpage,
2075 sizeof(def_rw_recovery_mpage));
2076 return sizeof(def_rw_recovery_mpage); 1973 return sizeof(def_rw_recovery_mpage);
2077} 1974}
2078 1975
@@ -2104,7 +2001,6 @@ static int ata_dev_supports_fua(u16 *id)
2104 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands 2001 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
2105 * @args: device IDENTIFY data / SCSI command of interest. 2002 * @args: device IDENTIFY data / SCSI command of interest.
2106 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2003 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2107 * @buflen: Response buffer length.
2108 * 2004 *
2109 * Simulate MODE SENSE commands. Assume this is invoked for direct 2005 * Simulate MODE SENSE commands. Assume this is invoked for direct
2110 * access devices (e.g. disks) only. There should be no block 2006 * access devices (e.g. disks) only. There should be no block
@@ -2113,19 +2009,17 @@ static int ata_dev_supports_fua(u16 *id)
2113 * LOCKING: 2009 * LOCKING:
2114 * spin_lock_irqsave(host lock) 2010 * spin_lock_irqsave(host lock)
2115 */ 2011 */
2116 2012static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
2117unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
2118 unsigned int buflen)
2119{ 2013{
2120 struct ata_device *dev = args->dev; 2014 struct ata_device *dev = args->dev;
2121 u8 *scsicmd = args->cmd->cmnd, *p, *last; 2015 u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
2122 const u8 sat_blk_desc[] = { 2016 const u8 sat_blk_desc[] = {
2123 0, 0, 0, 0, /* number of blocks: sat unspecified */ 2017 0, 0, 0, 0, /* number of blocks: sat unspecified */
2124 0, 2018 0,
2125 0, 0x2, 0x0 /* block length: 512 bytes */ 2019 0, 0x2, 0x0 /* block length: 512 bytes */
2126 }; 2020 };
2127 u8 pg, spg; 2021 u8 pg, spg;
2128 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen; 2022 unsigned int ebd, page_control, six_byte;
2129 u8 dpofua; 2023 u8 dpofua;
2130 2024
2131 VPRINTK("ENTER\n"); 2025 VPRINTK("ENTER\n");
@@ -2148,17 +2042,10 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
2148 goto invalid_fld; 2042 goto invalid_fld;
2149 } 2043 }
2150 2044
2151 if (six_byte) { 2045 if (six_byte)
2152 output_len = 4 + (ebd ? 8 : 0); 2046 p += 4 + (ebd ? 8 : 0);
2153 alloc_len = scsicmd[4]; 2047 else
2154 } else { 2048 p += 8 + (ebd ? 8 : 0);
2155 output_len = 8 + (ebd ? 8 : 0);
2156 alloc_len = (scsicmd[7] << 8) + scsicmd[8];
2157 }
2158 minlen = (alloc_len < buflen) ? alloc_len : buflen;
2159
2160 p = rbuf + output_len;
2161 last = rbuf + minlen - 1;
2162 2049
2163 pg = scsicmd[2] & 0x3f; 2050 pg = scsicmd[2] & 0x3f;
2164 spg = scsicmd[3]; 2051 spg = scsicmd[3];
@@ -2171,61 +2058,48 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
2171 2058
2172 switch(pg) { 2059 switch(pg) {
2173 case RW_RECOVERY_MPAGE: 2060 case RW_RECOVERY_MPAGE:
2174 output_len += ata_msense_rw_recovery(&p, last); 2061 p += ata_msense_rw_recovery(p);
2175 break; 2062 break;
2176 2063
2177 case CACHE_MPAGE: 2064 case CACHE_MPAGE:
2178 output_len += ata_msense_caching(args->id, &p, last); 2065 p += ata_msense_caching(args->id, p);
2179 break; 2066 break;
2180 2067
2181 case CONTROL_MPAGE: { 2068 case CONTROL_MPAGE:
2182 output_len += ata_msense_ctl_mode(&p, last); 2069 p += ata_msense_ctl_mode(p);
2183 break; 2070 break;
2184 }
2185 2071
2186 case ALL_MPAGES: 2072 case ALL_MPAGES:
2187 output_len += ata_msense_rw_recovery(&p, last); 2073 p += ata_msense_rw_recovery(p);
2188 output_len += ata_msense_caching(args->id, &p, last); 2074 p += ata_msense_caching(args->id, p);
2189 output_len += ata_msense_ctl_mode(&p, last); 2075 p += ata_msense_ctl_mode(p);
2190 break; 2076 break;
2191 2077
2192 default: /* invalid page code */ 2078 default: /* invalid page code */
2193 goto invalid_fld; 2079 goto invalid_fld;
2194 } 2080 }
2195 2081
2196 if (minlen < 1)
2197 return 0;
2198
2199 dpofua = 0; 2082 dpofua = 0;
2200 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) && 2083 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
2201 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) 2084 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
2202 dpofua = 1 << 4; 2085 dpofua = 1 << 4;
2203 2086
2204 if (six_byte) { 2087 if (six_byte) {
2205 output_len--; 2088 rbuf[0] = p - rbuf - 1;
2206 rbuf[0] = output_len; 2089 rbuf[2] |= dpofua;
2207 if (minlen > 2)
2208 rbuf[2] |= dpofua;
2209 if (ebd) { 2090 if (ebd) {
2210 if (minlen > 3) 2091 rbuf[3] = sizeof(sat_blk_desc);
2211 rbuf[3] = sizeof(sat_blk_desc); 2092 memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
2212 if (minlen > 11)
2213 memcpy(rbuf + 4, sat_blk_desc,
2214 sizeof(sat_blk_desc));
2215 } 2093 }
2216 } else { 2094 } else {
2217 output_len -= 2; 2095 unsigned int output_len = p - rbuf - 2;
2096
2218 rbuf[0] = output_len >> 8; 2097 rbuf[0] = output_len >> 8;
2219 if (minlen > 1) 2098 rbuf[1] = output_len;
2220 rbuf[1] = output_len; 2099 rbuf[3] |= dpofua;
2221 if (minlen > 3)
2222 rbuf[3] |= dpofua;
2223 if (ebd) { 2100 if (ebd) {
2224 if (minlen > 7) 2101 rbuf[7] = sizeof(sat_blk_desc);
2225 rbuf[7] = sizeof(sat_blk_desc); 2102 memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
2226 if (minlen > 15)
2227 memcpy(rbuf + 8, sat_blk_desc,
2228 sizeof(sat_blk_desc));
2229 } 2103 }
2230 } 2104 }
2231 return 0; 2105 return 0;
@@ -2245,15 +2119,13 @@ saving_not_supp:
2245 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands 2119 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2246 * @args: device IDENTIFY data / SCSI command of interest. 2120 * @args: device IDENTIFY data / SCSI command of interest.
2247 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2121 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2248 * @buflen: Response buffer length.
2249 * 2122 *
2250 * Simulate READ CAPACITY commands. 2123 * Simulate READ CAPACITY commands.
2251 * 2124 *
2252 * LOCKING: 2125 * LOCKING:
2253 * None. 2126 * None.
2254 */ 2127 */
2255unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2128static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2256 unsigned int buflen)
2257{ 2129{
2258 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ 2130 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */
2259 2131
@@ -2264,28 +2136,28 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2264 last_lba = 0xffffffff; 2136 last_lba = 0xffffffff;
2265 2137
2266 /* sector count, 32-bit */ 2138 /* sector count, 32-bit */
2267 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3)); 2139 rbuf[0] = last_lba >> (8 * 3);
2268 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2)); 2140 rbuf[1] = last_lba >> (8 * 2);
2269 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1)); 2141 rbuf[2] = last_lba >> (8 * 1);
2270 ATA_SCSI_RBUF_SET(3, last_lba); 2142 rbuf[3] = last_lba;
2271 2143
2272 /* sector size */ 2144 /* sector size */
2273 ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8); 2145 rbuf[6] = ATA_SECT_SIZE >> 8;
2274 ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE & 0xff); 2146 rbuf[7] = ATA_SECT_SIZE & 0xff;
2275 } else { 2147 } else {
2276 /* sector count, 64-bit */ 2148 /* sector count, 64-bit */
2277 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7)); 2149 rbuf[0] = last_lba >> (8 * 7);
2278 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6)); 2150 rbuf[1] = last_lba >> (8 * 6);
2279 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5)); 2151 rbuf[2] = last_lba >> (8 * 5);
2280 ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4)); 2152 rbuf[3] = last_lba >> (8 * 4);
2281 ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3)); 2153 rbuf[4] = last_lba >> (8 * 3);
2282 ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2)); 2154 rbuf[5] = last_lba >> (8 * 2);
2283 ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1)); 2155 rbuf[6] = last_lba >> (8 * 1);
2284 ATA_SCSI_RBUF_SET(7, last_lba); 2156 rbuf[7] = last_lba;
2285 2157
2286 /* sector size */ 2158 /* sector size */
2287 ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8); 2159 rbuf[10] = ATA_SECT_SIZE >> 8;
2288 ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE & 0xff); 2160 rbuf[11] = ATA_SECT_SIZE & 0xff;
2289 } 2161 }
2290 2162
2291 return 0; 2163 return 0;
@@ -2295,16 +2167,13 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2295 * ata_scsiop_report_luns - Simulate REPORT LUNS command 2167 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2296 * @args: device IDENTIFY data / SCSI command of interest. 2168 * @args: device IDENTIFY data / SCSI command of interest.
2297 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2169 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2298 * @buflen: Response buffer length.
2299 * 2170 *
2300 * Simulate REPORT LUNS command. 2171 * Simulate REPORT LUNS command.
2301 * 2172 *
2302 * LOCKING: 2173 * LOCKING:
2303 * spin_lock_irqsave(host lock) 2174 * spin_lock_irqsave(host lock)
2304 */ 2175 */
2305 2176static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
2306unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
2307 unsigned int buflen)
2308{ 2177{
2309 VPRINTK("ENTER\n"); 2178 VPRINTK("ENTER\n");
2310 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ 2179 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
@@ -2312,53 +2181,6 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
2312 return 0; 2181 return 0;
2313} 2182}
2314 2183
2315/**
2316 * ata_scsi_set_sense - Set SCSI sense data and status
2317 * @cmd: SCSI request to be handled
2318 * @sk: SCSI-defined sense key
2319 * @asc: SCSI-defined additional sense code
2320 * @ascq: SCSI-defined additional sense code qualifier
2321 *
2322 * Helper function that builds a valid fixed format, current
2323 * response code and the given sense key (sk), additional sense
2324 * code (asc) and additional sense code qualifier (ascq) with
2325 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and
2326 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
2327 *
2328 * LOCKING:
2329 * Not required
2330 */
2331
2332void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
2333{
2334 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
2335
2336 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
2337}
2338
2339/**
2340 * ata_scsi_badcmd - End a SCSI request with an error
2341 * @cmd: SCSI request to be handled
2342 * @done: SCSI command completion function
2343 * @asc: SCSI-defined additional sense code
2344 * @ascq: SCSI-defined additional sense code qualifier
2345 *
2346 * Helper function that completes a SCSI command with
2347 * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST
2348 * and the specified additional sense codes.
2349 *
2350 * LOCKING:
2351 * spin_lock_irqsave(host lock)
2352 */
2353
2354void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
2355{
2356 DPRINTK("ENTER\n");
2357 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
2358
2359 done(cmd);
2360}
2361
2362static void atapi_sense_complete(struct ata_queued_cmd *qc) 2184static void atapi_sense_complete(struct ata_queued_cmd *qc)
2363{ 2185{
2364 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { 2186 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
@@ -2485,13 +2307,10 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2485 u8 *scsicmd = cmd->cmnd; 2307 u8 *scsicmd = cmd->cmnd;
2486 2308
2487 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { 2309 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
2488 u8 *buf = NULL;
2489 unsigned int buflen;
2490 unsigned long flags; 2310 unsigned long flags;
2311 u8 *buf;
2491 2312
2492 local_irq_save(flags); 2313 buf = ata_scsi_rbuf_get(cmd, true, &flags);
2493
2494 buflen = ata_scsi_rbuf_get(cmd, &buf);
2495 2314
2496 /* ATAPI devices typically report zero for their SCSI version, 2315 /* ATAPI devices typically report zero for their SCSI version,
2497 * and sometimes deviate from the spec WRT response data 2316 * and sometimes deviate from the spec WRT response data
@@ -2506,9 +2325,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2506 buf[3] = 0x32; 2325 buf[3] = 0x32;
2507 } 2326 }
2508 2327
2509 ata_scsi_rbuf_put(cmd, buf); 2328 ata_scsi_rbuf_put(cmd, true, &flags);
2510
2511 local_irq_restore(flags);
2512 } 2329 }
2513 2330
2514 cmd->result = SAM_STAT_GOOD; 2331 cmd->result = SAM_STAT_GOOD;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index ae2cfd95d43e..4514283937ea 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -146,34 +146,6 @@ extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
146extern int ata_scsi_offline_dev(struct ata_device *dev); 146extern int ata_scsi_offline_dev(struct ata_device *dev);
147extern void ata_scsi_media_change_notify(struct ata_device *dev); 147extern void ata_scsi_media_change_notify(struct ata_device *dev);
148extern void ata_scsi_hotplug(struct work_struct *work); 148extern void ata_scsi_hotplug(struct work_struct *work);
149extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
150 unsigned int buflen);
151
152extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
153 unsigned int buflen);
154
155extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
156 unsigned int buflen);
157extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
158 unsigned int buflen);
159extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
160 unsigned int buflen);
161extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
162 unsigned int buflen);
163extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
164 unsigned int buflen);
165extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
166 unsigned int buflen);
167extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
168 unsigned int buflen);
169extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
170 void (*done)(struct scsi_cmnd *),
171 u8 asc, u8 ascq);
172extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
173 u8 sk, u8 asc, u8 ascq);
174extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
175 unsigned int (*actor) (struct ata_scsi_args *args,
176 u8 *rbuf, unsigned int buflen));
177extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); 149extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
178extern void ata_scsi_dev_rescan(struct work_struct *work); 150extern void ata_scsi_dev_rescan(struct work_struct *work);
179extern int ata_bus_probe(struct ata_port *ap); 151extern int ata_bus_probe(struct ata_port *ap);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 78738fb4223b..d7de7baf58a8 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -88,8 +88,8 @@ static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev,
 	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
 
 	pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
-	pio_mode_data &= ~(0xFF << timing_shift);
-	pio_mode_data |= (pio_timings[pio] << timing_shift);
+	pio_timing_data &= ~(0xFF << timing_shift);
+	pio_timing_data |= (pio_timings[pio] << timing_shift);
 	pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
 }
 
95 95
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index a75de0684c15..9ab89732cf94 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1272,8 +1272,8 @@ static void bfin_freeze(struct ata_port *ap)
 
 void bfin_thaw(struct ata_port *ap)
 {
+	dev_dbg(ap->dev, "in atapi dma thaw\n");
 	bfin_check_status(ap);
-	bfin_irq_clear(ap);
 	bfin_irq_on(ap);
 }
 
@@ -1339,13 +1339,130 @@ static int bfin_port_start(struct ata_port *ap)
1339 return 0; 1339 return 0;
1340} 1340}
1341 1341
1342static unsigned int bfin_ata_host_intr(struct ata_port *ap,
1343 struct ata_queued_cmd *qc)
1344{
1345 struct ata_eh_info *ehi = &ap->link.eh_info;
1346 u8 status, host_stat = 0;
1347
1348 VPRINTK("ata%u: protocol %d task_state %d\n",
1349 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1350
1351 /* Check whether we are expecting interrupt in this state */
1352 switch (ap->hsm_task_state) {
1353 case HSM_ST_FIRST:
1354 /* Some pre-ATAPI-4 devices assert INTRQ
1355 * at this state when ready to receive CDB.
1356 */
1357
1358 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
1359 * The flag was turned on only for atapi devices.
1360 * No need to check is_atapi_taskfile(&qc->tf) again.
1361 */
1362 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1363 goto idle_irq;
1364 break;
1365 case HSM_ST_LAST:
1366 if (qc->tf.protocol == ATA_PROT_DMA ||
1367 qc->tf.protocol == ATAPI_PROT_DMA) {
1368 /* check status of DMA engine */
1369 host_stat = ap->ops->bmdma_status(ap);
1370 VPRINTK("ata%u: host_stat 0x%X\n",
1371 ap->print_id, host_stat);
1372
1373 /* if it's not our irq... */
1374 if (!(host_stat & ATA_DMA_INTR))
1375 goto idle_irq;
1376
1377 /* before we do anything else, clear DMA-Start bit */
1378 ap->ops->bmdma_stop(qc);
1379
1380 if (unlikely(host_stat & ATA_DMA_ERR)) {
1381 /* error when transfering data to/from memory */
1382 qc->err_mask |= AC_ERR_HOST_BUS;
1383 ap->hsm_task_state = HSM_ST_ERR;
1384 }
1385 }
1386 break;
1387 case HSM_ST:
1388 break;
1389 default:
1390 goto idle_irq;
1391 }
1392
1393 /* check altstatus */
1394 status = ap->ops->sff_check_altstatus(ap);
1395 if (status & ATA_BUSY)
1396 goto busy_ata;
1397
1398 /* check main status, clearing INTRQ */
1399 status = ap->ops->sff_check_status(ap);
1400 if (unlikely(status & ATA_BUSY))
1401 goto busy_ata;
1402
1403 /* ack bmdma irq events */
1404 ap->ops->sff_irq_clear(ap);
1405
1406 ata_sff_hsm_move(ap, qc, status, 0);
1407
1408 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
1409 qc->tf.protocol == ATAPI_PROT_DMA))
1410 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
1411
1412busy_ata:
1413 return 1; /* irq handled */
1414
1415idle_irq:
1416 ap->stats.idle_irq++;
1417
1418#ifdef ATA_IRQ_TRAP
1419 if ((ap->stats.idle_irq % 1000) == 0) {
1420 ap->ops->irq_ack(ap, 0); /* debug trap */
1421 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1422 return 1;
1423 }
1424#endif
1425 return 0; /* irq not handled */
1426}
1427
1428static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
1429{
1430 struct ata_host *host = dev_instance;
1431 unsigned int i;
1432 unsigned int handled = 0;
1433 unsigned long flags;
1434
1435 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1436 spin_lock_irqsave(&host->lock, flags);
1437
1438 for (i = 0; i < host->n_ports; i++) {
1439 struct ata_port *ap;
1440
1441 ap = host->ports[i];
1442 if (ap &&
1443 !(ap->flags & ATA_FLAG_DISABLED)) {
1444 struct ata_queued_cmd *qc;
1445
1446 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1447 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
1448 (qc->flags & ATA_QCFLAG_ACTIVE))
1449 handled |= bfin_ata_host_intr(ap, qc);
1450 }
1451 }
1452
1453 spin_unlock_irqrestore(&host->lock, flags);
1454
1455 return IRQ_RETVAL(handled);
1456}
1457
1458
1342static struct scsi_host_template bfin_sht = { 1459static struct scsi_host_template bfin_sht = {
1343 ATA_BASE_SHT(DRV_NAME), 1460 ATA_BASE_SHT(DRV_NAME),
1344 .sg_tablesize = SG_NONE, 1461 .sg_tablesize = SG_NONE,
1345 .dma_boundary = ATA_DMA_BOUNDARY, 1462 .dma_boundary = ATA_DMA_BOUNDARY,
1346}; 1463};
1347 1464
1348static const struct ata_port_operations bfin_pata_ops = { 1465static struct ata_port_operations bfin_pata_ops = {
1349 .inherits = &ata_sff_port_ops, 1466 .inherits = &ata_sff_port_ops,
1350 1467
1351 .set_piomode = bfin_set_piomode, 1468 .set_piomode = bfin_set_piomode,
@@ -1370,7 +1487,6 @@ static const struct ata_port_operations bfin_pata_ops = {
1370 .thaw = bfin_thaw, 1487 .thaw = bfin_thaw,
1371 .softreset = bfin_softreset, 1488 .softreset = bfin_softreset,
1372 .postreset = bfin_postreset, 1489 .postreset = bfin_postreset,
1373 .post_internal_cmd = bfin_bmdma_stop,
1374 1490
1375 .sff_irq_clear = bfin_irq_clear, 1491 .sff_irq_clear = bfin_irq_clear,
1376 .sff_irq_on = bfin_irq_on, 1492 .sff_irq_on = bfin_irq_on,
@@ -1507,7 +1623,7 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev)
1507 } 1623 }
1508 1624
1509 if (ata_host_activate(host, platform_get_irq(pdev, 0), 1625 if (ata_host_activate(host, platform_get_irq(pdev, 0),
1510 ata_sff_interrupt, IRQF_SHARED, &bfin_sht) != 0) { 1626 bfin_ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
1511 peripheral_free_list(atapi_io_port); 1627 peripheral_free_list(atapi_io_port);
1512 dev_err(&pdev->dev, "Fail to attach ATAPI device\n"); 1628 dev_err(&pdev->dev, "Fail to attach ATAPI device\n");
1513 return -ENODEV; 1629 return -ENODEV;
diff --git a/drivers/ata/pata_rb500_cf.c b/drivers/ata/pata_rb532_cf.c
index 4345174aaeec..a108d259f19d 100644
--- a/drivers/ata/pata_rb500_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -32,7 +32,7 @@
32 32
33#include <asm/gpio.h> 33#include <asm/gpio.h>
34 34
35#define DRV_NAME "pata-rb500-cf" 35#define DRV_NAME "pata-rb532-cf"
36#define DRV_VERSION "0.1.0" 36#define DRV_VERSION "0.1.0"
37#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" 37#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
38 38
@@ -43,7 +43,7 @@
43#define RB500_CF_REG_CTRL 0x080E 43#define RB500_CF_REG_CTRL 0x080E
44#define RB500_CF_REG_DATA 0x0C00 44#define RB500_CF_REG_DATA 0x0C00
45 45
46struct rb500_cf_info { 46struct rb532_cf_info {
47 void __iomem *iobase; 47 void __iomem *iobase;
48 unsigned int gpio_line; 48 unsigned int gpio_line;
49 int frozen; 49 int frozen;
@@ -52,10 +52,10 @@ struct rb500_cf_info {
52 52
53/* ------------------------------------------------------------------------ */ 53/* ------------------------------------------------------------------------ */
54 54
55static inline void rb500_pata_finish_io(struct ata_port *ap) 55static inline void rb532_pata_finish_io(struct ata_port *ap)
56{ 56{
57 struct ata_host *ah = ap->host; 57 struct ata_host *ah = ap->host;
58 struct rb500_cf_info *info = ah->private_data; 58 struct rb532_cf_info *info = ah->private_data;
59 59
60 ata_sff_altstatus(ap); 60 ata_sff_altstatus(ap);
61 ndelay(RB500_CF_IO_DELAY); 61 ndelay(RB500_CF_IO_DELAY);
@@ -63,14 +63,14 @@ static inline void rb500_pata_finish_io(struct ata_port *ap)
63 set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); 63 set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
64} 64}
65 65
66static void rb500_pata_exec_command(struct ata_port *ap, 66static void rb532_pata_exec_command(struct ata_port *ap,
67 const struct ata_taskfile *tf) 67 const struct ata_taskfile *tf)
68{ 68{
69 writeb(tf->command, ap->ioaddr.command_addr); 69 writeb(tf->command, ap->ioaddr.command_addr);
70 rb500_pata_finish_io(ap); 70 rb532_pata_finish_io(ap);
71} 71}
72 72
73static void rb500_pata_data_xfer(struct ata_device *adev, unsigned char *buf, 73static void rb532_pata_data_xfer(struct ata_device *adev, unsigned char *buf,
74 unsigned int buflen, int write_data) 74 unsigned int buflen, int write_data)
75{ 75{
76 struct ata_port *ap = adev->link->ap; 76 struct ata_port *ap = adev->link->ap;
@@ -84,27 +84,27 @@ static void rb500_pata_data_xfer(struct ata_device *adev, unsigned char *buf,
84 *buf = readb(ioaddr); 84 *buf = readb(ioaddr);
85 } 85 }
86 86
87 rb500_pata_finish_io(adev->link->ap); 87 rb532_pata_finish_io(adev->link->ap);
88} 88}
89 89
90static void rb500_pata_freeze(struct ata_port *ap) 90static void rb532_pata_freeze(struct ata_port *ap)
91{ 91{
92 struct rb500_cf_info *info = ap->host->private_data; 92 struct rb532_cf_info *info = ap->host->private_data;
93 93
94 info->frozen = 1; 94 info->frozen = 1;
95} 95}
96 96
97static void rb500_pata_thaw(struct ata_port *ap) 97static void rb532_pata_thaw(struct ata_port *ap)
98{ 98{
99 struct rb500_cf_info *info = ap->host->private_data; 99 struct rb532_cf_info *info = ap->host->private_data;
100 100
101 info->frozen = 0; 101 info->frozen = 0;
102} 102}
103 103
104static irqreturn_t rb500_pata_irq_handler(int irq, void *dev_instance) 104static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
105{ 105{
106 struct ata_host *ah = dev_instance; 106 struct ata_host *ah = dev_instance;
107 struct rb500_cf_info *info = ah->private_data; 107 struct rb532_cf_info *info = ah->private_data;
108 108
109 if (gpio_get_value(info->gpio_line)) { 109 if (gpio_get_value(info->gpio_line)) {
110 set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); 110 set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
@@ -117,30 +117,30 @@ static irqreturn_t rb500_pata_irq_handler(int irq, void *dev_instance)
117 return IRQ_HANDLED; 117 return IRQ_HANDLED;
118} 118}
119 119
120static struct ata_port_operations rb500_pata_port_ops = { 120static struct ata_port_operations rb532_pata_port_ops = {
121 .inherits = &ata_sff_port_ops, 121 .inherits = &ata_sff_port_ops,
122 .sff_exec_command = rb500_pata_exec_command, 122 .sff_exec_command = rb532_pata_exec_command,
123 .sff_data_xfer = rb500_pata_data_xfer, 123 .sff_data_xfer = rb532_pata_data_xfer,
124 .freeze = rb500_pata_freeze, 124 .freeze = rb532_pata_freeze,
125 .thaw = rb500_pata_thaw, 125 .thaw = rb532_pata_thaw,
126}; 126};
127 127
128/* ------------------------------------------------------------------------ */ 128/* ------------------------------------------------------------------------ */
129 129
130static struct scsi_host_template rb500_pata_sht = { 130static struct scsi_host_template rb532_pata_sht = {
131 ATA_PIO_SHT(DRV_NAME), 131 ATA_PIO_SHT(DRV_NAME),
132}; 132};
133 133
134/* ------------------------------------------------------------------------ */ 134/* ------------------------------------------------------------------------ */
135 135
136static void rb500_pata_setup_ports(struct ata_host *ah) 136static void rb532_pata_setup_ports(struct ata_host *ah)
137{ 137{
138 struct rb500_cf_info *info = ah->private_data; 138 struct rb532_cf_info *info = ah->private_data;
139 struct ata_port *ap; 139 struct ata_port *ap;
140 140
141 ap = ah->ports[0]; 141 ap = ah->ports[0];
142 142
143 ap->ops = &rb500_pata_port_ops; 143 ap->ops = &rb532_pata_port_ops;
144 ap->pio_mask = 0x1f; /* PIO4 */ 144 ap->pio_mask = 0x1f; /* PIO4 */
145 ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO; 145 ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
146 146
@@ -153,13 +153,13 @@ static void rb500_pata_setup_ports(struct ata_host *ah)
153 ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DATA; 153 ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DATA;
154} 154}
155 155
156static __devinit int rb500_pata_driver_probe(struct platform_device *pdev) 156static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
157{ 157{
158 unsigned int irq; 158 unsigned int irq;
159 int gpio; 159 int gpio;
160 struct resource *res; 160 struct resource *res;
161 struct ata_host *ah; 161 struct ata_host *ah;
162 struct rb500_cf_info *info; 162 struct rb532_cf_info *info;
163 int ret; 163 int ret;
164 164
165 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 165 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -213,10 +213,10 @@ static __devinit int rb500_pata_driver_probe(struct platform_device *pdev)
213 goto err_free_gpio; 213 goto err_free_gpio;
214 } 214 }
215 215
216 rb500_pata_setup_ports(ah); 216 rb532_pata_setup_ports(ah);
217 217
218 ret = ata_host_activate(ah, irq, rb500_pata_irq_handler, 218 ret = ata_host_activate(ah, irq, rb532_pata_irq_handler,
219 IRQF_TRIGGER_LOW, &rb500_pata_sht); 219 IRQF_TRIGGER_LOW, &rb532_pata_sht);
220 if (ret) 220 if (ret)
221 goto err_free_gpio; 221 goto err_free_gpio;
222 222
@@ -228,10 +228,10 @@ err_free_gpio:
228 return ret; 228 return ret;
229} 229}
230 230
231static __devexit int rb500_pata_driver_remove(struct platform_device *pdev) 231static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
232{ 232{
233 struct ata_host *ah = platform_get_drvdata(pdev); 233 struct ata_host *ah = platform_get_drvdata(pdev);
234 struct rb500_cf_info *info = ah->private_data; 234 struct rb532_cf_info *info = ah->private_data;
235 235
236 ata_host_detach(ah); 236 ata_host_detach(ah);
237 gpio_free(info->gpio_line); 237 gpio_free(info->gpio_line);
@@ -242,9 +242,9 @@ static __devexit int rb500_pata_driver_remove(struct platform_device *pdev)
242/* work with hotplug and coldplug */ 242/* work with hotplug and coldplug */
243MODULE_ALIAS("platform:" DRV_NAME); 243MODULE_ALIAS("platform:" DRV_NAME);
244 244
245static struct platform_driver rb500_pata_platform_driver = { 245static struct platform_driver rb532_pata_platform_driver = {
246 .probe = rb500_pata_driver_probe, 246 .probe = rb532_pata_driver_probe,
247 .remove = __devexit_p(rb500_pata_driver_remove), 247 .remove = __devexit_p(rb532_pata_driver_remove),
248 .driver = { 248 .driver = {
249 .name = DRV_NAME, 249 .name = DRV_NAME,
250 .owner = THIS_MODULE, 250 .owner = THIS_MODULE,
@@ -255,16 +255,16 @@ static struct platform_driver rb500_pata_platform_driver = {
255 255
256#define DRV_INFO DRV_DESC " version " DRV_VERSION 256#define DRV_INFO DRV_DESC " version " DRV_VERSION
257 257
258static int __init rb500_pata_module_init(void) 258static int __init rb532_pata_module_init(void)
259{ 259{
260 printk(KERN_INFO DRV_INFO "\n"); 260 printk(KERN_INFO DRV_INFO "\n");
261 261
262 return platform_driver_register(&rb500_pata_platform_driver); 262 return platform_driver_register(&rb532_pata_platform_driver);
263} 263}
264 264
265static void __exit rb500_pata_module_exit(void) 265static void __exit rb532_pata_module_exit(void)
266{ 266{
267 platform_driver_unregister(&rb500_pata_platform_driver); 267 platform_driver_unregister(&rb532_pata_platform_driver);
268} 268}
269 269
270MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>"); 270MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
@@ -273,5 +273,5 @@ MODULE_DESCRIPTION(DRV_DESC);
273MODULE_VERSION(DRV_VERSION); 273MODULE_VERSION(DRV_VERSION);
274MODULE_LICENSE("GPL"); 274MODULE_LICENSE("GPL");
275 275
276module_init(rb500_pata_module_init); 276module_init(rb532_pata_module_init);
277module_exit(rb500_pata_module_exit); 277module_exit(rb532_pata_module_exit);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index d4840748fb5c..2fea6cbe7755 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -464,11 +464,12 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	pci_dev_put(isa);
 
-	/* 0x40 low bits indicate enabled channels */
-	pci_read_config_byte(pdev, 0x40 , &enable);
-	enable &= 3;
-	if (enable == 0) {
-		return -ENODEV;
+	if (!(config->flags & VIA_NO_ENABLES)) {
+		/* 0x40 low bits indicate enabled channels */
+		pci_read_config_byte(pdev, 0x40 , &enable);
+		enable &= 3;
+		if (enable == 0)
+			return -ENODEV;
 	}
 
 	/* Initialise the FIFO for the enabled channels. */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 26a6337195b3..842b1a15b78c 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -172,10 +172,11 @@ enum {
 	PCIE_IRQ_MASK_OFS	= 0x1910,
 	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
 
-	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
-	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
-	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
-	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
+	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
+	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
+	PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64,
+	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
+	SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024,
 	ERR_IRQ			= (1 << 0),	/* shift by port # */
 	DONE_IRQ		= (1 << 1),	/* shift by port # */
 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
@@ -445,8 +446,8 @@ struct mv_host_priv {
 	const struct mv_hw_ops	*ops;
 	int			n_ports;
 	void __iomem		*base;
-	void __iomem		*main_cause_reg_addr;
-	void __iomem		*main_mask_reg_addr;
+	void __iomem		*main_irq_cause_addr;
+	void __iomem		*main_irq_mask_addr;
 	u32			irq_cause_ofs;
 	u32			irq_mask_ofs;
 	u32			unmask_all_irqs;
@@ -727,8 +728,8 @@ static inline unsigned int mv_hardport_from_port(unsigned int port)
  * Simple code, with two return values, so macro rather than inline.
  *
  * port is the sole input, in range 0..7.
- * shift is one output, for use with the main_cause and main_mask registers.
- * hardport is the other output, in range 0..3
+ * shift is one output, for use with main_irq_cause / main_irq_mask registers.
+ * hardport is the other output, in range 0..3.
  *
  * Note that port and hardport may be the same variable in some cases.
  */
@@ -1679,12 +1680,12 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
1679/** 1680/**
1680 * mv_host_intr - Handle all interrupts on the given host controller 1681 * mv_host_intr - Handle all interrupts on the given host controller
1681 * @host: host specific structure 1682 * @host: host specific structure
1682 * @main_cause: Main interrupt cause register for the chip. 1683 * @main_irq_cause: Main interrupt cause register for the chip.
1683 * 1684 *
1684 * LOCKING: 1685 * LOCKING:
1685 * Inherited from caller. 1686 * Inherited from caller.
1686 */ 1687 */
1687static int mv_host_intr(struct ata_host *host, u32 main_cause) 1688static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
1688{ 1689{
1689 struct mv_host_priv *hpriv = host->private_data; 1690 struct mv_host_priv *hpriv = host->private_data;
1690 void __iomem *mmio = hpriv->base, *hc_mmio = NULL; 1691 void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
@@ -1705,7 +1706,7 @@ static int mv_host_intr(struct ata_host *host, u32 main_cause)
1705 * Do nothing if port is not interrupting or is disabled: 1706 * Do nothing if port is not interrupting or is disabled:
1706 */ 1707 */
1707 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 1708 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1708 port_cause = (main_cause >> shift) & (DONE_IRQ | ERR_IRQ); 1709 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
1709 if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED)) 1710 if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
1710 continue; 1711 continue;
1711 /* 1712 /*
@@ -1811,20 +1812,20 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1811 struct ata_host *host = dev_instance; 1812 struct ata_host *host = dev_instance;
1812 struct mv_host_priv *hpriv = host->private_data; 1813 struct mv_host_priv *hpriv = host->private_data;
1813 unsigned int handled = 0; 1814 unsigned int handled = 0;
1814 u32 main_cause, main_mask; 1815 u32 main_irq_cause, main_irq_mask;
1815 1816
1816 spin_lock(&host->lock); 1817 spin_lock(&host->lock);
1817 main_cause = readl(hpriv->main_cause_reg_addr); 1818 main_irq_cause = readl(hpriv->main_irq_cause_addr);
1818 main_mask = readl(hpriv->main_mask_reg_addr); 1819 main_irq_mask = readl(hpriv->main_irq_mask_addr);
1819 /* 1820 /*
1820 * Deal with cases where we either have nothing pending, or have read 1821 * Deal with cases where we either have nothing pending, or have read
1821 * a bogus register value which can indicate HW removal or PCI fault. 1822 * a bogus register value which can indicate HW removal or PCI fault.
1822 */ 1823 */
1823 if ((main_cause & main_mask) && (main_cause != 0xffffffffU)) { 1824 if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) {
1824 if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) 1825 if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host)))
1825 handled = mv_pci_error(host, hpriv->base); 1826 handled = mv_pci_error(host, hpriv->base);
1826 else 1827 else
1827 handled = mv_host_intr(host, main_cause); 1828 handled = mv_host_intr(host, main_irq_cause);
1828 } 1829 }
1829 spin_unlock(&host->lock); 1830 spin_unlock(&host->lock);
1830 return IRQ_RETVAL(handled); 1831 return IRQ_RETVAL(handled);
@@ -2027,7 +2028,7 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2027 ZERO(MV_PCI_DISC_TIMER); 2028 ZERO(MV_PCI_DISC_TIMER);
2028 ZERO(MV_PCI_MSI_TRIGGER); 2029 ZERO(MV_PCI_MSI_TRIGGER);
2029 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); 2030 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2030 ZERO(HC_MAIN_IRQ_MASK_OFS); 2031 ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
2031 ZERO(MV_PCI_SERR_MASK); 2032 ZERO(MV_PCI_SERR_MASK);
2032 ZERO(hpriv->irq_cause_ofs); 2033 ZERO(hpriv->irq_cause_ofs);
2033 ZERO(hpriv->irq_mask_ofs); 2034 ZERO(hpriv->irq_mask_ofs);
@@ -2404,7 +2405,7 @@ static void mv_eh_freeze(struct ata_port *ap)
2404{ 2405{
2405 struct mv_host_priv *hpriv = ap->host->private_data; 2406 struct mv_host_priv *hpriv = ap->host->private_data;
2406 unsigned int shift, hardport, port = ap->port_no; 2407 unsigned int shift, hardport, port = ap->port_no;
2407 u32 main_mask; 2408 u32 main_irq_mask;
2408 2409
2409 /* FIXME: handle coalescing completion events properly */ 2410 /* FIXME: handle coalescing completion events properly */
2410 2411
@@ -2412,9 +2413,9 @@ static void mv_eh_freeze(struct ata_port *ap)
2412 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2413 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2413 2414
2414 /* disable assertion of portN err, done events */ 2415 /* disable assertion of portN err, done events */
2415 main_mask = readl(hpriv->main_mask_reg_addr); 2416 main_irq_mask = readl(hpriv->main_irq_mask_addr);
2416 main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift); 2417 main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
2417 writelfl(main_mask, hpriv->main_mask_reg_addr); 2418 writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
2418} 2419}
2419 2420
2420static void mv_eh_thaw(struct ata_port *ap) 2421static void mv_eh_thaw(struct ata_port *ap)
@@ -2423,7 +2424,7 @@ static void mv_eh_thaw(struct ata_port *ap)
2423 unsigned int shift, hardport, port = ap->port_no; 2424 unsigned int shift, hardport, port = ap->port_no;
2424 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 2425 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
2425 void __iomem *port_mmio = mv_ap_base(ap); 2426 void __iomem *port_mmio = mv_ap_base(ap);
2426 u32 main_mask, hc_irq_cause; 2427 u32 main_irq_mask, hc_irq_cause;
2427 2428
2428 /* FIXME: handle coalescing completion events properly */ 2429 /* FIXME: handle coalescing completion events properly */
2429 2430
@@ -2438,9 +2439,9 @@ static void mv_eh_thaw(struct ata_port *ap)
2438 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 2439 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2439 2440
2440 /* enable assertion of portN err, done events */ 2441 /* enable assertion of portN err, done events */
2441 main_mask = readl(hpriv->main_mask_reg_addr); 2442 main_irq_mask = readl(hpriv->main_irq_mask_addr);
2442 main_mask |= ((DONE_IRQ | ERR_IRQ) << shift); 2443 main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
2443 writelfl(main_mask, hpriv->main_mask_reg_addr); 2444 writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
2444} 2445}
2445 2446
2446/** 2447/**
@@ -2654,15 +2655,15 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2654 goto done; 2655 goto done;
2655 2656
2656 if (HAS_PCI(host)) { 2657 if (HAS_PCI(host)) {
2657 hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS; 2658 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
2658 hpriv->main_mask_reg_addr = mmio + HC_MAIN_IRQ_MASK_OFS; 2659 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
2659 } else { 2660 } else {
2660 hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS; 2661 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
2661 hpriv->main_mask_reg_addr = mmio + HC_SOC_MAIN_IRQ_MASK_OFS; 2662 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
2662 } 2663 }
2663 2664
2664 /* global interrupt mask: 0 == mask everything */ 2665 /* global interrupt mask: 0 == mask everything */
2665 writel(0, hpriv->main_mask_reg_addr); 2666 writel(0, hpriv->main_irq_mask_addr);
2666 2667
2667 n_hc = mv_get_hc_count(host->ports[0]->flags); 2668 n_hc = mv_get_hc_count(host->ports[0]->flags);
2668 2669
@@ -2712,23 +2713,23 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2712 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); 2713 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2713 if (IS_GEN_I(hpriv)) 2714 if (IS_GEN_I(hpriv))
2714 writelfl(~HC_MAIN_MASKED_IRQS_5, 2715 writelfl(~HC_MAIN_MASKED_IRQS_5,
2715 hpriv->main_mask_reg_addr); 2716 hpriv->main_irq_mask_addr);
2716 else 2717 else
2717 writelfl(~HC_MAIN_MASKED_IRQS, 2718 writelfl(~HC_MAIN_MASKED_IRQS,
2718 hpriv->main_mask_reg_addr); 2719 hpriv->main_irq_mask_addr);
2719 2720
2720 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " 2721 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2721 "PCI int cause/mask=0x%08x/0x%08x\n", 2722 "PCI int cause/mask=0x%08x/0x%08x\n",
2722 readl(hpriv->main_cause_reg_addr), 2723 readl(hpriv->main_irq_cause_addr),
2723 readl(hpriv->main_mask_reg_addr), 2724 readl(hpriv->main_irq_mask_addr),
2724 readl(mmio + hpriv->irq_cause_ofs), 2725 readl(mmio + hpriv->irq_cause_ofs),
2725 readl(mmio + hpriv->irq_mask_ofs)); 2726 readl(mmio + hpriv->irq_mask_ofs));
2726 } else { 2727 } else {
2727 writelfl(~HC_MAIN_MASKED_IRQS_SOC, 2728 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2728 hpriv->main_mask_reg_addr); 2729 hpriv->main_irq_mask_addr);
2729 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n", 2730 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2730 readl(hpriv->main_cause_reg_addr), 2731 readl(hpriv->main_irq_cause_addr),
2731 readl(hpriv->main_mask_reg_addr)); 2732 readl(hpriv->main_irq_mask_addr));
2732 } 2733 }
2733done: 2734done:
2734 return rc; 2735 return rc;
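
The sata_mv changes are a mechanical rename (main_cause/main_mask become main_irq_cause/main_irq_mask) plus a split of the chip-level register offsets into PCI_HC_* and SOC_HC_* variants that mv_init_host() selects once. Collected from the hunks above, the selection and the per-port mask update used by mv_eh_freeze() read as follows (variables as declared in the driver):

    /* mv_init_host(): pick the chip-level IRQ registers once */
    if (HAS_PCI(host)) {
            hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
            hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
    } else {
            hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
            hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
    }

    /* mv_eh_freeze(): clear this port's DONE/ERR bits in the main mask */
    MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
    main_irq_mask = readl(hpriv->main_irq_mask_addr);
    main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
    writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
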
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 5aa12b011a9a..6adb72a2f876 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -33,6 +33,7 @@
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/poison.h> 34#include <linux/poison.h>
35#include <linux/bitrev.h> 35#include <linux/bitrev.h>
36#include <linux/mutex.h>
36 37
37#include <asm/atomic.h> 38#include <asm/atomic.h>
38#include <asm/io.h> 39#include <asm/io.h>
@@ -1177,7 +1178,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
1177 1178
1178 vcc->tx_frame_bits = tx_frame_bits; 1179 vcc->tx_frame_bits = tx_frame_bits;
1179 1180
1180 down (&dev->vcc_sf); 1181 mutex_lock(&dev->vcc_sf);
1181 if (dev->rxer[vci]) { 1182 if (dev->rxer[vci]) {
1182 // RXer on the channel already, just modify rate... 1183 // RXer on the channel already, just modify rate...
1183 cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE); 1184 cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
@@ -1203,7 +1204,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
1203 schedule(); 1204 schedule();
1204 } 1205 }
1205 dev->txer[vci].tx_present = 1; 1206 dev->txer[vci].tx_present = 1;
1206 up (&dev->vcc_sf); 1207 mutex_unlock(&dev->vcc_sf);
1207 } 1208 }
1208 1209
1209 if (rxtp->traffic_class != ATM_NONE) { 1210 if (rxtp->traffic_class != ATM_NONE) {
@@ -1211,7 +1212,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
1211 1212
1212 vcc->rx_info.pool = pool; 1213 vcc->rx_info.pool = pool;
1213 1214
1214 down (&dev->vcc_sf); 1215 mutex_lock(&dev->vcc_sf);
1215 /* grow RX buffer pool */ 1216 /* grow RX buffer pool */
1216 if (!dev->rxq[pool].buffers_wanted) 1217 if (!dev->rxq[pool].buffers_wanted)
1217 dev->rxq[pool].buffers_wanted = rx_lats; 1218 dev->rxq[pool].buffers_wanted = rx_lats;
@@ -1237,7 +1238,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
1237 schedule(); 1238 schedule();
1238 // this link allows RX frames through 1239 // this link allows RX frames through
1239 dev->rxer[vci] = atm_vcc; 1240 dev->rxer[vci] = atm_vcc;
1240 up (&dev->vcc_sf); 1241 mutex_unlock(&dev->vcc_sf);
1241 } 1242 }
1242 1243
1243 // indicate readiness 1244 // indicate readiness
@@ -1262,7 +1263,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
1262 if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) { 1263 if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
1263 command cmd; 1264 command cmd;
1264 1265
1265 down (&dev->vcc_sf); 1266 mutex_lock(&dev->vcc_sf);
1266 if (dev->rxer[vci]) { 1267 if (dev->rxer[vci]) {
1267 // RXer still on the channel, just modify rate... XXX not really needed 1268 // RXer still on the channel, just modify rate... XXX not really needed
1268 cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE); 1269 cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
@@ -1277,7 +1278,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
1277 dev->txer[vci].tx_present = 0; 1278 dev->txer[vci].tx_present = 0;
1278 while (command_do (dev, &cmd)) 1279 while (command_do (dev, &cmd))
1279 schedule(); 1280 schedule();
1280 up (&dev->vcc_sf); 1281 mutex_unlock(&dev->vcc_sf);
1281 } 1282 }
1282 1283
1283 // disable RXing 1284 // disable RXing
@@ -1287,7 +1288,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
1287 // this is (the?) one reason why we need the amb_vcc struct 1288 // this is (the?) one reason why we need the amb_vcc struct
1288 unsigned char pool = vcc->rx_info.pool; 1289 unsigned char pool = vcc->rx_info.pool;
1289 1290
1290 down (&dev->vcc_sf); 1291 mutex_lock(&dev->vcc_sf);
1291 if (dev->txer[vci].tx_present) { 1292 if (dev->txer[vci].tx_present) {
1292 // TXer still on the channel, just go to pool zero XXX not really needed 1293 // TXer still on the channel, just go to pool zero XXX not really needed
1293 cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS); 1294 cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
@@ -1314,7 +1315,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
1314 dev->rxq[pool].buffers_wanted = 0; 1315 dev->rxq[pool].buffers_wanted = 0;
1315 drain_rx_pool (dev, pool); 1316 drain_rx_pool (dev, pool);
1316 } 1317 }
1317 up (&dev->vcc_sf); 1318 mutex_unlock(&dev->vcc_sf);
1318 } 1319 }
1319 1320
1320 // free our structure 1321 // free our structure
@@ -2188,7 +2189,7 @@ static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
2188 2189
2189 // semaphore for txer/rxer modifications - we cannot use a 2190 // semaphore for txer/rxer modifications - we cannot use a
2190 // spinlock as the critical region needs to switch processes 2191 // spinlock as the critical region needs to switch processes
2191 init_MUTEX (&dev->vcc_sf); 2192 mutex_init(&dev->vcc_sf);
2192 // queue manipulation spinlocks; we want atomic reads and 2193 // queue manipulation spinlocks; we want atomic reads and
2193 // writes to the queue descriptors (handles IRQ and SMP) 2194 // writes to the queue descriptors (handles IRQ and SMP)
2194 // consider replacing "int pending" -> "atomic_t available" 2195 // consider replacing "int pending" -> "atomic_t available"
diff --git a/drivers/atm/ambassador.h b/drivers/atm/ambassador.h
index ff2a303cbe00..df55fa8387dc 100644
--- a/drivers/atm/ambassador.h
+++ b/drivers/atm/ambassador.h
@@ -638,7 +638,7 @@ struct amb_dev {
638 amb_txq txq; 638 amb_txq txq;
639 amb_rxq rxq[NUM_RX_POOLS]; 639 amb_rxq rxq[NUM_RX_POOLS];
640 640
641 struct semaphore vcc_sf; 641 struct mutex vcc_sf;
642 amb_tx_info txer[NUM_VCS]; 642 amb_tx_info txer[NUM_VCS];
643 struct atm_vcc * rxer[NUM_VCS]; 643 struct atm_vcc * rxer[NUM_VCS];
644 unsigned int tx_avail; 644 unsigned int tx_avail;
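
Taken together, the ambassador.c and ambassador.h hunks convert the vcc_sf semaphore, which was only ever used as a sleeping lock, into a struct mutex: init_MUTEX() becomes mutex_init(), and each down()/up() pair becomes mutex_lock()/mutex_unlock(). A standalone sketch of the same conversion pattern; the example_dev structure and function names are illustrative, not from the driver:

    #include <linux/mutex.h>

    struct example_dev {
            struct mutex vcc_lock;          /* was: struct semaphore vcc_sf */
            /* ... */
    };

    static void example_setup(struct example_dev *dev)
    {
            mutex_init(&dev->vcc_lock);     /* was: init_MUTEX() */
    }

    static void example_modify(struct example_dev *dev)
    {
            mutex_lock(&dev->vcc_lock);     /* was: down() */
            /* critical section that is allowed to sleep */
            mutex_unlock(&dev->vcc_lock);   /* was: up() */
    }
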
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 38c769f8d2b7..3da804b1627d 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -415,7 +415,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
415 card->pcidev = pcidev; 415 card->pcidev = pcidev;
416 membase = pci_resource_start(pcidev, 1); 416 membase = pci_resource_start(pcidev, 1);
417 card->membase = ioremap(membase, NS_IOREMAP_SIZE); 417 card->membase = ioremap(membase, NS_IOREMAP_SIZE);
418 if (card->membase == 0) 418 if (!card->membase)
419 { 419 {
420 printk("nicstar%d: can't ioremap() membase.\n",i); 420 printk("nicstar%d: can't ioremap() membase.\n",i);
421 error = 3; 421 error = 3;
diff --git a/drivers/base/base.h b/drivers/base/base.h
index c0444146c09a..2c9ae43e2219 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -64,17 +64,6 @@ extern void sysdev_shutdown(void);
64extern int sysdev_suspend(pm_message_t state); 64extern int sysdev_suspend(pm_message_t state);
65extern int sysdev_resume(void); 65extern int sysdev_resume(void);
66 66
67static inline struct class_device *to_class_dev(struct kobject *obj)
68{
69 return container_of(obj, struct class_device, kobj);
70}
71
72static inline
73struct class_device_attribute *to_class_dev_attr(struct attribute *_attr)
74{
75 return container_of(_attr, struct class_device_attribute, attr);
76}
77
78extern char *make_class_name(const char *name, struct kobject *kobj); 67extern char *make_class_name(const char *name, struct kobject *kobj);
79 68
80extern int devres_release_all(struct device *dev); 69extern int devres_release_all(struct device *dev);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index b4901799308b..0ef00e8d4153 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -179,27 +179,13 @@ static void class_create_release(struct class *cls)
179 kfree(cls); 179 kfree(cls);
180} 180}
181 181
182static void class_device_create_release(struct class_device *class_dev)
183{
184 pr_debug("%s called for %s\n", __func__, class_dev->class_id);
185 kfree(class_dev);
186}
187
188/* needed to allow these devices to have parent class devices */
189static int class_device_create_uevent(struct class_device *class_dev,
190 struct kobj_uevent_env *env)
191{
192 pr_debug("%s called for %s\n", __func__, class_dev->class_id);
193 return 0;
194}
195
196/** 182/**
197 * class_create - create a struct class structure 183 * class_create - create a struct class structure
198 * @owner: pointer to the module that is to "own" this struct class 184 * @owner: pointer to the module that is to "own" this struct class
199 * @name: pointer to a string for the name of this class. 185 * @name: pointer to a string for the name of this class.
200 * 186 *
201 * This is used to create a struct class pointer that can then be used 187 * This is used to create a struct class pointer that can then be used
202 * in calls to class_device_create(). 188 * in calls to device_create().
203 * 189 *
204 * Note, the pointer created here is to be destroyed when finished by 190 * Note, the pointer created here is to be destroyed when finished by
205 * making a call to class_destroy(). 191 * making a call to class_destroy().
@@ -218,7 +204,6 @@ struct class *class_create(struct module *owner, const char *name)
218 cls->name = name; 204 cls->name = name;
219 cls->owner = owner; 205 cls->owner = owner;
220 cls->class_release = class_create_release; 206 cls->class_release = class_create_release;
221 cls->release = class_device_create_release;
222 207
223 retval = class_register(cls); 208 retval = class_register(cls);
224 if (retval) 209 if (retval)
@@ -246,113 +231,6 @@ void class_destroy(struct class *cls)
246 class_unregister(cls); 231 class_unregister(cls);
247} 232}
248 233
249/* Class Device Stuff */
250
251int class_device_create_file(struct class_device *class_dev,
252 const struct class_device_attribute *attr)
253{
254 int error = -EINVAL;
255 if (class_dev)
256 error = sysfs_create_file(&class_dev->kobj, &attr->attr);
257 return error;
258}
259
260void class_device_remove_file(struct class_device *class_dev,
261 const struct class_device_attribute *attr)
262{
263 if (class_dev)
264 sysfs_remove_file(&class_dev->kobj, &attr->attr);
265}
266
267int class_device_create_bin_file(struct class_device *class_dev,
268 struct bin_attribute *attr)
269{
270 int error = -EINVAL;
271 if (class_dev)
272 error = sysfs_create_bin_file(&class_dev->kobj, attr);
273 return error;
274}
275
276void class_device_remove_bin_file(struct class_device *class_dev,
277 struct bin_attribute *attr)
278{
279 if (class_dev)
280 sysfs_remove_bin_file(&class_dev->kobj, attr);
281}
282
283static ssize_t class_device_attr_show(struct kobject *kobj,
284 struct attribute *attr, char *buf)
285{
286 struct class_device_attribute *class_dev_attr = to_class_dev_attr(attr);
287 struct class_device *cd = to_class_dev(kobj);
288 ssize_t ret = 0;
289
290 if (class_dev_attr->show)
291 ret = class_dev_attr->show(cd, buf);
292 return ret;
293}
294
295static ssize_t class_device_attr_store(struct kobject *kobj,
296 struct attribute *attr,
297 const char *buf, size_t count)
298{
299 struct class_device_attribute *class_dev_attr = to_class_dev_attr(attr);
300 struct class_device *cd = to_class_dev(kobj);
301 ssize_t ret = 0;
302
303 if (class_dev_attr->store)
304 ret = class_dev_attr->store(cd, buf, count);
305 return ret;
306}
307
308static struct sysfs_ops class_dev_sysfs_ops = {
309 .show = class_device_attr_show,
310 .store = class_device_attr_store,
311};
312
313static void class_dev_release(struct kobject *kobj)
314{
315 struct class_device *cd = to_class_dev(kobj);
316 struct class *cls = cd->class;
317
318 pr_debug("device class '%s': release.\n", cd->class_id);
319
320 if (cd->release)
321 cd->release(cd);
322 else if (cls->release)
323 cls->release(cd);
324 else {
325 printk(KERN_ERR "Class Device '%s' does not have a release() "
326 "function, it is broken and must be fixed.\n",
327 cd->class_id);
328 WARN_ON(1);
329 }
330}
331
332static struct kobj_type class_device_ktype = {
333 .sysfs_ops = &class_dev_sysfs_ops,
334 .release = class_dev_release,
335};
336
337static int class_uevent_filter(struct kset *kset, struct kobject *kobj)
338{
339 struct kobj_type *ktype = get_ktype(kobj);
340
341 if (ktype == &class_device_ktype) {
342 struct class_device *class_dev = to_class_dev(kobj);
343 if (class_dev->class)
344 return 1;
345 }
346 return 0;
347}
348
349static const char *class_uevent_name(struct kset *kset, struct kobject *kobj)
350{
351 struct class_device *class_dev = to_class_dev(kobj);
352
353 return class_dev->class->name;
354}
355
356#ifdef CONFIG_SYSFS_DEPRECATED 234#ifdef CONFIG_SYSFS_DEPRECATED
357char *make_class_name(const char *name, struct kobject *kobj) 235char *make_class_name(const char *name, struct kobject *kobj)
358{ 236{
@@ -370,445 +248,8 @@ char *make_class_name(const char *name, struct kobject *kobj)
370 strcat(class_name, kobject_name(kobj)); 248 strcat(class_name, kobject_name(kobj));
371 return class_name; 249 return class_name;
372} 250}
373
374static int make_deprecated_class_device_links(struct class_device *class_dev)
375{
376 char *class_name;
377 int error;
378
379 if (!class_dev->dev)
380 return 0;
381
382 class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
383 if (class_name)
384 error = sysfs_create_link(&class_dev->dev->kobj,
385 &class_dev->kobj, class_name);
386 else
387 error = -ENOMEM;
388 kfree(class_name);
389 return error;
390}
391
392static void remove_deprecated_class_device_links(struct class_device *class_dev)
393{
394 char *class_name;
395
396 if (!class_dev->dev)
397 return;
398
399 class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
400 if (class_name)
401 sysfs_remove_link(&class_dev->dev->kobj, class_name);
402 kfree(class_name);
403}
404#else
405static inline int make_deprecated_class_device_links(struct class_device *cd)
406{ return 0; }
407static void remove_deprecated_class_device_links(struct class_device *cd)
408{ }
409#endif 251#endif
410 252
411static int class_uevent(struct kset *kset, struct kobject *kobj,
412 struct kobj_uevent_env *env)
413{
414 struct class_device *class_dev = to_class_dev(kobj);
415 struct device *dev = class_dev->dev;
416 int retval = 0;
417
418 pr_debug("%s - name = %s\n", __func__, class_dev->class_id);
419
420 if (MAJOR(class_dev->devt)) {
421 add_uevent_var(env, "MAJOR=%u", MAJOR(class_dev->devt));
422
423 add_uevent_var(env, "MINOR=%u", MINOR(class_dev->devt));
424 }
425
426 if (dev) {
427 const char *path = kobject_get_path(&dev->kobj, GFP_KERNEL);
428 if (path) {
429 add_uevent_var(env, "PHYSDEVPATH=%s", path);
430 kfree(path);
431 }
432
433 if (dev->bus)
434 add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
435
436 if (dev->driver)
437 add_uevent_var(env, "PHYSDEVDRIVER=%s",
438 dev->driver->name);
439 }
440
441 if (class_dev->uevent) {
442 /* have the class device specific function add its stuff */
443 retval = class_dev->uevent(class_dev, env);
444 if (retval)
445 pr_debug("class_dev->uevent() returned %d\n", retval);
446 } else if (class_dev->class->uevent) {
447 /* have the class specific function add its stuff */
448 retval = class_dev->class->uevent(class_dev, env);
449 if (retval)
450 pr_debug("class->uevent() returned %d\n", retval);
451 }
452
453 return retval;
454}
455
456static struct kset_uevent_ops class_uevent_ops = {
457 .filter = class_uevent_filter,
458 .name = class_uevent_name,
459 .uevent = class_uevent,
460};
461
462/*
463 * DO NOT copy how this is created, kset_create_and_add() should be
464 * called, but this is a hold-over from the old-way and will be deleted
465 * entirely soon.
466 */
467static struct kset class_obj_subsys = {
468 .uevent_ops = &class_uevent_ops,
469};
470
471static int class_device_add_attrs(struct class_device *cd)
472{
473 int i;
474 int error = 0;
475 struct class *cls = cd->class;
476
477 if (cls->class_dev_attrs) {
478 for (i = 0; attr_name(cls->class_dev_attrs[i]); i++) {
479 error = class_device_create_file(cd,
480 &cls->class_dev_attrs[i]);
481 if (error)
482 goto err;
483 }
484 }
485done:
486 return error;
487err:
488 while (--i >= 0)
489 class_device_remove_file(cd, &cls->class_dev_attrs[i]);
490 goto done;
491}
492
493static void class_device_remove_attrs(struct class_device *cd)
494{
495 int i;
496 struct class *cls = cd->class;
497
498 if (cls->class_dev_attrs) {
499 for (i = 0; attr_name(cls->class_dev_attrs[i]); i++)
500 class_device_remove_file(cd, &cls->class_dev_attrs[i]);
501 }
502}
503
504static int class_device_add_groups(struct class_device *cd)
505{
506 int i;
507 int error = 0;
508
509 if (cd->groups) {
510 for (i = 0; cd->groups[i]; i++) {
511 error = sysfs_create_group(&cd->kobj, cd->groups[i]);
512 if (error) {
513 while (--i >= 0)
514 sysfs_remove_group(&cd->kobj,
515 cd->groups[i]);
516 goto out;
517 }
518 }
519 }
520out:
521 return error;
522}
523
524static void class_device_remove_groups(struct class_device *cd)
525{
526 int i;
527 if (cd->groups)
528 for (i = 0; cd->groups[i]; i++)
529 sysfs_remove_group(&cd->kobj, cd->groups[i]);
530}
531
532static ssize_t show_dev(struct class_device *class_dev, char *buf)
533{
534 return print_dev_t(buf, class_dev->devt);
535}
536
537static struct class_device_attribute class_devt_attr =
538 __ATTR(dev, S_IRUGO, show_dev, NULL);
539
540static ssize_t store_uevent(struct class_device *class_dev,
541 const char *buf, size_t count)
542{
543 kobject_uevent(&class_dev->kobj, KOBJ_ADD);
544 return count;
545}
546
547static struct class_device_attribute class_uevent_attr =
548 __ATTR(uevent, S_IWUSR, NULL, store_uevent);
549
550void class_device_initialize(struct class_device *class_dev)
551{
552 class_dev->kobj.kset = &class_obj_subsys;
553 kobject_init(&class_dev->kobj, &class_device_ktype);
554 INIT_LIST_HEAD(&class_dev->node);
555}
556
557int class_device_add(struct class_device *class_dev)
558{
559 struct class *parent_class = NULL;
560 struct class_device *parent_class_dev = NULL;
561 struct class_interface *class_intf;
562 int error = -EINVAL;
563
564 class_dev = class_device_get(class_dev);
565 if (!class_dev)
566 return -EINVAL;
567
568 if (!strlen(class_dev->class_id))
569 goto out1;
570
571 parent_class = class_get(class_dev->class);
572 if (!parent_class)
573 goto out1;
574
575 parent_class_dev = class_device_get(class_dev->parent);
576
577 pr_debug("CLASS: registering class device: ID = '%s'\n",
578 class_dev->class_id);
579
580 /* first, register with generic layer. */
581 if (parent_class_dev)
582 class_dev->kobj.parent = &parent_class_dev->kobj;
583 else
584 class_dev->kobj.parent = &parent_class->subsys.kobj;
585
586 error = kobject_add(&class_dev->kobj, class_dev->kobj.parent,
587 "%s", class_dev->class_id);
588 if (error)
589 goto out2;
590
591 /* add the needed attributes to this device */
592 error = sysfs_create_link(&class_dev->kobj,
593 &parent_class->subsys.kobj, "subsystem");
594 if (error)
595 goto out3;
596
597 error = class_device_create_file(class_dev, &class_uevent_attr);
598 if (error)
599 goto out3;
600
601 if (MAJOR(class_dev->devt)) {
602 error = class_device_create_file(class_dev, &class_devt_attr);
603 if (error)
604 goto out4;
605 }
606
607 error = class_device_add_attrs(class_dev);
608 if (error)
609 goto out5;
610
611 if (class_dev->dev) {
612 error = sysfs_create_link(&class_dev->kobj,
613 &class_dev->dev->kobj, "device");
614 if (error)
615 goto out6;
616 }
617
618 error = class_device_add_groups(class_dev);
619 if (error)
620 goto out7;
621
622 error = make_deprecated_class_device_links(class_dev);
623 if (error)
624 goto out8;
625
626 kobject_uevent(&class_dev->kobj, KOBJ_ADD);
627
628 /* notify any interfaces this device is now here */
629 down(&parent_class->sem);
630 list_add_tail(&class_dev->node, &parent_class->children);
631 list_for_each_entry(class_intf, &parent_class->interfaces, node) {
632 if (class_intf->add)
633 class_intf->add(class_dev, class_intf);
634 }
635 up(&parent_class->sem);
636
637 goto out1;
638
639 out8:
640 class_device_remove_groups(class_dev);
641 out7:
642 if (class_dev->dev)
643 sysfs_remove_link(&class_dev->kobj, "device");
644 out6:
645 class_device_remove_attrs(class_dev);
646 out5:
647 if (MAJOR(class_dev->devt))
648 class_device_remove_file(class_dev, &class_devt_attr);
649 out4:
650 class_device_remove_file(class_dev, &class_uevent_attr);
651 out3:
652 kobject_del(&class_dev->kobj);
653 out2:
654 if (parent_class_dev)
655 class_device_put(parent_class_dev);
656 class_put(parent_class);
657 out1:
658 class_device_put(class_dev);
659 return error;
660}
661
662int class_device_register(struct class_device *class_dev)
663{
664 class_device_initialize(class_dev);
665 return class_device_add(class_dev);
666}
667
668/**
669 * class_device_create - creates a class device and registers it with sysfs
670 * @cls: pointer to the struct class that this device should be registered to.
671 * @parent: pointer to the parent struct class_device of this new device, if
672 * any.
673 * @devt: the dev_t for the char device to be added.
674 * @device: a pointer to a struct device that is assiociated with this class
675 * device.
676 * @fmt: string for the class device's name
677 *
678 * This function can be used by char device classes. A struct
679 * class_device will be created in sysfs, registered to the specified
680 * class.
681 * A "dev" file will be created, showing the dev_t for the device, if
682 * the dev_t is not 0,0.
683 * If a pointer to a parent struct class_device is passed in, the newly
684 * created struct class_device will be a child of that device in sysfs.
685 * The pointer to the struct class_device will be returned from the
686 * call. Any further sysfs files that might be required can be created
687 * using this pointer.
688 *
689 * Note: the struct class passed to this function must have previously
690 * been created with a call to class_create().
691 */
692struct class_device *class_device_create(struct class *cls,
693 struct class_device *parent,
694 dev_t devt,
695 struct device *device,
696 const char *fmt, ...)
697{
698 va_list args;
699 struct class_device *class_dev = NULL;
700 int retval = -ENODEV;
701
702 if (cls == NULL || IS_ERR(cls))
703 goto error;
704
705 class_dev = kzalloc(sizeof(*class_dev), GFP_KERNEL);
706 if (!class_dev) {
707 retval = -ENOMEM;
708 goto error;
709 }
710
711 class_dev->devt = devt;
712 class_dev->dev = device;
713 class_dev->class = cls;
714 class_dev->parent = parent;
715 class_dev->release = class_device_create_release;
716 class_dev->uevent = class_device_create_uevent;
717
718 va_start(args, fmt);
719 vsnprintf(class_dev->class_id, BUS_ID_SIZE, fmt, args);
720 va_end(args);
721 retval = class_device_register(class_dev);
722 if (retval)
723 goto error;
724
725 return class_dev;
726
727error:
728 kfree(class_dev);
729 return ERR_PTR(retval);
730}
731
732void class_device_del(struct class_device *class_dev)
733{
734 struct class *parent_class = class_dev->class;
735 struct class_device *parent_device = class_dev->parent;
736 struct class_interface *class_intf;
737
738 if (parent_class) {
739 down(&parent_class->sem);
740 list_del_init(&class_dev->node);
741 list_for_each_entry(class_intf, &parent_class->interfaces, node)
742 if (class_intf->remove)
743 class_intf->remove(class_dev, class_intf);
744 up(&parent_class->sem);
745 }
746
747 if (class_dev->dev) {
748 remove_deprecated_class_device_links(class_dev);
749 sysfs_remove_link(&class_dev->kobj, "device");
750 }
751 sysfs_remove_link(&class_dev->kobj, "subsystem");
752 class_device_remove_file(class_dev, &class_uevent_attr);
753 if (MAJOR(class_dev->devt))
754 class_device_remove_file(class_dev, &class_devt_attr);
755 class_device_remove_attrs(class_dev);
756 class_device_remove_groups(class_dev);
757
758 kobject_uevent(&class_dev->kobj, KOBJ_REMOVE);
759 kobject_del(&class_dev->kobj);
760
761 class_device_put(parent_device);
762 class_put(parent_class);
763}
764
765void class_device_unregister(struct class_device *class_dev)
766{
767 pr_debug("CLASS: Unregistering class device. ID = '%s'\n",
768 class_dev->class_id);
769 class_device_del(class_dev);
770 class_device_put(class_dev);
771}
772
773/**
774 * class_device_destroy - removes a class device that was created with class_device_create()
775 * @cls: the pointer to the struct class that this device was registered * with.
776 * @devt: the dev_t of the device that was previously registered.
777 *
778 * This call unregisters and cleans up a class device that was created with a
779 * call to class_device_create()
780 */
781void class_device_destroy(struct class *cls, dev_t devt)
782{
783 struct class_device *class_dev = NULL;
784 struct class_device *class_dev_tmp;
785
786 down(&cls->sem);
787 list_for_each_entry(class_dev_tmp, &cls->children, node) {
788 if (class_dev_tmp->devt == devt) {
789 class_dev = class_dev_tmp;
790 break;
791 }
792 }
793 up(&cls->sem);
794
795 if (class_dev)
796 class_device_unregister(class_dev);
797}
798
799struct class_device *class_device_get(struct class_device *class_dev)
800{
801 if (class_dev)
802 return to_class_dev(kobject_get(&class_dev->kobj));
803 return NULL;
804}
805
806void class_device_put(struct class_device *class_dev)
807{
808 if (class_dev)
809 kobject_put(&class_dev->kobj);
810}
811
812/** 253/**
813 * class_for_each_device - device iterator 254 * class_for_each_device - device iterator
814 * @class: the class we're iterating 255 * @class: the class we're iterating
@@ -897,56 +338,9 @@ struct device *class_find_device(struct class *class, void *data,
897} 338}
898EXPORT_SYMBOL_GPL(class_find_device); 339EXPORT_SYMBOL_GPL(class_find_device);
899 340
900/**
901 * class_find_child - device iterator for locating a particular class_device
902 * @class: the class we're iterating
903 * @data: data for the match function
904 * @match: function to check class_device
905 *
906 * This function returns a reference to a class_device that is 'found' for
907 * later use, as determined by the @match callback.
908 *
909 * The callback should return 0 if the class_device doesn't match and non-zero
910 * if it does. If the callback returns non-zero, this function will
911 * return to the caller and not iterate over any more class_devices.
912 *
913 * Note, you will need to drop the reference with class_device_put() after use.
914 *
915 * We hold class->sem in this function, so it can not be
916 * re-acquired in @match, otherwise it will self-deadlocking. For
917 * example, calls to add or remove class members would be verboten.
918 */
919struct class_device *class_find_child(struct class *class, void *data,
920 int (*match)(struct class_device *, void *))
921{
922 struct class_device *dev;
923 int found = 0;
924
925 if (!class)
926 return NULL;
927
928 down(&class->sem);
929 list_for_each_entry(dev, &class->children, node) {
930 dev = class_device_get(dev);
931 if (dev) {
932 if (match(dev, data)) {
933 found = 1;
934 break;
935 } else
936 class_device_put(dev);
937 } else
938 break;
939 }
940 up(&class->sem);
941
942 return found ? dev : NULL;
943}
944EXPORT_SYMBOL_GPL(class_find_child);
945
946int class_interface_register(struct class_interface *class_intf) 341int class_interface_register(struct class_interface *class_intf)
947{ 342{
948 struct class *parent; 343 struct class *parent;
949 struct class_device *class_dev;
950 struct device *dev; 344 struct device *dev;
951 345
952 if (!class_intf || !class_intf->class) 346 if (!class_intf || !class_intf->class)
@@ -958,10 +352,6 @@ int class_interface_register(struct class_interface *class_intf)
958 352
959 down(&parent->sem); 353 down(&parent->sem);
960 list_add_tail(&class_intf->node, &parent->interfaces); 354 list_add_tail(&class_intf->node, &parent->interfaces);
961 if (class_intf->add) {
962 list_for_each_entry(class_dev, &parent->children, node)
963 class_intf->add(class_dev, class_intf);
964 }
965 if (class_intf->add_dev) { 355 if (class_intf->add_dev) {
966 list_for_each_entry(dev, &parent->devices, node) 356 list_for_each_entry(dev, &parent->devices, node)
967 class_intf->add_dev(dev, class_intf); 357 class_intf->add_dev(dev, class_intf);
@@ -974,7 +364,6 @@ int class_interface_register(struct class_interface *class_intf)
974void class_interface_unregister(struct class_interface *class_intf) 364void class_interface_unregister(struct class_interface *class_intf)
975{ 365{
976 struct class *parent = class_intf->class; 366 struct class *parent = class_intf->class;
977 struct class_device *class_dev;
978 struct device *dev; 367 struct device *dev;
979 368
980 if (!parent) 369 if (!parent)
@@ -982,10 +371,6 @@ void class_interface_unregister(struct class_interface *class_intf)
982 371
983 down(&parent->sem); 372 down(&parent->sem);
984 list_del_init(&class_intf->node); 373 list_del_init(&class_intf->node);
985 if (class_intf->remove) {
986 list_for_each_entry(class_dev, &parent->children, node)
987 class_intf->remove(class_dev, class_intf);
988 }
989 if (class_intf->remove_dev) { 374 if (class_intf->remove_dev) {
990 list_for_each_entry(dev, &parent->devices, node) 375 list_for_each_entry(dev, &parent->devices, node)
991 class_intf->remove_dev(dev, class_intf); 376 class_intf->remove_dev(dev, class_intf);
@@ -1000,13 +385,6 @@ int __init classes_init(void)
1000 class_kset = kset_create_and_add("class", NULL, NULL); 385 class_kset = kset_create_and_add("class", NULL, NULL);
1001 if (!class_kset) 386 if (!class_kset)
1002 return -ENOMEM; 387 return -ENOMEM;
1003
1004 /* ick, this is ugly, the things we go through to keep from showing up
1005 * in sysfs... */
1006 kset_init(&class_obj_subsys);
1007 kobject_set_name(&class_obj_subsys.kobj, "class_obj");
1008 if (!class_obj_subsys.kobj.parent)
1009 class_obj_subsys.kobj.parent = &class_obj_subsys.kobj;
1010 return 0; 388 return 0;
1011} 389}
1012 390
@@ -1017,19 +395,5 @@ EXPORT_SYMBOL_GPL(class_unregister);
1017EXPORT_SYMBOL_GPL(class_create); 395EXPORT_SYMBOL_GPL(class_create);
1018EXPORT_SYMBOL_GPL(class_destroy); 396EXPORT_SYMBOL_GPL(class_destroy);
1019 397
1020EXPORT_SYMBOL_GPL(class_device_register);
1021EXPORT_SYMBOL_GPL(class_device_unregister);
1022EXPORT_SYMBOL_GPL(class_device_initialize);
1023EXPORT_SYMBOL_GPL(class_device_add);
1024EXPORT_SYMBOL_GPL(class_device_del);
1025EXPORT_SYMBOL_GPL(class_device_get);
1026EXPORT_SYMBOL_GPL(class_device_put);
1027EXPORT_SYMBOL_GPL(class_device_create);
1028EXPORT_SYMBOL_GPL(class_device_destroy);
1029EXPORT_SYMBOL_GPL(class_device_create_file);
1030EXPORT_SYMBOL_GPL(class_device_remove_file);
1031EXPORT_SYMBOL_GPL(class_device_create_bin_file);
1032EXPORT_SYMBOL_GPL(class_device_remove_bin_file);
1033
1034EXPORT_SYMBOL_GPL(class_interface_register); 398EXPORT_SYMBOL_GPL(class_interface_register);
1035EXPORT_SYMBOL_GPL(class_interface_unregister); 399EXPORT_SYMBOL_GPL(class_interface_unregister);
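
With the class_device API removed, classes created by class_create() get their children from device_create() and tear them down with device_destroy(), as the updated kernel-doc above now says. A minimal sketch of that flow; the "example" class name, the MKDEV(240, 0) number and the function names are placeholders, and the device_create() signature shown (without a drvdata argument) is the one from this kernel generation, so treat the exact prototype as version-dependent:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/kdev_t.h>
    #include <linux/module.h>

    static struct class *example_class;
    static struct device *example_dev;

    static int __init example_init(void)
    {
            example_class = class_create(THIS_MODULE, "example");
            if (IS_ERR(example_class))
                    return PTR_ERR(example_class);

            /* creates /sys/class/example/example0 with a "dev" file for 240:0 */
            example_dev = device_create(example_class, NULL, MKDEV(240, 0),
                                        "example0");
            if (IS_ERR(example_dev)) {
                    class_destroy(example_class);
                    return PTR_ERR(example_dev);
            }
            return 0;
    }

    static void __exit example_exit(void)
    {
            device_destroy(example_class, MKDEV(240, 0));
            class_destroy(example_class);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
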
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9248e0927d08..be288b5e4180 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -787,6 +787,10 @@ int device_add(struct device *dev)
787 parent = get_device(dev->parent); 787 parent = get_device(dev->parent);
788 setup_parent(dev, parent); 788 setup_parent(dev, parent);
789 789
790 /* use parent numa_node */
791 if (parent)
792 set_dev_node(dev, dev_to_node(parent));
793
790 /* first, register with generic layer. */ 794 /* first, register with generic layer. */
791 error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev->bus_id); 795 error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev->bus_id);
792 if (error) 796 if (error)
@@ -1306,8 +1310,11 @@ int device_move(struct device *dev, struct device *new_parent)
1306 dev->parent = new_parent; 1310 dev->parent = new_parent;
1307 if (old_parent) 1311 if (old_parent)
1308 klist_remove(&dev->knode_parent); 1312 klist_remove(&dev->knode_parent);
1309 if (new_parent) 1313 if (new_parent) {
1310 klist_add_tail(&dev->knode_parent, &new_parent->klist_children); 1314 klist_add_tail(&dev->knode_parent, &new_parent->klist_children);
1315 set_dev_node(dev, dev_to_node(new_parent));
1316 }
1317
1311 if (!dev->class) 1318 if (!dev->class)
1312 goto out_put; 1319 goto out_put;
1313 error = device_move_class_links(dev, old_parent, new_parent); 1320 error = device_move_class_links(dev, old_parent, new_parent);
@@ -1317,9 +1324,12 @@ int device_move(struct device *dev, struct device *new_parent)
1317 if (!kobject_move(&dev->kobj, &old_parent->kobj)) { 1324 if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
1318 if (new_parent) 1325 if (new_parent)
1319 klist_remove(&dev->knode_parent); 1326 klist_remove(&dev->knode_parent);
1320 if (old_parent) 1327 dev->parent = old_parent;
1328 if (old_parent) {
1321 klist_add_tail(&dev->knode_parent, 1329 klist_add_tail(&dev->knode_parent,
1322 &old_parent->klist_children); 1330 &old_parent->klist_children);
1331 set_dev_node(dev, dev_to_node(old_parent));
1332 }
1323 } 1333 }
1324 cleanup_glue_dir(dev, new_parent_kobj); 1334 cleanup_glue_dir(dev, new_parent_kobj);
1325 put_device(new_parent); 1335 put_device(new_parent);
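
Both core.c hunks propagate NUMA placement from parent to child: device_add() copies the parent's node when a device is first registered, and device_move() re-copies it when the device is re-parented, restoring the old parent's node if the kobject move fails. The operation itself is just the dev_to_node()/set_dev_node() pair, which collapses to a no-op on non-NUMA builds:

    /* as in device_add()/device_move(): inherit the parent's NUMA node */
    if (parent)
            set_dev_node(dev, dev_to_node(parent));
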
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 6fe417429977..e38dfed41d80 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -18,7 +18,7 @@ struct sysdev_class cpu_sysdev_class = {
18}; 18};
19EXPORT_SYMBOL(cpu_sysdev_class); 19EXPORT_SYMBOL(cpu_sysdev_class);
20 20
21static struct sys_device *cpu_sys_devices[NR_CPUS]; 21static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);
22 22
23#ifdef CONFIG_HOTPLUG_CPU 23#ifdef CONFIG_HOTPLUG_CPU
24static ssize_t show_online(struct sys_device *dev, char *buf) 24static ssize_t show_online(struct sys_device *dev, char *buf)
@@ -68,7 +68,7 @@ void unregister_cpu(struct cpu *cpu)
68 sysdev_remove_file(&cpu->sysdev, &attr_online); 68 sysdev_remove_file(&cpu->sysdev, &attr_online);
69 69
70 sysdev_unregister(&cpu->sysdev); 70 sysdev_unregister(&cpu->sysdev);
71 cpu_sys_devices[logical_cpu] = NULL; 71 per_cpu(cpu_sys_devices, logical_cpu) = NULL;
72 return; 72 return;
73} 73}
74#else /* ... !CONFIG_HOTPLUG_CPU */ 74#else /* ... !CONFIG_HOTPLUG_CPU */
@@ -167,7 +167,7 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
167 if (!error && cpu->hotpluggable) 167 if (!error && cpu->hotpluggable)
168 register_cpu_control(cpu); 168 register_cpu_control(cpu);
169 if (!error) 169 if (!error)
170 cpu_sys_devices[num] = &cpu->sysdev; 170 per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
171 if (!error) 171 if (!error)
172 register_cpu_under_node(num, cpu_to_node(num)); 172 register_cpu_under_node(num, cpu_to_node(num));
173 173
@@ -180,8 +180,8 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
180 180
181struct sys_device *get_cpu_sysdev(unsigned cpu) 181struct sys_device *get_cpu_sysdev(unsigned cpu)
182{ 182{
183 if (cpu < NR_CPUS) 183 if (cpu < nr_cpu_ids && cpu_possible(cpu))
184 return cpu_sys_devices[cpu]; 184 return per_cpu(cpu_sys_devices, cpu);
185 else 185 else
186 return NULL; 186 return NULL;
187} 187}
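
The cpu.c hunks replace the flat cpu_sys_devices[NR_CPUS] array with a per-CPU variable and bound the lookup with nr_cpu_ids/cpu_possible() instead of NR_CPUS. The pattern in isolation; cpu_sys_devices and get_cpu_sysdev() mirror the hunk, while the store helper is an illustrative wrapper:

    #include <linux/cpu.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);

    static void example_remember_cpu(struct cpu *cpu, int num)
    {
            per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
    }

    struct sys_device *get_cpu_sysdev(unsigned cpu)
    {
            /* nr_cpu_ids/cpu_possible() replace the old "cpu < NR_CPUS" bound */
            if (cpu < nr_cpu_ids && cpu_possible(cpu))
                    return per_cpu(cpu_sys_devices, cpu);
            return NULL;
    }
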
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 9a6537f14401..2ef5acf4368b 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -217,12 +217,22 @@ static void driver_remove_groups(struct device_driver *drv,
217int driver_register(struct device_driver *drv) 217int driver_register(struct device_driver *drv)
218{ 218{
219 int ret; 219 int ret;
220 struct device_driver *other;
220 221
221 if ((drv->bus->probe && drv->probe) || 222 if ((drv->bus->probe && drv->probe) ||
222 (drv->bus->remove && drv->remove) || 223 (drv->bus->remove && drv->remove) ||
223 (drv->bus->shutdown && drv->shutdown)) 224 (drv->bus->shutdown && drv->shutdown))
224 printk(KERN_WARNING "Driver '%s' needs updating - please use " 225 printk(KERN_WARNING "Driver '%s' needs updating - please use "
225 "bus_type methods\n", drv->name); 226 "bus_type methods\n", drv->name);
227
228 other = driver_find(drv->name, drv->bus);
229 if (other) {
230 put_driver(other);
231 printk(KERN_ERR "Error: Driver '%s' is already registered, "
232 "aborting...\n", drv->name);
233 return -EEXIST;
234 }
235
226 ret = bus_add_driver(drv); 236 ret = bus_add_driver(drv);
227 if (ret) 237 if (ret)
228 return ret; 238 return ret;
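
driver_register() now rejects a second driver with the same name on the same bus. driver_find() returns the existing driver with a reference held, so in this kernel generation the reference is dropped with put_driver() before bailing out with -EEXIST. The guard, extracted from the hunk:

    struct device_driver *other;

    other = driver_find(drv->name, drv->bus);
    if (other) {
            put_driver(other);      /* driver_find() took a reference */
            printk(KERN_ERR "Error: Driver '%s' is already registered, "
                   "aborting...\n", drv->name);
            return -EEXIST;
    }
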
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 1fef7df8c9d6..9fd4a8534146 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -396,6 +396,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
396 if (!firmware_p) 396 if (!firmware_p)
397 return -EINVAL; 397 return -EINVAL;
398 398
399 printk(KERN_INFO "firmware: requesting %s\n", name);
400
399 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); 401 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
400 if (!firmware) { 402 if (!firmware) {
401 printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n", 403 printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n",
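
The firmware_class hunk only adds an informational printk when a request arrives; the consumer-side sequence is unchanged. A minimal caller sketch, with a placeholder firmware path ("example/blob.bin") and helper name:

    #include <linux/firmware.h>

    static int example_load_firmware(struct device *dev)
    {
            const struct firmware *fw;
            int err;

            err = request_firmware(&fw, "example/blob.bin", dev);
            if (err)
                    return err;     /* no such image, or the helper failed */

            /* fw->data and fw->size now describe the image; program it here */

            release_firmware(fw);
            return 0;
    }
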
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 12fde2d03d69..39f3d1b3a213 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -77,6 +77,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
77 "Node %d PageTables: %8lu kB\n" 77 "Node %d PageTables: %8lu kB\n"
78 "Node %d NFS_Unstable: %8lu kB\n" 78 "Node %d NFS_Unstable: %8lu kB\n"
79 "Node %d Bounce: %8lu kB\n" 79 "Node %d Bounce: %8lu kB\n"
80 "Node %d WritebackTmp: %8lu kB\n"
80 "Node %d Slab: %8lu kB\n" 81 "Node %d Slab: %8lu kB\n"
81 "Node %d SReclaimable: %8lu kB\n" 82 "Node %d SReclaimable: %8lu kB\n"
82 "Node %d SUnreclaim: %8lu kB\n", 83 "Node %d SUnreclaim: %8lu kB\n",
@@ -99,6 +100,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
99 nid, K(node_page_state(nid, NR_PAGETABLE)), 100 nid, K(node_page_state(nid, NR_PAGETABLE)),
100 nid, K(node_page_state(nid, NR_UNSTABLE_NFS)), 101 nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
101 nid, K(node_page_state(nid, NR_BOUNCE)), 102 nid, K(node_page_state(nid, NR_BOUNCE)),
103 nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
102 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + 104 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
103 node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 105 node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
104 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), 106 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
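
The new "WritebackTmp" row reports NR_WRITEBACK_TEMP for the node; like the other rows in node_read_meminfo() it is printed through the file-local K() macro, which shifts a page count into kilobytes. For reference (the wrapper function below is illustrative):

    /* as defined locally in drivers/base/node.c */
    #define K(x) ((x) << (PAGE_SHIFT - 10))

    static unsigned long example_writeback_tmp_kb(int nid)
    {
            return K(node_page_state(nid, NR_WRITEBACK_TEMP));
    }
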
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 280e71ee744c..5b4c6e649c11 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -195,7 +195,6 @@ void aoedev_exit(void);
195struct aoedev *aoedev_by_aoeaddr(int maj, int min); 195struct aoedev *aoedev_by_aoeaddr(int maj, int min);
196struct aoedev *aoedev_by_sysminor_m(ulong sysminor); 196struct aoedev *aoedev_by_sysminor_m(ulong sysminor);
197void aoedev_downdev(struct aoedev *d); 197void aoedev_downdev(struct aoedev *d);
198int aoedev_isbusy(struct aoedev *d);
199int aoedev_flush(const char __user *str, size_t size); 198int aoedev_flush(const char __user *str, size_t size);
200 199
201int aoenet_init(void); 200int aoenet_init(void);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d00293ba3b45..8fc429cf82b6 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -668,16 +668,16 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
668 u16 n; 668 u16 n;
669 669
670 /* word 83: command set supported */ 670 /* word 83: command set supported */
671 n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1])); 671 n = get_unaligned_le16(&id[83 << 1]);
672 672
673 /* word 86: command set/feature enabled */ 673 /* word 86: command set/feature enabled */
674 n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1])); 674 n |= get_unaligned_le16(&id[86 << 1]);
675 675
676 if (n & (1<<10)) { /* bit 10: LBA 48 */ 676 if (n & (1<<10)) { /* bit 10: LBA 48 */
677 d->flags |= DEVFL_EXT; 677 d->flags |= DEVFL_EXT;
678 678
679 /* word 100: number lba48 sectors */ 679 /* word 100: number lba48 sectors */
680 ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1])); 680 ssize = get_unaligned_le64(&id[100 << 1]);
681 681
682 /* set as in ide-disk.c:init_idedisk_capacity */ 682 /* set as in ide-disk.c:init_idedisk_capacity */
683 d->geo.cylinders = ssize; 683 d->geo.cylinders = ssize;
@@ -688,12 +688,12 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
688 d->flags &= ~DEVFL_EXT; 688 d->flags &= ~DEVFL_EXT;
689 689
690 /* number lba28 sectors */ 690 /* number lba28 sectors */
691 ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1])); 691 ssize = get_unaligned_le32(&id[60 << 1]);
692 692
693 /* NOTE: obsolete in ATA 6 */ 693 /* NOTE: obsolete in ATA 6 */
694 d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1])); 694 d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
695 d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1])); 695 d->geo.heads = get_unaligned_le16(&id[55 << 1]);
696 d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1])); 696 d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
697 } 697 }
698 698
699 if (d->ssize != ssize) 699 if (d->ssize != ssize)
@@ -779,7 +779,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
779 u16 aoemajor; 779 u16 aoemajor;
780 780
781 hin = (struct aoe_hdr *) skb_mac_header(skb); 781 hin = (struct aoe_hdr *) skb_mac_header(skb);
782 aoemajor = be16_to_cpu(get_unaligned(&hin->major)); 782 aoemajor = get_unaligned_be16(&hin->major);
783 d = aoedev_by_aoeaddr(aoemajor, hin->minor); 783 d = aoedev_by_aoeaddr(aoemajor, hin->minor);
784 if (d == NULL) { 784 if (d == NULL) {
785 snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response " 785 snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
@@ -791,7 +791,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
791 791
792 spin_lock_irqsave(&d->lock, flags); 792 spin_lock_irqsave(&d->lock, flags);
793 793
794 n = be32_to_cpu(get_unaligned(&hin->tag)); 794 n = get_unaligned_be32(&hin->tag);
795 t = gettgt(d, hin->src); 795 t = gettgt(d, hin->src);
796 if (t == NULL) { 796 if (t == NULL) {
797 printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n", 797 printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n",
@@ -806,9 +806,9 @@ aoecmd_ata_rsp(struct sk_buff *skb)
806 snprintf(ebuf, sizeof ebuf, 806 snprintf(ebuf, sizeof ebuf,
807 "%15s e%d.%d tag=%08x@%08lx\n", 807 "%15s e%d.%d tag=%08x@%08lx\n",
808 "unexpected rsp", 808 "unexpected rsp",
809 be16_to_cpu(get_unaligned(&hin->major)), 809 get_unaligned_be16(&hin->major),
810 hin->minor, 810 hin->minor,
811 be32_to_cpu(get_unaligned(&hin->tag)), 811 get_unaligned_be32(&hin->tag),
812 jiffies); 812 jiffies);
813 aoechr_error(ebuf); 813 aoechr_error(ebuf);
814 return; 814 return;
@@ -873,7 +873,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
873 printk(KERN_INFO 873 printk(KERN_INFO
874 "aoe: unrecognized ata command %2.2Xh for %d.%d\n", 874 "aoe: unrecognized ata command %2.2Xh for %d.%d\n",
875 ahout->cmdstat, 875 ahout->cmdstat,
876 be16_to_cpu(get_unaligned(&hin->major)), 876 get_unaligned_be16(&hin->major),
877 hin->minor); 877 hin->minor);
878 } 878 }
879 } 879 }
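
The aoecmd.c hunks (and the aoenet.c ones below) replace the two-step le16_to_cpu(get_unaligned(...)) idiom with the combined get_unaligned_le16()/_be16()/_le32()/_le64() helpers, which perform the possibly unaligned load and the byte-order conversion in one call. Old and new forms for one identify-page word, wrapped in an illustrative helper:

    #include <asm/unaligned.h>

    static u16 example_identify_word83(const unsigned char *id)
    {
            /* old: unaligned fetch, then an explicit byte-order conversion
             *      return le16_to_cpu(get_unaligned((__le16 *) &id[83 << 1]));
             * new: one helper does the unaligned little-endian load */
            return get_unaligned_le16(&id[83 << 1]);
    }
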
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index f9a1cd9edb77..a1d813ab0d6b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -18,24 +18,6 @@ static void skbpoolfree(struct aoedev *d);
18static struct aoedev *devlist; 18static struct aoedev *devlist;
19static DEFINE_SPINLOCK(devlist_lock); 19static DEFINE_SPINLOCK(devlist_lock);
20 20
21int
22aoedev_isbusy(struct aoedev *d)
23{
24 struct aoetgt **t, **te;
25 struct frame *f, *e;
26
27 t = d->targets;
28 te = t + NTARGETS;
29 for (; t < te && *t; t++) {
30 f = (*t)->frames;
31 e = f + (*t)->nframes;
32 for (; f < e; f++)
33 if (f->tag != FREETAG)
34 return 1;
35 }
36 return 0;
37}
38
39struct aoedev * 21struct aoedev *
40aoedev_by_aoeaddr(int maj, int min) 22aoedev_by_aoeaddr(int maj, int min)
41{ 23{
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 18d243c73eee..d625169c8e48 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -128,7 +128,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
128 skb_push(skb, ETH_HLEN); /* (1) */ 128 skb_push(skb, ETH_HLEN); /* (1) */
129 129
130 h = (struct aoe_hdr *) skb_mac_header(skb); 130 h = (struct aoe_hdr *) skb_mac_header(skb);
131 n = be32_to_cpu(get_unaligned(&h->tag)); 131 n = get_unaligned_be32(&h->tag);
132 if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31)) 132 if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
133 goto exit; 133 goto exit;
134 134
@@ -140,7 +140,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
140 printk(KERN_ERR 140 printk(KERN_ERR
141 "%s%d.%d@%s; ecode=%d '%s'\n", 141 "%s%d.%d@%s; ecode=%d '%s'\n",
142 "aoe: error packet from ", 142 "aoe: error packet from ",
143 be16_to_cpu(get_unaligned(&h->major)), 143 get_unaligned_be16(&h->major),
144 h->minor, skb->dev->name, 144 h->minor, skb->dev->name,
145 h->err, aoe_errlist[n]); 145 h->err, aoe_errlist[n]);
146 goto exit; 146 goto exit;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index e8e38faeafd8..a196ef7f147f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -387,10 +387,14 @@ static struct block_device_operations brd_fops = {
387 */ 387 */
388static int rd_nr; 388static int rd_nr;
389int rd_size = CONFIG_BLK_DEV_RAM_SIZE; 389int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
390static int max_part;
391static int part_shift;
390module_param(rd_nr, int, 0); 392module_param(rd_nr, int, 0);
391MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); 393MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
392module_param(rd_size, int, 0); 394module_param(rd_size, int, 0);
393MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); 395MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
396module_param(max_part, int, 0);
397MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
394MODULE_LICENSE("GPL"); 398MODULE_LICENSE("GPL");
395MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); 399MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
396 400
@@ -435,11 +439,11 @@ static struct brd_device *brd_alloc(int i)
435 blk_queue_max_sectors(brd->brd_queue, 1024); 439 blk_queue_max_sectors(brd->brd_queue, 1024);
436 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); 440 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
437 441
438 disk = brd->brd_disk = alloc_disk(1); 442 disk = brd->brd_disk = alloc_disk(1 << part_shift);
439 if (!disk) 443 if (!disk)
440 goto out_free_queue; 444 goto out_free_queue;
441 disk->major = RAMDISK_MAJOR; 445 disk->major = RAMDISK_MAJOR;
442 disk->first_minor = i; 446 disk->first_minor = i << part_shift;
443 disk->fops = &brd_fops; 447 disk->fops = &brd_fops;
444 disk->private_data = brd; 448 disk->private_data = brd;
445 disk->queue = brd->brd_queue; 449 disk->queue = brd->brd_queue;
@@ -523,7 +527,12 @@ static int __init brd_init(void)
523 * themselves and have kernel automatically instantiate actual 527 * themselves and have kernel automatically instantiate actual
524 * device on-demand. 528 * device on-demand.
525 */ 529 */
526 if (rd_nr > 1UL << MINORBITS) 530
531 part_shift = 0;
532 if (max_part > 0)
533 part_shift = fls(max_part);
534
535 if (rd_nr > 1UL << (MINORBITS - part_shift))
527 return -EINVAL; 536 return -EINVAL;
528 537
529 if (rd_nr) { 538 if (rd_nr) {
@@ -531,7 +540,7 @@ static int __init brd_init(void)
531 range = rd_nr; 540 range = rd_nr;
532 } else { 541 } else {
533 nr = CONFIG_BLK_DEV_RAM_COUNT; 542 nr = CONFIG_BLK_DEV_RAM_COUNT;
534 range = 1UL << MINORBITS; 543 range = 1UL << (MINORBITS - part_shift);
535 } 544 }
536 545
537 if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) 546 if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
@@ -570,7 +579,7 @@ static void __exit brd_exit(void)
570 unsigned long range; 579 unsigned long range;
571 struct brd_device *brd, *next; 580 struct brd_device *brd, *next;
572 581
573 range = rd_nr ? rd_nr : 1UL << MINORBITS; 582 range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);
574 583
575 list_for_each_entry_safe(brd, next, &brd_devices, brd_list) 584 list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
576 brd_del_one(brd); 585 brd_del_one(brd);
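The max_part/part_shift changes above carve up the minor-number space so each RAM disk owns 1 << part_shift consecutive minors (the whole-disk node plus its partitions), which is why rd_nr is now bounded by 1UL << (MINORBITS - part_shift). A rough illustration of that layout, assuming the kernel's fls() from <linux/bitops.h>; the helper name is made up:

    /* With max_part = 15, fls(15) = 4: each disk spans 16 minors and
     * device i starts at minor i << 4. */
    static void minor_layout(int index, int max_part, int *first, int *count)
    {
            int part_shift = (max_part > 0) ? fls(max_part) : 0;

            *first = index << part_shift;   /* whole-disk minor          */
            *count = 1 << part_shift;       /* disk plus its partitions  */
    }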
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index cf6083a1f928..e336b05fe4a7 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -425,16 +425,12 @@ static void __devinit cciss_procinit(int i)
425 struct proc_dir_entry *pde; 425 struct proc_dir_entry *pde;
426 426
427 if (proc_cciss == NULL) 427 if (proc_cciss == NULL)
428 proc_cciss = proc_mkdir("cciss", proc_root_driver); 428 proc_cciss = proc_mkdir("driver/cciss", NULL);
429 if (!proc_cciss) 429 if (!proc_cciss)
430 return; 430 return;
431 pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | 431 pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
432 S_IROTH, proc_cciss, 432 S_IROTH, proc_cciss,
433 &cciss_proc_fops); 433 &cciss_proc_fops, hba[i]);
434 if (!pde)
435 return;
436
437 pde->data = hba[i];
438} 434}
439#endif /* CONFIG_PROC_FS */ 435#endif /* CONFIG_PROC_FS */
440 436
@@ -3700,7 +3696,7 @@ static void __exit cciss_cleanup(void)
3700 cciss_remove_one(hba[i]->pdev); 3696 cciss_remove_one(hba[i]->pdev);
3701 } 3697 }
3702 } 3698 }
3703 remove_proc_entry("cciss", proc_root_driver); 3699 remove_proc_entry("driver/cciss", NULL);
3704} 3700}
3705 3701
3706static void fail_all_cmds(unsigned long ctlr) 3702static void fail_all_cmds(unsigned long ctlr)
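Two related procfs cleanups show up in the cciss hunks: the directory is created as "driver/cciss" with a NULL parent instead of going through proc_root_driver, and proc_create_data() replaces the create-then-assign pde->data sequence, so the entry never becomes visible in /proc without its private data attached. A minimal sketch under the same procfs API; "mydrv", my_fops and priv are placeholders, not names from this patch:

    static struct proc_dir_entry *mydrv_proc_init(void *priv,
                                    const struct file_operations *my_fops)
    {
            struct proc_dir_entry *dir = proc_mkdir("driver/mydrv", NULL);

            if (!dir)
                    return NULL;
            /* Published with ->data already set; no window in which a
             * reader could open the file and see a NULL private pointer. */
            return proc_create_data("status", S_IRUGO, dir, my_fops, priv);
    }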
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 69199185ff4b..09c14341e6e3 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -214,7 +214,7 @@ static struct proc_dir_entry *proc_array;
214static void __init ida_procinit(int i) 214static void __init ida_procinit(int i)
215{ 215{
216 if (proc_array == NULL) { 216 if (proc_array == NULL) {
217 proc_array = proc_mkdir("cpqarray", proc_root_driver); 217 proc_array = proc_mkdir("driver/cpqarray", NULL);
218 if (!proc_array) return; 218 if (!proc_array) return;
219 } 219 }
220 220
@@ -1796,7 +1796,7 @@ static void __exit cpqarray_exit(void)
1796 } 1796 }
1797 } 1797 }
1798 1798
1799 remove_proc_entry("cpqarray", proc_root_driver); 1799 remove_proc_entry("driver/cpqarray", NULL);
1800} 1800}
1801 1801
1802module_init(cpqarray_init) 1802module_init(cpqarray_init)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 7652e87d60c5..395f8ea7981c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4526,14 +4526,15 @@ static void __init parse_floppy_cfg_string(char *cfg)
4526 } 4526 }
4527} 4527}
4528 4528
4529int __init init_module(void) 4529static int __init floppy_module_init(void)
4530{ 4530{
4531 if (floppy) 4531 if (floppy)
4532 parse_floppy_cfg_string(floppy); 4532 parse_floppy_cfg_string(floppy);
4533 return floppy_init(); 4533 return floppy_init();
4534} 4534}
4535module_init(floppy_module_init);
4535 4536
4536void cleanup_module(void) 4537static void __exit floppy_module_exit(void)
4537{ 4538{
4538 int drive; 4539 int drive;
4539 4540
@@ -4562,6 +4563,7 @@ void cleanup_module(void)
4562 /* eject disk, if any */ 4563 /* eject disk, if any */
4563 fd_eject(0); 4564 fd_eject(0);
4564} 4565}
4566module_exit(floppy_module_exit);
4565 4567
4566module_param(floppy, charp, 0); 4568module_param(floppy, charp, 0);
4567module_param(FLOPPY_IRQ, int, 0); 4569module_param(FLOPPY_IRQ, int, 0);
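The floppy hunk drops the legacy init_module()/cleanup_module() entry points in favour of static functions registered through module_init()/module_exit(), which also lets them carry the __init/__exit section annotations. The generic shape of that pattern, not floppy-specific:

    static int __init example_init(void)
    {
            return 0;               /* register devices, claim resources */
    }

    static void __exit example_exit(void)
    {
            /* release resources, unregister devices */
    }

    module_init(example_init);
    module_exit(example_exit);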
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f7f163557aa0..d3a25b027ff9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -546,7 +546,7 @@ static void loop_unplug(struct request_queue *q)
546{ 546{
547 struct loop_device *lo = q->queuedata; 547 struct loop_device *lo = q->queuedata;
548 548
549 clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags); 549 queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
550 blk_run_address_space(lo->lo_backing_file->f_mapping); 550 blk_run_address_space(lo->lo_backing_file->f_mapping);
551} 551}
552 552
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 60cc54368b66..ad98dda6037d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <net/sock.h> 30#include <net/sock.h>
31#include <linux/net.h> 31#include <linux/net.h>
32#include <linux/kthread.h>
32 33
33#include <asm/uaccess.h> 34#include <asm/uaccess.h>
34#include <asm/system.h> 35#include <asm/system.h>
@@ -55,6 +56,7 @@ static unsigned int debugflags;
55 56
56static unsigned int nbds_max = 16; 57static unsigned int nbds_max = 16;
57static struct nbd_device *nbd_dev; 58static struct nbd_device *nbd_dev;
59static int max_part;
58 60
59/* 61/*
60 * Use just one lock (or at most 1 per NIC). Two arguments for this: 62 * Use just one lock (or at most 1 per NIC). Two arguments for this:
@@ -337,7 +339,7 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
337 } 339 }
338 340
339 req = nbd_find_request(lo, *(struct request **)reply.handle); 341 req = nbd_find_request(lo, *(struct request **)reply.handle);
340 if (unlikely(IS_ERR(req))) { 342 if (IS_ERR(req)) {
341 result = PTR_ERR(req); 343 result = PTR_ERR(req);
342 if (result != -ENOENT) 344 if (result != -ENOENT)
343 goto harderror; 345 goto harderror;
@@ -441,6 +443,85 @@ static void nbd_clear_que(struct nbd_device *lo)
441} 443}
442 444
443 445
446static void nbd_handle_req(struct nbd_device *lo, struct request *req)
447{
448 if (!blk_fs_request(req))
449 goto error_out;
450
451 nbd_cmd(req) = NBD_CMD_READ;
452 if (rq_data_dir(req) == WRITE) {
453 nbd_cmd(req) = NBD_CMD_WRITE;
454 if (lo->flags & NBD_READ_ONLY) {
455 printk(KERN_ERR "%s: Write on read-only\n",
456 lo->disk->disk_name);
457 goto error_out;
458 }
459 }
460
461 req->errors = 0;
462
463 mutex_lock(&lo->tx_lock);
464 if (unlikely(!lo->sock)) {
465 mutex_unlock(&lo->tx_lock);
466 printk(KERN_ERR "%s: Attempted send on closed socket\n",
467 lo->disk->disk_name);
468 req->errors++;
469 nbd_end_request(req);
470 return;
471 }
472
473 lo->active_req = req;
474
475 if (nbd_send_req(lo, req) != 0) {
476 printk(KERN_ERR "%s: Request send failed\n",
477 lo->disk->disk_name);
478 req->errors++;
479 nbd_end_request(req);
480 } else {
481 spin_lock(&lo->queue_lock);
482 list_add(&req->queuelist, &lo->queue_head);
483 spin_unlock(&lo->queue_lock);
484 }
485
486 lo->active_req = NULL;
487 mutex_unlock(&lo->tx_lock);
488 wake_up_all(&lo->active_wq);
489
490 return;
491
492error_out:
493 req->errors++;
494 nbd_end_request(req);
495}
496
497static int nbd_thread(void *data)
498{
499 struct nbd_device *lo = data;
500 struct request *req;
501
502 set_user_nice(current, -20);
503 while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
504 /* wait for something to do */
505 wait_event_interruptible(lo->waiting_wq,
506 kthread_should_stop() ||
507 !list_empty(&lo->waiting_queue));
508
509 /* extract request */
510 if (list_empty(&lo->waiting_queue))
511 continue;
512
513 spin_lock_irq(&lo->queue_lock);
514 req = list_entry(lo->waiting_queue.next, struct request,
515 queuelist);
516 list_del_init(&req->queuelist);
517 spin_unlock_irq(&lo->queue_lock);
518
519 /* handle request */
520 nbd_handle_req(lo, req);
521 }
522 return 0;
523}
524
444/* 525/*
445 * We always wait for result of write, for now. It would be nice to make it optional 526 * We always wait for result of write, for now. It would be nice to make it optional
446 * in future 527 * in future
@@ -456,65 +537,23 @@ static void do_nbd_request(struct request_queue * q)
456 struct nbd_device *lo; 537 struct nbd_device *lo;
457 538
458 blkdev_dequeue_request(req); 539 blkdev_dequeue_request(req);
540
541 spin_unlock_irq(q->queue_lock);
542
459 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", 543 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
460 req->rq_disk->disk_name, req, req->cmd_type); 544 req->rq_disk->disk_name, req, req->cmd_type);
461 545
462 if (!blk_fs_request(req))
463 goto error_out;
464
465 lo = req->rq_disk->private_data; 546 lo = req->rq_disk->private_data;
466 547
467 BUG_ON(lo->magic != LO_MAGIC); 548 BUG_ON(lo->magic != LO_MAGIC);
468 549
469 nbd_cmd(req) = NBD_CMD_READ; 550 spin_lock_irq(&lo->queue_lock);
470 if (rq_data_dir(req) == WRITE) { 551 list_add_tail(&req->queuelist, &lo->waiting_queue);
471 nbd_cmd(req) = NBD_CMD_WRITE; 552 spin_unlock_irq(&lo->queue_lock);
472 if (lo->flags & NBD_READ_ONLY) {
473 printk(KERN_ERR "%s: Write on read-only\n",
474 lo->disk->disk_name);
475 goto error_out;
476 }
477 }
478
479 req->errors = 0;
480 spin_unlock_irq(q->queue_lock);
481
482 mutex_lock(&lo->tx_lock);
483 if (unlikely(!lo->sock)) {
484 mutex_unlock(&lo->tx_lock);
485 printk(KERN_ERR "%s: Attempted send on closed socket\n",
486 lo->disk->disk_name);
487 req->errors++;
488 nbd_end_request(req);
489 spin_lock_irq(q->queue_lock);
490 continue;
491 }
492
493 lo->active_req = req;
494 553
495 if (nbd_send_req(lo, req) != 0) { 554 wake_up(&lo->waiting_wq);
496 printk(KERN_ERR "%s: Request send failed\n",
497 lo->disk->disk_name);
498 req->errors++;
499 nbd_end_request(req);
500 } else {
501 spin_lock(&lo->queue_lock);
502 list_add(&req->queuelist, &lo->queue_head);
503 spin_unlock(&lo->queue_lock);
504 }
505
506 lo->active_req = NULL;
507 mutex_unlock(&lo->tx_lock);
508 wake_up_all(&lo->active_wq);
509 555
510 spin_lock_irq(q->queue_lock); 556 spin_lock_irq(q->queue_lock);
511 continue;
512
513error_out:
514 req->errors++;
515 spin_unlock(q->queue_lock);
516 nbd_end_request(req);
517 spin_lock(q->queue_lock);
518 } 557 }
519} 558}
520 559
@@ -524,6 +563,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
524 struct nbd_device *lo = inode->i_bdev->bd_disk->private_data; 563 struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
525 int error; 564 int error;
526 struct request sreq ; 565 struct request sreq ;
566 struct task_struct *thread;
527 567
528 if (!capable(CAP_SYS_ADMIN)) 568 if (!capable(CAP_SYS_ADMIN))
529 return -EPERM; 569 return -EPERM;
@@ -537,6 +577,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
537 switch (cmd) { 577 switch (cmd) {
538 case NBD_DISCONNECT: 578 case NBD_DISCONNECT:
539 printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name); 579 printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
580 blk_rq_init(NULL, &sreq);
540 sreq.cmd_type = REQ_TYPE_SPECIAL; 581 sreq.cmd_type = REQ_TYPE_SPECIAL;
541 nbd_cmd(&sreq) = NBD_CMD_DISC; 582 nbd_cmd(&sreq) = NBD_CMD_DISC;
542 /* 583 /*
@@ -571,10 +612,13 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
571 error = -EINVAL; 612 error = -EINVAL;
572 file = fget(arg); 613 file = fget(arg);
573 if (file) { 614 if (file) {
615 struct block_device *bdev = inode->i_bdev;
574 inode = file->f_path.dentry->d_inode; 616 inode = file->f_path.dentry->d_inode;
575 if (S_ISSOCK(inode->i_mode)) { 617 if (S_ISSOCK(inode->i_mode)) {
576 lo->file = file; 618 lo->file = file;
577 lo->sock = SOCKET_I(inode); 619 lo->sock = SOCKET_I(inode);
620 if (max_part > 0)
621 bdev->bd_invalidated = 1;
578 error = 0; 622 error = 0;
579 } else { 623 } else {
580 fput(file); 624 fput(file);
@@ -606,7 +650,12 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
606 case NBD_DO_IT: 650 case NBD_DO_IT:
607 if (!lo->file) 651 if (!lo->file)
608 return -EINVAL; 652 return -EINVAL;
653 thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
654 if (IS_ERR(thread))
655 return PTR_ERR(thread);
656 wake_up_process(thread);
609 error = nbd_do_it(lo); 657 error = nbd_do_it(lo);
658 kthread_stop(thread);
610 if (error) 659 if (error)
611 return error; 660 return error;
612 sock_shutdown(lo, 1); 661 sock_shutdown(lo, 1);
@@ -619,6 +668,8 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
619 lo->bytesize = 0; 668 lo->bytesize = 0;
620 inode->i_bdev->bd_inode->i_size = 0; 669 inode->i_bdev->bd_inode->i_size = 0;
621 set_capacity(lo->disk, 0); 670 set_capacity(lo->disk, 0);
671 if (max_part > 0)
672 ioctl_by_bdev(inode->i_bdev, BLKRRPART, 0);
622 return lo->harderror; 673 return lo->harderror;
623 case NBD_CLEAR_QUE: 674 case NBD_CLEAR_QUE:
624 /* 675 /*
@@ -652,6 +703,7 @@ static int __init nbd_init(void)
652{ 703{
653 int err = -ENOMEM; 704 int err = -ENOMEM;
654 int i; 705 int i;
706 int part_shift;
655 707
656 BUILD_BUG_ON(sizeof(struct nbd_request) != 28); 708 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
657 709
@@ -659,8 +711,17 @@ static int __init nbd_init(void)
659 if (!nbd_dev) 711 if (!nbd_dev)
660 return -ENOMEM; 712 return -ENOMEM;
661 713
714 if (max_part < 0) {
715 printk(KERN_CRIT "nbd: max_part must be >= 0\n");
716 return -EINVAL;
717 }
718
719 part_shift = 0;
720 if (max_part > 0)
721 part_shift = fls(max_part);
722
662 for (i = 0; i < nbds_max; i++) { 723 for (i = 0; i < nbds_max; i++) {
663 struct gendisk *disk = alloc_disk(1); 724 struct gendisk *disk = alloc_disk(1 << part_shift);
664 elevator_t *old_e; 725 elevator_t *old_e;
665 if (!disk) 726 if (!disk)
666 goto out; 727 goto out;
@@ -695,17 +756,18 @@ static int __init nbd_init(void)
695 nbd_dev[i].file = NULL; 756 nbd_dev[i].file = NULL;
696 nbd_dev[i].magic = LO_MAGIC; 757 nbd_dev[i].magic = LO_MAGIC;
697 nbd_dev[i].flags = 0; 758 nbd_dev[i].flags = 0;
759 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
698 spin_lock_init(&nbd_dev[i].queue_lock); 760 spin_lock_init(&nbd_dev[i].queue_lock);
699 INIT_LIST_HEAD(&nbd_dev[i].queue_head); 761 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
700 mutex_init(&nbd_dev[i].tx_lock); 762 mutex_init(&nbd_dev[i].tx_lock);
701 init_waitqueue_head(&nbd_dev[i].active_wq); 763 init_waitqueue_head(&nbd_dev[i].active_wq);
764 init_waitqueue_head(&nbd_dev[i].waiting_wq);
702 nbd_dev[i].blksize = 1024; 765 nbd_dev[i].blksize = 1024;
703 nbd_dev[i].bytesize = 0; 766 nbd_dev[i].bytesize = 0;
704 disk->major = NBD_MAJOR; 767 disk->major = NBD_MAJOR;
705 disk->first_minor = i; 768 disk->first_minor = i << part_shift;
706 disk->fops = &nbd_fops; 769 disk->fops = &nbd_fops;
707 disk->private_data = &nbd_dev[i]; 770 disk->private_data = &nbd_dev[i];
708 disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
709 sprintf(disk->disk_name, "nbd%d", i); 771 sprintf(disk->disk_name, "nbd%d", i);
710 set_capacity(disk, 0); 772 set_capacity(disk, 0);
711 add_disk(disk); 773 add_disk(disk);
@@ -743,7 +805,9 @@ MODULE_DESCRIPTION("Network Block Device");
743MODULE_LICENSE("GPL"); 805MODULE_LICENSE("GPL");
744 806
745module_param(nbds_max, int, 0444); 807module_param(nbds_max, int, 0444);
746MODULE_PARM_DESC(nbds_max, "How many network block devices to initialize."); 808MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
809module_param(max_part, int, 0444);
810MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
747#ifndef NDEBUG 811#ifndef NDEBUG
748module_param(debugflags, int, 0644); 812module_param(debugflags, int, 0644);
749MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); 813MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
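The nbd rework above stops doing the socket I/O from the do_nbd_request() queue callback; requests are parked on lo->waiting_queue and a per-device kthread (started from NBD_DO_IT) performs the blocking sends. A generic sketch of that queue-plus-worker shape; the struct and names below are illustrative, not the driver's:

    #include <linux/kthread.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct work_ctx {
            spinlock_t              lock;
            struct list_head        pending;
            wait_queue_head_t       wq;
    };

    static int worker(void *data)
    {
            struct work_ctx *ctx = data;

            while (!kthread_should_stop() || !list_empty(&ctx->pending)) {
                    struct list_head *item;

                    wait_event_interruptible(ctx->wq, kthread_should_stop() ||
                                             !list_empty(&ctx->pending));
                    spin_lock_irq(&ctx->lock);
                    if (list_empty(&ctx->pending)) {
                            spin_unlock_irq(&ctx->lock);
                            continue;
                    }
                    item = ctx->pending.next;
                    list_del_init(item);
                    spin_unlock_irq(&ctx->lock);
                    /* process the item here; it may sleep, which is the
                     * point of moving it off the request_fn path */
            }
            return 0;
    }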
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index df819f8a95a6..570f3b70dce7 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -716,10 +716,8 @@ static int pd_special_command(struct pd_unit *disk,
716 struct request rq; 716 struct request rq;
717 int err = 0; 717 int err = 0;
718 718
719 memset(&rq, 0, sizeof(rq)); 719 blk_rq_init(NULL, &rq);
720 rq.errors = 0;
721 rq.rq_disk = disk->gd; 720 rq.rq_disk = disk->gd;
722 rq.ref_count = 1;
723 rq.end_io_data = &wait; 721 rq.end_io_data = &wait;
724 rq.end_io = blk_end_sync_rq; 722 rq.end_io = blk_end_sync_rq;
725 blk_insert_request(disk->gd->queue, &rq, 0, func); 723 blk_insert_request(disk->gd->queue, &rq, 0, func);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 18feb1c7c33b..3ba1df93e9e3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -776,8 +776,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
776 776
777 rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]); 777 rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
778 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); 778 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
779 if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
780 memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
781 779
782 rq->timeout = 60*HZ; 780 rq->timeout = 60*HZ;
783 rq->cmd_type = REQ_TYPE_BLOCK_PC; 781 rq->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -2744,7 +2742,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2744 int i; 2742 int i;
2745 int ret = 0; 2743 int ret = 0;
2746 char b[BDEVNAME_SIZE]; 2744 char b[BDEVNAME_SIZE];
2747 struct proc_dir_entry *proc;
2748 struct block_device *bdev; 2745 struct block_device *bdev;
2749 2746
2750 if (pd->pkt_dev == dev) { 2747 if (pd->pkt_dev == dev) {
@@ -2788,11 +2785,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2788 goto out_mem; 2785 goto out_mem;
2789 } 2786 }
2790 2787
2791 proc = create_proc_entry(pd->name, 0, pkt_proc); 2788 proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
2792 if (proc) {
2793 proc->data = pd;
2794 proc->proc_fops = &pkt_proc_fops;
2795 }
2796 DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b)); 2789 DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
2797 return 0; 2790 return 0;
2798 2791
@@ -3101,7 +3094,7 @@ static int __init pkt_init(void)
3101 goto out_misc; 3094 goto out_misc;
3102 } 3095 }
3103 3096
3104 pkt_proc = proc_mkdir(DRIVER_NAME, proc_root_driver); 3097 pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
3105 3098
3106 return 0; 3099 return 0;
3107 3100
@@ -3117,7 +3110,7 @@ out2:
3117 3110
3118static void __exit pkt_exit(void) 3111static void __exit pkt_exit(void)
3119{ 3112{
3120 remove_proc_entry(DRIVER_NAME, proc_root_driver); 3113 remove_proc_entry("driver/"DRIVER_NAME, NULL);
3121 misc_deregister(&pkt_misc); 3114 misc_deregister(&pkt_misc);
3122 3115
3123 pkt_debugfs_cleanup(); 3116 pkt_debugfs_cleanup();
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 7483f947f0e9..d797e209951d 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -102,8 +102,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
102 dev_dbg(&dev->sbd.core, 102 dev_dbg(&dev->sbd.core,
103 "%s:%u: bio %u: %u segs %u sectors from %lu\n", 103 "%s:%u: bio %u: %u segs %u sectors from %lu\n",
104 __func__, __LINE__, i, bio_segments(iter.bio), 104 __func__, __LINE__, i, bio_segments(iter.bio),
105 bio_sectors(iter.bio), 105 bio_sectors(iter.bio), iter.bio->bi_sector);
106 (unsigned long)iter.bio->bi_sector);
107 106
108 size = bvec->bv_len; 107 size = bvec->bv_len;
109 buf = bvec_kmap_irq(bvec, &flags); 108 buf = bvec_kmap_irq(bvec, &flags);
@@ -406,7 +405,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
406 405
407 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 406 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
408 407
409 memset(req->cmd, 0, sizeof(req->cmd));
410 req->cmd_type = REQ_TYPE_FLUSH; 408 req->cmd_type = REQ_TYPE_FLUSH;
411} 409}
412 410
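The pd, pktcdvd and ps3disk hunks (and the cdrom one further down) all lean on the new blk_rq_init() helper: it zeroes the whole request, including rq->cmd, and initialises its list heads, so the open-coded memset() and field setup become redundant both for on-stack requests and for requests obtained from blk_get_request(). A sketch of the on-stack usage; the function name and the missing completion plumbing are placeholders:

    static void issue_special(struct gendisk *gd, rq_end_io_fn *done)
    {
            struct request rq;

            blk_rq_init(NULL, &rq);         /* zeroes rq, including rq.cmd */
            rq.rq_disk = gd;
            rq.end_io = done;
            /* queue it at the tail; the caller waits for ->end_io */
            blk_insert_request(gd->queue, &rq, 0, NULL);
    }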
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 27bfe72aab59..3a281ef11ffa 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -205,6 +205,7 @@ struct ub_scsi_cmd {
205 unsigned char key, asc, ascq; /* May be valid if error==-EIO */ 205 unsigned char key, asc, ascq; /* May be valid if error==-EIO */
206 206
207 int stat_count; /* Retries getting status. */ 207 int stat_count; /* Retries getting status. */
208 unsigned int timeo; /* jiffies until rq->timeout changes */
208 209
209 unsigned int len; /* Requested length */ 210 unsigned int len; /* Requested length */
210 unsigned int current_sg; 211 unsigned int current_sg;
@@ -318,6 +319,7 @@ struct ub_dev {
318 int openc; /* protected by ub_lock! */ 319 int openc; /* protected by ub_lock! */
319 /* kref is too implicit for our taste */ 320 /* kref is too implicit for our taste */
320 int reset; /* Reset is running */ 321 int reset; /* Reset is running */
322 int bad_resid;
321 unsigned int tagcnt; 323 unsigned int tagcnt;
322 char name[12]; 324 char name[12];
323 struct usb_device *dev; 325 struct usb_device *dev;
@@ -764,6 +766,12 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
764 cmd->cdb_len = rq->cmd_len; 766 cmd->cdb_len = rq->cmd_len;
765 767
766 cmd->len = rq->data_len; 768 cmd->len = rq->data_len;
769
770 /*
771 * To reapply this to every URB is not as incorrect as it looks.
772 * In return, we avoid any complicated tracking calculations.
773 */
774 cmd->timeo = rq->timeout;
767} 775}
768 776
769static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) 777static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
@@ -785,10 +793,6 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
785 scsi_status = 0; 793 scsi_status = 0;
786 } else { 794 } else {
787 if (cmd->act_len != cmd->len) { 795 if (cmd->act_len != cmd->len) {
788 if ((cmd->key == MEDIUM_ERROR ||
789 cmd->key == UNIT_ATTENTION) &&
790 ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
791 return;
792 scsi_status = SAM_STAT_CHECK_CONDITION; 796 scsi_status = SAM_STAT_CHECK_CONDITION;
793 } else { 797 } else {
794 scsi_status = 0; 798 scsi_status = 0;
@@ -804,7 +808,10 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
804 else 808 else
805 scsi_status = DID_ERROR << 16; 809 scsi_status = DID_ERROR << 16;
806 } else { 810 } else {
807 if (cmd->error == -EIO) { 811 if (cmd->error == -EIO &&
812 (cmd->key == 0 ||
813 cmd->key == MEDIUM_ERROR ||
814 cmd->key == UNIT_ATTENTION)) {
808 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0) 815 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
809 return; 816 return;
810 } 817 }
@@ -1259,14 +1266,19 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1259 return; 1266 return;
1260 } 1267 }
1261 1268
1262 len = le32_to_cpu(bcs->Residue); 1269 if (!sc->bad_resid) {
1263 if (len != cmd->len - cmd->act_len) { 1270 len = le32_to_cpu(bcs->Residue);
1264 /* 1271 if (len != cmd->len - cmd->act_len) {
1265 * It is all right to transfer less, the caller has 1272 /*
1266 * to check. But it's not all right if the device 1273 * Only start ignoring if this cmd ended well.
1267 * counts disagree with our counts. 1274 */
1268 */ 1275 if (cmd->len == cmd->act_len) {
1269 goto Bad_End; 1276 printk(KERN_NOTICE "%s: "
1277 "bad residual %d of %d, ignoring\n",
1278 sc->name, len, cmd->len);
1279 sc->bad_resid = 1;
1280 }
1281 }
1270 } 1282 }
1271 1283
1272 switch (bcs->Status) { 1284 switch (bcs->Status) {
@@ -1297,8 +1309,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1297 ub_state_done(sc, cmd, -EIO); 1309 ub_state_done(sc, cmd, -EIO);
1298 1310
1299 } else { 1311 } else {
1300 printk(KERN_WARNING "%s: " 1312 printk(KERN_WARNING "%s: wrong command state %d\n",
1301 "wrong command state %d\n",
1302 sc->name, cmd->state); 1313 sc->name, cmd->state);
1303 ub_state_done(sc, cmd, -EINVAL); 1314 ub_state_done(sc, cmd, -EINVAL);
1304 return; 1315 return;
@@ -1336,7 +1347,10 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1336 return; 1347 return;
1337 } 1348 }
1338 1349
1339 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; 1350 if (cmd->timeo)
1351 sc->work_timer.expires = jiffies + cmd->timeo;
1352 else
1353 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
1340 add_timer(&sc->work_timer); 1354 add_timer(&sc->work_timer);
1341 1355
1342 cmd->state = UB_CMDST_DATA; 1356 cmd->state = UB_CMDST_DATA;
@@ -1376,7 +1390,10 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1376 return -1; 1390 return -1;
1377 } 1391 }
1378 1392
1379 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT; 1393 if (cmd->timeo)
1394 sc->work_timer.expires = jiffies + cmd->timeo;
1395 else
1396 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
1380 add_timer(&sc->work_timer); 1397 add_timer(&sc->work_timer);
1381 return 0; 1398 return 0;
1382} 1399}
@@ -1515,8 +1532,7 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1515 return; 1532 return;
1516 } 1533 }
1517 if (cmd->state != UB_CMDST_SENSE) { 1534 if (cmd->state != UB_CMDST_SENSE) {
1518 printk(KERN_WARNING "%s: " 1535 printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
1519 "sense done with bad cmd state %d\n",
1520 sc->name, cmd->state); 1536 sc->name, cmd->state);
1521 return; 1537 return;
1522 } 1538 }
@@ -1720,7 +1736,7 @@ static int ub_bd_ioctl(struct inode *inode, struct file *filp,
1720} 1736}
1721 1737
1722/* 1738/*
1723 * This is called once a new disk was seen by the block layer or by ub_probe(). 1739 * This is called by check_disk_change if we reported a media change.
1724 * The main objective here is to discover the features of the media such as 1740 * The main objective here is to discover the features of the media such as

1725 * the capacity, read-only status, etc. USB storage generally does not 1741 * the capacity, read-only status, etc. USB storage generally does not
1726 * need to be spun up, but if we needed it, this would be the place. 1742 * need to be spun up, but if we needed it, this would be the place.
@@ -2136,8 +2152,7 @@ static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2136 } 2152 }
2137 2153
2138 if (ep_in == NULL || ep_out == NULL) { 2154 if (ep_in == NULL || ep_out == NULL) {
2139 printk(KERN_NOTICE "%s: failed endpoint check\n", 2155 printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
2140 sc->name);
2141 return -ENODEV; 2156 return -ENODEV;
2142 } 2157 }
2143 2158
@@ -2354,7 +2369,7 @@ static void ub_disconnect(struct usb_interface *intf)
2354 spin_unlock_irqrestore(&ub_lock, flags); 2369 spin_unlock_irqrestore(&ub_lock, flags);
2355 2370
2356 /* 2371 /*
2357 * Fence stall clearnings, operations triggered by unlinkings and so on. 2372 * Fence stall clearings, operations triggered by unlinkings and so on.
2358 * We do not attempt to unlink any URBs, because we do not trust the 2373 * We do not attempt to unlink any URBs, because we do not trust the
2359 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway. 2374 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2360 */ 2375 */
@@ -2399,7 +2414,7 @@ static void ub_disconnect(struct usb_interface *intf)
2399 del_gendisk(lun->disk); 2414 del_gendisk(lun->disk);
2400 /* 2415 /*
2401 * I wish I could do: 2416 * I wish I could do:
2402 * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); 2417 * queue_flag_set(QUEUE_FLAG_DEAD, q);
2403 * As it is, we rely on our internal poisoning and let 2418 * As it is, we rely on our internal poisoning and let
2404 * the upper levels to spin furiously failing all the I/O. 2419 * the upper levels to spin furiously failing all the I/O.
2405 */ 2420 */
@@ -2417,7 +2432,7 @@ static void ub_disconnect(struct usb_interface *intf)
2417 spin_unlock_irqrestore(sc->lock, flags); 2432 spin_unlock_irqrestore(sc->lock, flags);
2418 2433
2419 /* 2434 /*
2420 * There is virtually no chance that other CPU runs times so long 2435 * There is virtually no chance that other CPU runs a timeout so long
2421 * after ub_urb_complete should have called del_timer, but only if HCD 2436 * after ub_urb_complete should have called del_timer, but only if HCD
2422 * didn't forget to deliver a callback on unlink. 2437 * didn't forget to deliver a callback on unlink.
2423 */ 2438 */
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 0cfbe8c594a5..84e064ffee52 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -35,7 +35,7 @@ struct virtblk_req
35 struct list_head list; 35 struct list_head list;
36 struct request *req; 36 struct request *req;
37 struct virtio_blk_outhdr out_hdr; 37 struct virtio_blk_outhdr out_hdr;
38 struct virtio_blk_inhdr in_hdr; 38 u8 status;
39}; 39};
40 40
41static void blk_done(struct virtqueue *vq) 41static void blk_done(struct virtqueue *vq)
@@ -48,7 +48,7 @@ static void blk_done(struct virtqueue *vq)
48 spin_lock_irqsave(&vblk->lock, flags); 48 spin_lock_irqsave(&vblk->lock, flags);
49 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { 49 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
50 int uptodate; 50 int uptodate;
51 switch (vbr->in_hdr.status) { 51 switch (vbr->status) {
52 case VIRTIO_BLK_S_OK: 52 case VIRTIO_BLK_S_OK:
53 uptodate = 1; 53 uptodate = 1;
54 break; 54 break;
@@ -101,7 +101,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
101 sg_init_table(vblk->sg, VIRTIO_MAX_SG); 101 sg_init_table(vblk->sg, VIRTIO_MAX_SG);
102 sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); 102 sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
103 num = blk_rq_map_sg(q, vbr->req, vblk->sg+1); 103 num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
104 sg_set_buf(&vblk->sg[num+1], &vbr->in_hdr, sizeof(vbr->in_hdr)); 104 sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
105 105
106 if (rq_data_dir(vbr->req) == WRITE) { 106 if (rq_data_dir(vbr->req) == WRITE) {
107 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; 107 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
@@ -157,10 +157,25 @@ static int virtblk_ioctl(struct inode *inode, struct file *filp,
157/* We provide getgeo only to please some old bootloader/partitioning tools */ 157/* We provide getgeo only to please some old bootloader/partitioning tools */
158static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) 158static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
159{ 159{
160 /* some standard values, similar to sd */ 160 struct virtio_blk *vblk = bd->bd_disk->private_data;
161 geo->heads = 1 << 6; 161 struct virtio_blk_geometry vgeo;
162 geo->sectors = 1 << 5; 162 int err;
163 geo->cylinders = get_capacity(bd->bd_disk) >> 11; 163
164 /* see if the host passed in geometry config */
165 err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
166 offsetof(struct virtio_blk_config, geometry),
167 &vgeo);
168
169 if (!err) {
170 geo->heads = vgeo.heads;
171 geo->sectors = vgeo.sectors;
172 geo->cylinders = vgeo.cylinders;
173 } else {
174 /* some standard values, similar to sd */
175 geo->heads = 1 << 6;
176 geo->sectors = 1 << 5;
177 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
178 }
164 return 0; 179 return 0;
165} 180}
166 181
@@ -242,12 +257,12 @@ static int virtblk_probe(struct virtio_device *vdev)
242 index++; 257 index++;
243 258
244 /* If barriers are supported, tell block layer that queue is ordered */ 259 /* If barriers are supported, tell block layer that queue is ordered */
245 if (vdev->config->feature(vdev, VIRTIO_BLK_F_BARRIER)) 260 if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
246 blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL); 261 blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
247 262
248 /* Host must always specify the capacity. */ 263 /* Host must always specify the capacity. */
249 __virtio_config_val(vdev, offsetof(struct virtio_blk_config, capacity), 264 vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
250 &cap); 265 &cap, sizeof(cap));
251 266
252 /* If capacity is too big, truncate with warning. */ 267 /* If capacity is too big, truncate with warning. */
253 if ((sector_t)cap != cap) { 268 if ((sector_t)cap != cap) {
@@ -289,7 +304,6 @@ out:
289static void virtblk_remove(struct virtio_device *vdev) 304static void virtblk_remove(struct virtio_device *vdev)
290{ 305{
291 struct virtio_blk *vblk = vdev->priv; 306 struct virtio_blk *vblk = vdev->priv;
292 int major = vblk->disk->major;
293 307
294 /* Nothing should be pending. */ 308 /* Nothing should be pending. */
295 BUG_ON(!list_empty(&vblk->reqs)); 309 BUG_ON(!list_empty(&vblk->reqs));
@@ -299,7 +313,6 @@ static void virtblk_remove(struct virtio_device *vdev)
299 313
300 blk_cleanup_queue(vblk->disk->queue); 314 blk_cleanup_queue(vblk->disk->queue);
301 put_disk(vblk->disk); 315 put_disk(vblk->disk);
302 unregister_blkdev(major, "virtblk");
303 mempool_destroy(vblk->pool); 316 mempool_destroy(vblk->pool);
304 vdev->config->del_vq(vblk->vq); 317 vdev->config->del_vq(vblk->vq);
305 kfree(vblk); 318 kfree(vblk);
@@ -310,7 +323,14 @@ static struct virtio_device_id id_table[] = {
310 { 0 }, 323 { 0 },
311}; 324};
312 325
326static unsigned int features[] = {
327 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
328 VIRTIO_BLK_F_GEOMETRY,
329};
330
313static struct virtio_driver virtio_blk = { 331static struct virtio_driver virtio_blk = {
332 .feature_table = features,
333 .feature_table_size = ARRAY_SIZE(features),
314 .driver.name = KBUILD_MODNAME, 334 .driver.name = KBUILD_MODNAME,
315 .driver.owner = THIS_MODULE, 335 .driver.owner = THIS_MODULE,
316 .id_table = id_table, 336 .id_table = id_table,
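The virtio_blk changes move from poking vdev->config->feature() directly to the negotiated-feature model: the driver lists the bits it understands in feature_table, virtio core intersects that with what the host offers, and virtio_has_feature()/virtio_config_val() then act on the negotiated set, with the driver falling back to defaults when a bit such as VIRTIO_BLK_F_GEOMETRY is absent. A condensed sketch mirroring the getgeo hunk above; vblk and vgeo are the driver's own names, the surrounding function is not:

    static void read_geometry_sketch(struct virtio_blk *vblk,
                                     struct virtio_blk_geometry *vgeo)
    {
            /* virtio_config_val() returns an error if VIRTIO_BLK_F_GEOMETRY
             * was not negotiated; the caller then keeps synthetic defaults. */
            if (virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
                                  offsetof(struct virtio_blk_config, geometry),
                                  vgeo))
                    memset(vgeo, 0, sizeof(*vgeo));
    }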
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d771da816d95..f2fff5799ddf 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -137,7 +137,7 @@ static void blkif_restart_queue_callback(void *arg)
137 schedule_work(&info->work); 137 schedule_work(&info->work);
138} 138}
139 139
140int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) 140static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
141{ 141{
142 /* We don't have real geometry info, but let's at least return 142 /* We don't have real geometry info, but let's at least return
143 values consistent with the size of the device */ 143 values consistent with the size of the device */
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 7e31d5f1bc8a..e5cd856a2fea 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -143,7 +143,7 @@ restart:
143 int len; 143 int len;
144 144
145 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 145 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
146 len = tty->driver->write(tty, skb->data, skb->len); 146 len = tty->ops->write(tty, skb->data, skb->len);
147 hdev->stat.byte_tx += len; 147 hdev->stat.byte_tx += len;
148 148
149 skb_pull(skb, len); 149 skb_pull(skb, len);
@@ -190,8 +190,7 @@ static int hci_uart_flush(struct hci_dev *hdev)
190 190
191 /* Flush any pending characters in the driver and discipline. */ 191 /* Flush any pending characters in the driver and discipline. */
192 tty_ldisc_flush(tty); 192 tty_ldisc_flush(tty);
193 if (tty->driver && tty->driver->flush_buffer) 193 tty_driver_flush_buffer(tty);
194 tty->driver->flush_buffer(tty);
195 194
196 if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) 195 if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
197 hu->proto->flush(hu); 196 hu->proto->flush(hu);
@@ -285,9 +284,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
285 284
286 if (tty->ldisc.flush_buffer) 285 if (tty->ldisc.flush_buffer)
287 tty->ldisc.flush_buffer(tty); 286 tty->ldisc.flush_buffer(tty);
288 287 tty_driver_flush_buffer(tty);
289 if (tty->driver && tty->driver->flush_buffer)
290 tty->driver->flush_buffer(tty);
291 288
292 return 0; 289 return 0;
293} 290}
@@ -373,9 +370,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
373 hu->hdev->stat.byte_rx += count; 370 hu->hdev->stat.byte_rx += count;
374 spin_unlock(&hu->rx_lock); 371 spin_unlock(&hu->rx_lock);
375 372
376 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) && 373 tty_unthrottle(tty);
377 tty->driver->unthrottle)
378 tty->driver->unthrottle(tty);
379} 374}
380 375
381static int hci_uart_register_dev(struct hci_uart *hu) 376static int hci_uart_register_dev(struct hci_uart *hu)
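The hci_ldisc conversion above replaces direct tty->driver->write calls and the open-coded "if the driver has a flush_buffer/unthrottle op, call it" checks with the tty layer's accessors: tty->ops->write(), tty_driver_flush_buffer() and tty_unthrottle(), which carry the NULL checks (and, for unthrottle, the TTY_THROTTLED bit handling) internally. A rough mirror of the flush path using those helpers; the function name is made up:

    static void flush_sketch(struct tty_struct *tty)
    {
            tty_ldisc_flush(tty);           /* drop pending line-discipline data */
            tty_driver_flush_buffer(tty);   /* safe even if the driver has no
                                             * flush_buffer operation */
    }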
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 414080a4e8ff..1790cc8e431e 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -70,7 +70,8 @@ static inline void _urb_queue_head(struct _urb_queue *q, struct _urb *_urb)
70{ 70{
71 unsigned long flags; 71 unsigned long flags;
72 spin_lock_irqsave(&q->lock, flags); 72 spin_lock_irqsave(&q->lock, flags);
73 list_add(&_urb->list, &q->head); _urb->queue = q; 73 /* _urb_unlink needs to know which spinlock to use, thus mb(). */
74 _urb->queue = q; mb(); list_add(&_urb->list, &q->head);
74 spin_unlock_irqrestore(&q->lock, flags); 75 spin_unlock_irqrestore(&q->lock, flags);
75} 76}
76 77
@@ -78,19 +79,23 @@ static inline void _urb_queue_tail(struct _urb_queue *q, struct _urb *_urb)
78{ 79{
79 unsigned long flags; 80 unsigned long flags;
80 spin_lock_irqsave(&q->lock, flags); 81 spin_lock_irqsave(&q->lock, flags);
81 list_add_tail(&_urb->list, &q->head); _urb->queue = q; 82 /* _urb_unlink needs to know which spinlock to use, thus mb(). */
83 _urb->queue = q; mb(); list_add_tail(&_urb->list, &q->head);
82 spin_unlock_irqrestore(&q->lock, flags); 84 spin_unlock_irqrestore(&q->lock, flags);
83} 85}
84 86
85static inline void _urb_unlink(struct _urb *_urb) 87static inline void _urb_unlink(struct _urb *_urb)
86{ 88{
87 struct _urb_queue *q = _urb->queue; 89 struct _urb_queue *q;
88 unsigned long flags; 90 unsigned long flags;
89 if (q) { 91
90 spin_lock_irqsave(&q->lock, flags); 92 mb();
91 list_del(&_urb->list); _urb->queue = NULL; 93 q = _urb->queue;
92 spin_unlock_irqrestore(&q->lock, flags); 94 /* If q is NULL, it will die at easy-to-debug NULL pointer dereference.
93 } 95 No need to BUG(). */
96 spin_lock_irqsave(&q->lock, flags);
97 list_del(&_urb->list); _urb->queue = NULL;
98 spin_unlock_irqrestore(&q->lock, flags);
94} 99}
95 100
96struct hci_usb { 101struct hci_usb {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index ac3829030ac5..69f26eb6415b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2194,7 +2194,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2194 if (ret) 2194 if (ret)
2195 break; 2195 break;
2196 2196
2197 memset(rq->cmd, 0, sizeof(rq->cmd));
2198 rq->cmd[0] = GPCMD_READ_CD; 2197 rq->cmd[0] = GPCMD_READ_CD;
2199 rq->cmd[1] = 1 << 2; 2198 rq->cmd[1] = 1 << 2;
2200 rq->cmd[2] = (lba >> 24) & 0xff; 2199 rq->cmd[2] = (lba >> 24) & 0xff;
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index b74b6c2768a8..5245a4a0ba74 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -144,6 +144,7 @@ static int proc_viocd_open(struct inode *inode, struct file *file)
144} 144}
145 145
146static const struct file_operations proc_viocd_operations = { 146static const struct file_operations proc_viocd_operations = {
147 .owner = THIS_MODULE,
147 .open = proc_viocd_open, 148 .open = proc_viocd_open,
148 .read = seq_read, 149 .read = seq_read,
149 .llseek = seq_lseek, 150 .llseek = seq_lseek,
@@ -679,7 +680,6 @@ static struct vio_driver viocd_driver = {
679 680
680static int __init viocd_init(void) 681static int __init viocd_init(void)
681{ 682{
682 struct proc_dir_entry *e;
683 int ret = 0; 683 int ret = 0;
684 684
685 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 685 if (!firmware_has_feature(FW_FEATURE_ISERIES))
@@ -719,12 +719,8 @@ static int __init viocd_init(void)
719 if (ret) 719 if (ret)
720 goto out_free_info; 720 goto out_free_info;
721 721
722 e = create_proc_entry("iSeries/viocd", S_IFREG|S_IRUGO, NULL); 722 proc_create("iSeries/viocd", S_IFREG|S_IRUGO, NULL,
723 if (e) { 723 &proc_viocd_operations);
724 e->owner = THIS_MODULE;
725 e->proc_fops = &proc_viocd_operations;
726 }
727
728 return 0; 724 return 0;
729 725
730out_free_info: 726out_free_info:
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 929d4fa73fd9..5dce3877eee5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -80,6 +80,15 @@ config VT_HW_CONSOLE_BINDING
80 information. For framebuffer console users, please refer to 80 information. For framebuffer console users, please refer to
81 <file:Documentation/fb/fbcon.txt>. 81 <file:Documentation/fb/fbcon.txt>.
82 82
83config DEVKMEM
84 bool "/dev/kmem virtual device support"
85 default y
86 help
87 Say Y here if you want to support the /dev/kmem device. The
88 /dev/kmem device is rarely used, but can be used for certain
89 kinds of kernel debugging operations.
90 When in doubt, say "N".
91
83config SERIAL_NONSTANDARD 92config SERIAL_NONSTANDARD
84 bool "Non-standard serial port support" 93 bool "Non-standard serial port support"
85 depends on HAS_IOMEM 94 depends on HAS_IOMEM
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index c69f79598e47..99e6a406efb4 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -35,7 +35,7 @@
35 35
36//#define AGP_DEBUG 1 36//#define AGP_DEBUG 1
37#ifdef AGP_DEBUG 37#ifdef AGP_DEBUG
38#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __FUNCTION__ , ## y) 38#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __func__ , ## y)
39#else 39#else
40#define DBG(x,y...) do { } while (0) 40#define DBG(x,y...) do { } while (0)
41#endif 41#endif
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 3d468f502d2d..37457e5a4f2b 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -832,33 +832,34 @@ static void change_speed(struct async_struct *info,
832 local_irq_restore(flags); 832 local_irq_restore(flags);
833} 833}
834 834
835static void rs_put_char(struct tty_struct *tty, unsigned char ch) 835static int rs_put_char(struct tty_struct *tty, unsigned char ch)
836{ 836{
837 struct async_struct *info; 837 struct async_struct *info;
838 unsigned long flags; 838 unsigned long flags;
839 839
840 if (!tty) 840 if (!tty)
841 return; 841 return 0;
842 842
843 info = tty->driver_data; 843 info = tty->driver_data;
844 844
845 if (serial_paranoia_check(info, tty->name, "rs_put_char")) 845 if (serial_paranoia_check(info, tty->name, "rs_put_char"))
846 return; 846 return 0;
847 847
848 if (!info->xmit.buf) 848 if (!info->xmit.buf)
849 return; 849 return 0;
850 850
851 local_irq_save(flags); 851 local_irq_save(flags);
852 if (CIRC_SPACE(info->xmit.head, 852 if (CIRC_SPACE(info->xmit.head,
853 info->xmit.tail, 853 info->xmit.tail,
854 SERIAL_XMIT_SIZE) == 0) { 854 SERIAL_XMIT_SIZE) == 0) {
855 local_irq_restore(flags); 855 local_irq_restore(flags);
856 return; 856 return 0;
857 } 857 }
858 858
859 info->xmit.buf[info->xmit.head++] = ch; 859 info->xmit.buf[info->xmit.head++] = ch;
860 info->xmit.head &= SERIAL_XMIT_SIZE-1; 860 info->xmit.head &= SERIAL_XMIT_SIZE-1;
861 local_irq_restore(flags); 861 local_irq_restore(flags);
862 return 1;
862} 863}
863 864
864static void rs_flush_chars(struct tty_struct *tty) 865static void rs_flush_chars(struct tty_struct *tty)
@@ -1074,6 +1075,7 @@ static int get_serial_info(struct async_struct * info,
1074 if (!retinfo) 1075 if (!retinfo)
1075 return -EFAULT; 1076 return -EFAULT;
1076 memset(&tmp, 0, sizeof(tmp)); 1077 memset(&tmp, 0, sizeof(tmp));
1078 lock_kernel();
1077 tmp.type = state->type; 1079 tmp.type = state->type;
1078 tmp.line = state->line; 1080 tmp.line = state->line;
1079 tmp.port = state->port; 1081 tmp.port = state->port;
@@ -1084,6 +1086,7 @@ static int get_serial_info(struct async_struct * info,
1084 tmp.close_delay = state->close_delay; 1086 tmp.close_delay = state->close_delay;
1085 tmp.closing_wait = state->closing_wait; 1087 tmp.closing_wait = state->closing_wait;
1086 tmp.custom_divisor = state->custom_divisor; 1088 tmp.custom_divisor = state->custom_divisor;
1089 unlock_kernel();
1087 if (copy_to_user(retinfo,&tmp,sizeof(*retinfo))) 1090 if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
1088 return -EFAULT; 1091 return -EFAULT;
1089 return 0; 1092 return 0;
@@ -1099,13 +1102,17 @@ static int set_serial_info(struct async_struct * info,
1099 1102
1100 if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) 1103 if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
1101 return -EFAULT; 1104 return -EFAULT;
1105
1106 lock_kernel();
1102 state = info->state; 1107 state = info->state;
1103 old_state = *state; 1108 old_state = *state;
1104 1109
1105 change_irq = new_serial.irq != state->irq; 1110 change_irq = new_serial.irq != state->irq;
1106 change_port = (new_serial.port != state->port); 1111 change_port = (new_serial.port != state->port);
1107 if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size)) 1112 if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size)) {
1113 unlock_kernel();
1108 return -EINVAL; 1114 return -EINVAL;
1115 }
1109 1116
1110 if (!serial_isroot()) { 1117 if (!serial_isroot()) {
1111 if ((new_serial.baud_base != state->baud_base) || 1118 if ((new_serial.baud_base != state->baud_base) ||
@@ -1122,8 +1129,10 @@ static int set_serial_info(struct async_struct * info,
1122 goto check_and_exit; 1129 goto check_and_exit;
1123 } 1130 }
1124 1131
1125 if (new_serial.baud_base < 9600) 1132 if (new_serial.baud_base < 9600) {
1133 unlock_kernel();
1126 return -EINVAL; 1134 return -EINVAL;
1135 }
1127 1136
1128 /* 1137 /*
1129 * OK, past this point, all the error checking has been done. 1138 * OK, past this point, all the error checking has been done.
@@ -1157,6 +1166,7 @@ check_and_exit:
1157 } 1166 }
1158 } else 1167 } else
1159 retval = startup(info); 1168 retval = startup(info);
1169 unlock_kernel();
1160 return retval; 1170 return retval;
1161} 1171}
1162 1172
@@ -1496,8 +1506,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
1496 rs_wait_until_sent(tty, info->timeout); 1506 rs_wait_until_sent(tty, info->timeout);
1497 } 1507 }
1498 shutdown(info); 1508 shutdown(info);
1499 if (tty->driver->flush_buffer) 1509 rs_flush_buffer(tty);
1500 tty->driver->flush_buffer(tty);
1501 1510
1502 tty_ldisc_flush(tty); 1511 tty_ldisc_flush(tty);
1503 tty->closing = 0; 1512 tty->closing = 0;
@@ -1530,6 +1539,8 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
1530 return; /* Just in case.... */ 1539 return; /* Just in case.... */
1531 1540
1532 orig_jiffies = jiffies; 1541 orig_jiffies = jiffies;
1542
1543 lock_kernel();
1533 /* 1544 /*
1534 * Set the check interval to be 1/5 of the estimated time to 1545 * Set the check interval to be 1/5 of the estimated time to
1535 * send a single character, and make it at least 1. The check 1546 * send a single character, and make it at least 1. The check
@@ -1570,6 +1581,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
1570 break; 1581 break;
1571 } 1582 }
1572 __set_current_state(TASK_RUNNING); 1583 __set_current_state(TASK_RUNNING);
1584 unlock_kernel();
1573#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT 1585#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
1574 printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); 1586 printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
1575#endif 1587#endif
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 17d54315e146..cdd876dbb2b0 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -14,6 +14,7 @@
14#include <linux/poll.h> 14#include <linux/poll.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/proc_fs.h> 16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
17#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
18#include <linux/apm_bios.h> 19#include <linux/apm_bios.h>
19#include <linux/capability.h> 20#include <linux/capability.h>
@@ -493,11 +494,10 @@ static struct miscdevice apm_device = {
493 * -1: Unknown 494 * -1: Unknown
494 * 8) min = minutes; sec = seconds 495 * 8) min = minutes; sec = seconds
495 */ 496 */
496static int apm_get_info(char *buf, char **start, off_t fpos, int length) 497static int proc_apm_show(struct seq_file *m, void *v)
497{ 498{
498 struct apm_power_info info; 499 struct apm_power_info info;
499 char *units; 500 char *units;
500 int ret;
501 501
502 info.ac_line_status = 0xff; 502 info.ac_line_status = 0xff;
503 info.battery_status = 0xff; 503 info.battery_status = 0xff;
@@ -515,14 +515,27 @@ static int apm_get_info(char *buf, char **start, off_t fpos, int length)
515 case 1: units = "sec"; break; 515 case 1: units = "sec"; break;
516 } 516 }
517 517
518 ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", 518 seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
519 driver_version, APM_32_BIT_SUPPORT, 519 driver_version, APM_32_BIT_SUPPORT,
520 info.ac_line_status, info.battery_status, 520 info.ac_line_status, info.battery_status,
521 info.battery_flag, info.battery_life, 521 info.battery_flag, info.battery_life,
522 info.time, units); 522 info.time, units);
523 523
524 return ret; 524 return 0;
525} 525}
526
527static int proc_apm_open(struct inode *inode, struct file *file)
528{
529 return single_open(file, proc_apm_show, NULL);
530}
531
532static const struct file_operations apm_proc_fops = {
533 .owner = THIS_MODULE,
534 .open = proc_apm_open,
535 .read = seq_read,
536 .llseek = seq_lseek,
537 .release = single_release,
538};
526#endif 539#endif
527 540
528static int kapmd(void *arg) 541static int kapmd(void *arg)
@@ -593,7 +606,7 @@ static int __init apm_init(void)
593 wake_up_process(kapmd_tsk); 606 wake_up_process(kapmd_tsk);
594 607
595#ifdef CONFIG_PROC_FS 608#ifdef CONFIG_PROC_FS
596 create_proc_info_entry("apm", 0, NULL, apm_get_info); 609 proc_create("apm", 0, NULL, &apm_proc_fops);
597#endif 610#endif
598 611
599 ret = misc_register(&apm_device); 612 ret = misc_register(&apm_device);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index a7c4990b5b6b..31d08b641f5b 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -199,7 +199,7 @@ static int __init applicom_init(void)
199 if (pci_enable_device(dev)) 199 if (pci_enable_device(dev))
200 return -EIO; 200 return -EIO;
201 201
202 RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO); 202 RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO);
203 203
204 if (!RamIO) { 204 if (!RamIO) {
205 printk(KERN_INFO "ac.o: Failed to ioremap PCI memory " 205 printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
@@ -254,7 +254,7 @@ static int __init applicom_init(void)
254 /* Now try the specified ISA cards */ 254 /* Now try the specified ISA cards */
255 255
256 for (i = 0; i < MAX_ISA_BOARD; i++) { 256 for (i = 0; i < MAX_ISA_BOARD; i++) {
257 RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO); 257 RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
258 258
259 if (!RamIO) { 259 if (!RamIO) {
260 printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1); 260 printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
diff --git a/drivers/char/consolemap.c b/drivers/char/consolemap.c
index 6b104e45a322..4246b8e36cb3 100644
--- a/drivers/char/consolemap.c
+++ b/drivers/char/consolemap.c
@@ -277,6 +277,7 @@ u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode)
277 return p->inverse_translations[m][glyph]; 277 return p->inverse_translations[m][glyph];
278 } 278 }
279} 279}
280EXPORT_SYMBOL_GPL(inverse_translate);
280 281
281static void update_user_maps(void) 282static void update_user_maps(void)
282{ 283{
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
index c2d23cae9515..c0a4a0bb509e 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/char/cs5535_gpio.c
@@ -215,7 +215,7 @@ static int __init cs5535_gpio_init(void)
215 else 215 else
216 mask = 0x0b003c66; 216 mask = 0x0b003c66;
217 217
218 if (request_region(gpio_base, CS5535_GPIO_SIZE, NAME) == 0) { 218 if (!request_region(gpio_base, CS5535_GPIO_SIZE, NAME)) {
219 printk(KERN_ERR NAME ": can't allocate I/O for GPIO\n"); 219 printk(KERN_ERR NAME ": can't allocate I/O for GPIO\n");
220 return -ENODEV; 220 return -ENODEV;
221 } 221 }
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e4f579c3e245..ef73e72daedc 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -21,7 +21,6 @@
21 * 21 *
22 * This version supports shared IRQ's (only for PCI boards). 22 * This version supports shared IRQ's (only for PCI boards).
23 * 23 *
24 * $Log: cyclades.c,v $
25 * Prevent users from opening non-existing Z ports. 24 * Prevent users from opening non-existing Z ports.
26 * 25 *
27 * Revision 2.3.2.8 2000/07/06 18:14:16 ivan 26 * Revision 2.3.2.8 2000/07/06 18:14:16 ivan
@@ -62,7 +61,7 @@
62 * Driver now makes sure that the constant SERIAL_XMIT_SIZE is defined; 61 * Driver now makes sure that the constant SERIAL_XMIT_SIZE is defined;
63 * 62 *
64 * Revision 2.3.2.2 1999/10/01 11:27:43 ivan 63 * Revision 2.3.2.2 1999/10/01 11:27:43 ivan
65 * Fixed bug in cyz_poll that would make all ports but port 0 64 * Fixed bug in cyz_poll that would make all ports but port 0
66 * unable to transmit/receive data (Cyclades-Z only); 65 * unable to transmit/receive data (Cyclades-Z only);
67 * Implemented logic to prevent the RX buffer from being stuck with data 66 * Implemented logic to prevent the RX buffer from being stuck with data
68 * due to a driver / firmware race condition in interrupt op mode 67 * due to a driver / firmware race condition in interrupt op mode
@@ -83,25 +82,25 @@
83 * Revision 2.3.1.1 1999/07/15 16:45:53 ivan 82 * Revision 2.3.1.1 1999/07/15 16:45:53 ivan
84 * Removed CY_PROC conditional compilation; 83 * Removed CY_PROC conditional compilation;
85 * Implemented SMP-awareness for the driver; 84 * Implemented SMP-awareness for the driver;
86 * Implemented a new ISA IRQ autoprobe that uses the irq_probe_[on|off] 85 * Implemented a new ISA IRQ autoprobe that uses the irq_probe_[on|off]
87 * functions; 86 * functions;
88 * The driver now accepts memory addresses (maddr=0xMMMMM) and IRQs 87 * The driver now accepts memory addresses (maddr=0xMMMMM) and IRQs
89 * (irq=NN) as parameters (only for ISA boards); 88 * (irq=NN) as parameters (only for ISA boards);
90 * Fixed bug in set_line_char that would prevent the Cyclades-Z 89 * Fixed bug in set_line_char that would prevent the Cyclades-Z
91 * ports from being configured at speeds above 115.2Kbps; 90 * ports from being configured at speeds above 115.2Kbps;
92 * Fixed bug in cy_set_termios that would prevent XON/XOFF flow control 91 * Fixed bug in cy_set_termios that would prevent XON/XOFF flow control
93 * switching from working properly; 92 * switching from working properly;
94 * The driver now only prints IRQ info for the Cyclades-Z if it's 93 * The driver now only prints IRQ info for the Cyclades-Z if it's
95 * configured to work in interrupt mode; 94 * configured to work in interrupt mode;
96 * 95 *
97 * Revision 2.2.2.3 1999/06/28 11:13:29 ivan 96 * Revision 2.2.2.3 1999/06/28 11:13:29 ivan
98 * Added support for interrupt mode operation for the Z cards; 97 * Added support for interrupt mode operation for the Z cards;
99 * Removed the driver inactivity control for the Z; 98 * Removed the driver inactivity control for the Z;
100 * Added a missing MOD_DEC_USE_COUNT in the cy_open function for when 99 * Added a missing MOD_DEC_USE_COUNT in the cy_open function for when
101 * the Z firmware is not loaded yet; 100 * the Z firmware is not loaded yet;
102 * Replaced the "manual" Z Tx flush buffer by a call to a FW command of 101 * Replaced the "manual" Z Tx flush buffer by a call to a FW command of
103 * same functionality; 102 * same functionality;
104 * Implemented workaround for IRQ setting loss on the PCI configuration 103 * Implemented workaround for IRQ setting loss on the PCI configuration
105 * registers after a PCI bridge EEPROM reload (affects PLX9060 only); 104 * registers after a PCI bridge EEPROM reload (affects PLX9060 only);
106 * 105 *
107 * Revision 2.2.2.2 1999/05/14 17:18:15 ivan 106 * Revision 2.2.2.2 1999/05/14 17:18:15 ivan
@@ -112,22 +111,22 @@
112 * BREAK implementation changed in order to make use of the 'break_ctl' 111 * BREAK implementation changed in order to make use of the 'break_ctl'
113 * TTY facility; 112 * TTY facility;
114 * Fixed typo in TTY structure field 'driver_name'; 113 * Fixed typo in TTY structure field 'driver_name';
115 * Included a PCI bridge reset and EEPROM reload in the board 114 * Included a PCI bridge reset and EEPROM reload in the board
116 * initialization code (for both Y and Z series). 115 * initialization code (for both Y and Z series).
117 * 116 *
118 * Revision 2.2.2.1 1999/04/08 16:17:43 ivan 117 * Revision 2.2.2.1 1999/04/08 16:17:43 ivan
119 * Fixed a bug in cy_wait_until_sent that was preventing the port to be 118 * Fixed a bug in cy_wait_until_sent that was preventing the port to be
120 * closed properly after a SIGINT; 119 * closed properly after a SIGINT;
121 * Module usage counter scheme revisited; 120 * Module usage counter scheme revisited;
 122 * Added support for the upcoming Y PCI boards (i.e., support for additional 121 * Added support for the upcoming Y PCI boards (i.e., support for additional
 123 * PCI Device IDs). 122 * PCI Device IDs).
124 * 123 *
125 * Revision 2.2.1.10 1999/01/20 16:14:29 ivan 124 * Revision 2.2.1.10 1999/01/20 16:14:29 ivan
 126 * Removed all unnecessary page-alignment operations in ioremap calls 125 * Removed all unnecessary page-alignment operations in ioremap calls
127 * (ioremap is currently safe for these operations). 126 * (ioremap is currently safe for these operations).
128 * 127 *
129 * Revision 2.2.1.9 1998/12/30 18:18:30 ivan 128 * Revision 2.2.1.9 1998/12/30 18:18:30 ivan
130 * Changed access to PLX PCI bridge registers from I/O to MMIO, in 129 * Changed access to PLX PCI bridge registers from I/O to MMIO, in
131 * order to make PLX9050-based boards work with certain motherboards. 130 * order to make PLX9050-based boards work with certain motherboards.
132 * 131 *
133 * Revision 2.2.1.8 1998/11/13 12:46:20 ivan 132 * Revision 2.2.1.8 1998/11/13 12:46:20 ivan
@@ -148,7 +147,7 @@
148 * Fixed Cyclom-4Yo hardware detection bug. 147 * Fixed Cyclom-4Yo hardware detection bug.
149 * 148 *
150 * Revision 2.2.1.4 1998/08/04 11:02:50 ivan 149 * Revision 2.2.1.4 1998/08/04 11:02:50 ivan
151 * /proc/cyclades implementation with great collaboration of 150 * /proc/cyclades implementation with great collaboration of
152 * Marc Lewis <marc@blarg.net>; 151 * Marc Lewis <marc@blarg.net>;
153 * cyy_interrupt was changed to avoid occurrence of kernel oopses 152 * cyy_interrupt was changed to avoid occurrence of kernel oopses
154 * during PPP operation. 153 * during PPP operation.
@@ -157,7 +156,7 @@
157 * General code review in order to comply with 2.1 kernel standards; 156 * General code review in order to comply with 2.1 kernel standards;
158 * data loss prevention for slow devices revisited (cy_wait_until_sent 157 * data loss prevention for slow devices revisited (cy_wait_until_sent
159 * was created); 158 * was created);
160 * removed conditional compilation for new/old PCI structure support 159 * removed conditional compilation for new/old PCI structure support
161 * (now the driver only supports the new PCI structure). 160 * (now the driver only supports the new PCI structure).
162 * 161 *
163 * Revision 2.2.1.1 1998/03/19 16:43:12 ivan 162 * Revision 2.2.1.1 1998/03/19 16:43:12 ivan
@@ -168,7 +167,7 @@
168 * cleaned up the data loss fix; 167 * cleaned up the data loss fix;
169 * fixed XON/XOFF handling once more (Cyclades-Z); 168 * fixed XON/XOFF handling once more (Cyclades-Z);
170 * general review of the driver routines; 169 * general review of the driver routines;
171 * introduction of a mechanism to prevent data loss with slow 170 * introduction of a mechanism to prevent data loss with slow
172 * printers, by forcing a delay before closing the port. 171 * printers, by forcing a delay before closing the port.
173 * 172 *
174 * Revision 2.1.1.2 1998/02/17 16:50:00 ivan 173 * Revision 2.1.1.2 1998/02/17 16:50:00 ivan
@@ -182,12 +181,12 @@
182 * Code review for the module cleanup routine; 181 * Code review for the module cleanup routine;
183 * fixed RTS and DTR status report for new CD1400's in get_modem_info; 182 * fixed RTS and DTR status report for new CD1400's in get_modem_info;
184 * includes anonymous changes regarding signal_pending. 183 * includes anonymous changes regarding signal_pending.
185 * 184 *
186 * Revision 2.1 1997/11/01 17:42:41 ivan 185 * Revision 2.1 1997/11/01 17:42:41 ivan
187 * Changes in the driver to support Alpha systems (except 8Zo V_1); 186 * Changes in the driver to support Alpha systems (except 8Zo V_1);
188 * BREAK fix for the Cyclades-Z boards; 187 * BREAK fix for the Cyclades-Z boards;
189 * driver inactivity control by FW implemented; 188 * driver inactivity control by FW implemented;
190 * introduction of flag that allows driver to take advantage of 189 * introduction of flag that allows driver to take advantage of
191 * a special CD1400 feature related to HW flow control; 190 * a special CD1400 feature related to HW flow control;
192 * added support for the CD1400 rev. J (Cyclom-Y boards); 191 * added support for the CD1400 rev. J (Cyclom-Y boards);
193 * introduction of ioctls to: 192 * introduction of ioctls to:
@@ -196,17 +195,17 @@
196 * - adjust the polling interval (Cyclades-Z); 195 * - adjust the polling interval (Cyclades-Z);
197 * 196 *
198 * Revision 1.36.4.33 1997/06/27 19:00:00 ivan 197 * Revision 1.36.4.33 1997/06/27 19:00:00 ivan
199 * Fixes related to kernel version conditional 198 * Fixes related to kernel version conditional
200 * compilation. 199 * compilation.
201 * 200 *
202 * Revision 1.36.4.32 1997/06/14 19:30:00 ivan 201 * Revision 1.36.4.32 1997/06/14 19:30:00 ivan
203 * Compatibility issues between kernels 2.0.x and 202 * Compatibility issues between kernels 2.0.x and
204 * 2.1.x (mainly related to clear_bit function). 203 * 2.1.x (mainly related to clear_bit function).
205 * 204 *
206 * Revision 1.36.4.31 1997/06/03 15:30:00 ivan 205 * Revision 1.36.4.31 1997/06/03 15:30:00 ivan
207 * Changes to define the memory window according to the 206 * Changes to define the memory window according to the
208 * board type. 207 * board type.
209 * 208 *
210 * Revision 1.36.4.30 1997/05/16 15:30:00 daniel 209 * Revision 1.36.4.30 1997/05/16 15:30:00 daniel
211 * Changes to support new cycladesZ boards. 210 * Changes to support new cycladesZ boards.
212 * 211 *
@@ -624,7 +623,7 @@
624#undef CY_PCI_DEBUG 623#undef CY_PCI_DEBUG
625 624
626/* 625/*
627 * Include section 626 * Include section
628 */ 627 */
629#include <linux/module.h> 628#include <linux/module.h>
630#include <linux/errno.h> 629#include <linux/errno.h>
@@ -649,9 +648,9 @@
649#include <linux/firmware.h> 648#include <linux/firmware.h>
650 649
651#include <asm/system.h> 650#include <asm/system.h>
652#include <asm/io.h> 651#include <linux/io.h>
653#include <asm/irq.h> 652#include <asm/irq.h>
654#include <asm/uaccess.h> 653#include <linux/uaccess.h>
655 654
656#include <linux/kernel.h> 655#include <linux/kernel.h>
657#include <linux/pci.h> 656#include <linux/pci.h>
@@ -668,10 +667,10 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
668 ((readl(&((struct RUNTIME_9060 __iomem *) \ 667 ((readl(&((struct RUNTIME_9060 __iomem *) \
669 ((card).ctl_addr))->init_ctrl) & (1<<17)) != 0) 668 ((card).ctl_addr))->init_ctrl) & (1<<17)) != 0)
670 669
671#define ISZLOADED(card) (((ZO_V1==readl(&((struct RUNTIME_9060 __iomem *) \ 670#define ISZLOADED(card) (((ZO_V1 == readl(&((struct RUNTIME_9060 __iomem *) \
672 ((card).ctl_addr))->mail_box_0)) || \ 671 ((card).ctl_addr))->mail_box_0)) || \
673 Z_FPGA_CHECK(card)) && \ 672 Z_FPGA_CHECK(card)) && \
674 (ZFIRM_ID==readl(&((struct FIRM_ID __iomem *) \ 673 (ZFIRM_ID == readl(&((struct FIRM_ID __iomem *) \
675 ((card).base_addr+ID_ADDRESS))->signature))) 674 ((card).base_addr+ID_ADDRESS))->signature)))
676 675
677#ifndef SERIAL_XMIT_SIZE 676#ifndef SERIAL_XMIT_SIZE
@@ -809,12 +808,12 @@ static char baud_cor3[] = { /* receive threshold */
809 808
810/* 809/*
 811 * The Cyclades driver implements HW flow control as any serial driver does. 810 * The Cyclades driver implements HW flow control as any serial driver does.
812 * The cyclades_port structure member rflow and the vector rflow_thr 811 * The cyclades_port structure member rflow and the vector rflow_thr
 813 * allow us to take advantage of a special feature in the CD1400 to avoid 812 * allow us to take advantage of a special feature in the CD1400 to avoid
814 * data loss even when the system interrupt latency is too high. These flags 813 * data loss even when the system interrupt latency is too high. These flags
815 * are to be used only with very special applications. Setting these flags 814 * are to be used only with very special applications. Setting these flags
816 * requires the use of a special cable (DTR and RTS reversed). In the new 815 * requires the use of a special cable (DTR and RTS reversed). In the new
817 * CD1400-based boards (rev. 6.00 or later), there is no need for special 816 * CD1400-based boards (rev. 6.00 or later), there is no need for special
818 * cables. 817 * cables.
819 */ 818 */
820 819
@@ -841,14 +840,22 @@ static int cy_chip_offset[] = { 0x0000,
841 840
842#ifdef CONFIG_PCI 841#ifdef CONFIG_PCI
843static struct pci_device_id cy_pci_dev_id[] __devinitdata = { 842static struct pci_device_id cy_pci_dev_id[] __devinitdata = {
844 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) }, /* PCI < 1Mb */ 843 /* PCI < 1Mb */
845 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) }, /* PCI > 1Mb */ 844 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) },
846 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) }, /* 4Y PCI < 1Mb */ 845 /* PCI > 1Mb */
847 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) }, /* 4Y PCI > 1Mb */ 846 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) },
848 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) }, /* 8Y PCI < 1Mb */ 847 /* 4Y PCI < 1Mb */
849 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) }, /* 8Y PCI > 1Mb */ 848 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) },
850 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) }, /* Z PCI < 1Mb */ 849 /* 4Y PCI > 1Mb */
851 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) }, /* Z PCI > 1Mb */ 850 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) },
851 /* 8Y PCI < 1Mb */
852 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) },
853 /* 8Y PCI > 1Mb */
854 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) },
855 /* Z PCI < 1Mb */
856 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) },
857 /* Z PCI > 1Mb */
858 { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) },
852 { } /* end of table */ 859 { } /* end of table */
853}; 860};
854MODULE_DEVICE_TABLE(pci, cy_pci_dev_id); 861MODULE_DEVICE_TABLE(pci, cy_pci_dev_id);
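The hunk above only reflows the Cyclom-Y/Z PCI ID table so each comment sits on its own line. For context, a minimal sketch of the same table pattern, assuming made-up vendor/device IDs rather than the driver's real PCI_DEVICE_ID_CYCLOM_* values:

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID 0x1234	/* hypothetical vendor ID */
#define EXAMPLE_DEVICE_ID 0x5678	/* hypothetical device ID */

static struct pci_device_id example_pci_tbl[] = {
	/* one entry per supported vendor/device pair */
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
	{ }	/* all-zero terminating entry */
};
/* export the table so the module can be autoloaded on a PCI match */
MODULE_DEVICE_TABLE(pci, example_pci_tbl);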
@@ -905,15 +912,14 @@ static inline int serial_paranoia_check(struct cyclades_port *info,
905 912
906 This function is only called from inside spinlock-protected code. 913 This function is only called from inside spinlock-protected code.
907 */ 914 */
908static int cyy_issue_cmd(void __iomem * base_addr, u_char cmd, int index) 915static int cyy_issue_cmd(void __iomem *base_addr, u_char cmd, int index)
909{ 916{
910 unsigned int i; 917 unsigned int i;
911 918
912 /* Check to see that the previous command has completed */ 919 /* Check to see that the previous command has completed */
913 for (i = 0; i < 100; i++) { 920 for (i = 0; i < 100; i++) {
914 if (readb(base_addr + (CyCCR << index)) == 0) { 921 if (readb(base_addr + (CyCCR << index)) == 0)
915 break; 922 break;
916 }
917 udelay(10L); 923 udelay(10L);
918 } 924 }
919 /* if the CCR never cleared, the previous command 925 /* if the CCR never cleared, the previous command
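cyy_issue_cmd() above waits for the previous command to complete by polling the CCR register up to 100 times with a 10 microsecond delay between reads. A user-space analogue of that bounded-poll pattern, assuming hypothetical is_ready() and delay_us() callbacks in place of readb() and udelay():

#include <stdbool.h>

static bool poll_until_ready(bool (*is_ready)(void),
			     void (*delay_us)(unsigned int),
			     unsigned int max_tries)
{
	unsigned int i;

	/* check, then back off, for at most max_tries iterations */
	for (i = 0; i < max_tries; i++) {
		if (is_ready())
			return true;	/* previous command completed */
		delay_us(10);
	}
	return false;	/* timed out; the caller reports the failure */
}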
@@ -929,7 +935,7 @@ static int cyy_issue_cmd(void __iomem * base_addr, u_char cmd, int index)
929 935
930#ifdef CONFIG_ISA 936#ifdef CONFIG_ISA
931/* ISA interrupt detection code */ 937/* ISA interrupt detection code */
932static unsigned detect_isa_irq(void __iomem * address) 938static unsigned detect_isa_irq(void __iomem *address)
933{ 939{
934 int irq; 940 int irq;
935 unsigned long irqs, flags; 941 unsigned long irqs, flags;
@@ -1038,7 +1044,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
1038 if (info->flags & ASYNC_SAK) 1044 if (info->flags & ASYNC_SAK)
1039 do_SAK(tty); 1045 do_SAK(tty);
1040 } else if (data & CyFRAME) { 1046 } else if (data & CyFRAME) {
1041 tty_insert_flip_char( tty, 1047 tty_insert_flip_char(tty,
1042 readb(base_addr + (CyRDSR << 1048 readb(base_addr + (CyRDSR <<
1043 index)), TTY_FRAME); 1049 index)), TTY_FRAME);
1044 info->icount.rx++; 1050 info->icount.rx++;
@@ -1320,7 +1326,8 @@ static irqreturn_t cyy_interrupt(int irq, void *dev_id)
1320 1326
1321 if (unlikely(cinfo == NULL)) { 1327 if (unlikely(cinfo == NULL)) {
1322#ifdef CY_DEBUG_INTERRUPTS 1328#ifdef CY_DEBUG_INTERRUPTS
1323 printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n",irq); 1329 printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n",
1330 irq);
1324#endif 1331#endif
1325 return IRQ_NONE; /* spurious interrupt */ 1332 return IRQ_NONE; /* spurious interrupt */
1326 } 1333 }
@@ -1375,12 +1382,12 @@ static irqreturn_t cyy_interrupt(int irq, void *dev_id)
1375 1382
1376/***********************************************************/ 1383/***********************************************************/
1377/********* End of block of Cyclom-Y specific code **********/ 1384/********* End of block of Cyclom-Y specific code **********/
1378/******** Start of block of Cyclades-Z specific code *********/ 1385/******** Start of block of Cyclades-Z specific code *******/
1379/***********************************************************/ 1386/***********************************************************/
1380 1387
1381static int 1388static int
1382cyz_fetch_msg(struct cyclades_card *cinfo, 1389cyz_fetch_msg(struct cyclades_card *cinfo,
1383 __u32 * channel, __u8 * cmd, __u32 * param) 1390 __u32 *channel, __u8 *cmd, __u32 *param)
1384{ 1391{
1385 struct FIRM_ID __iomem *firm_id; 1392 struct FIRM_ID __iomem *firm_id;
1386 struct ZFW_CTRL __iomem *zfw_ctrl; 1393 struct ZFW_CTRL __iomem *zfw_ctrl;
@@ -1388,9 +1395,8 @@ cyz_fetch_msg(struct cyclades_card *cinfo,
1388 unsigned long loc_doorbell; 1395 unsigned long loc_doorbell;
1389 1396
1390 firm_id = cinfo->base_addr + ID_ADDRESS; 1397 firm_id = cinfo->base_addr + ID_ADDRESS;
1391 if (!ISZLOADED(*cinfo)) { 1398 if (!ISZLOADED(*cinfo))
1392 return -1; 1399 return -1;
1393 }
1394 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); 1400 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
1395 board_ctrl = &zfw_ctrl->board_ctrl; 1401 board_ctrl = &zfw_ctrl->board_ctrl;
1396 1402
@@ -1418,9 +1424,9 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
1418 unsigned int index; 1424 unsigned int index;
1419 1425
1420 firm_id = cinfo->base_addr + ID_ADDRESS; 1426 firm_id = cinfo->base_addr + ID_ADDRESS;
1421 if (!ISZLOADED(*cinfo)) { 1427 if (!ISZLOADED(*cinfo))
1422 return -1; 1428 return -1;
1423 } 1429
1424 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); 1430 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
1425 board_ctrl = &zfw_ctrl->board_ctrl; 1431 board_ctrl = &zfw_ctrl->board_ctrl;
1426 1432
@@ -1428,9 +1434,8 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
1428 pci_doorbell = 1434 pci_doorbell =
1429 &((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->pci_doorbell; 1435 &((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->pci_doorbell;
1430 while ((readl(pci_doorbell) & 0xff) != 0) { 1436 while ((readl(pci_doorbell) & 0xff) != 0) {
1431 if (index++ == 1000) { 1437 if (index++ == 1000)
1432 return (int)(readl(pci_doorbell) & 0xff); 1438 return (int)(readl(pci_doorbell) & 0xff);
1433 }
1434 udelay(50L); 1439 udelay(50L);
1435 } 1440 }
1436 cy_writel(&board_ctrl->hcmd_channel, channel); 1441 cy_writel(&board_ctrl->hcmd_channel, channel);
@@ -1504,7 +1509,8 @@ static void cyz_handle_rx(struct cyclades_port *info,
1504 while (len--) { 1509 while (len--) {
1505 data = readb(cinfo->base_addr + rx_bufaddr + 1510 data = readb(cinfo->base_addr + rx_bufaddr +
1506 new_rx_get); 1511 new_rx_get);
1507 new_rx_get = (new_rx_get + 1)& (rx_bufsize - 1); 1512 new_rx_get = (new_rx_get + 1) &
1513 (rx_bufsize - 1);
1508 tty_insert_flip_char(tty, data, TTY_NORMAL); 1514 tty_insert_flip_char(tty, data, TTY_NORMAL);
1509 info->idle_stats.recv_bytes++; 1515 info->idle_stats.recv_bytes++;
1510 info->icount.rx++; 1516 info->icount.rx++;
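The re-wrapped line above, new_rx_get = (new_rx_get + 1) & (rx_bufsize - 1), advances a read index around a ring buffer whose size is a power of two, so the mask is equivalent to a modulo. A tiny standalone illustration with arbitrary values:

#include <stdio.h>

int main(void)
{
	unsigned int rx_bufsize = 8;	/* power of two, as the mask requires */
	unsigned int get = 6;		/* arbitrary starting index */
	int i;

	for (i = 0; i < 4; i++) {
		printf("get = %u\n", get);
		get = (get + 1) & (rx_bufsize - 1);	/* prints 6, 7, 0, 1 */
	}
	return 0;
}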
@@ -1636,7 +1642,8 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
1636 special_count = 0; 1642 special_count = 0;
1637 delta_count = 0; 1643 delta_count = 0;
1638 info = &cinfo->ports[channel]; 1644 info = &cinfo->ports[channel];
1639 if ((tty = info->tty) == NULL) 1645 tty = info->tty;
1646 if (tty == NULL)
1640 continue; 1647 continue;
1641 1648
1642 ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); 1649 ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]);
@@ -1732,7 +1739,8 @@ static irqreturn_t cyz_interrupt(int irq, void *dev_id)
1732 1739
1733 if (unlikely(cinfo == NULL)) { 1740 if (unlikely(cinfo == NULL)) {
1734#ifdef CY_DEBUG_INTERRUPTS 1741#ifdef CY_DEBUG_INTERRUPTS
1735 printk(KERN_DEBUG "cyz_interrupt: spurious interrupt %d\n",irq); 1742 printk(KERN_DEBUG "cyz_interrupt: spurious interrupt %d\n",
1743 irq);
1736#endif 1744#endif
1737 return IRQ_NONE; /* spurious interrupt */ 1745 return IRQ_NONE; /* spurious interrupt */
1738 } 1746 }
@@ -1851,9 +1859,8 @@ static int startup(struct cyclades_port *info)
1851 } 1859 }
1852 1860
1853 if (!info->type) { 1861 if (!info->type) {
1854 if (info->tty) { 1862 if (info->tty)
1855 set_bit(TTY_IO_ERROR, &info->tty->flags); 1863 set_bit(TTY_IO_ERROR, &info->tty->flags);
1856 }
1857 free_page(page); 1864 free_page(page);
1858 goto errout; 1865 goto errout;
1859 } 1866 }
@@ -1904,9 +1911,8 @@ static int startup(struct cyclades_port *info)
1904 readb(base_addr + (CySRER << index)) | CyRxData); 1911 readb(base_addr + (CySRER << index)) | CyRxData);
1905 info->flags |= ASYNC_INITIALIZED; 1912 info->flags |= ASYNC_INITIALIZED;
1906 1913
1907 if (info->tty) { 1914 if (info->tty)
1908 clear_bit(TTY_IO_ERROR, &info->tty->flags); 1915 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1909 }
1910 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 1916 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1911 info->breakon = info->breakoff = 0; 1917 info->breakon = info->breakoff = 0;
1912 memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats)); 1918 memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
@@ -1925,9 +1931,8 @@ static int startup(struct cyclades_port *info)
1925 base_addr = card->base_addr; 1931 base_addr = card->base_addr;
1926 1932
1927 firm_id = base_addr + ID_ADDRESS; 1933 firm_id = base_addr + ID_ADDRESS;
1928 if (!ISZLOADED(*card)) { 1934 if (!ISZLOADED(*card))
1929 return -ENODEV; 1935 return -ENODEV;
1930 }
1931 1936
1932 zfw_ctrl = card->base_addr + 1937 zfw_ctrl = card->base_addr +
1933 (readl(&firm_id->zfwctrl_addr) & 0xfffff); 1938 (readl(&firm_id->zfwctrl_addr) & 0xfffff);
@@ -1990,9 +1995,8 @@ static int startup(struct cyclades_port *info)
1990 /* enable send, recv, modem !!! */ 1995 /* enable send, recv, modem !!! */
1991 1996
1992 info->flags |= ASYNC_INITIALIZED; 1997 info->flags |= ASYNC_INITIALIZED;
1993 if (info->tty) { 1998 if (info->tty)
1994 clear_bit(TTY_IO_ERROR, &info->tty->flags); 1999 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1995 }
1996 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 2000 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1997 info->breakon = info->breakoff = 0; 2001 info->breakon = info->breakoff = 0;
1998 memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats)); 2002 memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
@@ -2061,9 +2065,8 @@ static void shutdown(struct cyclades_port *info)
2061 void __iomem *base_addr; 2065 void __iomem *base_addr;
2062 int chip, channel, index; 2066 int chip, channel, index;
2063 2067
2064 if (!(info->flags & ASYNC_INITIALIZED)) { 2068 if (!(info->flags & ASYNC_INITIALIZED))
2065 return; 2069 return;
2066 }
2067 2070
2068 card = info->card; 2071 card = info->card;
2069 channel = info->line - card->first_line; 2072 channel = info->line - card->first_line;
@@ -2105,9 +2108,8 @@ static void shutdown(struct cyclades_port *info)
2105 /* it may be appropriate to clear _XMIT at 2108 /* it may be appropriate to clear _XMIT at
2106 some later date (after testing)!!! */ 2109 some later date (after testing)!!! */
2107 2110
2108 if (info->tty) { 2111 if (info->tty)
2109 set_bit(TTY_IO_ERROR, &info->tty->flags); 2112 set_bit(TTY_IO_ERROR, &info->tty->flags);
2110 }
2111 info->flags &= ~ASYNC_INITIALIZED; 2113 info->flags &= ~ASYNC_INITIALIZED;
2112 spin_unlock_irqrestore(&card->card_lock, flags); 2114 spin_unlock_irqrestore(&card->card_lock, flags);
2113 } else { 2115 } else {
@@ -2124,9 +2126,8 @@ static void shutdown(struct cyclades_port *info)
2124#endif 2126#endif
2125 2127
2126 firm_id = base_addr + ID_ADDRESS; 2128 firm_id = base_addr + ID_ADDRESS;
2127 if (!ISZLOADED(*card)) { 2129 if (!ISZLOADED(*card))
2128 return; 2130 return;
2129 }
2130 2131
2131 zfw_ctrl = card->base_addr + 2132 zfw_ctrl = card->base_addr +
2132 (readl(&firm_id->zfwctrl_addr) & 0xfffff); 2133 (readl(&firm_id->zfwctrl_addr) & 0xfffff);
@@ -2157,9 +2158,8 @@ static void shutdown(struct cyclades_port *info)
2157#endif 2158#endif
2158 } 2159 }
2159 2160
2160 if (info->tty) { 2161 if (info->tty)
2161 set_bit(TTY_IO_ERROR, &info->tty->flags); 2162 set_bit(TTY_IO_ERROR, &info->tty->flags);
2162 }
2163 info->flags &= ~ASYNC_INITIALIZED; 2163 info->flags &= ~ASYNC_INITIALIZED;
2164 2164
2165 spin_unlock_irqrestore(&card->card_lock, flags); 2165 spin_unlock_irqrestore(&card->card_lock, flags);
@@ -2204,7 +2204,8 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
2204 * If non-blocking mode is set, then make the check up front 2204 * If non-blocking mode is set, then make the check up front
2205 * and then exit. 2205 * and then exit.
2206 */ 2206 */
2207 if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) { 2207 if ((filp->f_flags & O_NONBLOCK) ||
2208 (tty->flags & (1 << TTY_IO_ERROR))) {
2208 info->flags |= ASYNC_NORMAL_ACTIVE; 2209 info->flags |= ASYNC_NORMAL_ACTIVE;
2209 return 0; 2210 return 0;
2210 } 2211 }
@@ -2301,7 +2302,8 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
2301 return -EINVAL; 2302 return -EINVAL;
2302 } 2303 }
2303 2304
2304 zfw_ctrl = base_addr + (readl(&firm_id->zfwctrl_addr)& 0xfffff); 2305 zfw_ctrl = base_addr + (readl(&firm_id->zfwctrl_addr)
2306 & 0xfffff);
2305 board_ctrl = &zfw_ctrl->board_ctrl; 2307 board_ctrl = &zfw_ctrl->board_ctrl;
2306 ch_ctrl = zfw_ctrl->ch_ctrl; 2308 ch_ctrl = zfw_ctrl->ch_ctrl;
2307 2309
@@ -2378,9 +2380,9 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
2378 int retval; 2380 int retval;
2379 2381
2380 line = tty->index; 2382 line = tty->index;
2381 if ((tty->index < 0) || (NR_PORTS <= line)) { 2383 if (tty->index < 0 || NR_PORTS <= line)
2382 return -ENODEV; 2384 return -ENODEV;
2383 } 2385
2384 for (i = 0; i < NR_CARDS; i++) 2386 for (i = 0; i < NR_CARDS; i++)
2385 if (line < cy_card[i].first_line + cy_card[i].nports && 2387 if (line < cy_card[i].first_line + cy_card[i].nports &&
2386 line >= cy_card[i].first_line) 2388 line >= cy_card[i].first_line)
@@ -2388,9 +2390,8 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
2388 if (i >= NR_CARDS) 2390 if (i >= NR_CARDS)
2389 return -ENODEV; 2391 return -ENODEV;
2390 info = &cy_card[i].ports[line - cy_card[i].first_line]; 2392 info = &cy_card[i].ports[line - cy_card[i].first_line];
2391 if (info->line < 0) { 2393 if (info->line < 0)
2392 return -ENODEV; 2394 return -ENODEV;
2393 }
2394 2395
2395 /* If the card's firmware hasn't been loaded, 2396 /* If the card's firmware hasn't been loaded,
2396 treat it as absent from the system. This 2397 treat it as absent from the system. This
@@ -2456,9 +2457,9 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
2456#endif 2457#endif
2457 tty->driver_data = info; 2458 tty->driver_data = info;
2458 info->tty = tty; 2459 info->tty = tty;
2459 if (serial_paranoia_check(info, tty->name, "cy_open")) { 2460 if (serial_paranoia_check(info, tty->name, "cy_open"))
2460 return -ENODEV; 2461 return -ENODEV;
2461 } 2462
2462#ifdef CY_DEBUG_OPEN 2463#ifdef CY_DEBUG_OPEN
2463 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line, 2464 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
2464 info->count); 2465 info->count);
@@ -2482,9 +2483,8 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
2482 * Start up serial port 2483 * Start up serial port
2483 */ 2484 */
2484 retval = startup(info); 2485 retval = startup(info);
2485 if (retval) { 2486 if (retval)
2486 return retval; 2487 return retval;
2487 }
2488 2488
2489 retval = block_til_ready(tty, filp, info); 2489 retval = block_til_ready(tty, filp, info);
2490 if (retval) { 2490 if (retval) {
@@ -2522,6 +2522,7 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
2522 return; /* Just in case.... */ 2522 return; /* Just in case.... */
2523 2523
2524 orig_jiffies = jiffies; 2524 orig_jiffies = jiffies;
2525 lock_kernel();
2525 /* 2526 /*
2526 * Set the check interval to be 1/5 of the estimated time to 2527 * Set the check interval to be 1/5 of the estimated time to
2527 * send a single character, and make it at least 1. The check 2528 * send a single character, and make it at least 1. The check
@@ -2573,11 +2574,47 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
2573 } 2574 }
2574 /* Run one more char cycle */ 2575 /* Run one more char cycle */
2575 msleep_interruptible(jiffies_to_msecs(char_time * 5)); 2576 msleep_interruptible(jiffies_to_msecs(char_time * 5));
2577 unlock_kernel();
2576#ifdef CY_DEBUG_WAIT_UNTIL_SENT 2578#ifdef CY_DEBUG_WAIT_UNTIL_SENT
2577 printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies); 2579 printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies);
2578#endif 2580#endif
2579} 2581}
2580 2582
2583static void cy_flush_buffer(struct tty_struct *tty)
2584{
2585 struct cyclades_port *info = tty->driver_data;
2586 struct cyclades_card *card;
2587 int channel, retval;
2588 unsigned long flags;
2589
2590#ifdef CY_DEBUG_IO
2591 printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line);
2592#endif
2593
2594 if (serial_paranoia_check(info, tty->name, "cy_flush_buffer"))
2595 return;
2596
2597 card = info->card;
2598 channel = info->line - card->first_line;
2599
2600 spin_lock_irqsave(&card->card_lock, flags);
2601 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2602 spin_unlock_irqrestore(&card->card_lock, flags);
2603
2604 if (IS_CYC_Z(*card)) { /* If it is a Z card, flush the on-board
2605 buffers as well */
2606 spin_lock_irqsave(&card->card_lock, flags);
2607 retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
2608 if (retval != 0) {
2609 printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d "
2610 "was %x\n", info->line, retval);
2611 }
2612 spin_unlock_irqrestore(&card->card_lock, flags);
2613 }
2614 tty_wakeup(tty);
2615} /* cy_flush_buffer */
2616
2617
2581/* 2618/*
2582 * This routine is called when a particular tty device is closed. 2619 * This routine is called when a particular tty device is closed.
2583 */ 2620 */
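cy_flush_buffer(), moved up here so cy_close() can call it directly, resets the transmit ring indices under the card spinlock, asks the Z firmware to flush its on-board buffer (C_CM_FLUSH_TX), and then wakes the tty. A user-space sketch of the core pattern, with invented types and a pthread mutex/condvar standing in for the spinlock and tty_wakeup():

#include <pthread.h>

struct xmit_ring {
	pthread_mutex_t lock;
	pthread_cond_t  space;
	unsigned int    head, tail, cnt;
};

static void ring_flush(struct xmit_ring *r)
{
	pthread_mutex_lock(&r->lock);
	r->cnt = r->head = r->tail = 0;		/* drop buffered output */
	pthread_mutex_unlock(&r->lock);
	pthread_cond_broadcast(&r->space);	/* wake writers, as tty_wakeup() does */
}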
@@ -2591,9 +2628,8 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
2591 printk(KERN_DEBUG "cyc:cy_close ttyC%d\n", info->line); 2628 printk(KERN_DEBUG "cyc:cy_close ttyC%d\n", info->line);
2592#endif 2629#endif
2593 2630
2594 if (!info || serial_paranoia_check(info, tty->name, "cy_close")) { 2631 if (!info || serial_paranoia_check(info, tty->name, "cy_close"))
2595 return; 2632 return;
2596 }
2597 2633
2598 card = info->card; 2634 card = info->card;
2599 2635
@@ -2641,9 +2677,9 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
2641 */ 2677 */
2642 tty->closing = 1; 2678 tty->closing = 1;
2643 spin_unlock_irqrestore(&card->card_lock, flags); 2679 spin_unlock_irqrestore(&card->card_lock, flags);
2644 if (info->closing_wait != CY_CLOSING_WAIT_NONE) { 2680 if (info->closing_wait != CY_CLOSING_WAIT_NONE)
2645 tty_wait_until_sent(tty, info->closing_wait); 2681 tty_wait_until_sent(tty, info->closing_wait);
2646 } 2682
2647 spin_lock_irqsave(&card->card_lock, flags); 2683 spin_lock_irqsave(&card->card_lock, flags);
2648 2684
2649 if (!IS_CYC_Z(*card)) { 2685 if (!IS_CYC_Z(*card)) {
@@ -2657,15 +2693,16 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
2657 cy_writeb(base_addr + (CySRER << index), 2693 cy_writeb(base_addr + (CySRER << index),
2658 readb(base_addr + (CySRER << index)) & ~CyRxData); 2694 readb(base_addr + (CySRER << index)) & ~CyRxData);
2659 if (info->flags & ASYNC_INITIALIZED) { 2695 if (info->flags & ASYNC_INITIALIZED) {
2660 /* Waiting for on-board buffers to be empty before closing 2696 /* Waiting for on-board buffers to be empty before
2661 the port */ 2697 closing the port */
2662 spin_unlock_irqrestore(&card->card_lock, flags); 2698 spin_unlock_irqrestore(&card->card_lock, flags);
2663 cy_wait_until_sent(tty, info->timeout); 2699 cy_wait_until_sent(tty, info->timeout);
2664 spin_lock_irqsave(&card->card_lock, flags); 2700 spin_lock_irqsave(&card->card_lock, flags);
2665 } 2701 }
2666 } else { 2702 } else {
2667#ifdef Z_WAKE 2703#ifdef Z_WAKE
2668 /* Waiting for on-board buffers to be empty before closing the port */ 2704 /* Waiting for on-board buffers to be empty before closing
2705 the port */
2669 void __iomem *base_addr = card->base_addr; 2706 void __iomem *base_addr = card->base_addr;
2670 struct FIRM_ID __iomem *firm_id = base_addr + ID_ADDRESS; 2707 struct FIRM_ID __iomem *firm_id = base_addr + ID_ADDRESS;
2671 struct ZFW_CTRL __iomem *zfw_ctrl = 2708 struct ZFW_CTRL __iomem *zfw_ctrl =
@@ -2689,8 +2726,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
2689 2726
2690 spin_unlock_irqrestore(&card->card_lock, flags); 2727 spin_unlock_irqrestore(&card->card_lock, flags);
2691 shutdown(info); 2728 shutdown(info);
2692 if (tty->driver->flush_buffer) 2729 cy_flush_buffer(tty);
2693 tty->driver->flush_buffer(tty);
2694 tty_ldisc_flush(tty); 2730 tty_ldisc_flush(tty);
2695 spin_lock_irqsave(&card->card_lock, flags); 2731 spin_lock_irqsave(&card->card_lock, flags);
2696 2732
@@ -2738,17 +2774,16 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
2738 printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line); 2774 printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line);
2739#endif 2775#endif
2740 2776
2741 if (serial_paranoia_check(info, tty->name, "cy_write")) { 2777 if (serial_paranoia_check(info, tty->name, "cy_write"))
2742 return 0; 2778 return 0;
2743 }
2744 2779
2745 if (!info->xmit_buf) 2780 if (!info->xmit_buf)
2746 return 0; 2781 return 0;
2747 2782
2748 spin_lock_irqsave(&info->card->card_lock, flags); 2783 spin_lock_irqsave(&info->card->card_lock, flags);
2749 while (1) { 2784 while (1) {
2750 c = min(count, min((int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1), 2785 c = min(count, (int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1));
2751 (int)(SERIAL_XMIT_SIZE - info->xmit_head))); 2786 c = min(c, (int)(SERIAL_XMIT_SIZE - info->xmit_head));
2752 2787
2753 if (c <= 0) 2788 if (c <= 0)
2754 break; 2789 break;
@@ -2766,9 +2801,9 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
2766 info->idle_stats.xmit_bytes += ret; 2801 info->idle_stats.xmit_bytes += ret;
2767 info->idle_stats.xmit_idle = jiffies; 2802 info->idle_stats.xmit_idle = jiffies;
2768 2803
2769 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) { 2804 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped)
2770 start_xmit(info); 2805 start_xmit(info);
2771 } 2806
2772 return ret; 2807 return ret;
2773} /* cy_write */ 2808} /* cy_write */
2774 2809
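The nested min() in cy_write() is split into two steps above; together they compute the largest chunk that can be copied into the circular transmit buffer with a single memcpy: the total free space (keeping one slot open) capped by the space left before the buffer wraps. A standalone sketch with arbitrary example values:

#include <stdio.h>

#define XMIT_SIZE 4096u

static unsigned int contiguous_space(unsigned int cnt, unsigned int head,
				     unsigned int count)
{
	unsigned int c = count;

	if (c > XMIT_SIZE - cnt - 1)
		c = XMIT_SIZE - cnt - 1;	/* total free space, one slot kept open */
	if (c > XMIT_SIZE - head)
		c = XMIT_SIZE - head;		/* space before the wrap point */
	return c;
}

int main(void)
{
	/* 100 bytes queued, head at 4000: the wrap limits the copy to 96 bytes */
	printf("%u\n", contiguous_space(100, 4000, 500));
	return 0;
}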
@@ -2779,7 +2814,7 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
2779 * done stuffing characters into the driver. If there is no room 2814 * done stuffing characters into the driver. If there is no room
2780 * in the queue, the character is ignored. 2815 * in the queue, the character is ignored.
2781 */ 2816 */
2782static void cy_put_char(struct tty_struct *tty, unsigned char ch) 2817static int cy_put_char(struct tty_struct *tty, unsigned char ch)
2783{ 2818{
2784 struct cyclades_port *info = tty->driver_data; 2819 struct cyclades_port *info = tty->driver_data;
2785 unsigned long flags; 2820 unsigned long flags;
@@ -2789,15 +2824,15 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch)
2789#endif 2824#endif
2790 2825
2791 if (serial_paranoia_check(info, tty->name, "cy_put_char")) 2826 if (serial_paranoia_check(info, tty->name, "cy_put_char"))
2792 return; 2827 return 0;
2793 2828
2794 if (!info->xmit_buf) 2829 if (!info->xmit_buf)
2795 return; 2830 return 0;
2796 2831
2797 spin_lock_irqsave(&info->card->card_lock, flags); 2832 spin_lock_irqsave(&info->card->card_lock, flags);
2798 if (info->xmit_cnt >= (int)(SERIAL_XMIT_SIZE - 1)) { 2833 if (info->xmit_cnt >= (int)(SERIAL_XMIT_SIZE - 1)) {
2799 spin_unlock_irqrestore(&info->card->card_lock, flags); 2834 spin_unlock_irqrestore(&info->card->card_lock, flags);
2800 return; 2835 return 0;
2801 } 2836 }
2802 2837
2803 info->xmit_buf[info->xmit_head++] = ch; 2838 info->xmit_buf[info->xmit_head++] = ch;
@@ -2806,11 +2841,12 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch)
2806 info->idle_stats.xmit_bytes++; 2841 info->idle_stats.xmit_bytes++;
2807 info->idle_stats.xmit_idle = jiffies; 2842 info->idle_stats.xmit_idle = jiffies;
2808 spin_unlock_irqrestore(&info->card->card_lock, flags); 2843 spin_unlock_irqrestore(&info->card->card_lock, flags);
2844 return 1;
2809} /* cy_put_char */ 2845} /* cy_put_char */
2810 2846
2811/* 2847/*
2812 * This routine is called by the kernel after it has written a 2848 * This routine is called by the kernel after it has written a
2813 * series of characters to the tty device using put_char(). 2849 * series of characters to the tty device using put_char().
2814 */ 2850 */
2815static void cy_flush_chars(struct tty_struct *tty) 2851static void cy_flush_chars(struct tty_struct *tty)
2816{ 2852{
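The hunk above switches cy_put_char() from void to int so the caller can tell whether the character was queued. A minimal sketch of that contract, assuming an invented example_port in place of cyclades_port and omitting the spinlock the real driver takes:

#define XMIT_SIZE 4096u			/* power of two, so the mask below works */

struct example_port {
	unsigned char buf[XMIT_SIZE];
	unsigned int  head, cnt;
};

static int example_put_char(struct example_port *p, unsigned char ch)
{
	if (p->cnt >= XMIT_SIZE - 1)
		return 0;		/* no room: the character is dropped */
	p->buf[p->head++] = ch;
	p->head &= XMIT_SIZE - 1;	/* wrap the head index */
	p->cnt++;
	return 1;			/* queued successfully */
}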
@@ -2882,6 +2918,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
2882 int char_count; 2918 int char_count;
2883 __u32 tx_put, tx_get, tx_bufsize; 2919 __u32 tx_put, tx_get, tx_bufsize;
2884 2920
2921 lock_kernel();
2885 firm_id = card->base_addr + ID_ADDRESS; 2922 firm_id = card->base_addr + ID_ADDRESS;
2886 zfw_ctrl = card->base_addr + 2923 zfw_ctrl = card->base_addr +
2887 (readl(&firm_id->zfwctrl_addr) & 0xfffff); 2924 (readl(&firm_id->zfwctrl_addr) & 0xfffff);
@@ -2899,6 +2936,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
2899 printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n", 2936 printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
2900 info->line, info->xmit_cnt + char_count); 2937 info->line, info->xmit_cnt + char_count);
2901#endif 2938#endif
2939 unlock_kernel();
2902 return info->xmit_cnt + char_count; 2940 return info->xmit_cnt + char_count;
2903 } 2941 }
2904#endif /* Z_EXT_CHARS_IN_BUFFER */ 2942#endif /* Z_EXT_CHARS_IN_BUFFER */
@@ -2950,12 +2988,12 @@ static void set_line_char(struct cyclades_port *info)
2950 int baud, baud_rate = 0; 2988 int baud, baud_rate = 0;
2951 int i; 2989 int i;
2952 2990
2953 if (!info->tty || !info->tty->termios) { 2991 if (!info->tty || !info->tty->termios)
2954 return; 2992 return;
2955 } 2993
2956 if (info->line == -1) { 2994 if (info->line == -1)
2957 return; 2995 return;
2958 } 2996
2959 cflag = info->tty->termios->c_cflag; 2997 cflag = info->tty->termios->c_cflag;
2960 iflag = info->tty->termios->c_iflag; 2998 iflag = info->tty->termios->c_iflag;
2961 2999
@@ -2994,13 +3032,11 @@ static void set_line_char(struct cyclades_port *info)
2994 } 3032 }
2995 /* find the baud index */ 3033 /* find the baud index */
2996 for (i = 0; i < 20; i++) { 3034 for (i = 0; i < 20; i++) {
2997 if (baud == baud_table[i]) { 3035 if (baud == baud_table[i])
2998 break; 3036 break;
2999 }
3000 } 3037 }
3001 if (i == 20) { 3038 if (i == 20)
3002 i = 19; /* CD1400_MAX_SPEED */ 3039 i = 19; /* CD1400_MAX_SPEED */
3003 }
3004 3040
3005 if (baud == 38400 && (info->flags & ASYNC_SPD_MASK) == 3041 if (baud == 38400 && (info->flags & ASYNC_SPD_MASK) ==
3006 ASYNC_SPD_CUST) { 3042 ASYNC_SPD_CUST) {
@@ -3059,18 +3095,16 @@ static void set_line_char(struct cyclades_port *info)
3059 info->cor1 = Cy_8_BITS; 3095 info->cor1 = Cy_8_BITS;
3060 break; 3096 break;
3061 } 3097 }
3062 if (cflag & CSTOPB) { 3098 if (cflag & CSTOPB)
3063 info->cor1 |= Cy_2_STOP; 3099 info->cor1 |= Cy_2_STOP;
3064 } 3100
3065 if (cflag & PARENB) { 3101 if (cflag & PARENB) {
3066 if (cflag & PARODD) { 3102 if (cflag & PARODD)
3067 info->cor1 |= CyPARITY_O; 3103 info->cor1 |= CyPARITY_O;
3068 } else { 3104 else
3069 info->cor1 |= CyPARITY_E; 3105 info->cor1 |= CyPARITY_E;
3070 } 3106 } else
3071 } else {
3072 info->cor1 |= CyPARITY_NONE; 3107 info->cor1 |= CyPARITY_NONE;
3073 }
3074 3108
3075 /* CTS flow control flag */ 3109 /* CTS flow control flag */
3076 if (cflag & CRTSCTS) { 3110 if (cflag & CRTSCTS) {
@@ -3123,7 +3157,8 @@ static void set_line_char(struct cyclades_port *info)
3123 cyy_issue_cmd(base_addr, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch | 3157 cyy_issue_cmd(base_addr, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch |
3124 CyCOR3ch, index); 3158 CyCOR3ch, index);
3125 3159
3126 cy_writeb(base_addr + (CyCAR << index), (u_char) channel); /* !!! Is this needed? */ 3160 /* !!! Is this needed? */
3161 cy_writeb(base_addr + (CyCAR << index), (u_char) channel);
3127 cy_writeb(base_addr + (CyRTPR << index), 3162 cy_writeb(base_addr + (CyRTPR << index),
3128 (info->default_timeout ? info->default_timeout : 0x02)); 3163 (info->default_timeout ? info->default_timeout : 0x02));
3129 /* 10ms rx timeout */ 3164 /* 10ms rx timeout */
@@ -3191,9 +3226,8 @@ static void set_line_char(struct cyclades_port *info)
3191#endif 3226#endif
3192 } 3227 }
3193 3228
3194 if (info->tty) { 3229 if (info->tty)
3195 clear_bit(TTY_IO_ERROR, &info->tty->flags); 3230 clear_bit(TTY_IO_ERROR, &info->tty->flags);
3196 }
3197 spin_unlock_irqrestore(&card->card_lock, flags); 3231 spin_unlock_irqrestore(&card->card_lock, flags);
3198 3232
3199 } else { 3233 } else {
@@ -3206,9 +3240,8 @@ static void set_line_char(struct cyclades_port *info)
3206 int retval; 3240 int retval;
3207 3241
3208 firm_id = card->base_addr + ID_ADDRESS; 3242 firm_id = card->base_addr + ID_ADDRESS;
3209 if (!ISZLOADED(*card)) { 3243 if (!ISZLOADED(*card))
3210 return; 3244 return;
3211 }
3212 3245
3213 zfw_ctrl = card->base_addr + 3246 zfw_ctrl = card->base_addr +
3214 (readl(&firm_id->zfwctrl_addr) & 0xfffff); 3247 (readl(&firm_id->zfwctrl_addr) & 0xfffff);
@@ -3268,14 +3301,12 @@ static void set_line_char(struct cyclades_port *info)
3268 readl(&ch_ctrl->comm_data_l) | C_DL_1STOP); 3301 readl(&ch_ctrl->comm_data_l) | C_DL_1STOP);
3269 } 3302 }
3270 if (cflag & PARENB) { 3303 if (cflag & PARENB) {
3271 if (cflag & PARODD) { 3304 if (cflag & PARODD)
3272 cy_writel(&ch_ctrl->comm_parity, C_PR_ODD); 3305 cy_writel(&ch_ctrl->comm_parity, C_PR_ODD);
3273 } else { 3306 else
3274 cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN); 3307 cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN);
3275 } 3308 } else
3276 } else {
3277 cy_writel(&ch_ctrl->comm_parity, C_PR_NONE); 3309 cy_writel(&ch_ctrl->comm_parity, C_PR_NONE);
3278 }
3279 3310
3280 /* CTS flow control flag */ 3311 /* CTS flow control flag */
3281 if (cflag & CRTSCTS) { 3312 if (cflag & CRTSCTS) {
@@ -3305,11 +3336,10 @@ static void set_line_char(struct cyclades_port *info)
3305 } 3336 }
3306 3337
3307 /* CD sensitivity */ 3338 /* CD sensitivity */
3308 if (cflag & CLOCAL) { 3339 if (cflag & CLOCAL)
3309 info->flags &= ~ASYNC_CHECK_CD; 3340 info->flags &= ~ASYNC_CHECK_CD;
3310 } else { 3341 else
3311 info->flags |= ASYNC_CHECK_CD; 3342 info->flags |= ASYNC_CHECK_CD;
3312 }
3313 3343
3314 if (baud == 0) { /* baud rate is zero, turn off line */ 3344 if (baud == 0) { /* baud rate is zero, turn off line */
3315 cy_writel(&ch_ctrl->rs_control, 3345 cy_writel(&ch_ctrl->rs_control,
@@ -3325,21 +3355,20 @@ static void set_line_char(struct cyclades_port *info)
3325#endif 3355#endif
3326 } 3356 }
3327 3357
3328 retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM,0L); 3358 retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
3329 if (retval != 0) { 3359 if (retval != 0) {
3330 printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d " 3360 printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d "
3331 "was %x\n", info->line, retval); 3361 "was %x\n", info->line, retval);
3332 } 3362 }
3333 3363
3334 if (info->tty) { 3364 if (info->tty)
3335 clear_bit(TTY_IO_ERROR, &info->tty->flags); 3365 clear_bit(TTY_IO_ERROR, &info->tty->flags);
3336 }
3337 } 3366 }
3338} /* set_line_char */ 3367} /* set_line_char */
3339 3368
3340static int 3369static int
3341get_serial_info(struct cyclades_port *info, 3370get_serial_info(struct cyclades_port *info,
3342 struct serial_struct __user * retinfo) 3371 struct serial_struct __user *retinfo)
3343{ 3372{
3344 struct serial_struct tmp; 3373 struct serial_struct tmp;
3345 struct cyclades_card *cinfo = info->card; 3374 struct cyclades_card *cinfo = info->card;
@@ -3363,7 +3392,7 @@ get_serial_info(struct cyclades_port *info,
3363 3392
3364static int 3393static int
3365set_serial_info(struct cyclades_port *info, 3394set_serial_info(struct cyclades_port *info,
3366 struct serial_struct __user * new_info) 3395 struct serial_struct __user *new_info)
3367{ 3396{
3368 struct serial_struct new_serial; 3397 struct serial_struct new_serial;
3369 struct cyclades_port old_info; 3398 struct cyclades_port old_info;
@@ -3417,7 +3446,7 @@ check_and_exit:
3417 * transmit holding register is empty. This functionality 3446 * transmit holding register is empty. This functionality
3418 * allows an RS485 driver to be written in user space. 3447 * allows an RS485 driver to be written in user space.
3419 */ 3448 */
3420static int get_lsr_info(struct cyclades_port *info, unsigned int __user * value) 3449static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value)
3421{ 3450{
3422 struct cyclades_card *card; 3451 struct cyclades_card *card;
3423 int chip, channel, index; 3452 int chip, channel, index;
@@ -3461,9 +3490,11 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
3461 struct BOARD_CTRL __iomem *board_ctrl; 3490 struct BOARD_CTRL __iomem *board_ctrl;
3462 struct CH_CTRL __iomem *ch_ctrl; 3491 struct CH_CTRL __iomem *ch_ctrl;
3463 3492
3464 if (serial_paranoia_check(info, tty->name, __FUNCTION__)) 3493 if (serial_paranoia_check(info, tty->name, __func__))
3465 return -ENODEV; 3494 return -ENODEV;
3466 3495
3496 lock_kernel();
3497
3467 card = info->card; 3498 card = info->card;
3468 channel = info->line - card->first_line; 3499 channel = info->line - card->first_line;
3469 if (!IS_CYC_Z(*card)) { 3500 if (!IS_CYC_Z(*card)) {
@@ -3506,10 +3537,12 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
3506 ((lstatus & C_RS_CTS) ? TIOCM_CTS : 0); 3537 ((lstatus & C_RS_CTS) ? TIOCM_CTS : 0);
3507 } else { 3538 } else {
3508 result = 0; 3539 result = 0;
3540 unlock_kernel();
3509 return -ENODEV; 3541 return -ENODEV;
3510 } 3542 }
3511 3543
3512 } 3544 }
3545 unlock_kernel();
3513 return result; 3546 return result;
 3514} /* cy_tiocmget */ 3547} /* cy_tiocmget */
3515 3548
@@ -3528,7 +3561,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file,
3528 struct CH_CTRL __iomem *ch_ctrl; 3561 struct CH_CTRL __iomem *ch_ctrl;
3529 int retval; 3562 int retval;
3530 3563
3531 if (serial_paranoia_check(info, tty->name, __FUNCTION__)) 3564 if (serial_paranoia_check(info, tty->name, __func__))
3532 return -ENODEV; 3565 return -ENODEV;
3533 3566
3534 card = info->card; 3567 card = info->card;
@@ -3727,8 +3760,8 @@ static void cy_break(struct tty_struct *tty, int break_state)
3727 spin_unlock_irqrestore(&card->card_lock, flags); 3760 spin_unlock_irqrestore(&card->card_lock, flags);
3728} /* cy_break */ 3761} /* cy_break */
3729 3762
3730static int 3763static int get_mon_info(struct cyclades_port *info,
3731get_mon_info(struct cyclades_port *info, struct cyclades_monitor __user * mon) 3764 struct cyclades_monitor __user *mon)
3732{ 3765{
3733 3766
3734 if (copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor))) 3767 if (copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor)))
@@ -3767,8 +3800,8 @@ static int set_threshold(struct cyclades_port *info, unsigned long value)
3767 return 0; 3800 return 0;
3768} /* set_threshold */ 3801} /* set_threshold */
3769 3802
3770static int 3803static int get_threshold(struct cyclades_port *info,
3771get_threshold(struct cyclades_port *info, unsigned long __user * value) 3804 unsigned long __user *value)
3772{ 3805{
3773 struct cyclades_card *card; 3806 struct cyclades_card *card;
3774 void __iomem *base_addr; 3807 void __iomem *base_addr;
@@ -3789,15 +3822,15 @@ get_threshold(struct cyclades_port *info, unsigned long __user * value)
3789 return 0; 3822 return 0;
3790} /* get_threshold */ 3823} /* get_threshold */
3791 3824
3792static int 3825static int set_default_threshold(struct cyclades_port *info,
3793set_default_threshold(struct cyclades_port *info, unsigned long value) 3826 unsigned long value)
3794{ 3827{
3795 info->default_threshold = value & 0x0f; 3828 info->default_threshold = value & 0x0f;
3796 return 0; 3829 return 0;
3797} /* set_default_threshold */ 3830} /* set_default_threshold */
3798 3831
3799static int 3832static int get_default_threshold(struct cyclades_port *info,
3800get_default_threshold(struct cyclades_port *info, unsigned long __user * value) 3833 unsigned long __user *value)
3801{ 3834{
3802 return put_user(info->default_threshold, value); 3835 return put_user(info->default_threshold, value);
3803} /* get_default_threshold */ 3836} /* get_default_threshold */
@@ -3824,7 +3857,8 @@ static int set_timeout(struct cyclades_port *info, unsigned long value)
3824 return 0; 3857 return 0;
3825} /* set_timeout */ 3858} /* set_timeout */
3826 3859
3827static int get_timeout(struct cyclades_port *info, unsigned long __user * value) 3860static int get_timeout(struct cyclades_port *info,
3861 unsigned long __user *value)
3828{ 3862{
3829 struct cyclades_card *card; 3863 struct cyclades_card *card;
3830 void __iomem *base_addr; 3864 void __iomem *base_addr;
@@ -3851,8 +3885,8 @@ static int set_default_timeout(struct cyclades_port *info, unsigned long value)
3851 return 0; 3885 return 0;
3852} /* set_default_timeout */ 3886} /* set_default_timeout */
3853 3887
3854static int 3888static int get_default_timeout(struct cyclades_port *info,
3855get_default_timeout(struct cyclades_port *info, unsigned long __user * value) 3889 unsigned long __user *value)
3856{ 3890{
3857 return put_user(info->default_timeout, value); 3891 return put_user(info->default_timeout, value);
3858} /* get_default_timeout */ 3892} /* get_default_timeout */
@@ -3880,6 +3914,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
3880 printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n", 3914 printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n",
3881 info->line, cmd, arg); 3915 info->line, cmd, arg);
3882#endif 3916#endif
3917 lock_kernel();
3883 3918
3884 switch (cmd) { 3919 switch (cmd) {
3885 case CYGETMON: 3920 case CYGETMON:
@@ -3936,7 +3971,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
3936 break; 3971 break;
3937#endif /* CONFIG_CYZ_INTR */ 3972#endif /* CONFIG_CYZ_INTR */
3938 case CYSETWAIT: 3973 case CYSETWAIT:
3939 info->closing_wait = (unsigned short)arg *HZ / 100; 3974 info->closing_wait = (unsigned short)arg * HZ / 100;
3940 ret_val = 0; 3975 ret_val = 0;
3941 break; 3976 break;
3942 case CYGETWAIT: 3977 case CYGETWAIT:
@@ -3988,47 +4023,47 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
3988 p_cuser = argp; 4023 p_cuser = argp;
3989 ret_val = put_user(cnow.cts, &p_cuser->cts); 4024 ret_val = put_user(cnow.cts, &p_cuser->cts);
3990 if (ret_val) 4025 if (ret_val)
3991 return ret_val; 4026 break;
3992 ret_val = put_user(cnow.dsr, &p_cuser->dsr); 4027 ret_val = put_user(cnow.dsr, &p_cuser->dsr);
3993 if (ret_val) 4028 if (ret_val)
3994 return ret_val; 4029 break;
3995 ret_val = put_user(cnow.rng, &p_cuser->rng); 4030 ret_val = put_user(cnow.rng, &p_cuser->rng);
3996 if (ret_val) 4031 if (ret_val)
3997 return ret_val; 4032 break;
3998 ret_val = put_user(cnow.dcd, &p_cuser->dcd); 4033 ret_val = put_user(cnow.dcd, &p_cuser->dcd);
3999 if (ret_val) 4034 if (ret_val)
4000 return ret_val; 4035 break;
4001 ret_val = put_user(cnow.rx, &p_cuser->rx); 4036 ret_val = put_user(cnow.rx, &p_cuser->rx);
4002 if (ret_val) 4037 if (ret_val)
4003 return ret_val; 4038 break;
4004 ret_val = put_user(cnow.tx, &p_cuser->tx); 4039 ret_val = put_user(cnow.tx, &p_cuser->tx);
4005 if (ret_val) 4040 if (ret_val)
4006 return ret_val; 4041 break;
4007 ret_val = put_user(cnow.frame, &p_cuser->frame); 4042 ret_val = put_user(cnow.frame, &p_cuser->frame);
4008 if (ret_val) 4043 if (ret_val)
4009 return ret_val; 4044 break;
4010 ret_val = put_user(cnow.overrun, &p_cuser->overrun); 4045 ret_val = put_user(cnow.overrun, &p_cuser->overrun);
4011 if (ret_val) 4046 if (ret_val)
4012 return ret_val; 4047 break;
4013 ret_val = put_user(cnow.parity, &p_cuser->parity); 4048 ret_val = put_user(cnow.parity, &p_cuser->parity);
4014 if (ret_val) 4049 if (ret_val)
4015 return ret_val; 4050 break;
4016 ret_val = put_user(cnow.brk, &p_cuser->brk); 4051 ret_val = put_user(cnow.brk, &p_cuser->brk);
4017 if (ret_val) 4052 if (ret_val)
4018 return ret_val; 4053 break;
4019 ret_val = put_user(cnow.buf_overrun, &p_cuser->buf_overrun); 4054 ret_val = put_user(cnow.buf_overrun, &p_cuser->buf_overrun);
4020 if (ret_val) 4055 if (ret_val)
4021 return ret_val; 4056 break;
4022 ret_val = 0; 4057 ret_val = 0;
4023 break; 4058 break;
4024 default: 4059 default:
4025 ret_val = -ENOIOCTLCMD; 4060 ret_val = -ENOIOCTLCMD;
4026 } 4061 }
4062 unlock_kernel();
4027 4063
4028#ifdef CY_DEBUG_OTHER 4064#ifdef CY_DEBUG_OTHER
4029 printk(KERN_DEBUG "cyc:cy_ioctl done\n"); 4065 printk(KERN_DEBUG "cyc:cy_ioctl done\n");
4030#endif 4066#endif
4031
4032 return ret_val; 4067 return ret_val;
4033} /* cy_ioctl */ 4068} /* cy_ioctl */
4034 4069
@@ -4113,9 +4148,8 @@ static void cy_throttle(struct tty_struct *tty)
4113 tty->ldisc.chars_in_buffer(tty), info->line); 4148 tty->ldisc.chars_in_buffer(tty), info->line);
4114#endif 4149#endif
4115 4150
4116 if (serial_paranoia_check(info, tty->name, "cy_throttle")) { 4151 if (serial_paranoia_check(info, tty->name, "cy_throttle"))
4117 return; 4152 return;
4118 }
4119 4153
4120 card = info->card; 4154 card = info->card;
4121 4155
@@ -4169,12 +4203,11 @@ static void cy_unthrottle(struct tty_struct *tty)
4169 char buf[64]; 4203 char buf[64];
4170 4204
4171 printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n", 4205 printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n",
4172 tty_name(tty, buf), tty->ldisc.chars_in_buffer(tty),info->line); 4206 tty_name(tty, buf), tty_chars_in_buffer(tty), info->line);
4173#endif 4207#endif
4174 4208
4175 if (serial_paranoia_check(info, tty->name, "cy_unthrottle")) { 4209 if (serial_paranoia_check(info, tty->name, "cy_unthrottle"))
4176 return; 4210 return;
4177 }
4178 4211
4179 if (I_IXOFF(tty)) { 4212 if (I_IXOFF(tty)) {
4180 if (info->x_char) 4213 if (info->x_char)
@@ -4269,47 +4302,14 @@ static void cy_start(struct tty_struct *tty)
4269 base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index); 4302 base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index);
4270 4303
4271 spin_lock_irqsave(&cinfo->card_lock, flags); 4304 spin_lock_irqsave(&cinfo->card_lock, flags);
4272 cy_writeb(base_addr + (CyCAR << index), (u_char) (channel & 0x0003)); /* index channel */ 4305 cy_writeb(base_addr + (CyCAR << index),
4306 (u_char) (channel & 0x0003)); /* index channel */
4273 cy_writeb(base_addr + (CySRER << index), 4307 cy_writeb(base_addr + (CySRER << index),
4274 readb(base_addr + (CySRER << index)) | CyTxRdy); 4308 readb(base_addr + (CySRER << index)) | CyTxRdy);
4275 spin_unlock_irqrestore(&cinfo->card_lock, flags); 4309 spin_unlock_irqrestore(&cinfo->card_lock, flags);
4276 } 4310 }
4277} /* cy_start */ 4311} /* cy_start */
4278 4312
4279static void cy_flush_buffer(struct tty_struct *tty)
4280{
4281 struct cyclades_port *info = tty->driver_data;
4282 struct cyclades_card *card;
4283 int channel, retval;
4284 unsigned long flags;
4285
4286#ifdef CY_DEBUG_IO
4287 printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line);
4288#endif
4289
4290 if (serial_paranoia_check(info, tty->name, "cy_flush_buffer"))
4291 return;
4292
4293 card = info->card;
4294 channel = info->line - card->first_line;
4295
4296 spin_lock_irqsave(&card->card_lock, flags);
4297 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
4298 spin_unlock_irqrestore(&card->card_lock, flags);
4299
4300 if (IS_CYC_Z(*card)) { /* If it is a Z card, flush the on-board
4301 buffers as well */
4302 spin_lock_irqsave(&card->card_lock, flags);
4303 retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
4304 if (retval != 0) {
4305 printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d "
4306 "was %x\n", info->line, retval);
4307 }
4308 spin_unlock_irqrestore(&card->card_lock, flags);
4309 }
4310 tty_wakeup(tty);
4311} /* cy_flush_buffer */
4312
4313/* 4313/*
4314 * cy_hangup() --- called by tty_hangup() when a hangup is signaled. 4314 * cy_hangup() --- called by tty_hangup() when a hangup is signaled.
4315 */ 4315 */
@@ -4406,10 +4406,11 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
4406 info->cor3 = 0x08; /* _very_ small rcv threshold */ 4406 info->cor3 = 0x08; /* _very_ small rcv threshold */
4407 4407
4408 chip_number = (port - cinfo->first_line) / 4; 4408 chip_number = (port - cinfo->first_line) / 4;
4409 if ((info->chip_rev = readb(cinfo->base_addr + 4409 info->chip_rev = readb(cinfo->base_addr +
4410 (cy_chip_offset[chip_number] << 4410 (cy_chip_offset[chip_number] << index) +
4411 index) + (CyGFRCR << index))) >= 4411 (CyGFRCR << index));
4412 CD1400_REV_J) { 4412
4413 if (info->chip_rev >= CD1400_REV_J) {
4413 /* It is a CD1400 rev. J or later */ 4414 /* It is a CD1400 rev. J or later */
4414 info->tbpr = baud_bpr_60[13]; /* Tx BPR */ 4415 info->tbpr = baud_bpr_60[13]; /* Tx BPR */
4415 info->tco = baud_co_60[13]; /* Tx CO */ 4416 info->tco = baud_co_60[13]; /* Tx CO */
@@ -4454,7 +4455,8 @@ static unsigned short __devinit cyy_init_card(void __iomem *true_base_addr,
4454 /* Cy_ClrIntr is 0x1800 */ 4455 /* Cy_ClrIntr is 0x1800 */
4455 udelay(500L); 4456 udelay(500L);
4456 4457
4457 for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD; chip_number++) { 4458 for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD;
4459 chip_number++) {
4458 base_addr = 4460 base_addr =
4459 true_base_addr + (cy_chip_offset[chip_number] << index); 4461 true_base_addr + (cy_chip_offset[chip_number] << index);
4460 mdelay(1); 4462 mdelay(1);
@@ -4555,12 +4557,11 @@ static int __init cy_detect_isa(void)
4555 /* scan the address table probing for Cyclom-Y/ISA boards */ 4557 /* scan the address table probing for Cyclom-Y/ISA boards */
4556 for (i = 0; i < NR_ISA_ADDRS; i++) { 4558 for (i = 0; i < NR_ISA_ADDRS; i++) {
4557 unsigned int isa_address = cy_isa_addresses[i]; 4559 unsigned int isa_address = cy_isa_addresses[i];
4558 if (isa_address == 0x0000) { 4560 if (isa_address == 0x0000)
4559 return nboard; 4561 return nboard;
4560 }
4561 4562
4562 /* probe for CD1400... */ 4563 /* probe for CD1400... */
4563 cy_isa_address = ioremap(isa_address, CyISA_Ywin); 4564 cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin);
4564 if (cy_isa_address == NULL) { 4565 if (cy_isa_address == NULL) {
4565 printk(KERN_ERR "Cyclom-Y/ISA: can't remap base " 4566 printk(KERN_ERR "Cyclom-Y/ISA: can't remap base "
4566 "address\n"); 4567 "address\n");
@@ -4847,12 +4848,10 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4847 if (mailbox != 0) { 4848 if (mailbox != 0) {
4848 /* set window to last 512K of RAM */ 4849 /* set window to last 512K of RAM */
4849 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE); 4850 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
4850 //sleep(1);
4851 for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++) 4851 for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
4852 cy_writeb(tmp, 255); 4852 cy_writeb(tmp, 255);
4853 /* set window to beginning of RAM */ 4853 /* set window to beginning of RAM */
4854 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); 4854 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
4855 //sleep(1);
4856 } 4855 }
4857 4856
4858 retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL); 4857 retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
@@ -5382,7 +5381,8 @@ static void __exit cy_cleanup_module(void)
5382 del_timer_sync(&cyz_timerlist); 5381 del_timer_sync(&cyz_timerlist);
5383#endif /* CONFIG_CYZ_INTR */ 5382#endif /* CONFIG_CYZ_INTR */
5384 5383
5385 if ((e1 = tty_unregister_driver(cy_serial_driver))) 5384 e1 = tty_unregister_driver(cy_serial_driver);
5385 if (e1)
5386 printk(KERN_ERR "failed to unregister Cyclades serial " 5386 printk(KERN_ERR "failed to unregister Cyclades serial "
5387 "driver(%d)\n", e1); 5387 "driver(%d)\n", e1);
5388 5388
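Most of the cyclades.c hunks above are mechanical checkpatch-style cleanups; the recurring one pulls an assignment out of the if condition (chip_rev, e1) so the test reads as a plain truth check. The same shape, with an invented register_thing() standing in for the real calls:

struct demo;
int register_thing(struct demo *dev);   /* hypothetical */

static int demo_setup(struct demo *dev)
{
        int err;

        /* old style packed both into one line:
         *      if ((err = register_thing(dev)))
         *              return err;
         */
        err = register_thing(dev);      /* the side effect gets its own statement */
        if (err)
                return err;
        return 0;
}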
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index ecee3547a13f..213b3ca3468e 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -160,7 +160,7 @@ struct drm_device;
160 * \param arg arguments 160 * \param arg arguments
161 */ 161 */
162#define DRM_ERROR(fmt, arg...) \ 162#define DRM_ERROR(fmt, arg...) \
163 printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg) 163 printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
164 164
165/** 165/**
166 * Memory error output. 166 * Memory error output.
@@ -170,7 +170,7 @@ struct drm_device;
170 * \param arg arguments 170 * \param arg arguments
171 */ 171 */
172#define DRM_MEM_ERROR(area, fmt, arg...) \ 172#define DRM_MEM_ERROR(area, fmt, arg...) \
173 printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ 173 printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
174 drm_mem_stats[area].name , ##arg) 174 drm_mem_stats[area].name , ##arg)
175 175
176#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) 176#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
@@ -187,7 +187,7 @@ struct drm_device;
187 if ( drm_debug ) \ 187 if ( drm_debug ) \
188 printk(KERN_DEBUG \ 188 printk(KERN_DEBUG \
189 "[" DRM_NAME ":%s] " fmt , \ 189 "[" DRM_NAME ":%s] " fmt , \
190 __FUNCTION__ , ##arg); \ 190 __func__ , ##arg); \
191 } while (0) 191 } while (0)
192#else 192#else
193#define DRM_DEBUG(fmt, arg...) do { } while (0) 193#define DRM_DEBUG(fmt, arg...) do { } while (0)
@@ -238,7 +238,7 @@ do { \
238 if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ 238 if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
239 dev->lock.file_priv != file_priv ) { \ 239 dev->lock.file_priv != file_priv ) { \
240 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ 240 DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
241 __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ 241 __func__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
242 dev->lock.file_priv, file_priv ); \ 242 dev->lock.file_priv, file_priv ); \
243 return -EINVAL; \ 243 return -EINVAL; \
244 } \ 244 } \
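The drmP.h changes, and the i830/i915/radeon hunks that follow, swap the GNU-specific __FUNCTION__ for C99's __func__; both expand to the enclosing function's name, __func__ is simply the standard spelling. A tiny user-space sketch of the same macro shape (MY_DEBUG and the message are invented; the named-variadic `arg...` form mirrors the kernel macros and is a GNU extension):

#include <stdio.h>

#define MY_DEBUG(fmt, arg...) \
        printf("[demo:%s] " fmt , __func__ , ##arg)

static void init_ring(void)
{
        MY_DEBUG("ring size %d\n", 4096);   /* -> "[demo:init_ring] ring size 4096" */
}

int main(void)
{
        init_ring();
        return 0;
}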
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index 7a1d9a782ddb..9a32169e88fb 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -34,7 +34,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
34 struct drm_minor *drm_minor = to_drm_minor(dev); 34 struct drm_minor *drm_minor = to_drm_minor(dev);
35 struct drm_device *drm_dev = drm_minor->dev; 35 struct drm_device *drm_dev = drm_minor->dev;
36 36
37 printk(KERN_ERR "%s\n", __FUNCTION__); 37 printk(KERN_ERR "%s\n", __func__);
38 38
39 if (drm_dev->driver->suspend) 39 if (drm_dev->driver->suspend)
40 return drm_dev->driver->suspend(drm_dev, state); 40 return drm_dev->driver->suspend(drm_dev, state);
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 60c9376be486..a86ab30b4620 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -692,7 +692,7 @@ static void i830EmitState(struct drm_device * dev)
692 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; 692 drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
693 unsigned int dirty = sarea_priv->dirty; 693 unsigned int dirty = sarea_priv->dirty;
694 694
695 DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); 695 DRM_DEBUG("%s %x\n", __func__, dirty);
696 696
697 if (dirty & I830_UPLOAD_BUFFERS) { 697 if (dirty & I830_UPLOAD_BUFFERS) {
698 i830EmitDestVerified(dev, sarea_priv->BufferState); 698 i830EmitDestVerified(dev, sarea_priv->BufferState);
@@ -1043,7 +1043,7 @@ static void i830_dma_dispatch_flip(struct drm_device * dev)
1043 RING_LOCALS; 1043 RING_LOCALS;
1044 1044
1045 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 1045 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
1046 __FUNCTION__, 1046 __func__,
1047 dev_priv->current_page, 1047 dev_priv->current_page,
1048 dev_priv->sarea_priv->pf_current_page); 1048 dev_priv->sarea_priv->pf_current_page);
1049 1049
@@ -1206,7 +1206,7 @@ static void i830_dma_quiescent(struct drm_device * dev)
1206 OUT_RING(0); 1206 OUT_RING(0);
1207 ADVANCE_LP_RING(); 1207 ADVANCE_LP_RING();
1208 1208
1209 i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); 1209 i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
1210} 1210}
1211 1211
1212static int i830_flush_queue(struct drm_device * dev) 1212static int i830_flush_queue(struct drm_device * dev)
@@ -1223,7 +1223,7 @@ static int i830_flush_queue(struct drm_device * dev)
1223 OUT_RING(0); 1223 OUT_RING(0);
1224 ADVANCE_LP_RING(); 1224 ADVANCE_LP_RING();
1225 1225
1226 i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); 1226 i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
1227 1227
1228 for (i = 0; i < dma->buf_count; i++) { 1228 for (i = 0; i < dma->buf_count; i++) {
1229 struct drm_buf *buf = dma->buflist[i]; 1229 struct drm_buf *buf = dma->buflist[i];
@@ -1344,7 +1344,7 @@ static void i830_do_init_pageflip(struct drm_device * dev)
1344{ 1344{
1345 drm_i830_private_t *dev_priv = dev->dev_private; 1345 drm_i830_private_t *dev_priv = dev->dev_private;
1346 1346
1347 DRM_DEBUG("%s\n", __FUNCTION__); 1347 DRM_DEBUG("%s\n", __func__);
1348 dev_priv->page_flipping = 1; 1348 dev_priv->page_flipping = 1;
1349 dev_priv->current_page = 0; 1349 dev_priv->current_page = 0;
1350 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 1350 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
@@ -1354,7 +1354,7 @@ static int i830_do_cleanup_pageflip(struct drm_device * dev)
1354{ 1354{
1355 drm_i830_private_t *dev_priv = dev->dev_private; 1355 drm_i830_private_t *dev_priv = dev->dev_private;
1356 1356
1357 DRM_DEBUG("%s\n", __FUNCTION__); 1357 DRM_DEBUG("%s\n", __func__);
1358 if (dev_priv->current_page != 0) 1358 if (dev_priv->current_page != 0)
1359 i830_dma_dispatch_flip(dev); 1359 i830_dma_dispatch_flip(dev);
1360 1360
@@ -1367,7 +1367,7 @@ static int i830_flip_bufs(struct drm_device *dev, void *data,
1367{ 1367{
1368 drm_i830_private_t *dev_priv = dev->dev_private; 1368 drm_i830_private_t *dev_priv = dev->dev_private;
1369 1369
1370 DRM_DEBUG("%s\n", __FUNCTION__); 1370 DRM_DEBUG("%s\n", __func__);
1371 1371
1372 LOCK_TEST_WITH_RETURN(dev, file_priv); 1372 LOCK_TEST_WITH_RETURN(dev, file_priv);
1373 1373
@@ -1437,7 +1437,7 @@ static int i830_getparam(struct drm_device *dev, void *data,
1437 int value; 1437 int value;
1438 1438
1439 if (!dev_priv) { 1439 if (!dev_priv) {
1440 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1440 DRM_ERROR("%s called with no initialization\n", __func__);
1441 return -EINVAL; 1441 return -EINVAL;
1442 } 1442 }
1443 1443
@@ -1464,7 +1464,7 @@ static int i830_setparam(struct drm_device *dev, void *data,
1464 drm_i830_setparam_t *param = data; 1464 drm_i830_setparam_t *param = data;
1465 1465
1466 if (!dev_priv) { 1466 if (!dev_priv) {
1467 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1467 DRM_ERROR("%s called with no initialization\n", __func__);
1468 return -EINVAL; 1468 return -EINVAL;
1469 } 1469 }
1470 1470
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index 4caba8c54455..b5bf8cc0fdaa 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -158,7 +158,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
158 if (I830_VERBOSE) \ 158 if (I830_VERBOSE) \
159 printk("BEGIN_LP_RING(%d)\n", (n)); \ 159 printk("BEGIN_LP_RING(%d)\n", (n)); \
160 if (dev_priv->ring.space < n*4) \ 160 if (dev_priv->ring.space < n*4) \
161 i830_wait_ring(dev, n*4, __FUNCTION__); \ 161 i830_wait_ring(dev, n*4, __func__); \
162 outcount = 0; \ 162 outcount = 0; \
163 outring = dev_priv->ring.tail; \ 163 outring = dev_priv->ring.tail; \
164 ringmask = dev_priv->ring.tail_mask; \ 164 ringmask = dev_priv->ring.tail_mask; \
diff --git a/drivers/char/drm/i830_irq.c b/drivers/char/drm/i830_irq.c
index a33db5f0967f..91ec2bb497e9 100644
--- a/drivers/char/drm/i830_irq.c
+++ b/drivers/char/drm/i830_irq.c
@@ -58,7 +58,7 @@ static int i830_emit_irq(struct drm_device * dev)
58 drm_i830_private_t *dev_priv = dev->dev_private; 58 drm_i830_private_t *dev_priv = dev->dev_private;
59 RING_LOCALS; 59 RING_LOCALS;
60 60
61 DRM_DEBUG("%s\n", __FUNCTION__); 61 DRM_DEBUG("%s\n", __func__);
62 62
63 atomic_inc(&dev_priv->irq_emitted); 63 atomic_inc(&dev_priv->irq_emitted);
64 64
@@ -77,7 +77,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
77 unsigned long end = jiffies + HZ * 3; 77 unsigned long end = jiffies + HZ * 3;
78 int ret = 0; 78 int ret = 0;
79 79
80 DRM_DEBUG("%s\n", __FUNCTION__); 80 DRM_DEBUG("%s\n", __func__);
81 81
82 if (atomic_read(&dev_priv->irq_received) >= irq_nr) 82 if (atomic_read(&dev_priv->irq_received) >= irq_nr)
83 return 0; 83 return 0;
@@ -124,7 +124,7 @@ int i830_irq_emit(struct drm_device *dev, void *data,
124 LOCK_TEST_WITH_RETURN(dev, file_priv); 124 LOCK_TEST_WITH_RETURN(dev, file_priv);
125 125
126 if (!dev_priv) { 126 if (!dev_priv) {
127 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 127 DRM_ERROR("%s called with no initialization\n", __func__);
128 return -EINVAL; 128 return -EINVAL;
129 } 129 }
130 130
@@ -147,7 +147,7 @@ int i830_irq_wait(struct drm_device *dev, void *data,
147 drm_i830_irq_wait_t *irqwait = data; 147 drm_i830_irq_wait_t *irqwait = data;
148 148
149 if (!dev_priv) { 149 if (!dev_priv) {
150 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 150 DRM_ERROR("%s called with no initialization\n", __func__);
151 return -EINVAL; 151 return -EINVAL;
152 } 152 }
153 153
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index ef7bf143a80c..f47e46e3529f 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -194,7 +194,7 @@ static int i915_dma_resume(struct drm_device * dev)
194{ 194{
195 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 195 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
196 196
197 DRM_DEBUG("%s\n", __FUNCTION__); 197 DRM_DEBUG("%s\n", __func__);
198 198
199 if (!dev_priv->sarea) { 199 if (!dev_priv->sarea) {
200 DRM_ERROR("can not find sarea!\n"); 200 DRM_ERROR("can not find sarea!\n");
@@ -609,7 +609,7 @@ static int i915_quiescent(struct drm_device * dev)
609 drm_i915_private_t *dev_priv = dev->dev_private; 609 drm_i915_private_t *dev_priv = dev->dev_private;
610 610
611 i915_kernel_lost_context(dev); 611 i915_kernel_lost_context(dev);
612 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); 612 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
613} 613}
614 614
615static int i915_flush_ioctl(struct drm_device *dev, void *data, 615static int i915_flush_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index c614d78b3dfd..db7001f22561 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -272,7 +272,7 @@ extern void i915_mem_release(struct drm_device * dev,
272 if (I915_VERBOSE) \ 272 if (I915_VERBOSE) \
273 DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ 273 DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
274 if (dev_priv->ring.space < (n)*4) \ 274 if (dev_priv->ring.space < (n)*4) \
275 i915_wait_ring(dev, (n)*4, __FUNCTION__); \ 275 i915_wait_ring(dev, (n)*4, __func__); \
276 outcount = 0; \ 276 outcount = 0; \
277 outring = dev_priv->ring.tail; \ 277 outring = dev_priv->ring.tail; \
278 ringmask = dev_priv->ring.tail_mask; \ 278 ringmask = dev_priv->ring.tail_mask; \
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index f36adbd3aaf5..c31afbde62e7 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -817,7 +817,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev)
817 for (i = 0; i < dma->buf_count; i++) { 817 for (i = 0; i < dma->buf_count; i++) {
818 buf = dma->buflist[i]; 818 buf = dma->buflist[i];
819 buf_priv = buf->dev_private; 819 buf_priv = buf->dev_private;
820 if (buf->file_priv == 0) 820 if (!buf->file_priv)
821 return buf; 821 return buf;
822 } 822 }
823 823
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 9072e4a1894e..f6f6c92bf771 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -894,7 +894,7 @@ static u32 RADEON_READ_IGPGART(drm_radeon_private_t *dev_priv, int addr)
894#if RADEON_FIFO_DEBUG 894#if RADEON_FIFO_DEBUG
895static void radeon_status(drm_radeon_private_t * dev_priv) 895static void radeon_status(drm_radeon_private_t * dev_priv)
896{ 896{
897 printk("%s:\n", __FUNCTION__); 897 printk("%s:\n", __func__);
898 printk("RBBM_STATUS = 0x%08x\n", 898 printk("RBBM_STATUS = 0x%08x\n",
899 (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); 899 (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
900 printk("CP_RB_RTPR = 0x%08x\n", 900 printk("CP_RB_RTPR = 0x%08x\n",
diff --git a/drivers/char/ds1286.c b/drivers/char/ds1286.c
index 59146e3365ba..ea35ab2c9909 100644
--- a/drivers/char/ds1286.c
+++ b/drivers/char/ds1286.c
@@ -39,6 +39,7 @@
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40#include <linux/bcd.h> 40#include <linux/bcd.h>
41#include <linux/proc_fs.h> 41#include <linux/proc_fs.h>
42#include <linux/jiffies.h>
42 43
43#include <asm/uaccess.h> 44#include <asm/uaccess.h>
44#include <asm/system.h> 45#include <asm/system.h>
@@ -451,7 +452,7 @@ static void ds1286_get_time(struct rtc_time *rtc_tm)
451 */ 452 */
452 453
453 if (ds1286_is_updating() != 0) 454 if (ds1286_is_updating() != 0)
454 while (jiffies - uip_watchdog < 2*HZ/100) 455 while (time_before(jiffies, uip_watchdog + 2*HZ/100))
455 barrier(); 456 barrier();
456 457
457 /* 458 /*
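The ds1286 hunk pulls in <linux/jiffies.h> and rewrites the open-coded `jiffies - uip_watchdog < 2*HZ/100` loop as time_before(), the standard idiom: the macro compares through a signed subtraction, so it stays correct across a jiffies wrap and makes the intent obvious. A minimal sketch (still_waiting() is an invented helper):

#include <linux/jiffies.h>

/* True while we are still within "timeout" ticks of "start", even if
 * the jiffies counter wraps in between. */
static inline int still_waiting(unsigned long start, unsigned long timeout)
{
        return time_before(jiffies, start + timeout);
}

time_after(), time_after_eq() and time_before_eq() cover the other comparisons in the same wrap-safe way.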
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index ffd747c5dff0..60a4df7dac12 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -38,8 +38,8 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/ioport.h> 39#include <linux/ioport.h>
40#include <linux/interrupt.h> 40#include <linux/interrupt.h>
41#include <asm/uaccess.h> 41#include <linux/uaccess.h>
42#include <asm/io.h> 42#include <linux/io.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/pci.h> 44#include <linux/pci.h>
45#include "digiPCI.h" 45#include "digiPCI.h"
@@ -73,7 +73,8 @@ static int invalid_lilo_config;
73 */ 73 */
74static DEFINE_SPINLOCK(epca_lock); 74static DEFINE_SPINLOCK(epca_lock);
75 75
76/* MAXBOARDS is typically 12, but ISA and EISA cards are restricted to 7 below. */ 76/* MAXBOARDS is typically 12, but ISA and EISA cards are restricted
77 to 7 below. */
77static struct board_info boards[MAXBOARDS]; 78static struct board_info boards[MAXBOARDS];
78 79
79static struct tty_driver *pc_driver; 80static struct tty_driver *pc_driver;
@@ -157,13 +158,12 @@ static void epca_error(int, char *);
157static void pc_close(struct tty_struct *, struct file *); 158static void pc_close(struct tty_struct *, struct file *);
158static void shutdown(struct channel *); 159static void shutdown(struct channel *);
159static void pc_hangup(struct tty_struct *); 160static void pc_hangup(struct tty_struct *);
160static void pc_put_char(struct tty_struct *, unsigned char);
161static int pc_write_room(struct tty_struct *); 161static int pc_write_room(struct tty_struct *);
162static int pc_chars_in_buffer(struct tty_struct *); 162static int pc_chars_in_buffer(struct tty_struct *);
163static void pc_flush_buffer(struct tty_struct *); 163static void pc_flush_buffer(struct tty_struct *);
164static void pc_flush_chars(struct tty_struct *); 164static void pc_flush_chars(struct tty_struct *);
165static int block_til_ready(struct tty_struct *, struct file *, 165static int block_til_ready(struct tty_struct *, struct file *,
166 struct channel *); 166 struct channel *);
167static int pc_open(struct tty_struct *, struct file *); 167static int pc_open(struct tty_struct *, struct file *);
168static void post_fep_init(unsigned int crd); 168static void post_fep_init(unsigned int crd);
169static void epcapoll(unsigned long); 169static void epcapoll(unsigned long);
@@ -175,18 +175,18 @@ static unsigned termios2digi_c(struct channel *ch, unsigned);
175static void epcaparam(struct tty_struct *, struct channel *); 175static void epcaparam(struct tty_struct *, struct channel *);
176static void receive_data(struct channel *); 176static void receive_data(struct channel *);
177static int pc_ioctl(struct tty_struct *, struct file *, 177static int pc_ioctl(struct tty_struct *, struct file *,
178 unsigned int, unsigned long); 178 unsigned int, unsigned long);
179static int info_ioctl(struct tty_struct *, struct file *, 179static int info_ioctl(struct tty_struct *, struct file *,
180 unsigned int, unsigned long); 180 unsigned int, unsigned long);
181static void pc_set_termios(struct tty_struct *, struct ktermios *); 181static void pc_set_termios(struct tty_struct *, struct ktermios *);
182static void do_softint(struct work_struct *work); 182static void do_softint(struct work_struct *work);
183static void pc_stop(struct tty_struct *); 183static void pc_stop(struct tty_struct *);
184static void pc_start(struct tty_struct *); 184static void pc_start(struct tty_struct *);
185static void pc_throttle(struct tty_struct * tty); 185static void pc_throttle(struct tty_struct *tty);
186static void pc_unthrottle(struct tty_struct *tty); 186static void pc_unthrottle(struct tty_struct *tty);
187static void digi_send_break(struct channel *ch, int msec); 187static void digi_send_break(struct channel *ch, int msec);
188static void setup_empty_event(struct tty_struct *tty, struct channel *ch); 188static void setup_empty_event(struct tty_struct *tty, struct channel *ch);
189void epca_setup(char *, int *); 189static void epca_setup(char *, int *);
190 190
191static int pc_write(struct tty_struct *, const unsigned char *, int); 191static int pc_write(struct tty_struct *, const unsigned char *, int);
192static int pc_init(void); 192static int pc_init(void);
@@ -243,7 +243,7 @@ static void assertmemoff(struct channel *ch)
243/* PCXEM windowing is the same as that used in the PCXR and CX series cards. */ 243/* PCXEM windowing is the same as that used in the PCXR and CX series cards. */
244static void pcxem_memwinon(struct board_info *b, unsigned int win) 244static void pcxem_memwinon(struct board_info *b, unsigned int win)
245{ 245{
246 outb_p(FEPWIN|win, b->port + 1); 246 outb_p(FEPWIN | win, b->port + 1);
247} 247}
248 248
249static void pcxem_memwinoff(struct board_info *b, unsigned int win) 249static void pcxem_memwinoff(struct board_info *b, unsigned int win)
@@ -253,7 +253,7 @@ static void pcxem_memwinoff(struct board_info *b, unsigned int win)
253 253
254static void pcxem_globalwinon(struct channel *ch) 254static void pcxem_globalwinon(struct channel *ch)
255{ 255{
256 outb_p( FEPWIN, (int)ch->board->port + 1); 256 outb_p(FEPWIN, (int)ch->board->port + 1);
257} 257}
258 258
259static void pcxem_rxwinon(struct channel *ch) 259static void pcxem_rxwinon(struct channel *ch)
@@ -394,7 +394,7 @@ static struct channel *verifyChannel(struct tty_struct *tty)
394 */ 394 */
395 if (tty) { 395 if (tty) {
396 struct channel *ch = (struct channel *)tty->driver_data; 396 struct channel *ch = (struct channel *)tty->driver_data;
397 if ((ch >= &digi_channels[0]) && (ch < &digi_channels[nbdevs])) { 397 if (ch >= &digi_channels[0] && ch < &digi_channels[nbdevs]) {
398 if (ch->magic == EPCA_MAGIC) 398 if (ch->magic == EPCA_MAGIC)
399 return ch; 399 return ch;
400 } 400 }
@@ -414,7 +414,7 @@ static void pc_sched_event(struct channel *ch, int event)
414 414
415static void epca_error(int line, char *msg) 415static void epca_error(int line, char *msg)
416{ 416{
417 printk(KERN_ERR "epca_error (Digi): line = %d %s\n",line,msg); 417 printk(KERN_ERR "epca_error (Digi): line = %d %s\n", line, msg);
418} 418}
419 419
420static void pc_close(struct tty_struct *tty, struct file *filp) 420static void pc_close(struct tty_struct *tty, struct file *filp)
@@ -425,7 +425,8 @@ static void pc_close(struct tty_struct *tty, struct file *filp)
425 * verifyChannel returns the channel from the tty struct if it is 425 * verifyChannel returns the channel from the tty struct if it is
426 * valid. This serves as a sanity check. 426 * valid. This serves as a sanity check.
427 */ 427 */
428 if ((ch = verifyChannel(tty)) != NULL) { 428 ch = verifyChannel(tty);
429 if (ch != NULL) {
429 spin_lock_irqsave(&epca_lock, flags); 430 spin_lock_irqsave(&epca_lock, flags);
430 if (tty_hung_up_p(filp)) { 431 if (tty_hung_up_p(filp)) {
431 spin_unlock_irqrestore(&epca_lock, flags); 432 spin_unlock_irqrestore(&epca_lock, flags);
@@ -440,7 +441,6 @@ static void pc_close(struct tty_struct *tty, struct file *filp)
440 spin_unlock_irqrestore(&epca_lock, flags); 441 spin_unlock_irqrestore(&epca_lock, flags);
441 return; 442 return;
442 } 443 }
443
444 /* Port open only once go ahead with shutdown & reset */ 444 /* Port open only once go ahead with shutdown & reset */
445 BUG_ON(ch->count < 0); 445 BUG_ON(ch->count < 0);
446 446
@@ -455,12 +455,13 @@ static void pc_close(struct tty_struct *tty, struct file *filp)
455 spin_unlock_irqrestore(&epca_lock, flags); 455 spin_unlock_irqrestore(&epca_lock, flags);
456 456
457 if (ch->asyncflags & ASYNC_INITIALIZED) { 457 if (ch->asyncflags & ASYNC_INITIALIZED) {
458 /* Setup an event to indicate when the transmit buffer empties */ 458 /* Setup an event to indicate when the
459 transmit buffer empties */
459 setup_empty_event(tty, ch); 460 setup_empty_event(tty, ch);
460 tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */ 461 /* 30 seconds timeout */
462 tty_wait_until_sent(tty, 3000);
461 } 463 }
462 if (tty->driver->flush_buffer) 464 pc_flush_buffer(tty);
463 tty->driver->flush_buffer(tty);
464 465
465 tty_ldisc_flush(tty); 466 tty_ldisc_flush(tty);
466 shutdown(ch); 467 shutdown(ch);
@@ -477,7 +478,7 @@ static void pc_close(struct tty_struct *tty, struct file *filp)
477 wake_up_interruptible(&ch->open_wait); 478 wake_up_interruptible(&ch->open_wait);
478 } 479 }
479 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED | 480 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED |
480 ASYNC_CLOSING); 481 ASYNC_CLOSING);
481 wake_up_interruptible(&ch->close_wait); 482 wake_up_interruptible(&ch->close_wait);
482 } 483 }
483} 484}
@@ -524,16 +525,15 @@ static void shutdown(struct channel *ch)
524static void pc_hangup(struct tty_struct *tty) 525static void pc_hangup(struct tty_struct *tty)
525{ 526{
526 struct channel *ch; 527 struct channel *ch;
527
528 /* 528 /*
529 * verifyChannel returns the channel from the tty struct if it is 529 * verifyChannel returns the channel from the tty struct if it is
530 * valid. This serves as a sanity check. 530 * valid. This serves as a sanity check.
531 */ 531 */
532 if ((ch = verifyChannel(tty)) != NULL) { 532 ch = verifyChannel(tty);
533 if (ch != NULL) {
533 unsigned long flags; 534 unsigned long flags;
534 535
535 if (tty->driver->flush_buffer) 536 pc_flush_buffer(tty);
536 tty->driver->flush_buffer(tty);
537 tty_ldisc_flush(tty); 537 tty_ldisc_flush(tty);
538 shutdown(ch); 538 shutdown(ch);
539 539
@@ -548,7 +548,7 @@ static void pc_hangup(struct tty_struct *tty)
548} 548}
549 549
550static int pc_write(struct tty_struct *tty, 550static int pc_write(struct tty_struct *tty,
551 const unsigned char *buf, int bytesAvailable) 551 const unsigned char *buf, int bytesAvailable)
552{ 552{
553 unsigned int head, tail; 553 unsigned int head, tail;
554 int dataLen; 554 int dataLen;
@@ -572,7 +572,8 @@ static int pc_write(struct tty_struct *tty,
572 * verifyChannel returns the channel from the tty struct if it is 572 * verifyChannel returns the channel from the tty struct if it is
573 * valid. This serves as a sanity check. 573 * valid. This serves as a sanity check.
574 */ 574 */
575 if ((ch = verifyChannel(tty)) == NULL) 575 ch = verifyChannel(tty);
576 if (ch == NULL)
576 return 0; 577 return 0;
577 578
578 /* Make a pointer to the channel data structure found on the board. */ 579 /* Make a pointer to the channel data structure found on the board. */
@@ -645,26 +646,19 @@ static int pc_write(struct tty_struct *tty,
645 return amountCopied; 646 return amountCopied;
646} 647}
647 648
648static void pc_put_char(struct tty_struct *tty, unsigned char c)
649{
650 pc_write(tty, &c, 1);
651}
652
653static int pc_write_room(struct tty_struct *tty) 649static int pc_write_room(struct tty_struct *tty)
654{ 650{
655 int remain; 651 int remain = 0;
656 struct channel *ch; 652 struct channel *ch;
657 unsigned long flags; 653 unsigned long flags;
658 unsigned int head, tail; 654 unsigned int head, tail;
659 struct board_chan __iomem *bc; 655 struct board_chan __iomem *bc;
660
661 remain = 0;
662
663 /* 656 /*
664 * verifyChannel returns the channel from the tty struct if it is 657 * verifyChannel returns the channel from the tty struct if it is
665 * valid. This serves as a sanity check. 658 * valid. This serves as a sanity check.
666 */ 659 */
667 if ((ch = verifyChannel(tty)) != NULL) { 660 ch = verifyChannel(tty);
661 if (ch != NULL) {
668 spin_lock_irqsave(&epca_lock, flags); 662 spin_lock_irqsave(&epca_lock, flags);
669 globalwinon(ch); 663 globalwinon(ch);
670 664
@@ -676,8 +670,8 @@ static int pc_write_room(struct tty_struct *tty)
676 tail = readw(&bc->tout); 670 tail = readw(&bc->tout);
677 /* Wrap tail if necessary */ 671 /* Wrap tail if necessary */
678 tail &= (ch->txbufsize - 1); 672 tail &= (ch->txbufsize - 1);
679 673 remain = tail - head - 1;
680 if ((remain = tail - head - 1) < 0 ) 674 if (remain < 0)
681 remain += ch->txbufsize; 675 remain += ch->txbufsize;
682 676
683 if (remain && (ch->statusflags & LOWWAIT) == 0) { 677 if (remain && (ch->statusflags & LOWWAIT) == 0) {
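pc_write_room() now computes the free space with the assignment on its own statement; the arithmetic is the usual circular-buffer formula in which one slot is sacrificed so that head == tail unambiguously means empty. A stand-alone version of the calculation (ring_space() and its arguments are illustrative):

/* Free slots in a fixed-size ring: "head" is where the producer writes
 * next, "tail" where the consumer reads next, and one slot is kept
 * unused so a full ring never looks identical to an empty one. */
static unsigned int ring_space(unsigned int head, unsigned int tail,
                               unsigned int size)
{
        int remain = (int)tail - (int)head - 1;

        if (remain < 0)
                remain += size;
        return remain;
}

The pc_chars_in_buffer() hunk just below runs the same formula and then reports size - remain, i.e. how much is still queued.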
@@ -699,12 +693,12 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
699 unsigned long flags; 693 unsigned long flags;
700 struct channel *ch; 694 struct channel *ch;
701 struct board_chan __iomem *bc; 695 struct board_chan __iomem *bc;
702
703 /* 696 /*
704 * verifyChannel returns the channel from the tty struct if it is 697 * verifyChannel returns the channel from the tty struct if it is
705 * valid. This serves as a sanity check. 698 * valid. This serves as a sanity check.
706 */ 699 */
707 if ((ch = verifyChannel(tty)) == NULL) 700 ch = verifyChannel(tty);
701 if (ch == NULL)
708 return 0; 702 return 0;
709 703
710 spin_lock_irqsave(&epca_lock, flags); 704 spin_lock_irqsave(&epca_lock, flags);
@@ -715,7 +709,8 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
715 head = readw(&bc->tin); 709 head = readw(&bc->tin);
716 ctail = readw(&ch->mailbox->cout); 710 ctail = readw(&ch->mailbox->cout);
717 711
718 if (tail == head && readw(&ch->mailbox->cin) == ctail && readb(&bc->tbusy) == 0) 712 if (tail == head && readw(&ch->mailbox->cin) == ctail &&
713 readb(&bc->tbusy) == 0)
719 chars = 0; 714 chars = 0;
720 else { /* Begin if some space on the card has been used */ 715 else { /* Begin if some space on the card has been used */
721 head = readw(&bc->tin) & (ch->txbufsize - 1); 716 head = readw(&bc->tin) & (ch->txbufsize - 1);
@@ -725,7 +720,8 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
725 * pc_write_room here we are finding the amount of bytes in the 720 * pc_write_room here we are finding the amount of bytes in the
726 * buffer filled. Not the amount of bytes empty. 721 * buffer filled. Not the amount of bytes empty.
727 */ 722 */
728 if ((remain = tail - head - 1) < 0 ) 723 remain = tail - head - 1;
724 if (remain < 0)
729 remain += ch->txbufsize; 725 remain += ch->txbufsize;
730 chars = (int)(ch->txbufsize - remain); 726 chars = (int)(ch->txbufsize - remain);
731 /* 727 /*
@@ -736,7 +732,7 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
736 * transmit buffer empties. 732 * transmit buffer empties.
737 */ 733 */
738 if (!(ch->statusflags & EMPTYWAIT)) 734 if (!(ch->statusflags & EMPTYWAIT))
739 setup_empty_event(tty,ch); 735 setup_empty_event(tty, ch);
740 } /* End if some space on the card has been used */ 736 } /* End if some space on the card has been used */
741 memoff(ch); 737 memoff(ch);
742 spin_unlock_irqrestore(&epca_lock, flags); 738 spin_unlock_irqrestore(&epca_lock, flags);
@@ -754,7 +750,8 @@ static void pc_flush_buffer(struct tty_struct *tty)
754 * verifyChannel returns the channel from the tty struct if it is 750 * verifyChannel returns the channel from the tty struct if it is
755 * valid. This serves as a sanity check. 751 * valid. This serves as a sanity check.
756 */ 752 */
757 if ((ch = verifyChannel(tty)) == NULL) 753 ch = verifyChannel(tty);
754 if (ch == NULL)
758 return; 755 return;
759 756
760 spin_lock_irqsave(&epca_lock, flags); 757 spin_lock_irqsave(&epca_lock, flags);
@@ -775,23 +772,25 @@ static void pc_flush_chars(struct tty_struct *tty)
775 * verifyChannel returns the channel from the tty struct if it is 772 * verifyChannel returns the channel from the tty struct if it is
776 * valid. This serves as a sanity check. 773 * valid. This serves as a sanity check.
777 */ 774 */
778 if ((ch = verifyChannel(tty)) != NULL) { 775 ch = verifyChannel(tty);
776 if (ch != NULL) {
779 unsigned long flags; 777 unsigned long flags;
780 spin_lock_irqsave(&epca_lock, flags); 778 spin_lock_irqsave(&epca_lock, flags);
781 /* 779 /*
782 * If not already set and the transmitter is busy setup an 780 * If not already set and the transmitter is busy setup an
783 * event to indicate when the transmit empties. 781 * event to indicate when the transmit empties.
784 */ 782 */
785 if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT)) 783 if ((ch->statusflags & TXBUSY) &&
786 setup_empty_event(tty,ch); 784 !(ch->statusflags & EMPTYWAIT))
785 setup_empty_event(tty, ch);
787 spin_unlock_irqrestore(&epca_lock, flags); 786 spin_unlock_irqrestore(&epca_lock, flags);
788 } 787 }
789} 788}
790 789
791static int block_til_ready(struct tty_struct *tty, 790static int block_til_ready(struct tty_struct *tty,
792 struct file *filp, struct channel *ch) 791 struct file *filp, struct channel *ch)
793{ 792{
794 DECLARE_WAITQUEUE(wait,current); 793 DECLARE_WAITQUEUE(wait, current);
795 int retval, do_clocal = 0; 794 int retval, do_clocal = 0;
796 unsigned long flags; 795 unsigned long flags;
797 796
@@ -839,8 +838,7 @@ static int block_til_ready(struct tty_struct *tty,
839 while (1) { 838 while (1) {
840 set_current_state(TASK_INTERRUPTIBLE); 839 set_current_state(TASK_INTERRUPTIBLE);
841 if (tty_hung_up_p(filp) || 840 if (tty_hung_up_p(filp) ||
842 !(ch->asyncflags & ASYNC_INITIALIZED)) 841 !(ch->asyncflags & ASYNC_INITIALIZED)) {
843 {
844 if (ch->asyncflags & ASYNC_HUP_NOTIFY) 842 if (ch->asyncflags & ASYNC_HUP_NOTIFY)
845 retval = -EAGAIN; 843 retval = -EAGAIN;
846 else 844 else
@@ -880,7 +878,7 @@ static int block_til_ready(struct tty_struct *tty,
880 return 0; 878 return 0;
881} 879}
882 880
883static int pc_open(struct tty_struct *tty, struct file * filp) 881static int pc_open(struct tty_struct *tty, struct file *filp)
884{ 882{
885 struct channel *ch; 883 struct channel *ch;
886 unsigned long flags; 884 unsigned long flags;
@@ -923,7 +921,8 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
923 return(-ENODEV); 921 return(-ENODEV);
924 } 922 }
925 923
926 if ((bc = ch->brdchan) == 0) { 924 bc = ch->brdchan;
925 if (bc == NULL) {
927 tty->driver_data = NULL; 926 tty->driver_data = NULL;
928 return -ENODEV; 927 return -ENODEV;
929 } 928 }
@@ -964,7 +963,7 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
964 * The below routine generally sets up parity, baud, flow control 963 * The below routine generally sets up parity, baud, flow control
965 * issues, etc.... It effect both control flags and input flags. 964 * issues, etc.... It effect both control flags and input flags.
966 */ 965 */
967 epcaparam(tty,ch); 966 epcaparam(tty, ch);
968 ch->asyncflags |= ASYNC_INITIALIZED; 967 ch->asyncflags |= ASYNC_INITIALIZED;
969 memoff(ch); 968 memoff(ch);
970 spin_unlock_irqrestore(&epca_lock, flags); 969 spin_unlock_irqrestore(&epca_lock, flags);
@@ -1002,8 +1001,8 @@ static void __exit epca_module_exit(void)
1002 1001
1003 del_timer_sync(&epca_timer); 1002 del_timer_sync(&epca_timer);
1004 1003
1005 if (tty_unregister_driver(pc_driver) || tty_unregister_driver(pc_info)) 1004 if (tty_unregister_driver(pc_driver) ||
1006 { 1005 tty_unregister_driver(pc_info)) {
1007 printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n"); 1006 printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n");
1008 return; 1007 return;
1009 } 1008 }
@@ -1034,7 +1033,6 @@ static const struct tty_operations pc_ops = {
1034 .flush_buffer = pc_flush_buffer, 1033 .flush_buffer = pc_flush_buffer,
1035 .chars_in_buffer = pc_chars_in_buffer, 1034 .chars_in_buffer = pc_chars_in_buffer,
1036 .flush_chars = pc_flush_chars, 1035 .flush_chars = pc_flush_chars,
1037 .put_char = pc_put_char,
1038 .ioctl = pc_ioctl, 1036 .ioctl = pc_ioctl,
1039 .set_termios = pc_set_termios, 1037 .set_termios = pc_set_termios,
1040 .stop = pc_stop, 1038 .stop = pc_stop,
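Together with the earlier removal of pc_put_char(), dropping the .put_char slot leaves single characters to the tty core: as far as I can tell, tty_put_char() falls back to calling the driver's write() with a length of one when no put_char() is provided, which is all pc_put_char() ever did. The shape of that fallback, with hypothetical demo_* names rather than the core's actual code:

struct demo_port;
int demo_write(struct demo_port *port, const unsigned char *buf, int len);

static int demo_put_char(struct demo_port *port, unsigned char c)
{
        return demo_write(port, &c, 1); /* a single byte is just a short write */
}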
@@ -1044,7 +1042,7 @@ static const struct tty_operations pc_ops = {
1044 .hangup = pc_hangup, 1042 .hangup = pc_hangup,
1045}; 1043};
1046 1044
1047static int info_open(struct tty_struct *tty, struct file * filp) 1045static int info_open(struct tty_struct *tty, struct file *filp)
1048{ 1046{
1049 return 0; 1047 return 0;
1050} 1048}
@@ -1099,7 +1097,7 @@ static int __init pc_init(void)
1099 * Set up interrupt, we will worry about memory allocation in 1097 * Set up interrupt, we will worry about memory allocation in
1100 * post_fep_init. 1098 * post_fep_init.
1101 */ 1099 */
1102 printk(KERN_INFO "DIGI epca driver version %s loaded.\n",VERSION); 1100 printk(KERN_INFO "DIGI epca driver version %s loaded.\n", VERSION);
1103 1101
1104 /* 1102 /*
1105 * NOTE : This code assumes that the number of ports found in the 1103 * NOTE : This code assumes that the number of ports found in the
@@ -1252,7 +1250,7 @@ static int __init pc_init(void)
1252 if ((board_id & 0x30) == 0x30) 1250 if ((board_id & 0x30) == 0x30)
1253 bd->memory_seg = 0x8000; 1251 bd->memory_seg = 0x8000;
1254 } else 1252 } else
1255 printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n",(int)bd->port); 1253 printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n", (int)bd->port);
1256 break; 1254 break;
1257 } 1255 }
1258 } 1256 }
@@ -1326,12 +1324,12 @@ static void post_fep_init(unsigned int crd)
1326 */ 1324 */
1327 /* PCI cards are already remapped at this point ISA are not */ 1325 /* PCI cards are already remapped at this point ISA are not */
1328 bd->numports = readw(bd->re_map_membase + XEMPORTS); 1326 bd->numports = readw(bd->re_map_membase + XEMPORTS);
1329 epcaassert(bd->numports <= 64,"PCI returned a invalid number of ports"); 1327 epcaassert(bd->numports <= 64, "PCI returned a invalid number of ports");
1330 nbdevs += (bd->numports); 1328 nbdevs += (bd->numports);
1331 } else { 1329 } else {
1332 /* Fix up the mappings for ISA/EISA etc */ 1330 /* Fix up the mappings for ISA/EISA etc */
1333 /* FIXME: 64K - can we be smarter ? */ 1331 /* FIXME: 64K - can we be smarter ? */
1334 bd->re_map_membase = ioremap(bd->membase, 0x10000); 1332 bd->re_map_membase = ioremap_nocache(bd->membase, 0x10000);
1335 } 1333 }
1336 1334
1337 if (crd != 0) 1335 if (crd != 0)
@@ -1362,7 +1360,8 @@ static void post_fep_init(unsigned int crd)
1362 * XEPORTS (address 0xc22) points at the number of channels the card 1360 * XEPORTS (address 0xc22) points at the number of channels the card
1363 * supports. (For 64XE, XI, XEM, and XR use 0xc02) 1361 * supports. (For 64XE, XI, XEM, and XR use 0xc02)
1364 */ 1362 */
1365 if ((bd->type == PCXEVE || bd->type == PCXE) && (readw(memaddr + XEPORTS) < 3)) 1363 if ((bd->type == PCXEVE || bd->type == PCXE) &&
1364 (readw(memaddr + XEPORTS) < 3))
1366 shrinkmem = 1; 1365 shrinkmem = 1;
1367 if (bd->type < PCIXEM) 1366 if (bd->type < PCIXEM)
1368 if (!request_region((int)bd->port, 4, board_desc[bd->type])) 1367 if (!request_region((int)bd->port, 4, board_desc[bd->type]))
@@ -1461,10 +1460,12 @@ static void post_fep_init(unsigned int crd)
1461 1460
1462 case PCXEVE: 1461 case PCXEVE:
1463 case PCXE: 1462 case PCXE:
1464 ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) & 0x1fff); 1463 ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4)
1464 & 0x1fff);
1465 ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9); 1465 ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9);
1466 ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) & 0x1fff); 1466 ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4)
1467 ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >>9 ); 1467 & 0x1fff);
1468 ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >> 9);
1468 break; 1469 break;
1469 1470
1470 case PCXI: 1471 case PCXI:
@@ -1518,8 +1519,9 @@ static void post_fep_init(unsigned int crd)
1518 } 1519 }
1519 1520
1520 printk(KERN_INFO 1521 printk(KERN_INFO
1521 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n", 1522 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
1522 VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports); 1523 VERSION, board_desc[bd->type], (long)bd->port,
1524 (long)bd->membase, bd->numports);
1523 memwinoff(bd, 0); 1525 memwinoff(bd, 0);
1524} 1526}
1525 1527
@@ -1527,7 +1529,7 @@ static void epcapoll(unsigned long ignored)
1527{ 1529{
1528 unsigned long flags; 1530 unsigned long flags;
1529 int crd; 1531 int crd;
1530 volatile unsigned int head, tail; 1532 unsigned int head, tail;
1531 struct channel *ch; 1533 struct channel *ch;
1532 struct board_info *bd; 1534 struct board_info *bd;
1533 1535
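epcapoll() loses the volatile qualifier on its local head/tail copies. The values come out of readw() on an __iomem pointer, and it is the accessor that carries the device-memory semantics; once the value is in a local, a plain unsigned int is enough. A small sketch, using an invented demo_regs layout rather than the driver's board_chan:

#include <linux/io.h>

struct demo_regs {                      /* hypothetical register block */
        unsigned short tin;
        unsigned short tout;
};

static int demo_pending(struct demo_regs __iomem *regs)
{
        unsigned int head, tail;        /* plain locals: no volatile needed */

        head = readw(&regs->tin);       /* readw() does the MMIO access */
        tail = readw(&regs->tout);
        return head != tail;
}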
@@ -1593,7 +1595,9 @@ static void doevent(int crd)
1593 chan0 = card_ptr[crd]; 1595 chan0 = card_ptr[crd];
1594 epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range"); 1596 epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range");
1595 assertgwinon(chan0); 1597 assertgwinon(chan0);
1596 while ((tail = readw(&chan0->mailbox->eout)) != (head = readw(&chan0->mailbox->ein))) { /* Begin while something in event queue */ 1598 while ((tail = readw(&chan0->mailbox->eout)) !=
1599 (head = readw(&chan0->mailbox->ein))) {
1600 /* Begin while something in event queue */
1597 assertgwinon(chan0); 1601 assertgwinon(chan0);
1598 eventbuf = bd->re_map_membase + tail + ISTART; 1602 eventbuf = bd->re_map_membase + tail + ISTART;
1599 /* Get the channel the event occurred on */ 1603 /* Get the channel the event occurred on */
@@ -1617,7 +1621,8 @@ static void doevent(int crd)
1617 goto next; 1621 goto next;
1618 } 1622 }
1619 1623
1620 if ((bc = ch->brdchan) == NULL) 1624 bc = ch->brdchan;
1625 if (bc == NULL)
1621 goto next; 1626 goto next;
1622 1627
1623 if (event & DATA_IND) { /* Begin DATA_IND */ 1628 if (event & DATA_IND) { /* Begin DATA_IND */
@@ -1629,10 +1634,11 @@ static void doevent(int crd)
1629 /* A modem signal change has been indicated */ 1634 /* A modem signal change has been indicated */
1630 ch->imodem = mstat; 1635 ch->imodem = mstat;
1631 if (ch->asyncflags & ASYNC_CHECK_CD) { 1636 if (ch->asyncflags & ASYNC_CHECK_CD) {
1632 if (mstat & ch->dcd) /* We are now receiving dcd */ 1637 /* We are now receiving dcd */
1638 if (mstat & ch->dcd)
1633 wake_up_interruptible(&ch->open_wait); 1639 wake_up_interruptible(&ch->open_wait);
1634 else 1640 else /* No dcd; hangup */
1635 pc_sched_event(ch, EPCA_EVENT_HANGUP); /* No dcd; hangup */ 1641 pc_sched_event(ch, EPCA_EVENT_HANGUP);
1636 } 1642 }
1637 } 1643 }
1638 tty = ch->tty; 1644 tty = ch->tty;
@@ -1647,7 +1653,8 @@ static void doevent(int crd)
1647 tty_wakeup(tty); 1653 tty_wakeup(tty);
1648 } 1654 }
1649 } else if (event & EMPTYTX_IND) { 1655 } else if (event & EMPTYTX_IND) {
1650 /* This event is generated by setup_empty_event */ 1656 /* This event is generated by
1657 setup_empty_event */
1651 ch->statusflags &= ~TXBUSY; 1658 ch->statusflags &= ~TXBUSY;
1652 if (ch->statusflags & EMPTYWAIT) { 1659 if (ch->statusflags & EMPTYWAIT) {
1653 ch->statusflags &= ~EMPTYWAIT; 1660 ch->statusflags &= ~EMPTYWAIT;
@@ -1655,7 +1662,7 @@ static void doevent(int crd)
1655 } 1662 }
1656 } 1663 }
1657 } 1664 }
1658 next: 1665next:
1659 globalwinon(ch); 1666 globalwinon(ch);
1660 BUG_ON(!bc); 1667 BUG_ON(!bc);
1661 writew(1, &bc->idata); 1668 writew(1, &bc->idata);
@@ -1665,7 +1672,7 @@ static void doevent(int crd)
1665} 1672}
1666 1673
1667static void fepcmd(struct channel *ch, int cmd, int word_or_byte, 1674static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
1668 int byte2, int ncmds, int bytecmd) 1675 int byte2, int ncmds, int bytecmd)
1669{ 1676{
1670 unchar __iomem *memaddr; 1677 unchar __iomem *memaddr;
1671 unsigned int head, cmdTail, cmdStart, cmdMax; 1678 unsigned int head, cmdTail, cmdStart, cmdMax;
@@ -1690,8 +1697,10 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
1690 memaddr = ch->board->re_map_membase; 1697 memaddr = ch->board->re_map_membase;
1691 1698
1692 if (head >= (cmdMax - cmdStart) || (head & 03)) { 1699 if (head >= (cmdMax - cmdStart) || (head & 03)) {
1693 printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n", __LINE__, cmd, head); 1700 printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n",
1694 printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n", __LINE__, cmdMax, cmdStart); 1701 __LINE__, cmd, head);
1702 printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n",
1703 __LINE__, cmdMax, cmdStart);
1695 return; 1704 return;
1696 } 1705 }
1697 if (bytecmd) { 1706 if (bytecmd) {
@@ -1770,7 +1779,7 @@ static unsigned termios2digi_h(struct channel *ch, unsigned cflag)
1770static unsigned termios2digi_i(struct channel *ch, unsigned iflag) 1779static unsigned termios2digi_i(struct channel *ch, unsigned iflag)
1771{ 1780{
1772 unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | 1781 unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
1773 INPCK | ISTRIP|IXON|IXANY|IXOFF); 1782 INPCK | ISTRIP | IXON | IXANY | IXOFF);
1774 if (ch->digiext.digi_flags & DIGI_AIXON) 1783 if (ch->digiext.digi_flags & DIGI_AIXON)
1775 res |= IAIXON; 1784 res |= IAIXON;
1776 return res; 1785 return res;
@@ -1838,7 +1847,7 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
1838 unsigned mval, hflow, cflag, iflag; 1847 unsigned mval, hflow, cflag, iflag;
1839 1848
1840 bc = ch->brdchan; 1849 bc = ch->brdchan;
1841 epcaassert(bc !=0, "bc out of range"); 1850 epcaassert(bc != NULL, "bc out of range");
1842 1851
1843 assertgwinon(ch); 1852 assertgwinon(ch);
1844 ts = tty->termios; 1853 ts = tty->termios;
@@ -1884,8 +1893,10 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
1884 * Command sets channels iflag structure on the board. Such 1893 * Command sets channels iflag structure on the board. Such
1885 * things as input soft flow control, handling of parity 1894 * things as input soft flow control, handling of parity
1886 * errors, and break handling are all set here. 1895 * errors, and break handling are all set here.
1896 *
1897 * break handling, parity handling, input stripping,
1898 * flow control chars
1887 */ 1899 */
1888 /* break handling, parity handling, input stripping, flow control chars */
1889 fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0); 1900 fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0);
1890 } 1901 }
1891 /* 1902 /*
@@ -1981,7 +1992,7 @@ static void receive_data(struct channel *ch)
1981 return; 1992 return;
1982 1993
1983 /* If CREAD bit is off or device not open, set TX tail to head */ 1994 /* If CREAD bit is off or device not open, set TX tail to head */
1984 if (!tty || !ts || !(ts->c_cflag & CREAD)) { 1995 if (!tty || !ts || !(ts->c_cflag & CREAD)) {
1985 writew(head, &bc->rout); 1996 writew(head, &bc->rout);
1986 return; 1997 return;
1987 } 1998 }
@@ -1991,18 +2002,21 @@ static void receive_data(struct channel *ch)
1991 2002
1992 if (readb(&bc->orun)) { 2003 if (readb(&bc->orun)) {
1993 writeb(0, &bc->orun); 2004 writeb(0, &bc->orun);
1994 printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n",tty->name); 2005 printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n",
2006 tty->name);
1995 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 2007 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1996 } 2008 }
1997 rxwinon(ch); 2009 rxwinon(ch);
1998 while (bytesAvailable > 0) { /* Begin while there is data on the card */ 2010 while (bytesAvailable > 0) {
2011 /* Begin while there is data on the card */
1999 wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail; 2012 wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail;
2000 /* 2013 /*
2001 * Even if head has wrapped around only report the amount of 2014 * Even if head has wrapped around only report the amount of
2002 * data to be equal to the size - tail. Remember memcpy can't 2015 * data to be equal to the size - tail. Remember memcpy can't
2003 * automaticly wrap around the receive buffer. 2016 * automaticly wrap around the receive buffer.
2004 */ 2017 */
2005 dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable; 2018 dataToRead = (wrapgap < bytesAvailable) ? wrapgap
2019 : bytesAvailable;
2006 /* Make sure we don't overflow the buffer */ 2020 /* Make sure we don't overflow the buffer */
2007 dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead); 2021 dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead);
2008 if (dataToRead == 0) 2022 if (dataToRead == 0)
@@ -2153,14 +2167,14 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file,
2153 * The below routine generally sets up parity, baud, flow control 2167 * The below routine generally sets up parity, baud, flow control
2154 * issues, etc.... It effect both control flags and input flags. 2168 * issues, etc.... It effect both control flags and input flags.
2155 */ 2169 */
2156 epcaparam(tty,ch); 2170 epcaparam(tty, ch);
2157 memoff(ch); 2171 memoff(ch);
2158 spin_unlock_irqrestore(&epca_lock, flags); 2172 spin_unlock_irqrestore(&epca_lock, flags);
2159 return 0; 2173 return 0;
2160} 2174}
2161 2175
2162static int pc_ioctl(struct tty_struct *tty, struct file * file, 2176static int pc_ioctl(struct tty_struct *tty, struct file *file,
2163 unsigned int cmd, unsigned long arg) 2177 unsigned int cmd, unsigned long arg)
2164{ 2178{
2165 digiflow_t dflow; 2179 digiflow_t dflow;
2166 int retval; 2180 int retval;
@@ -2175,7 +2189,6 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2175 bc = ch->brdchan; 2189 bc = ch->brdchan;
2176 else 2190 else
2177 return -EINVAL; 2191 return -EINVAL;
2178
2179 /* 2192 /*
2180 * For POSIX compliance we need to add more ioctls. See tty_ioctl.c in 2193 * For POSIX compliance we need to add more ioctls. See tty_ioctl.c in
2181 * /usr/src/linux/drivers/char for a good example. In particular think 2194 * /usr/src/linux/drivers/char for a good example. In particular think
@@ -2186,9 +2199,10 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2186 retval = tty_check_change(tty); 2199 retval = tty_check_change(tty);
2187 if (retval) 2200 if (retval)
2188 return retval; 2201 return retval;
2189 /* Setup an event to indicate when the transmit buffer empties */ 2202 /* Setup an event to indicate when the transmit
2203 buffer empties */
2190 spin_lock_irqsave(&epca_lock, flags); 2204 spin_lock_irqsave(&epca_lock, flags);
2191 setup_empty_event(tty,ch); 2205 setup_empty_event(tty, ch);
2192 spin_unlock_irqrestore(&epca_lock, flags); 2206 spin_unlock_irqrestore(&epca_lock, flags);
2193 tty_wait_until_sent(tty, 0); 2207 tty_wait_until_sent(tty, 0);
2194 if (!arg) 2208 if (!arg)
@@ -2198,29 +2212,14 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2198 retval = tty_check_change(tty); 2212 retval = tty_check_change(tty);
2199 if (retval) 2213 if (retval)
2200 return retval; 2214 return retval;
2201 2215 /* Setup an event to indicate when the transmit buffer
2202 /* Setup an event to indicate when the transmit buffer empties */ 2216 empties */
2203 spin_lock_irqsave(&epca_lock, flags); 2217 spin_lock_irqsave(&epca_lock, flags);
2204 setup_empty_event(tty,ch); 2218 setup_empty_event(tty, ch);
2205 spin_unlock_irqrestore(&epca_lock, flags); 2219 spin_unlock_irqrestore(&epca_lock, flags);
2206 tty_wait_until_sent(tty, 0); 2220 tty_wait_until_sent(tty, 0);
2207 digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4); 2221 digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4);
2208 return 0; 2222 return 0;
2209 case TIOCGSOFTCAR:
2210 if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg))
2211 return -EFAULT;
2212 return 0;
2213 case TIOCSSOFTCAR:
2214 {
2215 unsigned int value;
2216
2217 if (get_user(value, (unsigned __user *)argp))
2218 return -EFAULT;
2219 tty->termios->c_cflag =
2220 ((tty->termios->c_cflag & ~CLOCAL) |
2221 (value ? CLOCAL : 0));
2222 return 0;
2223 }
2224 case TIOCMODG: 2223 case TIOCMODG:
2225 mflag = pc_tiocmget(tty, file); 2224 mflag = pc_tiocmget(tty, file);
2226 if (put_user(mflag, (unsigned long __user *)argp)) 2225 if (put_user(mflag, (unsigned long __user *)argp))
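The TIOCGSOFTCAR/TIOCSSOFTCAR cases disappear from pc_ioctl(); these are generic termios ioctls, and the tty layer's common ioctl handling picks them up once the driver declines the command, so the per-driver copies were redundant. Nothing changes for user space; querying the soft-carrier (CLOCAL) flag still looks like the snippet below (the /dev/ttyD000 node name is only an example):

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>

int main(void)
{
        int fd, softcar;

        fd = open("/dev/ttyD000", O_RDONLY | O_NONBLOCK);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, TIOCGSOFTCAR, &softcar) < 0) {
                perror("TIOCGSOFTCAR");
                return 1;
        }
        printf("CLOCAL is %s\n", softcar ? "set" : "clear");
        return 0;
}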
@@ -2253,10 +2252,12 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2253 break; 2252 break;
2254 case DIGI_SETAW: 2253 case DIGI_SETAW:
2255 case DIGI_SETAF: 2254 case DIGI_SETAF:
2255 lock_kernel();
2256 if (cmd == DIGI_SETAW) { 2256 if (cmd == DIGI_SETAW) {
2257 /* Setup an event to indicate when the transmit buffer empties */ 2257 /* Setup an event to indicate when the transmit
2258 buffer empties */
2258 spin_lock_irqsave(&epca_lock, flags); 2259 spin_lock_irqsave(&epca_lock, flags);
2259 setup_empty_event(tty,ch); 2260 setup_empty_event(tty, ch);
2260 spin_unlock_irqrestore(&epca_lock, flags); 2261 spin_unlock_irqrestore(&epca_lock, flags);
2261 tty_wait_until_sent(tty, 0); 2262 tty_wait_until_sent(tty, 0);
2262 } else { 2263 } else {
@@ -2264,6 +2265,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2264 if (tty->ldisc.flush_buffer) 2265 if (tty->ldisc.flush_buffer)
2265 tty->ldisc.flush_buffer(tty); 2266 tty->ldisc.flush_buffer(tty);
2266 } 2267 }
2268 unlock_kernel();
2267 /* Fall Thru */ 2269 /* Fall Thru */
2268 case DIGI_SETA: 2270 case DIGI_SETA:
2269 if (copy_from_user(&ch->digiext, argp, sizeof(digi_t))) 2271 if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
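DIGI_SETAW/DIGI_SETAF now take the Big Kernel Lock around the wait-or-flush step, presumably to keep the serialization this ioctl path used to get implicitly before falling through to the termios update; the epca spinlock cannot cover it because both tty_wait_until_sent() and a line-discipline flush may sleep. The pattern reduced to its shape (the helper name is invented; lock_kernel()/unlock_kernel() come from <linux/smp_lock.h> in kernels of this vintage):

#include <linux/smp_lock.h>     /* lock_kernel()/unlock_kernel() */
#include <linux/tty.h>

/* Hypothetical helper: sleepable work that used to rely on the ioctl
 * path's implicit BKL now takes the lock explicitly. */
static void demo_flush_ldisc(struct tty_struct *tty)
{
        lock_kernel();
        if (tty->ldisc.flush_buffer)    /* same field the hunk above touches */
                tty->ldisc.flush_buffer(tty);
        unlock_kernel();
}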
@@ -2285,7 +2287,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2285 * control issues, etc.... It effect both control flags and 2287 * control issues, etc.... It effect both control flags and
2286 * input flags. 2288 * input flags.
2287 */ 2289 */
2288 epcaparam(tty,ch); 2290 epcaparam(tty, ch);
2289 memoff(ch); 2291 memoff(ch);
2290 spin_unlock_irqrestore(&epca_lock, flags); 2292 spin_unlock_irqrestore(&epca_lock, flags);
2291 break; 2293 break;
@@ -2321,18 +2323,21 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2321 if (copy_from_user(&dflow, argp, sizeof(dflow))) 2323 if (copy_from_user(&dflow, argp, sizeof(dflow)))
2322 return -EFAULT; 2324 return -EFAULT;
2323 2325
2324 if (dflow.startc != startc || dflow.stopc != stopc) { /* Begin if setflow toggled */ 2326 if (dflow.startc != startc || dflow.stopc != stopc) {
2327 /* Begin if setflow toggled */
2325 spin_lock_irqsave(&epca_lock, flags); 2328 spin_lock_irqsave(&epca_lock, flags);
2326 globalwinon(ch); 2329 globalwinon(ch);
2327 2330
2328 if (cmd == DIGI_SETFLOW) { 2331 if (cmd == DIGI_SETFLOW) {
2329 ch->fepstartc = ch->startc = dflow.startc; 2332 ch->fepstartc = ch->startc = dflow.startc;
2330 ch->fepstopc = ch->stopc = dflow.stopc; 2333 ch->fepstopc = ch->stopc = dflow.stopc;
2331 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1); 2334 fepcmd(ch, SONOFFC, ch->fepstartc,
2335 ch->fepstopc, 0, 1);
2332 } else { 2336 } else {
2333 ch->fepstartca = ch->startca = dflow.startc; 2337 ch->fepstartca = ch->startca = dflow.startc;
2334 ch->fepstopca = ch->stopca = dflow.stopc; 2338 ch->fepstopca = ch->stopca = dflow.stopc;
2335 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1); 2339 fepcmd(ch, SAUXONOFFC, ch->fepstartca,
2340 ch->fepstopca, 0, 1);
2336 } 2341 }
2337 2342
2338 if (ch->statusflags & TXSTOPPED) 2343 if (ch->statusflags & TXSTOPPED)
@@ -2356,7 +2361,9 @@ static void pc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
2356 * verifyChannel returns the channel from the tty struct if it is 2361 * verifyChannel returns the channel from the tty struct if it is
2357 * valid. This serves as a sanity check. 2362 * valid. This serves as a sanity check.
2358 */ 2363 */
2359 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */ 2364 ch = verifyChannel(tty);
2365
2366 if (ch != NULL) { /* Begin if channel valid */
2360 spin_lock_irqsave(&epca_lock, flags); 2367 spin_lock_irqsave(&epca_lock, flags);
2361 globalwinon(ch); 2368 globalwinon(ch);
2362 epcaparam(tty, ch); 2369 epcaparam(tty, ch);
@@ -2383,7 +2390,7 @@ static void do_softint(struct work_struct *work)
2383 2390
2384 if (tty && tty->driver_data) { 2391 if (tty && tty->driver_data) {
2385 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { 2392 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) {
2386 tty_hangup(tty); /* FIXME: module removal race here - AKPM */ 2393 tty_hangup(tty);
2387 wake_up_interruptible(&ch->open_wait); 2394 wake_up_interruptible(&ch->open_wait);
2388 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; 2395 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
2389 } 2396 }
@@ -2403,9 +2410,11 @@ static void pc_stop(struct tty_struct *tty)
2403 * verifyChannel returns the channel from the tty struct if it is 2410 * verifyChannel returns the channel from the tty struct if it is
2404 * valid. This serves as a sanity check. 2411 * valid. This serves as a sanity check.
2405 */ 2412 */
2406 if ((ch = verifyChannel(tty)) != NULL) { 2413 ch = verifyChannel(tty);
2414 if (ch != NULL) {
2407 spin_lock_irqsave(&epca_lock, flags); 2415 spin_lock_irqsave(&epca_lock, flags);
2408 if ((ch->statusflags & TXSTOPPED) == 0) { /* Begin if transmit stop requested */ 2416 if ((ch->statusflags & TXSTOPPED) == 0) {
2417 /* Begin if transmit stop requested */
2409 globalwinon(ch); 2418 globalwinon(ch);
2410 /* STOP transmitting now !! */ 2419 /* STOP transmitting now !! */
2411 fepcmd(ch, PAUSETX, 0, 0, 0, 0); 2420 fepcmd(ch, PAUSETX, 0, 0, 0, 0);
@@ -2423,11 +2432,14 @@ static void pc_start(struct tty_struct *tty)
2423 * verifyChannel returns the channel from the tty struct if it is 2432 * verifyChannel returns the channel from the tty struct if it is
2424 * valid. This serves as a sanity check. 2433 * valid. This serves as a sanity check.
2425 */ 2434 */
2426 if ((ch = verifyChannel(tty)) != NULL) { 2435 ch = verifyChannel(tty);
2436 if (ch != NULL) {
2427 unsigned long flags; 2437 unsigned long flags;
2428 spin_lock_irqsave(&epca_lock, flags); 2438 spin_lock_irqsave(&epca_lock, flags);
2429 /* Just in case output was resumed because of a change in Digi-flow */ 2439 /* Just in case output was resumed because of a change
2430 if (ch->statusflags & TXSTOPPED) { /* Begin transmit resume requested */ 2440 in Digi-flow */
2441 if (ch->statusflags & TXSTOPPED) {
2442 /* Begin transmit resume requested */
2431 struct board_chan __iomem *bc; 2443 struct board_chan __iomem *bc;
2432 globalwinon(ch); 2444 globalwinon(ch);
2433 bc = ch->brdchan; 2445 bc = ch->brdchan;
@@ -2457,7 +2469,8 @@ static void pc_throttle(struct tty_struct *tty)
2457 * verifyChannel returns the channel from the tty struct if it is 2469 * verifyChannel returns the channel from the tty struct if it is
2458 * valid. This serves as a sanity check. 2470 * valid. This serves as a sanity check.
2459 */ 2471 */
2460 if ((ch = verifyChannel(tty)) != NULL) { 2472 ch = verifyChannel(tty);
2473 if (ch != NULL) {
2461 spin_lock_irqsave(&epca_lock, flags); 2474 spin_lock_irqsave(&epca_lock, flags);
2462 if ((ch->statusflags & RXSTOPPED) == 0) { 2475 if ((ch->statusflags & RXSTOPPED) == 0) {
2463 globalwinon(ch); 2476 globalwinon(ch);
@@ -2477,8 +2490,10 @@ static void pc_unthrottle(struct tty_struct *tty)
2477 * verifyChannel returns the channel from the tty struct if it is 2490 * verifyChannel returns the channel from the tty struct if it is
2478 * valid. This serves as a sanity check. 2491 * valid. This serves as a sanity check.
2479 */ 2492 */
2480 if ((ch = verifyChannel(tty)) != NULL) { 2493 ch = verifyChannel(tty);
2481 /* Just in case output was resumed because of a change in Digi-flow */ 2494 if (ch != NULL) {
2495 /* Just in case output was resumed because of a change
2496 in Digi-flow */
2482 spin_lock_irqsave(&epca_lock, flags); 2497 spin_lock_irqsave(&epca_lock, flags);
2483 if (ch->statusflags & RXSTOPPED) { 2498 if (ch->statusflags & RXSTOPPED) {
2484 globalwinon(ch); 2499 globalwinon(ch);
@@ -2490,7 +2505,7 @@ static void pc_unthrottle(struct tty_struct *tty)
2490 } 2505 }
2491} 2506}
2492 2507
2493void digi_send_break(struct channel *ch, int msec) 2508static void digi_send_break(struct channel *ch, int msec)
2494{ 2509{
2495 unsigned long flags; 2510 unsigned long flags;
2496 2511
@@ -2523,7 +2538,7 @@ static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
2523 memoff(ch); 2538 memoff(ch);
2524} 2539}
2525 2540
2526void epca_setup(char *str, int *ints) 2541static void epca_setup(char *str, int *ints)
2527{ 2542{
2528 struct board_info board; 2543 struct board_info board;
2529 int index, loop, last; 2544 int index, loop, last;
@@ -2552,14 +2567,16 @@ void epca_setup(char *str, int *ints)
2552 * instructing the driver to ignore epcaconfig.) For 2567 * instructing the driver to ignore epcaconfig.) For
2553 * this reason we check for 2. 2568 * this reason we check for 2.
2554 */ 2569 */
2555 if (board.status == 2) { /* Begin ignore epcaconfig as well as lilo cmd line */ 2570 if (board.status == 2) {
2571 /* Begin ignore epcaconfig as well as lilo cmd line */
2556 nbdevs = 0; 2572 nbdevs = 0;
2557 num_cards = 0; 2573 num_cards = 0;
2558 return; 2574 return;
2559 } /* End ignore epcaconfig as well as lilo cmd line */ 2575 } /* End ignore epcaconfig as well as lilo cmd line */
2560 2576
2561 if (board.status > 2) { 2577 if (board.status > 2) {
2562 printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n", board.status); 2578 printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n",
2579 board.status);
2563 invalid_lilo_config = 1; 2580 invalid_lilo_config = 1;
2564 setup_error_code |= INVALID_BOARD_STATUS; 2581 setup_error_code |= INVALID_BOARD_STATUS;
2565 return; 2582 return;
@@ -2613,7 +2630,8 @@ void epca_setup(char *str, int *ints)
2613 case 6: 2630 case 6:
2614 board.membase = ints[index]; 2631 board.membase = ints[index];
2615 if (ints[index] <= 0) { 2632 if (ints[index] <= 0) {
2616 printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",(unsigned int)board.membase); 2633 printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",
2634 (unsigned int)board.membase);
2617 invalid_lilo_config = 1; 2635 invalid_lilo_config = 1;
2618 setup_error_code |= INVALID_MEM_BASE; 2636 setup_error_code |= INVALID_MEM_BASE;
2619 return; 2637 return;
@@ -2744,7 +2762,7 @@ void epca_setup(char *str, int *ints)
2744 t2++; 2762 t2++;
2745 2763
2746 if (*t2) { 2764 if (*t2) {
2747 printk(KERN_ERR "epca_setup: Invalid memory base %s\n",str); 2765 printk(KERN_ERR "epca_setup: Invalid memory base %s\n", str);
2748 invalid_lilo_config = 1; 2766 invalid_lilo_config = 1;
2749 setup_error_code |= INVALID_MEM_BASE; 2767 setup_error_code |= INVALID_MEM_BASE;
2750 return; 2768 return;
@@ -2766,7 +2784,7 @@ void epca_setup(char *str, int *ints)
2766 2784
2767 /* I should REALLY validate the stuff here */ 2785 /* I should REALLY validate the stuff here */
2768 /* Copies our local copy of board into boards */ 2786 /* Copies our local copy of board into boards */
2769 memcpy((void *)&boards[num_cards],(void *)&board, sizeof(board)); 2787 memcpy((void *)&boards[num_cards], (void *)&board, sizeof(board));
2770 /* Does this get called once per lilo arg are what ? */ 2788 /* Does this get called once per lilo arg are what ? */
2771 printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n", 2789 printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n",
2772 num_cards, board_desc[board.type], 2790 num_cards, board_desc[board.type],
@@ -2807,9 +2825,9 @@ static int __devinit epca_init_one(struct pci_dev *pdev,
2807 if (board_idx >= MAXBOARDS) 2825 if (board_idx >= MAXBOARDS)
2808 goto err_out; 2826 goto err_out;
2809 2827
2810 addr = pci_resource_start (pdev, epca_info_tbl[info_idx].bar_idx); 2828 addr = pci_resource_start(pdev, epca_info_tbl[info_idx].bar_idx);
2811 if (!addr) { 2829 if (!addr) {
2812 printk (KERN_ERR PFX "PCI region #%d not available (size 0)\n", 2830 printk(KERN_ERR PFX "PCI region #%d not available (size 0)\n",
2813 epca_info_tbl[info_idx].bar_idx); 2831 epca_info_tbl[info_idx].bar_idx);
2814 goto err_out; 2832 goto err_out;
2815 } 2833 }
@@ -2820,28 +2838,29 @@ static int __devinit epca_init_one(struct pci_dev *pdev,
2820 boards[board_idx].port = addr + PCI_IO_OFFSET; 2838 boards[board_idx].port = addr + PCI_IO_OFFSET;
2821 boards[board_idx].membase = addr; 2839 boards[board_idx].membase = addr;
2822 2840
2823 if (!request_mem_region (addr + PCI_IO_OFFSET, 0x200000, "epca")) { 2841 if (!request_mem_region(addr + PCI_IO_OFFSET, 0x200000, "epca")) {
2824 printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", 2842 printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
2825 0x200000, addr + PCI_IO_OFFSET); 2843 0x200000, addr + PCI_IO_OFFSET);
2826 goto err_out; 2844 goto err_out;
2827 } 2845 }
2828 2846
2829 boards[board_idx].re_map_port = ioremap(addr + PCI_IO_OFFSET, 0x200000); 2847 boards[board_idx].re_map_port = ioremap_nocache(addr + PCI_IO_OFFSET,
2848 0x200000);
2830 if (!boards[board_idx].re_map_port) { 2849 if (!boards[board_idx].re_map_port) {
2831 printk (KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n", 2850 printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n",
2832 0x200000, addr + PCI_IO_OFFSET); 2851 0x200000, addr + PCI_IO_OFFSET);
2833 goto err_out_free_pciio; 2852 goto err_out_free_pciio;
2834 } 2853 }
2835 2854
2836 if (!request_mem_region (addr, 0x200000, "epca")) { 2855 if (!request_mem_region(addr, 0x200000, "epca")) {
2837 printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", 2856 printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
2838 0x200000, addr); 2857 0x200000, addr);
2839 goto err_out_free_iounmap; 2858 goto err_out_free_iounmap;
2840 } 2859 }
2841 2860
2842 boards[board_idx].re_map_membase = ioremap(addr, 0x200000); 2861 boards[board_idx].re_map_membase = ioremap_nocache(addr, 0x200000);
2843 if (!boards[board_idx].re_map_membase) { 2862 if (!boards[board_idx].re_map_membase) {
2844 printk (KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n", 2863 printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n",
2845 0x200000, addr + PCI_IO_OFFSET); 2864 0x200000, addr + PCI_IO_OFFSET);
2846 goto err_out_free_memregion; 2865 goto err_out_free_memregion;
2847 } 2866 }
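The epca_init_one() hunk above switches both PCI windows from ioremap() to ioremap_nocache() and tidies the printk calls, while keeping the claim-then-map-then-unwind ordering. The sketch below restates that ordering as a small helper; it assumes a 2.6-era kernel build environment, and the function name, resource name and length parameter are illustrative rather than taken from the driver.

/* Sketch of the claim-and-map pattern epca_init_one() follows after this
 * hunk (2.6-era kernel APIs; helper name, "epca" string and length are
 * illustrative).  Each step is unwound in reverse order on failure. */
#include <linux/ioport.h>
#include <linux/io.h>

static void __iomem *map_board_window(unsigned long addr, unsigned long len)
{
        void __iomem *base;

        if (!request_mem_region(addr, len, "epca"))     /* reserve the range */
                return NULL;

        base = ioremap_nocache(addr, len);              /* uncached MMIO map */
        if (!base) {
                release_mem_region(addr, len);          /* unwind on failure */
                return NULL;
        }
        return base;
}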
@@ -2858,11 +2877,11 @@ static int __devinit epca_init_one(struct pci_dev *pdev,
2858 return 0; 2877 return 0;
2859 2878
2860err_out_free_memregion: 2879err_out_free_memregion:
2861 release_mem_region (addr, 0x200000); 2880 release_mem_region(addr, 0x200000);
2862err_out_free_iounmap: 2881err_out_free_iounmap:
2863 iounmap (boards[board_idx].re_map_port); 2882 iounmap(boards[board_idx].re_map_port);
2864err_out_free_pciio: 2883err_out_free_pciio:
2865 release_mem_region (addr + PCI_IO_OFFSET, 0x200000); 2884 release_mem_region(addr + PCI_IO_OFFSET, 0x200000);
2866err_out: 2885err_out:
2867 return -ENODEV; 2886 return -ENODEV;
2868} 2887}
@@ -2878,9 +2897,9 @@ static struct pci_device_id epca_pci_tbl[] = {
2878 2897
2879MODULE_DEVICE_TABLE(pci, epca_pci_tbl); 2898MODULE_DEVICE_TABLE(pci, epca_pci_tbl);
2880 2899
2881int __init init_PCI (void) 2900static int __init init_PCI(void)
2882{ 2901{
2883 memset (&epca_driver, 0, sizeof (epca_driver)); 2902 memset(&epca_driver, 0, sizeof(epca_driver));
2884 epca_driver.name = "epca"; 2903 epca_driver.name = "epca";
2885 epca_driver.id_table = epca_pci_tbl; 2904 epca_driver.id_table = epca_pci_tbl;
2886 epca_driver.probe = epca_init_one; 2905 epca_driver.probe = epca_init_one;
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index f3fe62067344..84840ba13ff0 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -8,7 +8,7 @@
8 * Extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92. Now 8 * Extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92. Now
9 * much more extensible to support other serial cards based on the 9 * much more extensible to support other serial cards based on the
10 * 16450/16550A UART's. Added support for the AST FourPort and the 10 * 16450/16550A UART's. Added support for the AST FourPort and the
11 * Accent Async board. 11 * Accent Async board.
12 * 12 *
13 * set_serial_info fixed to set the flags, custom divisor, and uart 13 * set_serial_info fixed to set the flags, custom divisor, and uart
14 * type fields. Fix suggested by Michael K. Johnson 12/12/92. 14 * type fields. Fix suggested by Michael K. Johnson 12/12/92.
@@ -61,11 +61,11 @@
61#include <linux/bitops.h> 61#include <linux/bitops.h>
62 62
63#include <asm/system.h> 63#include <asm/system.h>
64#include <asm/io.h> 64#include <linux/io.h>
65 65
66#include <asm/dma.h> 66#include <asm/dma.h>
67#include <linux/slab.h> 67#include <linux/slab.h>
68#include <asm/uaccess.h> 68#include <linux/uaccess.h>
69 69
70#include <linux/hayesesp.h> 70#include <linux/hayesesp.h>
71 71
@@ -127,8 +127,10 @@ static struct tty_driver *esp_driver;
127#undef SERIAL_DEBUG_FLOW 127#undef SERIAL_DEBUG_FLOW
128 128
129#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) 129#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT)
130#define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ 130#define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \
131 tty->name, (info->flags), serial_driver.refcount,info->count,tty->count,s) 131 tty->name, info->flags, \
132 serial_driver.refcount, \
133 info->count, tty->count, s)
132#else 134#else
133#define DBG_CNT(s) 135#define DBG_CNT(s)
134#endif 136#endif
@@ -189,7 +191,7 @@ static inline void serial_out(struct esp_struct *info, int offset,
189 */ 191 */
190static void rs_stop(struct tty_struct *tty) 192static void rs_stop(struct tty_struct *tty)
191{ 193{
192 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 194 struct esp_struct *info = tty->driver_data;
193 unsigned long flags; 195 unsigned long flags;
194 196
195 if (serial_paranoia_check(info, tty->name, "rs_stop")) 197 if (serial_paranoia_check(info, tty->name, "rs_stop"))
@@ -206,12 +208,12 @@ static void rs_stop(struct tty_struct *tty)
206 208
207static void rs_start(struct tty_struct *tty) 209static void rs_start(struct tty_struct *tty)
208{ 210{
209 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 211 struct esp_struct *info = tty->driver_data;
210 unsigned long flags; 212 unsigned long flags;
211 213
212 if (serial_paranoia_check(info, tty->name, "rs_start")) 214 if (serial_paranoia_check(info, tty->name, "rs_start"))
213 return; 215 return;
214 216
215 spin_lock_irqsave(&info->lock, flags); 217 spin_lock_irqsave(&info->lock, flags);
216 if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) { 218 if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) {
217 info->IER |= UART_IER_THRI; 219 info->IER |= UART_IER_THRI;
@@ -233,7 +235,7 @@ static void rs_start(struct tty_struct *tty)
233 * rs_interrupt() should try to keep the interrupt handler as fast as 235 * rs_interrupt() should try to keep the interrupt handler as fast as
234 * possible. After you are done making modifications, it is not a bad 236 * possible. After you are done making modifications, it is not a bad
235 * idea to do: 237 * idea to do:
236 * 238 *
237 * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c 239 * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
238 * 240 *
239 * and look at the resulting assemble code in serial.s. 241 * and look at the resulting assemble code in serial.s.
@@ -290,7 +292,7 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
290 } 292 }
291 293
292 status_mask = (info->read_status_mask >> 2) & 0x07; 294 status_mask = (info->read_status_mask >> 2) & 0x07;
293 295
294 for (i = 0; i < num_bytes - 1; i += 2) { 296 for (i = 0; i < num_bytes - 1; i += 2) {
295 *((unsigned short *)(pio_buf->data + i)) = 297 *((unsigned short *)(pio_buf->data + i)) =
296 inw(info->port + UART_ESI_RX); 298 inw(info->port + UART_ESI_RX);
@@ -325,8 +327,7 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
325 flag = TTY_BREAK; 327 flag = TTY_BREAK;
326 if (info->flags & ASYNC_SAK) 328 if (info->flags & ASYNC_SAK)
327 do_SAK(tty); 329 do_SAK(tty);
328 } 330 } else if (err_buf->data[i] & 0x02)
329 else if (err_buf->data[i] & 0x02)
330 flag = TTY_FRAME; 331 flag = TTY_FRAME;
331 else if (err_buf->data[i] & 0x01) 332 else if (err_buf->data[i] & 0x01)
332 flag = TTY_PARITY; 333 flag = TTY_PARITY;
@@ -341,23 +342,29 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
341 release_pio_buffer(err_buf); 342 release_pio_buffer(err_buf);
342} 343}
343 344
344static inline void receive_chars_dma(struct esp_struct *info, int num_bytes) 345static void program_isa_dma(int dma, int dir, unsigned long addr, int len)
345{ 346{
346 unsigned long flags; 347 unsigned long flags;
348
349 flags = claim_dma_lock();
350 disable_dma(dma);
351 clear_dma_ff(dma);
352 set_dma_mode(dma, dir);
353 set_dma_addr(dma, addr);
354 set_dma_count(dma, len);
355 enable_dma(dma);
356 release_dma_lock(flags);
357}
358
359static void receive_chars_dma(struct esp_struct *info, int num_bytes)
360{
347 info->stat_flags &= ~ESP_STAT_RX_TIMEOUT; 361 info->stat_flags &= ~ESP_STAT_RX_TIMEOUT;
348 dma_bytes = num_bytes; 362 dma_bytes = num_bytes;
349 info->stat_flags |= ESP_STAT_DMA_RX; 363 info->stat_flags |= ESP_STAT_DMA_RX;
350 364
351 flags=claim_dma_lock(); 365 program_isa_dma(dma, DMA_MODE_READ, isa_virt_to_bus(dma_buffer),
352 disable_dma(dma); 366 dma_bytes);
353 clear_dma_ff(dma); 367 serial_out(info, UART_ESI_CMD1, ESI_START_DMA_RX);
354 set_dma_mode(dma, DMA_MODE_READ);
355 set_dma_addr(dma, isa_virt_to_bus(dma_buffer));
356 set_dma_count(dma, dma_bytes);
357 enable_dma(dma);
358 release_dma_lock(flags);
359
360 serial_out(info, UART_ESI_CMD1, ESI_START_DMA_RX);
361} 368}
362 369
363static inline void receive_chars_dma_done(struct esp_struct *info, 370static inline void receive_chars_dma_done(struct esp_struct *info,
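This hunk introduces program_isa_dma() so the receive path no longer open-codes the DMA controller setup; receive_chars_dma() now just arms the channel and issues the ESI start command. The helper is restated below with comments on what each standard <asm/dma.h> call does.

/* The helper introduced by this hunk, restated with comments; these are
 * the standard ISA DMA calls from <asm/dma.h>, which must run under the
 * DMA spinlock taken by claim_dma_lock()/release_dma_lock(). */
#include <asm/dma.h>

static void program_isa_dma(int dma, int dir, unsigned long addr, int len)
{
        unsigned long flags;

        flags = claim_dma_lock();   /* serialise access to the DMA controller */
        disable_dma(dma);           /* channel must be masked while programmed */
        clear_dma_ff(dma);          /* reset the byte-pointer flip-flop */
        set_dma_mode(dma, dir);     /* DMA_MODE_READ or DMA_MODE_WRITE */
        set_dma_addr(dma, addr);    /* bus address of the transfer buffer */
        set_dma_count(dma, len);    /* transfer length in bytes */
        enable_dma(dma);            /* unmask the channel; transfer is armed */
        release_dma_lock(flags);
}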
@@ -366,22 +373,22 @@ static inline void receive_chars_dma_done(struct esp_struct *info,
366 struct tty_struct *tty = info->tty; 373 struct tty_struct *tty = info->tty;
367 int num_bytes; 374 int num_bytes;
368 unsigned long flags; 375 unsigned long flags;
369 376
370 flags=claim_dma_lock(); 377 flags = claim_dma_lock();
371 disable_dma(dma); 378 disable_dma(dma);
372 clear_dma_ff(dma); 379 clear_dma_ff(dma);
373 380
374 info->stat_flags &= ~ESP_STAT_DMA_RX; 381 info->stat_flags &= ~ESP_STAT_DMA_RX;
375 num_bytes = dma_bytes - get_dma_residue(dma); 382 num_bytes = dma_bytes - get_dma_residue(dma);
376 release_dma_lock(flags); 383 release_dma_lock(flags);
377 384
378 info->icount.rx += num_bytes; 385 info->icount.rx += num_bytes;
379 386
380 if (num_bytes > 0) { 387 if (num_bytes > 0) {
381 tty_insert_flip_string(tty, dma_buffer, num_bytes - 1); 388 tty_insert_flip_string(tty, dma_buffer, num_bytes - 1);
382 389
383 status &= (0x1c & info->read_status_mask); 390 status &= (0x1c & info->read_status_mask);
384 391
385 /* Is the status significant or do we throw the last byte ? */ 392 /* Is the status significant or do we throw the last byte ? */
386 if (!(status & info->ignore_status_mask)) { 393 if (!(status & info->ignore_status_mask)) {
387 int statflag = 0; 394 int statflag = 0;
@@ -393,13 +400,13 @@ static inline void receive_chars_dma_done(struct esp_struct *info,
393 do_SAK(tty); 400 do_SAK(tty);
394 } else if (status & 0x08) { 401 } else if (status & 0x08) {
395 statflag = TTY_FRAME; 402 statflag = TTY_FRAME;
396 (info->icount.frame)++; 403 info->icount.frame++;
397 } 404 } else if (status & 0x04) {
398 else if (status & 0x04) {
399 statflag = TTY_PARITY; 405 statflag = TTY_PARITY;
400 (info->icount.parity)++; 406 info->icount.parity++;
401 } 407 }
402 tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], statflag); 408 tty_insert_flip_char(tty, dma_buffer[num_bytes - 1],
409 statflag);
403 } 410 }
404 tty_schedule_flip(tty); 411 tty_schedule_flip(tty);
405 } 412 }
@@ -484,8 +491,6 @@ static inline void transmit_chars_pio(struct esp_struct *info,
484/* Caller must hold info->lock */ 491/* Caller must hold info->lock */
485static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes) 492static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes)
486{ 493{
487 unsigned long flags;
488
489 dma_bytes = num_bytes; 494 dma_bytes = num_bytes;
490 495
491 if (info->xmit_tail + dma_bytes <= ESP_XMIT_SIZE) { 496 if (info->xmit_tail + dma_bytes <= ESP_XMIT_SIZE) {
@@ -517,26 +522,18 @@ static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes)
517 } 522 }
518 523
519 info->stat_flags |= ESP_STAT_DMA_TX; 524 info->stat_flags |= ESP_STAT_DMA_TX;
520 525
521 flags=claim_dma_lock(); 526 program_isa_dma(dma, DMA_MODE_WRITE, isa_virt_to_bus(dma_buffer),
522 disable_dma(dma); 527 dma_bytes);
523 clear_dma_ff(dma); 528 serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
524 set_dma_mode(dma, DMA_MODE_WRITE);
525 set_dma_addr(dma, isa_virt_to_bus(dma_buffer));
526 set_dma_count(dma, dma_bytes);
527 enable_dma(dma);
528 release_dma_lock(flags);
529
530 serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
531} 529}
532 530
533static inline void transmit_chars_dma_done(struct esp_struct *info) 531static inline void transmit_chars_dma_done(struct esp_struct *info)
534{ 532{
535 int num_bytes; 533 int num_bytes;
536 unsigned long flags; 534 unsigned long flags;
537
538 535
539 flags=claim_dma_lock(); 536 flags = claim_dma_lock();
540 disable_dma(dma); 537 disable_dma(dma);
541 clear_dma_ff(dma); 538 clear_dma_ff(dma);
542 539
@@ -547,27 +544,21 @@ static inline void transmit_chars_dma_done(struct esp_struct *info)
547 if (dma_bytes != num_bytes) { 544 if (dma_bytes != num_bytes) {
548 dma_bytes -= num_bytes; 545 dma_bytes -= num_bytes;
549 memmove(dma_buffer, dma_buffer + num_bytes, dma_bytes); 546 memmove(dma_buffer, dma_buffer + num_bytes, dma_bytes);
550 547
551 flags=claim_dma_lock(); 548 program_isa_dma(dma, DMA_MODE_WRITE,
552 disable_dma(dma); 549 isa_virt_to_bus(dma_buffer), dma_bytes);
553 clear_dma_ff(dma); 550
554 set_dma_mode(dma, DMA_MODE_WRITE); 551 serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
555 set_dma_addr(dma, isa_virt_to_bus(dma_buffer));
556 set_dma_count(dma, dma_bytes);
557 enable_dma(dma);
558 release_dma_lock(flags);
559
560 serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
561 } else { 552 } else {
562 dma_bytes = 0; 553 dma_bytes = 0;
563 info->stat_flags &= ~ESP_STAT_DMA_TX; 554 info->stat_flags &= ~ESP_STAT_DMA_TX;
564 } 555 }
565} 556}
566 557
567static inline void check_modem_status(struct esp_struct *info) 558static void check_modem_status(struct esp_struct *info)
568{ 559{
569 int status; 560 int status;
570 561
571 serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT); 562 serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT);
572 status = serial_in(info, UART_ESI_STAT2); 563 status = serial_in(info, UART_ESI_STAT2);
573 564
@@ -588,7 +579,7 @@ static inline void check_modem_status(struct esp_struct *info)
588#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR)) 579#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
589 printk("ttys%d CD now %s...", info->line, 580 printk("ttys%d CD now %s...", info->line,
590 (status & UART_MSR_DCD) ? "on" : "off"); 581 (status & UART_MSR_DCD) ? "on" : "off");
591#endif 582#endif
592 if (status & UART_MSR_DCD) 583 if (status & UART_MSR_DCD)
593 wake_up_interruptible(&info->open_wait); 584 wake_up_interruptible(&info->open_wait);
594 else { 585 else {
@@ -605,7 +596,7 @@ static inline void check_modem_status(struct esp_struct *info)
605 */ 596 */
606static irqreturn_t rs_interrupt_single(int irq, void *dev_id) 597static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
607{ 598{
608 struct esp_struct * info; 599 struct esp_struct *info;
609 unsigned err_status; 600 unsigned err_status;
610 unsigned int scratch; 601 unsigned int scratch;
611 602
@@ -617,7 +608,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
617 scratch = serial_in(info, UART_ESI_SID); 608 scratch = serial_in(info, UART_ESI_SID);
618 609
619 spin_lock(&info->lock); 610 spin_lock(&info->lock);
620 611
621 if (!info->tty) { 612 if (!info->tty) {
622 spin_unlock(&info->lock); 613 spin_unlock(&info->lock);
623 return IRQ_NONE; 614 return IRQ_NONE;
@@ -637,7 +628,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
637 if (err_status & 0x80) /* Start break */ 628 if (err_status & 0x80) /* Start break */
638 wake_up_interruptible(&info->break_wait); 629 wake_up_interruptible(&info->break_wait);
639 } 630 }
640 631
641 if ((scratch & 0x88) || /* DMA completed or timed out */ 632 if ((scratch & 0x88) || /* DMA completed or timed out */
642 (err_status & 0x1c) /* receive error */) { 633 (err_status & 0x1c) /* receive error */) {
643 if (info->stat_flags & ESP_STAT_DMA_RX) 634 if (info->stat_flags & ESP_STAT_DMA_RX)
@@ -667,7 +658,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
667 receive_chars_dma(info, num_bytes); 658 receive_chars_dma(info, num_bytes);
668 } 659 }
669 } 660 }
670 661
671 if (!(info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) && 662 if (!(info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) &&
672 (scratch & 0x02) && (info->IER & UART_IER_THRI)) { 663 (scratch & 0x02) && (info->IER & UART_IER_THRI)) {
673 if ((info->xmit_cnt <= 0) || info->tty->stopped) { 664 if ((info->xmit_cnt <= 0) || info->tty->stopped) {
@@ -722,11 +713,11 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
722 * --------------------------------------------------------------- 713 * ---------------------------------------------------------------
723 */ 714 */
724 715
725static inline void esp_basic_init(struct esp_struct * info) 716static void esp_basic_init(struct esp_struct *info)
726{ 717{
727 /* put ESPC in enhanced mode */ 718 /* put ESPC in enhanced mode */
728 serial_out(info, UART_ESI_CMD1, ESI_SET_MODE); 719 serial_out(info, UART_ESI_CMD1, ESI_SET_MODE);
729 720
730 if (info->stat_flags & ESP_STAT_NEVER_DMA) 721 if (info->stat_flags & ESP_STAT_NEVER_DMA)
731 serial_out(info, UART_ESI_CMD2, 0x01); 722 serial_out(info, UART_ESI_CMD2, 0x01);
732 else 723 else
@@ -783,13 +774,13 @@ static inline void esp_basic_init(struct esp_struct * info)
783 serial_out(info, UART_ESI_CMD2, 0xff); 774 serial_out(info, UART_ESI_CMD2, 0xff);
784} 775}
785 776
786static int startup(struct esp_struct * info) 777static int startup(struct esp_struct *info)
787{ 778{
788 unsigned long flags; 779 unsigned long flags;
789 int retval=0; 780 int retval = 0;
790 unsigned int num_chars; 781 unsigned int num_chars;
791 782
792 spin_lock_irqsave(&info->lock, flags); 783 spin_lock_irqsave(&info->lock, flags);
793 784
794 if (info->flags & ASYNC_INITIALIZED) 785 if (info->flags & ASYNC_INITIALIZED)
795 goto out; 786 goto out;
@@ -802,7 +793,8 @@ static int startup(struct esp_struct * info)
802 } 793 }
803 794
804#ifdef SERIAL_DEBUG_OPEN 795#ifdef SERIAL_DEBUG_OPEN
805 printk("starting up ttys%d (irq %d)...", info->line, info->irq); 796 printk(KERN_DEBUG "starting up ttys%d (irq %d)...",
797 info->line, info->irq);
806#endif 798#endif
807 799
808 /* Flush the RX buffer. Using the ESI flush command may cause */ 800 /* Flush the RX buffer. Using the ESI flush command may cause */
@@ -863,7 +855,7 @@ static int startup(struct esp_struct * info)
863 dma_buffer = NULL; 855 dma_buffer = NULL;
864 info->stat_flags |= ESP_STAT_USE_PIO; 856 info->stat_flags |= ESP_STAT_USE_PIO;
865 } 857 }
866 858
867 } 859 }
868 860
869 info->MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2; 861 info->MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
@@ -872,7 +864,7 @@ static int startup(struct esp_struct * info)
872 serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); 864 serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
873 serial_out(info, UART_ESI_CMD2, UART_MCR); 865 serial_out(info, UART_ESI_CMD2, UART_MCR);
874 serial_out(info, UART_ESI_CMD2, info->MCR); 866 serial_out(info, UART_ESI_CMD2, info->MCR);
875 867
876 /* 868 /*
877 * Finally, enable interrupts 869 * Finally, enable interrupts
878 */ 870 */
@@ -881,7 +873,7 @@ static int startup(struct esp_struct * info)
881 UART_IER_DMA_TC; 873 UART_IER_DMA_TC;
882 serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); 874 serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
883 serial_out(info, UART_ESI_CMD2, info->IER); 875 serial_out(info, UART_ESI_CMD2, info->IER);
884 876
885 if (info->tty) 877 if (info->tty)
886 clear_bit(TTY_IO_ERROR, &info->tty->flags); 878 clear_bit(TTY_IO_ERROR, &info->tty->flags);
887 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 879 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
@@ -900,7 +892,7 @@ static int startup(struct esp_struct * info)
900 if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) 892 if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
901 info->tty->alt_speed = 460800; 893 info->tty->alt_speed = 460800;
902 } 894 }
903 895
904 /* 896 /*
905 * set the speed of the serial port 897 * set the speed of the serial port
906 */ 898 */
@@ -918,7 +910,7 @@ out_unlocked:
918 * This routine will shutdown a serial port; interrupts are disabled, and 910 * This routine will shutdown a serial port; interrupts are disabled, and
919 * DTR is dropped if the hangup on close termio flag is on. 911 * DTR is dropped if the hangup on close termio flag is on.
920 */ 912 */
921static void shutdown(struct esp_struct * info) 913static void shutdown(struct esp_struct *info)
922{ 914{
923 unsigned long flags, f; 915 unsigned long flags, f;
924 916
@@ -929,7 +921,7 @@ static void shutdown(struct esp_struct * info)
929 printk("Shutting down serial port %d (irq %d)....", info->line, 921 printk("Shutting down serial port %d (irq %d)....", info->line,
930 info->irq); 922 info->irq);
931#endif 923#endif
932 924
933 spin_lock_irqsave(&info->lock, flags); 925 spin_lock_irqsave(&info->lock, flags);
934 /* 926 /*
935 * clear delta_msr_wait queue to avoid mem leaks: we may free the irq 927 * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
@@ -941,14 +933,14 @@ static void shutdown(struct esp_struct * info)
941 /* stop a DMA transfer on the port being closed */ 933 /* stop a DMA transfer on the port being closed */
942 /* DMA lock is higher priority always */ 934 /* DMA lock is higher priority always */
943 if (info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) { 935 if (info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) {
944 f=claim_dma_lock(); 936 f = claim_dma_lock();
945 disable_dma(dma); 937 disable_dma(dma);
946 clear_dma_ff(dma); 938 clear_dma_ff(dma);
947 release_dma_lock(f); 939 release_dma_lock(f);
948 940
949 dma_bytes = 0; 941 dma_bytes = 0;
950 } 942 }
951 943
952 /* 944 /*
953 * Free the IRQ 945 * Free the IRQ
954 */ 946 */
@@ -970,7 +962,7 @@ static void shutdown(struct esp_struct * info)
970 free_pages((unsigned long)dma_buffer, 962 free_pages((unsigned long)dma_buffer,
971 get_order(DMA_BUFFER_SZ)); 963 get_order(DMA_BUFFER_SZ));
972 dma_buffer = NULL; 964 dma_buffer = NULL;
973 } 965 }
974 } 966 }
975 967
976 if (info->xmit_buf) { 968 if (info->xmit_buf) {
@@ -992,7 +984,7 @@ static void shutdown(struct esp_struct * info)
992 984
993 if (info->tty) 985 if (info->tty)
994 set_bit(TTY_IO_ERROR, &info->tty->flags); 986 set_bit(TTY_IO_ERROR, &info->tty->flags);
995 987
996 info->flags &= ~ASYNC_INITIALIZED; 988 info->flags &= ~ASYNC_INITIALIZED;
997 spin_unlock_irqrestore(&info->lock, flags); 989 spin_unlock_irqrestore(&info->lock, flags);
998} 990}
@@ -1005,7 +997,7 @@ static void change_speed(struct esp_struct *info)
1005{ 997{
1006 unsigned short port; 998 unsigned short port;
1007 int quot = 0; 999 int quot = 0;
1008 unsigned cflag,cval; 1000 unsigned cflag, cval;
1009 int baud, bits; 1001 int baud, bits;
1010 unsigned char flow1 = 0, flow2 = 0; 1002 unsigned char flow1 = 0, flow2 = 0;
1011 unsigned long flags; 1003 unsigned long flags;
@@ -1014,14 +1006,14 @@ static void change_speed(struct esp_struct *info)
1014 return; 1006 return;
1015 cflag = info->tty->termios->c_cflag; 1007 cflag = info->tty->termios->c_cflag;
1016 port = info->port; 1008 port = info->port;
1017 1009
1018 /* byte size and parity */ 1010 /* byte size and parity */
1019 switch (cflag & CSIZE) { 1011 switch (cflag & CSIZE) {
1020 case CS5: cval = 0x00; bits = 7; break; 1012 case CS5: cval = 0x00; bits = 7; break;
1021 case CS6: cval = 0x01; bits = 8; break; 1013 case CS6: cval = 0x01; bits = 8; break;
1022 case CS7: cval = 0x02; bits = 9; break; 1014 case CS7: cval = 0x02; bits = 9; break;
1023 case CS8: cval = 0x03; bits = 10; break; 1015 case CS8: cval = 0x03; bits = 10; break;
1024 default: cval = 0x00; bits = 7; break; 1016 default: cval = 0x00; bits = 7; break;
1025 } 1017 }
1026 if (cflag & CSTOPB) { 1018 if (cflag & CSTOPB) {
1027 cval |= 0x04; 1019 cval |= 0x04;
@@ -1037,14 +1029,12 @@ static void change_speed(struct esp_struct *info)
1037 if (cflag & CMSPAR) 1029 if (cflag & CMSPAR)
1038 cval |= UART_LCR_SPAR; 1030 cval |= UART_LCR_SPAR;
1039#endif 1031#endif
1040
1041 baud = tty_get_baud_rate(info->tty); 1032 baud = tty_get_baud_rate(info->tty);
1042 if (baud == 38400 && 1033 if (baud == 38400 &&
1043 ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)) 1034 ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST))
1044 quot = info->custom_divisor; 1035 quot = info->custom_divisor;
1045 else { 1036 else {
1046 if (baud == 134) 1037 if (baud == 134) /* Special case since 134 is really 134.5 */
1047 /* Special case since 134 is really 134.5 */
1048 quot = (2*BASE_BAUD / 269); 1038 quot = (2*BASE_BAUD / 269);
1049 else if (baud) 1039 else if (baud)
1050 quot = BASE_BAUD / baud; 1040 quot = BASE_BAUD / baud;
@@ -1052,7 +1042,12 @@ static void change_speed(struct esp_struct *info)
1052 /* If the quotient is ever zero, default to 9600 bps */ 1042 /* If the quotient is ever zero, default to 9600 bps */
1053 if (!quot) 1043 if (!quot)
1054 quot = BASE_BAUD / 9600; 1044 quot = BASE_BAUD / 9600;
1055 1045
1046 if (baud) {
1047 /* Actual rate */
1048 baud = BASE_BAUD/quot;
1049 tty_encode_baud_rate(info->tty, baud, baud);
1050 }
1056 info->timeout = ((1024 * HZ * bits * quot) / BASE_BAUD) + (HZ / 50); 1051 info->timeout = ((1024 * HZ * bits * quot) / BASE_BAUD) + (HZ / 50);
1057 1052
1058 /* CTS flow control flag and modem status interrupts */ 1053 /* CTS flow control flag and modem status interrupts */
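The added lines report the rate the hardware will actually achieve back to the tty layer: the driver can only program an integer divisor of BASE_BAUD, so after computing quot it re-derives baud as BASE_BAUD/quot and hands that to tty_encode_baud_rate(). The standalone sketch below walks the same arithmetic; the BASE_BAUD value used here is illustrative, not the ESP card's real base rate.

/* Standalone illustration of the divisor rounding the new lines account
 * for: only integer divisors of BASE_BAUD can be programmed, so the
 * achieved rate is BASE_BAUD/quot.  BASE_BAUD here is illustrative. */
#include <stdio.h>

#define BASE_BAUD 115200

int main(void)
{
        int requested[] = { 9600, 38400, 57600, 134 };
        int i;

        for (i = 0; i < 4; i++) {
                int baud = requested[i];
                int quot;

                if (baud == 134)                /* 134 really means 134.5 */
                        quot = 2 * BASE_BAUD / 269;
                else
                        quot = BASE_BAUD / baud;
                if (!quot)                      /* fall back to 9600 bps */
                        quot = BASE_BAUD / 9600;

                printf("requested %6d -> divisor %4d -> actual %6d\n",
                       baud, quot, BASE_BAUD / quot);
        }
        return 0;
}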
@@ -1066,10 +1061,8 @@ static void change_speed(struct esp_struct *info)
1066 info->flags &= ~ASYNC_CTS_FLOW; 1061 info->flags &= ~ASYNC_CTS_FLOW;
1067 if (cflag & CLOCAL) 1062 if (cflag & CLOCAL)
1068 info->flags &= ~ASYNC_CHECK_CD; 1063 info->flags &= ~ASYNC_CHECK_CD;
1069 else { 1064 else
1070 info->flags |= ASYNC_CHECK_CD; 1065 info->flags |= ASYNC_CHECK_CD;
1071 /* info->IER |= UART_IER_MSI; */
1072 }
1073 1066
1074 /* 1067 /*
1075 * Set up parity check flag 1068 * Set up parity check flag
@@ -1079,7 +1072,7 @@ static void change_speed(struct esp_struct *info)
1079 info->read_status_mask |= UART_LSR_FE | UART_LSR_PE; 1072 info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
1080 if (I_BRKINT(info->tty) || I_PARMRK(info->tty)) 1073 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
1081 info->read_status_mask |= UART_LSR_BI; 1074 info->read_status_mask |= UART_LSR_BI;
1082 1075
1083 info->ignore_status_mask = 0; 1076 info->ignore_status_mask = 0;
1084#if 0 1077#if 0
1085 /* This should be safe, but for some broken bits of hardware... */ 1078 /* This should be safe, but for some broken bits of hardware... */
@@ -1092,7 +1085,7 @@ static void change_speed(struct esp_struct *info)
1092 info->ignore_status_mask |= UART_LSR_BI; 1085 info->ignore_status_mask |= UART_LSR_BI;
1093 info->read_status_mask |= UART_LSR_BI; 1086 info->read_status_mask |= UART_LSR_BI;
1094 /* 1087 /*
1095 * If we're ignore parity and break indicators, ignore 1088 * If we're ignore parity and break indicators, ignore
1096 * overruns too. (For real raw support). 1089 * overruns too. (For real raw support).
1097 */ 1090 */
1098 if (I_IGNPAR(info->tty)) { 1091 if (I_IGNPAR(info->tty)) {
@@ -1130,19 +1123,19 @@ static void change_speed(struct esp_struct *info)
1130 serial_out(info, UART_ESI_CMD2, 0x10); 1123 serial_out(info, UART_ESI_CMD2, 0x10);
1131 serial_out(info, UART_ESI_CMD2, 0x21); 1124 serial_out(info, UART_ESI_CMD2, 0x21);
1132 switch (cflag & CSIZE) { 1125 switch (cflag & CSIZE) {
1133 case CS5: 1126 case CS5:
1134 serial_out(info, UART_ESI_CMD2, 0x1f); 1127 serial_out(info, UART_ESI_CMD2, 0x1f);
1135 break; 1128 break;
1136 case CS6: 1129 case CS6:
1137 serial_out(info, UART_ESI_CMD2, 0x3f); 1130 serial_out(info, UART_ESI_CMD2, 0x3f);
1138 break; 1131 break;
1139 case CS7: 1132 case CS7:
1140 case CS8: 1133 case CS8:
1141 serial_out(info, UART_ESI_CMD2, 0x7f); 1134 serial_out(info, UART_ESI_CMD2, 0x7f);
1142 break; 1135 break;
1143 default: 1136 default:
1144 serial_out(info, UART_ESI_CMD2, 0xff); 1137 serial_out(info, UART_ESI_CMD2, 0xff);
1145 break; 1138 break;
1146 } 1139 }
1147 } 1140 }
1148 1141
@@ -1156,31 +1149,34 @@ static void change_speed(struct esp_struct *info)
1156 spin_unlock_irqrestore(&info->lock, flags); 1149 spin_unlock_irqrestore(&info->lock, flags);
1157} 1150}
1158 1151
1159static void rs_put_char(struct tty_struct *tty, unsigned char ch) 1152static int rs_put_char(struct tty_struct *tty, unsigned char ch)
1160{ 1153{
1161 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 1154 struct esp_struct *info = tty->driver_data;
1162 unsigned long flags; 1155 unsigned long flags;
1156 int ret = 0;
1163 1157
1164 if (serial_paranoia_check(info, tty->name, "rs_put_char")) 1158 if (serial_paranoia_check(info, tty->name, "rs_put_char"))
1165 return; 1159 return 0;
1166 1160
1167 if (!info->xmit_buf) 1161 if (!info->xmit_buf)
1168 return; 1162 return 0;
1169 1163
1170 spin_lock_irqsave(&info->lock, flags); 1164 spin_lock_irqsave(&info->lock, flags);
1171 if (info->xmit_cnt < ESP_XMIT_SIZE - 1) { 1165 if (info->xmit_cnt < ESP_XMIT_SIZE - 1) {
1172 info->xmit_buf[info->xmit_head++] = ch; 1166 info->xmit_buf[info->xmit_head++] = ch;
1173 info->xmit_head &= ESP_XMIT_SIZE-1; 1167 info->xmit_head &= ESP_XMIT_SIZE-1;
1174 info->xmit_cnt++; 1168 info->xmit_cnt++;
1169 ret = 1;
1175 } 1170 }
1176 spin_unlock_irqrestore(&info->lock, flags); 1171 spin_unlock_irqrestore(&info->lock, flags);
1172 return ret;
1177} 1173}
1178 1174
1179static void rs_flush_chars(struct tty_struct *tty) 1175static void rs_flush_chars(struct tty_struct *tty)
1180{ 1176{
1181 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 1177 struct esp_struct *info = tty->driver_data;
1182 unsigned long flags; 1178 unsigned long flags;
1183 1179
1184 if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) 1180 if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
1185 return; 1181 return;
1186 1182
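rs_put_char() now returns 1 when the byte was queued and 0 when the transmit ring is full, matching the int-returning put_char tty operation. The standalone sketch below shows the same power-of-two ring arithmetic; XMIT_SIZE is an assumed stand-in for ESP_XMIT_SIZE, and one slot is deliberately kept free so a full ring is distinguishable from an empty one.

/* Standalone sketch of the circular transmit buffer rs_put_char() uses;
 * the size is assumed to be a power of two so the head index wraps with
 * a mask, and one slot stays free to distinguish full from empty. */
#include <stdio.h>

#define XMIT_SIZE 4096          /* illustrative; stands in for ESP_XMIT_SIZE */

static unsigned char xmit_buf[XMIT_SIZE];
static int xmit_head, xmit_cnt;

static int put_char(unsigned char ch)
{
        if (xmit_cnt >= XMIT_SIZE - 1)
                return 0;               /* buffer full: byte not accepted */

        xmit_buf[xmit_head++] = ch;
        xmit_head &= XMIT_SIZE - 1;     /* wrap around the ring */
        xmit_cnt++;
        return 1;                       /* one byte queued */
}

int main(void)
{
        int queued = 0, i;

        for (i = 0; i < XMIT_SIZE + 10; i++)
                queued += put_char('x');

        printf("queued %d of %d bytes (one slot stays free)\n",
               queued, XMIT_SIZE + 10);
        return 0;
}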
@@ -1198,11 +1194,11 @@ out:
1198 spin_unlock_irqrestore(&info->lock, flags); 1194 spin_unlock_irqrestore(&info->lock, flags);
1199} 1195}
1200 1196
1201static int rs_write(struct tty_struct * tty, 1197static int rs_write(struct tty_struct *tty,
1202 const unsigned char *buf, int count) 1198 const unsigned char *buf, int count)
1203{ 1199{
1204 int c, t, ret = 0; 1200 int c, t, ret = 0;
1205 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 1201 struct esp_struct *info = tty->driver_data;
1206 unsigned long flags; 1202 unsigned long flags;
1207 1203
1208 if (serial_paranoia_check(info, tty->name, "rs_write")) 1204 if (serial_paranoia_check(info, tty->name, "rs_write"))
@@ -1210,19 +1206,19 @@ static int rs_write(struct tty_struct * tty,
1210 1206
1211 if (!info->xmit_buf) 1207 if (!info->xmit_buf)
1212 return 0; 1208 return 0;
1213 1209
1214 while (1) { 1210 while (1) {
1215 /* Thanks to R. Wolff for suggesting how to do this with */ 1211 /* Thanks to R. Wolff for suggesting how to do this with */
1216 /* interrupts enabled */ 1212 /* interrupts enabled */
1217 1213
1218 c = count; 1214 c = count;
1219 t = ESP_XMIT_SIZE - info->xmit_cnt - 1; 1215 t = ESP_XMIT_SIZE - info->xmit_cnt - 1;
1220 1216
1221 if (t < c) 1217 if (t < c)
1222 c = t; 1218 c = t;
1223 1219
1224 t = ESP_XMIT_SIZE - info->xmit_head; 1220 t = ESP_XMIT_SIZE - info->xmit_head;
1225 1221
1226 if (t < c) 1222 if (t < c)
1227 c = t; 1223 c = t;
1228 1224
@@ -1252,10 +1248,10 @@ static int rs_write(struct tty_struct * tty,
1252 1248
1253static int rs_write_room(struct tty_struct *tty) 1249static int rs_write_room(struct tty_struct *tty)
1254{ 1250{
1255 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 1251 struct esp_struct *info = tty->driver_data;
1256 int ret; 1252 int ret;
1257 unsigned long flags; 1253 unsigned long flags;
1258 1254
1259 if (serial_paranoia_check(info, tty->name, "rs_write_room")) 1255 if (serial_paranoia_check(info, tty->name, "rs_write_room"))
1260 return 0; 1256 return 0;
1261 1257
@@ -1270,8 +1266,8 @@ static int rs_write_room(struct tty_struct *tty)
1270 1266
1271static int rs_chars_in_buffer(struct tty_struct *tty) 1267static int rs_chars_in_buffer(struct tty_struct *tty)
1272{ 1268{
1273 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 1269 struct esp_struct *info = tty->driver_data;
1274 1270
1275 if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer")) 1271 if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
1276 return 0; 1272 return 0;
1277 return info->xmit_cnt; 1273 return info->xmit_cnt;
@@ -1279,9 +1275,9 @@ static int rs_chars_in_buffer(struct tty_struct *tty)
1279 1275
1280static void rs_flush_buffer(struct tty_struct *tty) 1276static void rs_flush_buffer(struct tty_struct *tty)
1281{ 1277{
1282 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 1278 struct esp_struct *info = tty->driver_data;
1283 unsigned long flags; 1279 unsigned long flags;
1284 1280
1285 if (serial_paranoia_check(info, tty->name, "rs_flush_buffer")) 1281 if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
1286 return; 1282 return;
1287 spin_lock_irqsave(&info->lock, flags); 1283 spin_lock_irqsave(&info->lock, flags);
@@ -1293,20 +1289,20 @@ static void rs_flush_buffer(struct tty_struct *tty)
1293/* 1289/*
1294 * ------------------------------------------------------------ 1290 * ------------------------------------------------------------
1295 * rs_throttle() 1291 * rs_throttle()
1296 * 1292 *
1297 * This routine is called by the upper-layer tty layer to signal that 1293 * This routine is called by the upper-layer tty layer to signal that
1298 * incoming characters should be throttled. 1294 * incoming characters should be throttled.
1299 * ------------------------------------------------------------ 1295 * ------------------------------------------------------------
1300 */ 1296 */
1301static void rs_throttle(struct tty_struct * tty) 1297static void rs_throttle(struct tty_struct *tty)
1302{ 1298{
1303 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 1299 struct esp_struct *info = tty->driver_data;
1304 unsigned long flags; 1300 unsigned long flags;
1305#ifdef SERIAL_DEBUG_THROTTLE 1301#ifdef SERIAL_DEBUG_THROTTLE
1306 char buf[64]; 1302 char buf[64];
1307 1303
1308 printk("throttle %s: %d....\n", tty_name(tty, buf), 1304 printk("throttle %s: %d....\n", tty_name(tty, buf),
1309 tty->ldisc.chars_in_buffer(tty)); 1305 tty_chars_in_buffer(tty));
1310#endif 1306#endif
1311 1307
1312 if (serial_paranoia_check(info, tty->name, "rs_throttle")) 1308 if (serial_paranoia_check(info, tty->name, "rs_throttle"))
@@ -1321,20 +1317,20 @@ static void rs_throttle(struct tty_struct * tty)
1321 spin_unlock_irqrestore(&info->lock, flags); 1317 spin_unlock_irqrestore(&info->lock, flags);
1322} 1318}
1323 1319
1324static void rs_unthrottle(struct tty_struct * tty) 1320static void rs_unthrottle(struct tty_struct *tty)
1325{ 1321{
1326 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 1322 struct esp_struct *info = tty->driver_data;
1327 unsigned long flags; 1323 unsigned long flags;
1328#ifdef SERIAL_DEBUG_THROTTLE 1324#ifdef SERIAL_DEBUG_THROTTLE
1329 char buf[64]; 1325 char buf[64];
1330 1326
1331 printk("unthrottle %s: %d....\n", tty_name(tty, buf), 1327 printk(KERN_DEBUG "unthrottle %s: %d....\n", tty_name(tty, buf),
1332 tty->ldisc.chars_in_buffer(tty)); 1328 tty_chars_in_buffer(tty));
1333#endif 1329#endif
1334 1330
1335 if (serial_paranoia_check(info, tty->name, "rs_unthrottle")) 1331 if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
1336 return; 1332 return;
1337 1333
1338 spin_lock_irqsave(&info->lock, flags); 1334 spin_lock_irqsave(&info->lock, flags);
1339 info->IER |= UART_IER_RDI; 1335 info->IER |= UART_IER_RDI;
1340 serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); 1336 serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
@@ -1350,11 +1346,12 @@ static void rs_unthrottle(struct tty_struct * tty)
1350 * ------------------------------------------------------------ 1346 * ------------------------------------------------------------
1351 */ 1347 */
1352 1348
1353static int get_serial_info(struct esp_struct * info, 1349static int get_serial_info(struct esp_struct *info,
1354 struct serial_struct __user *retinfo) 1350 struct serial_struct __user *retinfo)
1355{ 1351{
1356 struct serial_struct tmp; 1352 struct serial_struct tmp;
1357 1353
1354 lock_kernel();
1358 memset(&tmp, 0, sizeof(tmp)); 1355 memset(&tmp, 0, sizeof(tmp));
1359 tmp.type = PORT_16550A; 1356 tmp.type = PORT_16550A;
1360 tmp.line = info->line; 1357 tmp.line = info->line;
@@ -1367,20 +1364,22 @@ static int get_serial_info(struct esp_struct * info,
1367 tmp.closing_wait = info->closing_wait; 1364 tmp.closing_wait = info->closing_wait;
1368 tmp.custom_divisor = info->custom_divisor; 1365 tmp.custom_divisor = info->custom_divisor;
1369 tmp.hub6 = 0; 1366 tmp.hub6 = 0;
1370 if (copy_to_user(retinfo,&tmp,sizeof(*retinfo))) 1367 unlock_kernel();
1368 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
1371 return -EFAULT; 1369 return -EFAULT;
1372 return 0; 1370 return 0;
1373} 1371}
1374 1372
1375static int get_esp_config(struct esp_struct * info, 1373static int get_esp_config(struct esp_struct *info,
1376 struct hayes_esp_config __user *retinfo) 1374 struct hayes_esp_config __user *retinfo)
1377{ 1375{
1378 struct hayes_esp_config tmp; 1376 struct hayes_esp_config tmp;
1379 1377
1380 if (!retinfo) 1378 if (!retinfo)
1381 return -EFAULT; 1379 return -EFAULT;
1382 1380
1383 memset(&tmp, 0, sizeof(tmp)); 1381 memset(&tmp, 0, sizeof(tmp));
1382 lock_kernel();
1384 tmp.rx_timeout = info->config.rx_timeout; 1383 tmp.rx_timeout = info->config.rx_timeout;
1385 tmp.rx_trigger = info->config.rx_trigger; 1384 tmp.rx_trigger = info->config.rx_trigger;
1386 tmp.tx_trigger = info->config.tx_trigger; 1385 tmp.tx_trigger = info->config.tx_trigger;
@@ -1388,11 +1387,12 @@ static int get_esp_config(struct esp_struct * info,
1388 tmp.flow_on = info->config.flow_on; 1387 tmp.flow_on = info->config.flow_on;
1389 tmp.pio_threshold = info->config.pio_threshold; 1388 tmp.pio_threshold = info->config.pio_threshold;
1390 tmp.dma_channel = (info->stat_flags & ESP_STAT_NEVER_DMA ? 0 : dma); 1389 tmp.dma_channel = (info->stat_flags & ESP_STAT_NEVER_DMA ? 0 : dma);
1390 unlock_kernel();
1391 1391
1392 return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0; 1392 return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
1393} 1393}
1394 1394
1395static int set_serial_info(struct esp_struct * info, 1395static int set_serial_info(struct esp_struct *info,
1396 struct serial_struct __user *new_info) 1396 struct serial_struct __user *new_info)
1397{ 1397{
1398 struct serial_struct new_serial; 1398 struct serial_struct new_serial;
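The get_serial_info() and get_esp_config() hunks take the big kernel lock while filling a local snapshot of the driver state and drop it again before copy_to_user(), which may sleep on a page fault. A reduced sketch of that ordering follows; it assumes the 2.6-era lock_kernel()/unlock_kernel() API, and the snapshot struct and helper name are illustrative.

/* Sketch of the ordering these hunks follow: snapshot driver state under
 * the big kernel lock, drop the lock, then copy the snapshot out to user
 * space.  The struct and function name are illustrative. */
#include <linux/smp_lock.h>
#include <linux/uaccess.h>

struct cfg_snapshot { int rx_timeout, rx_trigger, tx_trigger; };

static int copy_config_to_user(struct cfg_snapshot *cfg,
                               struct cfg_snapshot __user *dest)
{
        struct cfg_snapshot tmp;

        lock_kernel();          /* serialise against other BKL users */
        tmp = *cfg;             /* take a consistent snapshot */
        unlock_kernel();        /* drop the BKL before touching user memory */

        return copy_to_user(dest, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}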
@@ -1401,7 +1401,7 @@ static int set_serial_info(struct esp_struct * info,
1401 int retval = 0; 1401 int retval = 0;
1402 struct esp_struct *current_async; 1402 struct esp_struct *current_async;
1403 1403
1404 if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) 1404 if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
1405 return -EFAULT; 1405 return -EFAULT;
1406 old_info = *info; 1406 old_info = *info;
1407 1407
@@ -1422,7 +1422,7 @@ static int set_serial_info(struct esp_struct * info,
1422 return -EINVAL; 1422 return -EINVAL;
1423 1423
1424 if (!capable(CAP_SYS_ADMIN)) { 1424 if (!capable(CAP_SYS_ADMIN)) {
1425 if (change_irq || 1425 if (change_irq ||
1426 (new_serial.close_delay != info->close_delay) || 1426 (new_serial.close_delay != info->close_delay) ||
1427 ((new_serial.flags & ~ASYNC_USR_MASK) != 1427 ((new_serial.flags & ~ASYNC_USR_MASK) !=
1428 (info->flags & ~ASYNC_USR_MASK))) 1428 (info->flags & ~ASYNC_USR_MASK)))
@@ -1507,8 +1507,8 @@ static int set_serial_info(struct esp_struct * info,
1507 return retval; 1507 return retval;
1508} 1508}
1509 1509
1510static int set_esp_config(struct esp_struct * info, 1510static int set_esp_config(struct esp_struct *info,
1511 struct hayes_esp_config __user * new_info) 1511 struct hayes_esp_config __user *new_info)
1512{ 1512{
1513 struct hayes_esp_config new_config; 1513 struct hayes_esp_config new_config;
1514 unsigned int change_dma; 1514 unsigned int change_dma;
@@ -1550,7 +1550,6 @@ static int set_esp_config(struct esp_struct * info,
1550 if (new_config.dma_channel) { 1550 if (new_config.dma_channel) {
1551 /* PIO mode to DMA mode transition OR */ 1551 /* PIO mode to DMA mode transition OR */
1552 /* change current DMA channel */ 1552 /* change current DMA channel */
1553
1554 current_async = ports; 1553 current_async = ports;
1555 1554
1556 while (current_async) { 1555 while (current_async) {
@@ -1559,16 +1558,15 @@ static int set_esp_config(struct esp_struct * info,
1559 return -EBUSY; 1558 return -EBUSY;
1560 } else if (current_async->count) 1559 } else if (current_async->count)
1561 return -EBUSY; 1560 return -EBUSY;
1562 1561
1563 current_async = 1562 current_async = current_async->next_port;
1564 current_async->next_port;
1565 } 1563 }
1566 1564
1567 shutdown(info); 1565 shutdown(info);
1568 dma = new_config.dma_channel; 1566 dma = new_config.dma_channel;
1569 info->stat_flags &= ~ESP_STAT_NEVER_DMA; 1567 info->stat_flags &= ~ESP_STAT_NEVER_DMA;
1570 1568
1571 /* all ports must use the same DMA channel */ 1569 /* all ports must use the same DMA channel */
1572 1570
1573 spin_lock_irqsave(&info->lock, flags); 1571 spin_lock_irqsave(&info->lock, flags);
1574 current_async = ports; 1572 current_async = ports;
@@ -1580,7 +1578,6 @@ static int set_esp_config(struct esp_struct * info,
1580 spin_unlock_irqrestore(&info->lock, flags); 1578 spin_unlock_irqrestore(&info->lock, flags);
1581 } else { 1579 } else {
1582 /* DMA mode to PIO mode only */ 1580 /* DMA mode to PIO mode only */
1583
1584 if (info->count > 1) 1581 if (info->count > 1)
1585 return -EBUSY; 1582 return -EBUSY;
1586 1583
@@ -1596,8 +1593,6 @@ static int set_esp_config(struct esp_struct * info,
1596 1593
1597 if ((new_config.flow_off != info->config.flow_off) || 1594 if ((new_config.flow_off != info->config.flow_off) ||
1598 (new_config.flow_on != info->config.flow_on)) { 1595 (new_config.flow_on != info->config.flow_on)) {
1599 unsigned long flags;
1600
1601 info->config.flow_off = new_config.flow_off; 1596 info->config.flow_off = new_config.flow_off;
1602 info->config.flow_on = new_config.flow_on; 1597 info->config.flow_on = new_config.flow_on;
1603 1598
@@ -1612,8 +1607,6 @@ static int set_esp_config(struct esp_struct * info,
1612 1607
1613 if ((new_config.rx_trigger != info->config.rx_trigger) || 1608 if ((new_config.rx_trigger != info->config.rx_trigger) ||
1614 (new_config.tx_trigger != info->config.tx_trigger)) { 1609 (new_config.tx_trigger != info->config.tx_trigger)) {
1615 unsigned long flags;
1616
1617 info->config.rx_trigger = new_config.rx_trigger; 1610 info->config.rx_trigger = new_config.rx_trigger;
1618 info->config.tx_trigger = new_config.tx_trigger; 1611 info->config.tx_trigger = new_config.tx_trigger;
1619 spin_lock_irqsave(&info->lock, flags); 1612 spin_lock_irqsave(&info->lock, flags);
@@ -1628,8 +1621,6 @@ static int set_esp_config(struct esp_struct * info,
1628 } 1621 }
1629 1622
1630 if (new_config.rx_timeout != info->config.rx_timeout) { 1623 if (new_config.rx_timeout != info->config.rx_timeout) {
1631 unsigned long flags;
1632
1633 info->config.rx_timeout = new_config.rx_timeout; 1624 info->config.rx_timeout = new_config.rx_timeout;
1634 spin_lock_irqsave(&info->lock, flags); 1625 spin_lock_irqsave(&info->lock, flags);
1635 1626
@@ -1657,9 +1648,9 @@ static int set_esp_config(struct esp_struct * info,
1657 * release the bus after transmitting. This must be done when 1648 * release the bus after transmitting. This must be done when
1658 * the transmit shift register is empty, not be done when the 1649 * the transmit shift register is empty, not be done when the
1659 * transmit holding register is empty. This functionality 1650 * transmit holding register is empty. This functionality
1660 * allows an RS485 driver to be written in user space. 1651 * allows an RS485 driver to be written in user space.
1661 */ 1652 */
1662static int get_lsr_info(struct esp_struct * info, unsigned int __user *value) 1653static int get_lsr_info(struct esp_struct *info, unsigned int __user *value)
1663{ 1654{
1664 unsigned char status; 1655 unsigned char status;
1665 unsigned int result; 1656 unsigned int result;
@@ -1670,17 +1661,17 @@ static int get_lsr_info(struct esp_struct * info, unsigned int __user *value)
1670 status = serial_in(info, UART_ESI_STAT1); 1661 status = serial_in(info, UART_ESI_STAT1);
1671 spin_unlock_irqrestore(&info->lock, flags); 1662 spin_unlock_irqrestore(&info->lock, flags);
1672 result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0); 1663 result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
1673 return put_user(result,value); 1664 return put_user(result, value);
1674} 1665}
1675 1666
1676 1667
1677static int esp_tiocmget(struct tty_struct *tty, struct file *file) 1668static int esp_tiocmget(struct tty_struct *tty, struct file *file)
1678{ 1669{
1679 struct esp_struct * info = (struct esp_struct *)tty->driver_data; 1670 struct esp_struct *info = tty->driver_data;
1680 unsigned char control, status; 1671 unsigned char control, status;
1681 unsigned long flags; 1672 unsigned long flags;
1682 1673
1683 if (serial_paranoia_check(info, tty->name, __FUNCTION__)) 1674 if (serial_paranoia_check(info, tty->name, __func__))
1684 return -ENODEV; 1675 return -ENODEV;
1685 if (tty->flags & (1 << TTY_IO_ERROR)) 1676 if (tty->flags & (1 << TTY_IO_ERROR))
1686 return -EIO; 1677 return -EIO;
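The paranoia checks switch from the gcc-specific __FUNCTION__ spelling to C99's __func__, which names the enclosing function as a predefined identifier. A trivial standalone example:

/* __func__ is a predefined C99 identifier naming the enclosing function;
 * unlike the gcc-only __FUNCTION__ spelling it is standard C. */
#include <stdio.h>

static void report(void)
{
        printf("called from %s\n", __func__);   /* prints "report" */
}

int main(void)
{
        report();
        return 0;
}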
@@ -1703,10 +1694,10 @@ static int esp_tiocmget(struct tty_struct *tty, struct file *file)
1703static int esp_tiocmset(struct tty_struct *tty, struct file *file, 1694static int esp_tiocmset(struct tty_struct *tty, struct file *file,
1704 unsigned int set, unsigned int clear) 1695 unsigned int set, unsigned int clear)
1705{ 1696{
1706 struct esp_struct * info = (struct esp_struct *)tty->driver_data; 1697 struct esp_struct *info = tty->driver_data;
1707 unsigned long flags; 1698 unsigned long flags;
1708 1699
1709 if (serial_paranoia_check(info, tty->name, __FUNCTION__)) 1700 if (serial_paranoia_check(info, tty->name, __func__))
1710 return -ENODEV; 1701 return -ENODEV;
1711 if (tty->flags & (1 << TTY_IO_ERROR)) 1702 if (tty->flags & (1 << TTY_IO_ERROR))
1712 return -EIO; 1703 return -EIO;
@@ -1736,9 +1727,9 @@ static int esp_tiocmset(struct tty_struct *tty, struct file *file,
1736 */ 1727 */
1737static void esp_break(struct tty_struct *tty, int break_state) 1728static void esp_break(struct tty_struct *tty, int break_state)
1738{ 1729{
1739 struct esp_struct * info = (struct esp_struct *)tty->driver_data; 1730 struct esp_struct *info = tty->driver_data;
1740 unsigned long flags; 1731 unsigned long flags;
1741 1732
1742 if (serial_paranoia_check(info, tty->name, "esp_break")) 1733 if (serial_paranoia_check(info, tty->name, "esp_break"))
1743 return; 1734 return;
1744 1735
@@ -1758,14 +1749,15 @@ static void esp_break(struct tty_struct *tty, int break_state)
1758 } 1749 }
1759} 1750}
1760 1751
1761static int rs_ioctl(struct tty_struct *tty, struct file * file, 1752static int rs_ioctl(struct tty_struct *tty, struct file *file,
1762 unsigned int cmd, unsigned long arg) 1753 unsigned int cmd, unsigned long arg)
1763{ 1754{
1764 struct esp_struct * info = (struct esp_struct *)tty->driver_data; 1755 struct esp_struct *info = tty->driver_data;
1765 struct async_icount cprev, cnow; /* kernel counter temps */ 1756 struct async_icount cprev, cnow; /* kernel counter temps */
1766 struct serial_icounter_struct __user *p_cuser; /* user space */ 1757 struct serial_icounter_struct __user *p_cuser; /* user space */
1767 void __user *argp = (void __user *)arg; 1758 void __user *argp = (void __user *)arg;
1768 unsigned long flags; 1759 unsigned long flags;
1760 int ret;
1769 1761
1770 if (serial_paranoia_check(info, tty->name, "rs_ioctl")) 1762 if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
1771 return -ENODEV; 1763 return -ENODEV;
@@ -1778,97 +1770,93 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
 		if (tty->flags & (1 << TTY_IO_ERROR))
 			return -EIO;
 	}
 
 	switch (cmd) {
 	case TIOCGSERIAL:
 		return get_serial_info(info, argp);
 	case TIOCSSERIAL:
-		return set_serial_info(info, argp);
-	case TIOCSERCONFIG:
-		/* do not reconfigure after initial configuration */
-		return 0;
-
+		lock_kernel();
+		ret = set_serial_info(info, argp);
+		unlock_kernel();
+		return ret;
 	case TIOCSERGWILD:
 		return put_user(0L, (unsigned long __user *)argp);
-
 	case TIOCSERGETLSR: /* Get line status register */
 		return get_lsr_info(info, argp);
-
 	case TIOCSERSWILD:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		return 0;
-
 	/*
 	 * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
 	 * - mask passed in arg for lines of interest
 	 *   (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
 	 * Caller should use TIOCGICOUNT to see which one it was
 	 */
 	case TIOCMIWAIT:
 		spin_lock_irqsave(&info->lock, flags);
 		cprev = info->icount;	/* note the counters on entry */
 		spin_unlock_irqrestore(&info->lock, flags);
 		while (1) {
 			/* FIXME: convert to new style wakeup */
 			interruptible_sleep_on(&info->delta_msr_wait);
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
 			spin_lock_irqsave(&info->lock, flags);
 			cnow = info->icount;	/* atomic copy */
 			spin_unlock_irqrestore(&info->lock, flags);
 			if (cnow.rng == cprev.rng &&
 			    cnow.dsr == cprev.dsr &&
 			    cnow.dcd == cprev.dcd &&
 			    cnow.cts == cprev.cts)
 				return -EIO; /* no change => error */
 			if (((arg & TIOCM_RNG) &&
 			     (cnow.rng != cprev.rng)) ||
 			    ((arg & TIOCM_DSR) &&
 			     (cnow.dsr != cprev.dsr)) ||
 			    ((arg & TIOCM_CD) &&
 			     (cnow.dcd != cprev.dcd)) ||
 			    ((arg & TIOCM_CTS) &&
-			     (cnow.cts != cprev.cts)) ) {
+			     (cnow.cts != cprev.cts))) {
 				return 0;
 			}
 			cprev = cnow;
 		}
 		/* NOTREACHED */
-
 	/*
 	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
 	 * Return: write counters to the user passed counter struct
 	 * NB: both 1->0 and 0->1 transitions are counted except for
 	 *     RI where only 0->1 is counted.
 	 */
 	case TIOCGICOUNT:
 		spin_lock_irqsave(&info->lock, flags);
 		cnow = info->icount;
 		spin_unlock_irqrestore(&info->lock, flags);
 		p_cuser = argp;
 		if (put_user(cnow.cts, &p_cuser->cts) ||
 		    put_user(cnow.dsr, &p_cuser->dsr) ||
 		    put_user(cnow.rng, &p_cuser->rng) ||
 		    put_user(cnow.dcd, &p_cuser->dcd))
 			return -EFAULT;
-
 		return 0;
 	case TIOCGHAYESESP:
 		return get_esp_config(info, argp);
 	case TIOCSHAYESESP:
-		return set_esp_config(info, argp);
-
+		lock_kernel();
+		ret = set_esp_config(info, argp);
+		unlock_kernel();
+		return ret;
 	default:
 		return -ENOIOCTLCMD;
 	}
 	return 0;
 }
 
 static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 {
-	struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+	struct esp_struct *info = tty->driver_data;
 	unsigned long flags;
 
 	change_speed(info);
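
The TIOCMIWAIT branch above keeps the old interruptible_sleep_on() call and only marks it with a FIXME. For reference, a minimal sketch of the conversion that FIXME asks for, using wait_event_interruptible() so the wake-up condition is re-evaluated under the driver's lock. The helper name is invented, struct esp_struct and its lock/icount/delta_msr_wait fields are the driver's own, and the original loop's "woken but nothing changed" -EIO case is omitted for brevity; this is not part of the patch.

/*
 * Sketch only: replace the racy interruptible_sleep_on() with
 * wait_event_interruptible(), which re-checks the condition each time
 * the interrupt handler bumps the modem-status counters.
 */
#include <linux/tty.h>
#include <linux/serial.h>		/* struct async_icount */
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hayesesp.h>		/* struct esp_struct (assumed location) */

static int esp_wait_modem_change(struct esp_struct *info, unsigned long arg)
{
	struct async_icount cprev, cnow;
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	cprev = info->icount;		/* counters at entry */
	spin_unlock_irqrestore(&info->lock, flags);

	/* returns 0 on a matching change, -ERESTARTSYS if a signal arrived */
	return wait_event_interruptible(info->delta_msr_wait, ({
		int changed;
		spin_lock_irqsave(&info->lock, flags);
		cnow = info->icount;	/* atomic copy, as in the original */
		spin_unlock_irqrestore(&info->lock, flags);
		changed = ((arg & TIOCM_RNG) && cnow.rng != cprev.rng) ||
			  ((arg & TIOCM_DSR) && cnow.dsr != cprev.dsr) ||
			  ((arg & TIOCM_CD)  && cnow.dcd != cprev.dcd) ||
			  ((arg & TIOCM_CTS) && cnow.cts != cprev.cts);
		changed;
	}));
}

Snapshotting the counters before sleeping preserves the TIOCMIWAIT semantics: the ioctl returns as soon as any requested line differs from its state at call time.
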
@@ -1905,32 +1893,33 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
1905/* 1893/*
1906 * ------------------------------------------------------------ 1894 * ------------------------------------------------------------
1907 * rs_close() 1895 * rs_close()
1908 * 1896 *
1909 * This routine is called when the serial port gets closed. First, we 1897 * This routine is called when the serial port gets closed. First, we
1910 * wait for the last remaining data to be sent. Then, we unlink its 1898 * wait for the last remaining data to be sent. Then, we unlink its
1911 * async structure from the interrupt chain if necessary, and we free 1899 * async structure from the interrupt chain if necessary, and we free
1912 * that IRQ if nothing is left in the chain. 1900 * that IRQ if nothing is left in the chain.
1913 * ------------------------------------------------------------ 1901 * ------------------------------------------------------------
1914 */ 1902 */
1915static void rs_close(struct tty_struct *tty, struct file * filp) 1903static void rs_close(struct tty_struct *tty, struct file *filp)
1916{ 1904{
1917 struct esp_struct * info = (struct esp_struct *)tty->driver_data; 1905 struct esp_struct *info = tty->driver_data;
1918 unsigned long flags; 1906 unsigned long flags;
1919 1907
1920 if (!info || serial_paranoia_check(info, tty->name, "rs_close")) 1908 if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
1921 return; 1909 return;
1922 1910
1923 spin_lock_irqsave(&info->lock, flags); 1911 spin_lock_irqsave(&info->lock, flags);
1924 1912
1925 if (tty_hung_up_p(filp)) { 1913 if (tty_hung_up_p(filp)) {
1926 DBG_CNT("before DEC-hung"); 1914 DBG_CNT("before DEC-hung");
1927 goto out; 1915 goto out;
1928 } 1916 }
1929 1917
1930#ifdef SERIAL_DEBUG_OPEN 1918#ifdef SERIAL_DEBUG_OPEN
1931 printk("rs_close ttys%d, count = %d\n", info->line, info->count); 1919 printk(KERN_DEBUG "rs_close ttys%d, count = %d\n",
1920 info->line, info->count);
1932#endif 1921#endif
1933 if ((tty->count == 1) && (info->count != 1)) { 1922 if (tty->count == 1 && info->count != 1) {
1934 /* 1923 /*
1935 * Uh, oh. tty->count is 1, which means that the tty 1924 * Uh, oh. tty->count is 1, which means that the tty
1936 * structure will be freed. Info->count should always 1925 * structure will be freed. Info->count should always
@@ -1938,12 +1927,11 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
1938 * one, we've got real problems, since it means the 1927 * one, we've got real problems, since it means the
1939 * serial port won't be shutdown. 1928 * serial port won't be shutdown.
1940 */ 1929 */
1941 printk("rs_close: bad serial port count; tty->count is 1, " 1930 printk(KERN_DEBUG "rs_close: bad serial port count; tty->count is 1, info->count is %d\n", info->count);
1942 "info->count is %d\n", info->count);
1943 info->count = 1; 1931 info->count = 1;
1944 } 1932 }
1945 if (--info->count < 0) { 1933 if (--info->count < 0) {
1946 printk("rs_close: bad serial port count for ttys%d: %d\n", 1934 printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n",
1947 info->line, info->count); 1935 info->line, info->count);
1948 info->count = 0; 1936 info->count = 0;
1949 } 1937 }
@@ -1955,7 +1943,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
1955 1943
1956 spin_unlock_irqrestore(&info->lock, flags); 1944 spin_unlock_irqrestore(&info->lock, flags);
1957 /* 1945 /*
1958 * Now we wait for the transmit buffer to clear; and we notify 1946 * Now we wait for the transmit buffer to clear; and we notify
1959 * the line discipline to only process XON/XOFF characters. 1947 * the line discipline to only process XON/XOFF characters.
1960 */ 1948 */
1961 tty->closing = 1; 1949 tty->closing = 1;
@@ -1990,16 +1978,14 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
1990 rs_wait_until_sent(tty, info->timeout); 1978 rs_wait_until_sent(tty, info->timeout);
1991 } 1979 }
1992 shutdown(info); 1980 shutdown(info);
1993 if (tty->driver->flush_buffer) 1981 rs_flush_buffer(tty);
1994 tty->driver->flush_buffer(tty);
1995 tty_ldisc_flush(tty); 1982 tty_ldisc_flush(tty);
1996 tty->closing = 0; 1983 tty->closing = 0;
1997 info->tty = NULL; 1984 info->tty = NULL;
1998 1985
1999 if (info->blocked_open) { 1986 if (info->blocked_open) {
2000 if (info->close_delay) { 1987 if (info->close_delay)
2001 msleep_interruptible(jiffies_to_msecs(info->close_delay)); 1988 msleep_interruptible(jiffies_to_msecs(info->close_delay));
2002 }
2003 wake_up_interruptible(&info->open_wait); 1989 wake_up_interruptible(&info->open_wait);
2004 } 1990 }
2005 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); 1991 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
@@ -2012,7 +1998,7 @@ out:
2012 1998
2013static void rs_wait_until_sent(struct tty_struct *tty, int timeout) 1999static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
2014{ 2000{
2015 struct esp_struct *info = (struct esp_struct *)tty->driver_data; 2001 struct esp_struct *info = tty->driver_data;
2016 unsigned long orig_jiffies, char_time; 2002 unsigned long orig_jiffies, char_time;
2017 unsigned long flags; 2003 unsigned long flags;
2018 2004
@@ -2036,10 +2022,10 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
2036 msleep_interruptible(jiffies_to_msecs(char_time)); 2022 msleep_interruptible(jiffies_to_msecs(char_time));
2037 2023
2038 if (signal_pending(current)) 2024 if (signal_pending(current))
2039 break; 2025 return;
2040 2026
2041 if (timeout && time_after(jiffies, orig_jiffies + timeout)) 2027 if (timeout && time_after(jiffies, orig_jiffies + timeout))
2042 break; 2028 return;
2043 2029
2044 spin_lock_irqsave(&info->lock, flags); 2030 spin_lock_irqsave(&info->lock, flags);
2045 serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND); 2031 serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND);
@@ -2054,11 +2040,11 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
2054 */ 2040 */
2055static void esp_hangup(struct tty_struct *tty) 2041static void esp_hangup(struct tty_struct *tty)
2056{ 2042{
2057 struct esp_struct * info = (struct esp_struct *)tty->driver_data; 2043 struct esp_struct *info = tty->driver_data;
2058 2044
2059 if (serial_paranoia_check(info, tty->name, "esp_hangup")) 2045 if (serial_paranoia_check(info, tty->name, "esp_hangup"))
2060 return; 2046 return;
2061 2047
2062 rs_flush_buffer(tty); 2048 rs_flush_buffer(tty);
2063 shutdown(info); 2049 shutdown(info);
2064 info->count = 0; 2050 info->count = 0;
@@ -2072,7 +2058,7 @@ static void esp_hangup(struct tty_struct *tty)
2072 * esp_open() and friends 2058 * esp_open() and friends
2073 * ------------------------------------------------------------ 2059 * ------------------------------------------------------------
2074 */ 2060 */
2075static int block_til_ready(struct tty_struct *tty, struct file * filp, 2061static int block_til_ready(struct tty_struct *tty, struct file *filp,
2076 struct esp_struct *info) 2062 struct esp_struct *info)
2077{ 2063{
2078 DECLARE_WAITQUEUE(wait, current); 2064 DECLARE_WAITQUEUE(wait, current);
@@ -2121,11 +2107,11 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
2121 retval = 0; 2107 retval = 0;
2122 add_wait_queue(&info->open_wait, &wait); 2108 add_wait_queue(&info->open_wait, &wait);
2123#ifdef SERIAL_DEBUG_OPEN 2109#ifdef SERIAL_DEBUG_OPEN
2124 printk("block_til_ready before block: ttys%d, count = %d\n", 2110 printk(KERN_DEBUG "block_til_ready before block: ttys%d, count = %d\n",
2125 info->line, info->count); 2111 info->line, info->count);
2126#endif 2112#endif
2127 spin_lock_irqsave(&info->lock, flags); 2113 spin_lock_irqsave(&info->lock, flags);
2128 if (!tty_hung_up_p(filp)) 2114 if (!tty_hung_up_p(filp))
2129 info->count--; 2115 info->count--;
2130 info->blocked_open++; 2116 info->blocked_open++;
2131 while (1) { 2117 while (1) {
@@ -2147,7 +2133,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
2147 if (info->flags & ASYNC_HUP_NOTIFY) 2133 if (info->flags & ASYNC_HUP_NOTIFY)
2148 retval = -EAGAIN; 2134 retval = -EAGAIN;
2149 else 2135 else
2150 retval = -ERESTARTSYS; 2136 retval = -ERESTARTSYS;
2151#else 2137#else
2152 retval = -EAGAIN; 2138 retval = -EAGAIN;
2153#endif 2139#endif
@@ -2166,7 +2152,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
2166 break; 2152 break;
2167 } 2153 }
2168#ifdef SERIAL_DEBUG_OPEN 2154#ifdef SERIAL_DEBUG_OPEN
2169 printk("block_til_ready blocking: ttys%d, count = %d\n", 2155 printk(KERN_DEBUG "block_til_ready blocking: ttys%d, count = %d\n",
2170 info->line, info->count); 2156 info->line, info->count);
2171#endif 2157#endif
2172 spin_unlock_irqrestore(&info->lock, flags); 2158 spin_unlock_irqrestore(&info->lock, flags);
@@ -2180,14 +2166,14 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
2180 info->blocked_open--; 2166 info->blocked_open--;
2181 spin_unlock_irqrestore(&info->lock, flags); 2167 spin_unlock_irqrestore(&info->lock, flags);
2182#ifdef SERIAL_DEBUG_OPEN 2168#ifdef SERIAL_DEBUG_OPEN
2183 printk("block_til_ready after blocking: ttys%d, count = %d\n", 2169 printk(KERN_DEBUG "block_til_ready after blocking: ttys%d, count = %d\n",
2184 info->line, info->count); 2170 info->line, info->count);
2185#endif 2171#endif
2186 if (retval) 2172 if (retval)
2187 return retval; 2173 return retval;
2188 info->flags |= ASYNC_NORMAL_ACTIVE; 2174 info->flags |= ASYNC_NORMAL_ACTIVE;
2189 return 0; 2175 return 0;
2190} 2176}
2191 2177
2192/* 2178/*
2193 * This routine is called whenever a serial port is opened. It 2179 * This routine is called whenever a serial port is opened. It
@@ -2195,7 +2181,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
2195 * the IRQ chain. It also performs the serial-specific 2181 * the IRQ chain. It also performs the serial-specific
2196 * initialization for the tty structure. 2182 * initialization for the tty structure.
2197 */ 2183 */
2198static int esp_open(struct tty_struct *tty, struct file * filp) 2184static int esp_open(struct tty_struct *tty, struct file *filp)
2199{ 2185{
2200 struct esp_struct *info; 2186 struct esp_struct *info;
2201 int retval, line; 2187 int retval, line;
@@ -2218,7 +2204,7 @@ static int esp_open(struct tty_struct *tty, struct file * filp)
2218 } 2204 }
2219 2205
2220#ifdef SERIAL_DEBUG_OPEN 2206#ifdef SERIAL_DEBUG_OPEN
2221 printk("esp_open %s, count = %d\n", tty->name, info->count); 2207 printk(KERN_DEBUG "esp_open %s, count = %d\n", tty->name, info->count);
2222#endif 2208#endif
2223 spin_lock_irqsave(&info->lock, flags); 2209 spin_lock_irqsave(&info->lock, flags);
2224 info->count++; 2210 info->count++;
@@ -2226,7 +2212,7 @@ static int esp_open(struct tty_struct *tty, struct file * filp)
2226 info->tty = tty; 2212 info->tty = tty;
2227 2213
2228 spin_unlock_irqrestore(&info->lock, flags); 2214 spin_unlock_irqrestore(&info->lock, flags);
2229 2215
2230 /* 2216 /*
2231 * Start up serial port 2217 * Start up serial port
2232 */ 2218 */
@@ -2237,14 +2223,13 @@ static int esp_open(struct tty_struct *tty, struct file * filp)
2237 retval = block_til_ready(tty, filp, info); 2223 retval = block_til_ready(tty, filp, info);
2238 if (retval) { 2224 if (retval) {
2239#ifdef SERIAL_DEBUG_OPEN 2225#ifdef SERIAL_DEBUG_OPEN
2240 printk("esp_open returning after block_til_ready with %d\n", 2226 printk(KERN_DEBUG "esp_open returning after block_til_ready with %d\n",
2241 retval); 2227 retval);
2242#endif 2228#endif
2243 return retval; 2229 return retval;
2244 } 2230 }
2245
2246#ifdef SERIAL_DEBUG_OPEN 2231#ifdef SERIAL_DEBUG_OPEN
2247 printk("esp_open %s successful...", tty->name); 2232 printk(KERN_DEBUG "esp_open %s successful...", tty->name);
2248#endif 2233#endif
2249 return 0; 2234 return 0;
2250} 2235}
@@ -2262,10 +2247,10 @@ static int esp_open(struct tty_struct *tty, struct file * filp)
2262 * number, and identifies which options were configured into this 2247 * number, and identifies which options were configured into this
2263 * driver. 2248 * driver.
2264 */ 2249 */
2265 2250
2266static inline void show_serial_version(void) 2251static void show_serial_version(void)
2267{ 2252{
2268 printk(KERN_INFO "%s version %s (DMA %u)\n", 2253 printk(KERN_INFO "%s version %s (DMA %u)\n",
2269 serial_name, serial_version, dma); 2254 serial_name, serial_version, dma);
2270} 2255}
2271 2256
@@ -2273,7 +2258,7 @@ static inline void show_serial_version(void)
2273 * This routine is called by espserial_init() to initialize a specific serial 2258 * This routine is called by espserial_init() to initialize a specific serial
2274 * port. 2259 * port.
2275 */ 2260 */
2276static inline int autoconfig(struct esp_struct * info) 2261static int autoconfig(struct esp_struct *info)
2277{ 2262{
2278 int port_detected = 0; 2263 int port_detected = 0;
2279 unsigned long flags; 2264 unsigned long flags;
@@ -2349,14 +2334,14 @@ static const struct tty_operations esp_ops = {
2349static int __init espserial_init(void) 2334static int __init espserial_init(void)
2350{ 2335{
2351 int i, offset; 2336 int i, offset;
2352 struct esp_struct * info; 2337 struct esp_struct *info;
2353 struct esp_struct *last_primary = NULL; 2338 struct esp_struct *last_primary = NULL;
2354 int esp[] = {0x100,0x140,0x180,0x200,0x240,0x280,0x300,0x380}; 2339 int esp[] = { 0x100, 0x140, 0x180, 0x200, 0x240, 0x280, 0x300, 0x380 };
2355 2340
2356 esp_driver = alloc_tty_driver(NR_PORTS); 2341 esp_driver = alloc_tty_driver(NR_PORTS);
2357 if (!esp_driver) 2342 if (!esp_driver)
2358 return -ENOMEM; 2343 return -ENOMEM;
2359 2344
2360 for (i = 0; i < NR_PRIMARY; i++) { 2345 for (i = 0; i < NR_PRIMARY; i++) {
2361 if (irq[i] != 0) { 2346 if (irq[i] != 0) {
2362 if ((irq[i] < 2) || (irq[i] > 15) || (irq[i] == 6) || 2347 if ((irq[i] < 2) || (irq[i] > 15) || (irq[i] == 6) ||
@@ -2378,20 +2363,20 @@ static int __init espserial_init(void)
2378 2363
2379 if ((flow_off < 1) || (flow_off > 1023)) 2364 if ((flow_off < 1) || (flow_off > 1023))
2380 flow_off = 1016; 2365 flow_off = 1016;
2381 2366
2382 if ((flow_on < 1) || (flow_on > 1023)) 2367 if ((flow_on < 1) || (flow_on > 1023))
2383 flow_on = 944; 2368 flow_on = 944;
2384 2369
2385 if ((rx_timeout < 0) || (rx_timeout > 255)) 2370 if ((rx_timeout < 0) || (rx_timeout > 255))
2386 rx_timeout = 128; 2371 rx_timeout = 128;
2387 2372
2388 if (flow_on >= flow_off) 2373 if (flow_on >= flow_off)
2389 flow_on = flow_off - 1; 2374 flow_on = flow_off - 1;
2390 2375
2391 show_serial_version(); 2376 show_serial_version();
2392 2377
2393 /* Initialize the tty_driver structure */ 2378 /* Initialize the tty_driver structure */
2394 2379
2395 esp_driver->owner = THIS_MODULE; 2380 esp_driver->owner = THIS_MODULE;
2396 esp_driver->name = "ttyP"; 2381 esp_driver->name = "ttyP";
2397 esp_driver->major = ESP_IN_MAJOR; 2382 esp_driver->major = ESP_IN_MAJOR;
@@ -2401,10 +2386,11 @@ static int __init espserial_init(void)
2401 esp_driver->init_termios = tty_std_termios; 2386 esp_driver->init_termios = tty_std_termios;
2402 esp_driver->init_termios.c_cflag = 2387 esp_driver->init_termios.c_cflag =
2403 B9600 | CS8 | CREAD | HUPCL | CLOCAL; 2388 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
2389 esp_driver->init_termios.c_ispeed = 9600;
2390 esp_driver->init_termios.c_ospeed = 9600;
2404 esp_driver->flags = TTY_DRIVER_REAL_RAW; 2391 esp_driver->flags = TTY_DRIVER_REAL_RAW;
2405 tty_set_operations(esp_driver, &esp_ops); 2392 tty_set_operations(esp_driver, &esp_ops);
2406 if (tty_register_driver(esp_driver)) 2393 if (tty_register_driver(esp_driver)) {
2407 {
2408 printk(KERN_ERR "Couldn't register esp serial driver"); 2394 printk(KERN_ERR "Couldn't register esp serial driver");
2409 put_tty_driver(esp_driver); 2395 put_tty_driver(esp_driver);
2410 return 1; 2396 return 1;
@@ -2412,8 +2398,7 @@ static int __init espserial_init(void)
2412 2398
2413 info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL); 2399 info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
2414 2400
2415 if (!info) 2401 if (!info) {
2416 {
2417 printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n"); 2402 printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n");
2418 tty_unregister_driver(esp_driver); 2403 tty_unregister_driver(esp_driver);
2419 put_tty_driver(esp_driver); 2404 put_tty_driver(esp_driver);
@@ -2476,10 +2461,8 @@ static int __init espserial_init(void)
2476 info->stat_flags |= ESP_STAT_NEVER_DMA; 2461 info->stat_flags |= ESP_STAT_NEVER_DMA;
2477 2462
2478 info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL); 2463 info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
2479 if (!info) 2464 if (!info) {
2480 { 2465 printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n");
2481 printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n");
2482
2483 /* allow use of the already detected ports */ 2466 /* allow use of the already detected ports */
2484 return 0; 2467 return 0;
2485 } 2468 }
@@ -2503,22 +2486,20 @@ static int __init espserial_init(void)
2503 return 0; 2486 return 0;
2504} 2487}
2505 2488
2506static void __exit espserial_exit(void) 2489static void __exit espserial_exit(void)
2507{ 2490{
2508 int e1; 2491 int e1;
2509 struct esp_struct *temp_async; 2492 struct esp_struct *temp_async;
2510 struct esp_pio_buffer *pio_buf; 2493 struct esp_pio_buffer *pio_buf;
2511 2494
2512 /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ 2495 e1 = tty_unregister_driver(esp_driver);
2513 if ((e1 = tty_unregister_driver(esp_driver))) 2496 if (e1)
2514 printk("SERIAL: failed to unregister serial driver (%d)\n", 2497 printk(KERN_ERR "esp: failed to unregister driver (%d)\n", e1);
2515 e1);
2516 put_tty_driver(esp_driver); 2498 put_tty_driver(esp_driver);
2517 2499
2518 while (ports) { 2500 while (ports) {
2519 if (ports->port) { 2501 if (ports->port)
2520 release_region(ports->port, REGION_SIZE); 2502 release_region(ports->port, REGION_SIZE);
2521 }
2522 temp_async = ports->next_port; 2503 temp_async = ports->next_port;
2523 kfree(ports); 2504 kfree(ports);
2524 ports = temp_async; 2505 ports = temp_async;
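
The TIOCSSERIAL and TIOCSHAYESESP branches earlier in this file now wrap their helpers in lock_kernel()/unlock_kernel(). That is the BKL push-down idiom: rather than the tty core holding the big kernel lock around the whole ioctl, the driver takes it only around the code that still depends on it. A minimal sketch of the pattern follows; struct foo_port, FOO_SETCFG and foo_set_config are hypothetical names, not part of this driver.

#include <linux/tty.h>
#include <linux/smp_lock.h>	/* lock_kernel() / unlock_kernel() */
#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/errno.h>

#define FOO_SETCFG	_IOW('f', 0x01, int)	/* hypothetical ioctl */

struct foo_port {
	int config;
};

/* helper that still assumes the BKL serialises it against itself */
static int foo_set_config(struct foo_port *port, int __user *argp)
{
	return get_user(port->config, argp);
}

static int foo_ioctl(struct tty_struct *tty, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct foo_port *port = tty->driver_data;
	int ret;

	switch (cmd) {
	case FOO_SETCFG:
		lock_kernel();		/* taken around just this helper */
		ret = foo_set_config(port, (int __user *)arg);
		unlock_kernel();
		return ret;
	default:
		return -ENOIOCTLCMD;	/* let the tty core try the rest */
	}
}
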
diff --git a/drivers/char/generic_serial.c b/drivers/char/generic_serial.c
index 7ed7da1d99cf..252f73e48596 100644
--- a/drivers/char/generic_serial.c
+++ b/drivers/char/generic_serial.c
@@ -40,27 +40,27 @@ static int gs_debug;
40#define gs_dprintk(f, str...) /* nothing */ 40#define gs_dprintk(f, str...) /* nothing */
41#endif 41#endif
42 42
43#define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __FUNCTION__) 43#define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __func__)
44#define func_exit() gs_dprintk (GS_DEBUG_FLOW, "gs: exit %s\n", __FUNCTION__) 44#define func_exit() gs_dprintk (GS_DEBUG_FLOW, "gs: exit %s\n", __func__)
45 45
46#define RS_EVENT_WRITE_WAKEUP 1 46#define RS_EVENT_WRITE_WAKEUP 1
47 47
48module_param(gs_debug, int, 0644); 48module_param(gs_debug, int, 0644);
49 49
50 50
51void gs_put_char(struct tty_struct * tty, unsigned char ch) 51int gs_put_char(struct tty_struct * tty, unsigned char ch)
52{ 52{
53 struct gs_port *port; 53 struct gs_port *port;
54 54
55 func_enter (); 55 func_enter ();
56 56
57 if (!tty) return; 57 if (!tty) return 0;
58 58
59 port = tty->driver_data; 59 port = tty->driver_data;
60 60
61 if (!port) return; 61 if (!port) return 0;
62 62
63 if (! (port->flags & ASYNC_INITIALIZED)) return; 63 if (! (port->flags & ASYNC_INITIALIZED)) return 0;
64 64
65 /* Take a lock on the serial tranmit buffer! */ 65 /* Take a lock on the serial tranmit buffer! */
66 mutex_lock(& port->port_write_mutex); 66 mutex_lock(& port->port_write_mutex);
@@ -68,7 +68,7 @@ void gs_put_char(struct tty_struct * tty, unsigned char ch)
68 if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { 68 if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) {
69 /* Sorry, buffer is full, drop character. Update statistics???? -- REW */ 69 /* Sorry, buffer is full, drop character. Update statistics???? -- REW */
70 mutex_unlock(&port->port_write_mutex); 70 mutex_unlock(&port->port_write_mutex);
71 return; 71 return 0;
72 } 72 }
73 73
74 port->xmit_buf[port->xmit_head++] = ch; 74 port->xmit_buf[port->xmit_head++] = ch;
@@ -77,6 +77,7 @@ void gs_put_char(struct tty_struct * tty, unsigned char ch)
77 77
78 mutex_unlock(&port->port_write_mutex); 78 mutex_unlock(&port->port_write_mutex);
79 func_exit (); 79 func_exit ();
80 return 1;
80} 81}
81 82
82 83
@@ -586,8 +587,7 @@ void gs_close(struct tty_struct * tty, struct file * filp)
586 587
587 port->flags &= ~GS_ACTIVE; 588 port->flags &= ~GS_ACTIVE;
588 589
589 if (tty->driver->flush_buffer) 590 gs_flush_buffer(tty);
590 tty->driver->flush_buffer(tty);
591 591
592 tty_ldisc_flush(tty); 592 tty_ldisc_flush(tty);
593 tty->closing = 0; 593 tty->closing = 0;
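
The gs_put_char() change above reflects the tty_operations update in this merge: the ->put_char() hook now returns int, 1 when the byte was queued and 0 when there was no room, so the caller can fall back to ->write(). A minimal sketch of the new contract for a hypothetical driver with its own transmit ring; the struct and field names are made up and buffer locking is left out for brevity.

#include <linux/tty.h>

#define FOO_XMIT_SIZE	512		/* power of two, hypothetical */

struct foo_port {
	unsigned char	xmit_buf[FOO_XMIT_SIZE];
	unsigned int	xmit_head;
	unsigned int	xmit_cnt;
};

/* New-style ->put_char(): report whether the character was accepted. */
static int foo_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct foo_port *port = tty->driver_data;

	if (!port || port->xmit_cnt >= FOO_XMIT_SIZE - 1)
		return 0;			/* no room: byte not taken */

	port->xmit_buf[port->xmit_head] = ch;
	port->xmit_head = (port->xmit_head + 1) & (FOO_XMIT_SIZE - 1);
	port->xmit_cnt++;
	return 1;				/* byte queued */
}
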
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 1399971be689..e7fb0bca3667 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -308,7 +308,7 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
308 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, 308 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
309 PAGE_SIZE, vma->vm_page_prot)) { 309 PAGE_SIZE, vma->vm_page_prot)) {
310 printk(KERN_ERR "%s: io_remap_pfn_range failed\n", 310 printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
311 __FUNCTION__); 311 __func__);
312 return -EAGAIN; 312 return -EAGAIN;
313 } 313 }
314 314
@@ -748,7 +748,7 @@ int hpet_alloc(struct hpet_data *hdp)
748 */ 748 */
749 if (hpet_is_known(hdp)) { 749 if (hpet_is_known(hdp)) {
750 printk(KERN_DEBUG "%s: duplicate HPET ignored\n", 750 printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
751 __FUNCTION__); 751 __func__);
752 return 0; 752 return 0;
753 } 753 }
754 754
@@ -869,7 +869,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
869 869
870 if (hpet_is_known(hdp)) { 870 if (hpet_is_known(hdp)) {
871 printk(KERN_DEBUG "%s: 0x%lx is busy\n", 871 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
872 __FUNCTION__, hdp->hd_phys_address); 872 __func__, hdp->hd_phys_address);
873 iounmap(hdp->hd_address); 873 iounmap(hdp->hd_address);
874 return AE_ALREADY_EXISTS; 874 return AE_ALREADY_EXISTS;
875 } 875 }
@@ -886,7 +886,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
886 886
887 if (hpet_is_known(hdp)) { 887 if (hpet_is_known(hdp)) {
888 printk(KERN_DEBUG "%s: 0x%lx is busy\n", 888 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
889 __FUNCTION__, hdp->hd_phys_address); 889 __func__, hdp->hd_phys_address);
890 iounmap(hdp->hd_address); 890 iounmap(hdp->hd_address);
891 return AE_ALREADY_EXISTS; 891 return AE_ALREADY_EXISTS;
892 } 892 }
@@ -925,7 +925,7 @@ static int hpet_acpi_add(struct acpi_device *device)
925 return -ENODEV; 925 return -ENODEV;
926 926
927 if (!data.hd_address || !data.hd_nirqs) { 927 if (!data.hd_address || !data.hd_nirqs) {
928 printk("%s: no address or irqs in _CRS\n", __FUNCTION__); 928 printk("%s: no address or irqs in _CRS\n", __func__);
929 return -ENODEV; 929 return -ENODEV;
930 } 930 }
931 931
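
The hpet.c hunks above, like the hvsi.c hunks that follow, replace the GCC-specific __FUNCTION__ with the standard C99 __func__; both expand to the name of the enclosing function. A trivial illustration, with a made-up function and message:

#include <linux/kernel.h>

static void foo_report(int unit)
{
	/* prints e.g. "foo_report: unit 3 ready" */
	printk(KERN_DEBUG "%s: unit %d ready\n", __func__, unit);
}
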
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index d5a752da322f..59c6f9ab94e4 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -246,7 +246,7 @@ static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
246{ 246{
247 int remaining = (int)(hp->inbuf_end - read_to); 247 int remaining = (int)(hp->inbuf_end - read_to);
248 248
249 pr_debug("%s: %i chars remain\n", __FUNCTION__, remaining); 249 pr_debug("%s: %i chars remain\n", __func__, remaining);
250 250
251 if (read_to != hp->inbuf) 251 if (read_to != hp->inbuf)
252 memmove(hp->inbuf, read_to, remaining); 252 memmove(hp->inbuf, read_to, remaining);
@@ -365,7 +365,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
365 packet.u.version = HVSI_VERSION; 365 packet.u.version = HVSI_VERSION;
366 packet.query_seqno = query_seqno+1; 366 packet.query_seqno = query_seqno+1;
367 367
368 pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len); 368 pr_debug("%s: sending %i bytes\n", __func__, packet.len);
369 dbg_dump_hex((uint8_t*)&packet, packet.len); 369 dbg_dump_hex((uint8_t*)&packet, packet.len);
370 370
371 wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); 371 wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
@@ -437,7 +437,7 @@ static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp,
437 return NULL; 437 return NULL;
438 438
439 if (overflow > 0) { 439 if (overflow > 0) {
440 pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __FUNCTION__); 440 pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__);
441 datalen = TTY_THRESHOLD_THROTTLE; 441 datalen = TTY_THRESHOLD_THROTTLE;
442 } 442 }
443 443
@@ -448,7 +448,7 @@ static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp,
448 * we still have more data to deliver, so we need to save off the 448 * we still have more data to deliver, so we need to save off the
449 * overflow and send it later 449 * overflow and send it later
450 */ 450 */
451 pr_debug("%s: deferring overflow\n", __FUNCTION__); 451 pr_debug("%s: deferring overflow\n", __func__);
452 memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow); 452 memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
453 hp->n_throttle = overflow; 453 hp->n_throttle = overflow;
454 } 454 }
@@ -474,11 +474,11 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
474 474
475 chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ); 475 chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
476 if (chunklen == 0) { 476 if (chunklen == 0) {
477 pr_debug("%s: 0-length read\n", __FUNCTION__); 477 pr_debug("%s: 0-length read\n", __func__);
478 return 0; 478 return 0;
479 } 479 }
480 480
481 pr_debug("%s: got %i bytes\n", __FUNCTION__, chunklen); 481 pr_debug("%s: got %i bytes\n", __func__, chunklen);
482 dbg_dump_hex(hp->inbuf_end, chunklen); 482 dbg_dump_hex(hp->inbuf_end, chunklen);
483 483
484 hp->inbuf_end += chunklen; 484 hp->inbuf_end += chunklen;
@@ -495,7 +495,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
495 continue; 495 continue;
496 } 496 }
497 497
498 pr_debug("%s: handling %i-byte packet\n", __FUNCTION__, 498 pr_debug("%s: handling %i-byte packet\n", __func__,
499 len_packet(packet)); 499 len_packet(packet));
500 dbg_dump_packet(packet); 500 dbg_dump_packet(packet);
501 501
@@ -526,7 +526,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
526 packet += len_packet(packet); 526 packet += len_packet(packet);
527 527
528 if (*hangup || *handshake) { 528 if (*hangup || *handshake) {
529 pr_debug("%s: hangup or handshake\n", __FUNCTION__); 529 pr_debug("%s: hangup or handshake\n", __func__);
530 /* 530 /*
531 * we need to send the hangup now before receiving any more data. 531 * we need to send the hangup now before receiving any more data.
532 * If we get "data, hangup, data", we can't deliver the second 532 * If we get "data, hangup, data", we can't deliver the second
@@ -543,7 +543,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
543 543
544static void hvsi_send_overflow(struct hvsi_struct *hp) 544static void hvsi_send_overflow(struct hvsi_struct *hp)
545{ 545{
546 pr_debug("%s: delivering %i bytes overflow\n", __FUNCTION__, 546 pr_debug("%s: delivering %i bytes overflow\n", __func__,
547 hp->n_throttle); 547 hp->n_throttle);
548 548
549 hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle); 549 hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
@@ -563,7 +563,7 @@ static irqreturn_t hvsi_interrupt(int irq, void *arg)
563 unsigned long flags; 563 unsigned long flags;
564 int again = 1; 564 int again = 1;
565 565
566 pr_debug("%s\n", __FUNCTION__); 566 pr_debug("%s\n", __func__);
567 567
568 while (again) { 568 while (again) {
569 spin_lock_irqsave(&hp->lock, flags); 569 spin_lock_irqsave(&hp->lock, flags);
@@ -647,7 +647,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
647 packet.seqno = atomic_inc_return(&hp->seqno); 647 packet.seqno = atomic_inc_return(&hp->seqno);
648 packet.verb = verb; 648 packet.verb = verb;
649 649
650 pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len); 650 pr_debug("%s: sending %i bytes\n", __func__, packet.len);
651 dbg_dump_hex((uint8_t*)&packet, packet.len); 651 dbg_dump_hex((uint8_t*)&packet, packet.len);
652 652
653 wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); 653 wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
@@ -674,7 +674,7 @@ static int hvsi_get_mctrl(struct hvsi_struct *hp)
674 return ret; 674 return ret;
675 } 675 }
676 676
677 pr_debug("%s: mctrl 0x%x\n", __FUNCTION__, hp->mctrl); 677 pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);
678 678
679 return 0; 679 return 0;
680} 680}
@@ -694,7 +694,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
694 if (mctrl & TIOCM_DTR) 694 if (mctrl & TIOCM_DTR)
695 packet.word = HVSI_TSDTR; 695 packet.word = HVSI_TSDTR;
696 696
697 pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len); 697 pr_debug("%s: sending %i bytes\n", __func__, packet.len);
698 dbg_dump_hex((uint8_t*)&packet, packet.len); 698 dbg_dump_hex((uint8_t*)&packet, packet.len);
699 699
700 wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); 700 wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
@@ -790,7 +790,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
790 packet.len = 6; 790 packet.len = 6;
791 packet.verb = VSV_CLOSE_PROTOCOL; 791 packet.verb = VSV_CLOSE_PROTOCOL;
792 792
793 pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len); 793 pr_debug("%s: sending %i bytes\n", __func__, packet.len);
794 dbg_dump_hex((uint8_t*)&packet, packet.len); 794 dbg_dump_hex((uint8_t*)&packet, packet.len);
795 795
796 hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); 796 hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
@@ -803,7 +803,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
803 int line = tty->index; 803 int line = tty->index;
804 int ret; 804 int ret;
805 805
806 pr_debug("%s\n", __FUNCTION__); 806 pr_debug("%s\n", __func__);
807 807
808 if (line < 0 || line >= hvsi_count) 808 if (line < 0 || line >= hvsi_count)
809 return -ENODEV; 809 return -ENODEV;
@@ -868,7 +868,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
868 struct hvsi_struct *hp = tty->driver_data; 868 struct hvsi_struct *hp = tty->driver_data;
869 unsigned long flags; 869 unsigned long flags;
870 870
871 pr_debug("%s\n", __FUNCTION__); 871 pr_debug("%s\n", __func__);
872 872
873 if (tty_hung_up_p(filp)) 873 if (tty_hung_up_p(filp))
874 return; 874 return;
@@ -920,7 +920,7 @@ static void hvsi_hangup(struct tty_struct *tty)
920 struct hvsi_struct *hp = tty->driver_data; 920 struct hvsi_struct *hp = tty->driver_data;
921 unsigned long flags; 921 unsigned long flags;
922 922
923 pr_debug("%s\n", __FUNCTION__); 923 pr_debug("%s\n", __func__);
924 924
925 spin_lock_irqsave(&hp->lock, flags); 925 spin_lock_irqsave(&hp->lock, flags);
926 926
@@ -942,7 +942,7 @@ static void hvsi_push(struct hvsi_struct *hp)
942 n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf); 942 n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
943 if (n > 0) { 943 if (n > 0) {
944 /* success */ 944 /* success */
945 pr_debug("%s: wrote %i chars\n", __FUNCTION__, n); 945 pr_debug("%s: wrote %i chars\n", __func__, n);
946 hp->n_outbuf = 0; 946 hp->n_outbuf = 0;
947 } else if (n == -EIO) { 947 } else if (n == -EIO) {
948 __set_state(hp, HVSI_FSP_DIED); 948 __set_state(hp, HVSI_FSP_DIED);
@@ -965,7 +965,7 @@ static void hvsi_write_worker(struct work_struct *work)
965 965
966 spin_lock_irqsave(&hp->lock, flags); 966 spin_lock_irqsave(&hp->lock, flags);
967 967
968 pr_debug("%s: %i chars in buffer\n", __FUNCTION__, hp->n_outbuf); 968 pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
969 969
970 if (!is_open(hp)) { 970 if (!is_open(hp)) {
971 /* 971 /*
@@ -983,7 +983,7 @@ static void hvsi_write_worker(struct work_struct *work)
983 schedule_delayed_work(&hp->writer, 10); 983 schedule_delayed_work(&hp->writer, 10);
984 else { 984 else {
985#ifdef DEBUG 985#ifdef DEBUG
986 pr_debug("%s: outbuf emptied after %li jiffies\n", __FUNCTION__, 986 pr_debug("%s: outbuf emptied after %li jiffies\n", __func__,
987 jiffies - start_j); 987 jiffies - start_j);
988 start_j = 0; 988 start_j = 0;
989#endif /* DEBUG */ 989#endif /* DEBUG */
@@ -1020,11 +1020,11 @@ static int hvsi_write(struct tty_struct *tty,
1020 1020
1021 spin_lock_irqsave(&hp->lock, flags); 1021 spin_lock_irqsave(&hp->lock, flags);
1022 1022
1023 pr_debug("%s: %i chars in buffer\n", __FUNCTION__, hp->n_outbuf); 1023 pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
1024 1024
1025 if (!is_open(hp)) { 1025 if (!is_open(hp)) {
1026 /* we're either closing or not yet open; don't accept data */ 1026 /* we're either closing or not yet open; don't accept data */
1027 pr_debug("%s: not open\n", __FUNCTION__); 1027 pr_debug("%s: not open\n", __func__);
1028 goto out; 1028 goto out;
1029 } 1029 }
1030 1030
@@ -1058,7 +1058,7 @@ out:
1058 spin_unlock_irqrestore(&hp->lock, flags); 1058 spin_unlock_irqrestore(&hp->lock, flags);
1059 1059
1060 if (total != origcount) 1060 if (total != origcount)
1061 pr_debug("%s: wanted %i, only wrote %i\n", __FUNCTION__, origcount, 1061 pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount,
1062 total); 1062 total);
1063 1063
1064 return total; 1064 return total;
@@ -1072,7 +1072,7 @@ static void hvsi_throttle(struct tty_struct *tty)
1072{ 1072{
1073 struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data; 1073 struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data;
1074 1074
1075 pr_debug("%s\n", __FUNCTION__); 1075 pr_debug("%s\n", __func__);
1076 1076
1077 h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); 1077 h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
1078} 1078}
@@ -1083,7 +1083,7 @@ static void hvsi_unthrottle(struct tty_struct *tty)
1083 unsigned long flags; 1083 unsigned long flags;
1084 int shouldflip = 0; 1084 int shouldflip = 0;
1085 1085
1086 pr_debug("%s\n", __FUNCTION__); 1086 pr_debug("%s\n", __func__);
1087 1087
1088 spin_lock_irqsave(&hp->lock, flags); 1088 spin_lock_irqsave(&hp->lock, flags);
1089 if (hp->n_throttle) { 1089 if (hp->n_throttle) {
@@ -1302,7 +1302,7 @@ static int __init hvsi_console_init(void)
1302 hp->virq = irq_create_mapping(NULL, irq[0]); 1302 hp->virq = irq_create_mapping(NULL, irq[0]);
1303 if (hp->virq == NO_IRQ) { 1303 if (hp->virq == NO_IRQ) {
1304 printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n", 1304 printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
1305 __FUNCTION__, irq[0]); 1305 __func__, irq[0]);
1306 continue; 1306 continue;
1307 } 1307 }
1308 1308
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 8609b8236c67..b60d425ce8d1 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -77,11 +77,16 @@ static int power_status;
77module_param(power_status, bool, 0600); 77module_param(power_status, bool, 0600);
78MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k"); 78MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
79 79
80static int fan_mult = I8K_FAN_MULT;
81module_param(fan_mult, int, 0);
82MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
83
80static int i8k_open_fs(struct inode *inode, struct file *file); 84static int i8k_open_fs(struct inode *inode, struct file *file);
81static int i8k_ioctl(struct inode *, struct file *, unsigned int, 85static int i8k_ioctl(struct inode *, struct file *, unsigned int,
82 unsigned long); 86 unsigned long);
83 87
84static const struct file_operations i8k_fops = { 88static const struct file_operations i8k_fops = {
89 .owner = THIS_MODULE,
85 .open = i8k_open_fs, 90 .open = i8k_open_fs,
86 .read = seq_read, 91 .read = seq_read,
87 .llseek = seq_lseek, 92 .llseek = seq_lseek,
@@ -238,7 +243,7 @@ static int i8k_get_fan_speed(int fan)
238 struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, }; 243 struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
239 244
240 regs.ebx = fan & 0xff; 245 regs.ebx = fan & 0xff;
241 return i8k_smm(&regs) ? : (regs.eax & 0xffff) * I8K_FAN_MULT; 246 return i8k_smm(&regs) ? : (regs.eax & 0xffff) * fan_mult;
242} 247}
243 248
244/* 249/*
@@ -554,13 +559,10 @@ static int __init i8k_init(void)
554 return -ENODEV; 559 return -ENODEV;
555 560
556 /* Register the proc entry */ 561 /* Register the proc entry */
557 proc_i8k = create_proc_entry("i8k", 0, NULL); 562 proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops);
558 if (!proc_i8k) 563 if (!proc_i8k)
559 return -ENOENT; 564 return -ENOENT;
560 565
561 proc_i8k->proc_fops = &i8k_fops;
562 proc_i8k->owner = THIS_MODULE;
563
564 printk(KERN_INFO 566 printk(KERN_INFO
565 "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n", 567 "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n",
566 I8K_VERSION); 568 I8K_VERSION);
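
Two things change in i8k.c above: the fan-speed factor becomes a fan_mult module parameter, and the /proc/i8k entry is registered with proc_create() instead of create_proc_entry() followed by manual proc_fops/owner assignment, which closes the window where the entry exists without its file operations. A minimal sketch of the proc_create() pattern for a hypothetical read-only entry; everything except the procfs/seq_file calls is invented.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "foo status: ok\n");
	return 0;
}

static int foo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_proc_show, NULL);
}

static const struct file_operations foo_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = foo_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __init foo_init(void)
{
	/* entry is created with its fops and owner already in place */
	if (!proc_create("foo", 0, NULL, &foo_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	remove_proc_entry("foo", NULL);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
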
diff --git a/drivers/char/ip2/i2ellis.c b/drivers/char/ip2/i2ellis.c
index 61ef013b8445..3601017f58cf 100644
--- a/drivers/char/ip2/i2ellis.c
+++ b/drivers/char/ip2/i2ellis.c
@@ -53,7 +53,7 @@ static int ii2Safe; // Safe I/O address for delay routine
53 53
54static int iiDelayed; // Set when the iiResetDelay function is 54static int iiDelayed; // Set when the iiResetDelay function is
55 // called. Cleared when ANY board is reset. 55 // called. Cleared when ANY board is reset.
56static rwlock_t Dl_spinlock; 56static DEFINE_RWLOCK(Dl_spinlock);
57 57
58//******** 58//********
59//* Code * 59//* Code *
@@ -82,7 +82,6 @@ static rwlock_t Dl_spinlock;
82static void 82static void
83iiEllisInit(void) 83iiEllisInit(void)
84{ 84{
85 LOCK_INIT(&Dl_spinlock);
86} 85}
87 86
88//****************************************************************************** 87//******************************************************************************
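
The two hunks above drop the runtime LOCK_INIT() call from iiEllisInit() and instead initialise Dl_spinlock statically with DEFINE_RWLOCK(), so the lock is usable from the moment the module is loaded. The same idiom in isolation, with a hypothetical lock and counter:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(foo_list_lock);	/* statically initialised rwlock */
static int foo_count;

static void foo_bump(void)
{
	unsigned long flags;

	write_lock_irqsave(&foo_list_lock, flags);
	foo_count++;
	write_unlock_irqrestore(&foo_list_lock, flags);
}
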
@@ -132,7 +131,7 @@ iiSetAddress( i2eBordStrPtr pB, int address, delayFunc_t delay )
132 || (address & 0x7) 131 || (address & 0x7)
133 ) 132 )
134 { 133 {
135 COMPLETE(pB,I2EE_BADADDR); 134 I2_COMPLETE(pB, I2EE_BADADDR);
136 } 135 }
137 136
138 // Initialize accelerators 137 // Initialize accelerators
@@ -152,7 +151,7 @@ iiSetAddress( i2eBordStrPtr pB, int address, delayFunc_t delay )
152 pB->i2eValid = I2E_MAGIC; 151 pB->i2eValid = I2E_MAGIC;
153 pB->i2eState = II_STATE_COLD; 152 pB->i2eState = II_STATE_COLD;
154 153
155 COMPLETE(pB, I2EE_GOOD); 154 I2_COMPLETE(pB, I2EE_GOOD);
156} 155}
157 156
158//****************************************************************************** 157//******************************************************************************
@@ -177,12 +176,12 @@ iiReset(i2eBordStrPtr pB)
177 // Magic number should be set, else even the address is suspect 176 // Magic number should be set, else even the address is suspect
178 if (pB->i2eValid != I2E_MAGIC) 177 if (pB->i2eValid != I2E_MAGIC)
179 { 178 {
180 COMPLETE(pB, I2EE_BADMAGIC); 179 I2_COMPLETE(pB, I2EE_BADMAGIC);
181 } 180 }
182 181
183 OUTB(pB->i2eBase + FIFO_RESET, 0); // Any data will do 182 outb(0, pB->i2eBase + FIFO_RESET); /* Any data will do */
184 iiDelay(pB, 50); // Pause between resets 183 iiDelay(pB, 50); // Pause between resets
185 OUTB(pB->i2eBase + FIFO_RESET, 0); // Second reset 184 outb(0, pB->i2eBase + FIFO_RESET); /* Second reset */
186 185
187 // We must wait before even attempting to read anything from the FIFO: the 186 // We must wait before even attempting to read anything from the FIFO: the
188 // board's P.O.S.T may actually attempt to read and write its end of the 187 // board's P.O.S.T may actually attempt to read and write its end of the
@@ -203,7 +202,7 @@ iiReset(i2eBordStrPtr pB)
203 // Ensure anything which would have been of use to standard loadware is 202 // Ensure anything which would have been of use to standard loadware is
204 // blanked out, since board has now forgotten everything!. 203 // blanked out, since board has now forgotten everything!.
205 204
206 pB->i2eUsingIrq = IRQ_UNDEFINED; // Not set up to use an interrupt yet 205 pB->i2eUsingIrq = I2_IRQ_UNDEFINED; /* to not use an interrupt so far */
207 pB->i2eWaitingForEmptyFifo = 0; 206 pB->i2eWaitingForEmptyFifo = 0;
208 pB->i2eOutMailWaiting = 0; 207 pB->i2eOutMailWaiting = 0;
209 pB->i2eChannelPtr = NULL; 208 pB->i2eChannelPtr = NULL;
@@ -215,7 +214,7 @@ iiReset(i2eBordStrPtr pB)
215 pB->i2eFatalTrap = NULL; 214 pB->i2eFatalTrap = NULL;
216 pB->i2eFatal = 0; 215 pB->i2eFatal = 0;
217 216
218 COMPLETE(pB, I2EE_GOOD); 217 I2_COMPLETE(pB, I2EE_GOOD);
219} 218}
220 219
221//****************************************************************************** 220//******************************************************************************
@@ -235,14 +234,14 @@ static int
235iiResetDelay(i2eBordStrPtr pB) 234iiResetDelay(i2eBordStrPtr pB)
236{ 235{
237 if (pB->i2eValid != I2E_MAGIC) { 236 if (pB->i2eValid != I2E_MAGIC) {
238 COMPLETE(pB, I2EE_BADMAGIC); 237 I2_COMPLETE(pB, I2EE_BADMAGIC);
239 } 238 }
240 if (pB->i2eState != II_STATE_RESET) { 239 if (pB->i2eState != II_STATE_RESET) {
241 COMPLETE(pB, I2EE_BADSTATE); 240 I2_COMPLETE(pB, I2EE_BADSTATE);
242 } 241 }
243 iiDelay(pB,2000); /* Now we wait for two seconds. */ 242 iiDelay(pB,2000); /* Now we wait for two seconds. */
244 iiDelayed = 1; /* Delay has been called: ok to initialize */ 243 iiDelayed = 1; /* Delay has been called: ok to initialize */
245 COMPLETE(pB, I2EE_GOOD); 244 I2_COMPLETE(pB, I2EE_GOOD);
246} 245}
247 246
248//****************************************************************************** 247//******************************************************************************
@@ -273,12 +272,12 @@ iiInitialize(i2eBordStrPtr pB)
273 272
274 if (pB->i2eValid != I2E_MAGIC) 273 if (pB->i2eValid != I2E_MAGIC)
275 { 274 {
276 COMPLETE(pB, I2EE_BADMAGIC); 275 I2_COMPLETE(pB, I2EE_BADMAGIC);
277 } 276 }
278 277
279 if (pB->i2eState != II_STATE_RESET || !iiDelayed) 278 if (pB->i2eState != II_STATE_RESET || !iiDelayed)
280 { 279 {
281 COMPLETE(pB, I2EE_BADSTATE); 280 I2_COMPLETE(pB, I2EE_BADSTATE);
282 } 281 }
283 282
284 // In case there is a failure short of our completely reading the power-up 283 // In case there is a failure short of our completely reading the power-up
@@ -291,13 +290,12 @@ iiInitialize(i2eBordStrPtr pB)
291 for (itemp = 0; itemp < sizeof(porStr); itemp++) 290 for (itemp = 0; itemp < sizeof(porStr); itemp++)
292 { 291 {
293 // We expect the entire message is ready. 292 // We expect the entire message is ready.
294 if (HAS_NO_INPUT(pB)) 293 if (!I2_HAS_INPUT(pB)) {
295 {
296 pB->i2ePomSize = itemp; 294 pB->i2ePomSize = itemp;
297 COMPLETE(pB, I2EE_PORM_SHORT); 295 I2_COMPLETE(pB, I2EE_PORM_SHORT);
298 } 296 }
299 297
300 pB->i2ePom.c[itemp] = c = BYTE_FROM(pB); 298 pB->i2ePom.c[itemp] = c = inb(pB->i2eData);
301 299
302 // We check the magic numbers as soon as they are supposed to be read 300 // We check the magic numbers as soon as they are supposed to be read
303 // (rather than after) to minimize effect of reading something we 301 // (rather than after) to minimize effect of reading something we
@@ -306,22 +304,22 @@ iiInitialize(i2eBordStrPtr pB)
306 (itemp == POR_2_INDEX && c != POR_MAGIC_2)) 304 (itemp == POR_2_INDEX && c != POR_MAGIC_2))
307 { 305 {
308 pB->i2ePomSize = itemp+1; 306 pB->i2ePomSize = itemp+1;
309 COMPLETE(pB, I2EE_BADMAGIC); 307 I2_COMPLETE(pB, I2EE_BADMAGIC);
310 } 308 }
311 } 309 }
312 310
313 pB->i2ePomSize = itemp; 311 pB->i2ePomSize = itemp;
314 312
315 // Ensure that this was all the data... 313 // Ensure that this was all the data...
316 if (HAS_INPUT(pB)) 314 if (I2_HAS_INPUT(pB))
317 COMPLETE(pB, I2EE_PORM_LONG); 315 I2_COMPLETE(pB, I2EE_PORM_LONG);
318 316
319 // For now, we'll fail to initialize if P.O.S.T reports bad chip mapper: 317 // For now, we'll fail to initialize if P.O.S.T reports bad chip mapper:
320 // Implying we will not be able to download any code either: That's ok: the 318 // Implying we will not be able to download any code either: That's ok: the
321 // condition is pretty explicit. 319 // condition is pretty explicit.
322 if (pB->i2ePom.e.porDiag1 & POR_BAD_MAPPER) 320 if (pB->i2ePom.e.porDiag1 & POR_BAD_MAPPER)
323 { 321 {
324 COMPLETE(pB, I2EE_POSTERR); 322 I2_COMPLETE(pB, I2EE_POSTERR);
325 } 323 }
326 324
327 // Determine anything which must be done differently depending on the family 325 // Determine anything which must be done differently depending on the family
@@ -332,7 +330,7 @@ iiInitialize(i2eBordStrPtr pB)
332 330
333 pB->i2eFifoStyle = FIFO_II; 331 pB->i2eFifoStyle = FIFO_II;
334 pB->i2eFifoSize = 512; // 512 bytes, always 332 pB->i2eFifoSize = 512; // 512 bytes, always
335 pB->i2eDataWidth16 = NO; 333 pB->i2eDataWidth16 = false;
336 334
337 pB->i2eMaxIrq = 15; // Because board cannot tell us it is in an 8-bit 335 pB->i2eMaxIrq = 15; // Because board cannot tell us it is in an 8-bit
338 // slot, we do allow it to be done (documentation!) 336 // slot, we do allow it to be done (documentation!)
@@ -354,7 +352,7 @@ iiInitialize(i2eBordStrPtr pB)
354 // should always be consistent for IntelliPort-II. Ditto below... 352 // should always be consistent for IntelliPort-II. Ditto below...
355 if (pB->i2ePom.e.porPorts1 != 4) 353 if (pB->i2ePom.e.porPorts1 != 4)
356 { 354 {
357 COMPLETE(pB, I2EE_INCONSIST); 355 I2_COMPLETE(pB, I2EE_INCONSIST);
358 } 356 }
359 break; 357 break;
360 358
@@ -364,7 +362,7 @@ iiInitialize(i2eBordStrPtr pB)
364 pB->i2eChannelMap[0] = 0xff; // Eight port 362 pB->i2eChannelMap[0] = 0xff; // Eight port
365 if (pB->i2ePom.e.porPorts1 != 8) 363 if (pB->i2ePom.e.porPorts1 != 8)
366 { 364 {
367 COMPLETE(pB, I2EE_INCONSIST); 365 I2_COMPLETE(pB, I2EE_INCONSIST);
368 } 366 }
369 break; 367 break;
370 368
@@ -373,7 +371,7 @@ iiInitialize(i2eBordStrPtr pB)
373 pB->i2eChannelMap[0] = 0x3f; // Six Port 371 pB->i2eChannelMap[0] = 0x3f; // Six Port
374 if (pB->i2ePom.e.porPorts1 != 6) 372 if (pB->i2ePom.e.porPorts1 != 6)
375 { 373 {
376 COMPLETE(pB, I2EE_INCONSIST); 374 I2_COMPLETE(pB, I2EE_INCONSIST);
377 } 375 }
378 break; 376 break;
379 } 377 }
@@ -402,7 +400,7 @@ iiInitialize(i2eBordStrPtr pB)
402 400
403 if (itemp < 8 || itemp > 15) 401 if (itemp < 8 || itemp > 15)
404 { 402 {
405 COMPLETE(pB, I2EE_INCONSIST); 403 I2_COMPLETE(pB, I2EE_INCONSIST);
406 } 404 }
407 pB->i2eFifoSize = (1 << itemp); 405 pB->i2eFifoSize = (1 << itemp);
408 406
@@ -450,26 +448,26 @@ iiInitialize(i2eBordStrPtr pB)
450 switch (pB->i2ePom.e.porBus & (POR_BUS_SLOT16 | POR_BUS_DIP16) ) 448 switch (pB->i2ePom.e.porBus & (POR_BUS_SLOT16 | POR_BUS_DIP16) )
451 { 449 {
452 case POR_BUS_SLOT16 | POR_BUS_DIP16: 450 case POR_BUS_SLOT16 | POR_BUS_DIP16:
453 pB->i2eDataWidth16 = YES; 451 pB->i2eDataWidth16 = true;
454 pB->i2eMaxIrq = 15; 452 pB->i2eMaxIrq = 15;
455 break; 453 break;
456 454
457 case POR_BUS_SLOT16: 455 case POR_BUS_SLOT16:
458 pB->i2eDataWidth16 = NO; 456 pB->i2eDataWidth16 = false;
459 pB->i2eMaxIrq = 15; 457 pB->i2eMaxIrq = 15;
460 break; 458 break;
461 459
462 case 0: 460 case 0:
463 case POR_BUS_DIP16: // In an 8-bit slot, DIP switch don't care. 461 case POR_BUS_DIP16: // In an 8-bit slot, DIP switch don't care.
464 default: 462 default:
465 pB->i2eDataWidth16 = NO; 463 pB->i2eDataWidth16 = false;
466 pB->i2eMaxIrq = 7; 464 pB->i2eMaxIrq = 7;
467 break; 465 break;
468 } 466 }
469 break; // POR_ID_FIIEX case 467 break; // POR_ID_FIIEX case
470 468
471 default: // Unknown type of board 469 default: // Unknown type of board
472 COMPLETE(pB, I2EE_BAD_FAMILY); 470 I2_COMPLETE(pB, I2EE_BAD_FAMILY);
473 break; 471 break;
474 } // End the switch based on family 472 } // End the switch based on family
475 473
@@ -483,17 +481,14 @@ iiInitialize(i2eBordStrPtr pB)
483 { 481 {
484 case POR_BUS_T_ISA: 482 case POR_BUS_T_ISA:
485 case POR_BUS_T_UNK: // If the type of bus is undeclared, assume ok. 483 case POR_BUS_T_UNK: // If the type of bus is undeclared, assume ok.
486 pB->i2eChangeIrq = YES;
487 break;
488 case POR_BUS_T_MCA: 484 case POR_BUS_T_MCA:
489 case POR_BUS_T_EISA: 485 case POR_BUS_T_EISA:
490 pB->i2eChangeIrq = NO;
491 break; 486 break;
492 default: 487 default:
493 COMPLETE(pB, I2EE_BADBUS); 488 I2_COMPLETE(pB, I2EE_BADBUS);
494 } 489 }
495 490
496 if (pB->i2eDataWidth16 == YES) 491 if (pB->i2eDataWidth16)
497 { 492 {
498 pB->i2eWriteBuf = iiWriteBuf16; 493 pB->i2eWriteBuf = iiWriteBuf16;
499 pB->i2eReadBuf = iiReadBuf16; 494 pB->i2eReadBuf = iiReadBuf16;
@@ -529,7 +524,7 @@ iiInitialize(i2eBordStrPtr pB)
529 break; 524 break;
530 525
531 default: 526 default:
532 COMPLETE(pB, I2EE_INCONSIST); 527 I2_COMPLETE(pB, I2EE_INCONSIST);
533 } 528 }
534 529
535 // Initialize state information. 530 // Initialize state information.
@@ -549,7 +544,7 @@ iiInitialize(i2eBordStrPtr pB)
549 // Everything is ok now, return with good status/ 544 // Everything is ok now, return with good status/
550 545
551 pB->i2eValid = I2E_MAGIC; 546 pB->i2eValid = I2E_MAGIC;
552 COMPLETE(pB, I2EE_GOOD); 547 I2_COMPLETE(pB, I2EE_GOOD);
553} 548}
554 549
555//****************************************************************************** 550//******************************************************************************
@@ -658,7 +653,7 @@ ii2DelayIO(unsigned int mseconds)
658 while(mseconds--) { 653 while(mseconds--) {
659 int i = ii2DelValue; 654 int i = ii2DelValue;
660 while ( i-- ) { 655 while ( i-- ) {
661 INB ( ii2Safe ); 656 inb(ii2Safe);
662 } 657 }
663 } 658 }
664} 659}
@@ -709,11 +704,11 @@ iiWriteBuf16(i2eBordStrPtr pB, unsigned char *address, int count)
709{ 704{
710 // Rudimentary sanity checking here. 705 // Rudimentary sanity checking here.
711 if (pB->i2eValid != I2E_MAGIC) 706 if (pB->i2eValid != I2E_MAGIC)
712 COMPLETE(pB, I2EE_INVALID); 707 I2_COMPLETE(pB, I2EE_INVALID);
713 708
714 OUTSW ( pB->i2eData, address, count); 709 I2_OUTSW(pB->i2eData, address, count);
715 710
716 COMPLETE(pB, I2EE_GOOD); 711 I2_COMPLETE(pB, I2EE_GOOD);
717} 712}
718 713
719//****************************************************************************** 714//******************************************************************************
@@ -738,11 +733,11 @@ iiWriteBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
738{ 733{
739 /* Rudimentary sanity checking here */ 734 /* Rudimentary sanity checking here */
740 if (pB->i2eValid != I2E_MAGIC) 735 if (pB->i2eValid != I2E_MAGIC)
741 COMPLETE(pB, I2EE_INVALID); 736 I2_COMPLETE(pB, I2EE_INVALID);
742 737
743 OUTSB ( pB->i2eData, address, count ); 738 I2_OUTSB(pB->i2eData, address, count);
744 739
745 COMPLETE(pB, I2EE_GOOD); 740 I2_COMPLETE(pB, I2EE_GOOD);
746} 741}
747 742
748//****************************************************************************** 743//******************************************************************************
@@ -767,11 +762,11 @@ iiReadBuf16(i2eBordStrPtr pB, unsigned char *address, int count)
767{ 762{
768 // Rudimentary sanity checking here. 763 // Rudimentary sanity checking here.
769 if (pB->i2eValid != I2E_MAGIC) 764 if (pB->i2eValid != I2E_MAGIC)
770 COMPLETE(pB, I2EE_INVALID); 765 I2_COMPLETE(pB, I2EE_INVALID);
771 766
772 INSW ( pB->i2eData, address, count); 767 I2_INSW(pB->i2eData, address, count);
773 768
774 COMPLETE(pB, I2EE_GOOD); 769 I2_COMPLETE(pB, I2EE_GOOD);
775} 770}
776 771
777//****************************************************************************** 772//******************************************************************************
@@ -796,11 +791,11 @@ iiReadBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
796{ 791{
797 // Rudimentary sanity checking here. 792 // Rudimentary sanity checking here.
798 if (pB->i2eValid != I2E_MAGIC) 793 if (pB->i2eValid != I2E_MAGIC)
799 COMPLETE(pB, I2EE_INVALID); 794 I2_COMPLETE(pB, I2EE_INVALID);
800 795
801 INSB ( pB->i2eData, address, count); 796 I2_INSB(pB->i2eData, address, count);
802 797
803 COMPLETE(pB, I2EE_GOOD); 798 I2_COMPLETE(pB, I2EE_GOOD);
804} 799}
805 800
806//****************************************************************************** 801//******************************************************************************
@@ -820,7 +815,7 @@ iiReadBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
820static unsigned short 815static unsigned short
821iiReadWord16(i2eBordStrPtr pB) 816iiReadWord16(i2eBordStrPtr pB)
822{ 817{
823 return (unsigned short)( INW(pB->i2eData) ); 818 return inw(pB->i2eData);
824} 819}
825 820
826//****************************************************************************** 821//******************************************************************************
@@ -842,9 +837,9 @@ iiReadWord8(i2eBordStrPtr pB)
842{ 837{
843 unsigned short urs; 838 unsigned short urs;
844 839
845 urs = INB ( pB->i2eData ); 840 urs = inb(pB->i2eData);
846 841
847 return ( ( INB ( pB->i2eData ) << 8 ) | urs ); 842 return (inb(pB->i2eData) << 8) | urs;
848} 843}
849 844
850//****************************************************************************** 845//******************************************************************************
@@ -865,7 +860,7 @@ iiReadWord8(i2eBordStrPtr pB)
865static void 860static void
866iiWriteWord16(i2eBordStrPtr pB, unsigned short value) 861iiWriteWord16(i2eBordStrPtr pB, unsigned short value)
867{ 862{
868 WORD_TO(pB, (int)value); 863 outw((int)value, pB->i2eData);
869} 864}
870 865
871//****************************************************************************** 866//******************************************************************************
@@ -886,8 +881,8 @@ iiWriteWord16(i2eBordStrPtr pB, unsigned short value)
886static void 881static void
887iiWriteWord8(i2eBordStrPtr pB, unsigned short value) 882iiWriteWord8(i2eBordStrPtr pB, unsigned short value)
888{ 883{
889 BYTE_TO(pB, (char)value); 884 outb((char)value, pB->i2eData);
890 BYTE_TO(pB, (char)(value >> 8) ); 885 outb((char)(value >> 8), pB->i2eData);
891} 886}
892 887
893//****************************************************************************** 888//******************************************************************************
@@ -939,30 +934,30 @@ iiWaitForTxEmptyII(i2eBordStrPtr pB, int mSdelay)
939 // interrupts of any kind. 934 // interrupts of any kind.
940 935
941 936
942 WRITE_LOCK_IRQSAVE(&Dl_spinlock,flags) 937 write_lock_irqsave(&Dl_spinlock, flags);
943 OUTB(pB->i2ePointer, SEL_COMMAND); 938 outb(SEL_COMMAND, pB->i2ePointer);
944 OUTB(pB->i2ePointer, SEL_CMD_SH); 939 outb(SEL_CMD_SH, pB->i2ePointer);
945 940
946 itemp = INB(pB->i2eStatus); 941 itemp = inb(pB->i2eStatus);
947 942
948 OUTB(pB->i2ePointer, SEL_COMMAND); 943 outb(SEL_COMMAND, pB->i2ePointer);
949 OUTB(pB->i2ePointer, SEL_CMD_UNSH); 944 outb(SEL_CMD_UNSH, pB->i2ePointer);
950 945
951 if (itemp & ST_IN_EMPTY) 946 if (itemp & ST_IN_EMPTY)
952 { 947 {
953 UPDATE_FIFO_ROOM(pB); 948 I2_UPDATE_FIFO_ROOM(pB);
954 WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags) 949 write_unlock_irqrestore(&Dl_spinlock, flags);
955 COMPLETE(pB, I2EE_GOOD); 950 I2_COMPLETE(pB, I2EE_GOOD);
956 } 951 }
957 952
958 WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags) 953 write_unlock_irqrestore(&Dl_spinlock, flags);
959 954
960 if (mSdelay-- == 0) 955 if (mSdelay-- == 0)
961 break; 956 break;
962 957
963 iiDelay(pB, 1); /* 1 mS granularity on checking condition */ 958 iiDelay(pB, 1); /* 1 mS granularity on checking condition */
964 } 959 }
965 COMPLETE(pB, I2EE_TXE_TIME); 960 I2_COMPLETE(pB, I2EE_TXE_TIME);
966} 961}
967 962
968//****************************************************************************** 963//******************************************************************************
@@ -1002,21 +997,21 @@ iiWaitForTxEmptyIIEX(i2eBordStrPtr pB, int mSdelay)
1002 // you will generally not want to service interrupts or in any way 997 // you will generally not want to service interrupts or in any way
1003 // disrupt the assumptions implicit in the larger context. 998 // disrupt the assumptions implicit in the larger context.
1004 999
1005 WRITE_LOCK_IRQSAVE(&Dl_spinlock,flags) 1000 write_lock_irqsave(&Dl_spinlock, flags);
1006 1001
1007 if (INB(pB->i2eStatus) & STE_OUT_MT) { 1002 if (inb(pB->i2eStatus) & STE_OUT_MT) {
1008 UPDATE_FIFO_ROOM(pB); 1003 I2_UPDATE_FIFO_ROOM(pB);
1009 WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags) 1004 write_unlock_irqrestore(&Dl_spinlock, flags);
1010 COMPLETE(pB, I2EE_GOOD); 1005 I2_COMPLETE(pB, I2EE_GOOD);
1011 } 1006 }
1012 WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags) 1007 write_unlock_irqrestore(&Dl_spinlock, flags);
1013 1008
1014 if (mSdelay-- == 0) 1009 if (mSdelay-- == 0)
1015 break; 1010 break;
1016 1011
1017 iiDelay(pB, 1); // 1 mS granularity on checking condition 1012 iiDelay(pB, 1); // 1 mS granularity on checking condition
1018 } 1013 }
1019 COMPLETE(pB, I2EE_TXE_TIME); 1014 I2_COMPLETE(pB, I2EE_TXE_TIME);
1020} 1015}
1021 1016
1022//****************************************************************************** 1017//******************************************************************************
@@ -1038,8 +1033,8 @@ static int
1038iiTxMailEmptyII(i2eBordStrPtr pB) 1033iiTxMailEmptyII(i2eBordStrPtr pB)
1039{ 1034{
1040 int port = pB->i2ePointer; 1035 int port = pB->i2ePointer;
1041 OUTB ( port, SEL_OUTMAIL ); 1036 outb(SEL_OUTMAIL, port);
1042 return ( INB(port) == 0 ); 1037 return inb(port) == 0;
1043} 1038}
1044 1039
1045//****************************************************************************** 1040//******************************************************************************
@@ -1060,7 +1055,7 @@ iiTxMailEmptyII(i2eBordStrPtr pB)
1060static int 1055static int
1061iiTxMailEmptyIIEX(i2eBordStrPtr pB) 1056iiTxMailEmptyIIEX(i2eBordStrPtr pB)
1062{ 1057{
1063 return !(INB(pB->i2eStatus) & STE_OUT_MAIL); 1058 return !(inb(pB->i2eStatus) & STE_OUT_MAIL);
1064} 1059}
1065 1060
1066//****************************************************************************** 1061//******************************************************************************
@@ -1084,10 +1079,10 @@ iiTrySendMailII(i2eBordStrPtr pB, unsigned char mail)
1084{ 1079{
1085 int port = pB->i2ePointer; 1080 int port = pB->i2ePointer;
1086 1081
1087 OUTB(port, SEL_OUTMAIL); 1082 outb(SEL_OUTMAIL, port);
1088 if (INB(port) == 0) { 1083 if (inb(port) == 0) {
1089 OUTB(port, SEL_OUTMAIL); 1084 outb(SEL_OUTMAIL, port);
1090 OUTB(port, mail); 1085 outb(mail, port);
1091 return 1; 1086 return 1;
1092 } 1087 }
1093 return 0; 1088 return 0;
@@ -1112,10 +1107,9 @@ iiTrySendMailII(i2eBordStrPtr pB, unsigned char mail)
1112static int 1107static int
1113iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail) 1108iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail)
1114{ 1109{
1115 if(INB(pB->i2eStatus) & STE_OUT_MAIL) { 1110 if (inb(pB->i2eStatus) & STE_OUT_MAIL)
1116 return 0; 1111 return 0;
1117 } 1112 outb(mail, pB->i2eXMail);
1118 OUTB(pB->i2eXMail, mail);
1119 return 1; 1113 return 1;
1120} 1114}
1121 1115
@@ -1136,9 +1130,9 @@ iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail)
1136static unsigned short 1130static unsigned short
1137iiGetMailII(i2eBordStrPtr pB) 1131iiGetMailII(i2eBordStrPtr pB)
1138{ 1132{
1139 if (HAS_MAIL(pB)) { 1133 if (I2_HAS_MAIL(pB)) {
1140 OUTB(pB->i2ePointer, SEL_INMAIL); 1134 outb(SEL_INMAIL, pB->i2ePointer);
1141 return INB(pB->i2ePointer); 1135 return inb(pB->i2ePointer);
1142 } else { 1136 } else {
1143 return NO_MAIL_HERE; 1137 return NO_MAIL_HERE;
1144 } 1138 }
@@ -1161,11 +1155,10 @@ iiGetMailII(i2eBordStrPtr pB)
1161static unsigned short 1155static unsigned short
1162iiGetMailIIEX(i2eBordStrPtr pB) 1156iiGetMailIIEX(i2eBordStrPtr pB)
1163{ 1157{
1164 if (HAS_MAIL(pB)) { 1158 if (I2_HAS_MAIL(pB))
1165 return INB(pB->i2eXMail); 1159 return inb(pB->i2eXMail);
1166 } else { 1160 else
1167 return NO_MAIL_HERE; 1161 return NO_MAIL_HERE;
1168 }
1169} 1162}
1170 1163
1171//****************************************************************************** 1164//******************************************************************************
@@ -1184,8 +1177,8 @@ iiGetMailIIEX(i2eBordStrPtr pB)
1184static void 1177static void
1185iiEnableMailIrqII(i2eBordStrPtr pB) 1178iiEnableMailIrqII(i2eBordStrPtr pB)
1186{ 1179{
1187 OUTB(pB->i2ePointer, SEL_MASK); 1180 outb(SEL_MASK, pB->i2ePointer);
1188 OUTB(pB->i2ePointer, ST_IN_MAIL); 1181 outb(ST_IN_MAIL, pB->i2ePointer);
1189} 1182}
1190 1183
1191//****************************************************************************** 1184//******************************************************************************
@@ -1204,7 +1197,7 @@ iiEnableMailIrqII(i2eBordStrPtr pB)
1204static void 1197static void
1205iiEnableMailIrqIIEX(i2eBordStrPtr pB) 1198iiEnableMailIrqIIEX(i2eBordStrPtr pB)
1206{ 1199{
1207 OUTB(pB->i2eXMask, MX_IN_MAIL); 1200 outb(MX_IN_MAIL, pB->i2eXMask);
1208} 1201}
1209 1202
1210//****************************************************************************** 1203//******************************************************************************
@@ -1223,8 +1216,8 @@ iiEnableMailIrqIIEX(i2eBordStrPtr pB)
1223static void 1216static void
1224iiWriteMaskII(i2eBordStrPtr pB, unsigned char value) 1217iiWriteMaskII(i2eBordStrPtr pB, unsigned char value)
1225{ 1218{
1226 OUTB(pB->i2ePointer, SEL_MASK); 1219 outb(SEL_MASK, pB->i2ePointer);
1227 OUTB(pB->i2ePointer, value); 1220 outb(value, pB->i2ePointer);
1228} 1221}
1229 1222
1230//****************************************************************************** 1223//******************************************************************************
@@ -1243,7 +1236,7 @@ iiWriteMaskII(i2eBordStrPtr pB, unsigned char value)
1243static void 1236static void
1244iiWriteMaskIIEX(i2eBordStrPtr pB, unsigned char value) 1237iiWriteMaskIIEX(i2eBordStrPtr pB, unsigned char value)
1245{ 1238{
1246 OUTB(pB->i2eXMask, value); 1239 outb(value, pB->i2eXMask);
1247} 1240}
1248 1241
1249//****************************************************************************** 1242//******************************************************************************
@@ -1354,9 +1347,8 @@ iiDownloadBlock ( i2eBordStrPtr pB, loadHdrStrPtr pSource, int isStandard)
1354 // immediately and be harmless, though not strictly necessary. 1347 // immediately and be harmless, though not strictly necessary.
1355 itemp = MAX_DLOAD_ACK_TIME/10; 1348 itemp = MAX_DLOAD_ACK_TIME/10;
1356 while (--itemp) { 1349 while (--itemp) {
1357 if (HAS_INPUT(pB)) { 1350 if (I2_HAS_INPUT(pB)) {
1358 switch(BYTE_FROM(pB)) 1351 switch (inb(pB->i2eData)) {
1359 {
1360 case LOADWARE_OK: 1352 case LOADWARE_OK:
1361 pB->i2eState = 1353 pB->i2eState =
1362 isStandard ? II_STATE_STDLOADED :II_STATE_LOADED; 1354 isStandard ? II_STATE_STDLOADED :II_STATE_LOADED;
diff --git a/drivers/char/ip2/i2ellis.h b/drivers/char/ip2/i2ellis.h
index 433305062fb8..c88a64e527aa 100644
--- a/drivers/char/ip2/i2ellis.h
+++ b/drivers/char/ip2/i2ellis.h
@@ -185,10 +185,6 @@ typedef struct _i2eBordStr
185 // The highest allowable IRQ, based on the 185 // The highest allowable IRQ, based on the
186 // slot size. 186 // slot size.
187 187
188 unsigned char i2eChangeIrq;
189 // Whether tis valid to change IRQ's
190 // ISA = ok, EISA, MicroChannel, no
191
192 // Accelerators for various addresses on the board 188 // Accelerators for various addresses on the board
193 int i2eBase; // I/O Address of the Board 189 int i2eBase; // I/O Address of the Board
194 int i2eData; // From here data transfers happen 190 int i2eData; // From here data transfers happen
@@ -431,12 +427,6 @@ typedef struct _i2eBordStr
431// Manifests for i2eBordStr: 427// Manifests for i2eBordStr:
432//------------------------------------------- 428//-------------------------------------------
433 429
434#define YES 1
435#define NO 0
436
437#define NULLFUNC (void (*)(void))0
438#define NULLPTR (void *)0
439
440typedef void (*delayFunc_t)(unsigned int); 430typedef void (*delayFunc_t)(unsigned int);
441 431
442// i2eValid 432// i2eValid
@@ -494,8 +484,8 @@ typedef void (*delayFunc_t)(unsigned int);
494 484
495// i2eUsingIrq 485// i2eUsingIrq
496// 486//
497#define IRQ_UNDEFINED 0x1352 // No valid irq (or polling = 0) can ever 487#define I2_IRQ_UNDEFINED 0x1352 /* No valid irq (or polling = 0) can
498 // promote to this! 488 * ever promote to this! */
499//------------------------------------------ 489//------------------------------------------
500// Handy Macros for i2ellis.c and others 490// Handy Macros for i2ellis.c and others
501// Note these are common to -II and -IIEX 491// Note these are common to -II and -IIEX
@@ -504,41 +494,14 @@ typedef void (*delayFunc_t)(unsigned int);
504// Given a pointer to the board structure, does the input FIFO have any data or 494// Given a pointer to the board structure, does the input FIFO have any data or
505// not? 495// not?
506// 496//
507#define HAS_INPUT(pB) !(INB(pB->i2eStatus) & ST_IN_EMPTY) 497#define I2_HAS_INPUT(pB) !(inb(pB->i2eStatus) & ST_IN_EMPTY)
508#define HAS_NO_INPUT(pB) (INB(pB->i2eStatus) & ST_IN_EMPTY)
509
510// Given a pointer to board structure, read a byte or word from the fifo
511//
512#define BYTE_FROM(pB) (unsigned char)INB(pB->i2eData)
513#define WORD_FROM(pB) (unsigned short)INW(pB->i2eData)
514
515// Given a pointer to board structure, is there room for any data to be written
516// to the data fifo?
517//
518#define HAS_OUTROOM(pB) !(INB(pB->i2eStatus) & ST_OUT_FULL)
519#define HAS_NO_OUTROOM(pB) (INB(pB->i2eStatus) & ST_OUT_FULL)
520
521// Given a pointer to board structure, write a single byte to the fifo
522// structure. Note that for 16-bit interfaces, the high order byte is undefined
523// and unknown.
524//
525#define BYTE_TO(pB, c) OUTB(pB->i2eData,(c))
526
527// Write a word to the fifo structure. For 8-bit interfaces, this may have
528// unknown results.
529//
530#define WORD_TO(pB, c) OUTW(pB->i2eData,(c))
531 498
532// Given a pointer to the board structure, is there anything in the incoming 499// Given a pointer to the board structure, is there anything in the incoming
533// mailbox? 500// mailbox?
534// 501//
535#define HAS_MAIL(pB) (INB(pB->i2eStatus) & ST_IN_MAIL) 502#define I2_HAS_MAIL(pB) (inb(pB->i2eStatus) & ST_IN_MAIL)
536 503
537#define UPDATE_FIFO_ROOM(pB) (pB)->i2eFifoRemains=(pB)->i2eFifoSize 504#define I2_UPDATE_FIFO_ROOM(pB) ((pB)->i2eFifoRemains = (pB)->i2eFifoSize)
538
539// Handy macro to round up a number (like the buffer write and read routines do)
540//
541#define ROUNDUP(number) (((number)+1) & (~1))
542 505
543//------------------------------------------ 506//------------------------------------------
544// Function Declarations for i2ellis.c 507// Function Declarations for i2ellis.c
@@ -593,20 +556,11 @@ static int iiDownloadBlock(i2eBordStrPtr, loadHdrStrPtr, int);
593// 556//
594static int iiDownloadAll(i2eBordStrPtr, loadHdrStrPtr, int, int); 557static int iiDownloadAll(i2eBordStrPtr, loadHdrStrPtr, int, int);
595 558
596// Called indirectly always. Needed externally so the routine might be
597// SPECIFIED as an argument to iiReset()
598//
599//static void ii2DelayIO(unsigned int); // N-millisecond delay using
600 //hardware spin
601//static void ii2DelayTimer(unsigned int); // N-millisecond delay using Linux
602 //timer
603
604// Many functions defined here return True if good, False otherwise, with an 559// Many functions defined here return True if good, False otherwise, with an
605// error code in i2eError field. Here is a handy macro for setting the error 560// error code in i2eError field. Here is a handy macro for setting the error
606// code and returning. 561// code and returning.
607// 562//
608#define COMPLETE(pB,code) \ 563#define I2_COMPLETE(pB,code) do { \
609 do { \
610 pB->i2eError = code; \ 564 pB->i2eError = code; \
611 return (code == I2EE_GOOD);\ 565 return (code == I2EE_GOOD);\
612 } while (0) 566 } while (0)
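The renamed I2_COMPLETE() above keeps the do { ... } while (0) wrapper so that its two-statement body still expands as a single statement and composes safely with un-braced if/else in callers. A self-contained sketch of the same idiom, with hypothetical stand-in names rather than the driver's types:

/* DEMO_COMPLETE and demo_check are stand-ins, not driver code. */
#define DEMO_GOOD	0
#define DEMO_BAD	1

#define DEMO_COMPLETE(err, code) do {		\
		*(err) = (code);		\
		return ((code) == DEMO_GOOD);	\
	} while (0)

static int demo_check(int valid, int *err)
{
	if (!valid)
		DEMO_COMPLETE(err, DEMO_BAD);	/* the else below still binds to this if */
	else
		DEMO_COMPLETE(err, DEMO_GOOD);
}

Like I2_COMPLETE(), the stand-in records the error code and returns true only for the "good" value.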
diff --git a/drivers/char/ip2/i2hw.h b/drivers/char/ip2/i2hw.h
index 15fe04e748f4..8aa6e7ab8d5b 100644
--- a/drivers/char/ip2/i2hw.h
+++ b/drivers/char/ip2/i2hw.h
@@ -129,7 +129,6 @@ registers, use byte operations only.
129//------------------------------------------------ 129//------------------------------------------------
130// 130//
131#include "ip2types.h" 131#include "ip2types.h"
132#include "i2os.h" /* For any o.s., compiler, or host-related issues */
133 132
134//------------------------------------------------------------------------- 133//-------------------------------------------------------------------------
135// Manifests for the I/O map: 134// Manifests for the I/O map:
@@ -644,5 +643,10 @@ typedef union _loadHdrStr
644#define ABS_BIGGEST_BOX 16 // Absolute the most ports per box 643#define ABS_BIGGEST_BOX 16 // Absolute the most ports per box
645#define ABS_MOST_PORTS (ABS_MAX_BOXES * ABS_BIGGEST_BOX) 644#define ABS_MOST_PORTS (ABS_MAX_BOXES * ABS_BIGGEST_BOX)
646 645
646#define I2_OUTSW(port, addr, count) outsw((port), (addr), (((count)+1)/2))
647#define I2_OUTSB(port, addr, count) outsb((port), (addr), (((count)+1))&-2)
648#define I2_INSW(port, addr, count) insw((port), (addr), (((count)+1)/2))
649#define I2_INSB(port, addr, count) insb((port), (addr), (((count)+1))&-2)
650
647#endif // I2HW_H 651#endif // I2HW_H
648 652
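The I2_INSW()/I2_OUTSW() wrappers added above convert a byte count into a word count by rounding up, so an odd trailing byte is still transferred; I2_INSB()/I2_OUTSB() instead round the byte count itself up to an even value with ((count)+1) & -2. A tiny worked sketch, with hypothetical names and values:

/* insw()/outsw() move 16-bit words, so an odd byte count is rounded up
 * rather than truncated; e.g. 7 bytes -> 4 words -> 8 bytes on the wire.
 */
#define DEMO_BYTES_TO_WORDS(count)	(((count) + 1) / 2)

static const int demo_words_for_7_bytes = DEMO_BYTES_TO_WORDS(7);	/* == 4 */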
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index 9c25320121ef..938879cc7bcc 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -227,17 +227,17 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
227 i2ChanStrPtr *ppCh; 227 i2ChanStrPtr *ppCh;
228 228
229 if (pB->i2eValid != I2E_MAGIC) { 229 if (pB->i2eValid != I2E_MAGIC) {
230 COMPLETE(pB, I2EE_BADMAGIC); 230 I2_COMPLETE(pB, I2EE_BADMAGIC);
231 } 231 }
232 if (pB->i2eState != II_STATE_STDLOADED) { 232 if (pB->i2eState != II_STATE_STDLOADED) {
233 COMPLETE(pB, I2EE_BADSTATE); 233 I2_COMPLETE(pB, I2EE_BADSTATE);
234 } 234 }
235 235
236 LOCK_INIT(&pB->read_fifo_spinlock); 236 rwlock_init(&pB->read_fifo_spinlock);
237 LOCK_INIT(&pB->write_fifo_spinlock); 237 rwlock_init(&pB->write_fifo_spinlock);
238 LOCK_INIT(&pB->Dbuf_spinlock); 238 rwlock_init(&pB->Dbuf_spinlock);
239 LOCK_INIT(&pB->Bbuf_spinlock); 239 rwlock_init(&pB->Bbuf_spinlock);
240 LOCK_INIT(&pB->Fbuf_spinlock); 240 rwlock_init(&pB->Fbuf_spinlock);
241 241
242 // NO LOCK needed yet - this is init 242 // NO LOCK needed yet - this is init
243 243
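The hunk above drops the LOCK_INIT() wrapper (and, in later hunks, WRITE_LOCK_IRQSAVE()/WRITE_UNLOCK_IRQRESTORE()) from the deleted i2os.h in favour of calling the rwlock API directly. A minimal self-contained sketch of that pattern, using a hypothetical structure rather than the driver's board/channel structs:

#include <linux/spinlock.h>

struct demo_fifo {			/* hypothetical, not the driver's struct */
	rwlock_t lock;
	int stuff, strip;
};

static void demo_init(struct demo_fifo *f)
{
	rwlock_init(&f->lock);			/* was LOCK_INIT(&...) */
}

static void demo_advance(struct demo_fifo *f)
{
	unsigned long flags;

	write_lock_irqsave(&f->lock, flags);	/* was WRITE_LOCK_IRQSAVE(...) */
	f->strip = f->stuff;
	write_unlock_irqrestore(&f->lock, flags);
}

Readers of the same state use read_lock_irqsave()/read_unlock_irqrestore(), matching the READ_LOCK_IRQSAVE conversions later in this file.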
@@ -259,10 +259,10 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
259 if ( !(pB->i2eChannelMap[index >> 4] & (1 << (index & 0xf)) ) ) { 259 if ( !(pB->i2eChannelMap[index >> 4] & (1 << (index & 0xf)) ) ) {
260 continue; 260 continue;
261 } 261 }
262 LOCK_INIT(&pCh->Ibuf_spinlock); 262 rwlock_init(&pCh->Ibuf_spinlock);
263 LOCK_INIT(&pCh->Obuf_spinlock); 263 rwlock_init(&pCh->Obuf_spinlock);
264 LOCK_INIT(&pCh->Cbuf_spinlock); 264 rwlock_init(&pCh->Cbuf_spinlock);
265 LOCK_INIT(&pCh->Pbuf_spinlock); 265 rwlock_init(&pCh->Pbuf_spinlock);
266 // NO LOCK needed yet - this is init 266 // NO LOCK needed yet - this is init
267 // Set up validity flag according to support level 267 // Set up validity flag according to support level
268 if (pB->i2eGoodMap[index >> 4] & (1 << (index & 0xf)) ) { 268 if (pB->i2eGoodMap[index >> 4] & (1 << (index & 0xf)) ) {
@@ -347,7 +347,7 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
347 } 347 }
348 // No need to check for wrap here; this is initialization. 348 // No need to check for wrap here; this is initialization.
349 pB->i2Fbuf_stuff = stuffIndex; 349 pB->i2Fbuf_stuff = stuffIndex;
350 COMPLETE(pB, I2EE_GOOD); 350 I2_COMPLETE(pB, I2EE_GOOD);
351 351
352} 352}
353 353
@@ -374,7 +374,7 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type)
374 374
375 case NEED_INLINE: 375 case NEED_INLINE:
376 376
377 WRITE_LOCK_IRQSAVE(&pB->Dbuf_spinlock,flags); 377 write_lock_irqsave(&pB->Dbuf_spinlock, flags);
378 if ( pB->i2Dbuf_stuff != pB->i2Dbuf_strip) 378 if ( pB->i2Dbuf_stuff != pB->i2Dbuf_strip)
379 { 379 {
380 queueIndex = pB->i2Dbuf_strip; 380 queueIndex = pB->i2Dbuf_strip;
@@ -386,12 +386,12 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type)
386 pB->i2Dbuf_strip = queueIndex; 386 pB->i2Dbuf_strip = queueIndex;
387 pCh->channelNeeds &= ~NEED_INLINE; 387 pCh->channelNeeds &= ~NEED_INLINE;
388 } 388 }
389 WRITE_UNLOCK_IRQRESTORE(&pB->Dbuf_spinlock,flags); 389 write_unlock_irqrestore(&pB->Dbuf_spinlock, flags);
390 break; 390 break;
391 391
392 case NEED_BYPASS: 392 case NEED_BYPASS:
393 393
394 WRITE_LOCK_IRQSAVE(&pB->Bbuf_spinlock,flags); 394 write_lock_irqsave(&pB->Bbuf_spinlock, flags);
395 if (pB->i2Bbuf_stuff != pB->i2Bbuf_strip) 395 if (pB->i2Bbuf_stuff != pB->i2Bbuf_strip)
396 { 396 {
397 queueIndex = pB->i2Bbuf_strip; 397 queueIndex = pB->i2Bbuf_strip;
@@ -403,12 +403,12 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type)
403 pB->i2Bbuf_strip = queueIndex; 403 pB->i2Bbuf_strip = queueIndex;
404 pCh->channelNeeds &= ~NEED_BYPASS; 404 pCh->channelNeeds &= ~NEED_BYPASS;
405 } 405 }
406 WRITE_UNLOCK_IRQRESTORE(&pB->Bbuf_spinlock,flags); 406 write_unlock_irqrestore(&pB->Bbuf_spinlock, flags);
407 break; 407 break;
408 408
409 case NEED_FLOW: 409 case NEED_FLOW:
410 410
411 WRITE_LOCK_IRQSAVE(&pB->Fbuf_spinlock,flags); 411 write_lock_irqsave(&pB->Fbuf_spinlock, flags);
412 if (pB->i2Fbuf_stuff != pB->i2Fbuf_strip) 412 if (pB->i2Fbuf_stuff != pB->i2Fbuf_strip)
413 { 413 {
414 queueIndex = pB->i2Fbuf_strip; 414 queueIndex = pB->i2Fbuf_strip;
@@ -420,7 +420,7 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type)
420 pB->i2Fbuf_strip = queueIndex; 420 pB->i2Fbuf_strip = queueIndex;
421 pCh->channelNeeds &= ~NEED_FLOW; 421 pCh->channelNeeds &= ~NEED_FLOW;
422 } 422 }
423 WRITE_UNLOCK_IRQRESTORE(&pB->Fbuf_spinlock,flags); 423 write_unlock_irqrestore(&pB->Fbuf_spinlock, flags);
424 break; 424 break;
425 default: 425 default:
426 printk(KERN_ERR "i2DeQueueNeeds called with bad type:%x\n",type); 426 printk(KERN_ERR "i2DeQueueNeeds called with bad type:%x\n",type);
@@ -453,7 +453,7 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
453 453
454 case NEED_INLINE: 454 case NEED_INLINE:
455 455
456 WRITE_LOCK_IRQSAVE(&pB->Dbuf_spinlock,flags); 456 write_lock_irqsave(&pB->Dbuf_spinlock, flags);
457 if ( !(pCh->channelNeeds & NEED_INLINE) ) 457 if ( !(pCh->channelNeeds & NEED_INLINE) )
458 { 458 {
459 pCh->channelNeeds |= NEED_INLINE; 459 pCh->channelNeeds |= NEED_INLINE;
@@ -463,12 +463,12 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
463 queueIndex = 0; 463 queueIndex = 0;
464 pB->i2Dbuf_stuff = queueIndex; 464 pB->i2Dbuf_stuff = queueIndex;
465 } 465 }
466 WRITE_UNLOCK_IRQRESTORE(&pB->Dbuf_spinlock,flags); 466 write_unlock_irqrestore(&pB->Dbuf_spinlock, flags);
467 break; 467 break;
468 468
469 case NEED_BYPASS: 469 case NEED_BYPASS:
470 470
471 WRITE_LOCK_IRQSAVE(&pB->Bbuf_spinlock,flags); 471 write_lock_irqsave(&pB->Bbuf_spinlock, flags);
472 if ((type & NEED_BYPASS) && !(pCh->channelNeeds & NEED_BYPASS)) 472 if ((type & NEED_BYPASS) && !(pCh->channelNeeds & NEED_BYPASS))
473 { 473 {
474 pCh->channelNeeds |= NEED_BYPASS; 474 pCh->channelNeeds |= NEED_BYPASS;
@@ -478,12 +478,12 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
478 queueIndex = 0; 478 queueIndex = 0;
479 pB->i2Bbuf_stuff = queueIndex; 479 pB->i2Bbuf_stuff = queueIndex;
480 } 480 }
481 WRITE_UNLOCK_IRQRESTORE(&pB->Bbuf_spinlock,flags); 481 write_unlock_irqrestore(&pB->Bbuf_spinlock, flags);
482 break; 482 break;
483 483
484 case NEED_FLOW: 484 case NEED_FLOW:
485 485
486 WRITE_LOCK_IRQSAVE(&pB->Fbuf_spinlock,flags); 486 write_lock_irqsave(&pB->Fbuf_spinlock, flags);
487 if ((type & NEED_FLOW) && !(pCh->channelNeeds & NEED_FLOW)) 487 if ((type & NEED_FLOW) && !(pCh->channelNeeds & NEED_FLOW))
488 { 488 {
489 pCh->channelNeeds |= NEED_FLOW; 489 pCh->channelNeeds |= NEED_FLOW;
@@ -493,7 +493,7 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
493 queueIndex = 0; 493 queueIndex = 0;
494 pB->i2Fbuf_stuff = queueIndex; 494 pB->i2Fbuf_stuff = queueIndex;
495 } 495 }
496 WRITE_UNLOCK_IRQRESTORE(&pB->Fbuf_spinlock,flags); 496 write_unlock_irqrestore(&pB->Fbuf_spinlock, flags);
497 break; 497 break;
498 498
499 case NEED_CREDIT: 499 case NEED_CREDIT:
@@ -562,9 +562,8 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
562 pB = pCh->pMyBord; 562 pB = pCh->pMyBord;
563 563
564 // Board must also exist, and THE INTERRUPT COMMAND ALREADY SENT 564 // Board must also exist, and THE INTERRUPT COMMAND ALREADY SENT
565 if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == IRQ_UNDEFINED) { 565 if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == I2_IRQ_UNDEFINED)
566 return -2; 566 return -2;
567 }
568 // If the board has gone fatal, return bad, and also hit the trap routine if 567 // If the board has gone fatal, return bad, and also hit the trap routine if
569 // it exists. 568 // it exists.
570 if (pB->i2eFatal) { 569 if (pB->i2eFatal) {
@@ -620,13 +619,13 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
620 switch(type) { 619 switch(type) {
621 case PTYPE_INLINE: 620 case PTYPE_INLINE:
622 lock_var_p = &pCh->Obuf_spinlock; 621 lock_var_p = &pCh->Obuf_spinlock;
623 WRITE_LOCK_IRQSAVE(lock_var_p,flags); 622 write_lock_irqsave(lock_var_p, flags);
624 stuffIndex = pCh->Obuf_stuff; 623 stuffIndex = pCh->Obuf_stuff;
625 bufroom = pCh->Obuf_strip - stuffIndex; 624 bufroom = pCh->Obuf_strip - stuffIndex;
626 break; 625 break;
627 case PTYPE_BYPASS: 626 case PTYPE_BYPASS:
628 lock_var_p = &pCh->Cbuf_spinlock; 627 lock_var_p = &pCh->Cbuf_spinlock;
629 WRITE_LOCK_IRQSAVE(lock_var_p,flags); 628 write_lock_irqsave(lock_var_p, flags);
630 stuffIndex = pCh->Cbuf_stuff; 629 stuffIndex = pCh->Cbuf_stuff;
631 bufroom = pCh->Cbuf_strip - stuffIndex; 630 bufroom = pCh->Cbuf_strip - stuffIndex;
632 break; 631 break;
@@ -645,7 +644,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
645 break; /* from for()- Enough room: goto proceed */ 644 break; /* from for()- Enough room: goto proceed */
646 } 645 }
647 ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize); 646 ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
648 WRITE_UNLOCK_IRQRESTORE(lock_var_p, flags); 647 write_unlock_irqrestore(lock_var_p, flags);
649 } else 648 } else
650 ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize); 649 ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
651 650
@@ -747,7 +746,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
747 { 746 {
748 case PTYPE_INLINE: 747 case PTYPE_INLINE:
749 pCh->Obuf_stuff = stuffIndex; // Store buffer pointer 748 pCh->Obuf_stuff = stuffIndex; // Store buffer pointer
750 WRITE_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); 749 write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
751 750
752 pB->debugInlineQueued++; 751 pB->debugInlineQueued++;
753 // Add the channel pointer to list of channels needing service (first 752 // Add the channel pointer to list of channels needing service (first
@@ -757,7 +756,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
757 756
758 case PTYPE_BYPASS: 757 case PTYPE_BYPASS:
759 pCh->Cbuf_stuff = stuffIndex; // Store buffer pointer 758 pCh->Cbuf_stuff = stuffIndex; // Store buffer pointer
760 WRITE_UNLOCK_IRQRESTORE(&pCh->Cbuf_spinlock,flags); 759 write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags);
761 760
762 pB->debugBypassQueued++; 761 pB->debugBypassQueued++;
763 // Add the channel pointer to list of channels needing service (first 762 // Add the channel pointer to list of channels needing service (first
@@ -840,7 +839,7 @@ i2Input(i2ChanStrPtr pCh)
840 count = -1; 839 count = -1;
841 goto i2Input_exit; 840 goto i2Input_exit;
842 } 841 }
843 WRITE_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags); 842 write_lock_irqsave(&pCh->Ibuf_spinlock, flags);
844 843
845 // initialize some accelerators and private copies 844 // initialize some accelerators and private copies
846 stripIndex = pCh->Ibuf_strip; 845 stripIndex = pCh->Ibuf_strip;
@@ -850,7 +849,7 @@ i2Input(i2ChanStrPtr pCh)
850 // If buffer is empty or requested data count was 0, (trivial case) return 849 // If buffer is empty or requested data count was 0, (trivial case) return
851 // without any further thought. 850 // without any further thought.
852 if ( count == 0 ) { 851 if ( count == 0 ) {
853 WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); 852 write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
854 goto i2Input_exit; 853 goto i2Input_exit;
855 } 854 }
856 // Adjust for buffer wrap 855 // Adjust for buffer wrap
@@ -891,10 +890,10 @@ i2Input(i2ChanStrPtr pCh)
891 890
892 if ((pCh->sinceLastFlow += count) >= pCh->whenSendFlow) { 891 if ((pCh->sinceLastFlow += count) >= pCh->whenSendFlow) {
893 pCh->sinceLastFlow -= pCh->whenSendFlow; 892 pCh->sinceLastFlow -= pCh->whenSendFlow;
894 WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); 893 write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
895 i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW); 894 i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW);
896 } else { 895 } else {
897 WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); 896 write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
898 } 897 }
899 898
900i2Input_exit: 899i2Input_exit:
@@ -926,7 +925,7 @@ i2InputFlush(i2ChanStrPtr pCh)
926 925
927 ip2trace (CHANN, ITRC_INPUT, 10, 0); 926 ip2trace (CHANN, ITRC_INPUT, 10, 0);
928 927
929 WRITE_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags); 928 write_lock_irqsave(&pCh->Ibuf_spinlock, flags);
930 count = pCh->Ibuf_stuff - pCh->Ibuf_strip; 929 count = pCh->Ibuf_stuff - pCh->Ibuf_strip;
931 930
932 // Adjust for buffer wrap 931 // Adjust for buffer wrap
@@ -947,10 +946,10 @@ i2InputFlush(i2ChanStrPtr pCh)
947 if ( (pCh->sinceLastFlow += count) >= pCh->whenSendFlow ) 946 if ( (pCh->sinceLastFlow += count) >= pCh->whenSendFlow )
948 { 947 {
949 pCh->sinceLastFlow -= pCh->whenSendFlow; 948 pCh->sinceLastFlow -= pCh->whenSendFlow;
950 WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); 949 write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
951 i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW); 950 i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW);
952 } else { 951 } else {
953 WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); 952 write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
954 } 953 }
955 954
956 ip2trace (CHANN, ITRC_INPUT, 19, 1, count); 955 ip2trace (CHANN, ITRC_INPUT, 19, 1, count);
@@ -979,9 +978,9 @@ i2InputAvailable(i2ChanStrPtr pCh)
979 978
980 979
981 // initialize some accelerators and private copies 980 // initialize some accelerators and private copies
982 READ_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags); 981 read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
983 count = pCh->Ibuf_stuff - pCh->Ibuf_strip; 982 count = pCh->Ibuf_stuff - pCh->Ibuf_strip;
984 READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); 983 read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
985 984
986 // Adjust for buffer wrap 985 // Adjust for buffer wrap
987 if (count < 0) 986 if (count < 0)
@@ -1045,9 +1044,9 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
1045 while ( count > 0 ) { 1044 while ( count > 0 ) {
1046 1045
1047 // How much room in output buffer is there? 1046 // How much room in output buffer is there?
1048 READ_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); 1047 read_lock_irqsave(&pCh->Obuf_spinlock, flags);
1049 amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1; 1048 amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
1050 READ_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); 1049 read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
1051 if (amountToMove < 0) { 1050 if (amountToMove < 0) {
1052 amountToMove += OBUF_SIZE; 1051 amountToMove += OBUF_SIZE;
1053 } 1052 }
@@ -1075,7 +1074,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
1075 if ( !(pCh->flush_flags && i2RetryFlushOutput(pCh) ) 1074 if ( !(pCh->flush_flags && i2RetryFlushOutput(pCh) )
1076 && amountToMove > 0 ) 1075 && amountToMove > 0 )
1077 { 1076 {
1078 WRITE_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); 1077 write_lock_irqsave(&pCh->Obuf_spinlock, flags);
1079 stuffIndex = pCh->Obuf_stuff; 1078 stuffIndex = pCh->Obuf_stuff;
1080 1079
1081 // Had room to move some data: don't know whether the block size, 1080 // Had room to move some data: don't know whether the block size,
@@ -1102,7 +1101,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
1102 } 1101 }
1103 pCh->Obuf_stuff = stuffIndex; 1102 pCh->Obuf_stuff = stuffIndex;
1104 1103
1105 WRITE_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); 1104 write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
1106 1105
1107 ip2trace (CHANN, ITRC_OUTPUT, 13, 1, stuffIndex ); 1106 ip2trace (CHANN, ITRC_OUTPUT, 13, 1, stuffIndex );
1108 1107
@@ -1352,9 +1351,9 @@ i2OutputFree(i2ChanStrPtr pCh)
1352 if ( !i2Validate ( pCh ) ) { 1351 if ( !i2Validate ( pCh ) ) {
1353 return -1; 1352 return -1;
1354 } 1353 }
1355 READ_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); 1354 read_lock_irqsave(&pCh->Obuf_spinlock, flags);
1356 amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1; 1355 amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
1357 READ_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); 1356 read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
1358 1357
1359 if (amountToMove < 0) { 1358 if (amountToMove < 0) {
1360 amountToMove += OBUF_SIZE; 1359 amountToMove += OBUF_SIZE;
@@ -1464,11 +1463,11 @@ i2StripFifo(i2eBordStrPtr pB)
1464 1463
1465// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_ENTER, 0 ); 1464// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_ENTER, 0 );
1466 1465
1467 while (HAS_INPUT(pB)) { 1466 while (I2_HAS_INPUT(pB)) {
1468// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 2, 0 ); 1467// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 2, 0 );
1469 1468
1470 // Process packet from fifo a one atomic unit 1469 // Process packet from fifo a one atomic unit
1471 WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock,bflags); 1470 write_lock_irqsave(&pB->read_fifo_spinlock, bflags);
1472 1471
1473 // The first word (or two bytes) will have channel number and type of 1472 // The first word (or two bytes) will have channel number and type of
1474 // packet, possibly other information 1473 // packet, possibly other information
@@ -1490,7 +1489,8 @@ i2StripFifo(i2eBordStrPtr pB)
1490// sick! 1489// sick!
1491 if ( ((unsigned int)count) > IBUF_SIZE ) { 1490 if ( ((unsigned int)count) > IBUF_SIZE ) {
1492 pB->i2eFatal = 2; 1491 pB->i2eFatal = 2;
1493 WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); 1492 write_unlock_irqrestore(&pB->read_fifo_spinlock,
1493 bflags);
1494 return; /* Bail out ASAP */ 1494 return; /* Bail out ASAP */
1495 } 1495 }
1496 // Channel is illegally big ? 1496 // Channel is illegally big ?
@@ -1498,7 +1498,8 @@ i2StripFifo(i2eBordStrPtr pB)
1498 (NULL==(pCh = ((i2ChanStrPtr*)pB->i2eChannelPtr)[channel]))) 1498 (NULL==(pCh = ((i2ChanStrPtr*)pB->i2eChannelPtr)[channel])))
1499 { 1499 {
1500 iiReadBuf(pB, junkBuffer, count); 1500 iiReadBuf(pB, junkBuffer, count);
1501 WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); 1501 write_unlock_irqrestore(&pB->read_fifo_spinlock,
1502 bflags);
1502 break; /* From switch: ready for next packet */ 1503 break; /* From switch: ready for next packet */
1503 } 1504 }
1504 1505
@@ -1512,14 +1513,15 @@ i2StripFifo(i2eBordStrPtr pB)
1512 if(ID_OF(pB->i2eLeadoffWord) == ID_HOT_KEY) 1513 if(ID_OF(pB->i2eLeadoffWord) == ID_HOT_KEY)
1513 { 1514 {
1514 pCh->hotKeyIn = iiReadWord(pB) & 0xff; 1515 pCh->hotKeyIn = iiReadWord(pB) & 0xff;
1515 WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); 1516 write_unlock_irqrestore(&pB->read_fifo_spinlock,
1517 bflags);
1516 i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_HOTACK); 1518 i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_HOTACK);
1517 break; /* From the switch: ready for next packet */ 1519 break; /* From the switch: ready for next packet */
1518 } 1520 }
1519 1521
1520 // Normal data! We crudely assume there is room for the data in our 1522 // Normal data! We crudely assume there is room for the data in our
1521 // buffer because the board wouldn't have exceeded his credit limit. 1523 // buffer because the board wouldn't have exceeded his credit limit.
1522 WRITE_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,cflags); 1524 write_lock_irqsave(&pCh->Ibuf_spinlock, cflags);
1523 // We have 2 locks now 1525 // We have 2 locks now
1524 stuffIndex = pCh->Ibuf_stuff; 1526 stuffIndex = pCh->Ibuf_stuff;
1525 amountToRead = IBUF_SIZE - stuffIndex; 1527 amountToRead = IBUF_SIZE - stuffIndex;
@@ -1562,8 +1564,9 @@ i2StripFifo(i2eBordStrPtr pB)
1562 1564
1563 // Update stuff index 1565 // Update stuff index
1564 pCh->Ibuf_stuff = stuffIndex; 1566 pCh->Ibuf_stuff = stuffIndex;
1565 WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,cflags); 1567 write_unlock_irqrestore(&pCh->Ibuf_spinlock, cflags);
1566 WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); 1568 write_unlock_irqrestore(&pB->read_fifo_spinlock,
1569 bflags);
1567 1570
1568#ifdef USE_IQ 1571#ifdef USE_IQ
1569 schedule_work(&pCh->tqueue_input); 1572 schedule_work(&pCh->tqueue_input);
@@ -1585,7 +1588,8 @@ i2StripFifo(i2eBordStrPtr pB)
1585 1588
1586 iiReadBuf(pB, cmdBuffer, count); 1589 iiReadBuf(pB, cmdBuffer, count);
1587 // We can release early with buffer grab 1590 // We can release early with buffer grab
1588 WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); 1591 write_unlock_irqrestore(&pB->read_fifo_spinlock,
1592 bflags);
1589 1593
1590 pc = cmdBuffer; 1594 pc = cmdBuffer;
1591 pcLimit = &(cmdBuffer[count]); 1595 pcLimit = &(cmdBuffer[count]);
@@ -1830,12 +1834,12 @@ i2StripFifo(i2eBordStrPtr pB)
1830 default: // Neither packet? should be impossible 1834 default: // Neither packet? should be impossible
1831 ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1, 1835 ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1,
1832 PTYPE_OF(pB->i2eLeadoffWord) ); 1836 PTYPE_OF(pB->i2eLeadoffWord) );
1833 WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, 1837 write_unlock_irqrestore(&pB->read_fifo_spinlock,
1834 bflags); 1838 bflags);
1835 1839
1836 break; 1840 break;
1837 } // End of switch on type of packets 1841 } // End of switch on type of packets
1838 } //while(board HAS_INPUT) 1842 } /*while(board I2_HAS_INPUT)*/
1839 1843
1840 ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_RETURN, 0 ); 1844 ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_RETURN, 0 );
1841 1845
@@ -1858,7 +1862,7 @@ i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count,int reserve)
1858{ 1862{
1859 int rc = 0; 1863 int rc = 0;
1860 unsigned long flags; 1864 unsigned long flags;
1861 WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); 1865 write_lock_irqsave(&pB->write_fifo_spinlock, flags);
1862 if (!pB->i2eWaitingForEmptyFifo) { 1866 if (!pB->i2eWaitingForEmptyFifo) {
1863 if (pB->i2eFifoRemains > (count+reserve)) { 1867 if (pB->i2eFifoRemains > (count+reserve)) {
1864 pB->i2eFifoRemains -= count; 1868 pB->i2eFifoRemains -= count;
@@ -1867,7 +1871,7 @@ i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count,int reserve)
1867 rc = count; 1871 rc = count;
1868 } 1872 }
1869 } 1873 }
1870 WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); 1874 write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
1871 return rc; 1875 return rc;
1872} 1876}
1873//****************************************************************************** 1877//******************************************************************************
@@ -1898,7 +1902,7 @@ i2StuffFifoBypass(i2eBordStrPtr pB)
1898 while ( --bailout && notClogged && 1902 while ( --bailout && notClogged &&
1899 (NULL != (pCh = i2DeQueueNeeds(pB,NEED_BYPASS)))) 1903 (NULL != (pCh = i2DeQueueNeeds(pB,NEED_BYPASS))))
1900 { 1904 {
1901 WRITE_LOCK_IRQSAVE(&pCh->Cbuf_spinlock,flags); 1905 write_lock_irqsave(&pCh->Cbuf_spinlock, flags);
1902 stripIndex = pCh->Cbuf_strip; 1906 stripIndex = pCh->Cbuf_strip;
1903 1907
1904 // as long as there are packets for this channel... 1908 // as long as there are packets for this channel...
@@ -1906,7 +1910,7 @@ i2StuffFifoBypass(i2eBordStrPtr pB)
1906 while (stripIndex != pCh->Cbuf_stuff) { 1910 while (stripIndex != pCh->Cbuf_stuff) {
1907 pRemove = &(pCh->Cbuf[stripIndex]); 1911 pRemove = &(pCh->Cbuf[stripIndex]);
1908 packetSize = CMD_COUNT_OF(pRemove) + sizeof(i2CmdHeader); 1912 packetSize = CMD_COUNT_OF(pRemove) + sizeof(i2CmdHeader);
1909 paddedSize = ROUNDUP(packetSize); 1913 paddedSize = roundup(packetSize, 2);
1910 1914
1911 if (paddedSize > 0) { 1915 if (paddedSize > 0) {
1912 if ( 0 == i2Write2Fifo(pB, pRemove, paddedSize,0)) { 1916 if ( 0 == i2Write2Fifo(pB, pRemove, paddedSize,0)) {
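ROUNDUP(n), previously defined in i2ellis.h as ((n)+1) & ~1, is replaced in the hunk above (and in the i2StuffFifoFlow/i2StuffFifoInline hunks below) by the generic roundup(n, 2) from <linux/kernel.h>; both forms round an odd packet size up to the next even number and leave even sizes unchanged. A hypothetical helper showing the equivalence:

#include <linux/kernel.h>

/* For size == 13: old ROUNDUP(13) == (13 + 1) & ~1 == 14, and
 * roundup(13, 2) == 14; for size == 14 both yield 14.
 */
static unsigned short demo_pad_to_word(unsigned short size)
{
	return roundup(size, 2);
}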
@@ -1930,7 +1934,7 @@ WriteDBGBuf("BYPS", pRemove, paddedSize);
1930 // Done with this channel. Move to next, removing this one from 1934 // Done with this channel. Move to next, removing this one from
1931 // the queue of channels if we cleaned it out (i.e., didn't get clogged. 1935 // the queue of channels if we cleaned it out (i.e., didn't get clogged.
1932 pCh->Cbuf_strip = stripIndex; 1936 pCh->Cbuf_strip = stripIndex;
1933 WRITE_UNLOCK_IRQRESTORE(&pCh->Cbuf_spinlock,flags); 1937 write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags);
1934 } // Either clogged or finished all the work 1938 } // Either clogged or finished all the work
1935 1939
1936#ifdef IP2DEBUG_TRACE 1940#ifdef IP2DEBUG_TRACE
@@ -1954,7 +1958,7 @@ static inline void
1954i2StuffFifoFlow(i2eBordStrPtr pB) 1958i2StuffFifoFlow(i2eBordStrPtr pB)
1955{ 1959{
1956 i2ChanStrPtr pCh; 1960 i2ChanStrPtr pCh;
1957 unsigned short paddedSize = ROUNDUP(sizeof(flowIn)); 1961 unsigned short paddedSize = roundup(sizeof(flowIn), 2);
1958 1962
1959 ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_ENTER, 2, 1963 ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_ENTER, 2,
1960 pB->i2eFifoRemains, paddedSize ); 1964 pB->i2eFifoRemains, paddedSize );
@@ -2010,7 +2014,7 @@ i2StuffFifoInline(i2eBordStrPtr pB)
2010 while ( --bailout && notClogged && 2014 while ( --bailout && notClogged &&
2011 (NULL != (pCh = i2DeQueueNeeds(pB,NEED_INLINE))) ) 2015 (NULL != (pCh = i2DeQueueNeeds(pB,NEED_INLINE))) )
2012 { 2016 {
2013 WRITE_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); 2017 write_lock_irqsave(&pCh->Obuf_spinlock, flags);
2014 stripIndex = pCh->Obuf_strip; 2018 stripIndex = pCh->Obuf_strip;
2015 2019
2016 ip2trace (CHANN, ITRC_SICMD, 3, 2, stripIndex, pCh->Obuf_stuff ); 2020 ip2trace (CHANN, ITRC_SICMD, 3, 2, stripIndex, pCh->Obuf_stuff );
@@ -2031,7 +2035,7 @@ i2StuffFifoInline(i2eBordStrPtr pB)
2031 packetSize = flowsize + sizeof(i2CmdHeader); 2035 packetSize = flowsize + sizeof(i2CmdHeader);
2032 } 2036 }
2033 flowsize = CREDIT_USAGE(flowsize); 2037 flowsize = CREDIT_USAGE(flowsize);
2034 paddedSize = ROUNDUP(packetSize); 2038 paddedSize = roundup(packetSize, 2);
2035 2039
2036 ip2trace (CHANN, ITRC_SICMD, 4, 2, pB->i2eFifoRemains, paddedSize ); 2040 ip2trace (CHANN, ITRC_SICMD, 4, 2, pB->i2eFifoRemains, paddedSize );
2037 2041
@@ -2086,7 +2090,7 @@ WriteDBGBuf("DATA", pRemove, paddedSize);
2086 // Done with this channel. Move to next, removing this one from the 2090 // Done with this channel. Move to next, removing this one from the
2087 // queue of channels if we cleaned it out (i.e., didn't get clogged. 2091 // queue of channels if we cleaned it out (i.e., didn't get clogged.
2088 pCh->Obuf_strip = stripIndex; 2092 pCh->Obuf_strip = stripIndex;
2089 WRITE_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); 2093 write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
2090 if ( notClogged ) 2094 if ( notClogged )
2091 { 2095 {
2092 2096
@@ -2190,10 +2194,11 @@ i2ServiceBoard ( i2eBordStrPtr pB )
2190 2194
2191 if (inmail & MB_OUT_STRIPPED) { 2195 if (inmail & MB_OUT_STRIPPED) {
2192 pB->i2eFifoOutInts++; 2196 pB->i2eFifoOutInts++;
2193 WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); 2197 write_lock_irqsave(&pB->write_fifo_spinlock, flags);
2194 pB->i2eFifoRemains = pB->i2eFifoSize; 2198 pB->i2eFifoRemains = pB->i2eFifoSize;
2195 pB->i2eWaitingForEmptyFifo = 0; 2199 pB->i2eWaitingForEmptyFifo = 0;
2196 WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); 2200 write_unlock_irqrestore(&pB->write_fifo_spinlock,
2201 flags);
2197 2202
2198 ip2trace (ITRC_NO_PORT, ITRC_INTR, 30, 1, pB->i2eFifoRemains ); 2203 ip2trace (ITRC_NO_PORT, ITRC_INTR, 30, 1, pB->i2eFifoRemains );
2199 2204
diff --git a/drivers/char/ip2/i2os.h b/drivers/char/ip2/i2os.h
deleted file mode 100644
index eff9b542d699..000000000000
--- a/drivers/char/ip2/i2os.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/*******************************************************************************
2*
3* (c) 1999 by Computone Corporation
4*
5********************************************************************************
6*
7*
8* PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport
9* serial I/O controllers.
10*
11* DESCRIPTION: Defines, definitions and includes which are heavily dependent
12* on O/S, host, compiler, etc. This file is tailored for:
13* Linux v2.0.0 and later
14* Gnu gcc c2.7.2
15* 80x86 architecture
16*
17*******************************************************************************/
18
19#ifndef I2OS_H /* To prevent multiple includes */
20#define I2OS_H 1
21
22//-------------------------------------------------
23// Required Includes
24//-------------------------------------------------
25
26#include "ip2types.h"
27#include <asm/io.h> /* For inb, etc */
28
29//------------------------------------
30// Defines for I/O instructions:
31//------------------------------------
32
33#define INB(port) inb(port)
34#define OUTB(port,value) outb((value),(port))
35#define INW(port) inw(port)
36#define OUTW(port,value) outw((value),(port))
37#define OUTSW(port,addr,count) outsw((port),(addr),(((count)+1)/2))
38#define OUTSB(port,addr,count) outsb((port),(addr),(((count)+1))&-2)
39#define INSW(port,addr,count) insw((port),(addr),(((count)+1)/2))
40#define INSB(port,addr,count) insb((port),(addr),(((count)+1))&-2)
41
42//--------------------------------------------
43// Interrupt control
44//--------------------------------------------
45
46#define LOCK_INIT(a) rwlock_init(a)
47
48#define SAVE_AND_DISABLE_INTS(a,b) { \
49 /* printk("get_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
50 spin_lock_irqsave(a,b); \
51}
52
53#define RESTORE_INTS(a,b) { \
54 /* printk("rel_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
55 spin_unlock_irqrestore(a,b); \
56}
57
58#define READ_LOCK_IRQSAVE(a,b) { \
59 /* printk("get_read_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
60 read_lock_irqsave(a,b); \
61}
62
63#define READ_UNLOCK_IRQRESTORE(a,b) { \
64 /* printk("rel_read_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
65 read_unlock_irqrestore(a,b); \
66}
67
68#define WRITE_LOCK_IRQSAVE(a,b) { \
69 /* printk("get_write_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
70 write_lock_irqsave(a,b); \
71}
72
73#define WRITE_UNLOCK_IRQRESTORE(a,b) { \
74 /* printk("rel_write_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
75 write_unlock_irqrestore(a,b); \
76}
77
78
79//------------------------------------------------------------------------------
80// Hardware-delay loop
81//
82// Probably used in only one place (see i2ellis.c) but this helps keep things
83// together. Note we have unwound the IN instructions. On machines with a
84// reasonable cache, the eight instructions (1 byte each) should fit in cache
85// nicely, and on un-cached machines, the code-fetch would tend not to dominate.
86// Note that cx is shifted so that "count" still reflects the total number of
87// iterations assuming no unwinding.
88//------------------------------------------------------------------------------
89
90//#define DELAY1MS(port,count,label)
91
92//------------------------------------------------------------------------------
93// Macros to switch to a new stack, saving stack pointers, and to restore the
94// old stack (Used, for example, in i2lib.c) "heap" is the address of some
95// buffer which will become the new stack (working down from highest address).
96// The two words at the two lowest addresses in this stack are for storing the
97// SS and SP.
98//------------------------------------------------------------------------------
99
100//#define TO_NEW_STACK(heap,size)
101//#define TO_OLD_STACK(heap)
102
103//------------------------------------------------------------------------------
104// Macros to save the original IRQ vectors and masks, and to patch in new ones.
105//------------------------------------------------------------------------------
106
107//#define SAVE_IRQ_MASKS(dest)
108//#define WRITE_IRQ_MASKS(src)
109//#define SAVE_IRQ_VECTOR(value,dest)
110//#define WRITE_IRQ_VECTOR(value,src)
111
112//------------------------------------------------------------------------------
113// Macro to copy data from one far pointer to another.
114//------------------------------------------------------------------------------
115
116#define I2_MOVE_DATA(fpSource,fpDest,count) memmove(fpDest,fpSource,count);
117
118//------------------------------------------------------------------------------
119// Macros to issue eoi's to host interrupt control (IBM AT 8259-style).
120//------------------------------------------------------------------------------
121
122//#define MASTER_EOI
123//#define SLAVE_EOI
124
125#endif /* I2OS_H */
126
127
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index b1d6cad84282..70957acaa960 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -133,8 +133,9 @@
133 *****************/ 133 *****************/
134 134
135#include <linux/proc_fs.h> 135#include <linux/proc_fs.h>
136#include <linux/seq_file.h>
136 137
137static int ip2_read_procmem(char *, char **, off_t, int); 138static const struct file_operations ip2mem_proc_fops;
138static int ip2_read_proc(char *, char **, off_t, int, int *, void * ); 139static int ip2_read_proc(char *, char **, off_t, int, int *, void * );
139 140
140/********************/ 141/********************/
@@ -168,7 +169,7 @@ static int Fip_firmware_size;
168static int ip2_open(PTTY, struct file *); 169static int ip2_open(PTTY, struct file *);
169static void ip2_close(PTTY, struct file *); 170static void ip2_close(PTTY, struct file *);
170static int ip2_write(PTTY, const unsigned char *, int); 171static int ip2_write(PTTY, const unsigned char *, int);
171static void ip2_putchar(PTTY, unsigned char); 172static int ip2_putchar(PTTY, unsigned char);
172static void ip2_flush_chars(PTTY); 173static void ip2_flush_chars(PTTY);
173static int ip2_write_room(PTTY); 174static int ip2_write_room(PTTY);
174static int ip2_chars_in_buf(PTTY); 175static int ip2_chars_in_buf(PTTY);
@@ -354,14 +355,15 @@ have_requested_irq( char irq )
354/* the driver initialisation function and returns what it returns. */ 355/* the driver initialisation function and returns what it returns. */
355/******************************************************************************/ 356/******************************************************************************/
356#ifdef MODULE 357#ifdef MODULE
357int 358static int __init
358init_module(void) 359ip2_init_module(void)
359{ 360{
360#ifdef IP2DEBUG_INIT 361#ifdef IP2DEBUG_INIT
361 printk (KERN_DEBUG "Loading module ...\n" ); 362 printk (KERN_DEBUG "Loading module ...\n" );
362#endif 363#endif
363 return 0; 364 return 0;
364} 365}
366module_init(ip2_init_module);
365#endif /* MODULE */ 367#endif /* MODULE */
366 368
367/******************************************************************************/ 369/******************************************************************************/
@@ -380,8 +382,8 @@ init_module(void)
380/* driver should be returned since it may be unloaded from memory. */ 382/* driver should be returned since it may be unloaded from memory. */
381/******************************************************************************/ 383/******************************************************************************/
382#ifdef MODULE 384#ifdef MODULE
383void 385void __exit
384cleanup_module(void) 386ip2_cleanup_module(void)
385{ 387{
386 int err; 388 int err;
387 int i; 389 int i;
@@ -423,7 +425,7 @@ cleanup_module(void)
423 } 425 }
424 put_tty_driver(ip2_tty_driver); 426 put_tty_driver(ip2_tty_driver);
425 unregister_chrdev(IP2_IPL_MAJOR, pcIpl); 427 unregister_chrdev(IP2_IPL_MAJOR, pcIpl);
426 remove_proc_entry("ip2mem", &proc_root); 428 remove_proc_entry("ip2mem", NULL);
427 429
428 // free memory 430 // free memory
429 for (i = 0; i < IP2_MAX_BOARDS; i++) { 431 for (i = 0; i < IP2_MAX_BOARDS; i++) {
@@ -451,6 +453,7 @@ cleanup_module(void)
451 printk (KERN_DEBUG "IP2 Unloaded\n" ); 453 printk (KERN_DEBUG "IP2 Unloaded\n" );
452#endif 454#endif
453} 455}
456module_exit(ip2_cleanup_module);
454#endif /* MODULE */ 457#endif /* MODULE */
455 458
456static const struct tty_operations ip2_ops = { 459static const struct tty_operations ip2_ops = {
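The two hunks above rename init_module()/cleanup_module() and register them explicitly through module_init()/module_exit(). A minimal, self-contained sketch of that pattern (demo names only, not the driver's real init or teardown logic):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	printk(KERN_DEBUG "demo loaded\n");
	return 0;			/* a nonzero return aborts the load */
}

static void __exit demo_exit(void)
{
	printk(KERN_DEBUG "demo unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The __init/__exit annotations let the kernel discard the init text after loading and drop the exit path entirely for built-in configurations.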
@@ -695,7 +698,7 @@ ip2_loadmain(int *iop, int *irqp, unsigned char *firmware, int firmsize)
695 } 698 }
696 } 699 }
697 /* Register the read_procmem thing */ 700 /* Register the read_procmem thing */
698 if (!create_proc_info_entry("ip2mem",0,&proc_root,ip2_read_procmem)) { 701 if (!proc_create("ip2mem",0,NULL,&ip2mem_proc_fops)) {
699 printk(KERN_ERR "IP2: failed to register read_procmem\n"); 702 printk(KERN_ERR "IP2: failed to register read_procmem\n");
700 } else { 703 } else {
701 704
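The hunk above registers the ip2mem entry with proc_create() and a file_operations table instead of create_proc_info_entry(). The ip2mem_proc_fops definition itself falls outside this excerpt; the sketch below shows the usual single_open()-based seq_file shape such a table takes, with purely hypothetical demo names and output:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "statistics would be formatted here\n");
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* registered the same way as ip2mem above:
 *	proc_create("demo", 0, NULL, &demo_proc_fops);
 */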
@@ -1049,9 +1052,9 @@ set_irq( int boardnum, int boardIrq )
1049 * Write to FIFO; don't bother to adjust fifo capacity for this, since 1052 * Write to FIFO; don't bother to adjust fifo capacity for this, since
1050 * board will respond almost immediately after SendMail hit. 1053 * board will respond almost immediately after SendMail hit.
1051 */ 1054 */
1052 WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); 1055 write_lock_irqsave(&pB->write_fifo_spinlock, flags);
1053 iiWriteBuf(pB, tempCommand, 4); 1056 iiWriteBuf(pB, tempCommand, 4);
1054 WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); 1057 write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
1055 pB->i2eUsingIrq = boardIrq; 1058 pB->i2eUsingIrq = boardIrq;
1056 pB->i2eOutMailWaiting |= MB_OUT_STUFFED; 1059 pB->i2eOutMailWaiting |= MB_OUT_STUFFED;
1057 1060
@@ -1069,9 +1072,9 @@ set_irq( int boardnum, int boardIrq )
1069 (CMD_OF(tempCommand))[4] = 64; // chars 1072 (CMD_OF(tempCommand))[4] = 64; // chars
1070 1073
1071 (CMD_OF(tempCommand))[5] = 87; // HW_TEST 1074 (CMD_OF(tempCommand))[5] = 87; // HW_TEST
1072 WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); 1075 write_lock_irqsave(&pB->write_fifo_spinlock, flags);
1073 iiWriteBuf(pB, tempCommand, 8); 1076 iiWriteBuf(pB, tempCommand, 8);
1074 WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); 1077 write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
1075 1078
1076 CHANNEL_OF(tempCommand) = 0; 1079 CHANNEL_OF(tempCommand) = 0;
1077 PTYPE_OF(tempCommand) = PTYPE_BYPASS; 1080 PTYPE_OF(tempCommand) = PTYPE_BYPASS;
@@ -1086,9 +1089,9 @@ set_irq( int boardnum, int boardIrq )
1086 CMD_COUNT_OF(tempCommand) = 2; 1089 CMD_COUNT_OF(tempCommand) = 2;
1087 (CMD_OF(tempCommand))[0] = 44; /* get ping */ 1090 (CMD_OF(tempCommand))[0] = 44; /* get ping */
1088 (CMD_OF(tempCommand))[1] = 200; /* 200 ms */ 1091 (CMD_OF(tempCommand))[1] = 200; /* 200 ms */
1089 WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); 1092 write_lock_irqsave(&pB->write_fifo_spinlock, flags);
1090 iiWriteBuf(pB, tempCommand, 4); 1093 iiWriteBuf(pB, tempCommand, 4);
1091 WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); 1094 write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
1092#endif 1095#endif
1093 1096
1094 iiEnableMailIrq(pB); 1097 iiEnableMailIrq(pB);
@@ -1267,12 +1270,12 @@ static void do_input(struct work_struct *work)
1267 1270
1268 // Data input 1271 // Data input
1269 if ( pCh->pTTY != NULL ) { 1272 if ( pCh->pTTY != NULL ) {
1270 READ_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags) 1273 read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
1271 if (!pCh->throttled && (pCh->Ibuf_stuff != pCh->Ibuf_strip)) { 1274 if (!pCh->throttled && (pCh->Ibuf_stuff != pCh->Ibuf_strip)) {
1272 READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags) 1275 read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
1273 i2Input( pCh ); 1276 i2Input( pCh );
1274 } else 1277 } else
1275 READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags) 1278 read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
1276 } else { 1279 } else {
1277 ip2trace(CHANN, ITRC_INPUT, 22, 0 ); 1280 ip2trace(CHANN, ITRC_INPUT, 22, 0 );
1278 1281
@@ -1613,10 +1616,8 @@ ip2_close( PTTY tty, struct file *pFile )
1613 1616
1614 serviceOutgoingFifo ( pCh->pMyBord ); 1617 serviceOutgoingFifo ( pCh->pMyBord );
1615 1618
1616 if ( tty->driver->flush_buffer ) 1619 tty_ldisc_flush(tty);
1617 tty->driver->flush_buffer(tty); 1620 tty_driver_flush_buffer(tty);
1618 if ( tty->ldisc.flush_buffer )
1619 tty->ldisc.flush_buffer(tty);
1620 tty->closing = 0; 1621 tty->closing = 0;
1621 1622
1622 pCh->pTTY = NULL; 1623 pCh->pTTY = NULL;
@@ -1716,9 +1717,9 @@ ip2_write( PTTY tty, const unsigned char *pData, int count)
1716 ip2_flush_chars( tty ); 1717 ip2_flush_chars( tty );
1717 1718
1718 /* This is the actual move bit. Make sure it does what we need!!!!! */ 1719 /* This is the actual move bit. Make sure it does what we need!!!!! */
1719 WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); 1720 write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
1720 bytesSent = i2Output( pCh, pData, count); 1721 bytesSent = i2Output( pCh, pData, count);
1721 WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); 1722 write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
1722 1723
1723 ip2trace (CHANN, ITRC_WRITE, ITRC_RETURN, 1, bytesSent ); 1724 ip2trace (CHANN, ITRC_WRITE, ITRC_RETURN, 1, bytesSent );
1724 1725
@@ -1735,7 +1736,7 @@ ip2_write( PTTY tty, const unsigned char *pData, int count)
1735/* */ 1736/* */
1736/* */ 1737/* */
1737/******************************************************************************/ 1738/******************************************************************************/
1738static void 1739static int
1739ip2_putchar( PTTY tty, unsigned char ch ) 1740ip2_putchar( PTTY tty, unsigned char ch )
1740{ 1741{
1741 i2ChanStrPtr pCh = tty->driver_data; 1742 i2ChanStrPtr pCh = tty->driver_data;
@@ -1743,13 +1744,14 @@ ip2_putchar( PTTY tty, unsigned char ch )
1743 1744
1744// ip2trace (CHANN, ITRC_PUTC, ITRC_ENTER, 1, ch ); 1745// ip2trace (CHANN, ITRC_PUTC, ITRC_ENTER, 1, ch );
1745 1746
1746 WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); 1747 write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
1747 pCh->Pbuf[pCh->Pbuf_stuff++] = ch; 1748 pCh->Pbuf[pCh->Pbuf_stuff++] = ch;
1748 if ( pCh->Pbuf_stuff == sizeof pCh->Pbuf ) { 1749 if ( pCh->Pbuf_stuff == sizeof pCh->Pbuf ) {
1749 WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); 1750 write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
1750 ip2_flush_chars( tty ); 1751 ip2_flush_chars( tty );
1751 } else 1752 } else
1752 WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); 1753 write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
1754 return 1;
1753 1755
1754// ip2trace (CHANN, ITRC_PUTC, ITRC_RETURN, 1, ch ); 1756// ip2trace (CHANN, ITRC_PUTC, ITRC_RETURN, 1, ch );
1755} 1757}
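The hunk above changes ip2_putchar() to return int, matching the tty_operations ->put_char() prototype in this kernel; the return value appears to report how many characters were accepted (always 1 here, since the driver buffers the byte unconditionally). A minimal, hypothetical stand-in showing that contract:

#include <linux/tty.h>
#include <linux/tty_driver.h>

/* Demo only: accept the character and say so; returning 0 would indicate
 * the driver had no room and the caller should fall back to write().
 */
static int demo_put_char(struct tty_struct *tty, unsigned char ch)
{
	return 1;
}

static const struct tty_operations demo_ops = {
	.put_char = demo_put_char,
};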
@@ -1769,7 +1771,7 @@ ip2_flush_chars( PTTY tty )
1769 i2ChanStrPtr pCh = tty->driver_data; 1771 i2ChanStrPtr pCh = tty->driver_data;
1770 unsigned long flags; 1772 unsigned long flags;
1771 1773
1772 WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); 1774 write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
1773 if ( pCh->Pbuf_stuff ) { 1775 if ( pCh->Pbuf_stuff ) {
1774 1776
1775// ip2trace (CHANN, ITRC_PUTC, 10, 1, strip ); 1777// ip2trace (CHANN, ITRC_PUTC, 10, 1, strip );
@@ -1783,7 +1785,7 @@ ip2_flush_chars( PTTY tty )
1783 } 1785 }
1784 pCh->Pbuf_stuff -= strip; 1786 pCh->Pbuf_stuff -= strip;
1785 } 1787 }
1786 WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); 1788 write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
1787} 1789}
1788 1790
1789/******************************************************************************/ 1791/******************************************************************************/
@@ -1801,9 +1803,9 @@ ip2_write_room ( PTTY tty )
1801 i2ChanStrPtr pCh = tty->driver_data; 1803 i2ChanStrPtr pCh = tty->driver_data;
1802 unsigned long flags; 1804 unsigned long flags;
1803 1805
1804 READ_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); 1806 read_lock_irqsave(&pCh->Pbuf_spinlock, flags);
1805 bytesFree = i2OutputFree( pCh ) - pCh->Pbuf_stuff; 1807 bytesFree = i2OutputFree( pCh ) - pCh->Pbuf_stuff;
1806 READ_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); 1808 read_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
1807 1809
1808 ip2trace (CHANN, ITRC_WRITE, 11, 1, bytesFree ); 1810 ip2trace (CHANN, ITRC_WRITE, 11, 1, bytesFree );
1809 1811
@@ -1833,12 +1835,12 @@ ip2_chars_in_buf ( PTTY tty )
1833 pCh->Obuf_char_count + pCh->Pbuf_stuff, 1835 pCh->Obuf_char_count + pCh->Pbuf_stuff,
1834 pCh->Obuf_char_count, pCh->Pbuf_stuff ); 1836 pCh->Obuf_char_count, pCh->Pbuf_stuff );
1835#endif 1837#endif
1836 READ_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); 1838 read_lock_irqsave(&pCh->Obuf_spinlock, flags);
1837 rc = pCh->Obuf_char_count; 1839 rc = pCh->Obuf_char_count;
1838 READ_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); 1840 read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
1839 READ_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); 1841 read_lock_irqsave(&pCh->Pbuf_spinlock, flags);
1840 rc += pCh->Pbuf_stuff; 1842 rc += pCh->Pbuf_stuff;
1841 READ_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); 1843 read_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
1842 return rc; 1844 return rc;
1843} 1845}
1844 1846
@@ -1862,9 +1864,9 @@ ip2_flush_buffer( PTTY tty )
1862#ifdef IP2DEBUG_WRITE 1864#ifdef IP2DEBUG_WRITE
1863 printk (KERN_DEBUG "IP2: flush buffer\n" ); 1865 printk (KERN_DEBUG "IP2: flush buffer\n" );
1864#endif 1866#endif
1865 WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); 1867 write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
1866 pCh->Pbuf_stuff = 0; 1868 pCh->Pbuf_stuff = 0;
1867 WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); 1869 write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
1868 i2FlushOutput( pCh ); 1870 i2FlushOutput( pCh );
1869 ip2_owake(tty); 1871 ip2_owake(tty);
1870 1872
@@ -1950,15 +1952,15 @@ ip2_unthrottle ( PTTY tty )
1950 pCh->throttled = 0; 1952 pCh->throttled = 0;
1951 i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_RESUME); 1953 i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_RESUME);
1952 serviceOutgoingFifo( pCh->pMyBord ); 1954 serviceOutgoingFifo( pCh->pMyBord );
1953 READ_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags) 1955 read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
1954 if ( pCh->Ibuf_stuff != pCh->Ibuf_strip ) { 1956 if ( pCh->Ibuf_stuff != pCh->Ibuf_strip ) {
1955 READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags) 1957 read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
1956#ifdef IP2DEBUG_READ 1958#ifdef IP2DEBUG_READ
1957 printk (KERN_DEBUG "i2Input called from unthrottle\n" ); 1959 printk (KERN_DEBUG "i2Input called from unthrottle\n" );
1958#endif 1960#endif
1959 i2Input( pCh ); 1961 i2Input( pCh );
1960 } else 1962 } else
1961 READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags) 1963 read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
1962} 1964}
1963 1965
1964static void 1966static void
@@ -2201,9 +2203,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
2201 * for masking). Caller should use TIOCGICOUNT to see which one it was 2203 * for masking). Caller should use TIOCGICOUNT to see which one it was
2202 */ 2204 */
2203 case TIOCMIWAIT: 2205 case TIOCMIWAIT:
2204 WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags); 2206 write_lock_irqsave(&pB->read_fifo_spinlock, flags);
2205 cprev = pCh->icount; /* note the counters on entry */ 2207 cprev = pCh->icount; /* note the counters on entry */
2206 WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags); 2208 write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
2207 i2QueueCommands(PTYPE_BYPASS, pCh, 100, 4, 2209 i2QueueCommands(PTYPE_BYPASS, pCh, 100, 4,
2208 CMD_DCD_REP, CMD_CTS_REP, CMD_DSR_REP, CMD_RI_REP); 2210 CMD_DCD_REP, CMD_CTS_REP, CMD_DSR_REP, CMD_RI_REP);
2209 init_waitqueue_entry(&wait, current); 2211 init_waitqueue_entry(&wait, current);
@@ -2223,9 +2225,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
2223 rc = -ERESTARTSYS; 2225 rc = -ERESTARTSYS;
2224 break; 2226 break;
2225 } 2227 }
2226 WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags); 2228 write_lock_irqsave(&pB->read_fifo_spinlock, flags);
2227 cnow = pCh->icount; /* atomic copy */ 2229 cnow = pCh->icount; /* atomic copy */
2228 WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags); 2230 write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
2229 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && 2231 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2230 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { 2232 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2231 rc = -EIO; /* no change => rc */ 2233 rc = -EIO; /* no change => rc */
@@ -2263,9 +2265,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
2263 case TIOCGICOUNT: 2265 case TIOCGICOUNT:
2264 ip2trace (CHANN, ITRC_IOCTL, 11, 1, rc ); 2266 ip2trace (CHANN, ITRC_IOCTL, 11, 1, rc );
2265 2267
2266 WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags); 2268 write_lock_irqsave(&pB->read_fifo_spinlock, flags);
2267 cnow = pCh->icount; 2269 cnow = pCh->icount;
2268 WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags); 2270 write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
2269 p_cuser = argp; 2271 p_cuser = argp;
2270 rc = put_user(cnow.cts, &p_cuser->cts); 2272 rc = put_user(cnow.cts, &p_cuser->cts);
2271 rc = put_user(cnow.dsr, &p_cuser->dsr); 2273 rc = put_user(cnow.dsr, &p_cuser->dsr);
@@ -2871,7 +2873,7 @@ ip2_ipl_ioctl ( struct inode *pInode, struct file *pFile, UINT cmd, ULONG arg )
2871 case 65: /* Board - ip2stat */ 2873 case 65: /* Board - ip2stat */
2872 if ( pB ) { 2874 if ( pB ) {
2873 rc = copy_to_user(argp, pB, sizeof(i2eBordStr)); 2875 rc = copy_to_user(argp, pB, sizeof(i2eBordStr));
2874 rc = put_user(INB(pB->i2eStatus), 2876 rc = put_user(inb(pB->i2eStatus),
2875 (ULONG __user *)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) ); 2877 (ULONG __user *)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) );
2876 } else { 2878 } else {
2877 rc = -ENODEV; 2879 rc = -ENODEV;
@@ -2967,65 +2969,61 @@ ip2_ipl_open( struct inode *pInode, struct file *pFile )
2967 } 2969 }
2968 return 0; 2970 return 0;
2969} 2971}
2970/******************************************************************************/
2971/* Function: ip2_read_procmem */
2972/* Parameters: */
2973/* */
2974/* Returns: Length of output */
2975/* */
2976/* Description: */
2977/* Supplies some driver operating parameters */
2978/* Not real useful unless your debugging the fifo */
2979/* */
2980/******************************************************************************/
2981
2982#define LIMIT (PAGE_SIZE - 120)
2983 2972
2984static int 2973static int
2985ip2_read_procmem(char *buf, char **start, off_t offset, int len) 2974proc_ip2mem_show(struct seq_file *m, void *v)
2986{ 2975{
2987 i2eBordStrPtr pB; 2976 i2eBordStrPtr pB;
2988 i2ChanStrPtr pCh; 2977 i2ChanStrPtr pCh;
2989 PTTY tty; 2978 PTTY tty;
2990 int i; 2979 int i;
2991 2980
2992 len = 0;
2993
2994#define FMTLINE "%3d: 0x%08x 0x%08x 0%011o 0%011o\n" 2981#define FMTLINE "%3d: 0x%08x 0x%08x 0%011o 0%011o\n"
2995#define FMTLIN2 " 0x%04x 0x%04x tx flow 0x%x\n" 2982#define FMTLIN2 " 0x%04x 0x%04x tx flow 0x%x\n"
2996#define FMTLIN3 " 0x%04x 0x%04x rc flow\n" 2983#define FMTLIN3 " 0x%04x 0x%04x rc flow\n"
2997 2984
2998 len += sprintf(buf+len,"\n"); 2985 seq_printf(m,"\n");
2999 2986
3000 for( i = 0; i < IP2_MAX_BOARDS; ++i ) { 2987 for( i = 0; i < IP2_MAX_BOARDS; ++i ) {
3001 pB = i2BoardPtrTable[i]; 2988 pB = i2BoardPtrTable[i];
3002 if ( pB ) { 2989 if ( pB ) {
3003 len += sprintf(buf+len,"board %d:\n",i); 2990 seq_printf(m,"board %d:\n",i);
3004 len += sprintf(buf+len,"\tFifo rem: %d mty: %x outM %x\n", 2991 seq_printf(m,"\tFifo rem: %d mty: %x outM %x\n",
3005 pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting); 2992 pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting);
3006 } 2993 }
3007 } 2994 }
3008 2995
3009 len += sprintf(buf+len,"#: tty flags, port flags, cflags, iflags\n"); 2996 seq_printf(m,"#: tty flags, port flags, cflags, iflags\n");
3010 for (i=0; i < IP2_MAX_PORTS; i++) { 2997 for (i=0; i < IP2_MAX_PORTS; i++) {
3011 if (len > LIMIT)
3012 break;
3013 pCh = DevTable[i]; 2998 pCh = DevTable[i];
3014 if (pCh) { 2999 if (pCh) {
3015 tty = pCh->pTTY; 3000 tty = pCh->pTTY;
3016 if (tty && tty->count) { 3001 if (tty && tty->count) {
3017 len += sprintf(buf+len,FMTLINE,i,(int)tty->flags,pCh->flags, 3002 seq_printf(m,FMTLINE,i,(int)tty->flags,pCh->flags,
3018 tty->termios->c_cflag,tty->termios->c_iflag); 3003 tty->termios->c_cflag,tty->termios->c_iflag);
3019 3004
3020 len += sprintf(buf+len,FMTLIN2, 3005 seq_printf(m,FMTLIN2,
3021 pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds); 3006 pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds);
3022 len += sprintf(buf+len,FMTLIN3,pCh->infl.asof,pCh->infl.room); 3007 seq_printf(m,FMTLIN3,pCh->infl.asof,pCh->infl.room);
3023 } 3008 }
3024 } 3009 }
3025 } 3010 }
3026 return len; 3011 return 0;
3012}
3013
3014static int proc_ip2mem_open(struct inode *inode, struct file *file)
3015{
3016 return single_open(file, proc_ip2mem_show, NULL);
3027} 3017}
3028 3018
3019static const struct file_operations ip2mem_proc_fops = {
3020 .owner = THIS_MODULE,
3021 .open = proc_ip2mem_open,
3022 .read = seq_read,
3023 .llseek = seq_lseek,
3024 .release = single_release,
3025};
3026
3029/* 3027/*
3030 * This is the handler for /proc/tty/driver/ip2 3028 * This is the handler for /proc/tty/driver/ip2
3031 * 3029 *
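The ip2 hunk above drops the hand-rolled ip2_read_procmem() buffer/LIMIT bookkeeping in favour of the seq_file single_open() pattern. A minimal sketch of that pattern, assuming a hypothetical example_show() routine rather than this driver's proc_ip2mem_show():

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *m, void *v)
	{
		/* seq_printf() replaces the sprintf(buf + len, ...) accounting. */
		seq_printf(m, "example value: %d\n", 42);
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, NULL);
	}

	static const struct file_operations example_proc_fops = {
		.owner		= THIS_MODULE,
		.open		= example_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

single_open() buffers the whole output of the show routine for the caller, so the driver no longer has to track offsets or truncate at a page-size limit itself.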
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 553f0a408eda..eb8a1a8c188e 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -9,7 +9,3 @@ obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
9obj-$(CONFIG_IPMI_SI) += ipmi_si.o 9obj-$(CONFIG_IPMI_SI) += ipmi_si.o
10obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o 10obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
11obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o 11obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
12
13ipmi_si.o: $(ipmi_si-objs)
14 $(LD) -r -o $@ $(ipmi_si-objs)
15
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index e736119b6497..7b98c067190a 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -37,26 +37,32 @@
37#define BT_DEBUG_ENABLE 1 /* Generic messages */ 37#define BT_DEBUG_ENABLE 1 /* Generic messages */
38#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ 38#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */
39#define BT_DEBUG_STATES 4 /* Verbose look at state changes */ 39#define BT_DEBUG_STATES 4 /* Verbose look at state changes */
40/* BT_DEBUG_OFF must be zero to correspond to the default uninitialized 40/*
41 value */ 41 * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
42 * value
43 */
42 44
43static int bt_debug; /* 0 == BT_DEBUG_OFF */ 45static int bt_debug; /* 0 == BT_DEBUG_OFF */
44 46
45module_param(bt_debug, int, 0644); 47module_param(bt_debug, int, 0644);
46MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); 48MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
47 49
48/* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, 50/*
49 and 64 byte buffers. However, one HP implementation wants 255 bytes of 51 * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
50 buffer (with a documented message of 160 bytes) so go for the max. 52 * and 64 byte buffers. However, one HP implementation wants 255 bytes of
51 Since the Open IPMI architecture is single-message oriented at this 53 * buffer (with a documented message of 160 bytes) so go for the max.
52 stage, the queue depth of BT is of no concern. */ 54 * Since the Open IPMI architecture is single-message oriented at this
55 * stage, the queue depth of BT is of no concern.
56 */
53 57
54#define BT_NORMAL_TIMEOUT 5 /* seconds */ 58#define BT_NORMAL_TIMEOUT 5 /* seconds */
55#define BT_NORMAL_RETRY_LIMIT 2 59#define BT_NORMAL_RETRY_LIMIT 2
56#define BT_RESET_DELAY 6 /* seconds after warm reset */ 60#define BT_RESET_DELAY 6 /* seconds after warm reset */
57 61
58/* States are written in chronological order and usually cover 62/*
59 multiple rows of the state table discussion in the IPMI spec. */ 63 * States are written in chronological order and usually cover
64 * multiple rows of the state table discussion in the IPMI spec.
65 */
60 66
61enum bt_states { 67enum bt_states {
62 BT_STATE_IDLE = 0, /* Order is critical in this list */ 68 BT_STATE_IDLE = 0, /* Order is critical in this list */
@@ -76,10 +82,12 @@ enum bt_states {
76 BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ 82 BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
77}; 83};
78 84
79/* Macros seen at the end of state "case" blocks. They help with legibility 85/*
80 and debugging. */ 86 * Macros seen at the end of state "case" blocks. They help with legibility
87 * and debugging.
88 */
81 89
82#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; } 90#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
83 91
84#define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } 92#define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; }
85 93
@@ -110,11 +118,13 @@ struct si_sm_data {
110#define BT_H_BUSY 0x40 118#define BT_H_BUSY 0x40
111#define BT_B_BUSY 0x80 119#define BT_B_BUSY 0x80
112 120
113/* Some bits are toggled on each write: write once to set it, once 121/*
114 more to clear it; writing a zero does nothing. To absolutely 122 * Some bits are toggled on each write: write once to set it, once
115 clear it, check its state and write if set. This avoids the "get 123 * more to clear it; writing a zero does nothing. To absolutely
116 current then use as mask" scheme to modify one bit. Note that the 124 * clear it, check its state and write if set. This avoids the "get
117 variable "bt" is hardcoded into these macros. */ 125 * current then use as mask" scheme to modify one bit. Note that the
126 * variable "bt" is hardcoded into these macros.
127 */
118 128
119#define BT_STATUS bt->io->inputb(bt->io, 0) 129#define BT_STATUS bt->io->inputb(bt->io, 0)
120#define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) 130#define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x)
@@ -125,8 +135,10 @@ struct si_sm_data {
125#define BT_INTMASK_R bt->io->inputb(bt->io, 2) 135#define BT_INTMASK_R bt->io->inputb(bt->io, 2)
126#define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) 136#define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x)
127 137
128/* Convenience routines for debugging. These are not multi-open safe! 138/*
129 Note the macros have hardcoded variables in them. */ 139 * Convenience routines for debugging. These are not multi-open safe!
140 * Note the macros have hardcoded variables in them.
141 */
130 142
131static char *state2txt(unsigned char state) 143static char *state2txt(unsigned char state)
132{ 144{
@@ -182,7 +194,8 @@ static char *status2txt(unsigned char status)
182static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) 194static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
183{ 195{
184 memset(bt, 0, sizeof(struct si_sm_data)); 196 memset(bt, 0, sizeof(struct si_sm_data));
185 if (bt->io != io) { /* external: one-time only things */ 197 if (bt->io != io) {
198 /* external: one-time only things */
186 bt->io = io; 199 bt->io = io;
187 bt->seq = 0; 200 bt->seq = 0;
188 } 201 }
@@ -229,7 +242,7 @@ static int bt_start_transaction(struct si_sm_data *bt,
229 printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); 242 printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
230 printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); 243 printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
231 for (i = 0; i < size; i ++) 244 for (i = 0; i < size; i ++)
232 printk (" %02x", data[i]); 245 printk(" %02x", data[i]);
233 printk("\n"); 246 printk("\n");
234 } 247 }
235 bt->write_data[0] = size + 1; /* all data plus seq byte */ 248 bt->write_data[0] = size + 1; /* all data plus seq byte */
@@ -246,8 +259,10 @@ static int bt_start_transaction(struct si_sm_data *bt,
246 return 0; 259 return 0;
247} 260}
248 261
249/* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE 262/*
250 it calls this. Strip out the length and seq bytes. */ 263 * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
264 * it calls this. Strip out the length and seq bytes.
265 */
251 266
252static int bt_get_result(struct si_sm_data *bt, 267static int bt_get_result(struct si_sm_data *bt,
253 unsigned char *data, 268 unsigned char *data,
@@ -269,10 +284,10 @@ static int bt_get_result(struct si_sm_data *bt,
269 memcpy(data + 2, bt->read_data + 4, msg_len - 2); 284 memcpy(data + 2, bt->read_data + 4, msg_len - 2);
270 285
271 if (bt_debug & BT_DEBUG_MSG) { 286 if (bt_debug & BT_DEBUG_MSG) {
272 printk (KERN_WARNING "BT: result %d bytes:", msg_len); 287 printk(KERN_WARNING "BT: result %d bytes:", msg_len);
273 for (i = 0; i < msg_len; i++) 288 for (i = 0; i < msg_len; i++)
274 printk(" %02x", data[i]); 289 printk(" %02x", data[i]);
275 printk ("\n"); 290 printk("\n");
276 } 291 }
277 return msg_len; 292 return msg_len;
278} 293}
@@ -292,8 +307,10 @@ static void reset_flags(struct si_sm_data *bt)
292 BT_INTMASK_W(BT_BMC_HWRST); 307 BT_INTMASK_W(BT_BMC_HWRST);
293} 308}
294 309
295/* Get rid of an unwanted/stale response. This should only be needed for 310/*
296 BMCs that support multiple outstanding requests. */ 311 * Get rid of an unwanted/stale response. This should only be needed for
312 * BMCs that support multiple outstanding requests.
313 */
297 314
298static void drain_BMC2HOST(struct si_sm_data *bt) 315static void drain_BMC2HOST(struct si_sm_data *bt)
299{ 316{
@@ -326,8 +343,8 @@ static inline void write_all_bytes(struct si_sm_data *bt)
326 printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", 343 printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
327 bt->write_count, bt->seq); 344 bt->write_count, bt->seq);
328 for (i = 0; i < bt->write_count; i++) 345 for (i = 0; i < bt->write_count; i++)
329 printk (" %02x", bt->write_data[i]); 346 printk(" %02x", bt->write_data[i]);
330 printk ("\n"); 347 printk("\n");
331 } 348 }
332 for (i = 0; i < bt->write_count; i++) 349 for (i = 0; i < bt->write_count; i++)
333 HOST2BMC(bt->write_data[i]); 350 HOST2BMC(bt->write_data[i]);
@@ -337,8 +354,10 @@ static inline int read_all_bytes(struct si_sm_data *bt)
337{ 354{
338 unsigned char i; 355 unsigned char i;
339 356
340 /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. 357 /*
341 Keep layout of first four bytes aligned with write_data[] */ 358 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
359 * Keep layout of first four bytes aligned with write_data[]
360 */
342 361
343 bt->read_data[0] = BMC2HOST; 362 bt->read_data[0] = BMC2HOST;
344 bt->read_count = bt->read_data[0]; 363 bt->read_count = bt->read_data[0];
@@ -362,8 +381,8 @@ static inline int read_all_bytes(struct si_sm_data *bt)
362 if (max > 16) 381 if (max > 16)
363 max = 16; 382 max = 16;
364 for (i = 0; i < max; i++) 383 for (i = 0; i < max; i++)
365 printk (" %02x", bt->read_data[i]); 384 printk(KERN_CONT " %02x", bt->read_data[i]);
366 printk ("%s\n", bt->read_count == max ? "" : " ..."); 385 printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ...");
367 } 386 }
368 387
369 /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ 388 /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
@@ -402,8 +421,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
402 printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ 421 printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */
403 reason, STATE2TXT, STATUS2TXT); 422 reason, STATE2TXT, STATUS2TXT);
404 423
405 /* Per the IPMI spec, retries are based on the sequence number 424 /*
406 known only to this module, so manage a restart here. */ 425 * Per the IPMI spec, retries are based on the sequence number
426 * known only to this module, so manage a restart here.
427 */
407 (bt->error_retries)++; 428 (bt->error_retries)++;
408 if (bt->error_retries < bt->BT_CAP_retries) { 429 if (bt->error_retries < bt->BT_CAP_retries) {
409 printk("%d retries left\n", 430 printk("%d retries left\n",
@@ -412,8 +433,8 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
412 return SI_SM_CALL_WITHOUT_DELAY; 433 return SI_SM_CALL_WITHOUT_DELAY;
413 } 434 }
414 435
415 printk("failed %d retries, sending error response\n", 436 printk(KERN_WARNING "failed %d retries, sending error response\n",
416 bt->BT_CAP_retries); 437 bt->BT_CAP_retries);
417 if (!bt->nonzero_status) 438 if (!bt->nonzero_status)
418 printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); 439 printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
419 440
@@ -424,8 +445,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
424 return SI_SM_CALL_WITHOUT_DELAY; 445 return SI_SM_CALL_WITHOUT_DELAY;
425 } 446 }
426 447
427 /* Concoct a useful error message, set up the next state, and 448 /*
428 be done with this sequence. */ 449 * Concoct a useful error message, set up the next state, and
450 * be done with this sequence.
451 */
429 452
430 bt->state = BT_STATE_IDLE; 453 bt->state = BT_STATE_IDLE;
431 switch (cCode) { 454 switch (cCode) {
@@ -461,10 +484,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
461 last_printed = bt->state; 484 last_printed = bt->state;
462 } 485 }
463 486
464 /* Commands that time out may still (eventually) provide a response. 487 /*
465 This stale response will get in the way of a new response so remove 488 * Commands that time out may still (eventually) provide a response.
466 it if possible (hopefully during IDLE). Even if it comes up later 489 * This stale response will get in the way of a new response so remove
467 it will be rejected by its (now-forgotten) seq number. */ 490 * it if possible (hopefully during IDLE). Even if it comes up later
491 * it will be rejected by its (now-forgotten) seq number.
492 */
468 493
469 if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { 494 if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
470 drain_BMC2HOST(bt); 495 drain_BMC2HOST(bt);
@@ -472,7 +497,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
472 } 497 }
473 498
474 if ((bt->state != BT_STATE_IDLE) && 499 if ((bt->state != BT_STATE_IDLE) &&
475 (bt->state < BT_STATE_PRINTME)) { /* check timeout */ 500 (bt->state < BT_STATE_PRINTME)) {
501 /* check timeout */
476 bt->timeout -= time; 502 bt->timeout -= time;
477 if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) 503 if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
478 return error_recovery(bt, 504 return error_recovery(bt,
@@ -482,8 +508,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
482 508
483 switch (bt->state) { 509 switch (bt->state) {
484 510
485 /* Idle state first checks for asynchronous messages from another 511 /*
486 channel, then does some opportunistic housekeeping. */ 512 * Idle state first checks for asynchronous messages from another
513 * channel, then does some opportunistic housekeeping.
514 */
487 515
488 case BT_STATE_IDLE: 516 case BT_STATE_IDLE:
489 if (status & BT_SMS_ATN) { 517 if (status & BT_SMS_ATN) {
@@ -531,16 +559,19 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
531 BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); 559 BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
532 BT_CONTROL(BT_H_BUSY); /* set */ 560 BT_CONTROL(BT_H_BUSY); /* set */
533 561
534 /* Uncached, ordered writes should just proceeed serially but 562 /*
535 some BMCs don't clear B2H_ATN with one hit. Fast-path a 563 * Uncached, ordered writes should just proceeed serially but
536 workaround without too much penalty to the general case. */ 564 * some BMCs don't clear B2H_ATN with one hit. Fast-path a
565 * workaround without too much penalty to the general case.
566 */
537 567
538 BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ 568 BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */
539 BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, 569 BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
540 SI_SM_CALL_WITHOUT_DELAY); 570 SI_SM_CALL_WITHOUT_DELAY);
541 571
542 case BT_STATE_CLEAR_B2H: 572 case BT_STATE_CLEAR_B2H:
543 if (status & BT_B2H_ATN) { /* keep hitting it */ 573 if (status & BT_B2H_ATN) {
574 /* keep hitting it */
544 BT_CONTROL(BT_B2H_ATN); 575 BT_CONTROL(BT_B2H_ATN);
545 BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); 576 BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
546 } 577 }
@@ -548,7 +579,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
548 SI_SM_CALL_WITHOUT_DELAY); 579 SI_SM_CALL_WITHOUT_DELAY);
549 580
550 case BT_STATE_READ_BYTES: 581 case BT_STATE_READ_BYTES:
551 if (!(status & BT_H_BUSY)) /* check in case of retry */ 582 if (!(status & BT_H_BUSY))
583 /* check in case of retry */
552 BT_CONTROL(BT_H_BUSY); 584 BT_CONTROL(BT_H_BUSY);
553 BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ 585 BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */
554 i = read_all_bytes(bt); /* true == packet seq match */ 586 i = read_all_bytes(bt); /* true == packet seq match */
@@ -599,8 +631,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
599 BT_STATE_CHANGE(BT_STATE_XACTION_START, 631 BT_STATE_CHANGE(BT_STATE_XACTION_START,
600 SI_SM_CALL_WITH_DELAY); 632 SI_SM_CALL_WITH_DELAY);
601 633
602 /* Get BT Capabilities, using timing of upper level state machine. 634 /*
603 Set outreqs to prevent infinite loop on timeout. */ 635 * Get BT Capabilities, using timing of upper level state machine.
636 * Set outreqs to prevent infinite loop on timeout.
637 */
604 case BT_STATE_CAPABILITIES_BEGIN: 638 case BT_STATE_CAPABILITIES_BEGIN:
605 bt->BT_CAP_outreqs = 1; 639 bt->BT_CAP_outreqs = 1;
606 { 640 {
@@ -638,10 +672,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
638 672
639static int bt_detect(struct si_sm_data *bt) 673static int bt_detect(struct si_sm_data *bt)
640{ 674{
641 /* It's impossible for the BT status and interrupt registers to be 675 /*
642 all 1's, (assuming a properly functioning, self-initialized BMC) 676 * It's impossible for the BT status and interrupt registers to be
643 but that's what you get from reading a bogus address, so we 677 * all 1's, (assuming a properly functioning, self-initialized BMC)
644 test that first. The calling routine uses negative logic. */ 678 * but that's what you get from reading a bogus address, so we
679 * test that first. The calling routine uses negative logic.
680 */
645 681
646 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) 682 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
647 return 1; 683 return 1;
@@ -658,8 +694,7 @@ static int bt_size(void)
658 return sizeof(struct si_sm_data); 694 return sizeof(struct si_sm_data);
659} 695}
660 696
661struct si_sm_handlers bt_smi_handlers = 697struct si_sm_handlers bt_smi_handlers = {
662{
663 .init_data = bt_init_data, 698 .init_data = bt_init_data,
664 .start_transaction = bt_start_transaction, 699 .start_transaction = bt_start_transaction,
665 .get_result = bt_get_result, 700 .get_result = bt_get_result,
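The reflowed comment above (around BT_H_BUSY and BT_B2H_ATN) describes toggle-on-write status bits: writing a 1 flips the bit, writing 0 does nothing, so an unconditional write can set a bit you meant to clear. The check-then-write idiom the comment prescribes can be modelled in a few lines of plain C; the register and helpers here are stand-ins, not the driver's BT_STATUS/BT_CONTROL macros:

	#include <stdio.h>

	#define H_BUSY 0x40		/* toggle-on-write bit, as on the BT interface */

	static unsigned char status;	/* stands in for the BT status register */

	/* Writing a 1 bit toggles it; writing 0 leaves it alone. */
	static void control_write(unsigned char bits)
	{
		status ^= bits;
	}

	/* Check-then-write: only hit the bit if it is currently set. */
	static void clear_h_busy(void)
	{
		if (status & H_BUSY)
			control_write(H_BUSY);
	}

	int main(void)
	{
		status = H_BUSY;
		clear_h_busy();		/* clears it */
		clear_h_busy();		/* no-op, does not toggle it back on */
		printf("H_BUSY is %s\n", (status & H_BUSY) ? "set" : "clear");
		return 0;
	}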
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index c1b8228cb7b6..80704875794c 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -60,37 +60,58 @@ MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
60 60
61/* The states the KCS driver may be in. */ 61/* The states the KCS driver may be in. */
62enum kcs_states { 62enum kcs_states {
63 KCS_IDLE, /* The KCS interface is currently 63 /* The KCS interface is currently doing nothing. */
64 doing nothing. */ 64 KCS_IDLE,
65 KCS_START_OP, /* We are starting an operation. The 65
66 data is in the output buffer, but 66 /*
67 nothing has been done to the 67 * We are starting an operation. The data is in the output
68 interface yet. This was added to 68 * buffer, but nothing has been done to the interface yet. This
69 the state machine in the spec to 69 * was added to the state machine in the spec to wait for the
70 wait for the initial IBF. */ 70 * initial IBF.
71 KCS_WAIT_WRITE_START, /* We have written a write cmd to the 71 */
72 interface. */ 72 KCS_START_OP,
73 KCS_WAIT_WRITE, /* We are writing bytes to the 73
74 interface. */ 74 /* We have written a write cmd to the interface. */
75 KCS_WAIT_WRITE_END, /* We have written the write end cmd 75 KCS_WAIT_WRITE_START,
76 to the interface, and still need to 76
77 write the last byte. */ 77 /* We are writing bytes to the interface. */
78 KCS_WAIT_READ, /* We are waiting to read data from 78 KCS_WAIT_WRITE,
79 the interface. */ 79
80 KCS_ERROR0, /* State to transition to the error 80 /*
81 handler, this was added to the 81 * We have written the write end cmd to the interface, and
82 state machine in the spec to be 82 * still need to write the last byte.
83 sure IBF was there. */ 83 */
84 KCS_ERROR1, /* First stage error handler, wait for 84 KCS_WAIT_WRITE_END,
85 the interface to respond. */ 85
86 KCS_ERROR2, /* The abort cmd has been written, 86 /* We are waiting to read data from the interface. */
87 wait for the interface to 87 KCS_WAIT_READ,
88 respond. */ 88
89 KCS_ERROR3, /* We wrote some data to the 89 /*
90 interface, wait for it to switch to 90 * State to transition to the error handler, this was added to
91 read mode. */ 91 * the state machine in the spec to be sure IBF was there.
92 KCS_HOSED /* The hardware failed to follow the 92 */
93 state machine. */ 93 KCS_ERROR0,
94
95 /*
96 * First stage error handler, wait for the interface to
97 * respond.
98 */
99 KCS_ERROR1,
100
101 /*
102 * The abort cmd has been written, wait for the interface to
103 * respond.
104 */
105 KCS_ERROR2,
106
107 /*
108 * We wrote some data to the interface, wait for it to switch
109 * to read mode.
110 */
111 KCS_ERROR3,
112
113 /* The hardware failed to follow the state machine. */
114 KCS_HOSED
94}; 115};
95 116
96#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH 117#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
@@ -102,8 +123,7 @@ enum kcs_states {
102#define MAX_ERROR_RETRIES 10 123#define MAX_ERROR_RETRIES 10
103#define ERROR0_OBF_WAIT_JIFFIES (2*HZ) 124#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
104 125
105struct si_sm_data 126struct si_sm_data {
106{
107 enum kcs_states state; 127 enum kcs_states state;
108 struct si_sm_io *io; 128 struct si_sm_io *io;
109 unsigned char write_data[MAX_KCS_WRITE_SIZE]; 129 unsigned char write_data[MAX_KCS_WRITE_SIZE];
@@ -187,7 +207,8 @@ static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
187 (kcs->error_retries)++; 207 (kcs->error_retries)++;
188 if (kcs->error_retries > MAX_ERROR_RETRIES) { 208 if (kcs->error_retries > MAX_ERROR_RETRIES) {
189 if (kcs_debug & KCS_DEBUG_ENABLE) 209 if (kcs_debug & KCS_DEBUG_ENABLE)
190 printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason); 210 printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n",
211 reason);
191 kcs->state = KCS_HOSED; 212 kcs->state = KCS_HOSED;
192 } else { 213 } else {
193 kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; 214 kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
@@ -271,10 +292,9 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
271 292
272 if (kcs_debug & KCS_DEBUG_MSG) { 293 if (kcs_debug & KCS_DEBUG_MSG) {
273 printk(KERN_DEBUG "start_kcs_transaction -"); 294 printk(KERN_DEBUG "start_kcs_transaction -");
274 for (i = 0; i < size; i ++) { 295 for (i = 0; i < size; i++)
275 printk(" %02x", (unsigned char) (data [i])); 296 printk(" %02x", (unsigned char) (data [i]));
276 } 297 printk("\n");
277 printk ("\n");
278 } 298 }
279 kcs->error_retries = 0; 299 kcs->error_retries = 0;
280 memcpy(kcs->write_data, data, size); 300 memcpy(kcs->write_data, data, size);
@@ -305,9 +325,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
305 kcs->read_pos = 3; 325 kcs->read_pos = 3;
306 } 326 }
307 if (kcs->truncated) { 327 if (kcs->truncated) {
308 /* Report a truncated error. We might overwrite 328 /*
309 another error, but that's too bad, the user needs 329 * Report a truncated error. We might overwrite
310 to know it was truncated. */ 330 * another error, but that's too bad, the user needs
331 * to know it was truncated.
332 */
311 data[2] = IPMI_ERR_MSG_TRUNCATED; 333 data[2] = IPMI_ERR_MSG_TRUNCATED;
312 kcs->truncated = 0; 334 kcs->truncated = 0;
313 } 335 }
@@ -315,9 +337,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
315 return kcs->read_pos; 337 return kcs->read_pos;
316} 338}
317 339
318/* This implements the state machine defined in the IPMI manual, see 340/*
319 that for details on how this works. Divide that flowchart into 341 * This implements the state machine defined in the IPMI manual, see
320 sections delimited by "Wait for IBF" and this will become clear. */ 342 * that for details on how this works. Divide that flowchart into
343 * sections delimited by "Wait for IBF" and this will become clear.
344 */
321static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) 345static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
322{ 346{
323 unsigned char status; 347 unsigned char status;
@@ -388,11 +412,12 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
388 write_next_byte(kcs); 412 write_next_byte(kcs);
389 } 413 }
390 break; 414 break;
391 415
392 case KCS_WAIT_WRITE_END: 416 case KCS_WAIT_WRITE_END:
393 if (state != KCS_WRITE_STATE) { 417 if (state != KCS_WRITE_STATE) {
394 start_error_recovery(kcs, 418 start_error_recovery(kcs,
395 "Not in write state for write end"); 419 "Not in write state"
420 " for write end");
396 break; 421 break;
397 } 422 }
398 clear_obf(kcs, status); 423 clear_obf(kcs, status);
@@ -413,13 +438,15 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
413 return SI_SM_CALL_WITH_DELAY; 438 return SI_SM_CALL_WITH_DELAY;
414 read_next_byte(kcs); 439 read_next_byte(kcs);
415 } else { 440 } else {
416 /* We don't implement this exactly like the state 441 /*
417 machine in the spec. Some broken hardware 442 * We don't implement this exactly like the state
418 does not write the final dummy byte to the 443 * machine in the spec. Some broken hardware
419 read register. Thus obf will never go high 444 * does not write the final dummy byte to the
420 here. We just go straight to idle, and we 445 * read register. Thus obf will never go high
421 handle clearing out obf in idle state if it 446 * here. We just go straight to idle, and we
422 happens to come in. */ 447 * handle clearing out obf in idle state if it
448 * happens to come in.
449 */
423 clear_obf(kcs, status); 450 clear_obf(kcs, status);
424 kcs->orig_write_count = 0; 451 kcs->orig_write_count = 0;
425 kcs->state = KCS_IDLE; 452 kcs->state = KCS_IDLE;
@@ -430,7 +457,8 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
430 case KCS_ERROR0: 457 case KCS_ERROR0:
431 clear_obf(kcs, status); 458 clear_obf(kcs, status);
432 status = read_status(kcs); 459 status = read_status(kcs);
433 if (GET_STATUS_OBF(status)) /* controller isn't responding */ 460 if (GET_STATUS_OBF(status))
461 /* controller isn't responding */
434 if (time_before(jiffies, kcs->error0_timeout)) 462 if (time_before(jiffies, kcs->error0_timeout))
435 return SI_SM_CALL_WITH_TICK_DELAY; 463 return SI_SM_CALL_WITH_TICK_DELAY;
436 write_cmd(kcs, KCS_GET_STATUS_ABORT); 464 write_cmd(kcs, KCS_GET_STATUS_ABORT);
@@ -442,7 +470,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
442 write_data(kcs, 0); 470 write_data(kcs, 0);
443 kcs->state = KCS_ERROR2; 471 kcs->state = KCS_ERROR2;
444 break; 472 break;
445 473
446 case KCS_ERROR2: 474 case KCS_ERROR2:
447 if (state != KCS_READ_STATE) { 475 if (state != KCS_READ_STATE) {
448 start_error_recovery(kcs, 476 start_error_recovery(kcs,
@@ -456,7 +484,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
456 write_data(kcs, KCS_READ_BYTE); 484 write_data(kcs, KCS_READ_BYTE);
457 kcs->state = KCS_ERROR3; 485 kcs->state = KCS_ERROR3;
458 break; 486 break;
459 487
460 case KCS_ERROR3: 488 case KCS_ERROR3:
461 if (state != KCS_IDLE_STATE) { 489 if (state != KCS_IDLE_STATE) {
462 start_error_recovery(kcs, 490 start_error_recovery(kcs,
@@ -475,7 +503,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
475 return SI_SM_TRANSACTION_COMPLETE; 503 return SI_SM_TRANSACTION_COMPLETE;
476 } 504 }
477 break; 505 break;
478 506
479 case KCS_HOSED: 507 case KCS_HOSED:
480 break; 508 break;
481 } 509 }
@@ -495,10 +523,12 @@ static int kcs_size(void)
495 523
496static int kcs_detect(struct si_sm_data *kcs) 524static int kcs_detect(struct si_sm_data *kcs)
497{ 525{
498 /* It's impossible for the KCS status register to be all 1's, 526 /*
499 (assuming a properly functioning, self-initialized BMC) 527 * It's impossible for the KCS status register to be all 1's,
500 but that's what you get from reading a bogus address, so we 528 * (assuming a properly functioning, self-initialized BMC)
501 test that first. */ 529 * but that's what you get from reading a bogus address, so we
530 * test that first.
531 */
502 if (read_status(kcs) == 0xff) 532 if (read_status(kcs) == 0xff)
503 return 1; 533 return 1;
504 534
@@ -509,8 +539,7 @@ static void kcs_cleanup(struct si_sm_data *kcs)
509{ 539{
510} 540}
511 541
512struct si_sm_handlers kcs_smi_handlers = 542struct si_sm_handlers kcs_smi_handlers = {
513{
514 .init_data = init_kcs_data, 543 .init_data = init_kcs_data,
515 .start_transaction = start_kcs_transaction, 544 .start_transaction = start_kcs_transaction,
516 .get_result = get_kcs_result, 545 .get_result = get_kcs_result,
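The KCS hunks above keep the existing jiffies-based error timeout: error0_timeout is armed as jiffies + ERROR0_OBF_WAIT_JIFFIES and later tested with time_before(). A minimal sketch of that wraparound-safe deadline pattern, using hypothetical names rather than the driver's si_sm_data fields:

	#include <linux/jiffies.h>

	#define EXAMPLE_WAIT_JIFFIES	(2 * HZ)	/* same two-second budget as ERROR0_OBF_WAIT_JIFFIES */

	struct example_sm {
		unsigned long deadline;
	};

	static void example_arm_timeout(struct example_sm *sm)
	{
		sm->deadline = jiffies + EXAMPLE_WAIT_JIFFIES;
	}

	static int example_still_waiting(const struct example_sm *sm)
	{
		/* time_before() compares jiffies values safely across wraparound. */
		return time_before(jiffies, sm->deadline);
	}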
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 32b2b22996dc..8a59aaa21be5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -47,7 +47,7 @@
47 47
48#define PFX "IPMI message handler: " 48#define PFX "IPMI message handler: "
49 49
50#define IPMI_DRIVER_VERSION "39.1" 50#define IPMI_DRIVER_VERSION "39.2"
51 51
52static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 52static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53static int ipmi_init_msghandler(void); 53static int ipmi_init_msghandler(void);
@@ -63,16 +63,16 @@ static struct proc_dir_entry *proc_ipmi_root;
63 63
64#define MAX_EVENTS_IN_QUEUE 25 64#define MAX_EVENTS_IN_QUEUE 25
65 65
66/* Don't let a message sit in a queue forever, always time it with at lest 66/*
67 the max message timer. This is in milliseconds. */ 67 * Don't let a message sit in a queue forever, always time it with at lest
68 * the max message timer. This is in milliseconds.
69 */
68#define MAX_MSG_TIMEOUT 60000 70#define MAX_MSG_TIMEOUT 60000
69 71
70
71/* 72/*
72 * The main "user" data structure. 73 * The main "user" data structure.
73 */ 74 */
74struct ipmi_user 75struct ipmi_user {
75{
76 struct list_head link; 76 struct list_head link;
77 77
78 /* Set to "0" when the user is destroyed. */ 78 /* Set to "0" when the user is destroyed. */
@@ -91,8 +91,7 @@ struct ipmi_user
91 int gets_events; 91 int gets_events;
92}; 92};
93 93
94struct cmd_rcvr 94struct cmd_rcvr {
95{
96 struct list_head link; 95 struct list_head link;
97 96
98 ipmi_user_t user; 97 ipmi_user_t user;
@@ -106,12 +105,12 @@ struct cmd_rcvr
106 * or change any data until the RCU period completes. So we 105 * or change any data until the RCU period completes. So we
107 * use this next variable during mass deletion so we can have 106 * use this next variable during mass deletion so we can have
108 * a list and don't have to wait and restart the search on 107 * a list and don't have to wait and restart the search on
109 * every individual deletion of a command. */ 108 * every individual deletion of a command.
109 */
110 struct cmd_rcvr *next; 110 struct cmd_rcvr *next;
111}; 111};
112 112
113struct seq_table 113struct seq_table {
114{
115 unsigned int inuse : 1; 114 unsigned int inuse : 1;
116 unsigned int broadcast : 1; 115 unsigned int broadcast : 1;
117 116
@@ -119,53 +118,60 @@ struct seq_table
119 unsigned long orig_timeout; 118 unsigned long orig_timeout;
120 unsigned int retries_left; 119 unsigned int retries_left;
121 120
122 /* To verify on an incoming send message response that this is 121 /*
123 the message that the response is for, we keep a sequence id 122 * To verify on an incoming send message response that this is
124 and increment it every time we send a message. */ 123 * the message that the response is for, we keep a sequence id
124 * and increment it every time we send a message.
125 */
125 long seqid; 126 long seqid;
126 127
127 /* This is held so we can properly respond to the message on a 128 /*
128 timeout, and it is used to hold the temporary data for 129 * This is held so we can properly respond to the message on a
129 retransmission, too. */ 130 * timeout, and it is used to hold the temporary data for
131 * retransmission, too.
132 */
130 struct ipmi_recv_msg *recv_msg; 133 struct ipmi_recv_msg *recv_msg;
131}; 134};
132 135
133/* Store the information in a msgid (long) to allow us to find a 136/*
134 sequence table entry from the msgid. */ 137 * Store the information in a msgid (long) to allow us to find a
138 * sequence table entry from the msgid.
139 */
135#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff)) 140#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
136 141
137#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ 142#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
138 do { \ 143 do { \
139 seq = ((msgid >> 26) & 0x3f); \ 144 seq = ((msgid >> 26) & 0x3f); \
140 seqid = (msgid & 0x3fffff); \ 145 seqid = (msgid & 0x3fffff); \
141 } while (0) 146 } while (0)
142 147
143#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff) 148#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
144 149
145struct ipmi_channel 150struct ipmi_channel {
146{
147 unsigned char medium; 151 unsigned char medium;
148 unsigned char protocol; 152 unsigned char protocol;
149 153
150 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, 154 /*
151 but may be changed by the user. */ 155 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
156 * but may be changed by the user.
157 */
152 unsigned char address; 158 unsigned char address;
153 159
154 /* My LUN. This should generally stay the SMS LUN, but just in 160 /*
155 case... */ 161 * My LUN. This should generally stay the SMS LUN, but just in
162 * case...
163 */
156 unsigned char lun; 164 unsigned char lun;
157}; 165};
158 166
159#ifdef CONFIG_PROC_FS 167#ifdef CONFIG_PROC_FS
160struct ipmi_proc_entry 168struct ipmi_proc_entry {
161{
162 char *name; 169 char *name;
163 struct ipmi_proc_entry *next; 170 struct ipmi_proc_entry *next;
164}; 171};
165#endif 172#endif
166 173
167struct bmc_device 174struct bmc_device {
168{
169 struct platform_device *dev; 175 struct platform_device *dev;
170 struct ipmi_device_id id; 176 struct ipmi_device_id id;
171 unsigned char guid[16]; 177 unsigned char guid[16];
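The reflowed seq_table comment above explains that a sequence-table slot and a per-slot seqid are packed into a single long msgid so a response can be matched back to its slot (STORE_SEQ_IN_MSGID / GET_SEQ_FROM_MSGID). Below is a small, self-consistent model of that packing, keeping a 6-bit slot number above the seqid; note that the kernel macros shown in the hunk use slightly wider masks on the store side than on the extract side:

	#include <stdio.h>

	/* Model: 6-bit slot number in bits 26..31, 26-bit seqid below it. */
	#define MODEL_STORE_SEQ_IN_MSGID(seq, seqid) \
		((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

	int main(void)
	{
		long msgid = MODEL_STORE_SEQ_IN_MSGID(5, 12345);
		long seq   = (msgid >> 26) & 0x3f;
		long seqid = msgid & 0x3ffffff;

		printf("seq=%ld seqid=%ld\n", seq, seqid);	/* prints seq=5 seqid=12345 */
		return 0;
	}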
@@ -186,10 +192,108 @@ struct bmc_device
186 struct device_attribute aux_firmware_rev_attr; 192 struct device_attribute aux_firmware_rev_attr;
187}; 193};
188 194
195/*
196 * Various statistics for IPMI, these index stats[] in the ipmi_smi
197 * structure.
198 */
199enum ipmi_stat_indexes {
200 /* Commands we got from the user that were invalid. */
201 IPMI_STAT_sent_invalid_commands = 0,
202
203 /* Commands we sent to the MC. */
204 IPMI_STAT_sent_local_commands,
205
206 /* Responses from the MC that were delivered to a user. */
207 IPMI_STAT_handled_local_responses,
208
209 /* Responses from the MC that were not delivered to a user. */
210 IPMI_STAT_unhandled_local_responses,
211
212 /* Commands we sent out to the IPMB bus. */
213 IPMI_STAT_sent_ipmb_commands,
214
215 /* Commands sent on the IPMB that had errors on the SEND CMD */
216 IPMI_STAT_sent_ipmb_command_errs,
217
218 /* Each retransmit increments this count. */
219 IPMI_STAT_retransmitted_ipmb_commands,
220
221 /*
222 * When a message times out (runs out of retransmits) this is
223 * incremented.
224 */
225 IPMI_STAT_timed_out_ipmb_commands,
226
227 /*
228 * This is like above, but for broadcasts. Broadcasts are
229 * *not* included in the above count (they are expected to
230 * time out).
231 */
232 IPMI_STAT_timed_out_ipmb_broadcasts,
233
234 /* Responses I have sent to the IPMB bus. */
235 IPMI_STAT_sent_ipmb_responses,
236
237 /* The response was delivered to the user. */
238 IPMI_STAT_handled_ipmb_responses,
239
240 /* The response had invalid data in it. */
241 IPMI_STAT_invalid_ipmb_responses,
242
243 /* The response didn't have anyone waiting for it. */
244 IPMI_STAT_unhandled_ipmb_responses,
245
246 /* Commands we sent out to the IPMB bus. */
247 IPMI_STAT_sent_lan_commands,
248
249 /* Commands sent on the IPMB that had errors on the SEND CMD */
250 IPMI_STAT_sent_lan_command_errs,
251
252 /* Each retransmit increments this count. */
253 IPMI_STAT_retransmitted_lan_commands,
254
255 /*
256 * When a message times out (runs out of retransmits) this is
257 * incremented.
258 */
259 IPMI_STAT_timed_out_lan_commands,
260
261 /* Responses I have sent to the IPMB bus. */
262 IPMI_STAT_sent_lan_responses,
263
264 /* The response was delivered to the user. */
265 IPMI_STAT_handled_lan_responses,
266
267 /* The response had invalid data in it. */
268 IPMI_STAT_invalid_lan_responses,
269
270 /* The response didn't have anyone waiting for it. */
271 IPMI_STAT_unhandled_lan_responses,
272
273 /* The command was delivered to the user. */
274 IPMI_STAT_handled_commands,
275
276 /* The command had invalid data in it. */
277 IPMI_STAT_invalid_commands,
278
279 /* The command didn't have anyone waiting for it. */
280 IPMI_STAT_unhandled_commands,
281
282 /* Invalid data in an event. */
283 IPMI_STAT_invalid_events,
284
285 /* Events that were received with the proper format. */
286 IPMI_STAT_events,
287
288
289 /* This *must* remain last, add new values above this. */
290 IPMI_NUM_STATS
291};
292
293
189#define IPMI_IPMB_NUM_SEQ 64 294#define IPMI_IPMB_NUM_SEQ 64
190#define IPMI_MAX_CHANNELS 16 295#define IPMI_MAX_CHANNELS 16
191struct ipmi_smi 296struct ipmi_smi {
192{
193 /* What interface number are we? */ 297 /* What interface number are we? */
194 int intf_num; 298 int intf_num;
195 299
@@ -198,8 +302,10 @@ struct ipmi_smi
198 /* Used for a list of interfaces. */ 302 /* Used for a list of interfaces. */
199 struct list_head link; 303 struct list_head link;
200 304
201 /* The list of upper layers that are using me. seq_lock 305 /*
202 * protects this. */ 306 * The list of upper layers that are using me. seq_lock
307 * protects this.
308 */
203 struct list_head users; 309 struct list_head users;
204 310
205 /* Information to supply to users. */ 311 /* Information to supply to users. */
@@ -213,10 +319,12 @@ struct ipmi_smi
213 char *my_dev_name; 319 char *my_dev_name;
214 char *sysfs_name; 320 char *sysfs_name;
215 321
216 /* This is the lower-layer's sender routine. Note that you 322 /*
323 * This is the lower-layer's sender routine. Note that you
217 * must either be holding the ipmi_interfaces_mutex or be in 324 * must either be holding the ipmi_interfaces_mutex or be in
218 * an umpreemptible region to use this. You must fetch the 325 * an umpreemptible region to use this. You must fetch the
219 * value into a local variable and make sure it is not NULL. */ 326 * value into a local variable and make sure it is not NULL.
327 */
220 struct ipmi_smi_handlers *handlers; 328 struct ipmi_smi_handlers *handlers;
221 void *send_info; 329 void *send_info;
222 330
@@ -229,34 +337,45 @@ struct ipmi_smi
229 /* Driver-model device for the system interface. */ 337 /* Driver-model device for the system interface. */
230 struct device *si_dev; 338 struct device *si_dev;
231 339
232 /* A table of sequence numbers for this interface. We use the 340 /*
233 sequence numbers for IPMB messages that go out of the 341 * A table of sequence numbers for this interface. We use the
234 interface to match them up with their responses. A routine 342 * sequence numbers for IPMB messages that go out of the
235 is called periodically to time the items in this list. */ 343 * interface to match them up with their responses. A routine
344 * is called periodically to time the items in this list.
345 */
236 spinlock_t seq_lock; 346 spinlock_t seq_lock;
237 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; 347 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
238 int curr_seq; 348 int curr_seq;
239 349
240 /* Messages that were delayed for some reason (out of memory, 350 /*
241 for instance), will go in here to be processed later in a 351 * Messages that were delayed for some reason (out of memory,
242 periodic timer interrupt. */ 352 * for instance), will go in here to be processed later in a
353 * periodic timer interrupt.
354 */
243 spinlock_t waiting_msgs_lock; 355 spinlock_t waiting_msgs_lock;
244 struct list_head waiting_msgs; 356 struct list_head waiting_msgs;
245 357
246 /* The list of command receivers that are registered for commands 358 /*
247 on this interface. */ 359 * The list of command receivers that are registered for commands
360 * on this interface.
361 */
248 struct mutex cmd_rcvrs_mutex; 362 struct mutex cmd_rcvrs_mutex;
249 struct list_head cmd_rcvrs; 363 struct list_head cmd_rcvrs;
250 364
251 /* Events that were queues because no one was there to receive 365 /*
252 them. */ 366 * Events that were queues because no one was there to receive
367 * them.
368 */
253 spinlock_t events_lock; /* For dealing with event stuff. */ 369 spinlock_t events_lock; /* For dealing with event stuff. */
254 struct list_head waiting_events; 370 struct list_head waiting_events;
255 unsigned int waiting_events_count; /* How many events in queue? */ 371 unsigned int waiting_events_count; /* How many events in queue? */
256 int delivering_events; 372 char delivering_events;
373 char event_msg_printed;
257 374
258 /* The event receiver for my BMC, only really used at panic 375 /*
259 shutdown as a place to store this. */ 376 * The event receiver for my BMC, only really used at panic
377 * shutdown as a place to store this.
378 */
260 unsigned char event_receiver; 379 unsigned char event_receiver;
261 unsigned char event_receiver_lun; 380 unsigned char event_receiver_lun;
262 unsigned char local_sel_device; 381 unsigned char local_sel_device;
@@ -268,14 +387,18 @@ struct ipmi_smi
268 int auto_maintenance_timeout; 387 int auto_maintenance_timeout;
269 spinlock_t maintenance_mode_lock; /* Used in a timer... */ 388 spinlock_t maintenance_mode_lock; /* Used in a timer... */
270 389
271 /* A cheap hack, if this is non-null and a message to an 390 /*
272 interface comes in with a NULL user, call this routine with 391 * A cheap hack, if this is non-null and a message to an
273 it. Note that the message will still be freed by the 392 * interface comes in with a NULL user, call this routine with
274 caller. This only works on the system interface. */ 393 * it. Note that the message will still be freed by the
394 * caller. This only works on the system interface.
395 */
275 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg); 396 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
276 397
277 /* When we are scanning the channels for an SMI, this will 398 /*
278 tell which channel we are scanning. */ 399 * When we are scanning the channels for an SMI, this will
400 * tell which channel we are scanning.
401 */
279 int curr_channel; 402 int curr_channel;
280 403
281 /* Channel information */ 404 /* Channel information */
@@ -285,74 +408,14 @@ struct ipmi_smi
285 struct proc_dir_entry *proc_dir; 408 struct proc_dir_entry *proc_dir;
286 char proc_dir_name[10]; 409 char proc_dir_name[10];
287 410
288 spinlock_t counter_lock; /* For making counters atomic. */ 411 atomic_t stats[IPMI_NUM_STATS];
289
290 /* Commands we got that were invalid. */
291 unsigned int sent_invalid_commands;
292
293 /* Commands we sent to the MC. */
294 unsigned int sent_local_commands;
295 /* Responses from the MC that were delivered to a user. */
296 unsigned int handled_local_responses;
297 /* Responses from the MC that were not delivered to a user. */
298 unsigned int unhandled_local_responses;
299
300 /* Commands we sent out to the IPMB bus. */
301 unsigned int sent_ipmb_commands;
302 /* Commands sent on the IPMB that had errors on the SEND CMD */
303 unsigned int sent_ipmb_command_errs;
304 /* Each retransmit increments this count. */
305 unsigned int retransmitted_ipmb_commands;
306 /* When a message times out (runs out of retransmits) this is
307 incremented. */
308 unsigned int timed_out_ipmb_commands;
309
310 /* This is like above, but for broadcasts. Broadcasts are
311 *not* included in the above count (they are expected to
312 time out). */
313 unsigned int timed_out_ipmb_broadcasts;
314 412
315 /* Responses I have sent to the IPMB bus. */ 413 /*
316 unsigned int sent_ipmb_responses; 414 * run_to_completion duplicate of smb_info, smi_info
317 415 * and ipmi_serial_info structures. Used to decrease numbers of
318 /* The response was delivered to the user. */ 416 * parameters passed by "low" level IPMI code.
319 unsigned int handled_ipmb_responses; 417 */
320 /* The response had invalid data in it. */ 418 int run_to_completion;
321 unsigned int invalid_ipmb_responses;
322 /* The response didn't have anyone waiting for it. */
323 unsigned int unhandled_ipmb_responses;
324
325 /* Commands we sent out to the IPMB bus. */
326 unsigned int sent_lan_commands;
327 /* Commands sent on the IPMB that had errors on the SEND CMD */
328 unsigned int sent_lan_command_errs;
329 /* Each retransmit increments this count. */
330 unsigned int retransmitted_lan_commands;
331 /* When a message times out (runs out of retransmits) this is
332 incremented. */
333 unsigned int timed_out_lan_commands;
334
335 /* Responses I have sent to the IPMB bus. */
336 unsigned int sent_lan_responses;
337
338 /* The response was delivered to the user. */
339 unsigned int handled_lan_responses;
340 /* The response had invalid data in it. */
341 unsigned int invalid_lan_responses;
342 /* The response didn't have anyone waiting for it. */
343 unsigned int unhandled_lan_responses;
344
345 /* The command was delivered to the user. */
346 unsigned int handled_commands;
347 /* The command had invalid data in it. */
348 unsigned int invalid_commands;
349 /* The command didn't have anyone waiting for it. */
350 unsigned int unhandled_commands;
351
352 /* Invalid data in an event. */
353 unsigned int invalid_events;
354 /* Events that were received with the proper format. */
355 unsigned int events;
356}; 419};
357#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) 420#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
358 421
@@ -368,12 +431,19 @@ static DEFINE_MUTEX(ipmidriver_mutex);
368static LIST_HEAD(ipmi_interfaces); 431static LIST_HEAD(ipmi_interfaces);
369static DEFINE_MUTEX(ipmi_interfaces_mutex); 432static DEFINE_MUTEX(ipmi_interfaces_mutex);
370 433
371/* List of watchers that want to know when smi's are added and 434/*
372 deleted. */ 435 * List of watchers that want to know when smi's are added and deleted.
436 */
373static LIST_HEAD(smi_watchers); 437static LIST_HEAD(smi_watchers);
374static DEFINE_MUTEX(smi_watchers_mutex); 438static DEFINE_MUTEX(smi_watchers_mutex);
375 439
376 440
441#define ipmi_inc_stat(intf, stat) \
442 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
443#define ipmi_get_stat(intf, stat) \
444 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
445
446
377static void free_recv_msg_list(struct list_head *q) 447static void free_recv_msg_list(struct list_head *q)
378{ 448{
379 struct ipmi_recv_msg *msg, *msg2; 449 struct ipmi_recv_msg *msg, *msg2;
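The hunks above replace the long list of per-counter fields (and counter_lock) with a single atomic_t stats[] array indexed by enum ipmi_stat_indexes, and the ipmi_inc_stat()/ipmi_get_stat() macros paste the short counter name onto the IPMI_STAT_ prefix. A minimal sketch of the same token-pasting pattern with a hypothetical two-counter interface:

	#include <linux/atomic.h>

	enum example_stat_indexes {
		EXAMPLE_STAT_sent_commands = 0,
		EXAMPLE_STAT_handled_responses,
		EXAMPLE_NUM_STATS		/* must remain last */
	};

	struct example_intf {
		atomic_t stats[EXAMPLE_NUM_STATS];
	};

	/* "stat" is the suffix after EXAMPLE_STAT_, exactly as in ipmi_inc_stat(). */
	#define example_inc_stat(intf, stat) \
		atomic_inc(&(intf)->stats[EXAMPLE_STAT_ ## stat])
	#define example_get_stat(intf, stat) \
		((unsigned int) atomic_read(&(intf)->stats[EXAMPLE_STAT_ ## stat]))

	/* Usage: example_inc_stat(intf, sent_commands); no counter spinlock needed. */

Because each counter is an atomic_t, increments from different contexts no longer need the counter_lock the old code took around every update.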
@@ -417,10 +487,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
417 487
418 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 488 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
419 if ((intf->seq_table[i].inuse) 489 if ((intf->seq_table[i].inuse)
420 && (intf->seq_table[i].recv_msg)) 490 && (intf->seq_table[i].recv_msg))
421 {
422 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 491 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
423 }
424 } 492 }
425} 493}
426 494
@@ -487,6 +555,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
487 } 555 }
488 return -ENOMEM; 556 return -ENOMEM;
489} 557}
558EXPORT_SYMBOL(ipmi_smi_watcher_register);
490 559
491int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) 560int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
492{ 561{
@@ -495,6 +564,7 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
495 mutex_unlock(&smi_watchers_mutex); 564 mutex_unlock(&smi_watchers_mutex);
496 return 0; 565 return 0;
497} 566}
567EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
498 568
499/* 569/*
500 * Must be called with smi_watchers_mutex held. 570 * Must be called with smi_watchers_mutex held.
@@ -530,8 +600,7 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
530 } 600 }
531 601
532 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE) 602 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
533 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 603 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
534 {
535 struct ipmi_ipmb_addr *ipmb_addr1 604 struct ipmi_ipmb_addr *ipmb_addr1
536 = (struct ipmi_ipmb_addr *) addr1; 605 = (struct ipmi_ipmb_addr *) addr1;
537 struct ipmi_ipmb_addr *ipmb_addr2 606 struct ipmi_ipmb_addr *ipmb_addr2
@@ -559,9 +628,8 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
559 628
560int ipmi_validate_addr(struct ipmi_addr *addr, int len) 629int ipmi_validate_addr(struct ipmi_addr *addr, int len)
561{ 630{
562 if (len < sizeof(struct ipmi_system_interface_addr)) { 631 if (len < sizeof(struct ipmi_system_interface_addr))
563 return -EINVAL; 632 return -EINVAL;
564 }
565 633
566 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 634 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
567 if (addr->channel != IPMI_BMC_CHANNEL) 635 if (addr->channel != IPMI_BMC_CHANNEL)
@@ -575,23 +643,21 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len)
575 return -EINVAL; 643 return -EINVAL;
576 644
577 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) 645 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
578 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 646 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
579 { 647 if (len < sizeof(struct ipmi_ipmb_addr))
580 if (len < sizeof(struct ipmi_ipmb_addr)) {
581 return -EINVAL; 648 return -EINVAL;
582 }
583 return 0; 649 return 0;
584 } 650 }
585 651
586 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { 652 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
587 if (len < sizeof(struct ipmi_lan_addr)) { 653 if (len < sizeof(struct ipmi_lan_addr))
588 return -EINVAL; 654 return -EINVAL;
589 }
590 return 0; 655 return 0;
591 } 656 }
592 657
593 return -EINVAL; 658 return -EINVAL;
594} 659}
660EXPORT_SYMBOL(ipmi_validate_addr);
595 661
596unsigned int ipmi_addr_length(int addr_type) 662unsigned int ipmi_addr_length(int addr_type)
597{ 663{
@@ -599,34 +665,28 @@ unsigned int ipmi_addr_length(int addr_type)
599 return sizeof(struct ipmi_system_interface_addr); 665 return sizeof(struct ipmi_system_interface_addr);
600 666
601 if ((addr_type == IPMI_IPMB_ADDR_TYPE) 667 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
602 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 668 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
603 {
604 return sizeof(struct ipmi_ipmb_addr); 669 return sizeof(struct ipmi_ipmb_addr);
605 }
606 670
607 if (addr_type == IPMI_LAN_ADDR_TYPE) 671 if (addr_type == IPMI_LAN_ADDR_TYPE)
608 return sizeof(struct ipmi_lan_addr); 672 return sizeof(struct ipmi_lan_addr);
609 673
610 return 0; 674 return 0;
611} 675}
676EXPORT_SYMBOL(ipmi_addr_length);
612 677
613static void deliver_response(struct ipmi_recv_msg *msg) 678static void deliver_response(struct ipmi_recv_msg *msg)
614{ 679{
615 if (!msg->user) { 680 if (!msg->user) {
616 ipmi_smi_t intf = msg->user_msg_data; 681 ipmi_smi_t intf = msg->user_msg_data;
617 unsigned long flags;
618 682
619 /* Special handling for NULL users. */ 683 /* Special handling for NULL users. */
620 if (intf->null_user_handler) { 684 if (intf->null_user_handler) {
621 intf->null_user_handler(intf, msg); 685 intf->null_user_handler(intf, msg);
622 spin_lock_irqsave(&intf->counter_lock, flags); 686 ipmi_inc_stat(intf, handled_local_responses);
623 intf->handled_local_responses++;
624 spin_unlock_irqrestore(&intf->counter_lock, flags);
625 } else { 687 } else {
626 /* No handler, so give up. */ 688 /* No handler, so give up. */
627 spin_lock_irqsave(&intf->counter_lock, flags); 689 ipmi_inc_stat(intf, unhandled_local_responses);
628 intf->unhandled_local_responses++;
629 spin_unlock_irqrestore(&intf->counter_lock, flags);
630 } 690 }
631 ipmi_free_recv_msg(msg); 691 ipmi_free_recv_msg(msg);
632 } else { 692 } else {
@@ -646,9 +706,11 @@ deliver_err_response(struct ipmi_recv_msg *msg, int err)
646 deliver_response(msg); 706 deliver_response(msg);
647} 707}
648 708
649/* Find the next sequence number not being used and add the given 709/*
650 message with the given timeout to the sequence table. This must be 710 * Find the next sequence number not being used and add the given
651 called with the interface's seq_lock held. */ 711 * message with the given timeout to the sequence table. This must be
712 * called with the interface's seq_lock held.
713 */
652static int intf_next_seq(ipmi_smi_t intf, 714static int intf_next_seq(ipmi_smi_t intf,
653 struct ipmi_recv_msg *recv_msg, 715 struct ipmi_recv_msg *recv_msg,
654 unsigned long timeout, 716 unsigned long timeout,
@@ -660,10 +722,8 @@ static int intf_next_seq(ipmi_smi_t intf,
660 int rv = 0; 722 int rv = 0;
661 unsigned int i; 723 unsigned int i;
662 724
663 for (i = intf->curr_seq; 725 for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
664 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; 726 i = (i+1)%IPMI_IPMB_NUM_SEQ) {
665 i = (i+1)%IPMI_IPMB_NUM_SEQ)
666 {
667 if (!intf->seq_table[i].inuse) 727 if (!intf->seq_table[i].inuse)
668 break; 728 break;
669 } 729 }
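For reference, the wrap-around scan in intf_next_seq() can be exercised on its own. The standalone sketch below mirrors the circular loop with a plain int array standing in for seq_table; the table size and the inuse flag are simplified assumptions.

#include <stdio.h>

#define NUM_SEQ 64

static int find_free_seq(const int inuse[NUM_SEQ], int curr_seq)
{
	int i;

	/* Walk the table once, starting at curr_seq and wrapping. */
	for (i = curr_seq; (i + 1) % NUM_SEQ != curr_seq;
	     i = (i + 1) % NUM_SEQ) {
		if (!inuse[i])
			break;
	}

	/* The slot just before curr_seq is only checked here, as above. */
	return inuse[i] ? -1 : i;	/* -1 plays the role of -EAGAIN */
}

int main(void)
{
	int inuse[NUM_SEQ] = { 0 };

	inuse[0] = inuse[1] = 1;
	printf("next free sequence slot: %d\n", find_free_seq(inuse, 0));
	return 0;
}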
@@ -671,8 +731,10 @@ static int intf_next_seq(ipmi_smi_t intf,
671 if (!intf->seq_table[i].inuse) { 731 if (!intf->seq_table[i].inuse) {
672 intf->seq_table[i].recv_msg = recv_msg; 732 intf->seq_table[i].recv_msg = recv_msg;
673 733
674 /* Start with the maximum timeout, when the send response 734 /*
675 comes in we will start the real timer. */ 735 * Start with the maximum timeout, when the send response
736 * comes in we will start the real timer.
737 */
676 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; 738 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
677 intf->seq_table[i].orig_timeout = timeout; 739 intf->seq_table[i].orig_timeout = timeout;
678 intf->seq_table[i].retries_left = retries; 740 intf->seq_table[i].retries_left = retries;
@@ -685,15 +747,17 @@ static int intf_next_seq(ipmi_smi_t intf,
685 } else { 747 } else {
686 rv = -EAGAIN; 748 rv = -EAGAIN;
687 } 749 }
688 750
689 return rv; 751 return rv;
690} 752}
691 753
692/* Return the receive message for the given sequence number and 754/*
693 release the sequence number so it can be reused. Some other data 755 * Return the receive message for the given sequence number and
694 is passed in to be sure the message matches up correctly (to help 756 * release the sequence number so it can be reused. Some other data
695 guard against message coming in after their timeout and the 757 * is passed in to be sure the message matches up correctly (to help
696 sequence number being reused). */ 758 * guard against message coming in after their timeout and the
759 * sequence number being reused).
760 */
697static int intf_find_seq(ipmi_smi_t intf, 761static int intf_find_seq(ipmi_smi_t intf,
698 unsigned char seq, 762 unsigned char seq,
699 short channel, 763 short channel,
@@ -712,11 +776,9 @@ static int intf_find_seq(ipmi_smi_t intf,
712 if (intf->seq_table[seq].inuse) { 776 if (intf->seq_table[seq].inuse) {
713 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; 777 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
714 778
715 if ((msg->addr.channel == channel) 779 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
716 && (msg->msg.cmd == cmd) 780 && (msg->msg.netfn == netfn)
717 && (msg->msg.netfn == netfn) 781 && (ipmi_addr_equal(addr, &(msg->addr)))) {
718 && (ipmi_addr_equal(addr, &(msg->addr))))
719 {
720 *recv_msg = msg; 782 *recv_msg = msg;
721 intf->seq_table[seq].inuse = 0; 783 intf->seq_table[seq].inuse = 0;
722 rv = 0; 784 rv = 0;
@@ -741,11 +803,12 @@ static int intf_start_seq_timer(ipmi_smi_t intf,
741 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 803 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
742 804
743 spin_lock_irqsave(&(intf->seq_lock), flags); 805 spin_lock_irqsave(&(intf->seq_lock), flags);
744 /* We do this verification because the user can be deleted 806 /*
745 while a message is outstanding. */ 807 * We do this verification because the user can be deleted
808 * while a message is outstanding.
809 */
746 if ((intf->seq_table[seq].inuse) 810 if ((intf->seq_table[seq].inuse)
747 && (intf->seq_table[seq].seqid == seqid)) 811 && (intf->seq_table[seq].seqid == seqid)) {
748 {
749 struct seq_table *ent = &(intf->seq_table[seq]); 812 struct seq_table *ent = &(intf->seq_table[seq]);
750 ent->timeout = ent->orig_timeout; 813 ent->timeout = ent->orig_timeout;
751 rv = 0; 814 rv = 0;
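intf_start_seq_timer() and intf_err_seq() both recover the sequence slot and sequence id from the msgid via GET_SEQ_FROM_MSGID. The sketch below shows one plausible way such a packing could work; the bit widths and helper names are assumptions for illustration, not the kernel's actual macro layout.

#include <stdio.h>

#define SEQ_BITS	6
#define SEQID_MASK	((1ul << (32 - SEQ_BITS)) - 1)

/* Hypothetical analogue of STORE_SEQ_IN_MSGID. */
static unsigned long store_seq_in_msgid(unsigned int seq, unsigned long seqid)
{
	return ((unsigned long)(seq & ((1u << SEQ_BITS) - 1)) << (32 - SEQ_BITS))
		| (seqid & SEQID_MASK);
}

/* Hypothetical analogue of GET_SEQ_FROM_MSGID. */
static void get_seq_from_msgid(unsigned long msgid,
			       unsigned int *seq, unsigned long *seqid)
{
	*seq = (msgid >> (32 - SEQ_BITS)) & ((1u << SEQ_BITS) - 1);
	*seqid = msgid & SEQID_MASK;
}

int main(void)
{
	unsigned int seq;
	unsigned long seqid, msgid = store_seq_in_msgid(5, 12345);

	get_seq_from_msgid(msgid, &seq, &seqid);
	printf("seq=%u seqid=%lu\n", seq, seqid);
	return 0;
}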
@@ -770,11 +833,12 @@ static int intf_err_seq(ipmi_smi_t intf,
770 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 833 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
771 834
772 spin_lock_irqsave(&(intf->seq_lock), flags); 835 spin_lock_irqsave(&(intf->seq_lock), flags);
773 /* We do this verification because the user can be deleted 836 /*
774 while a message is outstanding. */ 837 * We do this verification because the user can be deleted
838 * while a message is outstanding.
839 */
775 if ((intf->seq_table[seq].inuse) 840 if ((intf->seq_table[seq].inuse)
776 && (intf->seq_table[seq].seqid == seqid)) 841 && (intf->seq_table[seq].seqid == seqid)) {
777 {
778 struct seq_table *ent = &(intf->seq_table[seq]); 842 struct seq_table *ent = &(intf->seq_table[seq]);
779 843
780 ent->inuse = 0; 844 ent->inuse = 0;
@@ -800,24 +864,30 @@ int ipmi_create_user(unsigned int if_num,
800 int rv = 0; 864 int rv = 0;
801 ipmi_smi_t intf; 865 ipmi_smi_t intf;
802 866
803 /* There is no module usecount here, because it's not 867 /*
804 required. Since this can only be used by and called from 868 * There is no module usecount here, because it's not
805 other modules, they will implicitly use this module, and 869 * required. Since this can only be used by and called from
806 thus this can't be removed unless the other modules are 870 * other modules, they will implicitly use this module, and
807 removed. */ 871 * thus this can't be removed unless the other modules are
872 * removed.
873 */
808 874
809 if (handler == NULL) 875 if (handler == NULL)
810 return -EINVAL; 876 return -EINVAL;
811 877
812 /* Make sure the driver is actually initialized, this handles 878 /*
813 problems with initialization order. */ 879 * Make sure the driver is actually initialized, this handles
880 * problems with initialization order.
881 */
814 if (!initialized) { 882 if (!initialized) {
815 rv = ipmi_init_msghandler(); 883 rv = ipmi_init_msghandler();
816 if (rv) 884 if (rv)
817 return rv; 885 return rv;
818 886
819 /* The init code doesn't return an error if it was turned 887 /*
820 off, but it won't initialize. Check that. */ 888 * The init code doesn't return an error if it was turned
889 * off, but it won't initialize. Check that.
890 */
821 if (!initialized) 891 if (!initialized)
822 return -ENODEV; 892 return -ENODEV;
823 } 893 }
@@ -858,8 +928,10 @@ int ipmi_create_user(unsigned int if_num,
858 } 928 }
859 } 929 }
860 930
861 /* Hold the lock so intf->handlers is guaranteed to be good 931 /*
862 * until now */ 932 * Hold the lock so intf->handlers is guaranteed to be good
933 * until now
934 */
863 mutex_unlock(&ipmi_interfaces_mutex); 935 mutex_unlock(&ipmi_interfaces_mutex);
864 936
865 new_user->valid = 1; 937 new_user->valid = 1;
@@ -876,6 +948,7 @@ out_kfree:
876 kfree(new_user); 948 kfree(new_user);
877 return rv; 949 return rv;
878} 950}
951EXPORT_SYMBOL(ipmi_create_user);
879 952
880static void free_user(struct kref *ref) 953static void free_user(struct kref *ref)
881{ 954{
@@ -899,8 +972,7 @@ int ipmi_destroy_user(ipmi_user_t user)
899 972
900 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 973 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
901 if (intf->seq_table[i].inuse 974 if (intf->seq_table[i].inuse
902 && (intf->seq_table[i].recv_msg->user == user)) 975 && (intf->seq_table[i].recv_msg->user == user)) {
903 {
904 intf->seq_table[i].inuse = 0; 976 intf->seq_table[i].inuse = 0;
905 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 977 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
906 } 978 }
@@ -943,6 +1015,7 @@ int ipmi_destroy_user(ipmi_user_t user)
943 1015
944 return 0; 1016 return 0;
945} 1017}
1018EXPORT_SYMBOL(ipmi_destroy_user);
946 1019
947void ipmi_get_version(ipmi_user_t user, 1020void ipmi_get_version(ipmi_user_t user,
948 unsigned char *major, 1021 unsigned char *major,
@@ -951,6 +1024,7 @@ void ipmi_get_version(ipmi_user_t user,
951 *major = user->intf->ipmi_version_major; 1024 *major = user->intf->ipmi_version_major;
952 *minor = user->intf->ipmi_version_minor; 1025 *minor = user->intf->ipmi_version_minor;
953} 1026}
1027EXPORT_SYMBOL(ipmi_get_version);
954 1028
955int ipmi_set_my_address(ipmi_user_t user, 1029int ipmi_set_my_address(ipmi_user_t user,
956 unsigned int channel, 1030 unsigned int channel,
@@ -961,6 +1035,7 @@ int ipmi_set_my_address(ipmi_user_t user,
961 user->intf->channels[channel].address = address; 1035 user->intf->channels[channel].address = address;
962 return 0; 1036 return 0;
963} 1037}
1038EXPORT_SYMBOL(ipmi_set_my_address);
964 1039
965int ipmi_get_my_address(ipmi_user_t user, 1040int ipmi_get_my_address(ipmi_user_t user,
966 unsigned int channel, 1041 unsigned int channel,
@@ -971,6 +1046,7 @@ int ipmi_get_my_address(ipmi_user_t user,
971 *address = user->intf->channels[channel].address; 1046 *address = user->intf->channels[channel].address;
972 return 0; 1047 return 0;
973} 1048}
1049EXPORT_SYMBOL(ipmi_get_my_address);
974 1050
975int ipmi_set_my_LUN(ipmi_user_t user, 1051int ipmi_set_my_LUN(ipmi_user_t user,
976 unsigned int channel, 1052 unsigned int channel,
@@ -981,6 +1057,7 @@ int ipmi_set_my_LUN(ipmi_user_t user,
981 user->intf->channels[channel].lun = LUN & 0x3; 1057 user->intf->channels[channel].lun = LUN & 0x3;
982 return 0; 1058 return 0;
983} 1059}
1060EXPORT_SYMBOL(ipmi_set_my_LUN);
984 1061
985int ipmi_get_my_LUN(ipmi_user_t user, 1062int ipmi_get_my_LUN(ipmi_user_t user,
986 unsigned int channel, 1063 unsigned int channel,
@@ -991,6 +1068,7 @@ int ipmi_get_my_LUN(ipmi_user_t user,
991 *address = user->intf->channels[channel].lun; 1068 *address = user->intf->channels[channel].lun;
992 return 0; 1069 return 0;
993} 1070}
1071EXPORT_SYMBOL(ipmi_get_my_LUN);
994 1072
995int ipmi_get_maintenance_mode(ipmi_user_t user) 1073int ipmi_get_maintenance_mode(ipmi_user_t user)
996{ 1074{
@@ -1075,6 +1153,11 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
1075 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) 1153 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1076 list_move_tail(&msg->link, &msgs); 1154 list_move_tail(&msg->link, &msgs);
1077 intf->waiting_events_count = 0; 1155 intf->waiting_events_count = 0;
1156 if (intf->event_msg_printed) {
1157 printk(KERN_WARNING PFX "Event queue no longer"
1158 " full\n");
1159 intf->event_msg_printed = 0;
1160 }
1078 1161
1079 intf->delivering_events = 1; 1162 intf->delivering_events = 1;
1080 spin_unlock_irqrestore(&intf->events_lock, flags); 1163 spin_unlock_irqrestore(&intf->events_lock, flags);
@@ -1094,6 +1177,7 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
1094 1177
1095 return 0; 1178 return 0;
1096} 1179}
1180EXPORT_SYMBOL(ipmi_set_gets_events);
1097 1181
1098static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, 1182static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1099 unsigned char netfn, 1183 unsigned char netfn,
@@ -1159,6 +1243,7 @@ int ipmi_register_for_cmd(ipmi_user_t user,
1159 1243
1160 return rv; 1244 return rv;
1161} 1245}
1246EXPORT_SYMBOL(ipmi_register_for_cmd);
1162 1247
1163int ipmi_unregister_for_cmd(ipmi_user_t user, 1248int ipmi_unregister_for_cmd(ipmi_user_t user,
1164 unsigned char netfn, 1249 unsigned char netfn,
@@ -1196,19 +1281,13 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
1196 } 1281 }
1197 return rv; 1282 return rv;
1198} 1283}
1199 1284EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1200void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1201{
1202 ipmi_smi_t intf = user->intf;
1203 if (intf->handlers)
1204 intf->handlers->set_run_to_completion(intf->send_info, val);
1205}
1206 1285
1207static unsigned char 1286static unsigned char
1208ipmb_checksum(unsigned char *data, int size) 1287ipmb_checksum(unsigned char *data, int size)
1209{ 1288{
1210 unsigned char csum = 0; 1289 unsigned char csum = 0;
1211 1290
1212 for (; size > 0; size--, data++) 1291 for (; size > 0; size--, data++)
1213 csum += *data; 1292 csum += *data;
1214 1293
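The hunk above keeps ipmb_checksum() as a running byte sum; per the IPMI spec the stored checksum is the two's complement of that sum, so a receiver verifies a block by adding all covered bytes plus the checksum byte and checking for zero mod 256. A small standalone sketch of that convention follows; the sample message bytes are made up.

#include <stdio.h>

static unsigned char ipmb_checksum(const unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;	/* two's complement of the running sum */
}

int main(void)
{
	unsigned char msg[3] = { 0x20, 0x18, 0 };	/* rsSA, netFn/rsLUN */

	msg[2] = ipmb_checksum(msg, 2);
	printf("checksum byte: 0x%02x, verify: %u\n",
	       msg[2], (unsigned)(unsigned char)(msg[0] + msg[1] + msg[2]));
	return 0;
}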
@@ -1250,8 +1329,10 @@ static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1250 = ipmb_checksum(&(smi_msg->data[i+6]), 1329 = ipmb_checksum(&(smi_msg->data[i+6]),
1251 smi_msg->data_size-6); 1330 smi_msg->data_size-6);
1252 1331
1253 /* Add on the checksum size and the offset from the 1332 /*
1254 broadcast. */ 1333 * Add on the checksum size and the offset from the
1334 * broadcast.
1335 */
1255 smi_msg->data_size += 1 + i; 1336 smi_msg->data_size += 1 + i;
1256 1337
1257 smi_msg->msgid = msgid; 1338 smi_msg->msgid = msgid;
@@ -1287,17 +1368,21 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1287 = ipmb_checksum(&(smi_msg->data[7]), 1368 = ipmb_checksum(&(smi_msg->data[7]),
1288 smi_msg->data_size-7); 1369 smi_msg->data_size-7);
1289 1370
1290 /* Add on the checksum size and the offset from the 1371 /*
1291 broadcast. */ 1372 * Add on the checksum size and the offset from the
1373 * broadcast.
1374 */
1292 smi_msg->data_size += 1; 1375 smi_msg->data_size += 1;
1293 1376
1294 smi_msg->msgid = msgid; 1377 smi_msg->msgid = msgid;
1295} 1378}
1296 1379
1297/* Separate from ipmi_request so that the user does not have to be 1380/*
1298 supplied in certain circumstances (mainly at panic time). If 1381 * Separate from ipmi_request so that the user does not have to be
1299 messages are supplied, they will be freed, even if an error 1382 * supplied in certain circumstances (mainly at panic time). If
1300 occurs. */ 1383 * messages are supplied, they will be freed, even if an error
1384 * occurs.
1385 */
1301static int i_ipmi_request(ipmi_user_t user, 1386static int i_ipmi_request(ipmi_user_t user,
1302 ipmi_smi_t intf, 1387 ipmi_smi_t intf,
1303 struct ipmi_addr *addr, 1388 struct ipmi_addr *addr,
@@ -1319,19 +1404,18 @@ static int i_ipmi_request(ipmi_user_t user,
1319 struct ipmi_smi_handlers *handlers; 1404 struct ipmi_smi_handlers *handlers;
1320 1405
1321 1406
1322 if (supplied_recv) { 1407 if (supplied_recv)
1323 recv_msg = supplied_recv; 1408 recv_msg = supplied_recv;
1324 } else { 1409 else {
1325 recv_msg = ipmi_alloc_recv_msg(); 1410 recv_msg = ipmi_alloc_recv_msg();
1326 if (recv_msg == NULL) { 1411 if (recv_msg == NULL)
1327 return -ENOMEM; 1412 return -ENOMEM;
1328 }
1329 } 1413 }
1330 recv_msg->user_msg_data = user_msg_data; 1414 recv_msg->user_msg_data = user_msg_data;
1331 1415
1332 if (supplied_smi) { 1416 if (supplied_smi)
1333 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 1417 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1334 } else { 1418 else {
1335 smi_msg = ipmi_alloc_smi_msg(); 1419 smi_msg = ipmi_alloc_smi_msg();
1336 if (smi_msg == NULL) { 1420 if (smi_msg == NULL) {
1337 ipmi_free_recv_msg(recv_msg); 1421 ipmi_free_recv_msg(recv_msg);
@@ -1350,8 +1434,10 @@ static int i_ipmi_request(ipmi_user_t user,
1350 if (user) 1434 if (user)
1351 kref_get(&user->refcount); 1435 kref_get(&user->refcount);
1352 recv_msg->msgid = msgid; 1436 recv_msg->msgid = msgid;
1353 /* Store the message to send in the receive message so timeout 1437 /*
1354 responses can get the proper response data. */ 1438 * Store the message to send in the receive message so timeout
1439 * responses can get the proper response data.
1440 */
1355 recv_msg->msg = *msg; 1441 recv_msg->msg = *msg;
1356 1442
1357 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 1443 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
@@ -1365,9 +1451,7 @@ static int i_ipmi_request(ipmi_user_t user,
1365 1451
1366 smi_addr = (struct ipmi_system_interface_addr *) addr; 1452 smi_addr = (struct ipmi_system_interface_addr *) addr;
1367 if (smi_addr->lun > 3) { 1453 if (smi_addr->lun > 3) {
1368 spin_lock_irqsave(&intf->counter_lock, flags); 1454 ipmi_inc_stat(intf, sent_invalid_commands);
1369 intf->sent_invalid_commands++;
1370 spin_unlock_irqrestore(&intf->counter_lock, flags);
1371 rv = -EINVAL; 1455 rv = -EINVAL;
1372 goto out_err; 1456 goto out_err;
1373 } 1457 }
@@ -1377,13 +1461,12 @@ static int i_ipmi_request(ipmi_user_t user,
1377 if ((msg->netfn == IPMI_NETFN_APP_REQUEST) 1461 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1378 && ((msg->cmd == IPMI_SEND_MSG_CMD) 1462 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1379 || (msg->cmd == IPMI_GET_MSG_CMD) 1463 || (msg->cmd == IPMI_GET_MSG_CMD)
1380 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) 1464 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1381 { 1465 /*
1382 /* We don't let the user do these, since we manage 1466 * We don't let the user do these, since we manage
1383 the sequence numbers. */ 1467 * the sequence numbers.
1384 spin_lock_irqsave(&intf->counter_lock, flags); 1468 */
1385 intf->sent_invalid_commands++; 1469 ipmi_inc_stat(intf, sent_invalid_commands);
1386 spin_unlock_irqrestore(&intf->counter_lock, flags);
1387 rv = -EINVAL; 1470 rv = -EINVAL;
1388 goto out_err; 1471 goto out_err;
1389 } 1472 }
@@ -1391,14 +1474,12 @@ static int i_ipmi_request(ipmi_user_t user,
1391 if (((msg->netfn == IPMI_NETFN_APP_REQUEST) 1474 if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1392 && ((msg->cmd == IPMI_COLD_RESET_CMD) 1475 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1393 || (msg->cmd == IPMI_WARM_RESET_CMD))) 1476 || (msg->cmd == IPMI_WARM_RESET_CMD)))
1394 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) 1477 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
1395 {
1396 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 1478 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1397 intf->auto_maintenance_timeout 1479 intf->auto_maintenance_timeout
1398 = IPMI_MAINTENANCE_MODE_TIMEOUT; 1480 = IPMI_MAINTENANCE_MODE_TIMEOUT;
1399 if (!intf->maintenance_mode 1481 if (!intf->maintenance_mode
1400 && !intf->maintenance_mode_enable) 1482 && !intf->maintenance_mode_enable) {
1401 {
1402 intf->maintenance_mode_enable = 1; 1483 intf->maintenance_mode_enable = 1;
1403 maintenance_mode_update(intf); 1484 maintenance_mode_update(intf);
1404 } 1485 }
@@ -1407,9 +1488,7 @@ static int i_ipmi_request(ipmi_user_t user,
1407 } 1488 }
1408 1489
1409 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { 1490 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1410 spin_lock_irqsave(&intf->counter_lock, flags); 1491 ipmi_inc_stat(intf, sent_invalid_commands);
1411 intf->sent_invalid_commands++;
1412 spin_unlock_irqrestore(&intf->counter_lock, flags);
1413 rv = -EMSGSIZE; 1492 rv = -EMSGSIZE;
1414 goto out_err; 1493 goto out_err;
1415 } 1494 }
@@ -1421,31 +1500,23 @@ static int i_ipmi_request(ipmi_user_t user,
1421 if (msg->data_len > 0) 1500 if (msg->data_len > 0)
1422 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); 1501 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1423 smi_msg->data_size = msg->data_len + 2; 1502 smi_msg->data_size = msg->data_len + 2;
1424 spin_lock_irqsave(&intf->counter_lock, flags); 1503 ipmi_inc_stat(intf, sent_local_commands);
1425 intf->sent_local_commands++;
1426 spin_unlock_irqrestore(&intf->counter_lock, flags);
1427 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) 1504 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1428 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 1505 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
1429 {
1430 struct ipmi_ipmb_addr *ipmb_addr; 1506 struct ipmi_ipmb_addr *ipmb_addr;
1431 unsigned char ipmb_seq; 1507 unsigned char ipmb_seq;
1432 long seqid; 1508 long seqid;
1433 int broadcast = 0; 1509 int broadcast = 0;
1434 1510
1435 if (addr->channel >= IPMI_MAX_CHANNELS) { 1511 if (addr->channel >= IPMI_MAX_CHANNELS) {
1436 spin_lock_irqsave(&intf->counter_lock, flags); 1512 ipmi_inc_stat(intf, sent_invalid_commands);
1437 intf->sent_invalid_commands++;
1438 spin_unlock_irqrestore(&intf->counter_lock, flags);
1439 rv = -EINVAL; 1513 rv = -EINVAL;
1440 goto out_err; 1514 goto out_err;
1441 } 1515 }
1442 1516
1443 if (intf->channels[addr->channel].medium 1517 if (intf->channels[addr->channel].medium
1444 != IPMI_CHANNEL_MEDIUM_IPMB) 1518 != IPMI_CHANNEL_MEDIUM_IPMB) {
1445 { 1519 ipmi_inc_stat(intf, sent_invalid_commands);
1446 spin_lock_irqsave(&intf->counter_lock, flags);
1447 intf->sent_invalid_commands++;
1448 spin_unlock_irqrestore(&intf->counter_lock, flags);
1449 rv = -EINVAL; 1520 rv = -EINVAL;
1450 goto out_err; 1521 goto out_err;
1451 } 1522 }
@@ -1457,9 +1528,11 @@ static int i_ipmi_request(ipmi_user_t user,
1457 retries = 4; 1528 retries = 4;
1458 } 1529 }
1459 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { 1530 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1460 /* Broadcasts add a zero at the beginning of the 1531 /*
1461 message, but otherwise is the same as an IPMB 1532 * Broadcasts add a zero at the beginning of the
1462 address. */ 1533 * message, but otherwise is the same as an IPMB
1534 * address.
1535 */
1463 addr->addr_type = IPMI_IPMB_ADDR_TYPE; 1536 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1464 broadcast = 1; 1537 broadcast = 1;
1465 } 1538 }
@@ -1469,21 +1542,19 @@ static int i_ipmi_request(ipmi_user_t user,
1469 if (retry_time_ms == 0) 1542 if (retry_time_ms == 0)
1470 retry_time_ms = 1000; 1543 retry_time_ms = 1000;
1471 1544
1472 /* 9 for the header and 1 for the checksum, plus 1545 /*
1473 possibly one for the broadcast. */ 1546 * 9 for the header and 1 for the checksum, plus
1547 * possibly one for the broadcast.
1548 */
1474 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { 1549 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1475 spin_lock_irqsave(&intf->counter_lock, flags); 1550 ipmi_inc_stat(intf, sent_invalid_commands);
1476 intf->sent_invalid_commands++;
1477 spin_unlock_irqrestore(&intf->counter_lock, flags);
1478 rv = -EMSGSIZE; 1551 rv = -EMSGSIZE;
1479 goto out_err; 1552 goto out_err;
1480 } 1553 }
1481 1554
1482 ipmb_addr = (struct ipmi_ipmb_addr *) addr; 1555 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1483 if (ipmb_addr->lun > 3) { 1556 if (ipmb_addr->lun > 3) {
1484 spin_lock_irqsave(&intf->counter_lock, flags); 1557 ipmi_inc_stat(intf, sent_invalid_commands);
1485 intf->sent_invalid_commands++;
1486 spin_unlock_irqrestore(&intf->counter_lock, flags);
1487 rv = -EINVAL; 1558 rv = -EINVAL;
1488 goto out_err; 1559 goto out_err;
1489 } 1560 }
@@ -1491,29 +1562,31 @@ static int i_ipmi_request(ipmi_user_t user,
1491 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); 1562 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1492 1563
1493 if (recv_msg->msg.netfn & 0x1) { 1564 if (recv_msg->msg.netfn & 0x1) {
1494 /* It's a response, so use the user's sequence 1565 /*
1495 from msgid. */ 1566 * It's a response, so use the user's sequence
1496 spin_lock_irqsave(&intf->counter_lock, flags); 1567 * from msgid.
1497 intf->sent_ipmb_responses++; 1568 */
1498 spin_unlock_irqrestore(&intf->counter_lock, flags); 1569 ipmi_inc_stat(intf, sent_ipmb_responses);
1499 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, 1570 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1500 msgid, broadcast, 1571 msgid, broadcast,
1501 source_address, source_lun); 1572 source_address, source_lun);
1502 1573
1503 /* Save the receive message so we can use it 1574 /*
1504 to deliver the response. */ 1575 * Save the receive message so we can use it
1576 * to deliver the response.
1577 */
1505 smi_msg->user_data = recv_msg; 1578 smi_msg->user_data = recv_msg;
1506 } else { 1579 } else {
1507 /* It's a command, so get a sequence for it. */ 1580 /* It's a command, so get a sequence for it. */
1508 1581
1509 spin_lock_irqsave(&(intf->seq_lock), flags); 1582 spin_lock_irqsave(&(intf->seq_lock), flags);
1510 1583
1511 spin_lock(&intf->counter_lock); 1584 ipmi_inc_stat(intf, sent_ipmb_commands);
1512 intf->sent_ipmb_commands++;
1513 spin_unlock(&intf->counter_lock);
1514 1585
1515 /* Create a sequence number with a 1 second 1586 /*
1516 timeout and 4 retries. */ 1587 * Create a sequence number with a 1 second
1588 * timeout and 4 retries.
1589 */
1517 rv = intf_next_seq(intf, 1590 rv = intf_next_seq(intf,
1518 recv_msg, 1591 recv_msg,
1519 retry_time_ms, 1592 retry_time_ms,
@@ -1522,34 +1595,42 @@ static int i_ipmi_request(ipmi_user_t user,
1522 &ipmb_seq, 1595 &ipmb_seq,
1523 &seqid); 1596 &seqid);
1524 if (rv) { 1597 if (rv) {
1525 /* We have used up all the sequence numbers, 1598 /*
1526 probably, so abort. */ 1599 * We have used up all the sequence numbers,
1600 * probably, so abort.
1601 */
1527 spin_unlock_irqrestore(&(intf->seq_lock), 1602 spin_unlock_irqrestore(&(intf->seq_lock),
1528 flags); 1603 flags);
1529 goto out_err; 1604 goto out_err;
1530 } 1605 }
1531 1606
1532 /* Store the sequence number in the message, 1607 /*
1533 so that when the send message response 1608 * Store the sequence number in the message,
1534 comes back we can start the timer. */ 1609 * so that when the send message response
1610 * comes back we can start the timer.
1611 */
1535 format_ipmb_msg(smi_msg, msg, ipmb_addr, 1612 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1536 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 1613 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1537 ipmb_seq, broadcast, 1614 ipmb_seq, broadcast,
1538 source_address, source_lun); 1615 source_address, source_lun);
1539 1616
1540 /* Copy the message into the recv message data, so we 1617 /*
1541 can retransmit it later if necessary. */ 1618 * Copy the message into the recv message data, so we
1619 * can retransmit it later if necessary.
1620 */
1542 memcpy(recv_msg->msg_data, smi_msg->data, 1621 memcpy(recv_msg->msg_data, smi_msg->data,
1543 smi_msg->data_size); 1622 smi_msg->data_size);
1544 recv_msg->msg.data = recv_msg->msg_data; 1623 recv_msg->msg.data = recv_msg->msg_data;
1545 recv_msg->msg.data_len = smi_msg->data_size; 1624 recv_msg->msg.data_len = smi_msg->data_size;
1546 1625
1547 /* We don't unlock until here, because we need 1626 /*
1548 to copy the completed message into the 1627 * We don't unlock until here, because we need
1549 recv_msg before we release the lock. 1628 * to copy the completed message into the
1550 Otherwise, race conditions may bite us. I 1629 * recv_msg before we release the lock.
1551 know that's pretty paranoid, but I prefer 1630 * Otherwise, race conditions may bite us. I
1552 to be correct. */ 1631 * know that's pretty paranoid, but I prefer
1632 * to be correct.
1633 */
1553 spin_unlock_irqrestore(&(intf->seq_lock), flags); 1634 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1554 } 1635 }
1555 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { 1636 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
@@ -1558,21 +1639,16 @@ static int i_ipmi_request(ipmi_user_t user,
1558 long seqid; 1639 long seqid;
1559 1640
1560 if (addr->channel >= IPMI_MAX_CHANNELS) { 1641 if (addr->channel >= IPMI_MAX_CHANNELS) {
1561 spin_lock_irqsave(&intf->counter_lock, flags); 1642 ipmi_inc_stat(intf, sent_invalid_commands);
1562 intf->sent_invalid_commands++;
1563 spin_unlock_irqrestore(&intf->counter_lock, flags);
1564 rv = -EINVAL; 1643 rv = -EINVAL;
1565 goto out_err; 1644 goto out_err;
1566 } 1645 }
1567 1646
1568 if ((intf->channels[addr->channel].medium 1647 if ((intf->channels[addr->channel].medium
1569 != IPMI_CHANNEL_MEDIUM_8023LAN) 1648 != IPMI_CHANNEL_MEDIUM_8023LAN)
1570 && (intf->channels[addr->channel].medium 1649 && (intf->channels[addr->channel].medium
1571 != IPMI_CHANNEL_MEDIUM_ASYNC)) 1650 != IPMI_CHANNEL_MEDIUM_ASYNC)) {
1572 { 1651 ipmi_inc_stat(intf, sent_invalid_commands);
1573 spin_lock_irqsave(&intf->counter_lock, flags);
1574 intf->sent_invalid_commands++;
1575 spin_unlock_irqrestore(&intf->counter_lock, flags);
1576 rv = -EINVAL; 1652 rv = -EINVAL;
1577 goto out_err; 1653 goto out_err;
1578 } 1654 }
@@ -1585,18 +1661,14 @@ static int i_ipmi_request(ipmi_user_t user,
1585 1661
1586 /* 11 for the header and 1 for the checksum. */ 1662 /* 11 for the header and 1 for the checksum. */
1587 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 1663 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1588 spin_lock_irqsave(&intf->counter_lock, flags); 1664 ipmi_inc_stat(intf, sent_invalid_commands);
1589 intf->sent_invalid_commands++;
1590 spin_unlock_irqrestore(&intf->counter_lock, flags);
1591 rv = -EMSGSIZE; 1665 rv = -EMSGSIZE;
1592 goto out_err; 1666 goto out_err;
1593 } 1667 }
1594 1668
1595 lan_addr = (struct ipmi_lan_addr *) addr; 1669 lan_addr = (struct ipmi_lan_addr *) addr;
1596 if (lan_addr->lun > 3) { 1670 if (lan_addr->lun > 3) {
1597 spin_lock_irqsave(&intf->counter_lock, flags); 1671 ipmi_inc_stat(intf, sent_invalid_commands);
1598 intf->sent_invalid_commands++;
1599 spin_unlock_irqrestore(&intf->counter_lock, flags);
1600 rv = -EINVAL; 1672 rv = -EINVAL;
1601 goto out_err; 1673 goto out_err;
1602 } 1674 }
@@ -1604,28 +1676,30 @@ static int i_ipmi_request(ipmi_user_t user,
1604 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 1676 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1605 1677
1606 if (recv_msg->msg.netfn & 0x1) { 1678 if (recv_msg->msg.netfn & 0x1) {
1607 /* It's a response, so use the user's sequence 1679 /*
1608 from msgid. */ 1680 * It's a response, so use the user's sequence
1609 spin_lock_irqsave(&intf->counter_lock, flags); 1681 * from msgid.
1610 intf->sent_lan_responses++; 1682 */
1611 spin_unlock_irqrestore(&intf->counter_lock, flags); 1683 ipmi_inc_stat(intf, sent_lan_responses);
1612 format_lan_msg(smi_msg, msg, lan_addr, msgid, 1684 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1613 msgid, source_lun); 1685 msgid, source_lun);
1614 1686
1615 /* Save the receive message so we can use it 1687 /*
1616 to deliver the response. */ 1688 * Save the receive message so we can use it
1689 * to deliver the response.
1690 */
1617 smi_msg->user_data = recv_msg; 1691 smi_msg->user_data = recv_msg;
1618 } else { 1692 } else {
1619 /* It's a command, so get a sequence for it. */ 1693 /* It's a command, so get a sequence for it. */
1620 1694
1621 spin_lock_irqsave(&(intf->seq_lock), flags); 1695 spin_lock_irqsave(&(intf->seq_lock), flags);
1622 1696
1623 spin_lock(&intf->counter_lock); 1697 ipmi_inc_stat(intf, sent_lan_commands);
1624 intf->sent_lan_commands++;
1625 spin_unlock(&intf->counter_lock);
1626 1698
1627 /* Create a sequence number with a 1 second 1699 /*
1628 timeout and 4 retries. */ 1700 * Create a sequence number with a 1 second
1701 * timeout and 4 retries.
1702 */
1629 rv = intf_next_seq(intf, 1703 rv = intf_next_seq(intf,
1630 recv_msg, 1704 recv_msg,
1631 retry_time_ms, 1705 retry_time_ms,
@@ -1634,40 +1708,46 @@ static int i_ipmi_request(ipmi_user_t user,
1634 &ipmb_seq, 1708 &ipmb_seq,
1635 &seqid); 1709 &seqid);
1636 if (rv) { 1710 if (rv) {
1637 /* We have used up all the sequence numbers, 1711 /*
1638 probably, so abort. */ 1712 * We have used up all the sequence numbers,
1713 * probably, so abort.
1714 */
1639 spin_unlock_irqrestore(&(intf->seq_lock), 1715 spin_unlock_irqrestore(&(intf->seq_lock),
1640 flags); 1716 flags);
1641 goto out_err; 1717 goto out_err;
1642 } 1718 }
1643 1719
1644 /* Store the sequence number in the message, 1720 /*
1645 so that when the send message response 1721 * Store the sequence number in the message,
1646 comes back we can start the timer. */ 1722 * so that when the send message response
1723 * comes back we can start the timer.
1724 */
1647 format_lan_msg(smi_msg, msg, lan_addr, 1725 format_lan_msg(smi_msg, msg, lan_addr,
1648 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 1726 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1649 ipmb_seq, source_lun); 1727 ipmb_seq, source_lun);
1650 1728
1651 /* Copy the message into the recv message data, so we 1729 /*
1652 can retransmit it later if necessary. */ 1730 * Copy the message into the recv message data, so we
1731 * can retransmit it later if necessary.
1732 */
1653 memcpy(recv_msg->msg_data, smi_msg->data, 1733 memcpy(recv_msg->msg_data, smi_msg->data,
1654 smi_msg->data_size); 1734 smi_msg->data_size);
1655 recv_msg->msg.data = recv_msg->msg_data; 1735 recv_msg->msg.data = recv_msg->msg_data;
1656 recv_msg->msg.data_len = smi_msg->data_size; 1736 recv_msg->msg.data_len = smi_msg->data_size;
1657 1737
1658 /* We don't unlock until here, because we need 1738 /*
1659 to copy the completed message into the 1739 * We don't unlock until here, because we need
1660 recv_msg before we release the lock. 1740 * to copy the completed message into the
1661 Otherwise, race conditions may bite us. I 1741 * recv_msg before we release the lock.
1662 know that's pretty paranoid, but I prefer 1742 * Otherwise, race conditions may bite us. I
1663 to be correct. */ 1743 * know that's pretty paranoid, but I prefer
1744 * to be correct.
1745 */
1664 spin_unlock_irqrestore(&(intf->seq_lock), flags); 1746 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1665 } 1747 }
1666 } else { 1748 } else {
1667 /* Unknown address type. */ 1749 /* Unknown address type. */
1668 spin_lock_irqsave(&intf->counter_lock, flags); 1750 ipmi_inc_stat(intf, sent_invalid_commands);
1669 intf->sent_invalid_commands++;
1670 spin_unlock_irqrestore(&intf->counter_lock, flags);
1671 rv = -EINVAL; 1751 rv = -EINVAL;
1672 goto out_err; 1752 goto out_err;
1673 } 1753 }
@@ -1735,6 +1815,7 @@ int ipmi_request_settime(ipmi_user_t user,
1735 retries, 1815 retries,
1736 retry_time_ms); 1816 retry_time_ms);
1737} 1817}
1818EXPORT_SYMBOL(ipmi_request_settime);
1738 1819
1739int ipmi_request_supply_msgs(ipmi_user_t user, 1820int ipmi_request_supply_msgs(ipmi_user_t user,
1740 struct ipmi_addr *addr, 1821 struct ipmi_addr *addr,
@@ -1766,6 +1847,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1766 lun, 1847 lun,
1767 -1, 0); 1848 -1, 0);
1768} 1849}
1850EXPORT_SYMBOL(ipmi_request_supply_msgs);
1769 1851
1770#ifdef CONFIG_PROC_FS 1852#ifdef CONFIG_PROC_FS
1771static int ipmb_file_read_proc(char *page, char **start, off_t off, 1853static int ipmb_file_read_proc(char *page, char **start, off_t off,
@@ -1790,7 +1872,7 @@ static int version_file_read_proc(char *page, char **start, off_t off,
1790 char *out = (char *) page; 1872 char *out = (char *) page;
1791 ipmi_smi_t intf = data; 1873 ipmi_smi_t intf = data;
1792 1874
1793 return sprintf(out, "%d.%d\n", 1875 return sprintf(out, "%u.%u\n",
1794 ipmi_version_major(&intf->bmc->id), 1876 ipmi_version_major(&intf->bmc->id),
1795 ipmi_version_minor(&intf->bmc->id)); 1877 ipmi_version_minor(&intf->bmc->id));
1796} 1878}
@@ -1801,65 +1883,65 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
1801 char *out = (char *) page; 1883 char *out = (char *) page;
1802 ipmi_smi_t intf = data; 1884 ipmi_smi_t intf = data;
1803 1885
1804 out += sprintf(out, "sent_invalid_commands: %d\n", 1886 out += sprintf(out, "sent_invalid_commands: %u\n",
1805 intf->sent_invalid_commands); 1887 ipmi_get_stat(intf, sent_invalid_commands));
1806 out += sprintf(out, "sent_local_commands: %d\n", 1888 out += sprintf(out, "sent_local_commands: %u\n",
1807 intf->sent_local_commands); 1889 ipmi_get_stat(intf, sent_local_commands));
1808 out += sprintf(out, "handled_local_responses: %d\n", 1890 out += sprintf(out, "handled_local_responses: %u\n",
1809 intf->handled_local_responses); 1891 ipmi_get_stat(intf, handled_local_responses));
1810 out += sprintf(out, "unhandled_local_responses: %d\n", 1892 out += sprintf(out, "unhandled_local_responses: %u\n",
1811 intf->unhandled_local_responses); 1893 ipmi_get_stat(intf, unhandled_local_responses));
1812 out += sprintf(out, "sent_ipmb_commands: %d\n", 1894 out += sprintf(out, "sent_ipmb_commands: %u\n",
1813 intf->sent_ipmb_commands); 1895 ipmi_get_stat(intf, sent_ipmb_commands));
1814 out += sprintf(out, "sent_ipmb_command_errs: %d\n", 1896 out += sprintf(out, "sent_ipmb_command_errs: %u\n",
1815 intf->sent_ipmb_command_errs); 1897 ipmi_get_stat(intf, sent_ipmb_command_errs));
1816 out += sprintf(out, "retransmitted_ipmb_commands: %d\n", 1898 out += sprintf(out, "retransmitted_ipmb_commands: %u\n",
1817 intf->retransmitted_ipmb_commands); 1899 ipmi_get_stat(intf, retransmitted_ipmb_commands));
1818 out += sprintf(out, "timed_out_ipmb_commands: %d\n", 1900 out += sprintf(out, "timed_out_ipmb_commands: %u\n",
1819 intf->timed_out_ipmb_commands); 1901 ipmi_get_stat(intf, timed_out_ipmb_commands));
1820 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n", 1902 out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n",
1821 intf->timed_out_ipmb_broadcasts); 1903 ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
1822 out += sprintf(out, "sent_ipmb_responses: %d\n", 1904 out += sprintf(out, "sent_ipmb_responses: %u\n",
1823 intf->sent_ipmb_responses); 1905 ipmi_get_stat(intf, sent_ipmb_responses));
1824 out += sprintf(out, "handled_ipmb_responses: %d\n", 1906 out += sprintf(out, "handled_ipmb_responses: %u\n",
1825 intf->handled_ipmb_responses); 1907 ipmi_get_stat(intf, handled_ipmb_responses));
1826 out += sprintf(out, "invalid_ipmb_responses: %d\n", 1908 out += sprintf(out, "invalid_ipmb_responses: %u\n",
1827 intf->invalid_ipmb_responses); 1909 ipmi_get_stat(intf, invalid_ipmb_responses));
1828 out += sprintf(out, "unhandled_ipmb_responses: %d\n", 1910 out += sprintf(out, "unhandled_ipmb_responses: %u\n",
1829 intf->unhandled_ipmb_responses); 1911 ipmi_get_stat(intf, unhandled_ipmb_responses));
1830 out += sprintf(out, "sent_lan_commands: %d\n", 1912 out += sprintf(out, "sent_lan_commands: %u\n",
1831 intf->sent_lan_commands); 1913 ipmi_get_stat(intf, sent_lan_commands));
1832 out += sprintf(out, "sent_lan_command_errs: %d\n", 1914 out += sprintf(out, "sent_lan_command_errs: %u\n",
1833 intf->sent_lan_command_errs); 1915 ipmi_get_stat(intf, sent_lan_command_errs));
1834 out += sprintf(out, "retransmitted_lan_commands: %d\n", 1916 out += sprintf(out, "retransmitted_lan_commands: %u\n",
1835 intf->retransmitted_lan_commands); 1917 ipmi_get_stat(intf, retransmitted_lan_commands));
1836 out += sprintf(out, "timed_out_lan_commands: %d\n", 1918 out += sprintf(out, "timed_out_lan_commands: %u\n",
1837 intf->timed_out_lan_commands); 1919 ipmi_get_stat(intf, timed_out_lan_commands));
1838 out += sprintf(out, "sent_lan_responses: %d\n", 1920 out += sprintf(out, "sent_lan_responses: %u\n",
1839 intf->sent_lan_responses); 1921 ipmi_get_stat(intf, sent_lan_responses));
1840 out += sprintf(out, "handled_lan_responses: %d\n", 1922 out += sprintf(out, "handled_lan_responses: %u\n",
1841 intf->handled_lan_responses); 1923 ipmi_get_stat(intf, handled_lan_responses));
1842 out += sprintf(out, "invalid_lan_responses: %d\n", 1924 out += sprintf(out, "invalid_lan_responses: %u\n",
1843 intf->invalid_lan_responses); 1925 ipmi_get_stat(intf, invalid_lan_responses));
1844 out += sprintf(out, "unhandled_lan_responses: %d\n", 1926 out += sprintf(out, "unhandled_lan_responses: %u\n",
1845 intf->unhandled_lan_responses); 1927 ipmi_get_stat(intf, unhandled_lan_responses));
1846 out += sprintf(out, "handled_commands: %d\n", 1928 out += sprintf(out, "handled_commands: %u\n",
1847 intf->handled_commands); 1929 ipmi_get_stat(intf, handled_commands));
1848 out += sprintf(out, "invalid_commands: %d\n", 1930 out += sprintf(out, "invalid_commands: %u\n",
1849 intf->invalid_commands); 1931 ipmi_get_stat(intf, invalid_commands));
1850 out += sprintf(out, "unhandled_commands: %d\n", 1932 out += sprintf(out, "unhandled_commands: %u\n",
1851 intf->unhandled_commands); 1933 ipmi_get_stat(intf, unhandled_commands));
1852 out += sprintf(out, "invalid_events: %d\n", 1934 out += sprintf(out, "invalid_events: %u\n",
1853 intf->invalid_events); 1935 ipmi_get_stat(intf, invalid_events));
1854 out += sprintf(out, "events: %d\n", 1936 out += sprintf(out, "events: %u\n",
1855 intf->events); 1937 ipmi_get_stat(intf, events));
1856 1938
1857 return (out - ((char *) page)); 1939 return (out - ((char *) page));
1858} 1940}
1859#endif /* CONFIG_PROC_FS */ 1941#endif /* CONFIG_PROC_FS */
1860 1942
1861int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, 1943int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1862 read_proc_t *read_proc, write_proc_t *write_proc, 1944 read_proc_t *read_proc,
1863 void *data, struct module *owner) 1945 void *data, struct module *owner)
1864{ 1946{
1865 int rv = 0; 1947 int rv = 0;
@@ -1886,7 +1968,6 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1886 } else { 1968 } else {
1887 file->data = data; 1969 file->data = data;
1888 file->read_proc = read_proc; 1970 file->read_proc = read_proc;
1889 file->write_proc = write_proc;
1890 file->owner = owner; 1971 file->owner = owner;
1891 1972
1892 mutex_lock(&smi->proc_entry_lock); 1973 mutex_lock(&smi->proc_entry_lock);
@@ -1899,6 +1980,7 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1899 1980
1900 return rv; 1981 return rv;
1901} 1982}
1983EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
1902 1984
1903static int add_proc_entries(ipmi_smi_t smi, int num) 1985static int add_proc_entries(ipmi_smi_t smi, int num)
1904{ 1986{
@@ -1909,23 +1991,22 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
1909 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root); 1991 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1910 if (!smi->proc_dir) 1992 if (!smi->proc_dir)
1911 rv = -ENOMEM; 1993 rv = -ENOMEM;
1912 else { 1994 else
1913 smi->proc_dir->owner = THIS_MODULE; 1995 smi->proc_dir->owner = THIS_MODULE;
1914 }
1915 1996
1916 if (rv == 0) 1997 if (rv == 0)
1917 rv = ipmi_smi_add_proc_entry(smi, "stats", 1998 rv = ipmi_smi_add_proc_entry(smi, "stats",
1918 stat_file_read_proc, NULL, 1999 stat_file_read_proc,
1919 smi, THIS_MODULE); 2000 smi, THIS_MODULE);
1920 2001
1921 if (rv == 0) 2002 if (rv == 0)
1922 rv = ipmi_smi_add_proc_entry(smi, "ipmb", 2003 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1923 ipmb_file_read_proc, NULL, 2004 ipmb_file_read_proc,
1924 smi, THIS_MODULE); 2005 smi, THIS_MODULE);
1925 2006
1926 if (rv == 0) 2007 if (rv == 0)
1927 rv = ipmi_smi_add_proc_entry(smi, "version", 2008 rv = ipmi_smi_add_proc_entry(smi, "version",
1928 version_file_read_proc, NULL, 2009 version_file_read_proc,
1929 smi, THIS_MODULE); 2010 smi, THIS_MODULE);
1930#endif /* CONFIG_PROC_FS */ 2011#endif /* CONFIG_PROC_FS */
1931 2012
@@ -2210,37 +2291,47 @@ static int create_files(struct bmc_device *bmc)
2210 2291
2211 err = device_create_file(&bmc->dev->dev, 2292 err = device_create_file(&bmc->dev->dev,
2212 &bmc->device_id_attr); 2293 &bmc->device_id_attr);
2213 if (err) goto out; 2294 if (err)
2295 goto out;
2214 err = device_create_file(&bmc->dev->dev, 2296 err = device_create_file(&bmc->dev->dev,
2215 &bmc->provides_dev_sdrs_attr); 2297 &bmc->provides_dev_sdrs_attr);
2216 if (err) goto out_devid; 2298 if (err)
2299 goto out_devid;
2217 err = device_create_file(&bmc->dev->dev, 2300 err = device_create_file(&bmc->dev->dev,
2218 &bmc->revision_attr); 2301 &bmc->revision_attr);
2219 if (err) goto out_sdrs; 2302 if (err)
2303 goto out_sdrs;
2220 err = device_create_file(&bmc->dev->dev, 2304 err = device_create_file(&bmc->dev->dev,
2221 &bmc->firmware_rev_attr); 2305 &bmc->firmware_rev_attr);
2222 if (err) goto out_rev; 2306 if (err)
2307 goto out_rev;
2223 err = device_create_file(&bmc->dev->dev, 2308 err = device_create_file(&bmc->dev->dev,
2224 &bmc->version_attr); 2309 &bmc->version_attr);
2225 if (err) goto out_firm; 2310 if (err)
2311 goto out_firm;
2226 err = device_create_file(&bmc->dev->dev, 2312 err = device_create_file(&bmc->dev->dev,
2227 &bmc->add_dev_support_attr); 2313 &bmc->add_dev_support_attr);
2228 if (err) goto out_version; 2314 if (err)
2315 goto out_version;
2229 err = device_create_file(&bmc->dev->dev, 2316 err = device_create_file(&bmc->dev->dev,
2230 &bmc->manufacturer_id_attr); 2317 &bmc->manufacturer_id_attr);
2231 if (err) goto out_add_dev; 2318 if (err)
2319 goto out_add_dev;
2232 err = device_create_file(&bmc->dev->dev, 2320 err = device_create_file(&bmc->dev->dev,
2233 &bmc->product_id_attr); 2321 &bmc->product_id_attr);
2234 if (err) goto out_manu; 2322 if (err)
2323 goto out_manu;
2235 if (bmc->id.aux_firmware_revision_set) { 2324 if (bmc->id.aux_firmware_revision_set) {
2236 err = device_create_file(&bmc->dev->dev, 2325 err = device_create_file(&bmc->dev->dev,
2237 &bmc->aux_firmware_rev_attr); 2326 &bmc->aux_firmware_rev_attr);
2238 if (err) goto out_prod_id; 2327 if (err)
2328 goto out_prod_id;
2239 } 2329 }
2240 if (bmc->guid_set) { 2330 if (bmc->guid_set) {
2241 err = device_create_file(&bmc->dev->dev, 2331 err = device_create_file(&bmc->dev->dev,
2242 &bmc->guid_attr); 2332 &bmc->guid_attr);
2243 if (err) goto out_aux_firm; 2333 if (err)
2334 goto out_aux_firm;
2244 } 2335 }
2245 2336
2246 return 0; 2337 return 0;
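The create_files() hunk above only reflows the one-line "if (err) goto ..." statements onto separate lines; the underlying idiom is the usual goto-based unwind, where each label releases what was set up before the failing step. A reduced standalone sketch with placeholder resources:

#include <stdio.h>
#include <stdlib.h>

static int create_things(void)
{
	int err = -1;
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto out;
	b = malloc(16);
	if (!b)
		goto out_a;

	printf("both resources created\n");
	/* Freed here only to keep the sketch leak-free. */
	free(b);
	free(a);
	return 0;

out_a:
	free(a);
out:
	return err;
}

int main(void)
{
	return create_things() ? EXIT_FAILURE : EXIT_SUCCESS;
}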
@@ -2368,8 +2459,10 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2368 "ipmi_msghandler:" 2459 "ipmi_msghandler:"
2369 " Unable to register bmc device: %d\n", 2460 " Unable to register bmc device: %d\n",
2370 rv); 2461 rv);
2371 /* Don't go to out_err, you can only do that if 2462 /*
2372 the device is registered already. */ 2463 * Don't go to out_err, you can only do that if
2464 * the device is registered already.
2465 */
2373 return rv; 2466 return rv;
2374 } 2467 }
2375 2468
@@ -2560,17 +2653,18 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2560 2653
2561 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2654 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2562 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 2655 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2563 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) 2656 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
2564 {
2565 /* It's the one we want */ 2657 /* It's the one we want */
2566 if (msg->msg.data[0] != 0) { 2658 if (msg->msg.data[0] != 0) {
2567 /* Got an error from the channel, just go on. */ 2659 /* Got an error from the channel, just go on. */
2568 2660
2569 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 2661 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2570 /* If the MC does not support this 2662 /*
2571 command, that is legal. We just 2663 * If the MC does not support this
2572 assume it has one IPMB at channel 2664 * command, that is legal. We just
2573 zero. */ 2665 * assume it has one IPMB at channel
2666 * zero.
2667 */
2574 intf->channels[0].medium 2668 intf->channels[0].medium
2575 = IPMI_CHANNEL_MEDIUM_IPMB; 2669 = IPMI_CHANNEL_MEDIUM_IPMB;
2576 intf->channels[0].protocol 2670 intf->channels[0].protocol
@@ -2591,7 +2685,7 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2591 intf->channels[chan].medium = msg->msg.data[2] & 0x7f; 2685 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2592 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; 2686 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2593 2687
2594 next_channel: 2688 next_channel:
2595 intf->curr_channel++; 2689 intf->curr_channel++;
2596 if (intf->curr_channel >= IPMI_MAX_CHANNELS) 2690 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2597 wake_up(&intf->waitq); 2691 wake_up(&intf->waitq);
@@ -2619,6 +2713,7 @@ void ipmi_poll_interface(ipmi_user_t user)
2619 if (intf->handlers->poll) 2713 if (intf->handlers->poll)
2620 intf->handlers->poll(intf->send_info); 2714 intf->handlers->poll(intf->send_info);
2621} 2715}
2716EXPORT_SYMBOL(ipmi_poll_interface);
2622 2717
2623int ipmi_register_smi(struct ipmi_smi_handlers *handlers, 2718int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2624 void *send_info, 2719 void *send_info,
@@ -2633,14 +2728,18 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2633 ipmi_smi_t tintf; 2728 ipmi_smi_t tintf;
2634 struct list_head *link; 2729 struct list_head *link;
2635 2730
2636 /* Make sure the driver is actually initialized, this handles 2731 /*
2637 problems with initialization order. */ 2732 * Make sure the driver is actually initialized, this handles
2733 * problems with initialization order.
2734 */
2638 if (!initialized) { 2735 if (!initialized) {
2639 rv = ipmi_init_msghandler(); 2736 rv = ipmi_init_msghandler();
2640 if (rv) 2737 if (rv)
2641 return rv; 2738 return rv;
2642 /* The init code doesn't return an error if it was turned 2739 /*
2643 off, but it won't initialize. Check that. */ 2740 * The init code doesn't return an error if it was turned
2741 * off, but it won't initialize. Check that.
2742 */
2644 if (!initialized) 2743 if (!initialized)
2645 return -ENODEV; 2744 return -ENODEV;
2646 } 2745 }
@@ -2688,8 +2787,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2688 spin_lock_init(&intf->maintenance_mode_lock); 2787 spin_lock_init(&intf->maintenance_mode_lock);
2689 INIT_LIST_HEAD(&intf->cmd_rcvrs); 2788 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2690 init_waitqueue_head(&intf->waitq); 2789 init_waitqueue_head(&intf->waitq);
2790 for (i = 0; i < IPMI_NUM_STATS; i++)
2791 atomic_set(&intf->stats[i], 0);
2691 2792
2692 spin_lock_init(&intf->counter_lock);
2693 intf->proc_dir = NULL; 2793 intf->proc_dir = NULL;
2694 2794
2695 mutex_lock(&smi_watchers_mutex); 2795 mutex_lock(&smi_watchers_mutex);
@@ -2717,11 +2817,12 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2717 get_guid(intf); 2817 get_guid(intf);
2718 2818
2719 if ((intf->ipmi_version_major > 1) 2819 if ((intf->ipmi_version_major > 1)
2720 || ((intf->ipmi_version_major == 1) 2820 || ((intf->ipmi_version_major == 1)
2721 && (intf->ipmi_version_minor >= 5))) 2821 && (intf->ipmi_version_minor >= 5))) {
2722 { 2822 /*
2723 /* Start scanning the channels to see what is 2823 * Start scanning the channels to see what is
2724 available. */ 2824 * available.
2825 */
2725 intf->null_user_handler = channel_handler; 2826 intf->null_user_handler = channel_handler;
2726 intf->curr_channel = 0; 2827 intf->curr_channel = 0;
2727 rv = send_channel_info_cmd(intf, 0); 2828 rv = send_channel_info_cmd(intf, 0);
@@ -2769,6 +2870,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2769 2870
2770 return rv; 2871 return rv;
2771} 2872}
2873EXPORT_SYMBOL(ipmi_register_smi);
2772 2874
2773static void cleanup_smi_msgs(ipmi_smi_t intf) 2875static void cleanup_smi_msgs(ipmi_smi_t intf)
2774{ 2876{
@@ -2803,8 +2905,10 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
2803 2905
2804 remove_proc_entries(intf); 2906 remove_proc_entries(intf);
2805 2907
2806 /* Call all the watcher interfaces to tell them that 2908 /*
2807 an interface is gone. */ 2909 * Call all the watcher interfaces to tell them that
2910 * an interface is gone.
2911 */
2808 list_for_each_entry(w, &smi_watchers, link) 2912 list_for_each_entry(w, &smi_watchers, link)
2809 w->smi_gone(intf_num); 2913 w->smi_gone(intf_num);
2810 mutex_unlock(&smi_watchers_mutex); 2914 mutex_unlock(&smi_watchers_mutex);
@@ -2812,22 +2916,21 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
2812 kref_put(&intf->refcount, intf_free); 2916 kref_put(&intf->refcount, intf_free);
2813 return 0; 2917 return 0;
2814} 2918}
2919EXPORT_SYMBOL(ipmi_unregister_smi);
2815 2920
2816static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, 2921static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2817 struct ipmi_smi_msg *msg) 2922 struct ipmi_smi_msg *msg)
2818{ 2923{
2819 struct ipmi_ipmb_addr ipmb_addr; 2924 struct ipmi_ipmb_addr ipmb_addr;
2820 struct ipmi_recv_msg *recv_msg; 2925 struct ipmi_recv_msg *recv_msg;
2821 unsigned long flags;
2822 2926
2823 2927 /*
2824 /* This is 11, not 10, because the response must contain a 2928 * This is 11, not 10, because the response must contain a
2825 * completion code. */ 2929 * completion code.
2930 */
2826 if (msg->rsp_size < 11) { 2931 if (msg->rsp_size < 11) {
2827 /* Message not big enough, just ignore it. */ 2932 /* Message not big enough, just ignore it. */
2828 spin_lock_irqsave(&intf->counter_lock, flags); 2933 ipmi_inc_stat(intf, invalid_ipmb_responses);
2829 intf->invalid_ipmb_responses++;
2830 spin_unlock_irqrestore(&intf->counter_lock, flags);
2831 return 0; 2934 return 0;
2832 } 2935 }
2833 2936
@@ -2841,37 +2944,38 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2841 ipmb_addr.channel = msg->rsp[3] & 0x0f; 2944 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2842 ipmb_addr.lun = msg->rsp[7] & 3; 2945 ipmb_addr.lun = msg->rsp[7] & 3;
2843 2946
2844 /* It's a response from a remote entity. Look up the sequence 2947 /*
2845 number and handle the response. */ 2948 * It's a response from a remote entity. Look up the sequence
2949 * number and handle the response.
2950 */
2846 if (intf_find_seq(intf, 2951 if (intf_find_seq(intf,
2847 msg->rsp[7] >> 2, 2952 msg->rsp[7] >> 2,
2848 msg->rsp[3] & 0x0f, 2953 msg->rsp[3] & 0x0f,
2849 msg->rsp[8], 2954 msg->rsp[8],
2850 (msg->rsp[4] >> 2) & (~1), 2955 (msg->rsp[4] >> 2) & (~1),
2851 (struct ipmi_addr *) &(ipmb_addr), 2956 (struct ipmi_addr *) &(ipmb_addr),
2852 &recv_msg)) 2957 &recv_msg)) {
2853 { 2958 /*
2854 /* We were unable to find the sequence number, 2959 * We were unable to find the sequence number,
2855 so just nuke the message. */ 2960 * so just nuke the message.
2856 spin_lock_irqsave(&intf->counter_lock, flags); 2961 */
2857 intf->unhandled_ipmb_responses++; 2962 ipmi_inc_stat(intf, unhandled_ipmb_responses);
2858 spin_unlock_irqrestore(&intf->counter_lock, flags);
2859 return 0; 2963 return 0;
2860 } 2964 }
2861 2965
2862 memcpy(recv_msg->msg_data, 2966 memcpy(recv_msg->msg_data,
2863 &(msg->rsp[9]), 2967 &(msg->rsp[9]),
2864 msg->rsp_size - 9); 2968 msg->rsp_size - 9);
2865 /* THe other fields matched, so no need to set them, except 2969 /*
2866 for netfn, which needs to be the response that was 2970 * The other fields matched, so no need to set them, except
2867 returned, not the request value. */ 2971 * for netfn, which needs to be the response that was
2972 * returned, not the request value.
2973 */
2868 recv_msg->msg.netfn = msg->rsp[4] >> 2; 2974 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2869 recv_msg->msg.data = recv_msg->msg_data; 2975 recv_msg->msg.data = recv_msg->msg_data;
2870 recv_msg->msg.data_len = msg->rsp_size - 10; 2976 recv_msg->msg.data_len = msg->rsp_size - 10;
2871 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 2977 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2872 spin_lock_irqsave(&intf->counter_lock, flags); 2978 ipmi_inc_stat(intf, handled_ipmb_responses);
2873 intf->handled_ipmb_responses++;
2874 spin_unlock_irqrestore(&intf->counter_lock, flags);
2875 deliver_response(recv_msg); 2979 deliver_response(recv_msg);
2876 2980
2877 return 0; 2981 return 0;
@@ -2888,14 +2992,11 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2888 ipmi_user_t user = NULL; 2992 ipmi_user_t user = NULL;
2889 struct ipmi_ipmb_addr *ipmb_addr; 2993 struct ipmi_ipmb_addr *ipmb_addr;
2890 struct ipmi_recv_msg *recv_msg; 2994 struct ipmi_recv_msg *recv_msg;
2891 unsigned long flags;
2892 struct ipmi_smi_handlers *handlers; 2995 struct ipmi_smi_handlers *handlers;
2893 2996
2894 if (msg->rsp_size < 10) { 2997 if (msg->rsp_size < 10) {
2895 /* Message not big enough, just ignore it. */ 2998 /* Message not big enough, just ignore it. */
2896 spin_lock_irqsave(&intf->counter_lock, flags); 2999 ipmi_inc_stat(intf, invalid_commands);
2897 intf->invalid_commands++;
2898 spin_unlock_irqrestore(&intf->counter_lock, flags);
2899 return 0; 3000 return 0;
2900 } 3001 }
2901 3002
@@ -2919,19 +3020,17 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2919 3020
2920 if (user == NULL) { 3021 if (user == NULL) {
2921 /* We didn't find a user, deliver an error response. */ 3022 /* We didn't find a user, deliver an error response. */
2922 spin_lock_irqsave(&intf->counter_lock, flags); 3023 ipmi_inc_stat(intf, unhandled_commands);
2923 intf->unhandled_commands++;
2924 spin_unlock_irqrestore(&intf->counter_lock, flags);
2925 3024
2926 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3025 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2927 msg->data[1] = IPMI_SEND_MSG_CMD; 3026 msg->data[1] = IPMI_SEND_MSG_CMD;
2928 msg->data[2] = msg->rsp[3]; 3027 msg->data[2] = msg->rsp[3];
2929 msg->data[3] = msg->rsp[6]; 3028 msg->data[3] = msg->rsp[6];
2930 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3029 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2931 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); 3030 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2932 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; 3031 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2933 /* rqseq/lun */ 3032 /* rqseq/lun */
2934 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3033 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2935 msg->data[8] = msg->rsp[8]; /* cmd */ 3034 msg->data[8] = msg->rsp[8]; /* cmd */
2936 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3035 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2937 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); 3036 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
@@ -2950,23 +3049,25 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2950 handlers = intf->handlers; 3049 handlers = intf->handlers;
2951 if (handlers) { 3050 if (handlers) {
2952 handlers->sender(intf->send_info, msg, 0); 3051 handlers->sender(intf->send_info, msg, 0);
2953 /* We used the message, so return the value 3052 /*
2954 that causes it to not be freed or 3053 * We used the message, so return the value
2955 queued. */ 3054 * that causes it to not be freed or
3055 * queued.
3056 */
2956 rv = -1; 3057 rv = -1;
2957 } 3058 }
2958 rcu_read_unlock(); 3059 rcu_read_unlock();
2959 } else { 3060 } else {
2960 /* Deliver the message to the user. */ 3061 /* Deliver the message to the user. */
2961 spin_lock_irqsave(&intf->counter_lock, flags); 3062 ipmi_inc_stat(intf, handled_commands);
2962 intf->handled_commands++;
2963 spin_unlock_irqrestore(&intf->counter_lock, flags);
2964 3063
2965 recv_msg = ipmi_alloc_recv_msg(); 3064 recv_msg = ipmi_alloc_recv_msg();
2966 if (!recv_msg) { 3065 if (!recv_msg) {
2967 /* We couldn't allocate memory for the 3066 /*
2968 message, so requeue it for handling 3067 * We couldn't allocate memory for the
2969 later. */ 3068 * message, so requeue it for handling
3069 * later.
3070 */
2970 rv = 1; 3071 rv = 1;
2971 kref_put(&user->refcount, free_user); 3072 kref_put(&user->refcount, free_user);
2972 } else { 3073 } else {
@@ -2977,8 +3078,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2977 ipmb_addr->lun = msg->rsp[7] & 3; 3078 ipmb_addr->lun = msg->rsp[7] & 3;
2978 ipmb_addr->channel = msg->rsp[3] & 0xf; 3079 ipmb_addr->channel = msg->rsp[3] & 0xf;
2979 3080
2980 /* Extract the rest of the message information 3081 /*
2981 from the IPMB header.*/ 3082 * Extract the rest of the message information
3083 * from the IPMB header.
3084 */
2982 recv_msg->user = user; 3085 recv_msg->user = user;
2983 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3086 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2984 recv_msg->msgid = msg->rsp[7] >> 2; 3087 recv_msg->msgid = msg->rsp[7] >> 2;
@@ -2986,8 +3089,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2986 recv_msg->msg.cmd = msg->rsp[8]; 3089 recv_msg->msg.cmd = msg->rsp[8];
2987 recv_msg->msg.data = recv_msg->msg_data; 3090 recv_msg->msg.data = recv_msg->msg_data;
2988 3091
2989 /* We chop off 10, not 9 bytes because the checksum 3092 /*
2990 at the end also needs to be removed. */ 3093 * We chop off 10, not 9 bytes because the checksum
3094 * at the end also needs to be removed.
3095 */
2991 recv_msg->msg.data_len = msg->rsp_size - 10; 3096 recv_msg->msg.data_len = msg->rsp_size - 10;
2992 memcpy(recv_msg->msg_data, 3097 memcpy(recv_msg->msg_data,
2993 &(msg->rsp[9]), 3098 &(msg->rsp[9]),
@@ -3004,16 +3109,15 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
3004{ 3109{
3005 struct ipmi_lan_addr lan_addr; 3110 struct ipmi_lan_addr lan_addr;
3006 struct ipmi_recv_msg *recv_msg; 3111 struct ipmi_recv_msg *recv_msg;
3007 unsigned long flags;
3008 3112
3009 3113
3010 /* This is 13, not 12, because the response must contain a 3114 /*
3011 * completion code. */ 3115 * This is 13, not 12, because the response must contain a
3116 * completion code.
3117 */
3012 if (msg->rsp_size < 13) { 3118 if (msg->rsp_size < 13) {
3013 /* Message not big enough, just ignore it. */ 3119 /* Message not big enough, just ignore it. */
3014 spin_lock_irqsave(&intf->counter_lock, flags); 3120 ipmi_inc_stat(intf, invalid_lan_responses);
3015 intf->invalid_lan_responses++;
3016 spin_unlock_irqrestore(&intf->counter_lock, flags);
3017 return 0; 3121 return 0;
3018 } 3122 }
3019 3123
@@ -3030,37 +3134,38 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
3030 lan_addr.privilege = msg->rsp[3] >> 4; 3134 lan_addr.privilege = msg->rsp[3] >> 4;
3031 lan_addr.lun = msg->rsp[9] & 3; 3135 lan_addr.lun = msg->rsp[9] & 3;
3032 3136
3033 /* It's a response from a remote entity. Look up the sequence 3137 /*
3034 number and handle the response. */ 3138 * It's a response from a remote entity. Look up the sequence
3139 * number and handle the response.
3140 */
3035 if (intf_find_seq(intf, 3141 if (intf_find_seq(intf,
3036 msg->rsp[9] >> 2, 3142 msg->rsp[9] >> 2,
3037 msg->rsp[3] & 0x0f, 3143 msg->rsp[3] & 0x0f,
3038 msg->rsp[10], 3144 msg->rsp[10],
3039 (msg->rsp[6] >> 2) & (~1), 3145 (msg->rsp[6] >> 2) & (~1),
3040 (struct ipmi_addr *) &(lan_addr), 3146 (struct ipmi_addr *) &(lan_addr),
3041 &recv_msg)) 3147 &recv_msg)) {
3042 { 3148 /*
3043 /* We were unable to find the sequence number, 3149 * We were unable to find the sequence number,
3044 so just nuke the message. */ 3150 * so just nuke the message.
3045 spin_lock_irqsave(&intf->counter_lock, flags); 3151 */
3046 intf->unhandled_lan_responses++; 3152 ipmi_inc_stat(intf, unhandled_lan_responses);
3047 spin_unlock_irqrestore(&intf->counter_lock, flags);
3048 return 0; 3153 return 0;
3049 } 3154 }
3050 3155
3051 memcpy(recv_msg->msg_data, 3156 memcpy(recv_msg->msg_data,
3052 &(msg->rsp[11]), 3157 &(msg->rsp[11]),
3053 msg->rsp_size - 11); 3158 msg->rsp_size - 11);
3054 /* The other fields matched, so no need to set them, except 3159 /*
3055 for netfn, which needs to be the response that was 3160 * The other fields matched, so no need to set them, except
3056 returned, not the request value. */ 3161 * for netfn, which needs to be the response that was
3162 * returned, not the request value.
3163 */
3057 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3164 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3058 recv_msg->msg.data = recv_msg->msg_data; 3165 recv_msg->msg.data = recv_msg->msg_data;
3059 recv_msg->msg.data_len = msg->rsp_size - 12; 3166 recv_msg->msg.data_len = msg->rsp_size - 12;
3060 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3167 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3061 spin_lock_irqsave(&intf->counter_lock, flags); 3168 ipmi_inc_stat(intf, handled_lan_responses);
3062 intf->handled_lan_responses++;
3063 spin_unlock_irqrestore(&intf->counter_lock, flags);
3064 deliver_response(recv_msg); 3169 deliver_response(recv_msg);
3065 3170
3066 return 0; 3171 return 0;
@@ -3077,13 +3182,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3077 ipmi_user_t user = NULL; 3182 ipmi_user_t user = NULL;
3078 struct ipmi_lan_addr *lan_addr; 3183 struct ipmi_lan_addr *lan_addr;
3079 struct ipmi_recv_msg *recv_msg; 3184 struct ipmi_recv_msg *recv_msg;
3080 unsigned long flags;
3081 3185
3082 if (msg->rsp_size < 12) { 3186 if (msg->rsp_size < 12) {
3083 /* Message not big enough, just ignore it. */ 3187 /* Message not big enough, just ignore it. */
3084 spin_lock_irqsave(&intf->counter_lock, flags); 3188 ipmi_inc_stat(intf, invalid_commands);
3085 intf->invalid_commands++;
3086 spin_unlock_irqrestore(&intf->counter_lock, flags);
3087 return 0; 3189 return 0;
3088 } 3190 }
3089 3191
@@ -3107,23 +3209,23 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3107 3209
3108 if (user == NULL) { 3210 if (user == NULL) {
3109 /* We didn't find a user, just give up. */ 3211 /* We didn't find a user, just give up. */
3110 spin_lock_irqsave(&intf->counter_lock, flags); 3212 ipmi_inc_stat(intf, unhandled_commands);
3111 intf->unhandled_commands++;
3112 spin_unlock_irqrestore(&intf->counter_lock, flags);
3113 3213
3114 rv = 0; /* Don't do anything with these messages, just 3214 /*
3115 allow them to be freed. */ 3215 * Don't do anything with these messages, just allow
3216 * them to be freed.
3217 */
3218 rv = 0;
3116 } else { 3219 } else {
3117 /* Deliver the message to the user. */ 3220 /* Deliver the message to the user. */
3118 spin_lock_irqsave(&intf->counter_lock, flags); 3221 ipmi_inc_stat(intf, handled_commands);
3119 intf->handled_commands++;
3120 spin_unlock_irqrestore(&intf->counter_lock, flags);
3121 3222
3122 recv_msg = ipmi_alloc_recv_msg(); 3223 recv_msg = ipmi_alloc_recv_msg();
3123 if (!recv_msg) { 3224 if (!recv_msg) {
3124 /* We couldn't allocate memory for the 3225 /*
3125 message, so requeue it for handling 3226 * We couldn't allocate memory for the
3126 later. */ 3227 * message, so requeue it for handling later.
3228 */
3127 rv = 1; 3229 rv = 1;
3128 kref_put(&user->refcount, free_user); 3230 kref_put(&user->refcount, free_user);
3129 } else { 3231 } else {
@@ -3137,8 +3239,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3137 lan_addr->channel = msg->rsp[3] & 0xf; 3239 lan_addr->channel = msg->rsp[3] & 0xf;
3138 lan_addr->privilege = msg->rsp[3] >> 4; 3240 lan_addr->privilege = msg->rsp[3] >> 4;
3139 3241
3140 /* Extract the rest of the message information 3242 /*
3141 from the IPMB header.*/ 3243 * Extract the rest of the message information
3244 * from the IPMB header.
3245 */
3142 recv_msg->user = user; 3246 recv_msg->user = user;
3143 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3247 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3144 recv_msg->msgid = msg->rsp[9] >> 2; 3248 recv_msg->msgid = msg->rsp[9] >> 2;
@@ -3146,8 +3250,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3146 recv_msg->msg.cmd = msg->rsp[10]; 3250 recv_msg->msg.cmd = msg->rsp[10];
3147 recv_msg->msg.data = recv_msg->msg_data; 3251 recv_msg->msg.data = recv_msg->msg_data;
3148 3252
3149 /* We chop off 12, not 11 bytes because the checksum 3253 /*
3150 at the end also needs to be removed. */ 3254 * We chop off 12, not 11 bytes because the checksum
3255 * at the end also needs to be removed.
3256 */
3151 recv_msg->msg.data_len = msg->rsp_size - 12; 3257 recv_msg->msg.data_len = msg->rsp_size - 12;
3152 memcpy(recv_msg->msg_data, 3258 memcpy(recv_msg->msg_data,
3153 &(msg->rsp[11]), 3259 &(msg->rsp[11]),
@@ -3163,7 +3269,7 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3163 struct ipmi_smi_msg *msg) 3269 struct ipmi_smi_msg *msg)
3164{ 3270{
3165 struct ipmi_system_interface_addr *smi_addr; 3271 struct ipmi_system_interface_addr *smi_addr;
3166 3272
3167 recv_msg->msgid = 0; 3273 recv_msg->msgid = 0;
3168 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr); 3274 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3169 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3275 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -3189,9 +3295,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
3189 3295
3190 if (msg->rsp_size < 19) { 3296 if (msg->rsp_size < 19) {
3191 /* Message is too small to be an IPMB event. */ 3297 /* Message is too small to be an IPMB event. */
3192 spin_lock_irqsave(&intf->counter_lock, flags); 3298 ipmi_inc_stat(intf, invalid_events);
3193 intf->invalid_events++;
3194 spin_unlock_irqrestore(&intf->counter_lock, flags);
3195 return 0; 3299 return 0;
3196 } 3300 }
3197 3301
@@ -3204,12 +3308,12 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
3204 3308
3205 spin_lock_irqsave(&intf->events_lock, flags); 3309 spin_lock_irqsave(&intf->events_lock, flags);
3206 3310
3207 spin_lock(&intf->counter_lock); 3311 ipmi_inc_stat(intf, events);
3208 intf->events++;
3209 spin_unlock(&intf->counter_lock);
3210 3312
3211 /* Allocate and fill in one message for every user that is getting 3313 /*
3212 events. */ 3314 * Allocate and fill in one message for every user that is
3315 * getting events.
3316 */
3213 rcu_read_lock(); 3317 rcu_read_lock();
3214 list_for_each_entry_rcu(user, &intf->users, link) { 3318 list_for_each_entry_rcu(user, &intf->users, link) {
3215 if (!user->gets_events) 3319 if (!user->gets_events)
@@ -3223,9 +3327,11 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
3223 list_del(&recv_msg->link); 3327 list_del(&recv_msg->link);
3224 ipmi_free_recv_msg(recv_msg); 3328 ipmi_free_recv_msg(recv_msg);
3225 } 3329 }
3226 /* We couldn't allocate memory for the 3330 /*
3227 message, so requeue it for handling 3331 * We couldn't allocate memory for the
3228 later. */ 3332 * message, so requeue it for handling
3333 * later.
3334 */
3229 rv = 1; 3335 rv = 1;
3230 goto out; 3336 goto out;
3231 } 3337 }
@@ -3246,13 +3352,17 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
3246 deliver_response(recv_msg); 3352 deliver_response(recv_msg);
3247 } 3353 }
3248 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { 3354 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3249 /* No one to receive the message, put it in queue if there's 3355 /*
3250 not already too many things in the queue. */ 3356 * No one to receive the message, put it in queue if there's
3357 * not already too many things in the queue.
3358 */
3251 recv_msg = ipmi_alloc_recv_msg(); 3359 recv_msg = ipmi_alloc_recv_msg();
3252 if (!recv_msg) { 3360 if (!recv_msg) {
3253 /* We couldn't allocate memory for the 3361 /*
3254 message, so requeue it for handling 3362 * We couldn't allocate memory for the
3255 later. */ 3363 * message, so requeue it for handling
3364 * later.
3365 */
3256 rv = 1; 3366 rv = 1;
3257 goto out; 3367 goto out;
3258 } 3368 }
@@ -3260,11 +3370,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
3260 copy_event_into_recv_msg(recv_msg, msg); 3370 copy_event_into_recv_msg(recv_msg, msg);
3261 list_add_tail(&(recv_msg->link), &(intf->waiting_events)); 3371 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3262 intf->waiting_events_count++; 3372 intf->waiting_events_count++;
3263 } else { 3373 } else if (!intf->event_msg_printed) {
3264 /* There's too many things in the queue, discard this 3374 /*
3265 message. */ 3375 * There's too many things in the queue, discard this
3266 printk(KERN_WARNING PFX "Event queue full, discarding an" 3376 * message.
3267 " incoming event\n"); 3377 */
3378 printk(KERN_WARNING PFX "Event queue full, discarding"
3379 " incoming events\n");
3380 intf->event_msg_printed = 1;
3268 } 3381 }
3269 3382
3270 out: 3383 out:
@@ -3277,16 +3390,15 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
3277 struct ipmi_smi_msg *msg) 3390 struct ipmi_smi_msg *msg)
3278{ 3391{
3279 struct ipmi_recv_msg *recv_msg; 3392 struct ipmi_recv_msg *recv_msg;
3280 unsigned long flags;
3281 struct ipmi_user *user; 3393 struct ipmi_user *user;
3282 3394
3283 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 3395 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3284 if (recv_msg == NULL) 3396 if (recv_msg == NULL) {
3285 { 3397 printk(KERN_WARNING
3286 printk(KERN_WARNING"IPMI message received with no owner. This\n" 3398 "IPMI message received with no owner. This\n"
3287 "could be because of a malformed message, or\n" 3399 "could be because of a malformed message, or\n"
3288 "because of a hardware error. Contact your\n" 3400 "because of a hardware error. Contact your\n"
3289 "hardware vender for assistance\n"); 3401 "hardware vender for assistance\n");
3290 return 0; 3402 return 0;
3291 } 3403 }
3292 3404
@@ -3294,16 +3406,12 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
3294 /* Make sure the user still exists. */ 3406 /* Make sure the user still exists. */
3295 if (user && !user->valid) { 3407 if (user && !user->valid) {
3296 /* The user for the message went away, so give up. */ 3408 /* The user for the message went away, so give up. */
3297 spin_lock_irqsave(&intf->counter_lock, flags); 3409 ipmi_inc_stat(intf, unhandled_local_responses);
3298 intf->unhandled_local_responses++;
3299 spin_unlock_irqrestore(&intf->counter_lock, flags);
3300 ipmi_free_recv_msg(recv_msg); 3410 ipmi_free_recv_msg(recv_msg);
3301 } else { 3411 } else {
3302 struct ipmi_system_interface_addr *smi_addr; 3412 struct ipmi_system_interface_addr *smi_addr;
3303 3413
3304 spin_lock_irqsave(&intf->counter_lock, flags); 3414 ipmi_inc_stat(intf, handled_local_responses);
3305 intf->handled_local_responses++;
3306 spin_unlock_irqrestore(&intf->counter_lock, flags);
3307 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3415 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3308 recv_msg->msgid = msg->msgid; 3416 recv_msg->msgid = msg->msgid;
3309 smi_addr = ((struct ipmi_system_interface_addr *) 3417 smi_addr = ((struct ipmi_system_interface_addr *)
@@ -3324,9 +3432,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
3324 return 0; 3432 return 0;
3325} 3433}
3326 3434
3327/* Handle a new message. Return 1 if the message should be requeued, 3435/*
3328 0 if the message should be freed, or -1 if the message should not 3436 * Handle a new message. Return 1 if the message should be requeued,
3329 be freed or requeued. */ 3437 * 0 if the message should be freed, or -1 if the message should not
3438 * be freed or requeued.
3439 */
3330static int handle_new_recv_msg(ipmi_smi_t intf, 3440static int handle_new_recv_msg(ipmi_smi_t intf,
3331 struct ipmi_smi_msg *msg) 3441 struct ipmi_smi_msg *msg)
3332{ 3442{
@@ -3351,10 +3461,12 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
3351 msg->rsp[1] = msg->data[1]; 3461 msg->rsp[1] = msg->data[1];
3352 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; 3462 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3353 msg->rsp_size = 3; 3463 msg->rsp_size = 3;
3354 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */ 3464 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
3355 || (msg->rsp[1] != msg->data[1])) /* Command */ 3465 || (msg->rsp[1] != msg->data[1])) {
3356 { 3466 /*
3357 /* The response is not even marginally correct. */ 3467 * The NetFN and Command in the response is not even
3468 * marginally correct.
3469 */
3358 printk(KERN_WARNING PFX "BMC returned incorrect response," 3470 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3359 " expected netfn %x cmd %x, got netfn %x cmd %x\n", 3471 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3360 (msg->data[0] >> 2) | 1, msg->data[1], 3472 (msg->data[0] >> 2) | 1, msg->data[1],
@@ -3369,10 +3481,11 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
3369 3481
3370 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 3482 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3371 && (msg->rsp[1] == IPMI_SEND_MSG_CMD) 3483 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3372 && (msg->user_data != NULL)) 3484 && (msg->user_data != NULL)) {
3373 { 3485 /*
3374 /* It's a response to a response we sent. For this we 3486 * It's a response to a response we sent. For this we
3375 deliver a send message response to the user. */ 3487 * deliver a send message response to the user.
3488 */
3376 struct ipmi_recv_msg *recv_msg = msg->user_data; 3489 struct ipmi_recv_msg *recv_msg = msg->user_data;
3377 3490
3378 requeue = 0; 3491 requeue = 0;
@@ -3398,8 +3511,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
3398 recv_msg->msg_data[0] = msg->rsp[2]; 3511 recv_msg->msg_data[0] = msg->rsp[2];
3399 deliver_response(recv_msg); 3512 deliver_response(recv_msg);
3400 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 3513 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3401 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) 3514 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
3402 {
3403 /* It's from the receive queue. */ 3515 /* It's from the receive queue. */
3404 chan = msg->rsp[3] & 0xf; 3516 chan = msg->rsp[3] & 0xf;
3405 if (chan >= IPMI_MAX_CHANNELS) { 3517 if (chan >= IPMI_MAX_CHANNELS) {
@@ -3411,12 +3523,16 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
3411 switch (intf->channels[chan].medium) { 3523 switch (intf->channels[chan].medium) {
3412 case IPMI_CHANNEL_MEDIUM_IPMB: 3524 case IPMI_CHANNEL_MEDIUM_IPMB:
3413 if (msg->rsp[4] & 0x04) { 3525 if (msg->rsp[4] & 0x04) {
3414 /* It's a response, so find the 3526 /*
3415 requesting message and send it up. */ 3527 * It's a response, so find the
3528 * requesting message and send it up.
3529 */
3416 requeue = handle_ipmb_get_msg_rsp(intf, msg); 3530 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3417 } else { 3531 } else {
3418 /* It's a command to the SMS from some other 3532 /*
3419 entity. Handle that. */ 3533 * It's a command to the SMS from some other
3534 * entity. Handle that.
3535 */
3420 requeue = handle_ipmb_get_msg_cmd(intf, msg); 3536 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3421 } 3537 }
3422 break; 3538 break;
@@ -3424,25 +3540,30 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
3424 case IPMI_CHANNEL_MEDIUM_8023LAN: 3540 case IPMI_CHANNEL_MEDIUM_8023LAN:
3425 case IPMI_CHANNEL_MEDIUM_ASYNC: 3541 case IPMI_CHANNEL_MEDIUM_ASYNC:
3426 if (msg->rsp[6] & 0x04) { 3542 if (msg->rsp[6] & 0x04) {
3427 /* It's a response, so find the 3543 /*
3428 requesting message and send it up. */ 3544 * It's a response, so find the
3545 * requesting message and send it up.
3546 */
3429 requeue = handle_lan_get_msg_rsp(intf, msg); 3547 requeue = handle_lan_get_msg_rsp(intf, msg);
3430 } else { 3548 } else {
3431 /* It's a command to the SMS from some other 3549 /*
3432 entity. Handle that. */ 3550 * It's a command to the SMS from some other
3551 * entity. Handle that.
3552 */
3433 requeue = handle_lan_get_msg_cmd(intf, msg); 3553 requeue = handle_lan_get_msg_cmd(intf, msg);
3434 } 3554 }
3435 break; 3555 break;
3436 3556
3437 default: 3557 default:
3438 /* We don't handle the channel type, so just 3558 /*
3439 * free the message. */ 3559 * We don't handle the channel type, so just
3560 * free the message.
3561 */
3440 requeue = 0; 3562 requeue = 0;
3441 } 3563 }
3442 3564
3443 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 3565 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3444 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) 3566 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
3445 {
 3446 /* It's an asynchronous event. */  3567 /* It's an asynchronous event. */
3447 requeue = handle_read_event_rsp(intf, msg); 3568 requeue = handle_read_event_rsp(intf, msg);
3448 } else { 3569 } else {
@@ -3458,71 +3579,82 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
3458void ipmi_smi_msg_received(ipmi_smi_t intf, 3579void ipmi_smi_msg_received(ipmi_smi_t intf,
3459 struct ipmi_smi_msg *msg) 3580 struct ipmi_smi_msg *msg)
3460{ 3581{
3461 unsigned long flags; 3582 unsigned long flags = 0; /* keep us warning-free. */
3462 int rv; 3583 int rv;
3584 int run_to_completion;
3463 3585
3464 3586
3465 if ((msg->data_size >= 2) 3587 if ((msg->data_size >= 2)
3466 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 3588 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3467 && (msg->data[1] == IPMI_SEND_MSG_CMD) 3589 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3468 && (msg->user_data == NULL)) 3590 && (msg->user_data == NULL)) {
3469 { 3591 /*
3470 /* This is the local response to a command send, start 3592 * This is the local response to a command send, start
3471 the timer for these. The user_data will not be 3593 * the timer for these. The user_data will not be
3472 NULL if this is a response send, and we will let 3594 * NULL if this is a response send, and we will let
3473 response sends just go through. */ 3595 * response sends just go through.
3474 3596 */
3475 /* Check for errors, if we get certain errors (ones 3597
3476 that mean basically we can try again later), we 3598 /*
3477 ignore them and start the timer. Otherwise we 3599 * Check for errors, if we get certain errors (ones
3478 report the error immediately. */ 3600 * that mean basically we can try again later), we
3601 * ignore them and start the timer. Otherwise we
3602 * report the error immediately.
3603 */
3479 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) 3604 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3480 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) 3605 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3481 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) 3606 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3482 && (msg->rsp[2] != IPMI_BUS_ERR) 3607 && (msg->rsp[2] != IPMI_BUS_ERR)
3483 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) 3608 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
3484 {
3485 int chan = msg->rsp[3] & 0xf; 3609 int chan = msg->rsp[3] & 0xf;
3486 3610
3487 /* Got an error sending the message, handle it. */ 3611 /* Got an error sending the message, handle it. */
3488 spin_lock_irqsave(&intf->counter_lock, flags);
3489 if (chan >= IPMI_MAX_CHANNELS) 3612 if (chan >= IPMI_MAX_CHANNELS)
3490 ; /* This shouldn't happen */ 3613 ; /* This shouldn't happen */
3491 else if ((intf->channels[chan].medium 3614 else if ((intf->channels[chan].medium
3492 == IPMI_CHANNEL_MEDIUM_8023LAN) 3615 == IPMI_CHANNEL_MEDIUM_8023LAN)
3493 || (intf->channels[chan].medium 3616 || (intf->channels[chan].medium
3494 == IPMI_CHANNEL_MEDIUM_ASYNC)) 3617 == IPMI_CHANNEL_MEDIUM_ASYNC))
3495 intf->sent_lan_command_errs++; 3618 ipmi_inc_stat(intf, sent_lan_command_errs);
3496 else 3619 else
3497 intf->sent_ipmb_command_errs++; 3620 ipmi_inc_stat(intf, sent_ipmb_command_errs);
3498 spin_unlock_irqrestore(&intf->counter_lock, flags);
3499 intf_err_seq(intf, msg->msgid, msg->rsp[2]); 3621 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3500 } else { 3622 } else
3501 /* The message was sent, start the timer. */ 3623 /* The message was sent, start the timer. */
3502 intf_start_seq_timer(intf, msg->msgid); 3624 intf_start_seq_timer(intf, msg->msgid);
3503 }
3504 3625
3505 ipmi_free_smi_msg(msg); 3626 ipmi_free_smi_msg(msg);
3506 goto out; 3627 goto out;
3507 } 3628 }
3508 3629
3509 /* To preserve message order, if the list is not empty, we 3630 /*
3510 tack this message onto the end of the list. */ 3631 * To preserve message order, if the list is not empty, we
3511 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3632 * tack this message onto the end of the list.
3633 */
3634 run_to_completion = intf->run_to_completion;
3635 if (!run_to_completion)
3636 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3512 if (!list_empty(&intf->waiting_msgs)) { 3637 if (!list_empty(&intf->waiting_msgs)) {
3513 list_add_tail(&msg->link, &intf->waiting_msgs); 3638 list_add_tail(&msg->link, &intf->waiting_msgs);
3514 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3639 if (!run_to_completion)
3640 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3515 goto out; 3641 goto out;
3516 } 3642 }
3517 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3643 if (!run_to_completion)
3518 3644 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3645
3519 rv = handle_new_recv_msg(intf, msg); 3646 rv = handle_new_recv_msg(intf, msg);
3520 if (rv > 0) { 3647 if (rv > 0) {
3521 /* Could not handle the message now, just add it to a 3648 /*
3522 list to handle later. */ 3649 * Could not handle the message now, just add it to a
3523 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3650 * list to handle later.
3651 */
3652 run_to_completion = intf->run_to_completion;
3653 if (!run_to_completion)
3654 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3524 list_add_tail(&msg->link, &intf->waiting_msgs); 3655 list_add_tail(&msg->link, &intf->waiting_msgs);
3525 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3656 if (!run_to_completion)
3657 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3526 } else if (rv == 0) { 3658 } else if (rv == 0) {
3527 ipmi_free_smi_msg(msg); 3659 ipmi_free_smi_msg(msg);
3528 } 3660 }
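ipmi_smi_msg_received() now checks intf->run_to_completion before touching waiting_msgs_lock: the panic paths later in this diff set that flag, and once the system is single-threaded and polling, taking the spinlock again could deadlock against the poller itself. A minimal sketch of the idiom, using a hypothetical helper name (queue_waiting_msg is not in the driver):

	static void queue_waiting_msg(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
	{
		unsigned long flags = 0;	/* keep the compiler quiet */
		int rtc = intf->run_to_completion;

		if (!rtc)
			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
		list_add_tail(&msg->link, &intf->waiting_msgs);
		if (!rtc)
			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
	}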
@@ -3530,6 +3662,7 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
3530 out: 3662 out:
3531 return; 3663 return;
3532} 3664}
3665EXPORT_SYMBOL(ipmi_smi_msg_received);
3533 3666
3534void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) 3667void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3535{ 3668{
@@ -3544,7 +3677,7 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3544 } 3677 }
3545 rcu_read_unlock(); 3678 rcu_read_unlock();
3546} 3679}
3547 3680EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3548 3681
3549static struct ipmi_smi_msg * 3682static struct ipmi_smi_msg *
3550smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, 3683smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
@@ -3552,14 +3685,16 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3552{ 3685{
3553 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 3686 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3554 if (!smi_msg) 3687 if (!smi_msg)
3555 /* If we can't allocate the message, then just return, we 3688 /*
3556 get 4 retries, so this should be ok. */ 3689 * If we can't allocate the message, then just return, we
3690 * get 4 retries, so this should be ok.
3691 */
3557 return NULL; 3692 return NULL;
3558 3693
3559 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 3694 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3560 smi_msg->data_size = recv_msg->msg.data_len; 3695 smi_msg->data_size = recv_msg->msg.data_len;
3561 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 3696 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3562 3697
3563#ifdef DEBUG_MSGING 3698#ifdef DEBUG_MSGING
3564 { 3699 {
3565 int m; 3700 int m;
@@ -3594,28 +3729,26 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3594 ent->inuse = 0; 3729 ent->inuse = 0;
3595 msg = ent->recv_msg; 3730 msg = ent->recv_msg;
3596 list_add_tail(&msg->link, timeouts); 3731 list_add_tail(&msg->link, timeouts);
3597 spin_lock(&intf->counter_lock);
3598 if (ent->broadcast) 3732 if (ent->broadcast)
3599 intf->timed_out_ipmb_broadcasts++; 3733 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
3600 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) 3734 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3601 intf->timed_out_lan_commands++; 3735 ipmi_inc_stat(intf, timed_out_lan_commands);
3602 else 3736 else
3603 intf->timed_out_ipmb_commands++; 3737 ipmi_inc_stat(intf, timed_out_ipmb_commands);
3604 spin_unlock(&intf->counter_lock);
3605 } else { 3738 } else {
3606 struct ipmi_smi_msg *smi_msg; 3739 struct ipmi_smi_msg *smi_msg;
3607 /* More retries, send again. */ 3740 /* More retries, send again. */
3608 3741
3609 /* Start with the max timer, set to normal 3742 /*
3610 timer after the message is sent. */ 3743 * Start with the max timer, set to normal timer after
3744 * the message is sent.
3745 */
3611 ent->timeout = MAX_MSG_TIMEOUT; 3746 ent->timeout = MAX_MSG_TIMEOUT;
3612 ent->retries_left--; 3747 ent->retries_left--;
3613 spin_lock(&intf->counter_lock);
3614 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) 3748 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3615 intf->retransmitted_lan_commands++; 3749 ipmi_inc_stat(intf, retransmitted_lan_commands);
3616 else 3750 else
3617 intf->retransmitted_ipmb_commands++; 3751 ipmi_inc_stat(intf, retransmitted_ipmb_commands);
3618 spin_unlock(&intf->counter_lock);
3619 3752
3620 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 3753 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3621 ent->seqid); 3754 ent->seqid);
@@ -3624,11 +3757,13 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3624 3757
3625 spin_unlock_irqrestore(&intf->seq_lock, *flags); 3758 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3626 3759
3627 /* Send the new message. We send with a zero 3760 /*
3628 * priority. It timed out, I doubt time is 3761 * Send the new message. We send with a zero
3629 * that critical now, and high priority 3762 * priority. It timed out, I doubt time is that
3630 * messages are really only for messages to the 3763 * critical now, and high priority messages are really
3631 * local MC, which don't get resent. */ 3764 * only for messages to the local MC, which don't get
3765 * resent.
3766 */
3632 handlers = intf->handlers; 3767 handlers = intf->handlers;
3633 if (handlers) 3768 if (handlers)
3634 intf->handlers->sender(intf->send_info, 3769 intf->handlers->sender(intf->send_info,
@@ -3659,16 +3794,20 @@ static void ipmi_timeout_handler(long timeout_period)
3659 list_del(&smi_msg->link); 3794 list_del(&smi_msg->link);
3660 ipmi_free_smi_msg(smi_msg); 3795 ipmi_free_smi_msg(smi_msg);
3661 } else { 3796 } else {
3662 /* To preserve message order, quit if we 3797 /*
3663 can't handle a message. */ 3798 * To preserve message order, quit if we
3799 * can't handle a message.
3800 */
3664 break; 3801 break;
3665 } 3802 }
3666 } 3803 }
3667 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3804 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3668 3805
3669 /* Go through the seq table and find any messages that 3806 /*
3670 have timed out, putting them in the timeouts 3807 * Go through the seq table and find any messages that
3671 list. */ 3808 * have timed out, putting them in the timeouts
3809 * list.
3810 */
3672 INIT_LIST_HEAD(&timeouts); 3811 INIT_LIST_HEAD(&timeouts);
3673 spin_lock_irqsave(&intf->seq_lock, flags); 3812 spin_lock_irqsave(&intf->seq_lock, flags);
3674 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 3813 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
@@ -3694,8 +3833,7 @@ static void ipmi_timeout_handler(long timeout_period)
3694 intf->auto_maintenance_timeout 3833 intf->auto_maintenance_timeout
3695 -= timeout_period; 3834 -= timeout_period;
3696 if (!intf->maintenance_mode 3835 if (!intf->maintenance_mode
3697 && (intf->auto_maintenance_timeout <= 0)) 3836 && (intf->auto_maintenance_timeout <= 0)) {
3698 {
3699 intf->maintenance_mode_enable = 0; 3837 intf->maintenance_mode_enable = 0;
3700 maintenance_mode_update(intf); 3838 maintenance_mode_update(intf);
3701 } 3839 }
@@ -3713,8 +3851,10 @@ static void ipmi_request_event(void)
3713 struct ipmi_smi_handlers *handlers; 3851 struct ipmi_smi_handlers *handlers;
3714 3852
3715 rcu_read_lock(); 3853 rcu_read_lock();
3716 /* Called from the timer, no need to check if handlers is 3854 /*
3717 * valid. */ 3855 * Called from the timer, no need to check if handlers is
3856 * valid.
3857 */
3718 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 3858 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3719 /* No event requests when in maintenance mode. */ 3859 /* No event requests when in maintenance mode. */
3720 if (intf->maintenance_mode_enable) 3860 if (intf->maintenance_mode_enable)
@@ -3735,10 +3875,12 @@ static struct timer_list ipmi_timer;
3735/* How many jiffies does it take to get to the timeout time. */ 3875/* How many jiffies does it take to get to the timeout time. */
3736#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) 3876#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3737 3877
3738/* Request events from the queue every second (this is the number of 3878/*
3739 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the 3879 * Request events from the queue every second (this is the number of
3740 future, IPMI will add a way to know immediately if an event is in 3880 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3741 the queue and this silliness can go away. */ 3881 * future, IPMI will add a way to know immediately if an event is in
3882 * the queue and this silliness can go away.
3883 */
3742#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) 3884#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
3743 3885
3744static atomic_t stop_operation; 3886static atomic_t stop_operation;
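For reference on the reformatted timer macros: assuming the driver's usual IPMI_TIMEOUT_TIME of 1000 ms (defined earlier in the file, not shown in this diff), the arithmetic works out as follows.

	/* Assuming IPMI_TIMEOUT_TIME == 1000 (ms):                          */
	/*   IPMI_TIMEOUT_JIFFIES = (1000 * HZ) / 1000 = HZ   -> 1 s period  */
	/*   IPMI_REQUEST_EV_TIME = 1000 / 1000        = 1    -> ask for     */
	/*                          queued events on every timer expiry      */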
@@ -3782,6 +3924,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3782 } 3924 }
3783 return rv; 3925 return rv;
3784} 3926}
3927EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3785 3928
3786static void free_recv_msg(struct ipmi_recv_msg *msg) 3929static void free_recv_msg(struct ipmi_recv_msg *msg)
3787{ 3930{
@@ -3789,7 +3932,7 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
3789 kfree(msg); 3932 kfree(msg);
3790} 3933}
3791 3934
3792struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) 3935static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3793{ 3936{
3794 struct ipmi_recv_msg *rv; 3937 struct ipmi_recv_msg *rv;
3795 3938
@@ -3808,6 +3951,7 @@ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3808 kref_put(&msg->user->refcount, free_user); 3951 kref_put(&msg->user->refcount, free_user);
3809 msg->done(msg); 3952 msg->done(msg);
3810} 3953}
3954EXPORT_SYMBOL(ipmi_free_recv_msg);
3811 3955
3812#ifdef CONFIG_IPMI_PANIC_EVENT 3956#ifdef CONFIG_IPMI_PANIC_EVENT
3813 3957
@@ -3825,8 +3969,7 @@ static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3825 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3969 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3826 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) 3970 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3827 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) 3971 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3828 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) 3972 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
3829 {
3830 /* A get event receiver command, save it. */ 3973 /* A get event receiver command, save it. */
3831 intf->event_receiver = msg->msg.data[1]; 3974 intf->event_receiver = msg->msg.data[1];
3832 intf->event_receiver_lun = msg->msg.data[2] & 0x3; 3975 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
@@ -3838,10 +3981,11 @@ static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3838 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3981 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3839 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3982 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3840 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) 3983 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3841 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) 3984 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
3842 { 3985 /*
3843 /* A get device id command, save if we are an event 3986 * A get device id command, save if we are an event
3844 receiver or generator. */ 3987 * receiver or generator.
3988 */
3845 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 3989 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3846 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 3990 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3847 } 3991 }
@@ -3874,8 +4018,10 @@ static void send_panic_events(char *str)
3874 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 4018 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3875 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 4019 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3876 4020
3877 /* Put a few breadcrumbs in. Hopefully later we can add more things 4021 /*
3878 to make the panic events more useful. */ 4022 * Put a few breadcrumbs in. Hopefully later we can add more things
4023 * to make the panic events more useful.
4024 */
3879 if (str) { 4025 if (str) {
3880 data[3] = str[0]; 4026 data[3] = str[0];
3881 data[6] = str[1]; 4027 data[6] = str[1];
@@ -3891,6 +4037,7 @@ static void send_panic_events(char *str)
3891 /* Interface is not ready. */ 4037 /* Interface is not ready. */
3892 continue; 4038 continue;
3893 4039
4040 intf->run_to_completion = 1;
3894 /* Send the event announcing the panic. */ 4041 /* Send the event announcing the panic. */
3895 intf->handlers->set_run_to_completion(intf->send_info, 1); 4042 intf->handlers->set_run_to_completion(intf->send_info, 1);
3896 i_ipmi_request(NULL, 4043 i_ipmi_request(NULL,
@@ -3908,9 +4055,11 @@ static void send_panic_events(char *str)
3908 } 4055 }
3909 4056
3910#ifdef CONFIG_IPMI_PANIC_STRING 4057#ifdef CONFIG_IPMI_PANIC_STRING
3911 /* On every interface, dump a bunch of OEM event holding the 4058 /*
3912 string. */ 4059 * On every interface, dump a bunch of OEM event holding the
3913 if (!str) 4060 * string.
4061 */
4062 if (!str)
3914 return; 4063 return;
3915 4064
3916 /* For every registered interface, send the event. */ 4065 /* For every registered interface, send the event. */
@@ -3931,11 +4080,13 @@ static void send_panic_events(char *str)
3931 */ 4080 */
3932 smp_rmb(); 4081 smp_rmb();
3933 4082
3934 /* First job here is to figure out where to send the 4083 /*
3935 OEM events. There's no way in IPMI to send OEM 4084 * First job here is to figure out where to send the
3936 events using an event send command, so we have to 4085 * OEM events. There's no way in IPMI to send OEM
3937 find the SEL to put them in and stick them in 4086 * events using an event send command, so we have to
3938 there. */ 4087 * find the SEL to put them in and stick them in
4088 * there.
4089 */
3939 4090
3940 /* Get capabilities from the get device id. */ 4091 /* Get capabilities from the get device id. */
3941 intf->local_sel_device = 0; 4092 intf->local_sel_device = 0;
@@ -3983,24 +4134,29 @@ static void send_panic_events(char *str)
3983 } 4134 }
3984 intf->null_user_handler = NULL; 4135 intf->null_user_handler = NULL;
3985 4136
3986 /* Validate the event receiver. The low bit must not 4137 /*
3987 be 1 (it must be a valid IPMB address), it cannot 4138 * Validate the event receiver. The low bit must not
3988 be zero, and it must not be my address. */ 4139 * be 1 (it must be a valid IPMB address), it cannot
3989 if (((intf->event_receiver & 1) == 0) 4140 * be zero, and it must not be my address.
4141 */
4142 if (((intf->event_receiver & 1) == 0)
3990 && (intf->event_receiver != 0) 4143 && (intf->event_receiver != 0)
3991 && (intf->event_receiver != intf->channels[0].address)) 4144 && (intf->event_receiver != intf->channels[0].address)) {
3992 { 4145 /*
3993 /* The event receiver is valid, send an IPMB 4146 * The event receiver is valid, send an IPMB
3994 message. */ 4147 * message.
4148 */
3995 ipmb = (struct ipmi_ipmb_addr *) &addr; 4149 ipmb = (struct ipmi_ipmb_addr *) &addr;
3996 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; 4150 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3997 ipmb->channel = 0; /* FIXME - is this right? */ 4151 ipmb->channel = 0; /* FIXME - is this right? */
3998 ipmb->lun = intf->event_receiver_lun; 4152 ipmb->lun = intf->event_receiver_lun;
3999 ipmb->slave_addr = intf->event_receiver; 4153 ipmb->slave_addr = intf->event_receiver;
4000 } else if (intf->local_sel_device) { 4154 } else if (intf->local_sel_device) {
4001 /* The event receiver was not valid (or was 4155 /*
4002 me), but I am an SEL device, just dump it 4156 * The event receiver was not valid (or was
4003 in my SEL. */ 4157 * me), but I am an SEL device, just dump it
4158 * in my SEL.
4159 */
4004 si = (struct ipmi_system_interface_addr *) &addr; 4160 si = (struct ipmi_system_interface_addr *) &addr;
4005 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4161 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4006 si->channel = IPMI_BMC_CHANNEL; 4162 si->channel = IPMI_BMC_CHANNEL;
@@ -4008,7 +4164,6 @@ static void send_panic_events(char *str)
4008 } else 4164 } else
 4009 continue; /* Nowhere to send the event. */  4165 continue; /* Nowhere to send the event. */
4010 4166
4011
4012 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ 4167 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4013 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; 4168 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4014 msg.data = data; 4169 msg.data = data;
@@ -4025,8 +4180,10 @@ static void send_panic_events(char *str)
4025 data[2] = 0xf0; /* OEM event without timestamp. */ 4180 data[2] = 0xf0; /* OEM event without timestamp. */
4026 data[3] = intf->channels[0].address; 4181 data[3] = intf->channels[0].address;
4027 data[4] = j++; /* sequence # */ 4182 data[4] = j++; /* sequence # */
4028 /* Always give 11 bytes, so strncpy will fill 4183 /*
4029 it with zeroes for me. */ 4184 * Always give 11 bytes, so strncpy will fill
4185 * it with zeroes for me.
4186 */
4030 strncpy(data+5, p, 11); 4187 strncpy(data+5, p, 11);
4031 p += size; 4188 p += size;
4032 4189
@@ -4043,7 +4200,7 @@ static void send_panic_events(char *str)
4043 intf->channels[0].lun, 4200 intf->channels[0].lun,
4044 0, 1); /* no retry, and no wait. */ 4201 0, 1); /* no retry, and no wait. */
4045 } 4202 }
4046 } 4203 }
4047#endif /* CONFIG_IPMI_PANIC_STRING */ 4204#endif /* CONFIG_IPMI_PANIC_STRING */
4048} 4205}
4049#endif /* CONFIG_IPMI_PANIC_EVENT */ 4206#endif /* CONFIG_IPMI_PANIC_EVENT */
@@ -4052,7 +4209,7 @@ static int has_panicked;
4052 4209
4053static int panic_event(struct notifier_block *this, 4210static int panic_event(struct notifier_block *this,
4054 unsigned long event, 4211 unsigned long event,
4055 void *ptr) 4212 void *ptr)
4056{ 4213{
4057 ipmi_smi_t intf; 4214 ipmi_smi_t intf;
4058 4215
@@ -4066,6 +4223,7 @@ static int panic_event(struct notifier_block *this,
4066 /* Interface is not ready. */ 4223 /* Interface is not ready. */
4067 continue; 4224 continue;
4068 4225
4226 intf->run_to_completion = 1;
4069 intf->handlers->set_run_to_completion(intf->send_info, 1); 4227 intf->handlers->set_run_to_completion(intf->send_info, 1);
4070 } 4228 }
4071 4229
@@ -4133,11 +4291,16 @@ static __exit void cleanup_ipmi(void)
4133 4291
4134 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); 4292 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4135 4293
4136 /* This can't be called if any interfaces exist, so no worry about 4294 /*
4137 shutting down the interfaces. */ 4295 * This can't be called if any interfaces exist, so no worry
4296 * about shutting down the interfaces.
4297 */
4138 4298
4139 /* Tell the timer to stop, then wait for it to stop. This avoids 4299 /*
4140 problems with race conditions removing the timer here. */ 4300 * Tell the timer to stop, then wait for it to stop. This
4301 * avoids problems with race conditions removing the timer
4302 * here.
4303 */
4141 atomic_inc(&stop_operation); 4304 atomic_inc(&stop_operation);
4142 del_timer_sync(&ipmi_timer); 4305 del_timer_sync(&ipmi_timer);
4143 4306
@@ -4164,31 +4327,6 @@ module_exit(cleanup_ipmi);
4164module_init(ipmi_init_msghandler_mod); 4327module_init(ipmi_init_msghandler_mod);
4165MODULE_LICENSE("GPL"); 4328MODULE_LICENSE("GPL");
4166MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 4329MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4167MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 4330MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
4331 " interface.");
4168MODULE_VERSION(IPMI_DRIVER_VERSION); 4332MODULE_VERSION(IPMI_DRIVER_VERSION);
4169
4170EXPORT_SYMBOL(ipmi_create_user);
4171EXPORT_SYMBOL(ipmi_destroy_user);
4172EXPORT_SYMBOL(ipmi_get_version);
4173EXPORT_SYMBOL(ipmi_request_settime);
4174EXPORT_SYMBOL(ipmi_request_supply_msgs);
4175EXPORT_SYMBOL(ipmi_poll_interface);
4176EXPORT_SYMBOL(ipmi_register_smi);
4177EXPORT_SYMBOL(ipmi_unregister_smi);
4178EXPORT_SYMBOL(ipmi_register_for_cmd);
4179EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4180EXPORT_SYMBOL(ipmi_smi_msg_received);
4181EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4182EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4183EXPORT_SYMBOL(ipmi_addr_length);
4184EXPORT_SYMBOL(ipmi_validate_addr);
4185EXPORT_SYMBOL(ipmi_set_gets_events);
4186EXPORT_SYMBOL(ipmi_smi_watcher_register);
4187EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4188EXPORT_SYMBOL(ipmi_set_my_address);
4189EXPORT_SYMBOL(ipmi_get_my_address);
4190EXPORT_SYMBOL(ipmi_set_my_LUN);
4191EXPORT_SYMBOL(ipmi_get_my_LUN);
4192EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4193EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
4194EXPORT_SYMBOL(ipmi_free_recv_msg);
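The trailing block of EXPORT_SYMBOL() lines removed here is not lost: the same exports now sit immediately after each function they belong to (see ipmi_register_smi, ipmi_unregister_smi, ipmi_smi_msg_received and the others in the hunks above), matching current kernel style. In outline, with a made-up function name:

	/* The export macro follows the definition it exports. */
	int ipmi_example_export(void)
	{
		return 0;
	}
	EXPORT_SYMBOL(ipmi_example_export);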
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index b86186de7f07..a261bd735dfb 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -87,7 +87,10 @@ MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
87 87
88/* parameter definition to allow user to flag power cycle */ 88/* parameter definition to allow user to flag power cycle */
89module_param(poweroff_powercycle, int, 0644); 89module_param(poweroff_powercycle, int, 0644);
90MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); 90MODULE_PARM_DESC(poweroff_powercycle,
91 " Set to non-zero to enable power cycle instead of power"
92 " down. Power cycle is contingent on hardware support,"
93 " otherwise it defaults back to power down.");
91 94
92/* Stuff from the get device id command. */ 95/* Stuff from the get device id command. */
93static unsigned int mfg_id; 96static unsigned int mfg_id;
@@ -95,22 +98,25 @@ static unsigned int prod_id;
95static unsigned char capabilities; 98static unsigned char capabilities;
96static unsigned char ipmi_version; 99static unsigned char ipmi_version;
97 100
98/* We use our own messages for this operation, we don't let the system 101/*
99 allocate them, since we may be in a panic situation. The whole 102 * We use our own messages for this operation, we don't let the system
100 thing is single-threaded, anyway, so multiple messages are not 103 * allocate them, since we may be in a panic situation. The whole
101 required. */ 104 * thing is single-threaded, anyway, so multiple messages are not
105 * required.
106 */
107static atomic_t dummy_count = ATOMIC_INIT(0);
102static void dummy_smi_free(struct ipmi_smi_msg *msg) 108static void dummy_smi_free(struct ipmi_smi_msg *msg)
103{ 109{
110 atomic_dec(&dummy_count);
104} 111}
105static void dummy_recv_free(struct ipmi_recv_msg *msg) 112static void dummy_recv_free(struct ipmi_recv_msg *msg)
106{ 113{
114 atomic_dec(&dummy_count);
107} 115}
108static struct ipmi_smi_msg halt_smi_msg = 116static struct ipmi_smi_msg halt_smi_msg = {
109{
110 .done = dummy_smi_free 117 .done = dummy_smi_free
111}; 118};
112static struct ipmi_recv_msg halt_recv_msg = 119static struct ipmi_recv_msg halt_recv_msg = {
113{
114 .done = dummy_recv_free 120 .done = dummy_recv_free
115}; 121};
116 122
@@ -127,8 +133,7 @@ static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
127 complete(comp); 133 complete(comp);
128} 134}
129 135
130static struct ipmi_user_hndl ipmi_poweroff_handler = 136static struct ipmi_user_hndl ipmi_poweroff_handler = {
131{
132 .ipmi_recv_hndl = receive_handler 137 .ipmi_recv_hndl = receive_handler
133}; 138};
134 139
@@ -152,17 +157,28 @@ static int ipmi_request_wait_for_response(ipmi_user_t user,
152 return halt_recv_msg.msg.data[0]; 157 return halt_recv_msg.msg.data[0];
153} 158}
154 159
155/* We are in run-to-completion mode, no completion is desired. */ 160/* Wait for message to complete, spinning. */
156static int ipmi_request_in_rc_mode(ipmi_user_t user, 161static int ipmi_request_in_rc_mode(ipmi_user_t user,
157 struct ipmi_addr *addr, 162 struct ipmi_addr *addr,
158 struct kernel_ipmi_msg *send_msg) 163 struct kernel_ipmi_msg *send_msg)
159{ 164{
160 int rv; 165 int rv;
161 166
167 atomic_set(&dummy_count, 2);
162 rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL, 168 rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
163 &halt_smi_msg, &halt_recv_msg, 0); 169 &halt_smi_msg, &halt_recv_msg, 0);
164 if (rv) 170 if (rv) {
171 atomic_set(&dummy_count, 0);
165 return rv; 172 return rv;
173 }
174
175 /*
176 * Spin until our message is done.
177 */
178 while (atomic_read(&dummy_count) > 0) {
179 ipmi_poll_interface(user);
180 cpu_relax();
181 }
166 182
167 return halt_recv_msg.msg.data[0]; 183 return halt_recv_msg.msg.data[0];
168} 184}
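The spin-wait introduced above relies on exactly two statically allocated messages being handed to ipmi_request_supply_msgs(): each one's ->done() callback (dummy_smi_free and dummy_recv_free) decrements dummy_count once, so the loop can only exit after both the SMI message and the receive message have been released. Spinning with ipmi_poll_interface()/cpu_relax() is used because the poweroff path may have no scheduler to sleep on. Condensed:

	atomic_set(&dummy_count, 2);	/* halt_smi_msg + halt_recv_msg */
	rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
				      &halt_smi_msg, &halt_recv_msg, 0);
	while (!rv && atomic_read(&dummy_count) > 0) {
		ipmi_poll_interface(user);	/* drive the interface by hand */
		cpu_relax();
	}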
@@ -184,47 +200,47 @@ static int ipmi_request_in_rc_mode(ipmi_user_t user,
184 200
185static void (*atca_oem_poweroff_hook)(ipmi_user_t user); 201static void (*atca_oem_poweroff_hook)(ipmi_user_t user);
186 202
187static void pps_poweroff_atca (ipmi_user_t user) 203static void pps_poweroff_atca(ipmi_user_t user)
188{ 204{
189 struct ipmi_system_interface_addr smi_addr; 205 struct ipmi_system_interface_addr smi_addr;
190 struct kernel_ipmi_msg send_msg; 206 struct kernel_ipmi_msg send_msg;
191 int rv; 207 int rv;
192 /* 208 /*
193 * Configure IPMI address for local access 209 * Configure IPMI address for local access
194 */ 210 */
195 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 211 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
196 smi_addr.channel = IPMI_BMC_CHANNEL; 212 smi_addr.channel = IPMI_BMC_CHANNEL;
197 smi_addr.lun = 0; 213 smi_addr.lun = 0;
198 214
199 printk(KERN_INFO PFX "PPS powerdown hook used"); 215 printk(KERN_INFO PFX "PPS powerdown hook used");
200 216
201 send_msg.netfn = IPMI_NETFN_OEM; 217 send_msg.netfn = IPMI_NETFN_OEM;
202 send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART; 218 send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
203 send_msg.data = IPMI_ATCA_PPS_IANA; 219 send_msg.data = IPMI_ATCA_PPS_IANA;
204 send_msg.data_len = 3; 220 send_msg.data_len = 3;
205 rv = ipmi_request_in_rc_mode(user, 221 rv = ipmi_request_in_rc_mode(user,
206 (struct ipmi_addr *) &smi_addr, 222 (struct ipmi_addr *) &smi_addr,
207 &send_msg); 223 &send_msg);
208 if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { 224 if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
209 printk(KERN_ERR PFX "Unable to send ATCA," 225 printk(KERN_ERR PFX "Unable to send ATCA,"
210 " IPMI error 0x%x\n", rv); 226 " IPMI error 0x%x\n", rv);
211 } 227 }
212 return; 228 return;
213} 229}
214 230
215static int ipmi_atca_detect (ipmi_user_t user) 231static int ipmi_atca_detect(ipmi_user_t user)
216{ 232{
217 struct ipmi_system_interface_addr smi_addr; 233 struct ipmi_system_interface_addr smi_addr;
218 struct kernel_ipmi_msg send_msg; 234 struct kernel_ipmi_msg send_msg;
219 int rv; 235 int rv;
220 unsigned char data[1]; 236 unsigned char data[1];
221 237
222 /* 238 /*
223 * Configure IPMI address for local access 239 * Configure IPMI address for local access
224 */ 240 */
225 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 241 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
226 smi_addr.channel = IPMI_BMC_CHANNEL; 242 smi_addr.channel = IPMI_BMC_CHANNEL;
227 smi_addr.lun = 0; 243 smi_addr.lun = 0;
228 244
229 /* 245 /*
230 * Use get address info to check and see if we are ATCA 246 * Use get address info to check and see if we are ATCA
@@ -238,28 +254,30 @@ static int ipmi_atca_detect (ipmi_user_t user)
238 (struct ipmi_addr *) &smi_addr, 254 (struct ipmi_addr *) &smi_addr,
239 &send_msg); 255 &send_msg);
240 256
241 printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id); 257 printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
242 if((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID) 258 mfg_id, prod_id);
243 && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) { 259 if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
244 printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n"); 260 && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
261 printk(KERN_INFO PFX
262 "Installing Pigeon Point Systems Poweroff Hook\n");
245 atca_oem_poweroff_hook = pps_poweroff_atca; 263 atca_oem_poweroff_hook = pps_poweroff_atca;
246 } 264 }
247 return !rv; 265 return !rv;
248} 266}
249 267
250static void ipmi_poweroff_atca (ipmi_user_t user) 268static void ipmi_poweroff_atca(ipmi_user_t user)
251{ 269{
252 struct ipmi_system_interface_addr smi_addr; 270 struct ipmi_system_interface_addr smi_addr;
253 struct kernel_ipmi_msg send_msg; 271 struct kernel_ipmi_msg send_msg;
254 int rv; 272 int rv;
255 unsigned char data[4]; 273 unsigned char data[4];
256 274
257 /* 275 /*
258 * Configure IPMI address for local access 276 * Configure IPMI address for local access
259 */ 277 */
260 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 278 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
261 smi_addr.channel = IPMI_BMC_CHANNEL; 279 smi_addr.channel = IPMI_BMC_CHANNEL;
262 smi_addr.lun = 0; 280 smi_addr.lun = 0;
263 281
264 printk(KERN_INFO PFX "Powering down via ATCA power command\n"); 282 printk(KERN_INFO PFX "Powering down via ATCA power command\n");
265 283
@@ -273,23 +291,24 @@ static void ipmi_poweroff_atca (ipmi_user_t user)
273 data[2] = 0; /* Power Level */ 291 data[2] = 0; /* Power Level */
274 data[3] = 0; /* Don't change saved presets */ 292 data[3] = 0; /* Don't change saved presets */
275 send_msg.data = data; 293 send_msg.data = data;
276 send_msg.data_len = sizeof (data); 294 send_msg.data_len = sizeof(data);
277 rv = ipmi_request_in_rc_mode(user, 295 rv = ipmi_request_in_rc_mode(user,
278 (struct ipmi_addr *) &smi_addr, 296 (struct ipmi_addr *) &smi_addr,
279 &send_msg); 297 &send_msg);
280 /** At this point, the system may be shutting down, and most 298 /*
281 ** serial drivers (if used) will have interrupts turned off 299 * At this point, the system may be shutting down, and most
282 ** it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE 300 * serial drivers (if used) will have interrupts turned off
283 ** return code 301 * it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE
284 **/ 302 * return code
285 if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { 303 */
304 if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
286 printk(KERN_ERR PFX "Unable to send ATCA powerdown message," 305 printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
287 " IPMI error 0x%x\n", rv); 306 " IPMI error 0x%x\n", rv);
288 goto out; 307 goto out;
289 } 308 }
290 309
291 if(atca_oem_poweroff_hook) 310 if (atca_oem_poweroff_hook)
292 return atca_oem_poweroff_hook(user); 311 atca_oem_poweroff_hook(user);
293 out: 312 out:
294 return; 313 return;
295} 314}
@@ -310,13 +329,13 @@ static void ipmi_poweroff_atca (ipmi_user_t user)
310#define IPMI_CPI1_PRODUCT_ID 0x000157 329#define IPMI_CPI1_PRODUCT_ID 0x000157
311#define IPMI_CPI1_MANUFACTURER_ID 0x0108 330#define IPMI_CPI1_MANUFACTURER_ID 0x0108
312 331
313static int ipmi_cpi1_detect (ipmi_user_t user) 332static int ipmi_cpi1_detect(ipmi_user_t user)
314{ 333{
315 return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID) 334 return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
316 && (prod_id == IPMI_CPI1_PRODUCT_ID)); 335 && (prod_id == IPMI_CPI1_PRODUCT_ID));
317} 336}
318 337
319static void ipmi_poweroff_cpi1 (ipmi_user_t user) 338static void ipmi_poweroff_cpi1(ipmi_user_t user)
320{ 339{
321 struct ipmi_system_interface_addr smi_addr; 340 struct ipmi_system_interface_addr smi_addr;
322 struct ipmi_ipmb_addr ipmb_addr; 341 struct ipmi_ipmb_addr ipmb_addr;
@@ -328,12 +347,12 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user)
328 unsigned char aer_addr; 347 unsigned char aer_addr;
329 unsigned char aer_lun; 348 unsigned char aer_lun;
330 349
331 /* 350 /*
332 * Configure IPMI address for local access 351 * Configure IPMI address for local access
333 */ 352 */
334 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 353 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
335 smi_addr.channel = IPMI_BMC_CHANNEL; 354 smi_addr.channel = IPMI_BMC_CHANNEL;
336 smi_addr.lun = 0; 355 smi_addr.lun = 0;
337 356
338 printk(KERN_INFO PFX "Powering down via CPI1 power command\n"); 357 printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
339 358
@@ -425,7 +444,7 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user)
425 */ 444 */
426 445
427#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} 446#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
428static int ipmi_dell_chassis_detect (ipmi_user_t user) 447static int ipmi_dell_chassis_detect(ipmi_user_t user)
429{ 448{
430 const char ipmi_version_major = ipmi_version & 0xF; 449 const char ipmi_version_major = ipmi_version & 0xF;
431 const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; 450 const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
@@ -444,25 +463,25 @@ static int ipmi_dell_chassis_detect (ipmi_user_t user)
444#define IPMI_NETFN_CHASSIS_REQUEST 0 463#define IPMI_NETFN_CHASSIS_REQUEST 0
445#define IPMI_CHASSIS_CONTROL_CMD 0x02 464#define IPMI_CHASSIS_CONTROL_CMD 0x02
446 465
447static int ipmi_chassis_detect (ipmi_user_t user) 466static int ipmi_chassis_detect(ipmi_user_t user)
448{ 467{
449 /* Chassis support, use it. */ 468 /* Chassis support, use it. */
450 return (capabilities & 0x80); 469 return (capabilities & 0x80);
451} 470}
452 471
453static void ipmi_poweroff_chassis (ipmi_user_t user) 472static void ipmi_poweroff_chassis(ipmi_user_t user)
454{ 473{
455 struct ipmi_system_interface_addr smi_addr; 474 struct ipmi_system_interface_addr smi_addr;
456 struct kernel_ipmi_msg send_msg; 475 struct kernel_ipmi_msg send_msg;
457 int rv; 476 int rv;
458 unsigned char data[1]; 477 unsigned char data[1];
459 478
460 /* 479 /*
461 * Configure IPMI address for local access 480 * Configure IPMI address for local access
462 */ 481 */
463 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 482 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
464 smi_addr.channel = IPMI_BMC_CHANNEL; 483 smi_addr.channel = IPMI_BMC_CHANNEL;
465 smi_addr.lun = 0; 484 smi_addr.lun = 0;
466 485
467 powercyclefailed: 486 powercyclefailed:
468 printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n", 487 printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
@@ -525,15 +544,13 @@ static struct poweroff_function poweroff_functions[] = {
525 544
526 545
527/* Called on a powerdown request. */ 546/* Called on a powerdown request. */
528static void ipmi_poweroff_function (void) 547static void ipmi_poweroff_function(void)
529{ 548{
530 if (!ready) 549 if (!ready)
531 return; 550 return;
532 551
533 /* Use run-to-completion mode, since interrupts may be off. */ 552 /* Use run-to-completion mode, since interrupts may be off. */
534 ipmi_user_set_run_to_completion(ipmi_user, 1);
535 specific_poweroff_func(ipmi_user); 553 specific_poweroff_func(ipmi_user);
536 ipmi_user_set_run_to_completion(ipmi_user, 0);
537} 554}
538 555
539/* Wait for an IPMI interface to be installed, the first one installed 556/* Wait for an IPMI interface to be installed, the first one installed
@@ -561,13 +578,13 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
561 578
562 ipmi_ifnum = if_num; 579 ipmi_ifnum = if_num;
563 580
564 /* 581 /*
565 * Do a get device id and store some results, since this is 582 * Do a get device id and store some results, since this is
566 * used by several functions. 583 * used by several functions.
567 */ 584 */
568 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 585 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
569 smi_addr.channel = IPMI_BMC_CHANNEL; 586 smi_addr.channel = IPMI_BMC_CHANNEL;
570 smi_addr.lun = 0; 587 smi_addr.lun = 0;
571 588
572 send_msg.netfn = IPMI_NETFN_APP_REQUEST; 589 send_msg.netfn = IPMI_NETFN_APP_REQUEST;
573 send_msg.cmd = IPMI_GET_DEVICE_ID_CMD; 590 send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
@@ -632,8 +649,7 @@ static void ipmi_po_smi_gone(int if_num)
632 pm_power_off = old_poweroff_func; 649 pm_power_off = old_poweroff_func;
633} 650}
634 651
635static struct ipmi_smi_watcher smi_watcher = 652static struct ipmi_smi_watcher smi_watcher = {
636{
637 .owner = THIS_MODULE, 653 .owner = THIS_MODULE,
638 .new_smi = ipmi_po_new_smi, 654 .new_smi = ipmi_po_new_smi,
639 .smi_gone = ipmi_po_smi_gone 655 .smi_gone = ipmi_po_smi_gone
@@ -675,12 +691,12 @@ static struct ctl_table_header *ipmi_table_header;
675/* 691/*
676 * Startup and shutdown functions. 692 * Startup and shutdown functions.
677 */ 693 */
678static int ipmi_poweroff_init (void) 694static int ipmi_poweroff_init(void)
679{ 695{
680 int rv; 696 int rv;
681 697
682 printk (KERN_INFO "Copyright (C) 2004 MontaVista Software -" 698 printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
683 " IPMI Powerdown via sys_reboot.\n"); 699 " IPMI Powerdown via sys_reboot.\n");
684 700
685 if (poweroff_powercycle) 701 if (poweroff_powercycle)
686 printk(KERN_INFO PFX "Power cycle is enabled.\n"); 702 printk(KERN_INFO PFX "Power cycle is enabled.\n");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 4f560d0bb808..5a5455585c1d 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -80,7 +80,7 @@
80#define SI_USEC_PER_JIFFY (1000000/HZ) 80#define SI_USEC_PER_JIFFY (1000000/HZ)
81#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) 81#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
82#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a 82#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
83 short timeout */ 83 short timeout */
84 84
85/* Bit for BMC global enables. */ 85/* Bit for BMC global enables. */
86#define IPMI_BMC_RCV_MSG_INTR 0x01 86#define IPMI_BMC_RCV_MSG_INTR 0x01
@@ -114,14 +114,61 @@ static char *si_to_str[] = { "kcs", "smic", "bt" };
114 114
115#define DEVICE_NAME "ipmi_si" 115#define DEVICE_NAME "ipmi_si"
116 116
117static struct device_driver ipmi_driver = 117static struct device_driver ipmi_driver = {
118{
119 .name = DEVICE_NAME, 118 .name = DEVICE_NAME,
120 .bus = &platform_bus_type 119 .bus = &platform_bus_type
121}; 120};
122 121
123struct smi_info 122
124{ 123/*
124 * Indexes into stats[] in smi_info below.
125 */
126enum si_stat_indexes {
127 /*
128 * Number of times the driver requested a timer while an operation
129 * was in progress.
130 */
131 SI_STAT_short_timeouts = 0,
132
133 /*
134 * Number of times the driver requested a timer while nothing was in
135 * progress.
136 */
137 SI_STAT_long_timeouts,
138
139 /* Number of times the interface was idle while being polled. */
140 SI_STAT_idles,
141
142 /* Number of interrupts the driver handled. */
143 SI_STAT_interrupts,
144
145 /* Number of times the driver got an ATTN from the hardware. */
146 SI_STAT_attentions,
147
148 /* Number of times the driver requested flags from the hardware. */
149 SI_STAT_flag_fetches,
150
151 /* Number of times the hardware didn't follow the state machine. */
152 SI_STAT_hosed_count,
153
154 /* Number of completed messages. */
155 SI_STAT_complete_transactions,
156
157 /* Number of IPMI events received from the hardware. */
158 SI_STAT_events,
159
160 /* Number of watchdog pretimeouts. */
161 SI_STAT_watchdog_pretimeouts,
162
163 /* Number of asynchronous messages received. */
164 SI_STAT_incoming_messages,
165
166
167 /* This *must* remain last, add new values above this. */
168 SI_NUM_STATS
169};
170
171struct smi_info {
125 int intf_num; 172 int intf_num;
126 ipmi_smi_t intf; 173 ipmi_smi_t intf;
127 struct si_sm_data *si_sm; 174 struct si_sm_data *si_sm;
@@ -134,8 +181,10 @@ struct smi_info
134 struct ipmi_smi_msg *curr_msg; 181 struct ipmi_smi_msg *curr_msg;
135 enum si_intf_state si_state; 182 enum si_intf_state si_state;
136 183
137 /* Used to handle the various types of I/O that can occur with 184 /*
138 IPMI */ 185 * Used to handle the various types of I/O that can occur with
186 * IPMI
187 */
139 struct si_sm_io io; 188 struct si_sm_io io;
140 int (*io_setup)(struct smi_info *info); 189 int (*io_setup)(struct smi_info *info);
141 void (*io_cleanup)(struct smi_info *info); 190 void (*io_cleanup)(struct smi_info *info);
@@ -146,15 +195,18 @@ struct smi_info
146 void (*addr_source_cleanup)(struct smi_info *info); 195 void (*addr_source_cleanup)(struct smi_info *info);
147 void *addr_source_data; 196 void *addr_source_data;
148 197
149 /* Per-OEM handler, called from handle_flags(). 198 /*
150 Returns 1 when handle_flags() needs to be re-run 199 * Per-OEM handler, called from handle_flags(). Returns 1
151 or 0 indicating it set si_state itself. 200 * when handle_flags() needs to be re-run or 0 indicating it
152 */ 201 * set si_state itself.
202 */
153 int (*oem_data_avail_handler)(struct smi_info *smi_info); 203 int (*oem_data_avail_handler)(struct smi_info *smi_info);
154 204
155 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN 205 /*
156 is set to hold the flags until we are done handling everything 206 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
157 from the flags. */ 207 * is set to hold the flags until we are done handling everything
208 * from the flags.
209 */
158#define RECEIVE_MSG_AVAIL 0x01 210#define RECEIVE_MSG_AVAIL 0x01
159#define EVENT_MSG_BUFFER_FULL 0x02 211#define EVENT_MSG_BUFFER_FULL 0x02
160#define WDT_PRE_TIMEOUT_INT 0x08 212#define WDT_PRE_TIMEOUT_INT 0x08
@@ -162,25 +214,31 @@ struct smi_info
162#define OEM1_DATA_AVAIL 0x40 214#define OEM1_DATA_AVAIL 0x40
163#define OEM2_DATA_AVAIL 0x80 215#define OEM2_DATA_AVAIL 0x80
164#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ 216#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
165 OEM1_DATA_AVAIL | \ 217 OEM1_DATA_AVAIL | \
166 OEM2_DATA_AVAIL) 218 OEM2_DATA_AVAIL)
167 unsigned char msg_flags; 219 unsigned char msg_flags;
168 220
169 /* If set to true, this will request events the next time the 221 /*
170 state machine is idle. */ 222 * If set to true, this will request events the next time the
223 * state machine is idle.
224 */
171 atomic_t req_events; 225 atomic_t req_events;
172 226
173 /* If true, run the state machine to completion on every send 227 /*
174 call. Generally used after a panic to make sure stuff goes 228 * If true, run the state machine to completion on every send
175 out. */ 229 * call. Generally used after a panic to make sure stuff goes
230 * out.
231 */
176 int run_to_completion; 232 int run_to_completion;
177 233
178 /* The I/O port of an SI interface. */ 234 /* The I/O port of an SI interface. */
179 int port; 235 int port;
180 236
181 /* The space between start addresses of the two ports. For 237 /*
182 instance, if the first port is 0xca2 and the spacing is 4, then 238 * The space between start addresses of the two ports. For
183 the second port is 0xca6. */ 239 * instance, if the first port is 0xca2 and the spacing is 4, then
240 * the second port is 0xca6.
241 */
184 unsigned int spacing; 242 unsigned int spacing;
185 243
186 /* zero if no irq; */ 244 /* zero if no irq; */
@@ -195,10 +253,12 @@ struct smi_info
195 /* Used to gracefully stop the timer without race conditions. */ 253 /* Used to gracefully stop the timer without race conditions. */
196 atomic_t stop_operation; 254 atomic_t stop_operation;
197 255
198 /* The driver will disable interrupts when it gets into a 256 /*
199 situation where it cannot handle messages due to lack of 257 * The driver will disable interrupts when it gets into a
200 memory. Once that situation clears up, it will re-enable 258 * situation where it cannot handle messages due to lack of
201 interrupts. */ 259 * memory. Once that situation clears up, it will re-enable
260 * interrupts.
261 */
202 int interrupt_disabled; 262 int interrupt_disabled;
203 263
204 /* From the get device id response... */ 264 /* From the get device id response... */
@@ -208,33 +268,28 @@ struct smi_info
208 struct device *dev; 268 struct device *dev;
209 struct platform_device *pdev; 269 struct platform_device *pdev;
210 270
211 /* True if we allocated the device, false if it came from 271 /*
212 * someplace else (like PCI). */ 272 * True if we allocated the device, false if it came from
273 * someplace else (like PCI).
274 */
213 int dev_registered; 275 int dev_registered;
214 276
215 /* Slave address, could be reported from DMI. */ 277 /* Slave address, could be reported from DMI. */
216 unsigned char slave_addr; 278 unsigned char slave_addr;
217 279
218 /* Counters and things for the proc filesystem. */ 280 /* Counters and things for the proc filesystem. */
219 spinlock_t count_lock; 281 atomic_t stats[SI_NUM_STATS];
220 unsigned long short_timeouts; 282
221 unsigned long long_timeouts; 283 struct task_struct *thread;
222 unsigned long timeout_restarts;
223 unsigned long idles;
224 unsigned long interrupts;
225 unsigned long attentions;
226 unsigned long flag_fetches;
227 unsigned long hosed_count;
228 unsigned long complete_transactions;
229 unsigned long events;
230 unsigned long watchdog_pretimeouts;
231 unsigned long incoming_messages;
232
233 struct task_struct *thread;
234 284
235 struct list_head link; 285 struct list_head link;
236}; 286};
237 287
288#define smi_inc_stat(smi, stat) \
289 atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
290#define smi_get_stat(smi, stat) \
291 ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
292
238#define SI_MAX_PARMS 4 293#define SI_MAX_PARMS 4
239 294
240static int force_kipmid[SI_MAX_PARMS]; 295static int force_kipmid[SI_MAX_PARMS];
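
The statistics rework above replaces the spinlock-protected unsigned long counters in struct smi_info with an array of atomic_t indexed by enum si_stat_indexes, wrapped by smi_inc_stat()/smi_get_stat(), so hot paths such as the interrupt handler no longer need count_lock. A minimal user-space sketch of the same enum-indexed atomic-array pattern, with illustrative names only:

#include <stdatomic.h>
#include <stdio.h>

/* Add new counters above NUM_STATS and the array sizes itself. */
enum stat_indexes {
        STAT_short_timeouts = 0,
        STAT_long_timeouts,
        STAT_interrupts,
        NUM_STATS               /* must stay last */
};

struct intf {
        atomic_uint stats[NUM_STATS];
};

/* Equivalents of smi_inc_stat()/smi_get_stat(): token pasting turns
 * inc_stat(i, interrupts) into an index into the stats array. */
#define inc_stat(i, stat)  atomic_fetch_add(&(i)->stats[STAT_ ## stat], 1)
#define get_stat(i, stat)  atomic_load(&(i)->stats[STAT_ ## stat])

int main(void)
{
        static struct intf intf;        /* zero-initialized */

        /* Hot path: a lock-free atomic increment per event. */
        inc_stat(&intf, interrupts);
        inc_stat(&intf, interrupts);

        /* Reporting path (the proc read in the driver). */
        printf("interrupts: %u\n", get_stat(&intf, interrupts));
        return 0;
}
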
@@ -246,7 +301,7 @@ static int try_smi_init(struct smi_info *smi);
246static void cleanup_one_si(struct smi_info *to_clean); 301static void cleanup_one_si(struct smi_info *to_clean);
247 302
248static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 303static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
249static int register_xaction_notifier(struct notifier_block * nb) 304static int register_xaction_notifier(struct notifier_block *nb)
250{ 305{
251 return atomic_notifier_chain_register(&xaction_notifier_list, nb); 306 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
252} 307}
@@ -255,7 +310,7 @@ static void deliver_recv_msg(struct smi_info *smi_info,
255 struct ipmi_smi_msg *msg) 310 struct ipmi_smi_msg *msg)
256{ 311{
257 /* Deliver the message to the upper layer with the lock 312 /* Deliver the message to the upper layer with the lock
258 released. */ 313 released. */
259 spin_unlock(&(smi_info->si_lock)); 314 spin_unlock(&(smi_info->si_lock));
260 ipmi_smi_msg_received(smi_info->intf, msg); 315 ipmi_smi_msg_received(smi_info->intf, msg);
261 spin_lock(&(smi_info->si_lock)); 316 spin_lock(&(smi_info->si_lock));
@@ -287,9 +342,12 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
287 struct timeval t; 342 struct timeval t;
288#endif 343#endif
289 344
290 /* No need to save flags, we already have interrupts off and we 345 /*
291 already hold the SMI lock. */ 346 * No need to save flags, we already have interrupts off and we
292 spin_lock(&(smi_info->msg_lock)); 347 * already hold the SMI lock.
348 */
349 if (!smi_info->run_to_completion)
350 spin_lock(&(smi_info->msg_lock));
293 351
294 /* Pick the high priority queue first. */ 352 /* Pick the high priority queue first. */
295 if (!list_empty(&(smi_info->hp_xmit_msgs))) { 353 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
@@ -310,7 +368,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
310 link); 368 link);
311#ifdef DEBUG_TIMING 369#ifdef DEBUG_TIMING
312 do_gettimeofday(&t); 370 do_gettimeofday(&t);
313 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); 371 printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
314#endif 372#endif
315 err = atomic_notifier_call_chain(&xaction_notifier_list, 373 err = atomic_notifier_call_chain(&xaction_notifier_list,
316 0, smi_info); 374 0, smi_info);
@@ -322,14 +380,14 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
322 smi_info->si_sm, 380 smi_info->si_sm,
323 smi_info->curr_msg->data, 381 smi_info->curr_msg->data,
324 smi_info->curr_msg->data_size); 382 smi_info->curr_msg->data_size);
325 if (err) { 383 if (err)
326 return_hosed_msg(smi_info, err); 384 return_hosed_msg(smi_info, err);
327 }
328 385
329 rv = SI_SM_CALL_WITHOUT_DELAY; 386 rv = SI_SM_CALL_WITHOUT_DELAY;
330 } 387 }
331 out: 388 out:
332 spin_unlock(&(smi_info->msg_lock)); 389 if (!smi_info->run_to_completion)
390 spin_unlock(&(smi_info->msg_lock));
333 391
334 return rv; 392 return rv;
335} 393}
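
In the hunk above, start_next_msg() now takes msg_lock only when run_to_completion is clear: on the panic/power-off path the driver is single-threaded with interrupts off, so the lock is unnecessary (and, further down, sender() and set_run_to_completion() drop their locking for that case as well). A self-contained sketch of the conditional-locking pattern, using a pthread mutex as a stand-in for the spinlock; the queue layout is invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct msg_queue {
        pthread_mutex_t lock;
        bool run_to_completion; /* true on the single-threaded panic path */
        int pending;            /* pretend payload */
};

/* Pop the next message under the lock in normal operation, but skip
 * the lock entirely in run-to-completion mode, where nothing else
 * can touch the queue. */
static int pop_next(struct msg_queue *q)
{
        int msg;

        if (!q->run_to_completion)
                pthread_mutex_lock(&q->lock);

        msg = q->pending;
        q->pending = 0;

        if (!q->run_to_completion)
                pthread_mutex_unlock(&q->lock);

        return msg;
}

int main(void)
{
        struct msg_queue q = { PTHREAD_MUTEX_INITIALIZER, false, 42 };

        printf("normal: %d\n", pop_next(&q));

        q.run_to_completion = true;     /* e.g. powering down after a panic */
        q.pending = 7;
        printf("panic path: %d\n", pop_next(&q));
        return 0;
}
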
@@ -338,8 +396,10 @@ static void start_enable_irq(struct smi_info *smi_info)
338{ 396{
339 unsigned char msg[2]; 397 unsigned char msg[2];
340 398
341 /* If we are enabling interrupts, we have to tell the 399 /*
342 BMC to use them. */ 400 * If we are enabling interrupts, we have to tell the
401 * BMC to use them.
402 */
343 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 403 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
344 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 404 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
345 405
@@ -371,10 +431,12 @@ static void start_clear_flags(struct smi_info *smi_info)
371 smi_info->si_state = SI_CLEARING_FLAGS; 431 smi_info->si_state = SI_CLEARING_FLAGS;
372} 432}
373 433
374/* When we have a situation where we run out of memory and cannot 434/*
375 allocate messages, we just leave them in the BMC and run the system 435 * When we have a situation where we run out of memory and cannot
376 polled until we can allocate some memory. Once we have some 436 * allocate messages, we just leave them in the BMC and run the system
377 memory, we will re-enable the interrupt. */ 437 * polled until we can allocate some memory. Once we have some
438 * memory, we will re-enable the interrupt.
439 */
378static inline void disable_si_irq(struct smi_info *smi_info) 440static inline void disable_si_irq(struct smi_info *smi_info)
379{ 441{
380 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 442 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
@@ -396,9 +458,7 @@ static void handle_flags(struct smi_info *smi_info)
396 retry: 458 retry:
397 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { 459 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
398 /* Watchdog pre-timeout */ 460 /* Watchdog pre-timeout */
399 spin_lock(&smi_info->count_lock); 461 smi_inc_stat(smi_info, watchdog_pretimeouts);
400 smi_info->watchdog_pretimeouts++;
401 spin_unlock(&smi_info->count_lock);
402 462
403 start_clear_flags(smi_info); 463 start_clear_flags(smi_info);
404 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 464 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
@@ -444,12 +504,11 @@ static void handle_flags(struct smi_info *smi_info)
444 smi_info->curr_msg->data_size); 504 smi_info->curr_msg->data_size);
445 smi_info->si_state = SI_GETTING_EVENTS; 505 smi_info->si_state = SI_GETTING_EVENTS;
446 } else if (smi_info->msg_flags & OEM_DATA_AVAIL && 506 } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
447 smi_info->oem_data_avail_handler) { 507 smi_info->oem_data_avail_handler) {
448 if (smi_info->oem_data_avail_handler(smi_info)) 508 if (smi_info->oem_data_avail_handler(smi_info))
449 goto retry; 509 goto retry;
450 } else { 510 } else
451 smi_info->si_state = SI_NORMAL; 511 smi_info->si_state = SI_NORMAL;
452 }
453} 512}
454 513
455static void handle_transaction_done(struct smi_info *smi_info) 514static void handle_transaction_done(struct smi_info *smi_info)
@@ -459,7 +518,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
459 struct timeval t; 518 struct timeval t;
460 519
461 do_gettimeofday(&t); 520 do_gettimeofday(&t);
462 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); 521 printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
463#endif 522#endif
464 switch (smi_info->si_state) { 523 switch (smi_info->si_state) {
465 case SI_NORMAL: 524 case SI_NORMAL:
@@ -472,9 +531,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
472 smi_info->curr_msg->rsp, 531 smi_info->curr_msg->rsp,
473 IPMI_MAX_MSG_LENGTH); 532 IPMI_MAX_MSG_LENGTH);
474 533
475 /* Do this here because deliver_recv_msg() releases the 534 /*
476 lock, and a new message can be put in during the 535 * Do this here because deliver_recv_msg() releases the
477 time the lock is released. */ 536 * lock, and a new message can be put in during the
537 * time the lock is released.
538 */
478 msg = smi_info->curr_msg; 539 msg = smi_info->curr_msg;
479 smi_info->curr_msg = NULL; 540 smi_info->curr_msg = NULL;
480 deliver_recv_msg(smi_info, msg); 541 deliver_recv_msg(smi_info, msg);
@@ -488,12 +549,13 @@ static void handle_transaction_done(struct smi_info *smi_info)
488 /* We got the flags from the SMI, now handle them. */ 549 /* We got the flags from the SMI, now handle them. */
489 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 550 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
490 if (msg[2] != 0) { 551 if (msg[2] != 0) {
491 /* Error fetching flags, just give up for 552 /* Error fetching flags, just give up for now. */
492 now. */
493 smi_info->si_state = SI_NORMAL; 553 smi_info->si_state = SI_NORMAL;
494 } else if (len < 4) { 554 } else if (len < 4) {
495 /* Hmm, no flags. That's technically illegal, but 555 /*
496 don't use uninitialized data. */ 556 * Hmm, no flags. That's technically illegal, but
557 * don't use uninitialized data.
558 */
497 smi_info->si_state = SI_NORMAL; 559 smi_info->si_state = SI_NORMAL;
498 } else { 560 } else {
499 smi_info->msg_flags = msg[3]; 561 smi_info->msg_flags = msg[3];
@@ -530,9 +592,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
530 smi_info->curr_msg->rsp, 592 smi_info->curr_msg->rsp,
531 IPMI_MAX_MSG_LENGTH); 593 IPMI_MAX_MSG_LENGTH);
532 594
533 /* Do this here because deliver_recv_msg() releases the 595 /*
534 lock, and a new message can be put in during the 596 * Do this here because deliver_recv_msg() releases the
535 time the lock is released. */ 597 * lock, and a new message can be put in during the
598 * time the lock is released.
599 */
536 msg = smi_info->curr_msg; 600 msg = smi_info->curr_msg;
537 smi_info->curr_msg = NULL; 601 smi_info->curr_msg = NULL;
538 if (msg->rsp[2] != 0) { 602 if (msg->rsp[2] != 0) {
@@ -543,14 +607,14 @@ static void handle_transaction_done(struct smi_info *smi_info)
543 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; 607 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
544 handle_flags(smi_info); 608 handle_flags(smi_info);
545 } else { 609 } else {
546 spin_lock(&smi_info->count_lock); 610 smi_inc_stat(smi_info, events);
547 smi_info->events++; 611
548 spin_unlock(&smi_info->count_lock); 612 /*
549 613 * Do this before we deliver the message
550 /* Do this before we deliver the message 614 * because delivering the message releases the
551 because delivering the message releases the 615 * lock and something else can mess with the
552 lock and something else can mess with the 616 * state.
553 state. */ 617 */
554 handle_flags(smi_info); 618 handle_flags(smi_info);
555 619
556 deliver_recv_msg(smi_info, msg); 620 deliver_recv_msg(smi_info, msg);
@@ -566,9 +630,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
566 smi_info->curr_msg->rsp, 630 smi_info->curr_msg->rsp,
567 IPMI_MAX_MSG_LENGTH); 631 IPMI_MAX_MSG_LENGTH);
568 632
569 /* Do this here because deliver_recv_msg() releases the 633 /*
570 lock, and a new message can be put in during the 634 * Do this here because deliver_recv_msg() releases the
571 time the lock is released. */ 635 * lock, and a new message can be put in during the
636 * time the lock is released.
637 */
572 msg = smi_info->curr_msg; 638 msg = smi_info->curr_msg;
573 smi_info->curr_msg = NULL; 639 smi_info->curr_msg = NULL;
574 if (msg->rsp[2] != 0) { 640 if (msg->rsp[2] != 0) {
@@ -579,14 +645,14 @@ static void handle_transaction_done(struct smi_info *smi_info)
579 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; 645 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
580 handle_flags(smi_info); 646 handle_flags(smi_info);
581 } else { 647 } else {
582 spin_lock(&smi_info->count_lock); 648 smi_inc_stat(smi_info, incoming_messages);
583 smi_info->incoming_messages++; 649
584 spin_unlock(&smi_info->count_lock); 650 /*
585 651 * Do this before we deliver the message
586 /* Do this before we deliver the message 652 * because delivering the message releases the
587 because delivering the message releases the 653 * lock and something else can mess with the
588 lock and something else can mess with the 654 * state.
589 state. */ 655 */
590 handle_flags(smi_info); 656 handle_flags(smi_info);
591 657
592 deliver_recv_msg(smi_info, msg); 658 deliver_recv_msg(smi_info, msg);
@@ -674,69 +740,70 @@ static void handle_transaction_done(struct smi_info *smi_info)
674 } 740 }
675} 741}
676 742
677/* Called on timeouts and events. Timeouts should pass the elapsed 743/*
678 time, interrupts should pass in zero. Must be called with 744 * Called on timeouts and events. Timeouts should pass the elapsed
679 si_lock held and interrupts disabled. */ 745 * time, interrupts should pass in zero. Must be called with
746 * si_lock held and interrupts disabled.
747 */
680static enum si_sm_result smi_event_handler(struct smi_info *smi_info, 748static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
681 int time) 749 int time)
682{ 750{
683 enum si_sm_result si_sm_result; 751 enum si_sm_result si_sm_result;
684 752
685 restart: 753 restart:
686 /* There used to be a loop here that waited a little while 754 /*
687 (around 25us) before giving up. That turned out to be 755 * There used to be a loop here that waited a little while
688 pointless, the minimum delays I was seeing were in the 300us 756 * (around 25us) before giving up. That turned out to be
689 range, which is far too long to wait in an interrupt. So 757 * pointless, the minimum delays I was seeing were in the 300us
690 we just run until the state machine tells us something 758 * range, which is far too long to wait in an interrupt. So
691 happened or it needs a delay. */ 759 * we just run until the state machine tells us something
760 * happened or it needs a delay.
761 */
692 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); 762 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
693 time = 0; 763 time = 0;
694 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) 764 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
695 {
696 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 765 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
697 }
698 766
699 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) 767 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
700 { 768 smi_inc_stat(smi_info, complete_transactions);
701 spin_lock(&smi_info->count_lock);
702 smi_info->complete_transactions++;
703 spin_unlock(&smi_info->count_lock);
704 769
705 handle_transaction_done(smi_info); 770 handle_transaction_done(smi_info);
706 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 771 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
707 } 772 } else if (si_sm_result == SI_SM_HOSED) {
708 else if (si_sm_result == SI_SM_HOSED) 773 smi_inc_stat(smi_info, hosed_count);
709 {
710 spin_lock(&smi_info->count_lock);
711 smi_info->hosed_count++;
712 spin_unlock(&smi_info->count_lock);
713 774
714 /* Do this before return_hosed_msg, because that 775 /*
715 releases the lock. */ 776 * Do this before return_hosed_msg, because that
777 * releases the lock.
778 */
716 smi_info->si_state = SI_NORMAL; 779 smi_info->si_state = SI_NORMAL;
717 if (smi_info->curr_msg != NULL) { 780 if (smi_info->curr_msg != NULL) {
718 /* If we were handling a user message, format 781 /*
719 a response to send to the upper layer to 782 * If we were handling a user message, format
720 tell it about the error. */ 783 * a response to send to the upper layer to
784 * tell it about the error.
785 */
721 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); 786 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
722 } 787 }
723 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 788 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
724 } 789 }
725 790
726 /* We prefer handling attn over new messages. */ 791 /*
727 if (si_sm_result == SI_SM_ATTN) 792 * We prefer handling attn over new messages. But don't do
728 { 793 * this if there is not yet an upper layer to handle anything.
794 */
795 if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
729 unsigned char msg[2]; 796 unsigned char msg[2];
730 797
731 spin_lock(&smi_info->count_lock); 798 smi_inc_stat(smi_info, attentions);
732 smi_info->attentions++;
733 spin_unlock(&smi_info->count_lock);
734 799
735 /* Got an attn, send down a get message flags to see 800 /*
736 what's causing it. It would be better to handle 801 * Got an attn, send down a get message flags to see
737 this in the upper layer, but due to the way 802 * what's causing it. It would be better to handle
738 interrupts work with the SMI, that's not really 803 * this in the upper layer, but due to the way
739 possible. */ 804 * interrupts work with the SMI, that's not really
805 * possible.
806 */
740 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 807 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
741 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 808 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
742 809
@@ -748,20 +815,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
748 815
749 /* If we are currently idle, try to start the next message. */ 816 /* If we are currently idle, try to start the next message. */
750 if (si_sm_result == SI_SM_IDLE) { 817 if (si_sm_result == SI_SM_IDLE) {
751 spin_lock(&smi_info->count_lock); 818 smi_inc_stat(smi_info, idles);
752 smi_info->idles++;
753 spin_unlock(&smi_info->count_lock);
754 819
755 si_sm_result = start_next_msg(smi_info); 820 si_sm_result = start_next_msg(smi_info);
756 if (si_sm_result != SI_SM_IDLE) 821 if (si_sm_result != SI_SM_IDLE)
757 goto restart; 822 goto restart;
758 } 823 }
759 824
760 if ((si_sm_result == SI_SM_IDLE) 825 if ((si_sm_result == SI_SM_IDLE)
761 && (atomic_read(&smi_info->req_events))) 826 && (atomic_read(&smi_info->req_events))) {
762 { 827 /*
763 /* We are idle and the upper layer requested that I fetch 828 * We are idle and the upper layer requested that I fetch
764 events, so do so. */ 829 * events, so do so.
830 */
765 atomic_set(&smi_info->req_events, 0); 831 atomic_set(&smi_info->req_events, 0);
766 832
767 smi_info->curr_msg = ipmi_alloc_smi_msg(); 833 smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -803,56 +869,50 @@ static void sender(void *send_info,
803 return; 869 return;
804 } 870 }
805 871
806 spin_lock_irqsave(&(smi_info->msg_lock), flags);
807#ifdef DEBUG_TIMING 872#ifdef DEBUG_TIMING
808 do_gettimeofday(&t); 873 do_gettimeofday(&t);
809 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 874 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
810#endif 875#endif
811 876
812 if (smi_info->run_to_completion) { 877 if (smi_info->run_to_completion) {
813 /* If we are running to completion, then throw it in 878 /*
814 the list and run transactions until everything is 879 * If we are running to completion, then throw it in
815 clear. Priority doesn't matter here. */ 880 * the list and run transactions until everything is
881 * clear. Priority doesn't matter here.
882 */
883
884 /*
885 * Run to completion means we are single-threaded, no
886 * need for locks.
887 */
816 list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); 888 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
817 889
818 /* We have to release the msg lock and claim the smi
819 lock in this case, because of race conditions. */
820 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
821
822 spin_lock_irqsave(&(smi_info->si_lock), flags);
823 result = smi_event_handler(smi_info, 0); 890 result = smi_event_handler(smi_info, 0);
824 while (result != SI_SM_IDLE) { 891 while (result != SI_SM_IDLE) {
825 udelay(SI_SHORT_TIMEOUT_USEC); 892 udelay(SI_SHORT_TIMEOUT_USEC);
826 result = smi_event_handler(smi_info, 893 result = smi_event_handler(smi_info,
827 SI_SHORT_TIMEOUT_USEC); 894 SI_SHORT_TIMEOUT_USEC);
828 } 895 }
829 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
830 return; 896 return;
831 } else {
832 if (priority > 0) {
833 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
834 } else {
835 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
836 }
837 } 897 }
838 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
839 898
840 spin_lock_irqsave(&(smi_info->si_lock), flags); 899 spin_lock_irqsave(&smi_info->msg_lock, flags);
841 if ((smi_info->si_state == SI_NORMAL) 900 if (priority > 0)
842 && (smi_info->curr_msg == NULL)) 901 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
843 { 902 else
903 list_add_tail(&msg->link, &smi_info->xmit_msgs);
904 spin_unlock_irqrestore(&smi_info->msg_lock, flags);
905
906 spin_lock_irqsave(&smi_info->si_lock, flags);
907 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
844 start_next_msg(smi_info); 908 start_next_msg(smi_info);
845 } 909 spin_unlock_irqrestore(&smi_info->si_lock, flags);
846 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
847} 910}
848 911
849static void set_run_to_completion(void *send_info, int i_run_to_completion) 912static void set_run_to_completion(void *send_info, int i_run_to_completion)
850{ 913{
851 struct smi_info *smi_info = send_info; 914 struct smi_info *smi_info = send_info;
852 enum si_sm_result result; 915 enum si_sm_result result;
853 unsigned long flags;
854
855 spin_lock_irqsave(&(smi_info->si_lock), flags);
856 916
857 smi_info->run_to_completion = i_run_to_completion; 917 smi_info->run_to_completion = i_run_to_completion;
858 if (i_run_to_completion) { 918 if (i_run_to_completion) {
@@ -863,8 +923,6 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
863 SI_SHORT_TIMEOUT_USEC); 923 SI_SHORT_TIMEOUT_USEC);
864 } 924 }
865 } 925 }
866
867 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
868} 926}
869 927
870static int ipmi_thread(void *data) 928static int ipmi_thread(void *data)
@@ -878,9 +936,8 @@ static int ipmi_thread(void *data)
878 spin_lock_irqsave(&(smi_info->si_lock), flags); 936 spin_lock_irqsave(&(smi_info->si_lock), flags);
879 smi_result = smi_event_handler(smi_info, 0); 937 smi_result = smi_event_handler(smi_info, 0);
880 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 938 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
881 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { 939 if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
882 /* do nothing */ 940 ; /* do nothing */
883 }
884 else if (smi_result == SI_SM_CALL_WITH_DELAY) 941 else if (smi_result == SI_SM_CALL_WITH_DELAY)
885 schedule(); 942 schedule();
886 else 943 else
@@ -931,7 +988,7 @@ static void smi_timeout(unsigned long data)
931 spin_lock_irqsave(&(smi_info->si_lock), flags); 988 spin_lock_irqsave(&(smi_info->si_lock), flags);
932#ifdef DEBUG_TIMING 989#ifdef DEBUG_TIMING
933 do_gettimeofday(&t); 990 do_gettimeofday(&t);
934 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); 991 printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
935#endif 992#endif
936 jiffies_now = jiffies; 993 jiffies_now = jiffies;
937 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) 994 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
@@ -945,23 +1002,19 @@ static void smi_timeout(unsigned long data)
945 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 1002 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
946 /* Running with interrupts, only do long timeouts. */ 1003 /* Running with interrupts, only do long timeouts. */
947 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 1004 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
948 spin_lock_irqsave(&smi_info->count_lock, flags); 1005 smi_inc_stat(smi_info, long_timeouts);
949 smi_info->long_timeouts++;
950 spin_unlock_irqrestore(&smi_info->count_lock, flags);
951 goto do_add_timer; 1006 goto do_add_timer;
952 } 1007 }
953 1008
954 /* If the state machine asks for a short delay, then shorten 1009 /*
955 the timer timeout. */ 1010 * If the state machine asks for a short delay, then shorten
1011 * the timer timeout.
1012 */
956 if (smi_result == SI_SM_CALL_WITH_DELAY) { 1013 if (smi_result == SI_SM_CALL_WITH_DELAY) {
957 spin_lock_irqsave(&smi_info->count_lock, flags); 1014 smi_inc_stat(smi_info, short_timeouts);
958 smi_info->short_timeouts++;
959 spin_unlock_irqrestore(&smi_info->count_lock, flags);
960 smi_info->si_timer.expires = jiffies + 1; 1015 smi_info->si_timer.expires = jiffies + 1;
961 } else { 1016 } else {
962 spin_lock_irqsave(&smi_info->count_lock, flags); 1017 smi_inc_stat(smi_info, long_timeouts);
963 smi_info->long_timeouts++;
964 spin_unlock_irqrestore(&smi_info->count_lock, flags);
965 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 1018 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
966 } 1019 }
967 1020
@@ -979,13 +1032,11 @@ static irqreturn_t si_irq_handler(int irq, void *data)
979 1032
980 spin_lock_irqsave(&(smi_info->si_lock), flags); 1033 spin_lock_irqsave(&(smi_info->si_lock), flags);
981 1034
982 spin_lock(&smi_info->count_lock); 1035 smi_inc_stat(smi_info, interrupts);
983 smi_info->interrupts++;
984 spin_unlock(&smi_info->count_lock);
985 1036
986#ifdef DEBUG_TIMING 1037#ifdef DEBUG_TIMING
987 do_gettimeofday(&t); 1038 do_gettimeofday(&t);
988 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); 1039 printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
989#endif 1040#endif
990 smi_event_handler(smi_info, 0); 1041 smi_event_handler(smi_info, 0);
991 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1042 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
@@ -1028,7 +1079,7 @@ static int smi_start_processing(void *send_info,
1028 * The BT interface is efficient enough to not need a thread, 1079 * The BT interface is efficient enough to not need a thread,
1029 * and there is no need for a thread if we have interrupts. 1080 * and there is no need for a thread if we have interrupts.
1030 */ 1081 */
1031 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) 1082 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1032 enable = 1; 1083 enable = 1;
1033 1084
1034 if (enable) { 1085 if (enable) {
@@ -1054,8 +1105,7 @@ static void set_maintenance_mode(void *send_info, int enable)
1054 atomic_set(&smi_info->req_events, 0); 1105 atomic_set(&smi_info->req_events, 0);
1055} 1106}
1056 1107
1057static struct ipmi_smi_handlers handlers = 1108static struct ipmi_smi_handlers handlers = {
1058{
1059 .owner = THIS_MODULE, 1109 .owner = THIS_MODULE,
1060 .start_processing = smi_start_processing, 1110 .start_processing = smi_start_processing,
1061 .sender = sender, 1111 .sender = sender,
@@ -1065,8 +1115,10 @@ static struct ipmi_smi_handlers handlers =
1065 .poll = poll, 1115 .poll = poll,
1066}; 1116};
1067 1117
1068/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses, 1118/*
1069 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */ 1119 * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1120 * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS.
1121 */
1070 1122
1071static LIST_HEAD(smi_infos); 1123static LIST_HEAD(smi_infos);
1072static DEFINE_MUTEX(smi_infos_lock); 1124static DEFINE_MUTEX(smi_infos_lock);
@@ -1257,10 +1309,9 @@ static void port_cleanup(struct smi_info *info)
1257 int idx; 1309 int idx;
1258 1310
1259 if (addr) { 1311 if (addr) {
1260 for (idx = 0; idx < info->io_size; idx++) { 1312 for (idx = 0; idx < info->io_size; idx++)
1261 release_region(addr + idx * info->io.regspacing, 1313 release_region(addr + idx * info->io.regspacing,
1262 info->io.regsize); 1314 info->io.regsize);
1263 }
1264 } 1315 }
1265} 1316}
1266 1317
@@ -1274,8 +1325,10 @@ static int port_setup(struct smi_info *info)
1274 1325
1275 info->io_cleanup = port_cleanup; 1326 info->io_cleanup = port_cleanup;
1276 1327
1277 /* Figure out the actual inb/inw/inl/etc routine to use based 1328 /*
1278 upon the register size. */ 1329 * Figure out the actual inb/inw/inl/etc routine to use based
1330 * upon the register size.
1331 */
1279 switch (info->io.regsize) { 1332 switch (info->io.regsize) {
1280 case 1: 1333 case 1:
1281 info->io.inputb = port_inb; 1334 info->io.inputb = port_inb;
@@ -1290,17 +1343,18 @@ static int port_setup(struct smi_info *info)
1290 info->io.outputb = port_outl; 1343 info->io.outputb = port_outl;
1291 break; 1344 break;
1292 default: 1345 default:
1293 printk("ipmi_si: Invalid register size: %d\n", 1346 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
1294 info->io.regsize); 1347 info->io.regsize);
1295 return -EINVAL; 1348 return -EINVAL;
1296 } 1349 }
1297 1350
1298 /* Some BIOSes reserve disjoint I/O regions in their ACPI 1351 /*
1352 * Some BIOSes reserve disjoint I/O regions in their ACPI
1299 * tables. This causes problems when trying to register the 1353 * tables. This causes problems when trying to register the
1300 * entire I/O region. Therefore we must register each I/O 1354 * entire I/O region. Therefore we must register each I/O
1301 * port separately. 1355 * port separately.
1302 */ 1356 */
1303 for (idx = 0; idx < info->io_size; idx++) { 1357 for (idx = 0; idx < info->io_size; idx++) {
1304 if (request_region(addr + idx * info->io.regspacing, 1358 if (request_region(addr + idx * info->io.regspacing,
1305 info->io.regsize, DEVICE_NAME) == NULL) { 1359 info->io.regsize, DEVICE_NAME) == NULL) {
1306 /* Undo allocations */ 1360 /* Undo allocations */
@@ -1388,8 +1442,10 @@ static int mem_setup(struct smi_info *info)
1388 1442
1389 info->io_cleanup = mem_cleanup; 1443 info->io_cleanup = mem_cleanup;
1390 1444
1391 /* Figure out the actual readb/readw/readl/etc routine to use based 1445 /*
1392 upon the register size. */ 1446 * Figure out the actual readb/readw/readl/etc routine to use based
1447 * upon the register size.
1448 */
1393 switch (info->io.regsize) { 1449 switch (info->io.regsize) {
1394 case 1: 1450 case 1:
1395 info->io.inputb = intf_mem_inb; 1451 info->io.inputb = intf_mem_inb;
@@ -1410,16 +1466,18 @@ static int mem_setup(struct smi_info *info)
1410 break; 1466 break;
1411#endif 1467#endif
1412 default: 1468 default:
1413 printk("ipmi_si: Invalid register size: %d\n", 1469 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
1414 info->io.regsize); 1470 info->io.regsize);
1415 return -EINVAL; 1471 return -EINVAL;
1416 } 1472 }
1417 1473
1418 /* Calculate the total amount of memory to claim. This is an 1474 /*
1475 * Calculate the total amount of memory to claim. This is an
1419 * unusual looking calculation, but it avoids claiming any 1476 * unusual looking calculation, but it avoids claiming any
1420 * more memory than it has to. It will claim everything 1477 * more memory than it has to. It will claim everything
1421 * between the first address to the end of the last full 1478 * between the first address to the end of the last full
1422 * register. */ 1479 * register.
1480 */
1423 mapsize = ((info->io_size * info->io.regspacing) 1481 mapsize = ((info->io_size * info->io.regspacing)
1424 - (info->io.regspacing - info->io.regsize)); 1482 - (info->io.regspacing - info->io.regsize));
1425 1483
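
The mapsize expression above claims memory from the start of the first register through the end of the last register rather than io_size * regspacing bytes: with, say, two one-byte registers spaced four bytes apart, the registers sit at offsets 0 and 4, so only five bytes need to be claimed, not eight. A tiny sketch of that arithmetic (the example values are illustrative):

#include <stdio.h>

/* Bytes to claim: from the start of the first register to the end of
 * the last full register. */
static unsigned long mapsize(unsigned int io_size,
                             unsigned int regspacing,
                             unsigned int regsize)
{
        return (unsigned long)io_size * regspacing - (regspacing - regsize);
}

int main(void)
{
        /* Two 1-byte registers spaced 4 bytes apart: offsets 0 and 4,
         * so 5 bytes (0..4 inclusive) are claimed instead of 8. */
        printf("%lu\n", mapsize(2, 4, 1));
        return 0;
}
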
@@ -1749,9 +1807,11 @@ static __devinit void hardcode_find_bmc(void)
1749 1807
1750#include <linux/acpi.h> 1808#include <linux/acpi.h>
1751 1809
1752/* Once we get an ACPI failure, we don't try any more, because we go 1810/*
1753 through the tables sequentially. Once we don't find a table, there 1811 * Once we get an ACPI failure, we don't try any more, because we go
1754 are no more. */ 1812 * through the tables sequentially. Once we don't find a table, there
1813 * are no more.
1814 */
1755static int acpi_failure; 1815static int acpi_failure;
1756 1816
1757/* For GPE-type interrupts. */ 1817/* For GPE-type interrupts. */
@@ -1765,9 +1825,7 @@ static u32 ipmi_acpi_gpe(void *context)
1765 1825
1766 spin_lock_irqsave(&(smi_info->si_lock), flags); 1826 spin_lock_irqsave(&(smi_info->si_lock), flags);
1767 1827
1768 spin_lock(&smi_info->count_lock); 1828 smi_inc_stat(smi_info, interrupts);
1769 smi_info->interrupts++;
1770 spin_unlock(&smi_info->count_lock);
1771 1829
1772#ifdef DEBUG_TIMING 1830#ifdef DEBUG_TIMING
1773 do_gettimeofday(&t); 1831 do_gettimeofday(&t);
@@ -1816,7 +1874,8 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1816 1874
1817/* 1875/*
1818 * Defined at 1876 * Defined at
1819 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf 1877 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/
1878 * Docs/TechPapers/IA64/hpspmi.pdf
1820 */ 1879 */
1821struct SPMITable { 1880struct SPMITable {
1822 s8 Signature[4]; 1881 s8 Signature[4];
@@ -1838,14 +1897,18 @@ struct SPMITable {
1838 */ 1897 */
1839 u8 InterruptType; 1898 u8 InterruptType;
1840 1899
1841 /* If bit 0 of InterruptType is set, then this is the SCI 1900 /*
1842 interrupt in the GPEx_STS register. */ 1901 * If bit 0 of InterruptType is set, then this is the SCI
1902 * interrupt in the GPEx_STS register.
1903 */
1843 u8 GPE; 1904 u8 GPE;
1844 1905
1845 s16 Reserved; 1906 s16 Reserved;
1846 1907
1847 /* If bit 1 of InterruptType is set, then this is the I/O 1908 /*
1848 APIC/SAPIC interrupt. */ 1909 * If bit 1 of InterruptType is set, then this is the I/O
1910 * APIC/SAPIC interrupt.
1911 */
1849 u32 GlobalSystemInterrupt; 1912 u32 GlobalSystemInterrupt;
1850 1913
1851 /* The actual register address. */ 1914 /* The actual register address. */
@@ -1863,7 +1926,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1863 1926
1864 if (spmi->IPMIlegacy != 1) { 1927 if (spmi->IPMIlegacy != 1) {
1865 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); 1928 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1866 return -ENODEV; 1929 return -ENODEV;
1867 } 1930 }
1868 1931
1869 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 1932 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -1880,8 +1943,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1880 info->addr_source = "ACPI"; 1943 info->addr_source = "ACPI";
1881 1944
1882 /* Figure out the interface type. */ 1945 /* Figure out the interface type. */
1883 switch (spmi->InterfaceType) 1946 switch (spmi->InterfaceType) {
1884 {
1885 case 1: /* KCS */ 1947 case 1: /* KCS */
1886 info->si_type = SI_KCS; 1948 info->si_type = SI_KCS;
1887 break; 1949 break;
@@ -1929,7 +1991,8 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1929 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1991 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1930 } else { 1992 } else {
1931 kfree(info); 1993 kfree(info);
1932 printk("ipmi_si: Unknown ACPI I/O Address type\n"); 1994 printk(KERN_WARNING
1995 "ipmi_si: Unknown ACPI I/O Address type\n");
1933 return -EIO; 1996 return -EIO;
1934 } 1997 }
1935 info->io.addr_data = spmi->addr.address; 1998 info->io.addr_data = spmi->addr.address;
@@ -1963,8 +2026,7 @@ static __devinit void acpi_find_bmc(void)
1963#endif 2026#endif
1964 2027
1965#ifdef CONFIG_DMI 2028#ifdef CONFIG_DMI
1966struct dmi_ipmi_data 2029struct dmi_ipmi_data {
1967{
1968 u8 type; 2030 u8 type;
1969 u8 addr_space; 2031 u8 addr_space;
1970 unsigned long base_addr; 2032 unsigned long base_addr;
@@ -1989,11 +2051,10 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
1989 /* I/O */ 2051 /* I/O */
1990 base_addr &= 0xFFFE; 2052 base_addr &= 0xFFFE;
1991 dmi->addr_space = IPMI_IO_ADDR_SPACE; 2053 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1992 } 2054 } else
1993 else {
1994 /* Memory */ 2055 /* Memory */
1995 dmi->addr_space = IPMI_MEM_ADDR_SPACE; 2056 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1996 } 2057
1997 /* If bit 4 of byte 0x10 is set, then the lsb for the address 2058 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1998 is odd. */ 2059 is odd. */
1999 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); 2060 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
@@ -2002,7 +2063,7 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
2002 2063
2003 /* The top two bits of byte 0x10 hold the register spacing. */ 2064 /* The top two bits of byte 0x10 hold the register spacing. */
2004 reg_spacing = (data[0x10] & 0xC0) >> 6; 2065 reg_spacing = (data[0x10] & 0xC0) >> 6;
2005 switch(reg_spacing){ 2066 switch (reg_spacing) {
2006 case 0x00: /* Byte boundaries */ 2067 case 0x00: /* Byte boundaries */
2007 dmi->offset = 1; 2068 dmi->offset = 1;
2008 break; 2069 break;
@@ -2018,12 +2079,14 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
2018 } 2079 }
2019 } else { 2080 } else {
2020 /* Old DMI spec. */ 2081 /* Old DMI spec. */
2021 /* Note that technically, the lower bit of the base 2082 /*
2083 * Note that technically, the lower bit of the base
2022 * address should be 1 if the address is I/O and 0 if 2084 * address should be 1 if the address is I/O and 0 if
2023 * the address is in memory. So many systems get that 2085 * the address is in memory. So many systems get that
2024 * wrong (and all that I have seen are I/O) so we just 2086 * wrong (and all that I have seen are I/O) so we just
2025 * ignore that bit and assume I/O. Systems that use 2087 * ignore that bit and assume I/O. Systems that use
2026 * memory should use the newer spec, anyway. */ 2088 * memory should use the newer spec, anyway.
2089 */
2027 dmi->base_addr = base_addr & 0xfffe; 2090 dmi->base_addr = base_addr & 0xfffe;
2028 dmi->addr_space = IPMI_IO_ADDR_SPACE; 2091 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2029 dmi->offset = 1; 2092 dmi->offset = 1;
@@ -2230,13 +2293,13 @@ static struct pci_device_id ipmi_pci_devices[] = {
2230MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); 2293MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2231 2294
2232static struct pci_driver ipmi_pci_driver = { 2295static struct pci_driver ipmi_pci_driver = {
2233 .name = DEVICE_NAME, 2296 .name = DEVICE_NAME,
2234 .id_table = ipmi_pci_devices, 2297 .id_table = ipmi_pci_devices,
2235 .probe = ipmi_pci_probe, 2298 .probe = ipmi_pci_probe,
2236 .remove = __devexit_p(ipmi_pci_remove), 2299 .remove = __devexit_p(ipmi_pci_remove),
2237#ifdef CONFIG_PM 2300#ifdef CONFIG_PM
2238 .suspend = ipmi_pci_suspend, 2301 .suspend = ipmi_pci_suspend,
2239 .resume = ipmi_pci_resume, 2302 .resume = ipmi_pci_resume,
2240#endif 2303#endif
2241}; 2304};
2242#endif /* CONFIG_PCI */ 2305#endif /* CONFIG_PCI */
@@ -2306,7 +2369,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2306 info->io.addr_data, info->io.regsize, info->io.regspacing, 2369 info->io.addr_data, info->io.regsize, info->io.regspacing,
2307 info->irq); 2370 info->irq);
2308 2371
2309 dev->dev.driver_data = (void*) info; 2372 dev->dev.driver_data = (void *) info;
2310 2373
2311 return try_smi_init(info); 2374 return try_smi_init(info);
2312} 2375}
@@ -2319,14 +2382,16 @@ static int __devexit ipmi_of_remove(struct of_device *dev)
2319 2382
2320static struct of_device_id ipmi_match[] = 2383static struct of_device_id ipmi_match[] =
2321{ 2384{
2322 { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS }, 2385 { .type = "ipmi", .compatible = "ipmi-kcs",
2323 { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC }, 2386 .data = (void *)(unsigned long) SI_KCS },
2324 { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT }, 2387 { .type = "ipmi", .compatible = "ipmi-smic",
2388 .data = (void *)(unsigned long) SI_SMIC },
2389 { .type = "ipmi", .compatible = "ipmi-bt",
2390 .data = (void *)(unsigned long) SI_BT },
2325 {}, 2391 {},
2326}; 2392};
2327 2393
2328static struct of_platform_driver ipmi_of_platform_driver = 2394static struct of_platform_driver ipmi_of_platform_driver = {
2329{
2330 .name = "ipmi", 2395 .name = "ipmi",
2331 .match_table = ipmi_match, 2396 .match_table = ipmi_match,
2332 .probe = ipmi_of_probe, 2397 .probe = ipmi_of_probe,
@@ -2347,32 +2412,32 @@ static int try_get_dev_id(struct smi_info *smi_info)
2347 if (!resp) 2412 if (!resp)
2348 return -ENOMEM; 2413 return -ENOMEM;
2349 2414
2350 /* Do a Get Device ID command, since it comes back with some 2415 /*
2351 useful info. */ 2416 * Do a Get Device ID command, since it comes back with some
2417 * useful info.
2418 */
2352 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 2419 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2353 msg[1] = IPMI_GET_DEVICE_ID_CMD; 2420 msg[1] = IPMI_GET_DEVICE_ID_CMD;
2354 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 2421 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2355 2422
2356 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); 2423 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2357 for (;;) 2424 for (;;) {
2358 {
2359 if (smi_result == SI_SM_CALL_WITH_DELAY || 2425 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2360 smi_result == SI_SM_CALL_WITH_TICK_DELAY) { 2426 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2361 schedule_timeout_uninterruptible(1); 2427 schedule_timeout_uninterruptible(1);
2362 smi_result = smi_info->handlers->event( 2428 smi_result = smi_info->handlers->event(
2363 smi_info->si_sm, 100); 2429 smi_info->si_sm, 100);
2364 } 2430 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
2365 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2366 {
2367 smi_result = smi_info->handlers->event( 2431 smi_result = smi_info->handlers->event(
2368 smi_info->si_sm, 0); 2432 smi_info->si_sm, 0);
2369 } 2433 } else
2370 else
2371 break; 2434 break;
2372 } 2435 }
2373 if (smi_result == SI_SM_HOSED) { 2436 if (smi_result == SI_SM_HOSED) {
2374 /* We couldn't get the state machine to run, so whatever's at 2437 /*
2375 the port is probably not an IPMI SMI interface. */ 2438 * We couldn't get the state machine to run, so whatever's at
2439 * the port is probably not an IPMI SMI interface.
2440 */
2376 rv = -ENODEV; 2441 rv = -ENODEV;
2377 goto out; 2442 goto out;
2378 } 2443 }
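The loop above is the driver's basic "poll the state machine until it finishes" shape. Below is a stand-alone sketch of the same control flow; the enum values and the fake_event() stub are stand-ins for the real handlers->event() hook.

#include <stdio.h>

enum result { CALL_WITHOUT_DELAY, CALL_WITH_DELAY, TRANSACTION_COMPLETE, HOSED };

static enum result fake_event(long usec_since_last)
{
	static int steps;

	(void)usec_since_last;
	return (++steps < 3) ? CALL_WITH_DELAY : TRANSACTION_COMPLETE;
}

int main(void)
{
	enum result r = fake_event(0);

	for (;;) {
		if (r == CALL_WITH_DELAY)
			r = fake_event(100);	/* the driver sleeps a tick first */
		else if (r == CALL_WITHOUT_DELAY)
			r = fake_event(0);
		else
			break;			/* complete or hosed */
	}
	printf("final result: %d\n", (int)r);
	return r == HOSED ? 1 : 0;
}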
@@ -2405,30 +2470,28 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
2405 2470
2406 out += sprintf(out, "interrupts_enabled: %d\n", 2471 out += sprintf(out, "interrupts_enabled: %d\n",
2407 smi->irq && !smi->interrupt_disabled); 2472 smi->irq && !smi->interrupt_disabled);
2408 out += sprintf(out, "short_timeouts: %ld\n", 2473 out += sprintf(out, "short_timeouts: %u\n",
2409 smi->short_timeouts); 2474 smi_get_stat(smi, short_timeouts));
2410 out += sprintf(out, "long_timeouts: %ld\n", 2475 out += sprintf(out, "long_timeouts: %u\n",
2411 smi->long_timeouts); 2476 smi_get_stat(smi, long_timeouts));
2412 out += sprintf(out, "timeout_restarts: %ld\n", 2477 out += sprintf(out, "idles: %u\n",
2413 smi->timeout_restarts); 2478 smi_get_stat(smi, idles));
2414 out += sprintf(out, "idles: %ld\n", 2479 out += sprintf(out, "interrupts: %u\n",
2415 smi->idles); 2480 smi_get_stat(smi, interrupts));
2416 out += sprintf(out, "interrupts: %ld\n", 2481 out += sprintf(out, "attentions: %u\n",
2417 smi->interrupts); 2482 smi_get_stat(smi, attentions));
2418 out += sprintf(out, "attentions: %ld\n", 2483 out += sprintf(out, "flag_fetches: %u\n",
2419 smi->attentions); 2484 smi_get_stat(smi, flag_fetches));
2420 out += sprintf(out, "flag_fetches: %ld\n", 2485 out += sprintf(out, "hosed_count: %u\n",
2421 smi->flag_fetches); 2486 smi_get_stat(smi, hosed_count));
2422 out += sprintf(out, "hosed_count: %ld\n", 2487 out += sprintf(out, "complete_transactions: %u\n",
2423 smi->hosed_count); 2488 smi_get_stat(smi, complete_transactions));
2424 out += sprintf(out, "complete_transactions: %ld\n", 2489 out += sprintf(out, "events: %u\n",
2425 smi->complete_transactions); 2490 smi_get_stat(smi, events));
2426 out += sprintf(out, "events: %ld\n", 2491 out += sprintf(out, "watchdog_pretimeouts: %u\n",
2427 smi->events); 2492 smi_get_stat(smi, watchdog_pretimeouts));
2428 out += sprintf(out, "watchdog_pretimeouts: %ld\n", 2493 out += sprintf(out, "incoming_messages: %u\n",
2429 smi->watchdog_pretimeouts); 2494 smi_get_stat(smi, incoming_messages));
2430 out += sprintf(out, "incoming_messages: %ld\n",
2431 smi->incoming_messages);
2432 2495
2433 return out - page; 2496 return out - page;
2434} 2497}
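The right-hand column of this hunk replaces per-field long counters with an array of atomic counters read back through smi_get_stat(). A minimal user-space analogue of that pattern, using C11 atomics as a stand-in for the kernel's atomic_t (the enum members here are illustrative, not the driver's full list):

#include <stdatomic.h>
#include <stdio.h>

enum si_stat { STAT_SHORT_TIMEOUTS, STAT_LONG_TIMEOUTS, STAT_IDLES, SI_NUM_STATS };

static atomic_uint stats[SI_NUM_STATS];	/* zero-initialized, one slot per counter */

static void smi_inc_stat(enum si_stat s)
{
	atomic_fetch_add(&stats[s], 1);	/* no lock needed */
}

static unsigned smi_get_stat(enum si_stat s)
{
	return atomic_load(&stats[s]);
}

int main(void)
{
	smi_inc_stat(STAT_IDLES);
	smi_inc_stat(STAT_IDLES);
	printf("idles: %u\n", smi_get_stat(STAT_IDLES));
	return 0;
}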
@@ -2460,7 +2523,7 @@ static int param_read_proc(char *page, char **start, off_t off,
2460static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) 2523static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2461{ 2524{
2462 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | 2525 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2463 RECEIVE_MSG_AVAIL); 2526 RECEIVE_MSG_AVAIL);
2464 return 1; 2527 return 1;
2465} 2528}
2466 2529
@@ -2502,10 +2565,9 @@ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2502 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { 2565 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2503 smi_info->oem_data_avail_handler = 2566 smi_info->oem_data_avail_handler =
2504 oem_data_avail_to_receive_msg_avail; 2567 oem_data_avail_to_receive_msg_avail;
2505 } 2568 } else if (ipmi_version_major(id) < 1 ||
2506 else if (ipmi_version_major(id) < 1 || 2569 (ipmi_version_major(id) == 1 &&
2507 (ipmi_version_major(id) == 1 && 2570 ipmi_version_minor(id) < 5)) {
2508 ipmi_version_minor(id) < 5)) {
2509 smi_info->oem_data_avail_handler = 2571 smi_info->oem_data_avail_handler =
2510 oem_data_avail_to_receive_msg_avail; 2572 oem_data_avail_to_receive_msg_avail;
2511 } 2573 }
@@ -2597,8 +2659,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
2597static inline void wait_for_timer_and_thread(struct smi_info *smi_info) 2659static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2598{ 2660{
2599 if (smi_info->intf) { 2661 if (smi_info->intf) {
2600 /* The timer and thread are only running if the 2662 /*
2601 interface has been started up and registered. */ 2663 * The timer and thread are only running if the
2664 * interface has been started up and registered.
2665 */
2602 if (smi_info->thread != NULL) 2666 if (smi_info->thread != NULL)
2603 kthread_stop(smi_info->thread); 2667 kthread_stop(smi_info->thread);
2604 del_timer_sync(&smi_info->si_timer); 2668 del_timer_sync(&smi_info->si_timer);
@@ -2676,6 +2740,7 @@ static int is_new_interface(struct smi_info *info)
2676static int try_smi_init(struct smi_info *new_smi) 2740static int try_smi_init(struct smi_info *new_smi)
2677{ 2741{
2678 int rv; 2742 int rv;
2743 int i;
2679 2744
2680 if (new_smi->addr_source) { 2745 if (new_smi->addr_source) {
2681 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" 2746 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
@@ -2722,7 +2787,7 @@ static int try_smi_init(struct smi_info *new_smi)
2722 /* Allocate the state machine's data and initialize it. */ 2787 /* Allocate the state machine's data and initialize it. */
2723 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 2788 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2724 if (!new_smi->si_sm) { 2789 if (!new_smi->si_sm) {
2725 printk(" Could not allocate state machine memory\n"); 2790 printk(KERN_ERR "Could not allocate state machine memory\n");
2726 rv = -ENOMEM; 2791 rv = -ENOMEM;
2727 goto out_err; 2792 goto out_err;
2728 } 2793 }
@@ -2732,13 +2797,12 @@ static int try_smi_init(struct smi_info *new_smi)
2732 /* Now that we know the I/O size, we can set up the I/O. */ 2797 /* Now that we know the I/O size, we can set up the I/O. */
2733 rv = new_smi->io_setup(new_smi); 2798 rv = new_smi->io_setup(new_smi);
2734 if (rv) { 2799 if (rv) {
2735 printk(" Could not set up I/O space\n"); 2800 printk(KERN_ERR "Could not set up I/O space\n");
2736 goto out_err; 2801 goto out_err;
2737 } 2802 }
2738 2803
2739 spin_lock_init(&(new_smi->si_lock)); 2804 spin_lock_init(&(new_smi->si_lock));
2740 spin_lock_init(&(new_smi->msg_lock)); 2805 spin_lock_init(&(new_smi->msg_lock));
2741 spin_lock_init(&(new_smi->count_lock));
2742 2806
2743 /* Do low-level detection first. */ 2807 /* Do low-level detection first. */
2744 if (new_smi->handlers->detect(new_smi->si_sm)) { 2808 if (new_smi->handlers->detect(new_smi->si_sm)) {
@@ -2749,8 +2813,10 @@ static int try_smi_init(struct smi_info *new_smi)
2749 goto out_err; 2813 goto out_err;
2750 } 2814 }
2751 2815
2752 /* Attempt a get device id command. If it fails, we probably 2816 /*
2753 don't have a BMC here. */ 2817 * Attempt a get device id command. If it fails, we probably
2818 * don't have a BMC here.
2819 */
2754 rv = try_get_dev_id(new_smi); 2820 rv = try_get_dev_id(new_smi);
2755 if (rv) { 2821 if (rv) {
2756 if (new_smi->addr_source) 2822 if (new_smi->addr_source)
@@ -2767,22 +2833,28 @@ static int try_smi_init(struct smi_info *new_smi)
2767 new_smi->curr_msg = NULL; 2833 new_smi->curr_msg = NULL;
2768 atomic_set(&new_smi->req_events, 0); 2834 atomic_set(&new_smi->req_events, 0);
2769 new_smi->run_to_completion = 0; 2835 new_smi->run_to_completion = 0;
2836 for (i = 0; i < SI_NUM_STATS; i++)
2837 atomic_set(&new_smi->stats[i], 0);
2770 2838
2771 new_smi->interrupt_disabled = 0; 2839 new_smi->interrupt_disabled = 0;
2772 atomic_set(&new_smi->stop_operation, 0); 2840 atomic_set(&new_smi->stop_operation, 0);
2773 new_smi->intf_num = smi_num; 2841 new_smi->intf_num = smi_num;
2774 smi_num++; 2842 smi_num++;
2775 2843
2776 /* Start clearing the flags before we enable interrupts or the 2844 /*
2777 timer to avoid racing with the timer. */ 2845 * Start clearing the flags before we enable interrupts or the
2846 * timer to avoid racing with the timer.
2847 */
2778 start_clear_flags(new_smi); 2848 start_clear_flags(new_smi);
2779 /* IRQ is defined to be set when non-zero. */ 2849 /* IRQ is defined to be set when non-zero. */
2780 if (new_smi->irq) 2850 if (new_smi->irq)
2781 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; 2851 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2782 2852
2783 if (!new_smi->dev) { 2853 if (!new_smi->dev) {
2784 /* If we don't already have a device from something 2854 /*
2785 * else (like PCI), then register a new one. */ 2855 * If we don't already have a device from something
2856 * else (like PCI), then register a new one.
2857 */
2786 new_smi->pdev = platform_device_alloc("ipmi_si", 2858 new_smi->pdev = platform_device_alloc("ipmi_si",
2787 new_smi->intf_num); 2859 new_smi->intf_num);
2788 if (rv) { 2860 if (rv) {
@@ -2820,7 +2892,7 @@ static int try_smi_init(struct smi_info *new_smi)
2820 } 2892 }
2821 2893
2822 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", 2894 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2823 type_file_read_proc, NULL, 2895 type_file_read_proc,
2824 new_smi, THIS_MODULE); 2896 new_smi, THIS_MODULE);
2825 if (rv) { 2897 if (rv) {
2826 printk(KERN_ERR 2898 printk(KERN_ERR
@@ -2830,7 +2902,7 @@ static int try_smi_init(struct smi_info *new_smi)
2830 } 2902 }
2831 2903
2832 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", 2904 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2833 stat_file_read_proc, NULL, 2905 stat_file_read_proc,
2834 new_smi, THIS_MODULE); 2906 new_smi, THIS_MODULE);
2835 if (rv) { 2907 if (rv) {
2836 printk(KERN_ERR 2908 printk(KERN_ERR
@@ -2840,7 +2912,7 @@ static int try_smi_init(struct smi_info *new_smi)
2840 } 2912 }
2841 2913
2842 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", 2914 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2843 param_read_proc, NULL, 2915 param_read_proc,
2844 new_smi, THIS_MODULE); 2916 new_smi, THIS_MODULE);
2845 if (rv) { 2917 if (rv) {
2846 printk(KERN_ERR 2918 printk(KERN_ERR
@@ -2853,7 +2925,8 @@ static int try_smi_init(struct smi_info *new_smi)
2853 2925
2854 mutex_unlock(&smi_infos_lock); 2926 mutex_unlock(&smi_infos_lock);
2855 2927
2856 printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]); 2928 printk(KERN_INFO "IPMI %s interface initialized\n",
2929 si_to_str[new_smi->si_type]);
2857 2930
2858 return 0; 2931 return 0;
2859 2932
@@ -2868,9 +2941,11 @@ static int try_smi_init(struct smi_info *new_smi)
2868 if (new_smi->irq_cleanup) 2941 if (new_smi->irq_cleanup)
2869 new_smi->irq_cleanup(new_smi); 2942 new_smi->irq_cleanup(new_smi);
2870 2943
2871 /* Wait until we know that we are out of any interrupt 2944 /*
2872 handlers that might have been running before we freed 2945 * Wait until we know that we are out of any interrupt
2873 the interrupt. */ 2946 * handlers that might have been running before we freed
2947 * the interrupt.
2948 */
2874 synchronize_sched(); 2949 synchronize_sched();
2875 2950
2876 if (new_smi->si_sm) { 2951 if (new_smi->si_sm) {
@@ -2942,11 +3017,10 @@ static __devinit int init_ipmi_si(void)
2942 3017
2943#ifdef CONFIG_PCI 3018#ifdef CONFIG_PCI
2944 rv = pci_register_driver(&ipmi_pci_driver); 3019 rv = pci_register_driver(&ipmi_pci_driver);
2945 if (rv){ 3020 if (rv)
2946 printk(KERN_ERR 3021 printk(KERN_ERR
2947 "init_ipmi_si: Unable to register PCI driver: %d\n", 3022 "init_ipmi_si: Unable to register PCI driver: %d\n",
2948 rv); 3023 rv);
2949 }
2950#endif 3024#endif
2951 3025
2952#ifdef CONFIG_PPC_OF 3026#ifdef CONFIG_PPC_OF
@@ -2975,7 +3049,8 @@ static __devinit int init_ipmi_si(void)
2975 of_unregister_platform_driver(&ipmi_of_platform_driver); 3049 of_unregister_platform_driver(&ipmi_of_platform_driver);
2976#endif 3050#endif
2977 driver_unregister(&ipmi_driver); 3051 driver_unregister(&ipmi_driver);
2978 printk("ipmi_si: Unable to find any System Interface(s)\n"); 3052 printk(KERN_WARNING
3053 "ipmi_si: Unable to find any System Interface(s)\n");
2979 return -ENODEV; 3054 return -ENODEV;
2980 } else { 3055 } else {
2981 mutex_unlock(&smi_infos_lock); 3056 mutex_unlock(&smi_infos_lock);
@@ -2997,13 +3072,17 @@ static void cleanup_one_si(struct smi_info *to_clean)
2997 /* Tell the driver that we are shutting down. */ 3072 /* Tell the driver that we are shutting down. */
2998 atomic_inc(&to_clean->stop_operation); 3073 atomic_inc(&to_clean->stop_operation);
2999 3074
3000 /* Make sure the timer and thread are stopped and will not run 3075 /*
3001 again. */ 3076 * Make sure the timer and thread are stopped and will not run
3077 * again.
3078 */
3002 wait_for_timer_and_thread(to_clean); 3079 wait_for_timer_and_thread(to_clean);
3003 3080
3004 /* Timeouts are stopped, now make sure the interrupts are off 3081 /*
3005 for the device. A little tricky with locks to make sure 3082 * Timeouts are stopped, now make sure the interrupts are off
3006 there are no races. */ 3083 * for the device. A little tricky with locks to make sure
3084 * there are no races.
3085 */
3007 spin_lock_irqsave(&to_clean->si_lock, flags); 3086 spin_lock_irqsave(&to_clean->si_lock, flags);
3008 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3087 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3009 spin_unlock_irqrestore(&to_clean->si_lock, flags); 3088 spin_unlock_irqrestore(&to_clean->si_lock, flags);
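The drain loop above deliberately drops the lock before polling and retakes it before re-checking, since the poll path claims the same lock. A stand-alone sketch of that shape, with a pthread mutex standing in for the spinlock and a counter standing in for curr_msg/si_state:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;			/* pretend three messages are in flight */

static void poll_device(void)		/* stands in for the driver's poll routine */
{
	pthread_mutex_lock(&lock);
	if (pending)
		pending--;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_mutex_lock(&lock);
	while (pending) {
		pthread_mutex_unlock(&lock);	/* never poll while holding the lock */
		poll_device();
		sched_yield();
		pthread_mutex_lock(&lock);	/* retake before re-checking */
	}
	pthread_mutex_unlock(&lock);
	printf("interface drained, safe to shut off interrupts\n");
	return 0;
}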
@@ -3074,4 +3153,5 @@ module_exit(cleanup_ipmi_si);
3074 3153
3075MODULE_LICENSE("GPL"); 3154MODULE_LICENSE("GPL");
3076MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 3155MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3077MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces."); 3156MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
3157 " system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index 4b731b24dc16..df89f73475fb 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -34,22 +34,27 @@
34 * 675 Mass Ave, Cambridge, MA 02139, USA. 34 * 675 Mass Ave, Cambridge, MA 02139, USA.
35 */ 35 */
36 36
37/* This is defined by the state machines themselves, it is an opaque 37/*
38 data type for them to use. */ 38 * This is defined by the state machines themselves, it is an opaque
39 * data type for them to use.
40 */
39struct si_sm_data; 41struct si_sm_data;
40 42
41/* The structure for doing I/O in the state machine. The state 43/*
42 machine doesn't have the actual I/O routines, they are done through 44 * The structure for doing I/O in the state machine. The state
43 this interface. */ 45 * machine doesn't have the actual I/O routines, they are done through
44struct si_sm_io 46 * this interface.
45{ 47 */
48struct si_sm_io {
46 unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset); 49 unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
47 void (*outputb)(struct si_sm_io *io, 50 void (*outputb)(struct si_sm_io *io,
48 unsigned int offset, 51 unsigned int offset,
49 unsigned char b); 52 unsigned char b);
50 53
51 /* Generic info used by the actual handling routines, the 54 /*
52 state machine shouldn't touch these. */ 55 * Generic info used by the actual handling routines, the
56 * state machine shouldn't touch these.
57 */
53 void __iomem *addr; 58 void __iomem *addr;
54 int regspacing; 59 int regspacing;
55 int regsize; 60 int regsize;
@@ -59,53 +64,67 @@ struct si_sm_io
59}; 64};
60 65
61/* Results of SMI events. */ 66/* Results of SMI events. */
62enum si_sm_result 67enum si_sm_result {
63{
64 SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ 68 SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
65 SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ 69 SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */
66 SI_SM_CALL_WITH_TICK_DELAY, /* Delay at least 1 tick before calling again. */ 70 SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */
67 SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */ 71 SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
68 SI_SM_IDLE, /* The SM is in idle state. */ 72 SI_SM_IDLE, /* The SM is in idle state. */
69 SI_SM_HOSED, /* The hardware violated the state machine. */ 73 SI_SM_HOSED, /* The hardware violated the state machine. */
70 SI_SM_ATTN /* The hardware is asserting attn and the 74
71 state machine is idle. */ 75 /*
76 * The hardware is asserting attn and the state machine is
77 * idle.
78 */
79 SI_SM_ATTN
72}; 80};
73 81
74/* Handlers for the SMI state machine. */ 82/* Handlers for the SMI state machine. */
75struct si_sm_handlers 83struct si_sm_handlers {
76{ 84 /*
77 /* Put the version number of the state machine here so the 85 * Put the version number of the state machine here so the
78 upper layer can print it. */ 86 * upper layer can print it.
87 */
79 char *version; 88 char *version;
80 89
81 /* Initialize the data and return the amount of I/O space to 90 /*
82 reserve for the space. */ 91 * Initialize the data and return the amount of I/O space to
92 * reserve for the space.
93 */
83 unsigned int (*init_data)(struct si_sm_data *smi, 94 unsigned int (*init_data)(struct si_sm_data *smi,
84 struct si_sm_io *io); 95 struct si_sm_io *io);
85 96
86 /* Start a new transaction in the state machine. This will 97 /*
87 return -2 if the state machine is not idle, -1 if the size 98 * Start a new transaction in the state machine. This will
88 is invalid (too large or too small), or 0 if the transaction 99 * return -2 if the state machine is not idle, -1 if the size
89 is successfully completed. */ 100 * is invalid (too large or too small), or 0 if the transaction
101 * is successfully completed.
102 */
90 int (*start_transaction)(struct si_sm_data *smi, 103 int (*start_transaction)(struct si_sm_data *smi,
91 unsigned char *data, unsigned int size); 104 unsigned char *data, unsigned int size);
92 105
93 /* Return the results after the transaction. This will return 106 /*
94 -1 if the buffer is too small, zero if no transaction is 107 * Return the results after the transaction. This will return
95 present, or the actual length of the result data. */ 108 * -1 if the buffer is too small, zero if no transaction is
109 * present, or the actual length of the result data.
110 */
96 int (*get_result)(struct si_sm_data *smi, 111 int (*get_result)(struct si_sm_data *smi,
97 unsigned char *data, unsigned int length); 112 unsigned char *data, unsigned int length);
98 113
99 /* Call this periodically (for a polled interface) or upon 114 /*
100 receiving an interrupt (for an interrupt-driven interface). 115 * Call this periodically (for a polled interface) or upon
101 If interrupt driven, you should probably poll this 116 * receiving an interrupt (for an interrupt-driven interface).
102 periodically when not in idle state. This should be called 117 * If interrupt driven, you should probably poll this
103 with the time that passed since the last call, if it is 118 * periodically when not in idle state. This should be called
104 significant. Time is in microseconds. */ 119 * with the time that passed since the last call, if it is
120 * significant. Time is in microseconds.
121 */
105 enum si_sm_result (*event)(struct si_sm_data *smi, long time); 122 enum si_sm_result (*event)(struct si_sm_data *smi, long time);
106 123
107 /* Attempt to detect an SMI. Returns 0 on success or nonzero 124 /*
108 on failure. */ 125 * Attempt to detect an SMI. Returns 0 on success or nonzero
126 * on failure.
127 */
109 int (*detect)(struct si_sm_data *smi); 128 int (*detect)(struct si_sm_data *smi);
110 129
111 /* The interface is shutting down, so clean it up. */ 130 /* The interface is shutting down, so clean it up. */
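To make the handler table above concrete, here is a hedged sketch of how an upper layer drives such an interface: queue a request with start_transaction(), poll event() until the transaction is done, then copy the response out with get_result(). The struct sm and dummy_* handlers are placeholders, not the driver's real state machines.

#include <stdio.h>
#include <string.h>

struct sm { unsigned char buf[16]; unsigned len; int done; };

static int dummy_start(struct sm *sm, unsigned char *d, unsigned n)
{
	memcpy(sm->buf, d, n);		/* latch the request */
	sm->len = n;
	sm->done = 0;
	return 0;
}

static int dummy_event(struct sm *sm, long t)
{
	(void)t;
	sm->done = 1;			/* pretend the hardware finished */
	return 0;
}

static int dummy_result(struct sm *sm, unsigned char *d, unsigned n)
{
	if (n < sm->len)
		return -1;		/* caller's buffer too small */
	memcpy(d, sm->buf, sm->len);
	return (int)sm->len;
}

struct handlers {
	int (*start_transaction)(struct sm *, unsigned char *, unsigned);
	int (*event)(struct sm *, long);
	int (*get_result)(struct sm *, unsigned char *, unsigned);
};

int main(void)
{
	struct handlers h = { dummy_start, dummy_event, dummy_result };
	struct sm sm;
	unsigned char req[2] = { 0x18, 0x01 }, resp[16];

	h.start_transaction(&sm, req, sizeof(req));
	while (!sm.done)
		h.event(&sm, 0);
	printf("got %d response bytes\n", h.get_result(&sm, resp, sizeof(resp)));
	return 0;
}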
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index e64ea7d25d24..faed92971907 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -85,6 +85,7 @@ enum smic_states {
85/* SMIC Flags Register Bits */ 85/* SMIC Flags Register Bits */
86#define SMIC_RX_DATA_READY 0x80 86#define SMIC_RX_DATA_READY 0x80
87#define SMIC_TX_DATA_READY 0x40 87#define SMIC_TX_DATA_READY 0x40
88
88/* 89/*
89 * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by 90 * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
90 * a few systems, and then only by Systems Management 91 * a few systems, and then only by Systems Management
@@ -104,23 +105,22 @@ enum smic_states {
104#define EC_ILLEGAL_COMMAND 0x04 105#define EC_ILLEGAL_COMMAND 0x04
105#define EC_BUFFER_FULL 0x05 106#define EC_BUFFER_FULL 0x05
106 107
107struct si_sm_data 108struct si_sm_data {
108{
109 enum smic_states state; 109 enum smic_states state;
110 struct si_sm_io *io; 110 struct si_sm_io *io;
111 unsigned char write_data[MAX_SMIC_WRITE_SIZE]; 111 unsigned char write_data[MAX_SMIC_WRITE_SIZE];
112 int write_pos; 112 int write_pos;
113 int write_count; 113 int write_count;
114 int orig_write_count; 114 int orig_write_count;
115 unsigned char read_data[MAX_SMIC_READ_SIZE]; 115 unsigned char read_data[MAX_SMIC_READ_SIZE];
116 int read_pos; 116 int read_pos;
117 int truncated; 117 int truncated;
118 unsigned int error_retries; 118 unsigned int error_retries;
119 long smic_timeout; 119 long smic_timeout;
120}; 120};
121 121
122static unsigned int init_smic_data (struct si_sm_data *smic, 122static unsigned int init_smic_data(struct si_sm_data *smic,
123 struct si_sm_io *io) 123 struct si_sm_io *io)
124{ 124{
125 smic->state = SMIC_IDLE; 125 smic->state = SMIC_IDLE;
126 smic->io = io; 126 smic->io = io;
@@ -150,11 +150,10 @@ static int start_smic_transaction(struct si_sm_data *smic,
150 return IPMI_NOT_IN_MY_STATE_ERR; 150 return IPMI_NOT_IN_MY_STATE_ERR;
151 151
152 if (smic_debug & SMIC_DEBUG_MSG) { 152 if (smic_debug & SMIC_DEBUG_MSG) {
153 printk(KERN_INFO "start_smic_transaction -"); 153 printk(KERN_DEBUG "start_smic_transaction -");
154 for (i = 0; i < size; i ++) { 154 for (i = 0; i < size; i++)
155 printk (" %02x", (unsigned char) (data [i])); 155 printk(" %02x", (unsigned char) data[i]);
156 } 156 printk("\n");
157 printk ("\n");
158 } 157 }
159 smic->error_retries = 0; 158 smic->error_retries = 0;
160 memcpy(smic->write_data, data, size); 159 memcpy(smic->write_data, data, size);
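The debug path above gates a one-line hex dump of the outgoing message on a bit in smic_debug. A small stand-alone illustration of that gating; the flag names and values here are local stand-ins, not the driver's SMIC_DEBUG_* constants.

#include <stdio.h>

#define DBG_ENABLE	0x01
#define DBG_MSG		0x02

static unsigned debug = DBG_ENABLE | DBG_MSG;	/* a module parameter in the driver */

static void dump_msg(const char *tag, const unsigned char *d, unsigned n)
{
	unsigned i;

	if (!(debug & DBG_MSG))
		return;				/* dumping not enabled */
	printf("%s -", tag);
	for (i = 0; i < n; i++)
		printf(" %02x", d[i]);
	printf("\n");
}

int main(void)
{
	unsigned char req[] = { 0x18, 0x01 };

	dump_msg("start_transaction", req, sizeof(req));
	return 0;
}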
@@ -173,11 +172,10 @@ static int smic_get_result(struct si_sm_data *smic,
173 int i; 172 int i;
174 173
175 if (smic_debug & SMIC_DEBUG_MSG) { 174 if (smic_debug & SMIC_DEBUG_MSG) {
176 printk (KERN_INFO "smic_get result -"); 175 printk(KERN_DEBUG "smic_get result -");
177 for (i = 0; i < smic->read_pos; i ++) { 176 for (i = 0; i < smic->read_pos; i++)
178 printk (" %02x", (smic->read_data [i])); 177 printk(" %02x", smic->read_data[i]);
179 } 178 printk("\n");
180 printk ("\n");
181 } 179 }
182 if (length < smic->read_pos) { 180 if (length < smic->read_pos) {
183 smic->read_pos = length; 181 smic->read_pos = length;
@@ -223,8 +221,8 @@ static inline void write_smic_control(struct si_sm_data *smic,
223 smic->io->outputb(smic->io, 1, control); 221 smic->io->outputb(smic->io, 1, control);
224} 222}
225 223
226static inline void write_si_sm_data (struct si_sm_data *smic, 224static inline void write_si_sm_data(struct si_sm_data *smic,
227 unsigned char data) 225 unsigned char data)
228{ 226{
229 smic->io->outputb(smic->io, 0, data); 227 smic->io->outputb(smic->io, 0, data);
230} 228}
@@ -233,10 +231,9 @@ static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
233{ 231{
234 (smic->error_retries)++; 232 (smic->error_retries)++;
235 if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { 233 if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
236 if (smic_debug & SMIC_DEBUG_ENABLE) { 234 if (smic_debug & SMIC_DEBUG_ENABLE)
237 printk(KERN_WARNING 235 printk(KERN_WARNING
238 "ipmi_smic_drv: smic hosed: %s\n", reason); 236 "ipmi_smic_drv: smic hosed: %s\n", reason);
239 }
240 smic->state = SMIC_HOSED; 237 smic->state = SMIC_HOSED;
241 } else { 238 } else {
242 smic->write_count = smic->orig_write_count; 239 smic->write_count = smic->orig_write_count;
@@ -254,14 +251,14 @@ static inline void write_next_byte(struct si_sm_data *smic)
254 (smic->write_count)--; 251 (smic->write_count)--;
255} 252}
256 253
257static inline void read_next_byte (struct si_sm_data *smic) 254static inline void read_next_byte(struct si_sm_data *smic)
258{ 255{
259 if (smic->read_pos >= MAX_SMIC_READ_SIZE) { 256 if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
260 read_smic_data (smic); 257 read_smic_data(smic);
261 smic->truncated = 1; 258 smic->truncated = 1;
262 } else { 259 } else {
263 smic->read_data[smic->read_pos] = read_smic_data(smic); 260 smic->read_data[smic->read_pos] = read_smic_data(smic);
264 (smic->read_pos)++; 261 smic->read_pos++;
265 } 262 }
266} 263}
267 264
@@ -336,7 +333,7 @@ static inline void read_next_byte (struct si_sm_data *smic)
336 SMIC_SC_SMS_RD_END 0xC6 333 SMIC_SC_SMS_RD_END 0xC6
337*/ 334*/
338 335
339static enum si_sm_result smic_event (struct si_sm_data *smic, long time) 336static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
340{ 337{
341 unsigned char status; 338 unsigned char status;
342 unsigned char flags; 339 unsigned char flags;
@@ -347,13 +344,15 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
347 return SI_SM_HOSED; 344 return SI_SM_HOSED;
348 } 345 }
349 if (smic->state != SMIC_IDLE) { 346 if (smic->state != SMIC_IDLE) {
350 if (smic_debug & SMIC_DEBUG_STATES) { 347 if (smic_debug & SMIC_DEBUG_STATES)
351 printk(KERN_INFO 348 printk(KERN_DEBUG
352 "smic_event - smic->smic_timeout = %ld," 349 "smic_event - smic->smic_timeout = %ld,"
353 " time = %ld\n", 350 " time = %ld\n",
354 smic->smic_timeout, time); 351 smic->smic_timeout, time);
355 } 352 /*
356/* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */ 353 * FIXME: smic_event is sometimes called with time >
354 * SMIC_RETRY_TIMEOUT
355 */
357 if (time < SMIC_RETRY_TIMEOUT) { 356 if (time < SMIC_RETRY_TIMEOUT) {
358 smic->smic_timeout -= time; 357 smic->smic_timeout -= time;
359 if (smic->smic_timeout < 0) { 358 if (smic->smic_timeout < 0) {
@@ -366,9 +365,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
366 if (flags & SMIC_FLAG_BSY) 365 if (flags & SMIC_FLAG_BSY)
367 return SI_SM_CALL_WITH_DELAY; 366 return SI_SM_CALL_WITH_DELAY;
368 367
369 status = read_smic_status (smic); 368 status = read_smic_status(smic);
370 if (smic_debug & SMIC_DEBUG_STATES) 369 if (smic_debug & SMIC_DEBUG_STATES)
371 printk(KERN_INFO 370 printk(KERN_DEBUG
372 "smic_event - state = %d, flags = 0x%02x," 371 "smic_event - state = %d, flags = 0x%02x,"
373 " status = 0x%02x\n", 372 " status = 0x%02x\n",
374 smic->state, flags, status); 373 smic->state, flags, status);
@@ -377,9 +376,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
377 case SMIC_IDLE: 376 case SMIC_IDLE:
378 /* in IDLE we check for available messages */ 377 /* in IDLE we check for available messages */
379 if (flags & SMIC_SMS_DATA_AVAIL) 378 if (flags & SMIC_SMS_DATA_AVAIL)
380 {
381 return SI_SM_ATTN; 379 return SI_SM_ATTN;
382 }
383 return SI_SM_IDLE; 380 return SI_SM_IDLE;
384 381
385 case SMIC_START_OP: 382 case SMIC_START_OP:
@@ -391,7 +388,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
391 388
392 case SMIC_OP_OK: 389 case SMIC_OP_OK:
393 if (status != SMIC_SC_SMS_READY) { 390 if (status != SMIC_SC_SMS_READY) {
394 /* this should not happen */ 391 /* this should not happen */
395 start_error_recovery(smic, 392 start_error_recovery(smic,
396 "state = SMIC_OP_OK," 393 "state = SMIC_OP_OK,"
397 " status != SMIC_SC_SMS_READY"); 394 " status != SMIC_SC_SMS_READY");
@@ -411,8 +408,10 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
411 "status != SMIC_SC_SMS_WR_START"); 408 "status != SMIC_SC_SMS_WR_START");
412 return SI_SM_CALL_WITH_DELAY; 409 return SI_SM_CALL_WITH_DELAY;
413 } 410 }
414 /* we must not issue WR_(NEXT|END) unless 411 /*
415 TX_DATA_READY is set */ 412 * we must not issue WR_(NEXT|END) unless
413 * TX_DATA_READY is set
414 */
416 if (flags & SMIC_TX_DATA_READY) { 415 if (flags & SMIC_TX_DATA_READY) {
417 if (smic->write_count == 1) { 416 if (smic->write_count == 1) {
418 /* last byte */ 417 /* last byte */
@@ -424,10 +423,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
424 } 423 }
425 write_next_byte(smic); 424 write_next_byte(smic);
426 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 425 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
427 } 426 } else
428 else {
429 return SI_SM_CALL_WITH_DELAY; 427 return SI_SM_CALL_WITH_DELAY;
430 }
431 break; 428 break;
432 429
433 case SMIC_WRITE_NEXT: 430 case SMIC_WRITE_NEXT:
@@ -442,52 +439,48 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
442 if (smic->write_count == 1) { 439 if (smic->write_count == 1) {
443 write_smic_control(smic, SMIC_CC_SMS_WR_END); 440 write_smic_control(smic, SMIC_CC_SMS_WR_END);
444 smic->state = SMIC_WRITE_END; 441 smic->state = SMIC_WRITE_END;
445 } 442 } else {
446 else {
447 write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); 443 write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
448 smic->state = SMIC_WRITE_NEXT; 444 smic->state = SMIC_WRITE_NEXT;
449 } 445 }
450 write_next_byte(smic); 446 write_next_byte(smic);
451 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 447 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
452 } 448 } else
453 else {
454 return SI_SM_CALL_WITH_DELAY; 449 return SI_SM_CALL_WITH_DELAY;
455 }
456 break; 450 break;
457 451
458 case SMIC_WRITE_END: 452 case SMIC_WRITE_END:
459 if (status != SMIC_SC_SMS_WR_END) { 453 if (status != SMIC_SC_SMS_WR_END) {
460 start_error_recovery (smic, 454 start_error_recovery(smic,
461 "state = SMIC_WRITE_END, " 455 "state = SMIC_WRITE_END, "
462 "status != SMIC_SC_SMS_WR_END"); 456 "status != SMIC_SC_SMS_WR_END");
463 return SI_SM_CALL_WITH_DELAY; 457 return SI_SM_CALL_WITH_DELAY;
464 } 458 }
465 /* data register holds an error code */ 459 /* data register holds an error code */
466 data = read_smic_data(smic); 460 data = read_smic_data(smic);
467 if (data != 0) { 461 if (data != 0) {
468 if (smic_debug & SMIC_DEBUG_ENABLE) { 462 if (smic_debug & SMIC_DEBUG_ENABLE)
469 printk(KERN_INFO 463 printk(KERN_DEBUG
470 "SMIC_WRITE_END: data = %02x\n", data); 464 "SMIC_WRITE_END: data = %02x\n", data);
471 }
472 start_error_recovery(smic, 465 start_error_recovery(smic,
473 "state = SMIC_WRITE_END, " 466 "state = SMIC_WRITE_END, "
474 "data != SUCCESS"); 467 "data != SUCCESS");
475 return SI_SM_CALL_WITH_DELAY; 468 return SI_SM_CALL_WITH_DELAY;
476 } else { 469 } else
477 smic->state = SMIC_WRITE2READ; 470 smic->state = SMIC_WRITE2READ;
478 }
479 break; 471 break;
480 472
481 case SMIC_WRITE2READ: 473 case SMIC_WRITE2READ:
482 /* we must wait for RX_DATA_READY to be set before we 474 /*
483 can continue */ 475 * we must wait for RX_DATA_READY to be set before we
476 * can continue
477 */
484 if (flags & SMIC_RX_DATA_READY) { 478 if (flags & SMIC_RX_DATA_READY) {
485 write_smic_control(smic, SMIC_CC_SMS_RD_START); 479 write_smic_control(smic, SMIC_CC_SMS_RD_START);
486 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 480 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
487 smic->state = SMIC_READ_START; 481 smic->state = SMIC_READ_START;
488 } else { 482 } else
489 return SI_SM_CALL_WITH_DELAY; 483 return SI_SM_CALL_WITH_DELAY;
490 }
491 break; 484 break;
492 485
493 case SMIC_READ_START: 486 case SMIC_READ_START:
@@ -502,15 +495,16 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
502 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); 495 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
503 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 496 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
504 smic->state = SMIC_READ_NEXT; 497 smic->state = SMIC_READ_NEXT;
505 } else { 498 } else
506 return SI_SM_CALL_WITH_DELAY; 499 return SI_SM_CALL_WITH_DELAY;
507 }
508 break; 500 break;
509 501
510 case SMIC_READ_NEXT: 502 case SMIC_READ_NEXT:
511 switch (status) { 503 switch (status) {
512 /* smic tells us that this is the last byte to be read 504 /*
513 --> clean up */ 505 * smic tells us that this is the last byte to be read
506 * --> clean up
507 */
514 case SMIC_SC_SMS_RD_END: 508 case SMIC_SC_SMS_RD_END:
515 read_next_byte(smic); 509 read_next_byte(smic);
516 write_smic_control(smic, SMIC_CC_SMS_RD_END); 510 write_smic_control(smic, SMIC_CC_SMS_RD_END);
@@ -523,9 +517,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
523 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); 517 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
524 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 518 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
525 smic->state = SMIC_READ_NEXT; 519 smic->state = SMIC_READ_NEXT;
526 } else { 520 } else
527 return SI_SM_CALL_WITH_DELAY; 521 return SI_SM_CALL_WITH_DELAY;
528 }
529 break; 522 break;
530 default: 523 default:
531 start_error_recovery( 524 start_error_recovery(
@@ -546,10 +539,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
546 data = read_smic_data(smic); 539 data = read_smic_data(smic);
547 /* data register holds an error code */ 540 /* data register holds an error code */
548 if (data != 0) { 541 if (data != 0) {
549 if (smic_debug & SMIC_DEBUG_ENABLE) { 542 if (smic_debug & SMIC_DEBUG_ENABLE)
550 printk(KERN_INFO 543 printk(KERN_DEBUG
551 "SMIC_READ_END: data = %02x\n", data); 544 "SMIC_READ_END: data = %02x\n", data);
552 }
553 start_error_recovery(smic, 545 start_error_recovery(smic,
554 "state = SMIC_READ_END, " 546 "state = SMIC_READ_END, "
555 "data != SUCCESS"); 547 "data != SUCCESS");
@@ -565,7 +557,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
565 557
566 default: 558 default:
567 if (smic_debug & SMIC_DEBUG_ENABLE) { 559 if (smic_debug & SMIC_DEBUG_ENABLE) {
568 printk(KERN_WARNING "smic->state = %d\n", smic->state); 560 printk(KERN_DEBUG "smic->state = %d\n", smic->state);
569 start_error_recovery(smic, "state = UNKNOWN"); 561 start_error_recovery(smic, "state = UNKNOWN");
570 return SI_SM_CALL_WITH_DELAY; 562 return SI_SM_CALL_WITH_DELAY;
571 } 563 }
@@ -576,10 +568,12 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
576 568
577static int smic_detect(struct si_sm_data *smic) 569static int smic_detect(struct si_sm_data *smic)
578{ 570{
579 /* It's impossible for the SMIC flags register to be all 1's, 571 /*
580 (assuming a properly functioning, self-initialized BMC) 572 * It's impossible for the SMIC flags register to be all 1's,
581 but that's what you get from reading a bogus address, so we 573 * (assuming a properly functioning, self-initialized BMC)
582 test that first. */ 574 * but that's what you get from reading a bogus address, so we
575 * test that first.
576 */
583 if (read_smic_flags(smic) == 0xff) 577 if (read_smic_flags(smic) == 0xff)
584 return 1; 578 return 1;
585 579
@@ -595,8 +589,7 @@ static int smic_size(void)
595 return sizeof(struct si_sm_data); 589 return sizeof(struct si_sm_data);
596} 590}
597 591
598struct si_sm_handlers smic_smi_handlers = 592struct si_sm_handlers smic_smi_handlers = {
599{
600 .init_data = init_smic_data, 593 .init_data = init_smic_data,
601 .start_transaction = start_smic_transaction, 594 .start_transaction = start_smic_transaction,
602 .get_result = smic_get_result, 595 .get_result = smic_get_result,
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 8f45ca9235ad..1b9a87047817 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -54,13 +54,15 @@
54#include <asm/atomic.h> 54#include <asm/atomic.h>
55 55
56#ifdef CONFIG_X86 56#ifdef CONFIG_X86
57/* This is ugly, but I've determined that x86 is the only architecture 57/*
58 that can reasonably support the IPMI NMI watchdog timeout at this 58 * This is ugly, but I've determined that x86 is the only architecture
59 time. If another architecture adds this capability somehow, it 59 * that can reasonably support the IPMI NMI watchdog timeout at this
60 will have to be a somewhat different mechanism and I have no idea 60 * time. If another architecture adds this capability somehow, it
61 how it will work. So in the unlikely event that another 61 * will have to be a somewhat different mechanism and I have no idea
62 architecture supports this, we can figure out a good generic 62 * how it will work. So in the unlikely event that another
63 mechanism for it at that time. */ 63 * architecture supports this, we can figure out a good generic
64 * mechanism for it at that time.
65 */
64#include <asm/kdebug.h> 66#include <asm/kdebug.h>
65#define HAVE_DIE_NMI 67#define HAVE_DIE_NMI
66#endif 68#endif
@@ -95,9 +97,8 @@
95/* Operations that can be performed on a pretimeout. */ 97/* Operations that can be performed on a pretimeout. */
96#define WDOG_PREOP_NONE 0 98#define WDOG_PREOP_NONE 0
97#define WDOG_PREOP_PANIC 1 99#define WDOG_PREOP_PANIC 1
98#define WDOG_PREOP_GIVE_DATA 2 /* Cause data to be available to 100/* Cause data to be available to read. Doesn't work in NMI mode. */
99 read. Doesn't work in NMI 101#define WDOG_PREOP_GIVE_DATA 2
100 mode. */
101 102
102/* Actions to perform on a full timeout. */ 103/* Actions to perform on a full timeout. */
103#define WDOG_SET_TIMEOUT_ACT(byte, use) \ 104#define WDOG_SET_TIMEOUT_ACT(byte, use) \
@@ -108,8 +109,10 @@
108#define WDOG_TIMEOUT_POWER_DOWN 2 109#define WDOG_TIMEOUT_POWER_DOWN 2
109#define WDOG_TIMEOUT_POWER_CYCLE 3 110#define WDOG_TIMEOUT_POWER_CYCLE 3
110 111
111/* Byte 3 of the get command, byte 4 of the get response is the 112/*
112 pre-timeout in seconds. */ 113 * Byte 3 of the get command, byte 4 of the get response is the
114 * pre-timeout in seconds.
115 */
113 116
114/* Bits for setting byte 4 of the set command, byte 5 of the get response. */ 117/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
115#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1) 118#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1)
@@ -118,11 +121,13 @@
118#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4) 121#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4)
119#define WDOG_EXPIRE_CLEAR_OEM (1 << 5) 122#define WDOG_EXPIRE_CLEAR_OEM (1 << 5)
120 123
121/* Setting/getting the watchdog timer value. This is for bytes 5 and 124/*
122 6 (the timeout time) of the set command, and bytes 6 and 7 (the 125 * Setting/getting the watchdog timer value. This is for bytes 5 and
123 timeout time) and 8 and 9 (the current countdown value) of the 126 * 6 (the timeout time) of the set command, and bytes 6 and 7 (the
124 response. The timeout value is given in seconds (in the command it 127 * timeout time) and 8 and 9 (the current countdown value) of the
125 is 100ms intervals). */ 128 * response. The timeout value is given in seconds (in the command it
129 * is 100ms intervals).
130 */
126#define WDOG_SET_TIMEOUT(byte1, byte2, val) \ 131#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
127 (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8) 132 (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
128#define WDOG_GET_TIMEOUT(byte1, byte2) \ 133#define WDOG_GET_TIMEOUT(byte1, byte2) \
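The 100 ms encoding described by the comment and macros above is easy to sanity-check in a few lines of stand-alone C (a reimplementation for illustration, not the kernel macros themselves):

#include <stdio.h>

int main(void)
{
	int timeout = 300;			/* seconds */
	unsigned ticks = timeout * 10;		/* 100 ms intervals */
	unsigned char b1 = ticks & 0xff;	/* low byte of the pair */
	unsigned char b2 = ticks >> 8;		/* high byte of the pair */

	printf("bytes: %02x %02x -> %d s\n", b1, b2, ((b2 << 8) | b1) / 10);
	return 0;
}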
@@ -184,8 +189,10 @@ static int ipmi_set_timeout(int do_heartbeat);
184static void ipmi_register_watchdog(int ipmi_intf); 189static void ipmi_register_watchdog(int ipmi_intf);
185static void ipmi_unregister_watchdog(int ipmi_intf); 190static void ipmi_unregister_watchdog(int ipmi_intf);
186 191
187/* If true, the driver will start running as soon as it is configured 192/*
188 and ready. */ 193 * If true, the driver will start running as soon as it is configured
194 * and ready.
195 */
189static int start_now; 196static int start_now;
190 197
191static int set_param_int(const char *val, struct kernel_param *kp) 198static int set_param_int(const char *val, struct kernel_param *kp)
@@ -309,10 +316,12 @@ static int ipmi_ignore_heartbeat;
309/* Is someone using the watchdog? Only one user is allowed. */ 316/* Is someone using the watchdog? Only one user is allowed. */
310static unsigned long ipmi_wdog_open; 317static unsigned long ipmi_wdog_open;
311 318
312/* If set to 1, the heartbeat command will set the state to reset and 319/*
313 start the timer. The timer doesn't normally run when the driver is 320 * If set to 1, the heartbeat command will set the state to reset and
314 first opened until the heartbeat is set the first time, this 321 * start the timer. The timer doesn't normally run when the driver is
315 variable is used to accomplish this. */ 322 * first opened until the heartbeat is set the first time, this
323 * variable is used to accomplish this.
324 */
316static int ipmi_start_timer_on_heartbeat; 325static int ipmi_start_timer_on_heartbeat;
317 326
318/* IPMI version of the BMC. */ 327/* IPMI version of the BMC. */
@@ -329,10 +338,12 @@ static int nmi_handler_registered;
329 338
330static int ipmi_heartbeat(void); 339static int ipmi_heartbeat(void);
331 340
332/* We use a mutex to make sure that only one thing can send a set 341/*
333 timeout at one time, because we only have one copy of the data. 342 * We use a mutex to make sure that only one thing can send a set
334 The mutex is claimed when the set_timeout is sent and freed 343 * timeout at one time, because we only have one copy of the data.
335 when both messages are free. */ 344 * The mutex is claimed when the set_timeout is sent and freed
345 * when both messages are free.
346 */
336static atomic_t set_timeout_tofree = ATOMIC_INIT(0); 347static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
337static DEFINE_MUTEX(set_timeout_lock); 348static DEFINE_MUTEX(set_timeout_lock);
338static DECLARE_COMPLETION(set_timeout_wait); 349static DECLARE_COMPLETION(set_timeout_wait);
@@ -346,15 +357,13 @@ static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
346 if (atomic_dec_and_test(&set_timeout_tofree)) 357 if (atomic_dec_and_test(&set_timeout_tofree))
347 complete(&set_timeout_wait); 358 complete(&set_timeout_wait);
348} 359}
349static struct ipmi_smi_msg set_timeout_smi_msg = 360static struct ipmi_smi_msg set_timeout_smi_msg = {
350{
351 .done = set_timeout_free_smi 361 .done = set_timeout_free_smi
352}; 362};
353static struct ipmi_recv_msg set_timeout_recv_msg = 363static struct ipmi_recv_msg set_timeout_recv_msg = {
354{
355 .done = set_timeout_free_recv 364 .done = set_timeout_free_recv
356}; 365};
357 366
358static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, 367static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
359 struct ipmi_recv_msg *recv_msg, 368 struct ipmi_recv_msg *recv_msg,
360 int *send_heartbeat_now) 369 int *send_heartbeat_now)
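The set_timeout_tofree/completion pairing above is a small "count the outstanding messages, wake the waiter when the last one is freed" pattern. A single-threaded stand-in using C11 atomics (the kernel uses atomic_dec_and_test() and complete() instead):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int tofree;
static int waiter_released;

static void msg_freed(const char *which)
{
	if (atomic_fetch_sub(&tofree, 1) == 1) {	/* we dropped the last reference */
		waiter_released = 1;
		printf("%s: both messages free, waiter released\n", which);
	}
}

int main(void)
{
	atomic_store(&tofree, 2);	/* one SMI message + one receive message outstanding */
	msg_freed("smi_msg");
	msg_freed("recv_msg");
	return waiter_released ? 0 : 1;
}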
@@ -373,13 +382,14 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
373 WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); 382 WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
374 383
375 if ((ipmi_version_major > 1) 384 if ((ipmi_version_major > 1)
376 || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) 385 || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
377 {
378 /* This is an IPMI 1.5-only feature. */ 386 /* This is an IPMI 1.5-only feature. */
379 data[0] |= WDOG_DONT_STOP_ON_SET; 387 data[0] |= WDOG_DONT_STOP_ON_SET;
380 } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { 388 } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
381 /* In ipmi 1.0, setting the timer stops the watchdog, we 389 /*
382 need to start it back up again. */ 390 * In ipmi 1.0, setting the timer stops the watchdog, we
391 * need to start it back up again.
392 */
383 hbnow = 1; 393 hbnow = 1;
384 } 394 }
385 395
@@ -465,12 +475,10 @@ static void panic_recv_free(struct ipmi_recv_msg *msg)
465 atomic_dec(&panic_done_count); 475 atomic_dec(&panic_done_count);
466} 476}
467 477
468static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = 478static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = {
469{
470 .done = panic_smi_free 479 .done = panic_smi_free
471}; 480};
472static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = 481static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = {
473{
474 .done = panic_recv_free 482 .done = panic_recv_free
475}; 483};
476 484
@@ -480,8 +488,10 @@ static void panic_halt_ipmi_heartbeat(void)
480 struct ipmi_system_interface_addr addr; 488 struct ipmi_system_interface_addr addr;
481 int rv; 489 int rv;
482 490
483 /* Don't reset the timer if we have the timer turned off, that 491 /*
484 re-enables the watchdog. */ 492 * Don't reset the timer if we have the timer turned off, that
493 * re-enables the watchdog.
494 */
485 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) 495 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
486 return; 496 return;
487 497
@@ -505,19 +515,19 @@ static void panic_halt_ipmi_heartbeat(void)
505 atomic_add(2, &panic_done_count); 515 atomic_add(2, &panic_done_count);
506} 516}
507 517
508static struct ipmi_smi_msg panic_halt_smi_msg = 518static struct ipmi_smi_msg panic_halt_smi_msg = {
509{
510 .done = panic_smi_free 519 .done = panic_smi_free
511}; 520};
512static struct ipmi_recv_msg panic_halt_recv_msg = 521static struct ipmi_recv_msg panic_halt_recv_msg = {
513{
514 .done = panic_recv_free 522 .done = panic_recv_free
515}; 523};
516 524
517/* Special call, doesn't claim any locks. This is only to be called 525/*
518 at panic or halt time, in run-to-completion mode, when the caller 526 * Special call, doesn't claim any locks. This is only to be called
519 is the only CPU and the only thing that will be going is these IPMI 527 * at panic or halt time, in run-to-completion mode, when the caller
520 calls. */ 528 * is the only CPU and the only thing that will be going is these IPMI
529 * calls.
530 */
521static void panic_halt_ipmi_set_timeout(void) 531static void panic_halt_ipmi_set_timeout(void)
522{ 532{
523 int send_heartbeat_now; 533 int send_heartbeat_now;
@@ -540,10 +550,12 @@ static void panic_halt_ipmi_set_timeout(void)
540 ipmi_poll_interface(watchdog_user); 550 ipmi_poll_interface(watchdog_user);
541} 551}
542 552
543/* We use a semaphore to make sure that only one thing can send a 553/*
544 heartbeat at one time, because we only have one copy of the data. 554 * We use a mutex to make sure that only one thing can send a
545 The semaphore is claimed when the set_timeout is sent and freed 555 * heartbeat at one time, because we only have one copy of the data.
546 when both messages are free. */ 556 * The semaphore is claimed when the set_timeout is sent and freed
557 * when both messages are free.
558 */
547static atomic_t heartbeat_tofree = ATOMIC_INIT(0); 559static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
548static DEFINE_MUTEX(heartbeat_lock); 560static DEFINE_MUTEX(heartbeat_lock);
549static DECLARE_COMPLETION(heartbeat_wait); 561static DECLARE_COMPLETION(heartbeat_wait);
@@ -557,15 +569,13 @@ static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
557 if (atomic_dec_and_test(&heartbeat_tofree)) 569 if (atomic_dec_and_test(&heartbeat_tofree))
558 complete(&heartbeat_wait); 570 complete(&heartbeat_wait);
559} 571}
560static struct ipmi_smi_msg heartbeat_smi_msg = 572static struct ipmi_smi_msg heartbeat_smi_msg = {
561{
562 .done = heartbeat_free_smi 573 .done = heartbeat_free_smi
563}; 574};
564static struct ipmi_recv_msg heartbeat_recv_msg = 575static struct ipmi_recv_msg heartbeat_recv_msg = {
565{
566 .done = heartbeat_free_recv 576 .done = heartbeat_free_recv
567}; 577};
568 578
569static int ipmi_heartbeat(void) 579static int ipmi_heartbeat(void)
570{ 580{
571 struct kernel_ipmi_msg msg; 581 struct kernel_ipmi_msg msg;
@@ -580,10 +590,12 @@ static int ipmi_heartbeat(void)
580 ipmi_watchdog_state = action_val; 590 ipmi_watchdog_state = action_val;
581 return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); 591 return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
582 } else if (pretimeout_since_last_heartbeat) { 592 } else if (pretimeout_since_last_heartbeat) {
583 /* A pretimeout occurred, make sure we set the timeout. 593 /*
584 We don't want to set the action, though, we want to 594 * A pretimeout occurred, make sure we set the timeout.
585 leave that alone (thus it can't be combined with the 595 * We don't want to set the action, though, we want to
586 above operation). */ 596 * leave that alone (thus it can't be combined with the
597 * above operation).
598 */
587 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); 599 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
588 } 600 }
589 601
@@ -591,8 +603,10 @@ static int ipmi_heartbeat(void)
591 603
592 atomic_set(&heartbeat_tofree, 2); 604 atomic_set(&heartbeat_tofree, 2);
593 605
594 /* Don't reset the timer if we have the timer turned off, that 606 /*
595 re-enables the watchdog. */ 607 * Don't reset the timer if we have the timer turned off, that
608 * re-enables the watchdog.
609 */
596 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) { 610 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
597 mutex_unlock(&heartbeat_lock); 611 mutex_unlock(&heartbeat_lock);
598 return 0; 612 return 0;
@@ -625,10 +639,12 @@ static int ipmi_heartbeat(void)
625 wait_for_completion(&heartbeat_wait); 639 wait_for_completion(&heartbeat_wait);
626 640
627 if (heartbeat_recv_msg.msg.data[0] != 0) { 641 if (heartbeat_recv_msg.msg.data[0] != 0) {
628 /* Got an error in the heartbeat response. It was already 642 /*
629 reported in ipmi_wdog_msg_handler, but we should return 643 * Got an error in the heartbeat response. It was already
630 an error here. */ 644 * reported in ipmi_wdog_msg_handler, but we should return
631 rv = -EINVAL; 645 * an error here.
646 */
647 rv = -EINVAL;
632 } 648 }
633 649
634 mutex_unlock(&heartbeat_lock); 650 mutex_unlock(&heartbeat_lock);
@@ -636,8 +652,7 @@ static int ipmi_heartbeat(void)
636 return rv; 652 return rv;
637} 653}
638 654
639static struct watchdog_info ident = 655static struct watchdog_info ident = {
640{
641 .options = 0, /* WDIOF_SETTIMEOUT, */ 656 .options = 0, /* WDIOF_SETTIMEOUT, */
642 .firmware_version = 1, 657 .firmware_version = 1,
643 .identity = "IPMI" 658 .identity = "IPMI"
@@ -650,7 +665,7 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
650 int i; 665 int i;
651 int val; 666 int val;
652 667
653 switch(cmd) { 668 switch (cmd) {
654 case WDIOC_GETSUPPORT: 669 case WDIOC_GETSUPPORT:
655 i = copy_to_user(argp, &ident, sizeof(ident)); 670 i = copy_to_user(argp, &ident, sizeof(ident));
656 return i ? -EFAULT : 0; 671 return i ? -EFAULT : 0;
@@ -690,15 +705,13 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
690 i = copy_from_user(&val, argp, sizeof(int)); 705 i = copy_from_user(&val, argp, sizeof(int));
691 if (i) 706 if (i)
692 return -EFAULT; 707 return -EFAULT;
693 if (val & WDIOS_DISABLECARD) 708 if (val & WDIOS_DISABLECARD) {
694 {
695 ipmi_watchdog_state = WDOG_TIMEOUT_NONE; 709 ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
696 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); 710 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
697 ipmi_start_timer_on_heartbeat = 0; 711 ipmi_start_timer_on_heartbeat = 0;
698 } 712 }
699 713
700 if (val & WDIOS_ENABLECARD) 714 if (val & WDIOS_ENABLECARD) {
701 {
702 ipmi_watchdog_state = action_val; 715 ipmi_watchdog_state = action_val;
703 ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); 716 ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
704 } 717 }
@@ -724,13 +737,13 @@ static ssize_t ipmi_write(struct file *file,
724 int rv; 737 int rv;
725 738
726 if (len) { 739 if (len) {
727 if (!nowayout) { 740 if (!nowayout) {
728 size_t i; 741 size_t i;
729 742
730 /* In case it was set long ago */ 743 /* In case it was set long ago */
731 expect_close = 0; 744 expect_close = 0;
732 745
733 for (i = 0; i != len; i++) { 746 for (i = 0; i != len; i++) {
734 char c; 747 char c;
735 748
736 if (get_user(c, buf + i)) 749 if (get_user(c, buf + i))
@@ -758,15 +771,17 @@ static ssize_t ipmi_read(struct file *file,
758 if (count <= 0) 771 if (count <= 0)
759 return 0; 772 return 0;
760 773
761 /* Reading returns if the pretimeout has gone off, and it only does 774 /*
762 it once per pretimeout. */ 775 * Reading returns if the pretimeout has gone off, and it only does
776 * it once per pretimeout.
777 */
763 spin_lock(&ipmi_read_lock); 778 spin_lock(&ipmi_read_lock);
764 if (!data_to_read) { 779 if (!data_to_read) {
765 if (file->f_flags & O_NONBLOCK) { 780 if (file->f_flags & O_NONBLOCK) {
766 rv = -EAGAIN; 781 rv = -EAGAIN;
767 goto out; 782 goto out;
768 } 783 }
769 784
770 init_waitqueue_entry(&wait, current); 785 init_waitqueue_entry(&wait, current);
771 add_wait_queue(&read_q, &wait); 786 add_wait_queue(&read_q, &wait);
772 while (!data_to_read) { 787 while (!data_to_read) {
@@ -776,7 +791,7 @@ static ssize_t ipmi_read(struct file *file,
776 spin_lock(&ipmi_read_lock); 791 spin_lock(&ipmi_read_lock);
777 } 792 }
778 remove_wait_queue(&read_q, &wait); 793 remove_wait_queue(&read_q, &wait);
779 794
780 if (signal_pending(current)) { 795 if (signal_pending(current)) {
781 rv = -ERESTARTSYS; 796 rv = -ERESTARTSYS;
782 goto out; 797 goto out;
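For context, the blocking read above is what a user-space monitor sits in while waiting for a pretimeout notification. A hedged sketch of that consumer; it assumes the usual /dev/watchdog node with the driver in WDOG_PREOP_GIVE_DATA mode, and is illustrative only.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char c;
	int fd = open("/dev/watchdog", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	if (read(fd, &c, 1) == 1)	/* blocks until a pretimeout fires */
		fprintf(stderr, "IPMI watchdog pretimeout fired\n");
	close(fd);
	return 0;
}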
@@ -799,25 +814,27 @@ static ssize_t ipmi_read(struct file *file,
799 814
800static int ipmi_open(struct inode *ino, struct file *filep) 815static int ipmi_open(struct inode *ino, struct file *filep)
801{ 816{
802 switch (iminor(ino)) { 817 switch (iminor(ino)) {
803 case WATCHDOG_MINOR: 818 case WATCHDOG_MINOR:
804 if (test_and_set_bit(0, &ipmi_wdog_open)) 819 if (test_and_set_bit(0, &ipmi_wdog_open))
805 return -EBUSY; 820 return -EBUSY;
806 821
807 /* Don't start the timer now, let it start on the 822 /*
808 first heartbeat. */ 823 * Don't start the timer now, let it start on the
824 * first heartbeat.
825 */
809 ipmi_start_timer_on_heartbeat = 1; 826 ipmi_start_timer_on_heartbeat = 1;
810 return nonseekable_open(ino, filep); 827 return nonseekable_open(ino, filep);
811 828
812 default: 829 default:
813 return (-ENODEV); 830 return (-ENODEV);
814 } 831 }
815} 832}
816 833
817static unsigned int ipmi_poll(struct file *file, poll_table *wait) 834static unsigned int ipmi_poll(struct file *file, poll_table *wait)
818{ 835{
819 unsigned int mask = 0; 836 unsigned int mask = 0;
820 837
821 poll_wait(file, &read_q, wait); 838 poll_wait(file, &read_q, wait);
822 839
823 spin_lock(&ipmi_read_lock); 840 spin_lock(&ipmi_read_lock);
@@ -851,7 +868,7 @@ static int ipmi_close(struct inode *ino, struct file *filep)
851 clear_bit(0, &ipmi_wdog_open); 868 clear_bit(0, &ipmi_wdog_open);
852 } 869 }
853 870
854 ipmi_fasync (-1, filep, 0); 871 ipmi_fasync(-1, filep, 0);
855 expect_close = 0; 872 expect_close = 0;
856 873
857 return 0; 874 return 0;
@@ -882,7 +899,7 @@ static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
882 msg->msg.data[0], 899 msg->msg.data[0],
883 msg->msg.cmd); 900 msg->msg.cmd);
884 } 901 }
885 902
886 ipmi_free_recv_msg(msg); 903 ipmi_free_recv_msg(msg);
887} 904}
888 905
@@ -902,14 +919,14 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data)
902 } 919 }
903 } 920 }
904 921
905 /* On some machines, the heartbeat will give 922 /*
906 an error and not work unless we re-enable 923 * On some machines, the heartbeat will give an error and not
907 the timer. So do so. */ 924 * work unless we re-enable the timer. So do so.
925 */
908 pretimeout_since_last_heartbeat = 1; 926 pretimeout_since_last_heartbeat = 1;
909} 927}
910 928
911static struct ipmi_user_hndl ipmi_hndlrs = 929static struct ipmi_user_hndl ipmi_hndlrs = {
912{
913 .ipmi_recv_hndl = ipmi_wdog_msg_handler, 930 .ipmi_recv_hndl = ipmi_wdog_msg_handler,
914 .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler 931 .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
915}; 932};
@@ -949,8 +966,10 @@ static void ipmi_register_watchdog(int ipmi_intf)
949 int old_timeout = timeout; 966 int old_timeout = timeout;
950 int old_preop_val = preop_val; 967 int old_preop_val = preop_val;
951 968
952 /* Set the pretimeout to go off in a second and give 969 /*
953 ourselves plenty of time to stop the timer. */ 970 * Set the pretimeout to go off in a second and give
971 * ourselves plenty of time to stop the timer.
972 */
954 ipmi_watchdog_state = WDOG_TIMEOUT_RESET; 973 ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
955 preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */ 974 preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
956 pretimeout = 99; 975 pretimeout = 99;
@@ -974,7 +993,7 @@ static void ipmi_register_watchdog(int ipmi_intf)
974 " occur. The NMI pretimeout will" 993 " occur. The NMI pretimeout will"
975 " likely not work\n"); 994 " likely not work\n");
976 } 995 }
977 out_restore: 996 out_restore:
978 testing_nmi = 0; 997 testing_nmi = 0;
979 preop_val = old_preop_val; 998 preop_val = old_preop_val;
980 pretimeout = old_pretimeout; 999 pretimeout = old_pretimeout;
@@ -1009,9 +1028,11 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
1009 /* Make sure no one can call us any more. */ 1028 /* Make sure no one can call us any more. */
1010 misc_deregister(&ipmi_wdog_miscdev); 1029 misc_deregister(&ipmi_wdog_miscdev);
1011 1030
1012 /* Wait to make sure the message makes it out. The lower layer has 1031 /*
1013 pointers to our buffers, we want to make sure they are done before 1032 * Wait to make sure the message makes it out. The lower layer has
1014 we release our memory. */ 1033 * pointers to our buffers, we want to make sure they are done before
1034 * we release our memory.
1035 */
1015 while (atomic_read(&set_timeout_tofree)) 1036 while (atomic_read(&set_timeout_tofree))
1016 schedule_timeout_uninterruptible(1); 1037 schedule_timeout_uninterruptible(1);
1017 1038
@@ -1052,15 +1073,17 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
1052 return NOTIFY_STOP; 1073 return NOTIFY_STOP;
1053 } 1074 }
1054 1075
1055 /* If we are not expecting a timeout, ignore it. */ 1076 /* If we are not expecting a timeout, ignore it. */
1056 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) 1077 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
1057 return NOTIFY_OK; 1078 return NOTIFY_OK;
1058 1079
1059 if (preaction_val != WDOG_PRETIMEOUT_NMI) 1080 if (preaction_val != WDOG_PRETIMEOUT_NMI)
1060 return NOTIFY_OK; 1081 return NOTIFY_OK;
1061 1082
1062 /* If no one else handled the NMI, we assume it was the IPMI 1083 /*
1063 watchdog. */ 1084 * If no one else handled the NMI, we assume it was the IPMI
1085 * watchdog.
1086 */
1064 if (preop_val == WDOG_PREOP_PANIC) { 1087 if (preop_val == WDOG_PREOP_PANIC) {
1065 /* On some machines, the heartbeat will give 1088 /* On some machines, the heartbeat will give
1066 an error and not work unless we re-enable 1089 an error and not work unless we re-enable
@@ -1082,7 +1105,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
1082 unsigned long code, 1105 unsigned long code,
1083 void *unused) 1106 void *unused)
1084{ 1107{
1085 static int reboot_event_handled = 0; 1108 static int reboot_event_handled;
1086 1109
1087 if ((watchdog_user) && (!reboot_event_handled)) { 1110 if ((watchdog_user) && (!reboot_event_handled)) {
1088 /* Make sure we only do this once. */ 1111 /* Make sure we only do this once. */
@@ -1115,7 +1138,7 @@ static int wdog_panic_handler(struct notifier_block *this,
1115 unsigned long event, 1138 unsigned long event,
1116 void *unused) 1139 void *unused)
1117{ 1140{
1118 static int panic_event_handled = 0; 1141 static int panic_event_handled;
1119 1142
1120 /* On a panic, if we have a panic timeout, make sure to extend 1143 /* On a panic, if we have a panic timeout, make sure to extend
1121 the watchdog timer to a reasonable value to complete the 1144 the watchdog timer to a reasonable value to complete the
@@ -1125,7 +1148,7 @@ static int wdog_panic_handler(struct notifier_block *this,
1125 ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { 1148 ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
1126 /* Make sure we do this only once. */ 1149 /* Make sure we do this only once. */
1127 panic_event_handled = 1; 1150 panic_event_handled = 1;
1128 1151
1129 timeout = 255; 1152 timeout = 255;
1130 pretimeout = 0; 1153 pretimeout = 0;
1131 panic_halt_ipmi_set_timeout(); 1154 panic_halt_ipmi_set_timeout();
@@ -1151,8 +1174,7 @@ static void ipmi_smi_gone(int if_num)
1151 ipmi_unregister_watchdog(if_num); 1174 ipmi_unregister_watchdog(if_num);
1152} 1175}
1153 1176
1154static struct ipmi_smi_watcher smi_watcher = 1177static struct ipmi_smi_watcher smi_watcher = {
1155{
1156 .owner = THIS_MODULE, 1178 .owner = THIS_MODULE,
1157 .new_smi = ipmi_new_smi, 1179 .new_smi = ipmi_new_smi,
1158 .smi_gone = ipmi_smi_gone 1180 .smi_gone = ipmi_smi_gone
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index eba2883b630e..4f3cefa8eb0e 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -126,8 +126,8 @@
126#include <linux/delay.h> 126#include <linux/delay.h>
127#include <linux/ioport.h> 127#include <linux/ioport.h>
128 128
129#include <asm/uaccess.h> 129#include <linux/uaccess.h>
130#include <asm/io.h> 130#include <linux/io.h>
131#include <asm/system.h> 131#include <asm/system.h>
132 132
133#include <linux/pci.h> 133#include <linux/pci.h>
@@ -189,7 +189,7 @@ struct isi_board {
189 unsigned short status; 189 unsigned short status;
190 unsigned short port_status; /* each bit for each port */ 190 unsigned short port_status; /* each bit for each port */
191 unsigned short shift_count; 191 unsigned short shift_count;
192 struct isi_port * ports; 192 struct isi_port *ports;
193 signed char count; 193 signed char count;
194 spinlock_t card_lock; /* Card wide lock 11/5/00 -sameer */ 194 spinlock_t card_lock; /* Card wide lock 11/5/00 -sameer */
195 unsigned long flags; 195 unsigned long flags;
@@ -205,11 +205,11 @@ struct isi_port {
205 u16 channel; 205 u16 channel;
206 u16 status; 206 u16 status;
207 u16 closing_wait; 207 u16 closing_wait;
208 struct isi_board * card; 208 struct isi_board *card;
209 struct tty_struct * tty; 209 struct tty_struct *tty;
210 wait_queue_head_t close_wait; 210 wait_queue_head_t close_wait;
211 wait_queue_head_t open_wait; 211 wait_queue_head_t open_wait;
212 unsigned char * xmit_buf; 212 unsigned char *xmit_buf;
213 int xmit_head; 213 int xmit_head;
214 int xmit_tail; 214 int xmit_tail;
215 int xmit_cnt; 215 int xmit_cnt;
@@ -405,7 +405,7 @@ static void isicom_tx(unsigned long _data)
405 405
406 /* find next active board */ 406 /* find next active board */
407 card = (prev_card + 1) & 0x0003; 407 card = (prev_card + 1) & 0x0003;
408 while(count-- > 0) { 408 while (count-- > 0) {
409 if (isi_card[card].status & BOARD_ACTIVE) 409 if (isi_card[card].status & BOARD_ACTIVE)
410 break; 410 break;
411 card = (card + 1) & 0x0003; 411 card = (card + 1) & 0x0003;
@@ -428,7 +428,7 @@ static void isicom_tx(unsigned long _data)
428 if (retries >= 100) 428 if (retries >= 100)
429 goto unlock; 429 goto unlock;
430 430
431 for (;count > 0;count--, port++) { 431 for (; count > 0; count--, port++) {
432 /* port not active or tx disabled to force flow control */ 432 /* port not active or tx disabled to force flow control */
433 if (!(port->flags & ASYNC_INITIALIZED) || 433 if (!(port->flags & ASYNC_INITIALIZED) ||
434 !(port->status & ISI_TXOK)) 434 !(port->status & ISI_TXOK))
@@ -471,9 +471,10 @@ static void isicom_tx(unsigned long _data)
471 break; 471 break;
472 } 472 }
473 } 473 }
474 if (cnt <= 0) break; 474 if (cnt <= 0)
475 break;
475 word_count = cnt >> 1; 476 word_count = cnt >> 1;
476 outsw(base, port->xmit_buf+port->xmit_tail,word_count); 477 outsw(base, port->xmit_buf+port->xmit_tail, word_count);
477 port->xmit_tail = (port->xmit_tail 478 port->xmit_tail = (port->xmit_tail
478 + (word_count << 1)) & (SERIAL_XMIT_SIZE - 1); 479 + (word_count << 1)) & (SERIAL_XMIT_SIZE - 1);
479 txcount -= (word_count << 1); 480 txcount -= (word_count << 1);
@@ -556,7 +557,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
556 tty = port->tty; 557 tty = port->tty;
557 if (tty == NULL) { 558 if (tty == NULL) {
558 word_count = byte_count >> 1; 559 word_count = byte_count >> 1;
559 while(byte_count > 1) { 560 while (byte_count > 1) {
560 inw(base); 561 inw(base);
561 byte_count -= 2; 562 byte_count -= 2;
562 } 563 }
@@ -569,7 +570,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
569 570
570 if (header & 0x8000) { /* Status Packet */ 571 if (header & 0x8000) { /* Status Packet */
571 header = inw(base); 572 header = inw(base);
572 switch(header & 0xff) { 573 switch (header & 0xff) {
573 case 0: /* Change in EIA signals */ 574 case 0: /* Change in EIA signals */
574 if (port->flags & ASYNC_CHECK_CD) { 575 if (port->flags & ASYNC_CHECK_CD) {
575 if (port->status & ISI_DCD) { 576 if (port->status & ISI_DCD) {
@@ -656,7 +657,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
656 if (byte_count > 0) { 657 if (byte_count > 0) {
657 pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping " 658 pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping "
658 "bytes...\n", base, channel + 1); 659 "bytes...\n", base, channel + 1);
659 while(byte_count > 0) { /* drain out unread xtra data */ 660 /* drain out unread xtra data */
661 while (byte_count > 0) {
660 inw(base); 662 inw(base);
661 byte_count -= 2; 663 byte_count -= 2;
662 } 664 }
@@ -679,8 +681,11 @@ static void isicom_config_port(struct isi_port *port)
679 shift_count = card->shift_count; 681 shift_count = card->shift_count;
680 unsigned char flow_ctrl; 682 unsigned char flow_ctrl;
681 683
682 if (!(tty = port->tty) || !tty->termios) 684 tty = port->tty;
685
686 if (tty == NULL)
683 return; 687 return;
688 /* FIXME: Switch to new tty baud API */
684 baud = C_BAUD(tty); 689 baud = C_BAUD(tty);
685 if (baud & CBAUDEX) { 690 if (baud & CBAUDEX) {
686 baud &= ~CBAUDEX; 691 baud &= ~CBAUDEX;
@@ -706,7 +711,7 @@ static void isicom_config_port(struct isi_port *port)
706 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) 711 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
707 baud++; /* 57.6 Kbps */ 712 baud++; /* 57.6 Kbps */
708 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) 713 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
709 baud +=2; /* 115 Kbps */ 714 baud += 2; /* 115 Kbps */
710 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) 715 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
711 baud += 3; /* 230 kbps*/ 716 baud += 3; /* 230 kbps*/
712 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) 717 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
@@ -716,15 +721,14 @@ static void isicom_config_port(struct isi_port *port)
716 /* hang up */ 721 /* hang up */
717 drop_dtr(port); 722 drop_dtr(port);
718 return; 723 return;
719 } 724 } else
720 else
721 raise_dtr(port); 725 raise_dtr(port);
722 726
723 if (WaitTillCardIsFree(base) == 0) { 727 if (WaitTillCardIsFree(base) == 0) {
724 outw(0x8000 | (channel << shift_count) |0x03, base); 728 outw(0x8000 | (channel << shift_count) | 0x03, base);
725 outw(linuxb_to_isib[baud] << 8 | 0x03, base); 729 outw(linuxb_to_isib[baud] << 8 | 0x03, base);
726 channel_setup = 0; 730 channel_setup = 0;
727 switch(C_CSIZE(tty)) { 731 switch (C_CSIZE(tty)) {
728 case CS5: 732 case CS5:
729 channel_setup |= ISICOM_CS5; 733 channel_setup |= ISICOM_CS5;
730 break; 734 break;
@@ -767,7 +771,7 @@ static void isicom_config_port(struct isi_port *port)
767 flow_ctrl |= ISICOM_INITIATE_XONXOFF; 771 flow_ctrl |= ISICOM_INITIATE_XONXOFF;
768 772
769 if (WaitTillCardIsFree(base) == 0) { 773 if (WaitTillCardIsFree(base) == 0) {
770 outw(0x8000 | (channel << shift_count) |0x04, base); 774 outw(0x8000 | (channel << shift_count) | 0x04, base);
771 outw(flow_ctrl << 8 | 0x05, base); 775 outw(flow_ctrl << 8 | 0x05, base);
772 outw((STOP_CHAR(tty)) << 8 | (START_CHAR(tty)), base); 776 outw((STOP_CHAR(tty)) << 8 | (START_CHAR(tty)), base);
773 InterruptTheCard(base); 777 InterruptTheCard(base);
@@ -805,20 +809,17 @@ static int isicom_setup_port(struct isi_port *port)
805 struct isi_board *card = port->card; 809 struct isi_board *card = port->card;
806 unsigned long flags; 810 unsigned long flags;
807 811
808 if (port->flags & ASYNC_INITIALIZED) { 812 if (port->flags & ASYNC_INITIALIZED)
809 return 0; 813 return 0;
810 }
811 if (!port->xmit_buf) { 814 if (!port->xmit_buf) {
812 unsigned long page; 815 /* Relies on BKL */
813 816 unsigned long page = get_zeroed_page(GFP_KERNEL);
814 if (!(page = get_zeroed_page(GFP_KERNEL))) 817 if (page == 0)
815 return -ENOMEM; 818 return -ENOMEM;
816 819 if (port->xmit_buf)
817 if (port->xmit_buf) {
818 free_page(page); 820 free_page(page);
819 return -ERESTARTSYS; 821 else
820 } 822 port->xmit_buf = (unsigned char *) page;
821 port->xmit_buf = (unsigned char *) page;
822 } 823 }
823 824
824 spin_lock_irqsave(&card->card_lock, flags); 825 spin_lock_irqsave(&card->card_lock, flags);
@@ -949,21 +950,18 @@ static int isicom_open(struct tty_struct *tty, struct file *filp)
949 port->count++; 950 port->count++;
950 tty->driver_data = port; 951 tty->driver_data = port;
951 port->tty = tty; 952 port->tty = tty;
952 if ((error = isicom_setup_port(port))!=0) 953 error = isicom_setup_port(port);
953 return error; 954 if (error == 0)
954 if ((error = block_til_ready(tty, filp, port))!=0) 955 error = block_til_ready(tty, filp, port);
955 return error; 956 return error;
956
957 return 0;
958} 957}
959 958
960/* close et all */ 959/* close et all */
961 960
962static inline void isicom_shutdown_board(struct isi_board *bp) 961static inline void isicom_shutdown_board(struct isi_board *bp)
963{ 962{
964 if (bp->status & BOARD_ACTIVE) { 963 if (bp->status & BOARD_ACTIVE)
965 bp->status &= ~BOARD_ACTIVE; 964 bp->status &= ~BOARD_ACTIVE;
966 }
967} 965}
968 966
969/* card->lock HAS to be held */ 967/* card->lock HAS to be held */
@@ -1012,6 +1010,22 @@ static void isicom_shutdown_port(struct isi_port *port)
1012 } 1010 }
1013} 1011}
1014 1012
1013static void isicom_flush_buffer(struct tty_struct *tty)
1014{
1015 struct isi_port *port = tty->driver_data;
1016 struct isi_board *card = port->card;
1017 unsigned long flags;
1018
1019 if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer"))
1020 return;
1021
1022 spin_lock_irqsave(&card->card_lock, flags);
1023 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
1024 spin_unlock_irqrestore(&card->card_lock, flags);
1025
1026 tty_wakeup(tty);
1027}
1028
1015static void isicom_close(struct tty_struct *tty, struct file *filp) 1029static void isicom_close(struct tty_struct *tty, struct file *filp)
1016{ 1030{
1017 struct isi_port *port = tty->driver_data; 1031 struct isi_port *port = tty->driver_data;
@@ -1065,8 +1079,7 @@ static void isicom_close(struct tty_struct *tty, struct file *filp)
1065 isicom_shutdown_port(port); 1079 isicom_shutdown_port(port);
1066 spin_unlock_irqrestore(&card->card_lock, flags); 1080 spin_unlock_irqrestore(&card->card_lock, flags);
1067 1081
1068 if (tty->driver->flush_buffer) 1082 isicom_flush_buffer(tty);
1069 tty->driver->flush_buffer(tty);
1070 tty_ldisc_flush(tty); 1083 tty_ldisc_flush(tty);
1071 1084
1072 spin_lock_irqsave(&card->card_lock, flags); 1085 spin_lock_irqsave(&card->card_lock, flags);
@@ -1104,7 +1117,7 @@ static int isicom_write(struct tty_struct *tty, const unsigned char *buf,
1104 1117
1105 spin_lock_irqsave(&card->card_lock, flags); 1118 spin_lock_irqsave(&card->card_lock, flags);
1106 1119
1107 while(1) { 1120 while (1) {
1108 cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt 1121 cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt
1109 - 1, SERIAL_XMIT_SIZE - port->xmit_head)); 1122 - 1, SERIAL_XMIT_SIZE - port->xmit_head));
1110 if (cnt <= 0) 1123 if (cnt <= 0)
@@ -1125,28 +1138,29 @@ static int isicom_write(struct tty_struct *tty, const unsigned char *buf,
1125} 1138}
1126 1139
1127/* put_char et all */ 1140/* put_char et all */
1128static void isicom_put_char(struct tty_struct *tty, unsigned char ch) 1141static int isicom_put_char(struct tty_struct *tty, unsigned char ch)
1129{ 1142{
1130 struct isi_port *port = tty->driver_data; 1143 struct isi_port *port = tty->driver_data;
1131 struct isi_board *card = port->card; 1144 struct isi_board *card = port->card;
1132 unsigned long flags; 1145 unsigned long flags;
1133 1146
1134 if (isicom_paranoia_check(port, tty->name, "isicom_put_char")) 1147 if (isicom_paranoia_check(port, tty->name, "isicom_put_char"))
1135 return; 1148 return 0;
1136 1149
1137 if (!port->xmit_buf) 1150 if (!port->xmit_buf)
1138 return; 1151 return 0;
1139 1152
1140 spin_lock_irqsave(&card->card_lock, flags); 1153 spin_lock_irqsave(&card->card_lock, flags);
1141 if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { 1154 if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) {
1142 spin_unlock_irqrestore(&card->card_lock, flags); 1155 spin_unlock_irqrestore(&card->card_lock, flags);
1143 return; 1156 return 0;
1144 } 1157 }
1145 1158
1146 port->xmit_buf[port->xmit_head++] = ch; 1159 port->xmit_buf[port->xmit_head++] = ch;
1147 port->xmit_head &= (SERIAL_XMIT_SIZE - 1); 1160 port->xmit_head &= (SERIAL_XMIT_SIZE - 1);
1148 port->xmit_cnt++; 1161 port->xmit_cnt++;
1149 spin_unlock_irqrestore(&card->card_lock, flags); 1162 spin_unlock_irqrestore(&card->card_lock, flags);
1163 return 1;
1150} 1164}
1151 1165
1152/* flush_chars et all */ 1166/* flush_chars et all */
@@ -1258,6 +1272,8 @@ static int isicom_set_serial_info(struct isi_port *port,
1258 if (copy_from_user(&newinfo, info, sizeof(newinfo))) 1272 if (copy_from_user(&newinfo, info, sizeof(newinfo)))
1259 return -EFAULT; 1273 return -EFAULT;
1260 1274
1275 lock_kernel();
1276
1261 reconfig_port = ((port->flags & ASYNC_SPD_MASK) != 1277 reconfig_port = ((port->flags & ASYNC_SPD_MASK) !=
1262 (newinfo.flags & ASYNC_SPD_MASK)); 1278 (newinfo.flags & ASYNC_SPD_MASK));
1263 1279
@@ -1265,12 +1281,13 @@ static int isicom_set_serial_info(struct isi_port *port,
1265 if ((newinfo.close_delay != port->close_delay) || 1281 if ((newinfo.close_delay != port->close_delay) ||
1266 (newinfo.closing_wait != port->closing_wait) || 1282 (newinfo.closing_wait != port->closing_wait) ||
1267 ((newinfo.flags & ~ASYNC_USR_MASK) != 1283 ((newinfo.flags & ~ASYNC_USR_MASK) !=
1268 (port->flags & ~ASYNC_USR_MASK))) 1284 (port->flags & ~ASYNC_USR_MASK))) {
1285 unlock_kernel();
1269 return -EPERM; 1286 return -EPERM;
1270 port->flags = ((port->flags & ~ ASYNC_USR_MASK) | 1287 }
1288 port->flags = ((port->flags & ~ASYNC_USR_MASK) |
1271 (newinfo.flags & ASYNC_USR_MASK)); 1289 (newinfo.flags & ASYNC_USR_MASK));
1272 } 1290 } else {
1273 else {
1274 port->close_delay = newinfo.close_delay; 1291 port->close_delay = newinfo.close_delay;
1275 port->closing_wait = newinfo.closing_wait; 1292 port->closing_wait = newinfo.closing_wait;
1276 port->flags = ((port->flags & ~ASYNC_FLAGS) | 1293 port->flags = ((port->flags & ~ASYNC_FLAGS) |
@@ -1282,6 +1299,7 @@ static int isicom_set_serial_info(struct isi_port *port,
1282 isicom_config_port(port); 1299 isicom_config_port(port);
1283 spin_unlock_irqrestore(&port->card->card_lock, flags); 1300 spin_unlock_irqrestore(&port->card->card_lock, flags);
1284 } 1301 }
1302 unlock_kernel();
1285 return 0; 1303 return 0;
1286} 1304}
1287 1305
@@ -1290,6 +1308,7 @@ static int isicom_get_serial_info(struct isi_port *port,
1290{ 1308{
1291 struct serial_struct out_info; 1309 struct serial_struct out_info;
1292 1310
1311 lock_kernel();
1293 memset(&out_info, 0, sizeof(out_info)); 1312 memset(&out_info, 0, sizeof(out_info));
1294/* out_info.type = ? */ 1313/* out_info.type = ? */
1295 out_info.line = port - isi_ports; 1314 out_info.line = port - isi_ports;
@@ -1299,6 +1318,7 @@ static int isicom_get_serial_info(struct isi_port *port,
1299/* out_info.baud_base = ? */ 1318/* out_info.baud_base = ? */
1300 out_info.close_delay = port->close_delay; 1319 out_info.close_delay = port->close_delay;
1301 out_info.closing_wait = port->closing_wait; 1320 out_info.closing_wait = port->closing_wait;
1321 unlock_kernel();
1302 if (copy_to_user(info, &out_info, sizeof(out_info))) 1322 if (copy_to_user(info, &out_info, sizeof(out_info)))
1303 return -EFAULT; 1323 return -EFAULT;
1304 return 0; 1324 return 0;
@@ -1314,7 +1334,7 @@ static int isicom_ioctl(struct tty_struct *tty, struct file *filp,
1314 if (isicom_paranoia_check(port, tty->name, "isicom_ioctl")) 1334 if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
1315 return -ENODEV; 1335 return -ENODEV;
1316 1336
1317 switch(cmd) { 1337 switch (cmd) {
1318 case TCSBRK: 1338 case TCSBRK:
1319 retval = tty_check_change(tty); 1339 retval = tty_check_change(tty);
1320 if (retval) 1340 if (retval)
@@ -1331,19 +1351,6 @@ static int isicom_ioctl(struct tty_struct *tty, struct file *filp,
1331 tty_wait_until_sent(tty, 0); 1351 tty_wait_until_sent(tty, 0);
1332 isicom_send_break(port, arg ? arg * (HZ/10) : HZ/4); 1352 isicom_send_break(port, arg ? arg * (HZ/10) : HZ/4);
1333 return 0; 1353 return 0;
1334
1335 case TIOCGSOFTCAR:
1336 return put_user(C_CLOCAL(tty) ? 1 : 0,
1337 (unsigned long __user *)argp);
1338
1339 case TIOCSSOFTCAR:
1340 if (get_user(arg, (unsigned long __user *) argp))
1341 return -EFAULT;
1342 tty->termios->c_cflag =
1343 ((tty->termios->c_cflag & ~CLOCAL) |
1344 (arg ? CLOCAL : 0));
1345 return 0;
1346
1347 case TIOCGSERIAL: 1354 case TIOCGSERIAL:
1348 return isicom_get_serial_info(port, argp); 1355 return isicom_get_serial_info(port, argp);
1349 1356
@@ -1453,22 +1460,6 @@ static void isicom_hangup(struct tty_struct *tty)
1453 wake_up_interruptible(&port->open_wait); 1460 wake_up_interruptible(&port->open_wait);
1454} 1461}
1455 1462
1456/* flush_buffer et all */
1457static void isicom_flush_buffer(struct tty_struct *tty)
1458{
1459 struct isi_port *port = tty->driver_data;
1460 struct isi_board *card = port->card;
1461 unsigned long flags;
1462
1463 if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer"))
1464 return;
1465
1466 spin_lock_irqsave(&card->card_lock, flags);
1467 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
1468 spin_unlock_irqrestore(&card->card_lock, flags);
1469
1470 tty_wakeup(tty);
1471}
1472 1463
1473/* 1464/*
1474 * Driver init and deinit functions 1465 * Driver init and deinit functions
@@ -1592,7 +1583,7 @@ static int __devinit load_firmware(struct pci_dev *pdev,
1592 default: 1583 default:
1593 dev_err(&pdev->dev, "Unknown signature.\n"); 1584 dev_err(&pdev->dev, "Unknown signature.\n");
1594 goto end; 1585 goto end;
1595 } 1586 }
1596 1587
1597 retval = request_firmware(&fw, name, &pdev->dev); 1588 retval = request_firmware(&fw, name, &pdev->dev);
1598 if (retval) 1589 if (retval)
@@ -1620,7 +1611,8 @@ static int __devinit load_firmware(struct pci_dev *pdev,
1620 if (WaitTillCardIsFree(base)) 1611 if (WaitTillCardIsFree(base))
1621 goto errrelfw; 1612 goto errrelfw;
1622 1613
1623 if ((status = inw(base + 0x4)) != 0) { 1614 status = inw(base + 0x4);
1615 if (status != 0) {
1624 dev_warn(&pdev->dev, "Card%d rejected load header:\n" 1616 dev_warn(&pdev->dev, "Card%d rejected load header:\n"
1625 KERN_WARNING "Address:0x%x\n" 1617 KERN_WARNING "Address:0x%x\n"
1626 KERN_WARNING "Count:0x%x\n" 1618 KERN_WARNING "Count:0x%x\n"
@@ -1637,12 +1629,13 @@ static int __devinit load_firmware(struct pci_dev *pdev,
1637 if (WaitTillCardIsFree(base)) 1629 if (WaitTillCardIsFree(base))
1638 goto errrelfw; 1630 goto errrelfw;
1639 1631
1640 if ((status = inw(base + 0x4)) != 0) { 1632 status = inw(base + 0x4);
1633 if (status != 0) {
1641 dev_err(&pdev->dev, "Card%d got out of sync.Card " 1634 dev_err(&pdev->dev, "Card%d got out of sync.Card "
1642 "Status:0x%x\n", index + 1, status); 1635 "Status:0x%x\n", index + 1, status);
1643 goto errrelfw; 1636 goto errrelfw;
1644 } 1637 }
1645 } 1638 }
1646 1639
1647/* XXX: should we test it by reading it back and comparing with original like 1640/* XXX: should we test it by reading it back and comparing with original like
1648 * in load firmware package? */ 1641 * in load firmware package? */
@@ -1666,7 +1659,8 @@ static int __devinit load_firmware(struct pci_dev *pdev,
1666 if (WaitTillCardIsFree(base)) 1659 if (WaitTillCardIsFree(base))
1667 goto errrelfw; 1660 goto errrelfw;
1668 1661
1669 if ((status = inw(base + 0x4)) != 0) { 1662 status = inw(base + 0x4);
1663 if (status != 0) {
1670 dev_warn(&pdev->dev, "Card%d rejected verify header:\n" 1664 dev_warn(&pdev->dev, "Card%d rejected verify header:\n"
1671 KERN_WARNING "Address:0x%x\n" 1665 KERN_WARNING "Address:0x%x\n"
1672 KERN_WARNING "Count:0x%x\n" 1666 KERN_WARNING "Count:0x%x\n"
@@ -1699,7 +1693,8 @@ static int __devinit load_firmware(struct pci_dev *pdev,
1699 if (WaitTillCardIsFree(base)) 1693 if (WaitTillCardIsFree(base))
1700 goto errrelfw; 1694 goto errrelfw;
1701 1695
1702 if ((status = inw(base + 0x4)) != 0) { 1696 status = inw(base + 0x4);
1697 if (status != 0) {
1703 dev_err(&pdev->dev, "Card%d verify got out of sync. " 1698 dev_err(&pdev->dev, "Card%d verify got out of sync. "
1704 "Card Status:0x%x\n", index + 1, status); 1699 "Card Status:0x%x\n", index + 1, status);
1705 goto errrelfw; 1700 goto errrelfw;
@@ -1764,7 +1759,7 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
1764 index + 1); 1759 index + 1);
1765 retval = -EBUSY; 1760 retval = -EBUSY;
1766 goto errdec; 1761 goto errdec;
1767 } 1762 }
1768 1763
1769 retval = request_irq(board->irq, isicom_interrupt, 1764 retval = request_irq(board->irq, isicom_interrupt,
1770 IRQF_SHARED | IRQF_DISABLED, ISICOM_NAME, board); 1765 IRQF_SHARED | IRQF_DISABLED, ISICOM_NAME, board);
@@ -1818,7 +1813,7 @@ static int __init isicom_init(void)
1818 int retval, idx, channel; 1813 int retval, idx, channel;
1819 struct isi_port *port; 1814 struct isi_port *port;
1820 1815
1821 for(idx = 0; idx < BOARD_COUNT; idx++) { 1816 for (idx = 0; idx < BOARD_COUNT; idx++) {
1822 port = &isi_ports[idx * 16]; 1817 port = &isi_ports[idx * 16];
1823 isi_card[idx].ports = port; 1818 isi_card[idx].ports = port;
1824 spin_lock_init(&isi_card[idx].card_lock); 1819 spin_lock_init(&isi_card[idx].card_lock);
@@ -1832,7 +1827,7 @@ static int __init isicom_init(void)
1832 init_waitqueue_head(&port->open_wait); 1827 init_waitqueue_head(&port->open_wait);
1833 init_waitqueue_head(&port->close_wait); 1828 init_waitqueue_head(&port->close_wait);
1834 /* . . . */ 1829 /* . . . */
1835 } 1830 }
1836 isi_card[idx].base = 0; 1831 isi_card[idx].base = 0;
1837 isi_card[idx].irq = 0; 1832 isi_card[idx].irq = 0;
1838 } 1833 }
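
For readers skimming the isicom.c hunks above, the put_char conversion is the behavioural change worth noting: the driver now reports whether the byte was actually queued instead of silently dropping it when the transmit ring is full. A minimal stand-alone sketch of that ring-buffer pattern follows; the struct and function names (xmit_ring, xmit_put_char) are invented for the sketch and are not part of the driver.

/*
 * Sketch of the xmit-ring pattern used by isicom_put_char() above:
 * keep one slot free, mask the head index into the power-of-two ring,
 * and tell the caller whether the byte was accepted.
 */
#include <stdio.h>

#define XMIT_SIZE 4096                  /* must be a power of two */

struct xmit_ring {
	unsigned char buf[XMIT_SIZE];
	int head;                       /* next free slot */
	int tail;                       /* next byte to transmit */
	int cnt;                        /* bytes currently queued */
};

/* Returns 1 if the character was queued, 0 if the ring was full. */
static int xmit_put_char(struct xmit_ring *r, unsigned char ch)
{
	if (r->cnt >= XMIT_SIZE - 1)    /* keep one slot free, as the driver does */
		return 0;

	r->buf[r->head++] = ch;
	r->head &= XMIT_SIZE - 1;       /* wrap around */
	r->cnt++;
	return 1;
}

int main(void)
{
	struct xmit_ring ring = { .head = 0, .tail = 0, .cnt = 0 };
	int queued = 0;

	for (int i = 0; i < XMIT_SIZE + 10; i++)
		queued += xmit_put_char(&ring, 'x');

	printf("queued %d of %d bytes\n", queued, XMIT_SIZE + 10);
	return 0;
}
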
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index c645455c3fd1..7c8b62f162bf 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -1682,16 +1682,6 @@ static int stli_ioctl(struct tty_struct *tty, struct file *file, unsigned int cm
1682 rc = 0; 1682 rc = 0;
1683 1683
1684 switch (cmd) { 1684 switch (cmd) {
1685 case TIOCGSOFTCAR:
1686 rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
1687 (unsigned __user *) arg);
1688 break;
1689 case TIOCSSOFTCAR:
1690 if ((rc = get_user(ival, (unsigned __user *) arg)) == 0)
1691 tty->termios->c_cflag =
1692 (tty->termios->c_cflag & ~CLOCAL) |
1693 (ival ? CLOCAL : 0);
1694 break;
1695 case TIOCGSERIAL: 1685 case TIOCGSERIAL:
1696 rc = stli_getserial(portp, argp); 1686 rc = stli_getserial(portp, argp);
1697 break; 1687 break;
@@ -3267,7 +3257,7 @@ static int stli_initecp(struct stlibrd *brdp)
3267 */ 3257 */
3268 EBRDINIT(brdp); 3258 EBRDINIT(brdp);
3269 3259
3270 brdp->membase = ioremap(brdp->memaddr, brdp->memsize); 3260 brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize);
3271 if (brdp->membase == NULL) { 3261 if (brdp->membase == NULL) {
3272 retval = -ENOMEM; 3262 retval = -ENOMEM;
3273 goto err_reg; 3263 goto err_reg;
@@ -3424,7 +3414,7 @@ static int stli_initonb(struct stlibrd *brdp)
3424 */ 3414 */
3425 EBRDINIT(brdp); 3415 EBRDINIT(brdp);
3426 3416
3427 brdp->membase = ioremap(brdp->memaddr, brdp->memsize); 3417 brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize);
3428 if (brdp->membase == NULL) { 3418 if (brdp->membase == NULL) {
3429 retval = -ENOMEM; 3419 retval = -ENOMEM;
3430 goto err_reg; 3420 goto err_reg;
@@ -3675,7 +3665,7 @@ static int stli_eisamemprobe(struct stlibrd *brdp)
3675 */ 3665 */
3676 for (i = 0; (i < stli_eisamempsize); i++) { 3666 for (i = 0; (i < stli_eisamempsize); i++) {
3677 brdp->memaddr = stli_eisamemprobeaddrs[i]; 3667 brdp->memaddr = stli_eisamemprobeaddrs[i];
3678 brdp->membase = ioremap(brdp->memaddr, brdp->memsize); 3668 brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize);
3679 if (brdp->membase == NULL) 3669 if (brdp->membase == NULL)
3680 continue; 3670 continue;
3681 3671
@@ -4433,6 +4423,8 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
4433 done = 0; 4423 done = 0;
4434 rc = 0; 4424 rc = 0;
4435 4425
4426 lock_kernel();
4427
4436 switch (cmd) { 4428 switch (cmd) {
4437 case COM_GETPORTSTATS: 4429 case COM_GETPORTSTATS:
4438 rc = stli_getportstats(NULL, argp); 4430 rc = stli_getportstats(NULL, argp);
@@ -4455,6 +4447,7 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
4455 done++; 4447 done++;
4456 break; 4448 break;
4457 } 4449 }
4450 unlock_kernel();
4458 4451
4459 if (done) 4452 if (done)
4460 return rc; 4453 return rc;
@@ -4472,6 +4465,8 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
4472 if (brdp->state == 0) 4465 if (brdp->state == 0)
4473 return -ENODEV; 4466 return -ENODEV;
4474 4467
4468 lock_kernel();
4469
4475 switch (cmd) { 4470 switch (cmd) {
4476 case STL_BINTR: 4471 case STL_BINTR:
4477 EBRDINTR(brdp); 4472 EBRDINTR(brdp);
@@ -4494,6 +4489,7 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
4494 rc = -ENOIOCTLCMD; 4489 rc = -ENOIOCTLCMD;
4495 break; 4490 break;
4496 } 4491 }
4492 unlock_kernel();
4497 return rc; 4493 return rc;
4498} 4494}
4499 4495
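
The stli_memioctl() hunks above (like the isicom_set_serial_info() ones earlier) push lock_kernel()/unlock_kernel() down into the handler itself instead of relying on the caller to hold the Big Kernel Lock. The fragment below sketches that shape only; it assumes a 2.6-era kernel tree where <linux/smp_lock.h> still provides the BKL helpers, and the handler and command names are illustrative, not the drivers' own.

/* Sketch: BKL pushed down into the ioctl handler, 2.6-era API assumed. */
#include <linux/fs.h>
#include <linux/smp_lock.h>

static int example_memioctl(struct inode *ip, struct file *fp,
			    unsigned int cmd, unsigned long arg)
{
	int rc = -ENOIOCTLCMD;

	lock_kernel();          /* what the ioctl() caller used to take for us */
	switch (cmd) {
	case 0:
		rc = 0;         /* handle the command under the BKL */
		break;
	}
	unlock_kernel();        /* drop it on every return path */
	return rc;
}
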
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 60b934adea65..7f7e798c1384 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -110,6 +110,7 @@ const int max_vals[] = {
110const int NR_TYPES = ARRAY_SIZE(max_vals); 110const int NR_TYPES = ARRAY_SIZE(max_vals);
111 111
112struct kbd_struct kbd_table[MAX_NR_CONSOLES]; 112struct kbd_struct kbd_table[MAX_NR_CONSOLES];
113EXPORT_SYMBOL_GPL(kbd_table);
113static struct kbd_struct *kbd = kbd_table; 114static struct kbd_struct *kbd = kbd_table;
114 115
115struct vt_spawn_console vt_spawn_con = { 116struct vt_spawn_console vt_spawn_con = {
@@ -260,6 +261,7 @@ void kd_mksound(unsigned int hz, unsigned int ticks)
260 } else 261 } else
261 kd_nosound(0); 262 kd_nosound(0);
262} 263}
264EXPORT_SYMBOL(kd_mksound);
263 265
264/* 266/*
265 * Setting the keyboard rate. 267 * Setting the keyboard rate.
@@ -1230,7 +1232,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
1230 1232
1231 if (rep && 1233 if (rep &&
1232 (!vc_kbd_mode(kbd, VC_REPEAT) || 1234 (!vc_kbd_mode(kbd, VC_REPEAT) ||
1233 (tty && !L_ECHO(tty) && tty->driver->chars_in_buffer(tty)))) { 1235 (tty && !L_ECHO(tty) && tty_chars_in_buffer(tty)))) {
1234 /* 1236 /*
1235 * Don't repeat a key if the input buffers are not empty and the 1237 * Don't repeat a key if the input buffers are not empty and the
1236 * characters get aren't echoed locally. This makes key repeat 1238 * characters get aren't echoed locally. This makes key repeat
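
The keyboard.c hunk above replaces a direct tty->driver->chars_in_buffer(tty) call with the tty_chars_in_buffer() helper. The point of such a checked accessor is that callers no longer have to know whether the driver implements the hook at all. The stand-alone illustration below uses invented types (struct dev, dev_ops) purely to show that idea; it is not the kernel's tty code.

/* Sketch: a checked accessor over an optional method pointer. */
#include <stdio.h>

struct dev;                              /* forward declaration */

struct dev_ops {
	int (*chars_in_buffer)(struct dev *d);   /* optional hook */
};

struct dev {
	const struct dev_ops *ops;
	int queued;
};

/* Accessor: returns a sane default when the hook is missing. */
static int dev_chars_in_buffer(struct dev *d)
{
	if (d->ops && d->ops->chars_in_buffer)
		return d->ops->chars_in_buffer(d);
	return 0;
}

static int real_chars_in_buffer(struct dev *d)
{
	return d->queued;
}

static const struct dev_ops full_ops = { .chars_in_buffer = real_chars_in_buffer };
static const struct dev_ops bare_ops = { 0 };

int main(void)
{
	struct dev a = { .ops = &full_ops, .queued = 7 };
	struct dev b = { .ops = &bare_ops, .queued = 7 };

	printf("%d %d\n", dev_chars_in_buffer(&a), dev_chars_in_buffer(&b));
	return 0;
}
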
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index e83623ead441..934ffafedaea 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -364,6 +364,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
364 return 0; 364 return 0;
365} 365}
366 366
367#ifdef CONFIG_DEVKMEM
367static int mmap_kmem(struct file * file, struct vm_area_struct * vma) 368static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
368{ 369{
369 unsigned long pfn; 370 unsigned long pfn;
@@ -384,6 +385,7 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
384 vma->vm_pgoff = pfn; 385 vma->vm_pgoff = pfn;
385 return mmap_mem(file, vma); 386 return mmap_mem(file, vma);
386} 387}
388#endif
387 389
388#ifdef CONFIG_CRASH_DUMP 390#ifdef CONFIG_CRASH_DUMP
389/* 391/*
@@ -422,6 +424,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
422extern long vread(char *buf, char *addr, unsigned long count); 424extern long vread(char *buf, char *addr, unsigned long count);
423extern long vwrite(char *buf, char *addr, unsigned long count); 425extern long vwrite(char *buf, char *addr, unsigned long count);
424 426
427#ifdef CONFIG_DEVKMEM
425/* 428/*
426 * This function reads the *virtual* memory as seen by the kernel. 429 * This function reads the *virtual* memory as seen by the kernel.
427 */ 430 */
@@ -626,6 +629,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
626 *ppos = p; 629 *ppos = p;
627 return virtr + wrote; 630 return virtr + wrote;
628} 631}
632#endif
629 633
630#ifdef CONFIG_DEVPORT 634#ifdef CONFIG_DEVPORT
631static ssize_t read_port(struct file * file, char __user * buf, 635static ssize_t read_port(struct file * file, char __user * buf,
@@ -803,6 +807,7 @@ static const struct file_operations mem_fops = {
803 .get_unmapped_area = get_unmapped_area_mem, 807 .get_unmapped_area = get_unmapped_area_mem,
804}; 808};
805 809
810#ifdef CONFIG_DEVKMEM
806static const struct file_operations kmem_fops = { 811static const struct file_operations kmem_fops = {
807 .llseek = memory_lseek, 812 .llseek = memory_lseek,
808 .read = read_kmem, 813 .read = read_kmem,
@@ -811,6 +816,7 @@ static const struct file_operations kmem_fops = {
811 .open = open_kmem, 816 .open = open_kmem,
812 .get_unmapped_area = get_unmapped_area_mem, 817 .get_unmapped_area = get_unmapped_area_mem,
813}; 818};
819#endif
814 820
815static const struct file_operations null_fops = { 821static const struct file_operations null_fops = {
816 .llseek = null_lseek, 822 .llseek = null_lseek,
@@ -889,11 +895,13 @@ static int memory_open(struct inode * inode, struct file * filp)
889 filp->f_mapping->backing_dev_info = 895 filp->f_mapping->backing_dev_info =
890 &directly_mappable_cdev_bdi; 896 &directly_mappable_cdev_bdi;
891 break; 897 break;
898#ifdef CONFIG_DEVKMEM
892 case 2: 899 case 2:
893 filp->f_op = &kmem_fops; 900 filp->f_op = &kmem_fops;
894 filp->f_mapping->backing_dev_info = 901 filp->f_mapping->backing_dev_info =
895 &directly_mappable_cdev_bdi; 902 &directly_mappable_cdev_bdi;
896 break; 903 break;
904#endif
897 case 3: 905 case 3:
898 filp->f_op = &null_fops; 906 filp->f_op = &null_fops;
899 break; 907 break;
@@ -942,7 +950,9 @@ static const struct {
942 const struct file_operations *fops; 950 const struct file_operations *fops;
943} devlist[] = { /* list of minor devices */ 951} devlist[] = { /* list of minor devices */
944 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, 952 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
953#ifdef CONFIG_DEVKMEM
945 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, 954 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
955#endif
946 {3, "null", S_IRUGO | S_IWUGO, &null_fops}, 956 {3, "null", S_IRUGO | S_IWUGO, &null_fops},
947#ifdef CONFIG_DEVPORT 957#ifdef CONFIG_DEVPORT
948 {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops}, 958 {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 4d058dadbfcc..eaace0db0ff4 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -263,23 +263,26 @@ EXPORT_SYMBOL(misc_deregister);
263 263
264static int __init misc_init(void) 264static int __init misc_init(void)
265{ 265{
266#ifdef CONFIG_PROC_FS 266 int err;
267 struct proc_dir_entry *ent;
268 267
269 ent = create_proc_entry("misc", 0, NULL); 268#ifdef CONFIG_PROC_FS
270 if (ent) 269 proc_create("misc", 0, NULL, &misc_proc_fops);
271 ent->proc_fops = &misc_proc_fops;
272#endif 270#endif
273 misc_class = class_create(THIS_MODULE, "misc"); 271 misc_class = class_create(THIS_MODULE, "misc");
272 err = PTR_ERR(misc_class);
274 if (IS_ERR(misc_class)) 273 if (IS_ERR(misc_class))
275 return PTR_ERR(misc_class); 274 goto fail_remove;
276 275
277 if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) { 276 err = -EIO;
278 printk("unable to get major %d for misc devices\n", 277 if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
279 MISC_MAJOR); 278 goto fail_printk;
280 class_destroy(misc_class);
281 return -EIO;
282 }
283 return 0; 279 return 0;
280
281fail_printk:
282 printk("unable to get major %d for misc devices\n", MISC_MAJOR);
283 class_destroy(misc_class);
284fail_remove:
285 remove_proc_entry("misc", NULL);
286 return err;
284} 287}
285subsys_initcall(misc_init); 288subsys_initcall(misc_init);
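
The misc_init() rewrite above converts the inline cleanup into the usual staged goto unwinding: each setup step that can fail jumps to a label that undoes only the steps already completed, in reverse order. The runnable sketch below shows that shape with fake step functions invented for the example; it is not the misc driver itself.

/* Sketch: goto-based error unwinding for staged initialisation. */
#include <stdio.h>

static int step_ok(const char *what)  { printf("setup %s\n", what); return 0; }
static int step_bad(const char *what) { printf("setup %s failed\n", what); return -1; }
static void undo(const char *what)    { printf("undo  %s\n", what); }

static int example_init(void)
{
	int err;

	err = step_ok("proc entry");
	if (err)
		goto fail;

	err = step_ok("device class");
	if (err)
		goto fail_remove_proc;

	err = step_bad("char major");            /* pretend this step fails */
	if (err)
		goto fail_destroy_class;

	return 0;

fail_destroy_class:
	undo("device class");
fail_remove_proc:
	undo("proc entry");
fail:
	return err;
}

int main(void)
{
	return example_init() ? 1 : 0;
}
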
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index e60a74c66e3d..192961fd7173 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -30,6 +30,8 @@
30#include <linux/miscdevice.h> 30#include <linux/miscdevice.h>
31#include <linux/posix-timers.h> 31#include <linux/posix-timers.h>
32#include <linux/interrupt.h> 32#include <linux/interrupt.h>
33#include <linux/time.h>
34#include <linux/math64.h>
33 35
34#include <asm/uaccess.h> 36#include <asm/uaccess.h>
35#include <asm/sn/addrs.h> 37#include <asm/sn/addrs.h>
@@ -74,9 +76,8 @@ static const struct file_operations mmtimer_fops = {
74 * We only have comparison registers RTC1-4 currently available per 76 * We only have comparison registers RTC1-4 currently available per
75 * node. RTC0 is used by SAL. 77 * node. RTC0 is used by SAL.
76 */ 78 */
77#define NUM_COMPARATORS 3
78/* Check for an RTC interrupt pending */ 79/* Check for an RTC interrupt pending */
79static int inline mmtimer_int_pending(int comparator) 80static int mmtimer_int_pending(int comparator)
80{ 81{
81 if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & 82 if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
82 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator) 83 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
@@ -84,15 +85,16 @@ static int inline mmtimer_int_pending(int comparator)
84 else 85 else
85 return 0; 86 return 0;
86} 87}
88
87/* Clear the RTC interrupt pending bit */ 89/* Clear the RTC interrupt pending bit */
88static void inline mmtimer_clr_int_pending(int comparator) 90static void mmtimer_clr_int_pending(int comparator)
89{ 91{
90 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), 92 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
91 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator); 93 SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator);
92} 94}
93 95
94/* Setup timer on comparator RTC1 */ 96/* Setup timer on comparator RTC1 */
95static void inline mmtimer_setup_int_0(u64 expires) 97static void mmtimer_setup_int_0(int cpu, u64 expires)
96{ 98{
97 u64 val; 99 u64 val;
98 100
@@ -106,7 +108,7 @@ static void inline mmtimer_setup_int_0(u64 expires)
106 mmtimer_clr_int_pending(0); 108 mmtimer_clr_int_pending(0);
107 109
108 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | 110 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
109 ((u64)cpu_physical_id(smp_processor_id()) << 111 ((u64)cpu_physical_id(cpu) <<
110 SH_RTC1_INT_CONFIG_PID_SHFT); 112 SH_RTC1_INT_CONFIG_PID_SHFT);
111 113
112 /* Set configuration */ 114 /* Set configuration */
@@ -122,7 +124,7 @@ static void inline mmtimer_setup_int_0(u64 expires)
122} 124}
123 125
124/* Setup timer on comparator RTC2 */ 126/* Setup timer on comparator RTC2 */
125static void inline mmtimer_setup_int_1(u64 expires) 127static void mmtimer_setup_int_1(int cpu, u64 expires)
126{ 128{
127 u64 val; 129 u64 val;
128 130
@@ -133,7 +135,7 @@ static void inline mmtimer_setup_int_1(u64 expires)
133 mmtimer_clr_int_pending(1); 135 mmtimer_clr_int_pending(1);
134 136
135 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | 137 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) |
136 ((u64)cpu_physical_id(smp_processor_id()) << 138 ((u64)cpu_physical_id(cpu) <<
137 SH_RTC2_INT_CONFIG_PID_SHFT); 139 SH_RTC2_INT_CONFIG_PID_SHFT);
138 140
139 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val); 141 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val);
@@ -144,7 +146,7 @@ static void inline mmtimer_setup_int_1(u64 expires)
144} 146}
145 147
146/* Setup timer on comparator RTC3 */ 148/* Setup timer on comparator RTC3 */
147static void inline mmtimer_setup_int_2(u64 expires) 149static void mmtimer_setup_int_2(int cpu, u64 expires)
148{ 150{
149 u64 val; 151 u64 val;
150 152
@@ -155,7 +157,7 @@ static void inline mmtimer_setup_int_2(u64 expires)
155 mmtimer_clr_int_pending(2); 157 mmtimer_clr_int_pending(2);
156 158
157 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | 159 val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
158 ((u64)cpu_physical_id(smp_processor_id()) << 160 ((u64)cpu_physical_id(cpu) <<
159 SH_RTC3_INT_CONFIG_PID_SHFT); 161 SH_RTC3_INT_CONFIG_PID_SHFT);
160 162
161 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val); 163 HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val);
@@ -170,22 +172,22 @@ static void inline mmtimer_setup_int_2(u64 expires)
170 * in order to insure that the setup succeeds in a deterministic time frame. 172 * in order to insure that the setup succeeds in a deterministic time frame.
171 * It will check if the interrupt setup succeeded. 173 * It will check if the interrupt setup succeeded.
172 */ 174 */
173static int inline mmtimer_setup(int comparator, unsigned long expires) 175static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
174{ 176{
175 177
176 switch (comparator) { 178 switch (comparator) {
177 case 0: 179 case 0:
178 mmtimer_setup_int_0(expires); 180 mmtimer_setup_int_0(cpu, expires);
179 break; 181 break;
180 case 1: 182 case 1:
181 mmtimer_setup_int_1(expires); 183 mmtimer_setup_int_1(cpu, expires);
182 break; 184 break;
183 case 2: 185 case 2:
184 mmtimer_setup_int_2(expires); 186 mmtimer_setup_int_2(cpu, expires);
185 break; 187 break;
186 } 188 }
187 /* We might've missed our expiration time */ 189 /* We might've missed our expiration time */
188 if (rtc_time() < expires) 190 if (rtc_time() <= expires)
189 return 1; 191 return 1;
190 192
191 /* 193 /*
@@ -195,7 +197,7 @@ static int inline mmtimer_setup(int comparator, unsigned long expires)
195 return mmtimer_int_pending(comparator); 197 return mmtimer_int_pending(comparator);
196} 198}
197 199
198static int inline mmtimer_disable_int(long nasid, int comparator) 200static int mmtimer_disable_int(long nasid, int comparator)
199{ 201{
200 switch (comparator) { 202 switch (comparator) {
201 case 0: 203 case 0:
@@ -216,18 +218,124 @@ static int inline mmtimer_disable_int(long nasid, int comparator)
216 return 0; 218 return 0;
217} 219}
218 220
219#define TIMER_OFF 0xbadcabLL 221#define COMPARATOR 1 /* The comparator to use */
220 222
221/* There is one of these for each comparator */ 223#define TIMER_OFF 0xbadcabLL /* Timer is not setup */
222typedef struct mmtimer { 224#define TIMER_SET 0 /* Comparator is set for this timer */
223 spinlock_t lock ____cacheline_aligned; 225
226/* There is one of these for each timer */
227struct mmtimer {
228 struct rb_node list;
224 struct k_itimer *timer; 229 struct k_itimer *timer;
225 int i;
226 int cpu; 230 int cpu;
231};
232
233struct mmtimer_node {
234 spinlock_t lock ____cacheline_aligned;
235 struct rb_root timer_head;
236 struct rb_node *next;
227 struct tasklet_struct tasklet; 237 struct tasklet_struct tasklet;
228} mmtimer_t; 238};
239static struct mmtimer_node *timers;
240
241
242/*
243 * Add a new mmtimer struct to the node's mmtimer list.
244 * This function assumes the struct mmtimer_node is locked.
245 */
246static void mmtimer_add_list(struct mmtimer *n)
247{
248 int nodeid = n->timer->it.mmtimer.node;
249 unsigned long expires = n->timer->it.mmtimer.expires;
250 struct rb_node **link = &timers[nodeid].timer_head.rb_node;
251 struct rb_node *parent = NULL;
252 struct mmtimer *x;
253
254 /*
255 * Find the right place in the rbtree:
256 */
257 while (*link) {
258 parent = *link;
259 x = rb_entry(parent, struct mmtimer, list);
260
261 if (expires < x->timer->it.mmtimer.expires)
262 link = &(*link)->rb_left;
263 else
264 link = &(*link)->rb_right;
265 }
266
267 /*
268 * Insert the timer to the rbtree and check whether it
269 * replaces the first pending timer
270 */
271 rb_link_node(&n->list, parent, link);
272 rb_insert_color(&n->list, &timers[nodeid].timer_head);
273
274 if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next,
275 struct mmtimer, list)->timer->it.mmtimer.expires)
276 timers[nodeid].next = &n->list;
277}
278
279/*
280 * Set the comparator for the next timer.
281 * This function assumes the struct mmtimer_node is locked.
282 */
283static void mmtimer_set_next_timer(int nodeid)
284{
285 struct mmtimer_node *n = &timers[nodeid];
286 struct mmtimer *x;
287 struct k_itimer *t;
288 int o;
289
290restart:
291 if (n->next == NULL)
292 return;
229 293
230static mmtimer_t ** timers; 294 x = rb_entry(n->next, struct mmtimer, list);
295 t = x->timer;
296 if (!t->it.mmtimer.incr) {
297 /* Not an interval timer */
298 if (!mmtimer_setup(x->cpu, COMPARATOR,
299 t->it.mmtimer.expires)) {
300 /* Late setup, fire now */
301 tasklet_schedule(&n->tasklet);
302 }
303 return;
304 }
305
306 /* Interval timer */
307 o = 0;
308 while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
309 unsigned long e, e1;
310 struct rb_node *next;
311 t->it.mmtimer.expires += t->it.mmtimer.incr << o;
312 t->it_overrun += 1 << o;
313 o++;
314 if (o > 20) {
315 printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
316 t->it.mmtimer.clock = TIMER_OFF;
317 n->next = rb_next(&x->list);
318 rb_erase(&x->list, &n->timer_head);
319 kfree(x);
320 goto restart;
321 }
322
323 e = t->it.mmtimer.expires;
324 next = rb_next(&x->list);
325
326 if (next == NULL)
327 continue;
328
329 e1 = rb_entry(next, struct mmtimer, list)->
330 timer->it.mmtimer.expires;
331 if (e > e1) {
332 n->next = next;
333 rb_erase(&x->list, &n->timer_head);
334 mmtimer_add_list(x);
335 goto restart;
336 }
337 }
338}
231 339
232/** 340/**
233 * mmtimer_ioctl - ioctl interface for /dev/mmtimer 341 * mmtimer_ioctl - ioctl interface for /dev/mmtimer
@@ -366,8 +474,8 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
366 474
367 nsec = rtc_time() * sgi_clock_period 475 nsec = rtc_time() * sgi_clock_period
368 + sgi_clock_offset.tv_nsec; 476 + sgi_clock_offset.tv_nsec;
369 tp->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tp->tv_nsec) 477 *tp = ns_to_timespec(nsec);
370 + sgi_clock_offset.tv_sec; 478 tp->tv_sec += sgi_clock_offset.tv_sec;
371 return 0; 479 return 0;
372}; 480};
373 481
@@ -375,11 +483,11 @@ static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
375{ 483{
376 484
377 u64 nsec; 485 u64 nsec;
378 u64 rem; 486 u32 rem;
379 487
380 nsec = rtc_time() * sgi_clock_period; 488 nsec = rtc_time() * sgi_clock_period;
381 489
382 sgi_clock_offset.tv_sec = tp->tv_sec - div_long_long_rem(nsec, NSEC_PER_SEC, &rem); 490 sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);
383 491
384 if (rem <= tp->tv_nsec) 492 if (rem <= tp->tv_nsec)
385 sgi_clock_offset.tv_nsec = tp->tv_sec - rem; 493 sgi_clock_offset.tv_nsec = tp->tv_sec - rem;
@@ -390,35 +498,6 @@ static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
390 return 0; 498 return 0;
391} 499}
392 500
393/*
394 * Schedule the next periodic interrupt. This function will attempt
395 * to schedule a periodic interrupt later if necessary. If the scheduling
396 * of an interrupt fails then the time to skip is lengthened
397 * exponentially in order to ensure that the next interrupt
398 * can be properly scheduled..
399 */
400static int inline reschedule_periodic_timer(mmtimer_t *x)
401{
402 int n;
403 struct k_itimer *t = x->timer;
404
405 t->it.mmtimer.clock = x->i;
406 t->it_overrun--;
407
408 n = 0;
409 do {
410
411 t->it.mmtimer.expires += t->it.mmtimer.incr << n;
412 t->it_overrun += 1 << n;
413 n++;
414 if (n > 20)
415 return 1;
416
417 } while (!mmtimer_setup(x->i, t->it.mmtimer.expires));
418
419 return 0;
420}
421
422/** 501/**
423 * mmtimer_interrupt - timer interrupt handler 502 * mmtimer_interrupt - timer interrupt handler
424 * @irq: irq received 503 * @irq: irq received
@@ -435,71 +514,75 @@ static int inline reschedule_periodic_timer(mmtimer_t *x)
435static irqreturn_t 514static irqreturn_t
436mmtimer_interrupt(int irq, void *dev_id) 515mmtimer_interrupt(int irq, void *dev_id)
437{ 516{
438 int i;
439 unsigned long expires = 0; 517 unsigned long expires = 0;
440 int result = IRQ_NONE; 518 int result = IRQ_NONE;
441 unsigned indx = cpu_to_node(smp_processor_id()); 519 unsigned indx = cpu_to_node(smp_processor_id());
520 struct mmtimer *base;
442 521
443 /* 522 spin_lock(&timers[indx].lock);
444 * Do this once for each comparison register 523 base = rb_entry(timers[indx].next, struct mmtimer, list);
445 */ 524 if (base == NULL) {
446 for (i = 0; i < NUM_COMPARATORS; i++) { 525 spin_unlock(&timers[indx].lock);
447 mmtimer_t *base = timers[indx] + i; 526 return result;
448 /* Make sure this doesn't get reused before tasklet_sched */ 527 }
449 spin_lock(&base->lock); 528
450 if (base->cpu == smp_processor_id()) { 529 if (base->cpu == smp_processor_id()) {
451 if (base->timer) 530 if (base->timer)
452 expires = base->timer->it.mmtimer.expires; 531 expires = base->timer->it.mmtimer.expires;
453 /* expires test won't work with shared irqs */ 532 /* expires test won't work with shared irqs */
454 if ((mmtimer_int_pending(i) > 0) || 533 if ((mmtimer_int_pending(COMPARATOR) > 0) ||
455 (expires && (expires < rtc_time()))) { 534 (expires && (expires <= rtc_time()))) {
456 mmtimer_clr_int_pending(i); 535 mmtimer_clr_int_pending(COMPARATOR);
457 tasklet_schedule(&base->tasklet); 536 tasklet_schedule(&timers[indx].tasklet);
458 result = IRQ_HANDLED; 537 result = IRQ_HANDLED;
459 }
460 } 538 }
461 spin_unlock(&base->lock);
462 expires = 0;
463 } 539 }
540 spin_unlock(&timers[indx].lock);
464 return result; 541 return result;
465} 542}
466 543
467void mmtimer_tasklet(unsigned long data) { 544static void mmtimer_tasklet(unsigned long data)
468 mmtimer_t *x = (mmtimer_t *)data; 545{
469 struct k_itimer *t = x->timer; 546 int nodeid = data;
547 struct mmtimer_node *mn = &timers[nodeid];
548 struct mmtimer *x = rb_entry(mn->next, struct mmtimer, list);
549 struct k_itimer *t;
470 unsigned long flags; 550 unsigned long flags;
471 551
472 if (t == NULL)
473 return;
474
475 /* Send signal and deal with periodic signals */ 552 /* Send signal and deal with periodic signals */
476 spin_lock_irqsave(&t->it_lock, flags); 553 spin_lock_irqsave(&mn->lock, flags);
477 spin_lock(&x->lock); 554 if (!mn->next)
478 /* If timer was deleted between interrupt and here, leave */
479 if (t != x->timer)
480 goto out; 555 goto out;
481 t->it_overrun = 0;
482 556
483 if (posix_timer_event(t, 0) != 0) { 557 x = rb_entry(mn->next, struct mmtimer, list);
558 t = x->timer;
559
560 if (t->it.mmtimer.clock == TIMER_OFF)
561 goto out;
562
563 t->it_overrun = 0;
484 564
485 // printk(KERN_WARNING "mmtimer: cannot deliver signal.\n"); 565 mn->next = rb_next(&x->list);
566 rb_erase(&x->list, &mn->timer_head);
486 567
568 if (posix_timer_event(t, 0) != 0)
487 t->it_overrun++; 569 t->it_overrun++;
488 } 570
489 if(t->it.mmtimer.incr) { 571 if(t->it.mmtimer.incr) {
490 /* Periodic timer */ 572 t->it.mmtimer.expires += t->it.mmtimer.incr;
491 if (reschedule_periodic_timer(x)) { 573 mmtimer_add_list(x);
492 printk(KERN_WARNING "mmtimer: unable to reschedule\n");
493 x->timer = NULL;
494 }
495 } else { 574 } else {
496 /* Ensure we don't false trigger in mmtimer_interrupt */ 575 /* Ensure we don't false trigger in mmtimer_interrupt */
576 t->it.mmtimer.clock = TIMER_OFF;
497 t->it.mmtimer.expires = 0; 577 t->it.mmtimer.expires = 0;
578 kfree(x);
498 } 579 }
580 /* Set comparator for next timer, if there is one */
581 mmtimer_set_next_timer(nodeid);
582
499 t->it_overrun_last = t->it_overrun; 583 t->it_overrun_last = t->it_overrun;
500out: 584out:
501 spin_unlock(&x->lock); 585 spin_unlock_irqrestore(&mn->lock, flags);
502 spin_unlock_irqrestore(&t->it_lock, flags);
503} 586}
504 587
505static int sgi_timer_create(struct k_itimer *timer) 588static int sgi_timer_create(struct k_itimer *timer)
@@ -516,25 +599,53 @@ static int sgi_timer_create(struct k_itimer *timer)
516 */ 599 */
517static int sgi_timer_del(struct k_itimer *timr) 600static int sgi_timer_del(struct k_itimer *timr)
518{ 601{
519 int i = timr->it.mmtimer.clock;
520 cnodeid_t nodeid = timr->it.mmtimer.node; 602 cnodeid_t nodeid = timr->it.mmtimer.node;
521 mmtimer_t *t = timers[nodeid] + i;
522 unsigned long irqflags; 603 unsigned long irqflags;
523 604
524 if (i != TIMER_OFF) { 605 spin_lock_irqsave(&timers[nodeid].lock, irqflags);
525 spin_lock_irqsave(&t->lock, irqflags); 606 if (timr->it.mmtimer.clock != TIMER_OFF) {
526 mmtimer_disable_int(cnodeid_to_nasid(nodeid),i); 607 unsigned long expires = timr->it.mmtimer.expires;
527 t->timer = NULL; 608 struct rb_node *n = timers[nodeid].timer_head.rb_node;
609 struct mmtimer *uninitialized_var(t);
610 int r = 0;
611
528 timr->it.mmtimer.clock = TIMER_OFF; 612 timr->it.mmtimer.clock = TIMER_OFF;
529 timr->it.mmtimer.expires = 0; 613 timr->it.mmtimer.expires = 0;
530 spin_unlock_irqrestore(&t->lock, irqflags); 614
615 while (n) {
616 t = rb_entry(n, struct mmtimer, list);
617 if (t->timer == timr)
618 break;
619
620 if (expires < t->timer->it.mmtimer.expires)
621 n = n->rb_left;
622 else
623 n = n->rb_right;
624 }
625
626 if (!n) {
627 spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
628 return 0;
629 }
630
631 if (timers[nodeid].next == n) {
632 timers[nodeid].next = rb_next(n);
633 r = 1;
634 }
635
636 rb_erase(n, &timers[nodeid].timer_head);
637 kfree(t);
638
639 if (r) {
640 mmtimer_disable_int(cnodeid_to_nasid(nodeid),
641 COMPARATOR);
642 mmtimer_set_next_timer(nodeid);
643 }
531 } 644 }
645 spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
532 return 0; 646 return 0;
533} 647}
534 648
535#define timespec_to_ns(x) ((x).tv_nsec + (x).tv_sec * NSEC_PER_SEC)
536#define ns_to_timespec(ts, nsec) (ts).tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &(ts).tv_nsec)
537
538/* Assumption: it_lock is already held with irq's disabled */ 649/* Assumption: it_lock is already held with irq's disabled */
539static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) 650static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
540{ 651{
@@ -547,9 +658,8 @@ static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
547 return; 658 return;
548 } 659 }
549 660
550 ns_to_timespec(cur_setting->it_interval, timr->it.mmtimer.incr * sgi_clock_period); 661 cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
551 ns_to_timespec(cur_setting->it_value, (timr->it.mmtimer.expires - rtc_time())* sgi_clock_period); 662 cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
552 return;
553} 663}
554 664
555 665
@@ -557,30 +667,33 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
557 struct itimerspec * new_setting, 667 struct itimerspec * new_setting,
558 struct itimerspec * old_setting) 668 struct itimerspec * old_setting)
559{ 669{
560
561 int i;
562 unsigned long when, period, irqflags; 670 unsigned long when, period, irqflags;
563 int err = 0; 671 int err = 0;
564 cnodeid_t nodeid; 672 cnodeid_t nodeid;
565 mmtimer_t *base; 673 struct mmtimer *base;
674 struct rb_node *n;
566 675
567 if (old_setting) 676 if (old_setting)
568 sgi_timer_get(timr, old_setting); 677 sgi_timer_get(timr, old_setting);
569 678
570 sgi_timer_del(timr); 679 sgi_timer_del(timr);
571 when = timespec_to_ns(new_setting->it_value); 680 when = timespec_to_ns(&new_setting->it_value);
572 period = timespec_to_ns(new_setting->it_interval); 681 period = timespec_to_ns(&new_setting->it_interval);
573 682
574 if (when == 0) 683 if (when == 0)
575 /* Clear timer */ 684 /* Clear timer */
576 return 0; 685 return 0;
577 686
687 base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);
688 if (base == NULL)
689 return -ENOMEM;
690
578 if (flags & TIMER_ABSTIME) { 691 if (flags & TIMER_ABSTIME) {
579 struct timespec n; 692 struct timespec n;
580 unsigned long now; 693 unsigned long now;
581 694
582 getnstimeofday(&n); 695 getnstimeofday(&n);
583 now = timespec_to_ns(n); 696 now = timespec_to_ns(&n);
584 if (when > now) 697 if (when > now)
585 when -= now; 698 when -= now;
586 else 699 else
@@ -604,47 +717,38 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
604 preempt_disable(); 717 preempt_disable();
605 718
606 nodeid = cpu_to_node(smp_processor_id()); 719 nodeid = cpu_to_node(smp_processor_id());
607retry:
608 /* Don't use an allocated timer, or a deleted one that's pending */
609 for(i = 0; i< NUM_COMPARATORS; i++) {
610 base = timers[nodeid] + i;
611 if (!base->timer && !base->tasklet.state) {
612 break;
613 }
614 }
615
616 if (i == NUM_COMPARATORS) {
617 preempt_enable();
618 return -EBUSY;
619 }
620 720
621 spin_lock_irqsave(&base->lock, irqflags); 721 /* Lock the node timer structure */
722 spin_lock_irqsave(&timers[nodeid].lock, irqflags);
622 723
623 if (base->timer || base->tasklet.state != 0) {
624 spin_unlock_irqrestore(&base->lock, irqflags);
625 goto retry;
626 }
627 base->timer = timr; 724 base->timer = timr;
628 base->cpu = smp_processor_id(); 725 base->cpu = smp_processor_id();
629 726
630 timr->it.mmtimer.clock = i; 727 timr->it.mmtimer.clock = TIMER_SET;
631 timr->it.mmtimer.node = nodeid; 728 timr->it.mmtimer.node = nodeid;
632 timr->it.mmtimer.incr = period; 729 timr->it.mmtimer.incr = period;
633 timr->it.mmtimer.expires = when; 730 timr->it.mmtimer.expires = when;
634 731
635 if (period == 0) { 732 n = timers[nodeid].next;
636 if (!mmtimer_setup(i, when)) { 733
637 mmtimer_disable_int(-1, i); 734 /* Add the new struct mmtimer to node's timer list */
638 posix_timer_event(timr, 0); 735 mmtimer_add_list(base);
639 timr->it.mmtimer.expires = 0; 736
640 } 737 if (timers[nodeid].next == n) {
641 } else { 738 /* No need to reprogram comparator for now */
642 timr->it.mmtimer.expires -= period; 739 spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
643 if (reschedule_periodic_timer(base)) 740 preempt_enable();
644 err = -EINVAL; 741 return err;
645 } 742 }
646 743
647 spin_unlock_irqrestore(&base->lock, irqflags); 744 /* We need to reprogram the comparator */
745 if (n)
746 mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR);
747
748 mmtimer_set_next_timer(nodeid);
749
750 /* Unlock the node timer structure */
751 spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
648 752
649 preempt_enable(); 753 preempt_enable();
650 754
@@ -669,7 +773,6 @@ static struct k_clock sgi_clock = {
669 */ 773 */
670static int __init mmtimer_init(void) 774static int __init mmtimer_init(void)
671{ 775{
672 unsigned i;
673 cnodeid_t node, maxn = -1; 776 cnodeid_t node, maxn = -1;
674 777
675 if (!ia64_platform_is("sn2")) 778 if (!ia64_platform_is("sn2"))
@@ -706,31 +809,18 @@ static int __init mmtimer_init(void)
706 maxn++; 809 maxn++;
707 810
708 /* Allocate list of node ptrs to mmtimer_t's */ 811 /* Allocate list of node ptrs to mmtimer_t's */
709 timers = kzalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL); 812 timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL);
710 if (timers == NULL) { 813 if (timers == NULL) {
711 printk(KERN_ERR "%s: failed to allocate memory for device\n", 814 printk(KERN_ERR "%s: failed to allocate memory for device\n",
712 MMTIMER_NAME); 815 MMTIMER_NAME);
713 goto out3; 816 goto out3;
714 } 817 }
715 818
716 /* Allocate mmtimer_t's for each online node */ 819 /* Initialize struct mmtimer's for each online node */
717 for_each_online_node(node) { 820 for_each_online_node(node) {
718 timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); 821 spin_lock_init(&timers[node].lock);
719 if (timers[node] == NULL) { 822 tasklet_init(&timers[node].tasklet, mmtimer_tasklet,
720 printk(KERN_ERR "%s: failed to allocate memory for device\n", 823 (unsigned long) node);
721 MMTIMER_NAME);
722 goto out4;
723 }
724 for (i=0; i< NUM_COMPARATORS; i++) {
725 mmtimer_t * base = timers[node] + i;
726
727 spin_lock_init(&base->lock);
728 base->timer = NULL;
729 base->cpu = 0;
730 base->i = i;
731 tasklet_init(&base->tasklet, mmtimer_tasklet,
732 (unsigned long) (base));
733 }
734 } 824 }
735 825
736 sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second; 826 sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
@@ -741,11 +831,8 @@ static int __init mmtimer_init(void)
741 831
742 return 0; 832 return 0;
743 833
744out4:
745 for_each_online_node(node) {
746 kfree(timers[node]);
747 }
748out3: 834out3:
835 kfree(timers);
749 misc_deregister(&mmtimer_miscdev); 836 misc_deregister(&mmtimer_miscdev);
750out2: 837out2:
751 free_irq(SGI_MMTIMER_VECTOR, NULL); 838 free_irq(SGI_MMTIMER_VECTOR, NULL);
@@ -754,4 +841,3 @@ out1:
754} 841}
755 842
756module_init(mmtimer_init); 843module_init(mmtimer_init);
757
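
A minimal sketch of the bookkeeping pattern the mmtimer hunks above switch to: each node keeps an rb_tree of pending timers ordered by expiry plus a cached pointer to the earliest entry, so the RTC comparator only needs reprogramming when a new leftmost entry appears. The struct and function names below are made up for illustration; the driver's own helpers (mmtimer_add_list(), mmtimer_set_next_timer()) are not shown in this section.

/* Illustrative only -- assumes kernel context (rbtree, spinlock). */
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_timer {
	struct rb_node node;
	unsigned long expires;
};

struct demo_node {
	spinlock_t lock;		/* held by callers around the tree */
	struct rb_root head;		/* timers ordered by ->expires     */
	struct rb_node *next;		/* cached earliest entry, or NULL  */
};

/* Insert t; return true if the hardware comparator must be reloaded. */
static bool demo_add_timer(struct demo_node *n, struct demo_timer *t)
{
	struct rb_node **link = &n->head.rb_node, *parent = NULL;
	struct demo_timer *cur;

	while (*link) {
		parent = *link;
		cur = rb_entry(parent, struct demo_timer, node);
		if (t->expires < cur->expires)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&t->node, parent, link);
	rb_insert_color(&t->node, &n->head);

	/* Only a new leftmost (earliest) entry changes what fires next. */
	if (!n->next || t->expires <
	    rb_entry(n->next, struct demo_timer, node)->expires) {
		n->next = &t->node;
		return true;
	}
	return false;
}

Deletion follows the same rule in reverse, as sgi_timer_del() above does: if the erased node was the cached next entry, advance it with rb_next() and reprogram the comparator; otherwise the hardware state is left alone.
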
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 64b7b2b18352..d57d3a61919b 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -2,7 +2,8 @@
2/* 2/*
3 * moxa.c -- MOXA Intellio family multiport serial driver. 3 * moxa.c -- MOXA Intellio family multiport serial driver.
4 * 4 *
5 * Copyright (C) 1999-2000 Moxa Technologies (support@moxa.com.tw). 5 * Copyright (C) 1999-2000 Moxa Technologies (support@moxa.com).
6 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
6 * 7 *
7 * This code is loosely based on the Linux serial driver, written by 8 * This code is loosely based on the Linux serial driver, written by
8 * Linus Torvalds, Theodore T'so and others. 9 * Linus Torvalds, Theodore T'so and others.
@@ -25,6 +26,7 @@
25#include <linux/mm.h> 26#include <linux/mm.h>
26#include <linux/ioport.h> 27#include <linux/ioport.h>
27#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/firmware.h>
28#include <linux/signal.h> 30#include <linux/signal.h>
29#include <linux/sched.h> 31#include <linux/sched.h>
30#include <linux/timer.h> 32#include <linux/timer.h>
@@ -41,21 +43,26 @@
41#include <linux/pci.h> 43#include <linux/pci.h>
42#include <linux/init.h> 44#include <linux/init.h>
43#include <linux/bitops.h> 45#include <linux/bitops.h>
44#include <linux/completion.h>
45 46
46#include <asm/system.h> 47#include <asm/system.h>
47#include <asm/io.h> 48#include <asm/io.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49 50
50#define MOXA_VERSION "5.1k" 51#include "moxa.h"
52
53#define MOXA_VERSION "6.0k"
54
55#define MOXA_FW_HDRLEN 32
51 56
52#define MOXAMAJOR 172 57#define MOXAMAJOR 172
53#define MOXACUMAJOR 173
54 58
55#define MAX_BOARDS 4 /* Don't change this value */ 59#define MAX_BOARDS 4 /* Don't change this value */
56#define MAX_PORTS_PER_BOARD 32 /* Don't change this value */ 60#define MAX_PORTS_PER_BOARD 32 /* Don't change this value */
57#define MAX_PORTS (MAX_BOARDS * MAX_PORTS_PER_BOARD) 61#define MAX_PORTS (MAX_BOARDS * MAX_PORTS_PER_BOARD)
58 62
63#define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_ISA || \
64 (brd)->boardType == MOXA_BOARD_C320_PCI)
65
59/* 66/*
60 * Define the Moxa PCI vendor and device IDs. 67 * Define the Moxa PCI vendor and device IDs.
61 */ 68 */
@@ -92,24 +99,16 @@ static struct pci_device_id moxa_pcibrds[] = {
92MODULE_DEVICE_TABLE(pci, moxa_pcibrds); 99MODULE_DEVICE_TABLE(pci, moxa_pcibrds);
93#endif /* CONFIG_PCI */ 100#endif /* CONFIG_PCI */
94 101
95struct moxa_isa_board_conf { 102struct moxa_port;
96 int boardType;
97 int numPorts;
98 unsigned long baseAddr;
99};
100
101static struct moxa_isa_board_conf moxa_isa_boards[] =
102{
103/* {MOXA_BOARD_C218_ISA,8,0xDC000}, */
104};
105 103
106static struct moxa_board_conf { 104static struct moxa_board_conf {
107 int boardType; 105 int boardType;
108 int numPorts; 106 int numPorts;
109 unsigned long baseAddr;
110 int busType; 107 int busType;
111 108
112 int loadstat; 109 unsigned int ready;
110
111 struct moxa_port *ports;
113 112
114 void __iomem *basemem; 113 void __iomem *basemem;
115 void __iomem *intNdx; 114 void __iomem *intNdx;
@@ -131,30 +130,27 @@ struct moxaq_str {
131}; 130};
132 131
133struct moxa_port { 132struct moxa_port {
133 struct moxa_board_conf *board;
134 struct tty_struct *tty;
135 void __iomem *tableAddr;
136
134 int type; 137 int type;
135 int port;
136 int close_delay; 138 int close_delay;
137 unsigned short closing_wait; 139 unsigned int count;
138 int count;
139 int blocked_open;
140 long event; /* long req'd for set_bit --RR */
141 int asyncflags; 140 int asyncflags;
142 unsigned long statusflags;
143 struct tty_struct *tty;
144 int cflag; 141 int cflag;
142 unsigned long statusflags;
145 wait_queue_head_t open_wait; 143 wait_queue_head_t open_wait;
146 struct completion close_wait;
147
148 struct timer_list emptyTimer;
149 144
150 char chkPort; 145 u8 DCDState;
151 char lineCtrl; 146 u8 lineCtrl;
152 void __iomem *tableAddr; 147 u8 lowChkFlag;
153 long curBaud; 148};
154 char DCDState;
155 char lowChkFlag;
156 149
157 ushort breakCnt; 150struct mon_str {
151 int tick;
152 int rxcnt[MAX_PORTS];
153 int txcnt[MAX_PORTS];
158}; 154};
159 155
160/* statusflags */ 156/* statusflags */
@@ -168,20 +164,27 @@ struct moxa_port {
168#define WAKEUP_CHARS 256 164#define WAKEUP_CHARS 256
169 165
170static int ttymajor = MOXAMAJOR; 166static int ttymajor = MOXAMAJOR;
167static struct mon_str moxaLog;
168static unsigned int moxaFuncTout = HZ / 2;
169static unsigned int moxaLowWaterChk;
170static DEFINE_MUTEX(moxa_openlock);
171/* Variables for insmod */ 171/* Variables for insmod */
172#ifdef MODULE 172#ifdef MODULE
173static int baseaddr[4]; 173static unsigned long baseaddr[MAX_BOARDS];
174static int type[4]; 174static unsigned int type[MAX_BOARDS];
175static int numports[4]; 175static unsigned int numports[MAX_BOARDS];
176#endif 176#endif
177 177
178MODULE_AUTHOR("William Chen"); 178MODULE_AUTHOR("William Chen");
179MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver"); 179MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
180MODULE_LICENSE("GPL"); 180MODULE_LICENSE("GPL");
181#ifdef MODULE 181#ifdef MODULE
182module_param_array(type, int, NULL, 0); 182module_param_array(type, uint, NULL, 0);
183module_param_array(baseaddr, int, NULL, 0); 183MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
184module_param_array(numports, int, NULL, 0); 184module_param_array(baseaddr, ulong, NULL, 0);
185MODULE_PARM_DESC(baseaddr, "base address");
186module_param_array(numports, uint, NULL, 0);
187MODULE_PARM_DESC(numports, "numports (ignored for C218)");
185#endif 188#endif
186module_param(ttymajor, int, 0); 189module_param(ttymajor, int, 0);
187 190
@@ -194,9 +197,6 @@ static int moxa_write(struct tty_struct *, const unsigned char *, int);
194static int moxa_write_room(struct tty_struct *); 197static int moxa_write_room(struct tty_struct *);
195static void moxa_flush_buffer(struct tty_struct *); 198static void moxa_flush_buffer(struct tty_struct *);
196static int moxa_chars_in_buffer(struct tty_struct *); 199static int moxa_chars_in_buffer(struct tty_struct *);
197static void moxa_flush_chars(struct tty_struct *);
198static void moxa_put_char(struct tty_struct *, unsigned char);
199static int moxa_ioctl(struct tty_struct *, struct file *, unsigned int, unsigned long);
200static void moxa_throttle(struct tty_struct *); 200static void moxa_throttle(struct tty_struct *);
201static void moxa_unthrottle(struct tty_struct *); 201static void moxa_unthrottle(struct tty_struct *);
202static void moxa_set_termios(struct tty_struct *, struct ktermios *); 202static void moxa_set_termios(struct tty_struct *, struct ktermios *);
@@ -208,44 +208,183 @@ static int moxa_tiocmset(struct tty_struct *tty, struct file *file,
208 unsigned int set, unsigned int clear); 208 unsigned int set, unsigned int clear);
209static void moxa_poll(unsigned long); 209static void moxa_poll(unsigned long);
210static void moxa_set_tty_param(struct tty_struct *, struct ktermios *); 210static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
211static int moxa_block_till_ready(struct tty_struct *, struct file *,
212 struct moxa_port *);
213static void moxa_setup_empty_event(struct tty_struct *); 211static void moxa_setup_empty_event(struct tty_struct *);
214static void moxa_check_xmit_empty(unsigned long);
215static void moxa_shut_down(struct moxa_port *); 212static void moxa_shut_down(struct moxa_port *);
216static void moxa_receive_data(struct moxa_port *);
217/* 213/*
218 * moxa board interface functions: 214 * moxa board interface functions:
219 */ 215 */
220static void MoxaDriverInit(void); 216static void MoxaPortEnable(struct moxa_port *);
221static int MoxaDriverIoctl(unsigned int, unsigned long, int); 217static void MoxaPortDisable(struct moxa_port *);
222static int MoxaDriverPoll(void); 218static int MoxaPortSetTermio(struct moxa_port *, struct ktermios *, speed_t);
223static int MoxaPortsOfCard(int); 219static int MoxaPortGetLineOut(struct moxa_port *, int *, int *);
224static int MoxaPortIsValid(int); 220static void MoxaPortLineCtrl(struct moxa_port *, int, int);
225static void MoxaPortEnable(int); 221static void MoxaPortFlowCtrl(struct moxa_port *, int, int, int, int, int);
226static void MoxaPortDisable(int); 222static int MoxaPortLineStatus(struct moxa_port *);
227static long MoxaPortGetMaxBaud(int); 223static void MoxaPortFlushData(struct moxa_port *, int);
228static long MoxaPortSetBaud(int, long); 224static int MoxaPortWriteData(struct moxa_port *, const unsigned char *, int);
229static int MoxaPortSetTermio(int, struct ktermios *, speed_t); 225static int MoxaPortReadData(struct moxa_port *);
230static int MoxaPortGetLineOut(int, int *, int *); 226static int MoxaPortTxQueue(struct moxa_port *);
231static void MoxaPortLineCtrl(int, int, int); 227static int MoxaPortRxQueue(struct moxa_port *);
232static void MoxaPortFlowCtrl(int, int, int, int, int, int); 228static int MoxaPortTxFree(struct moxa_port *);
233static int MoxaPortLineStatus(int); 229static void MoxaPortTxDisable(struct moxa_port *);
234static int MoxaPortDCDChange(int); 230static void MoxaPortTxEnable(struct moxa_port *);
235static int MoxaPortDCDON(int);
236static void MoxaPortFlushData(int, int);
237static int MoxaPortWriteData(int, unsigned char *, int);
238static int MoxaPortReadData(int, struct tty_struct *tty);
239static int MoxaPortTxQueue(int);
240static int MoxaPortRxQueue(int);
241static int MoxaPortTxFree(int);
242static void MoxaPortTxDisable(int);
243static void MoxaPortTxEnable(int);
244static int MoxaPortResetBrkCnt(int);
245static void MoxaPortSendBreak(int, int);
246static int moxa_get_serial_info(struct moxa_port *, struct serial_struct __user *); 231static int moxa_get_serial_info(struct moxa_port *, struct serial_struct __user *);
247static int moxa_set_serial_info(struct moxa_port *, struct serial_struct __user *); 232static int moxa_set_serial_info(struct moxa_port *, struct serial_struct __user *);
248static void MoxaSetFifo(int port, int enable); 233static void MoxaSetFifo(struct moxa_port *port, int enable);
234
235/*
236 * I/O functions
237 */
238
239static void moxa_wait_finish(void __iomem *ofsAddr)
240{
241 unsigned long end = jiffies + moxaFuncTout;
242
243 while (readw(ofsAddr + FuncCode) != 0)
244 if (time_after(jiffies, end))
245 return;
246 if (readw(ofsAddr + FuncCode) != 0 && printk_ratelimit())
247 printk(KERN_WARNING "moxa function expired\n");
248}
249
250static void moxafunc(void __iomem *ofsAddr, u16 cmd, u16 arg)
251{
252 writew(arg, ofsAddr + FuncArg);
253 writew(cmd, ofsAddr + FuncCode);
254 moxa_wait_finish(ofsAddr);
255}
256
257static void moxa_low_water_check(void __iomem *ofsAddr)
258{
259 u16 rptr, wptr, mask, len;
260
261 if (readb(ofsAddr + FlagStat) & Xoff_state) {
262 rptr = readw(ofsAddr + RXrptr);
263 wptr = readw(ofsAddr + RXwptr);
264 mask = readw(ofsAddr + RX_mask);
265 len = (wptr - rptr) & mask;
266 if (len <= Low_water)
267 moxafunc(ofsAddr, FC_SendXon, 0);
268 }
269}
270
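
moxafunc()/moxa_wait_finish() above implement a polled mailbox handshake: write the argument word, then the command word, then spin until the card clears FuncCode or a jiffies deadline passes. A stand-alone sketch of that pattern, with made-up register offsets and an error return in place of the driver's ratelimited warning:

/* Illustrative only; the DEMO_* offsets and timeout are assumptions. */
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_FUNC_ARG	0x00		/* argument word                  */
#define DEMO_FUNC_CMD	0x02		/* command word, cleared by card  */
#define DEMO_FUNC_TOUT	(HZ / 2)	/* mirrors moxaFuncTout above     */

static int demo_mailbox_cmd(void __iomem *base, u16 cmd, u16 arg)
{
	unsigned long deadline = jiffies + DEMO_FUNC_TOUT;

	writew(arg, base + DEMO_FUNC_ARG);
	writew(cmd, base + DEMO_FUNC_CMD);

	/* The card acknowledges by clearing the command word. */
	while (readw(base + DEMO_FUNC_CMD) != 0)
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;

	return 0;
}
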
271/*
272 * TTY operations
273 */
274
275static int moxa_ioctl(struct tty_struct *tty, struct file *file,
276 unsigned int cmd, unsigned long arg)
277{
278 struct moxa_port *ch = tty->driver_data;
279 void __user *argp = (void __user *)arg;
280 int status, ret = 0;
281
282 if (tty->index == MAX_PORTS) {
283 if (cmd != MOXA_GETDATACOUNT && cmd != MOXA_GET_IOQUEUE &&
284 cmd != MOXA_GETMSTATUS)
285 return -EINVAL;
286 } else if (!ch)
287 return -ENODEV;
288
289 switch (cmd) {
290 case MOXA_GETDATACOUNT:
291 moxaLog.tick = jiffies;
292 if (copy_to_user(argp, &moxaLog, sizeof(moxaLog)))
293 ret = -EFAULT;
294 break;
295 case MOXA_FLUSH_QUEUE:
296 MoxaPortFlushData(ch, arg);
297 break;
298 case MOXA_GET_IOQUEUE: {
299 struct moxaq_str __user *argm = argp;
300 struct moxaq_str tmp;
301 struct moxa_port *p;
302 unsigned int i, j;
303
304 mutex_lock(&moxa_openlock);
305 for (i = 0; i < MAX_BOARDS; i++) {
306 p = moxa_boards[i].ports;
307 for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
308 memset(&tmp, 0, sizeof(tmp));
309 if (moxa_boards[i].ready) {
310 tmp.inq = MoxaPortRxQueue(p);
311 tmp.outq = MoxaPortTxQueue(p);
312 }
313 if (copy_to_user(argm, &tmp, sizeof(tmp))) {
314 mutex_unlock(&moxa_openlock);
315 return -EFAULT;
316 }
317 }
318 }
319 mutex_unlock(&moxa_openlock);
320 break;
321 } case MOXA_GET_OQUEUE:
322 status = MoxaPortTxQueue(ch);
323 ret = put_user(status, (unsigned long __user *)argp);
324 break;
325 case MOXA_GET_IQUEUE:
326 status = MoxaPortRxQueue(ch);
327 ret = put_user(status, (unsigned long __user *)argp);
328 break;
329 case MOXA_GETMSTATUS: {
330 struct mxser_mstatus __user *argm = argp;
331 struct mxser_mstatus tmp;
332 struct moxa_port *p;
333 unsigned int i, j;
334
335 mutex_lock(&moxa_openlock);
336 for (i = 0; i < MAX_BOARDS; i++) {
337 p = moxa_boards[i].ports;
338 for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
339 memset(&tmp, 0, sizeof(tmp));
340 if (!moxa_boards[i].ready)
341 goto copy;
342
343 status = MoxaPortLineStatus(p);
344 if (status & 1)
345 tmp.cts = 1;
346 if (status & 2)
347 tmp.dsr = 1;
348 if (status & 4)
349 tmp.dcd = 1;
350
351 if (!p->tty || !p->tty->termios)
352 tmp.cflag = p->cflag;
353 else
354 tmp.cflag = p->tty->termios->c_cflag;
355copy:
356 if (copy_to_user(argm, &tmp, sizeof(tmp))) {
357 mutex_unlock(&moxa_openlock);
358 return -EFAULT;
359 }
360 }
361 }
362 mutex_unlock(&moxa_openlock);
363 break;
364 }
365 case TIOCGSERIAL:
366 mutex_lock(&moxa_openlock);
367 ret = moxa_get_serial_info(ch, argp);
368 mutex_unlock(&moxa_openlock);
369 break;
370 case TIOCSSERIAL:
371 mutex_lock(&moxa_openlock);
372 ret = moxa_set_serial_info(ch, argp);
373 mutex_unlock(&moxa_openlock);
374 break;
375 default:
376 ret = -ENOIOCTLCMD;
377 }
378 return ret;
379}
380
381static void moxa_break_ctl(struct tty_struct *tty, int state)
382{
383 struct moxa_port *port = tty->driver_data;
384
385 moxafunc(port->tableAddr, state ? FC_SendBreak : FC_StopBreak,
386 Magic_code);
387}
249 388
250static const struct tty_operations moxa_ops = { 389static const struct tty_operations moxa_ops = {
251 .open = moxa_open, 390 .open = moxa_open,
@@ -254,8 +393,6 @@ static const struct tty_operations moxa_ops = {
254 .write_room = moxa_write_room, 393 .write_room = moxa_write_room,
255 .flush_buffer = moxa_flush_buffer, 394 .flush_buffer = moxa_flush_buffer,
256 .chars_in_buffer = moxa_chars_in_buffer, 395 .chars_in_buffer = moxa_chars_in_buffer,
257 .flush_chars = moxa_flush_chars,
258 .put_char = moxa_put_char,
259 .ioctl = moxa_ioctl, 396 .ioctl = moxa_ioctl,
260 .throttle = moxa_throttle, 397 .throttle = moxa_throttle,
261 .unthrottle = moxa_unthrottle, 398 .unthrottle = moxa_unthrottle,
@@ -263,15 +400,509 @@ static const struct tty_operations moxa_ops = {
263 .stop = moxa_stop, 400 .stop = moxa_stop,
264 .start = moxa_start, 401 .start = moxa_start,
265 .hangup = moxa_hangup, 402 .hangup = moxa_hangup,
403 .break_ctl = moxa_break_ctl,
266 .tiocmget = moxa_tiocmget, 404 .tiocmget = moxa_tiocmget,
267 .tiocmset = moxa_tiocmset, 405 .tiocmset = moxa_tiocmset,
268}; 406};
269 407
270static struct tty_driver *moxaDriver; 408static struct tty_driver *moxaDriver;
271static struct moxa_port moxa_ports[MAX_PORTS];
272static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0); 409static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0);
273static DEFINE_SPINLOCK(moxa_lock); 410static DEFINE_SPINLOCK(moxa_lock);
274 411
412/*
413 * HW init
414 */
415
416static int moxa_check_fw_model(struct moxa_board_conf *brd, u8 model)
417{
418 switch (brd->boardType) {
419 case MOXA_BOARD_C218_ISA:
420 case MOXA_BOARD_C218_PCI:
421 if (model != 1)
422 goto err;
423 break;
424 case MOXA_BOARD_CP204J:
425 if (model != 3)
426 goto err;
427 break;
428 default:
429 if (model != 2)
430 goto err;
431 break;
432 }
433 return 0;
434err:
435 return -EINVAL;
436}
437
438static int moxa_check_fw(const void *ptr)
439{
440 const __le16 *lptr = ptr;
441
442 if (*lptr != cpu_to_le16(0x7980))
443 return -EINVAL;
444
445 return 0;
446}
447
448static int moxa_load_bios(struct moxa_board_conf *brd, const u8 *buf,
449 size_t len)
450{
451 void __iomem *baseAddr = brd->basemem;
452 u16 tmp;
453
454 writeb(HW_reset, baseAddr + Control_reg); /* reset */
455 msleep(10);
456 memset_io(baseAddr, 0, 4096);
457 memcpy_toio(baseAddr, buf, len); /* download BIOS */
458 writeb(0, baseAddr + Control_reg); /* restart */
459
460 msleep(2000);
461
462 switch (brd->boardType) {
463 case MOXA_BOARD_C218_ISA:
464 case MOXA_BOARD_C218_PCI:
465 tmp = readw(baseAddr + C218_key);
466 if (tmp != C218_KeyCode)
467 goto err;
468 break;
469 case MOXA_BOARD_CP204J:
470 tmp = readw(baseAddr + C218_key);
471 if (tmp != CP204J_KeyCode)
472 goto err;
473 break;
474 default:
475 tmp = readw(baseAddr + C320_key);
476 if (tmp != C320_KeyCode)
477 goto err;
478 tmp = readw(baseAddr + C320_status);
479 if (tmp != STS_init) {
480 printk(KERN_ERR "MOXA: bios upload failed -- CPU/Basic "
481 "module not found\n");
482 return -EIO;
483 }
484 break;
485 }
486
487 return 0;
488err:
489 printk(KERN_ERR "MOXA: bios upload failed -- board not found\n");
490 return -EIO;
491}
492
493static int moxa_load_320b(struct moxa_board_conf *brd, const u8 *ptr,
494 size_t len)
495{
496 void __iomem *baseAddr = brd->basemem;
497
498 if (len < 7168) {
499 printk(KERN_ERR "MOXA: invalid 320 bios -- too short\n");
500 return -EINVAL;
501 }
502
503 writew(len - 7168 - 2, baseAddr + C320bapi_len);
504 writeb(1, baseAddr + Control_reg); /* Select Page 1 */
505 memcpy_toio(baseAddr + DynPage_addr, ptr, 7168);
506 writeb(2, baseAddr + Control_reg); /* Select Page 2 */
507 memcpy_toio(baseAddr + DynPage_addr, ptr + 7168, len - 7168);
508
509 return 0;
510}
511
512static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr,
513 size_t len)
514{
515 void __iomem *baseAddr = brd->basemem;
516 const u16 *uptr = ptr;
517 size_t wlen, len2, j;
518 unsigned long key, loadbuf, loadlen, checksum, checksum_ok;
519 unsigned int i, retry;
520 u16 usum, keycode;
521
522 keycode = (brd->boardType == MOXA_BOARD_CP204J) ? CP204J_KeyCode :
523 C218_KeyCode;
524
525 switch (brd->boardType) {
526 case MOXA_BOARD_CP204J:
527 case MOXA_BOARD_C218_ISA:
528 case MOXA_BOARD_C218_PCI:
529 key = C218_key;
530 loadbuf = C218_LoadBuf;
531 loadlen = C218DLoad_len;
532 checksum = C218check_sum;
533 checksum_ok = C218chksum_ok;
534 break;
535 default:
536 key = C320_key;
537 keycode = C320_KeyCode;
538 loadbuf = C320_LoadBuf;
539 loadlen = C320DLoad_len;
540 checksum = C320check_sum;
541 checksum_ok = C320chksum_ok;
542 break;
543 }
544
545 usum = 0;
546 wlen = len >> 1;
547 for (i = 0; i < wlen; i++)
548 usum += le16_to_cpu(uptr[i]);
549 retry = 0;
550 do {
551 wlen = len >> 1;
552 j = 0;
553 while (wlen) {
554 len2 = (wlen > 2048) ? 2048 : wlen;
555 wlen -= len2;
556 memcpy_toio(baseAddr + loadbuf, ptr + j, len2 << 1);
557 j += len2 << 1;
558
559 writew(len2, baseAddr + loadlen);
560 writew(0, baseAddr + key);
561 for (i = 0; i < 100; i++) {
562 if (readw(baseAddr + key) == keycode)
563 break;
564 msleep(10);
565 }
566 if (readw(baseAddr + key) != keycode)
567 return -EIO;
568 }
569 writew(0, baseAddr + loadlen);
570 writew(usum, baseAddr + checksum);
571 writew(0, baseAddr + key);
572 for (i = 0; i < 100; i++) {
573 if (readw(baseAddr + key) == keycode)
574 break;
575 msleep(10);
576 }
577 retry++;
578 } while ((readb(baseAddr + checksum_ok) != 1) && (retry < 3));
579 if (readb(baseAddr + checksum_ok) != 1)
580 return -EIO;
581
582 writew(0, baseAddr + key);
583 for (i = 0; i < 600; i++) {
584 if (readw(baseAddr + Magic_no) == Magic_code)
585 break;
586 msleep(10);
587 }
588 if (readw(baseAddr + Magic_no) != Magic_code)
589 return -EIO;
590
591 if (MOXA_IS_320(brd)) {
592 if (brd->busType == MOXA_BUS_TYPE_PCI) { /* ASIC board */
593 writew(0x3800, baseAddr + TMS320_PORT1);
594 writew(0x3900, baseAddr + TMS320_PORT2);
595 writew(28499, baseAddr + TMS320_CLOCK);
596 } else {
597 writew(0x3200, baseAddr + TMS320_PORT1);
598 writew(0x3400, baseAddr + TMS320_PORT2);
599 writew(19999, baseAddr + TMS320_CLOCK);
600 }
601 }
602 writew(1, baseAddr + Disable_IRQ);
603 writew(0, baseAddr + Magic_no);
604 for (i = 0; i < 500; i++) {
605 if (readw(baseAddr + Magic_no) == Magic_code)
606 break;
607 msleep(10);
608 }
609 if (readw(baseAddr + Magic_no) != Magic_code)
610 return -EIO;
611
612 if (MOXA_IS_320(brd)) {
613 j = readw(baseAddr + Module_cnt);
614 if (j <= 0)
615 return -EIO;
616 brd->numPorts = j * 8;
617 writew(j, baseAddr + Module_no);
618 writew(0, baseAddr + Magic_no);
619 for (i = 0; i < 600; i++) {
620 if (readw(baseAddr + Magic_no) == Magic_code)
621 break;
622 msleep(10);
623 }
624 if (readw(baseAddr + Magic_no) != Magic_code)
625 return -EIO;
626 }
627 brd->intNdx = baseAddr + IRQindex;
628 brd->intPend = baseAddr + IRQpending;
629 brd->intTable = baseAddr + IRQtable;
630
631 return 0;
632}
633
634static int moxa_load_code(struct moxa_board_conf *brd, const void *ptr,
635 size_t len)
636{
637 void __iomem *ofsAddr, *baseAddr = brd->basemem;
638 struct moxa_port *port;
639 int retval, i;
640
641 if (len % 2) {
642 printk(KERN_ERR "MOXA: bios length is not even\n");
643 return -EINVAL;
644 }
645
646 retval = moxa_real_load_code(brd, ptr, len); /* may change numPorts */
647 if (retval)
648 return retval;
649
650 switch (brd->boardType) {
651 case MOXA_BOARD_C218_ISA:
652 case MOXA_BOARD_C218_PCI:
653 case MOXA_BOARD_CP204J:
654 port = brd->ports;
655 for (i = 0; i < brd->numPorts; i++, port++) {
656 port->board = brd;
657 port->DCDState = 0;
658 port->tableAddr = baseAddr + Extern_table +
659 Extern_size * i;
660 ofsAddr = port->tableAddr;
661 writew(C218rx_mask, ofsAddr + RX_mask);
662 writew(C218tx_mask, ofsAddr + TX_mask);
663 writew(C218rx_spage + i * C218buf_pageno, ofsAddr + Page_rxb);
664 writew(readw(ofsAddr + Page_rxb) + C218rx_pageno, ofsAddr + EndPage_rxb);
665
666 writew(C218tx_spage + i * C218buf_pageno, ofsAddr + Page_txb);
667 writew(readw(ofsAddr + Page_txb) + C218tx_pageno, ofsAddr + EndPage_txb);
668
669 }
670 break;
671 default:
672 port = brd->ports;
673 for (i = 0; i < brd->numPorts; i++, port++) {
674 port->board = brd;
675 port->DCDState = 0;
676 port->tableAddr = baseAddr + Extern_table +
677 Extern_size * i;
678 ofsAddr = port->tableAddr;
679 switch (brd->numPorts) {
680 case 8:
681 writew(C320p8rx_mask, ofsAddr + RX_mask);
682 writew(C320p8tx_mask, ofsAddr + TX_mask);
683 writew(C320p8rx_spage + i * C320p8buf_pgno, ofsAddr + Page_rxb);
684 writew(readw(ofsAddr + Page_rxb) + C320p8rx_pgno, ofsAddr + EndPage_rxb);
685 writew(C320p8tx_spage + i * C320p8buf_pgno, ofsAddr + Page_txb);
686 writew(readw(ofsAddr + Page_txb) + C320p8tx_pgno, ofsAddr + EndPage_txb);
687
688 break;
689 case 16:
690 writew(C320p16rx_mask, ofsAddr + RX_mask);
691 writew(C320p16tx_mask, ofsAddr + TX_mask);
692 writew(C320p16rx_spage + i * C320p16buf_pgno, ofsAddr + Page_rxb);
693 writew(readw(ofsAddr + Page_rxb) + C320p16rx_pgno, ofsAddr + EndPage_rxb);
694 writew(C320p16tx_spage + i * C320p16buf_pgno, ofsAddr + Page_txb);
695 writew(readw(ofsAddr + Page_txb) + C320p16tx_pgno, ofsAddr + EndPage_txb);
696 break;
697
698 case 24:
699 writew(C320p24rx_mask, ofsAddr + RX_mask);
700 writew(C320p24tx_mask, ofsAddr + TX_mask);
701 writew(C320p24rx_spage + i * C320p24buf_pgno, ofsAddr + Page_rxb);
702 writew(readw(ofsAddr + Page_rxb) + C320p24rx_pgno, ofsAddr + EndPage_rxb);
703 writew(C320p24tx_spage + i * C320p24buf_pgno, ofsAddr + Page_txb);
704 writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
705 break;
706 case 32:
707 writew(C320p32rx_mask, ofsAddr + RX_mask);
708 writew(C320p32tx_mask, ofsAddr + TX_mask);
709 writew(C320p32tx_ofs, ofsAddr + Ofs_txb);
710 writew(C320p32rx_spage + i * C320p32buf_pgno, ofsAddr + Page_rxb);
711 writew(readb(ofsAddr + Page_rxb), ofsAddr + EndPage_rxb);
712 writew(C320p32tx_spage + i * C320p32buf_pgno, ofsAddr + Page_txb);
713 writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
714 break;
715 }
716 }
717 break;
718 }
719 return 0;
720}
721
722static int moxa_load_fw(struct moxa_board_conf *brd, const struct firmware *fw)
723{
724 void *ptr = fw->data;
725 char rsn[64];
726 u16 lens[5];
727 size_t len;
728 unsigned int a, lenp, lencnt;
729 int ret = -EINVAL;
730 struct {
731 __le32 magic; /* 0x34303430 */
732 u8 reserved1[2];
733 u8 type; /* UNIX = 3 */
734 u8 model; /* C218T=1, C320T=2, CP204=3 */
735 u8 reserved2[8];
736 __le16 len[5];
737 } *hdr = ptr;
738
739 BUILD_BUG_ON(ARRAY_SIZE(hdr->len) != ARRAY_SIZE(lens));
740
741 if (fw->size < MOXA_FW_HDRLEN) {
742 strcpy(rsn, "too short (even header won't fit)");
743 goto err;
744 }
745 if (hdr->magic != cpu_to_le32(0x30343034)) {
746 sprintf(rsn, "bad magic: %.8x", le32_to_cpu(hdr->magic));
747 goto err;
748 }
749 if (hdr->type != 3) {
750 sprintf(rsn, "not for linux, type is %u", hdr->type);
751 goto err;
752 }
753 if (moxa_check_fw_model(brd, hdr->model)) {
754 sprintf(rsn, "not for this card, model is %u", hdr->model);
755 goto err;
756 }
757
758 len = MOXA_FW_HDRLEN;
759 lencnt = hdr->model == 2 ? 5 : 3;
760 for (a = 0; a < ARRAY_SIZE(lens); a++) {
761 lens[a] = le16_to_cpu(hdr->len[a]);
762 if (lens[a] && len + lens[a] <= fw->size &&
763 moxa_check_fw(&fw->data[len]))
764 printk(KERN_WARNING "MOXA firmware: unexpected input "
765 "at offset %u, but going on\n", (u32)len);
766 if (!lens[a] && a < lencnt) {
767 sprintf(rsn, "too few entries in fw file");
768 goto err;
769 }
770 len += lens[a];
771 }
772
773 if (len != fw->size) {
774 sprintf(rsn, "bad length: %u (should be %u)", (u32)fw->size,
775 (u32)len);
776 goto err;
777 }
778
779 ptr += MOXA_FW_HDRLEN;
780 lenp = 0; /* bios */
781
782 strcpy(rsn, "read above");
783
784 ret = moxa_load_bios(brd, ptr, lens[lenp]);
785 if (ret)
786 goto err;
787
788 /* we skip the tty section (lens[1]), since we don't need it */
789 ptr += lens[lenp] + lens[lenp + 1];
790 lenp += 2; /* comm */
791
792 if (hdr->model == 2) {
793 ret = moxa_load_320b(brd, ptr, lens[lenp]);
794 if (ret)
795 goto err;
796 /* skip another tty */
797 ptr += lens[lenp] + lens[lenp + 1];
798 lenp += 2;
799 }
800
801 ret = moxa_load_code(brd, ptr, lens[lenp]);
802 if (ret)
803 goto err;
804
805 return 0;
806err:
807 printk(KERN_ERR "firmware failed to load, reason: %s\n", rsn);
808 return ret;
809}
810
811static int moxa_init_board(struct moxa_board_conf *brd, struct device *dev)
812{
813 const struct firmware *fw;
814 const char *file;
815 struct moxa_port *p;
816 unsigned int i;
817 int ret;
818
819 brd->ports = kcalloc(MAX_PORTS_PER_BOARD, sizeof(*brd->ports),
820 GFP_KERNEL);
821 if (brd->ports == NULL) {
822 printk(KERN_ERR "cannot allocate memory for ports\n");
823 ret = -ENOMEM;
824 goto err;
825 }
826
827 for (i = 0, p = brd->ports; i < MAX_PORTS_PER_BOARD; i++, p++) {
828 p->type = PORT_16550A;
829 p->close_delay = 5 * HZ / 10;
830 p->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
831 init_waitqueue_head(&p->open_wait);
832 }
833
834 switch (brd->boardType) {
835 case MOXA_BOARD_C218_ISA:
836 case MOXA_BOARD_C218_PCI:
837 file = "c218tunx.cod";
838 break;
839 case MOXA_BOARD_CP204J:
840 file = "cp204unx.cod";
841 break;
842 default:
843 file = "c320tunx.cod";
844 break;
845 }
846
847 ret = request_firmware(&fw, file, dev);
848 if (ret) {
849 printk(KERN_ERR "MOXA: request_firmware failed. Make sure "
850 "you've placed '%s' file into your firmware "
851 "loader directory (e.g. /lib/firmware)\n",
852 file);
853 goto err_free;
854 }
855
856 ret = moxa_load_fw(brd, fw);
857
858 release_firmware(fw);
859
860 if (ret)
861 goto err_free;
862
863 spin_lock_bh(&moxa_lock);
864 brd->ready = 1;
865 if (!timer_pending(&moxaTimer))
866 mod_timer(&moxaTimer, jiffies + HZ / 50);
867 spin_unlock_bh(&moxa_lock);
868
869 return 0;
870err_free:
871 kfree(brd->ports);
872err:
873 return ret;
874}
875
876static void moxa_board_deinit(struct moxa_board_conf *brd)
877{
878 unsigned int a, opened;
879
880 mutex_lock(&moxa_openlock);
881 spin_lock_bh(&moxa_lock);
882 brd->ready = 0;
883 spin_unlock_bh(&moxa_lock);
884
885 /* pci hot-un-plug support */
886 for (a = 0; a < brd->numPorts; a++)
887 if (brd->ports[a].asyncflags & ASYNC_INITIALIZED)
888 tty_hangup(brd->ports[a].tty);
889 while (1) {
890 opened = 0;
891 for (a = 0; a < brd->numPorts; a++)
892 if (brd->ports[a].asyncflags & ASYNC_INITIALIZED)
893 opened++;
894 mutex_unlock(&moxa_openlock);
895 if (!opened)
896 break;
897 msleep(50);
898 mutex_lock(&moxa_openlock);
899 }
900
901 iounmap(brd->basemem);
902 brd->basemem = NULL;
903 kfree(brd->ports);
904}
905
275#ifdef CONFIG_PCI 906#ifdef CONFIG_PCI
276static int __devinit moxa_pci_probe(struct pci_dev *pdev, 907static int __devinit moxa_pci_probe(struct pci_dev *pdev,
277 const struct pci_device_id *ent) 908 const struct pci_device_id *ent)
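
The new moxa_init_board()/moxa_load_fw() path above replaces the driver's built-in firmware with request_firmware(). A minimal sketch of that load/validate/release flow; the header check and the function names here are placeholders, not the driver's real format handling:

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Placeholder validation -- the real driver checks magic, model and
 * per-section lengths (see moxa_load_fw() above). */
static int demo_check_image(const u8 *data, size_t size)
{
	return size >= 32 ? 0 : -EINVAL;
}

static int demo_load_firmware(struct device *dev, const char *file)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, file, dev);	/* blob comes from userspace */
	if (ret) {
		dev_err(dev, "firmware '%s' not available\n", file);
		return ret;
	}

	ret = demo_check_image(fw->data, fw->size);

	release_firmware(fw);			/* always drop the reference */
	return ret;
}
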
@@ -299,10 +930,17 @@ static int __devinit moxa_pci_probe(struct pci_dev *pdev,
299 } 930 }
300 931
301 board = &moxa_boards[i]; 932 board = &moxa_boards[i];
302 board->basemem = pci_iomap(pdev, 2, 0x4000); 933
934 retval = pci_request_region(pdev, 2, "moxa-base");
935 if (retval) {
936 dev_err(&pdev->dev, "can't request pci region 2\n");
937 goto err;
938 }
939
940 board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000);
303 if (board->basemem == NULL) { 941 if (board->basemem == NULL) {
304 dev_err(&pdev->dev, "can't remap io space 2\n"); 942 dev_err(&pdev->dev, "can't remap io space 2\n");
305 goto err; 943 goto err_reg;
306 } 944 }
307 945
308 board->boardType = board_type; 946 board->boardType = board_type;
@@ -321,9 +959,21 @@ static int __devinit moxa_pci_probe(struct pci_dev *pdev,
321 } 959 }
322 board->busType = MOXA_BUS_TYPE_PCI; 960 board->busType = MOXA_BUS_TYPE_PCI;
323 961
962 retval = moxa_init_board(board, &pdev->dev);
963 if (retval)
964 goto err_base;
965
324 pci_set_drvdata(pdev, board); 966 pci_set_drvdata(pdev, board);
325 967
326 return (0); 968 dev_info(&pdev->dev, "board '%s' ready (%u ports, firmware loaded)\n",
969 moxa_brdname[board_type - 1], board->numPorts);
970
971 return 0;
972err_base:
973 iounmap(board->basemem);
974 board->basemem = NULL;
975err_reg:
976 pci_release_region(pdev, 2);
327err: 977err:
328 return retval; 978 return retval;
329} 979}
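
The probe changes above pair pci_request_region() with ioremap_nocache() and unwind in reverse order on failure (err_base unmaps, err_reg releases the region). A compact sketch of that claim/map/unwind pattern; the helper names and the fixed BAR number and size are illustrative:

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/errno.h>

static int demo_map_bar2(struct pci_dev *pdev, void __iomem **base)
{
	int ret;

	ret = pci_request_region(pdev, 2, "demo-base");	/* claim BAR 2 */
	if (ret)
		return ret;

	*base = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000);
	if (*base == NULL) {
		pci_release_region(pdev, 2);		/* undo the claim */
		return -ENOMEM;
	}
	return 0;
}

/* Teardown mirrors the remove path above: unmap first, then release. */
static void demo_unmap_bar2(struct pci_dev *pdev, void __iomem *base)
{
	iounmap(base);
	pci_release_region(pdev, 2);
}
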
@@ -332,8 +982,9 @@ static void __devexit moxa_pci_remove(struct pci_dev *pdev)
332{ 982{
333 struct moxa_board_conf *brd = pci_get_drvdata(pdev); 983 struct moxa_board_conf *brd = pci_get_drvdata(pdev);
334 984
335 pci_iounmap(pdev, brd->basemem); 985 moxa_board_deinit(brd);
336 brd->basemem = NULL; 986
987 pci_release_region(pdev, 2);
337} 988}
338 989
339static struct pci_driver moxa_pci_driver = { 990static struct pci_driver moxa_pci_driver = {
@@ -346,8 +997,8 @@ static struct pci_driver moxa_pci_driver = {
346 997
347static int __init moxa_init(void) 998static int __init moxa_init(void)
348{ 999{
349 int i, numBoards, retval = 0; 1000 unsigned int isabrds = 0;
350 struct moxa_port *ch; 1001 int retval = 0;
351 1002
352 printk(KERN_INFO "MOXA Intellio family driver version %s\n", 1003 printk(KERN_INFO "MOXA Intellio family driver version %s\n",
353 MOXA_VERSION); 1004 MOXA_VERSION);
@@ -368,154 +1019,176 @@ static int __init moxa_init(void)
368 moxaDriver->flags = TTY_DRIVER_REAL_RAW; 1019 moxaDriver->flags = TTY_DRIVER_REAL_RAW;
369 tty_set_operations(moxaDriver, &moxa_ops); 1020 tty_set_operations(moxaDriver, &moxa_ops);
370 1021
371 for (i = 0, ch = moxa_ports; i < MAX_PORTS; i++, ch++) {
372 ch->type = PORT_16550A;
373 ch->port = i;
374 ch->close_delay = 5 * HZ / 10;
375 ch->closing_wait = 30 * HZ;
376 ch->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
377 init_waitqueue_head(&ch->open_wait);
378 init_completion(&ch->close_wait);
379
380 setup_timer(&ch->emptyTimer, moxa_check_xmit_empty,
381 (unsigned long)ch);
382 }
383
384 pr_debug("Moxa tty devices major number = %d\n", ttymajor);
385
386 if (tty_register_driver(moxaDriver)) { 1022 if (tty_register_driver(moxaDriver)) {
387 printk(KERN_ERR "Couldn't install MOXA Smartio family driver !\n"); 1023 printk(KERN_ERR "can't register MOXA Smartio tty driver!\n");
388 put_tty_driver(moxaDriver); 1024 put_tty_driver(moxaDriver);
389 return -1; 1025 return -1;
390 } 1026 }
391 1027
392 mod_timer(&moxaTimer, jiffies + HZ / 50); 1028 /* Find the boards defined from module args. */
393
394 /* Find the boards defined in source code */
395 numBoards = 0;
396 for (i = 0; i < MAX_BOARDS; i++) {
397 if ((moxa_isa_boards[i].boardType == MOXA_BOARD_C218_ISA) ||
398 (moxa_isa_boards[i].boardType == MOXA_BOARD_C320_ISA)) {
399 moxa_boards[numBoards].boardType = moxa_isa_boards[i].boardType;
400 if (moxa_isa_boards[i].boardType == MOXA_BOARD_C218_ISA)
401 moxa_boards[numBoards].numPorts = 8;
402 else
403 moxa_boards[numBoards].numPorts = moxa_isa_boards[i].numPorts;
404 moxa_boards[numBoards].busType = MOXA_BUS_TYPE_ISA;
405 moxa_boards[numBoards].baseAddr = moxa_isa_boards[i].baseAddr;
406 pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n",
407 numBoards + 1,
408 moxa_brdname[moxa_boards[numBoards].boardType-1],
409 moxa_boards[numBoards].baseAddr);
410 numBoards++;
411 }
412 }
413 /* Find the boards defined form module args. */
414#ifdef MODULE 1029#ifdef MODULE
1030 {
1031 struct moxa_board_conf *brd = moxa_boards;
1032 unsigned int i;
415 for (i = 0; i < MAX_BOARDS; i++) { 1033 for (i = 0; i < MAX_BOARDS; i++) {
416 if ((type[i] == MOXA_BOARD_C218_ISA) || 1034 if (!baseaddr[i])
417 (type[i] == MOXA_BOARD_C320_ISA)) { 1035 break;
1036 if (type[i] == MOXA_BOARD_C218_ISA ||
1037 type[i] == MOXA_BOARD_C320_ISA) {
418 pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n", 1038 pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n",
419 numBoards + 1, moxa_brdname[type[i] - 1], 1039 isabrds + 1, moxa_brdname[type[i] - 1],
420 (unsigned long)baseaddr[i]); 1040 baseaddr[i]);
421 if (numBoards >= MAX_BOARDS) { 1041 brd->boardType = type[i];
422 printk(KERN_WARNING "More than %d MOXA " 1042 brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 :
423 "Intellio family boards found. Board " 1043 numports[i];
424 "is ignored.\n", MAX_BOARDS); 1044 brd->busType = MOXA_BUS_TYPE_ISA;
1045 brd->basemem = ioremap_nocache(baseaddr[i], 0x4000);
1046 if (!brd->basemem) {
1047 printk(KERN_ERR "MOXA: can't remap %lx\n",
1048 baseaddr[i]);
425 continue; 1049 continue;
426 } 1050 }
427 moxa_boards[numBoards].boardType = type[i]; 1051 if (moxa_init_board(brd, NULL)) {
428 if (moxa_isa_boards[i].boardType == MOXA_BOARD_C218_ISA) 1052 iounmap(brd->basemem);
429 moxa_boards[numBoards].numPorts = 8; 1053 brd->basemem = NULL;
430 else 1054 continue;
431 moxa_boards[numBoards].numPorts = numports[i]; 1055 }
432 moxa_boards[numBoards].busType = MOXA_BUS_TYPE_ISA; 1056
433 moxa_boards[numBoards].baseAddr = baseaddr[i]; 1057 printk(KERN_INFO "MOXA isa board found at 0x%.8lu and "
434 numBoards++; 1058 "ready (%u ports, firmware loaded)\n",
1059 baseaddr[i], brd->numPorts);
1060
1061 brd++;
1062 isabrds++;
435 } 1063 }
436 } 1064 }
1065 }
437#endif 1066#endif
438 1067
439#ifdef CONFIG_PCI 1068#ifdef CONFIG_PCI
440 retval = pci_register_driver(&moxa_pci_driver); 1069 retval = pci_register_driver(&moxa_pci_driver);
441 if (retval) { 1070 if (retval) {
442 printk(KERN_ERR "Can't register moxa pci driver!\n"); 1071 printk(KERN_ERR "Can't register MOXA pci driver!\n");
443 if (numBoards) 1072 if (isabrds)
444 retval = 0; 1073 retval = 0;
445 } 1074 }
446#endif 1075#endif
447 1076
448 for (i = 0; i < numBoards; i++) {
449 moxa_boards[i].basemem = ioremap(moxa_boards[i].baseAddr,
450 0x4000);
451 }
452
453 return retval; 1077 return retval;
454} 1078}
455 1079
456static void __exit moxa_exit(void) 1080static void __exit moxa_exit(void)
457{ 1081{
458 int i; 1082 unsigned int i;
459 1083
460 del_timer_sync(&moxaTimer); 1084#ifdef CONFIG_PCI
1085 pci_unregister_driver(&moxa_pci_driver);
1086#endif
1087
1088 for (i = 0; i < MAX_BOARDS; i++) /* ISA boards */
1089 if (moxa_boards[i].ready)
1090 moxa_board_deinit(&moxa_boards[i]);
461 1091
462 for (i = 0; i < MAX_PORTS; i++) 1092 del_timer_sync(&moxaTimer);
463 del_timer_sync(&moxa_ports[i].emptyTimer);
464 1093
465 if (tty_unregister_driver(moxaDriver)) 1094 if (tty_unregister_driver(moxaDriver))
466 printk(KERN_ERR "Couldn't unregister MOXA Intellio family " 1095 printk(KERN_ERR "Couldn't unregister MOXA Intellio family "
467 "serial driver\n"); 1096 "serial driver\n");
468 put_tty_driver(moxaDriver); 1097 put_tty_driver(moxaDriver);
469
470#ifdef CONFIG_PCI
471 pci_unregister_driver(&moxa_pci_driver);
472#endif
473
474 for (i = 0; i < MAX_BOARDS; i++)
475 if (moxa_boards[i].basemem)
476 iounmap(moxa_boards[i].basemem);
477} 1098}
478 1099
479module_init(moxa_init); 1100module_init(moxa_init);
480module_exit(moxa_exit); 1101module_exit(moxa_exit);
481 1102
1103static void moxa_close_port(struct moxa_port *ch)
1104{
1105 moxa_shut_down(ch);
1106 MoxaPortFlushData(ch, 2);
1107 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
1108 ch->tty->driver_data = NULL;
1109 ch->tty = NULL;
1110}
1111
1112static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp,
1113 struct moxa_port *ch)
1114{
1115 DEFINE_WAIT(wait);
1116 int retval = 0;
1117 u8 dcd;
1118
1119 while (1) {
1120 prepare_to_wait(&ch->open_wait, &wait, TASK_INTERRUPTIBLE);
1121 if (tty_hung_up_p(filp)) {
1122#ifdef SERIAL_DO_RESTART
1123 retval = -ERESTARTSYS;
1124#else
1125 retval = -EAGAIN;
1126#endif
1127 break;
1128 }
1129 spin_lock_bh(&moxa_lock);
1130 dcd = ch->DCDState;
1131 spin_unlock_bh(&moxa_lock);
1132 if (dcd)
1133 break;
1134
1135 if (signal_pending(current)) {
1136 retval = -ERESTARTSYS;
1137 break;
1138 }
1139 schedule();
1140 }
1141 finish_wait(&ch->open_wait, &wait);
1142
1143 return retval;
1144}
1145
482static int moxa_open(struct tty_struct *tty, struct file *filp) 1146static int moxa_open(struct tty_struct *tty, struct file *filp)
483{ 1147{
1148 struct moxa_board_conf *brd;
484 struct moxa_port *ch; 1149 struct moxa_port *ch;
485 int port; 1150 int port;
486 int retval; 1151 int retval;
487 1152
488 port = tty->index; 1153 port = tty->index;
489 if (port == MAX_PORTS) { 1154 if (port == MAX_PORTS) {
490 return (0); 1155 return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
491 } 1156 }
492 if (!MoxaPortIsValid(port)) { 1157 if (mutex_lock_interruptible(&moxa_openlock))
493 tty->driver_data = NULL; 1158 return -ERESTARTSYS;
494 return (-ENODEV); 1159 brd = &moxa_boards[port / MAX_PORTS_PER_BOARD];
1160 if (!brd->ready) {
1161 mutex_unlock(&moxa_openlock);
1162 return -ENODEV;
495 } 1163 }
496 1164
497 ch = &moxa_ports[port]; 1165 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
498 ch->count++; 1166 ch->count++;
499 tty->driver_data = ch; 1167 tty->driver_data = ch;
500 ch->tty = tty; 1168 ch->tty = tty;
501 if (!(ch->asyncflags & ASYNC_INITIALIZED)) { 1169 if (!(ch->asyncflags & ASYNC_INITIALIZED)) {
502 ch->statusflags = 0; 1170 ch->statusflags = 0;
503 moxa_set_tty_param(tty, tty->termios); 1171 moxa_set_tty_param(tty, tty->termios);
504 MoxaPortLineCtrl(ch->port, 1, 1); 1172 MoxaPortLineCtrl(ch, 1, 1);
505 MoxaPortEnable(ch->port); 1173 MoxaPortEnable(ch);
1174 MoxaSetFifo(ch, ch->type == PORT_16550A);
506 ch->asyncflags |= ASYNC_INITIALIZED; 1175 ch->asyncflags |= ASYNC_INITIALIZED;
507 } 1176 }
508 retval = moxa_block_till_ready(tty, filp, ch); 1177 mutex_unlock(&moxa_openlock);
509 1178
510 moxa_unthrottle(tty); 1179 retval = 0;
511 1180 if (!(filp->f_flags & O_NONBLOCK) && !C_CLOCAL(tty))
512 if (ch->type == PORT_16550A) { 1181 retval = moxa_block_till_ready(tty, filp, ch);
513 MoxaSetFifo(ch->port, 1); 1182 mutex_lock(&moxa_openlock);
514 } else { 1183 if (retval) {
515 MoxaSetFifo(ch->port, 0); 1184 if (ch->count) /* 0 means already hung up... */
516 } 1185 if (--ch->count == 0)
1186 moxa_close_port(ch);
1187 } else
1188 ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
1189 mutex_unlock(&moxa_openlock);
517 1190
518 return (retval); 1191 return retval;
519} 1192}
520 1193
521static void moxa_close(struct tty_struct *tty, struct file *filp) 1194static void moxa_close(struct tty_struct *tty, struct file *filp)
@@ -524,23 +1197,14 @@ static void moxa_close(struct tty_struct *tty, struct file *filp)
524 int port; 1197 int port;
525 1198
526 port = tty->index; 1199 port = tty->index;
527 if (port == MAX_PORTS) { 1200 if (port == MAX_PORTS || tty_hung_up_p(filp))
528 return;
529 }
530 if (!MoxaPortIsValid(port)) {
531 pr_debug("Invalid portno in moxa_close\n");
532 tty->driver_data = NULL;
533 return;
534 }
535 if (tty->driver_data == NULL) {
536 return; 1201 return;
537 }
538 if (tty_hung_up_p(filp)) {
539 return;
540 }
541 ch = (struct moxa_port *) tty->driver_data;
542 1202
543 if ((tty->count == 1) && (ch->count != 1)) { 1203 mutex_lock(&moxa_openlock);
1204 ch = tty->driver_data;
1205 if (ch == NULL)
1206 goto unlock;
1207 if (tty->count == 1 && ch->count != 1) {
544 printk(KERN_WARNING "moxa_close: bad serial port count; " 1208 printk(KERN_WARNING "moxa_close: bad serial port count; "
545 "tty->count is 1, ch->count is %d\n", ch->count); 1209 "tty->count is 1, ch->count is %d\n", ch->count);
546 ch->count = 1; 1210 ch->count = 1;
@@ -550,59 +1214,35 @@ static void moxa_close(struct tty_struct *tty, struct file *filp)
550 "device=%s\n", tty->name); 1214 "device=%s\n", tty->name);
551 ch->count = 0; 1215 ch->count = 0;
552 } 1216 }
553 if (ch->count) { 1217 if (ch->count)
554 return; 1218 goto unlock;
555 }
556 ch->asyncflags |= ASYNC_CLOSING;
557 1219
558 ch->cflag = tty->termios->c_cflag; 1220 ch->cflag = tty->termios->c_cflag;
559 if (ch->asyncflags & ASYNC_INITIALIZED) { 1221 if (ch->asyncflags & ASYNC_INITIALIZED) {
560 moxa_setup_empty_event(tty); 1222 moxa_setup_empty_event(tty);
561 tty_wait_until_sent(tty, 30 * HZ); /* 30 seconds timeout */ 1223 tty_wait_until_sent(tty, 30 * HZ); /* 30 seconds timeout */
562 del_timer_sync(&moxa_ports[ch->port].emptyTimer);
563 }
564 moxa_shut_down(ch);
565 MoxaPortFlushData(port, 2);
566
567 if (tty->driver->flush_buffer)
568 tty->driver->flush_buffer(tty);
569 tty_ldisc_flush(tty);
570
571 tty->closing = 0;
572 ch->event = 0;
573 ch->tty = NULL;
574 if (ch->blocked_open) {
575 if (ch->close_delay) {
576 msleep_interruptible(jiffies_to_msecs(ch->close_delay));
577 }
578 wake_up_interruptible(&ch->open_wait);
579 } 1224 }
580 ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING); 1225
581 complete_all(&ch->close_wait); 1226 moxa_close_port(ch);
1227unlock:
1228 mutex_unlock(&moxa_openlock);
582} 1229}
583 1230
584static int moxa_write(struct tty_struct *tty, 1231static int moxa_write(struct tty_struct *tty,
585 const unsigned char *buf, int count) 1232 const unsigned char *buf, int count)
586{ 1233{
587 struct moxa_port *ch; 1234 struct moxa_port *ch = tty->driver_data;
588 int len, port; 1235 int len;
589 unsigned long flags;
590 1236
591 ch = (struct moxa_port *) tty->driver_data;
592 if (ch == NULL) 1237 if (ch == NULL)
593 return (0); 1238 return 0;
594 port = ch->port;
595 1239
596 spin_lock_irqsave(&moxa_lock, flags); 1240 spin_lock_bh(&moxa_lock);
597 len = MoxaPortWriteData(port, (unsigned char *) buf, count); 1241 len = MoxaPortWriteData(ch, buf, count);
598 spin_unlock_irqrestore(&moxa_lock, flags); 1242 spin_unlock_bh(&moxa_lock);
599 1243
600 /*********************************************
601 if ( !(ch->statusflags & LOWWAIT) &&
602 ((len != count) || (MoxaPortTxFree(port) <= 100)) )
603 ************************************************/
604 ch->statusflags |= LOWWAIT; 1244 ch->statusflags |= LOWWAIT;
605 return (len); 1245 return len;
606} 1246}
607 1247
608static int moxa_write_room(struct tty_struct *tty) 1248static int moxa_write_room(struct tty_struct *tty)
@@ -610,27 +1250,27 @@ static int moxa_write_room(struct tty_struct *tty)
610 struct moxa_port *ch; 1250 struct moxa_port *ch;
611 1251
612 if (tty->stopped) 1252 if (tty->stopped)
613 return (0); 1253 return 0;
614 ch = (struct moxa_port *) tty->driver_data; 1254 ch = tty->driver_data;
615 if (ch == NULL) 1255 if (ch == NULL)
616 return (0); 1256 return 0;
617 return (MoxaPortTxFree(ch->port)); 1257 return MoxaPortTxFree(ch);
618} 1258}
619 1259
620static void moxa_flush_buffer(struct tty_struct *tty) 1260static void moxa_flush_buffer(struct tty_struct *tty)
621{ 1261{
622 struct moxa_port *ch = (struct moxa_port *) tty->driver_data; 1262 struct moxa_port *ch = tty->driver_data;
623 1263
624 if (ch == NULL) 1264 if (ch == NULL)
625 return; 1265 return;
626 MoxaPortFlushData(ch->port, 1); 1266 MoxaPortFlushData(ch, 1);
627 tty_wakeup(tty); 1267 tty_wakeup(tty);
628} 1268}
629 1269
630static int moxa_chars_in_buffer(struct tty_struct *tty) 1270static int moxa_chars_in_buffer(struct tty_struct *tty)
631{ 1271{
1272 struct moxa_port *ch = tty->driver_data;
632 int chars; 1273 int chars;
633 struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
634 1274
635 /* 1275 /*
636 * Sigh...I have to check if driver_data is NULL here, because 1276 * Sigh...I have to check if driver_data is NULL here, because
@@ -639,8 +1279,9 @@ static int moxa_chars_in_buffer(struct tty_struct *tty)
639 * routine. And since the open() failed, we return 0 here. TDJ 1279 * routine. And since the open() failed, we return 0 here. TDJ
640 */ 1280 */
641 if (ch == NULL) 1281 if (ch == NULL)
642 return (0); 1282 return 0;
643 chars = MoxaPortTxQueue(ch->port); 1283 lock_kernel();
1284 chars = MoxaPortTxQueue(ch);
644 if (chars) { 1285 if (chars) {
645 /* 1286 /*
646 * Make it possible to wakeup anything waiting for output 1287 * Make it possible to wakeup anything waiting for output
@@ -649,73 +1290,54 @@ static int moxa_chars_in_buffer(struct tty_struct *tty)
649 if (!(ch->statusflags & EMPTYWAIT)) 1290 if (!(ch->statusflags & EMPTYWAIT))
650 moxa_setup_empty_event(tty); 1291 moxa_setup_empty_event(tty);
651 } 1292 }
652 return (chars); 1293 unlock_kernel();
653} 1294 return chars;
654
655static void moxa_flush_chars(struct tty_struct *tty)
656{
657 /*
658 * Don't think I need this, because this is called to empty the TX
659 * buffer for the 16450, 16550, etc.
660 */
661}
662
663static void moxa_put_char(struct tty_struct *tty, unsigned char c)
664{
665 struct moxa_port *ch;
666 int port;
667 unsigned long flags;
668
669 ch = (struct moxa_port *) tty->driver_data;
670 if (ch == NULL)
671 return;
672 port = ch->port;
673 spin_lock_irqsave(&moxa_lock, flags);
674 MoxaPortWriteData(port, &c, 1);
675 spin_unlock_irqrestore(&moxa_lock, flags);
676 /************************************************
677 if ( !(ch->statusflags & LOWWAIT) && (MoxaPortTxFree(port) <= 100) )
678 *************************************************/
679 ch->statusflags |= LOWWAIT;
680} 1295}
681 1296
682static int moxa_tiocmget(struct tty_struct *tty, struct file *file) 1297static int moxa_tiocmget(struct tty_struct *tty, struct file *file)
683{ 1298{
684 struct moxa_port *ch = (struct moxa_port *) tty->driver_data; 1299 struct moxa_port *ch;
685 int port;
686 int flag = 0, dtr, rts; 1300 int flag = 0, dtr, rts;
687 1301
688 port = tty->index; 1302 mutex_lock(&moxa_openlock);
689 if ((port != MAX_PORTS) && (!ch)) 1303 ch = tty->driver_data;
690 return (-EINVAL); 1304 if (!ch) {
1305 mutex_unlock(&moxa_openlock);
1306 return -EINVAL;
1307 }
691 1308
692 MoxaPortGetLineOut(ch->port, &dtr, &rts); 1309 MoxaPortGetLineOut(ch, &dtr, &rts);
693 if (dtr) 1310 if (dtr)
694 flag |= TIOCM_DTR; 1311 flag |= TIOCM_DTR;
695 if (rts) 1312 if (rts)
696 flag |= TIOCM_RTS; 1313 flag |= TIOCM_RTS;
697 dtr = MoxaPortLineStatus(ch->port); 1314 dtr = MoxaPortLineStatus(ch);
698 if (dtr & 1) 1315 if (dtr & 1)
699 flag |= TIOCM_CTS; 1316 flag |= TIOCM_CTS;
700 if (dtr & 2) 1317 if (dtr & 2)
701 flag |= TIOCM_DSR; 1318 flag |= TIOCM_DSR;
702 if (dtr & 4) 1319 if (dtr & 4)
703 flag |= TIOCM_CD; 1320 flag |= TIOCM_CD;
1321 mutex_unlock(&moxa_openlock);
704 return flag; 1322 return flag;
705} 1323}
706 1324
707static int moxa_tiocmset(struct tty_struct *tty, struct file *file, 1325static int moxa_tiocmset(struct tty_struct *tty, struct file *file,
708 unsigned int set, unsigned int clear) 1326 unsigned int set, unsigned int clear)
709{ 1327{
710 struct moxa_port *ch = (struct moxa_port *) tty->driver_data; 1328 struct moxa_port *ch;
711 int port; 1329 int port;
712 int dtr, rts; 1330 int dtr, rts;
713 1331
714 port = tty->index; 1332 port = tty->index;
715 if ((port != MAX_PORTS) && (!ch)) 1333 mutex_lock(&moxa_openlock);
716 return (-EINVAL); 1334 ch = tty->driver_data;
1335 if (!ch) {
1336 mutex_unlock(&moxa_openlock);
1337 return -EINVAL;
1338 }
717 1339
718 MoxaPortGetLineOut(ch->port, &dtr, &rts); 1340 MoxaPortGetLineOut(ch, &dtr, &rts);
719 if (set & TIOCM_RTS) 1341 if (set & TIOCM_RTS)
720 rts = 1; 1342 rts = 1;
721 if (set & TIOCM_DTR) 1343 if (set & TIOCM_DTR)
@@ -724,105 +1346,51 @@ static int moxa_tiocmset(struct tty_struct *tty, struct file *file,
724 rts = 0; 1346 rts = 0;
725 if (clear & TIOCM_DTR) 1347 if (clear & TIOCM_DTR)
726 dtr = 0; 1348 dtr = 0;
727 MoxaPortLineCtrl(ch->port, dtr, rts); 1349 MoxaPortLineCtrl(ch, dtr, rts);
1350 mutex_unlock(&moxa_openlock);
728 return 0; 1351 return 0;
729} 1352}
730 1353
731static int moxa_ioctl(struct tty_struct *tty, struct file *file,
732 unsigned int cmd, unsigned long arg)
733{
734 struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
735 register int port;
736 void __user *argp = (void __user *)arg;
737 int retval;
738
739 port = tty->index;
740 if ((port != MAX_PORTS) && (!ch))
741 return (-EINVAL);
742
743 switch (cmd) {
744 case TCSBRK: /* SVID version: non-zero arg --> no break */
745 retval = tty_check_change(tty);
746 if (retval)
747 return (retval);
748 moxa_setup_empty_event(tty);
749 tty_wait_until_sent(tty, 0);
750 if (!arg)
751 MoxaPortSendBreak(ch->port, 0);
752 return (0);
753 case TCSBRKP: /* support for POSIX tcsendbreak() */
754 retval = tty_check_change(tty);
755 if (retval)
756 return (retval);
757 moxa_setup_empty_event(tty);
758 tty_wait_until_sent(tty, 0);
759 MoxaPortSendBreak(ch->port, arg);
760 return (0);
761 case TIOCGSOFTCAR:
762 return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) argp);
763 case TIOCSSOFTCAR:
764 if(get_user(retval, (unsigned long __user *) argp))
765 return -EFAULT;
766 arg = retval;
767 tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) |
768 (arg ? CLOCAL : 0));
769 if (C_CLOCAL(tty))
770 ch->asyncflags &= ~ASYNC_CHECK_CD;
771 else
772 ch->asyncflags |= ASYNC_CHECK_CD;
773 return (0);
774 case TIOCGSERIAL:
775 return moxa_get_serial_info(ch, argp);
776
777 case TIOCSSERIAL:
778 return moxa_set_serial_info(ch, argp);
779 default:
780 retval = MoxaDriverIoctl(cmd, arg, port);
781 }
782 return (retval);
783}
784
785static void moxa_throttle(struct tty_struct *tty) 1354static void moxa_throttle(struct tty_struct *tty)
786{ 1355{
787 struct moxa_port *ch = (struct moxa_port *) tty->driver_data; 1356 struct moxa_port *ch = tty->driver_data;
788 1357
789 ch->statusflags |= THROTTLE; 1358 ch->statusflags |= THROTTLE;
790} 1359}
791 1360
792static void moxa_unthrottle(struct tty_struct *tty) 1361static void moxa_unthrottle(struct tty_struct *tty)
793{ 1362{
794 struct moxa_port *ch = (struct moxa_port *) tty->driver_data; 1363 struct moxa_port *ch = tty->driver_data;
795 1364
796 ch->statusflags &= ~THROTTLE; 1365 ch->statusflags &= ~THROTTLE;
797} 1366}
798 1367
799static void moxa_set_termios(struct tty_struct *tty, 1368static void moxa_set_termios(struct tty_struct *tty,
800 struct ktermios *old_termios) 1369 struct ktermios *old_termios)
801{ 1370{
802 struct moxa_port *ch = (struct moxa_port *) tty->driver_data; 1371 struct moxa_port *ch = tty->driver_data;
803 1372
804 if (ch == NULL) 1373 if (ch == NULL)
805 return; 1374 return;
806 moxa_set_tty_param(tty, old_termios); 1375 moxa_set_tty_param(tty, old_termios);
807 if (!(old_termios->c_cflag & CLOCAL) && 1376 if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty))
808 (tty->termios->c_cflag & CLOCAL))
809 wake_up_interruptible(&ch->open_wait); 1377 wake_up_interruptible(&ch->open_wait);
810} 1378}
811 1379
812static void moxa_stop(struct tty_struct *tty) 1380static void moxa_stop(struct tty_struct *tty)
813{ 1381{
814 struct moxa_port *ch = (struct moxa_port *) tty->driver_data; 1382 struct moxa_port *ch = tty->driver_data;
815 1383
816 if (ch == NULL) 1384 if (ch == NULL)
817 return; 1385 return;
818 MoxaPortTxDisable(ch->port); 1386 MoxaPortTxDisable(ch);
819 ch->statusflags |= TXSTOPPED; 1387 ch->statusflags |= TXSTOPPED;
820} 1388}
821 1389
822 1390
823static void moxa_start(struct tty_struct *tty) 1391static void moxa_start(struct tty_struct *tty)
824{ 1392{
825 struct moxa_port *ch = (struct moxa_port *) tty->driver_data; 1393 struct moxa_port *ch = tty->driver_data;
826 1394
827 if (ch == NULL) 1395 if (ch == NULL)
828 return; 1396 return;
@@ -830,91 +1398,143 @@ static void moxa_start(struct tty_struct *tty)
830 if (!(ch->statusflags & TXSTOPPED)) 1398 if (!(ch->statusflags & TXSTOPPED))
831 return; 1399 return;
832 1400
833 MoxaPortTxEnable(ch->port); 1401 MoxaPortTxEnable(ch);
834 ch->statusflags &= ~TXSTOPPED; 1402 ch->statusflags &= ~TXSTOPPED;
835} 1403}
836 1404
837static void moxa_hangup(struct tty_struct *tty) 1405static void moxa_hangup(struct tty_struct *tty)
838{ 1406{
839 struct moxa_port *ch = (struct moxa_port *) tty->driver_data; 1407 struct moxa_port *ch;
840 1408
841 moxa_flush_buffer(tty); 1409 mutex_lock(&moxa_openlock);
842 moxa_shut_down(ch); 1410 ch = tty->driver_data;
843 ch->event = 0; 1411 if (ch == NULL) {
1412 mutex_unlock(&moxa_openlock);
1413 return;
1414 }
844 ch->count = 0; 1415 ch->count = 0;
845 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; 1416 moxa_close_port(ch);
846 ch->tty = NULL; 1417 mutex_unlock(&moxa_openlock);
1418
847 wake_up_interruptible(&ch->open_wait); 1419 wake_up_interruptible(&ch->open_wait);
848} 1420}
849 1421
850static void moxa_poll(unsigned long ignored) 1422static void moxa_new_dcdstate(struct moxa_port *p, u8 dcd)
851{ 1423{
852 register int card; 1424 dcd = !!dcd;
853 struct moxa_port *ch;
854 struct tty_struct *tp;
855 int i, ports;
856 1425
857 del_timer(&moxaTimer); 1426 if (dcd != p->DCDState && p->tty && C_CLOCAL(p->tty)) {
1427 if (!dcd)
1428 tty_hangup(p->tty);
1429 }
1430 p->DCDState = dcd;
1431}
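The first line of moxa_new_dcdstate() collapses the raw FlagStat bit (0x80) to 0 or 1 before comparing it with the stored state; without that normalization every poll would look like a DCD transition. A standalone illustration of why the !! matters (not driver code):

#include <stdio.h>

#define DCD_state 0x80	/* raw flag bit, same value as in the register map */

int main(void)
{
	unsigned char stored = 1;	/* previous state, kept as 0 or 1 */
	unsigned char raw = DCD_state;	/* carrier is still up */

	printf("raw compare  : %s\n", raw != stored ? "spurious change" : "no change");
	printf("!!raw compare: %s\n", !!raw != stored ? "spurious change" : "no change");
	return 0;
}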
858 1432
859 if (MoxaDriverPoll() < 0) { 1433static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
860 mod_timer(&moxaTimer, jiffies + HZ / 50); 1434 u16 __iomem *ip)
861 return; 1435{
1436 struct tty_struct *tty = p->tty;
1437 void __iomem *ofsAddr;
1438 unsigned int inited = p->asyncflags & ASYNC_INITIALIZED;
1439 u16 intr;
1440
1441 if (tty) {
1442 if ((p->statusflags & EMPTYWAIT) &&
1443 MoxaPortTxQueue(p) == 0) {
1444 p->statusflags &= ~EMPTYWAIT;
1445 tty_wakeup(tty);
1446 }
1447 if ((p->statusflags & LOWWAIT) && !tty->stopped &&
1448 MoxaPortTxQueue(p) <= WAKEUP_CHARS) {
1449 p->statusflags &= ~LOWWAIT;
1450 tty_wakeup(tty);
1451 }
1452
1453 if (inited && !(p->statusflags & THROTTLE) &&
1454 MoxaPortRxQueue(p) > 0) { /* RX */
1455 MoxaPortReadData(p);
1456 tty_schedule_flip(tty);
1457 }
1458 } else {
1459 p->statusflags &= ~EMPTYWAIT;
1460 MoxaPortFlushData(p, 0); /* flush RX */
862 } 1461 }
1462
1463 if (!handle) /* nothing else to do */
1464 return 0;
1465
1466 intr = readw(ip); /* port irq status */
1467 if (intr == 0)
1468 return 0;
1469
1470 writew(0, ip); /* ACK port */
1471 ofsAddr = p->tableAddr;
1472 if (intr & IntrTx) /* disable tx intr */
1473 writew(readw(ofsAddr + HostStat) & ~WakeupTx,
1474 ofsAddr + HostStat);
1475
1476 if (!inited)
1477 return 0;
1478
1479 if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */
1480 tty_insert_flip_char(tty, 0, TTY_BREAK);
1481 tty_schedule_flip(tty);
1482 }
1483
1484 if (intr & IntrLine)
1485 moxa_new_dcdstate(p, readb(ofsAddr + FlagStat) & DCD_state);
1486
1487 return 0;
1488}
1489
1490static void moxa_poll(unsigned long ignored)
1491{
1492 struct moxa_board_conf *brd;
1493 u16 __iomem *ip;
1494 unsigned int card, port, served = 0;
1495
1496 spin_lock(&moxa_lock);
863 for (card = 0; card < MAX_BOARDS; card++) { 1497 for (card = 0; card < MAX_BOARDS; card++) {
864 if ((ports = MoxaPortsOfCard(card)) <= 0) 1498 brd = &moxa_boards[card];
1499 if (!brd->ready)
865 continue; 1500 continue;
866 ch = &moxa_ports[card * MAX_PORTS_PER_BOARD]; 1501
867 for (i = 0; i < ports; i++, ch++) { 1502 served++;
868 if ((ch->asyncflags & ASYNC_INITIALIZED) == 0) 1503
869 continue; 1504 ip = NULL;
870 if (!(ch->statusflags & THROTTLE) && 1505 if (readb(brd->intPend) == 0xff)
871 (MoxaPortRxQueue(ch->port) > 0)) 1506 ip = brd->intTable + readb(brd->intNdx);
872 moxa_receive_data(ch); 1507
873 if ((tp = ch->tty) == 0) 1508 for (port = 0; port < brd->numPorts; port++)
874 continue; 1509 moxa_poll_port(&brd->ports[port], !!ip, ip + port);
875 if (ch->statusflags & LOWWAIT) { 1510
876 if (MoxaPortTxQueue(ch->port) <= WAKEUP_CHARS) { 1511 if (ip)
877 if (!tp->stopped) { 1512 writeb(0, brd->intPend); /* ACK */
878 ch->statusflags &= ~LOWWAIT; 1513
879 tty_wakeup(tp); 1514 if (moxaLowWaterChk) {
880 } 1515 struct moxa_port *p = brd->ports;
881 } 1516 for (port = 0; port < brd->numPorts; port++, p++)
882 } 1517 if (p->lowChkFlag) {
883 if (!I_IGNBRK(tp) && (MoxaPortResetBrkCnt(ch->port) > 0)) { 1518 p->lowChkFlag = 0;
884 tty_insert_flip_char(tp, 0, TTY_BREAK); 1519 moxa_low_water_check(p->tableAddr);
885 tty_schedule_flip(tp);
886 }
887 if (MoxaPortDCDChange(ch->port)) {
888 if (ch->asyncflags & ASYNC_CHECK_CD) {
889 if (MoxaPortDCDON(ch->port))
890 wake_up_interruptible(&ch->open_wait);
891 else {
892 tty_hangup(tp);
893 wake_up_interruptible(&ch->open_wait);
894 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
895 }
896 } 1520 }
897 }
898 } 1521 }
899 } 1522 }
1523 moxaLowWaterChk = 0;
900 1524
901 mod_timer(&moxaTimer, jiffies + HZ / 50); 1525 if (served)
1526 mod_timer(&moxaTimer, jiffies + HZ / 50);
1527 spin_unlock(&moxa_lock);
902} 1528}
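moxa_poll() re-arms itself every HZ/50 jiffies, i.e. roughly every 20 ms regardless of the configured HZ, and only while at least one board is ready. How the timer is first armed lies outside this hunk; a minimal kernel-context sketch with the timer API of this kernel generation (moxa_start_poll_timer is a made-up helper name, and the setup_timer() call is an assumption about the rest of the file):

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list moxaTimer;
static void moxa_poll(unsigned long ignored);	/* handler shown above */

/* Arm the polling timer for its first ~20 ms tick. */
static void moxa_start_poll_timer(void)
{
	setup_timer(&moxaTimer, moxa_poll, 0UL);
	mod_timer(&moxaTimer, jiffies + HZ / 50);
}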
903 1529
904/******************************************************************************/ 1530/******************************************************************************/
905 1531
906static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios) 1532static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios)
907{ 1533{
908 register struct ktermios *ts; 1534 register struct ktermios *ts = tty->termios;
909 struct moxa_port *ch; 1535 struct moxa_port *ch = tty->driver_data;
910 int rts, cts, txflow, rxflow, xany, baud; 1536 int rts, cts, txflow, rxflow, xany, baud;
911 1537
912 ch = (struct moxa_port *) tty->driver_data;
913 ts = tty->termios;
914 if (ts->c_cflag & CLOCAL)
915 ch->asyncflags &= ~ASYNC_CHECK_CD;
916 else
917 ch->asyncflags |= ASYNC_CHECK_CD;
918 rts = cts = txflow = rxflow = xany = 0; 1538 rts = cts = txflow = rxflow = xany = 0;
919 if (ts->c_cflag & CRTSCTS) 1539 if (ts->c_cflag & CRTSCTS)
920 rts = cts = 1; 1540 rts = cts = 1;
@@ -927,776 +1547,60 @@ static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_term
927 1547
928 /* Clear the features we don't support */ 1548 /* Clear the features we don't support */
929 ts->c_cflag &= ~CMSPAR; 1549 ts->c_cflag &= ~CMSPAR;
930 MoxaPortFlowCtrl(ch->port, rts, cts, txflow, rxflow, xany); 1550 MoxaPortFlowCtrl(ch, rts, cts, txflow, rxflow, xany);
931 baud = MoxaPortSetTermio(ch->port, ts, tty_get_baud_rate(tty)); 1551 baud = MoxaPortSetTermio(ch, ts, tty_get_baud_rate(tty));
932 if (baud == -1) 1552 if (baud == -1)
933 baud = tty_termios_baud_rate(old_termios); 1553 baud = tty_termios_baud_rate(old_termios);
934	/* Don't put the baud rate into the termios data */		 1554	/* Don't put the baud rate into the termios data */
935 tty_encode_baud_rate(tty, baud, baud); 1555 tty_encode_baud_rate(tty, baud, baud);
936} 1556}
937 1557
938static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp,
939 struct moxa_port *ch)
940{
941 DECLARE_WAITQUEUE(wait,current);
942 unsigned long flags;
943 int retval;
944 int do_clocal = C_CLOCAL(tty);
945
946 /*
947 * If the device is in the middle of being closed, then block
948 * until it's done, and then try again.
949 */
950 if (tty_hung_up_p(filp) || (ch->asyncflags & ASYNC_CLOSING)) {
951 if (ch->asyncflags & ASYNC_CLOSING)
952 wait_for_completion_interruptible(&ch->close_wait);
953#ifdef SERIAL_DO_RESTART
954 if (ch->asyncflags & ASYNC_HUP_NOTIFY)
955 return (-EAGAIN);
956 else
957 return (-ERESTARTSYS);
958#else
959 return (-EAGAIN);
960#endif
961 }
962 /*
963 * If non-blocking mode is set, then make the check up front
964 * and then exit.
965 */
966 if (filp->f_flags & O_NONBLOCK) {
967 ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
968 return (0);
969 }
970 /*
971 * Block waiting for the carrier detect and the line to become free
972 */
973 retval = 0;
974 add_wait_queue(&ch->open_wait, &wait);
975 pr_debug("block_til_ready before block: ttys%d, count = %d\n",
976 ch->port, ch->count);
977 spin_lock_irqsave(&moxa_lock, flags);
978 if (!tty_hung_up_p(filp))
979 ch->count--;
980 ch->blocked_open++;
981 spin_unlock_irqrestore(&moxa_lock, flags);
982
983 while (1) {
984 set_current_state(TASK_INTERRUPTIBLE);
985 if (tty_hung_up_p(filp) ||
986 !(ch->asyncflags & ASYNC_INITIALIZED)) {
987#ifdef SERIAL_DO_RESTART
988 if (ch->asyncflags & ASYNC_HUP_NOTIFY)
989 retval = -EAGAIN;
990 else
991 retval = -ERESTARTSYS;
992#else
993 retval = -EAGAIN;
994#endif
995 break;
996 }
997 if (!(ch->asyncflags & ASYNC_CLOSING) && (do_clocal ||
998 MoxaPortDCDON(ch->port)))
999 break;
1000
1001 if (signal_pending(current)) {
1002 retval = -ERESTARTSYS;
1003 break;
1004 }
1005 schedule();
1006 }
1007 set_current_state(TASK_RUNNING);
1008 remove_wait_queue(&ch->open_wait, &wait);
1009
1010 spin_lock_irqsave(&moxa_lock, flags);
1011 if (!tty_hung_up_p(filp))
1012 ch->count++;
1013 ch->blocked_open--;
1014 spin_unlock_irqrestore(&moxa_lock, flags);
1015 pr_debug("block_til_ready after blocking: ttys%d, count = %d\n",
1016 ch->port, ch->count);
1017 if (retval)
1018 return (retval);
1019 /* FIXME: review to see if we need to use set_bit on these */
1020 ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
1021 return 0;
1022}
1023
1024static void moxa_setup_empty_event(struct tty_struct *tty) 1558static void moxa_setup_empty_event(struct tty_struct *tty)
1025{ 1559{
1026 struct moxa_port *ch = tty->driver_data; 1560 struct moxa_port *ch = tty->driver_data;
1027 unsigned long flags;
1028 1561
1029 spin_lock_irqsave(&moxa_lock, flags); 1562 spin_lock_bh(&moxa_lock);
1030 ch->statusflags |= EMPTYWAIT; 1563 ch->statusflags |= EMPTYWAIT;
1031 mod_timer(&moxa_ports[ch->port].emptyTimer, jiffies + HZ); 1564 spin_unlock_bh(&moxa_lock);
1032 spin_unlock_irqrestore(&moxa_lock, flags);
1033}
1034
1035static void moxa_check_xmit_empty(unsigned long data)
1036{
1037 struct moxa_port *ch;
1038
1039 ch = (struct moxa_port *) data;
1040 if (ch->tty && (ch->statusflags & EMPTYWAIT)) {
1041 if (MoxaPortTxQueue(ch->port) == 0) {
1042 ch->statusflags &= ~EMPTYWAIT;
1043 tty_wakeup(ch->tty);
1044 return;
1045 }
1046 mod_timer(&moxa_ports[ch->port].emptyTimer,
1047 round_jiffies(jiffies + HZ));
1048 } else
1049 ch->statusflags &= ~EMPTYWAIT;
1050} 1565}
1051 1566
1052static void moxa_shut_down(struct moxa_port *ch) 1567static void moxa_shut_down(struct moxa_port *ch)
1053{ 1568{
1054 struct tty_struct *tp; 1569 struct tty_struct *tp = ch->tty;
1055 1570
1056 if (!(ch->asyncflags & ASYNC_INITIALIZED)) 1571 if (!(ch->asyncflags & ASYNC_INITIALIZED))
1057 return; 1572 return;
1058 1573
1059 tp = ch->tty; 1574 MoxaPortDisable(ch);
1060
1061 MoxaPortDisable(ch->port);
1062 1575
1063 /* 1576 /*
1064 * If we're a modem control device and HUPCL is on, drop RTS & DTR. 1577 * If we're a modem control device and HUPCL is on, drop RTS & DTR.
1065 */ 1578 */
1066 if (tp->termios->c_cflag & HUPCL) 1579 if (C_HUPCL(tp))
1067 MoxaPortLineCtrl(ch->port, 0, 0); 1580 MoxaPortLineCtrl(ch, 0, 0);
1068 1581
1582 spin_lock_bh(&moxa_lock);
1069 ch->asyncflags &= ~ASYNC_INITIALIZED; 1583 ch->asyncflags &= ~ASYNC_INITIALIZED;
1584 spin_unlock_bh(&moxa_lock);
1070} 1585}
1071 1586
1072static void moxa_receive_data(struct moxa_port *ch)
1073{
1074 struct tty_struct *tp;
1075 struct ktermios *ts;
1076 unsigned long flags;
1077
1078 ts = NULL;
1079 tp = ch->tty;
1080 if (tp)
1081 ts = tp->termios;
1082 /**************************************************
1083 if ( !tp || !ts || !(ts->c_cflag & CREAD) ) {
1084 *****************************************************/
1085 if (!tp || !ts) {
1086 MoxaPortFlushData(ch->port, 0);
1087 return;
1088 }
1089 spin_lock_irqsave(&moxa_lock, flags);
1090 MoxaPortReadData(ch->port, tp);
1091 spin_unlock_irqrestore(&moxa_lock, flags);
1092 tty_schedule_flip(tp);
1093}
1094
1095#define Magic_code 0x404
1096
1097/*
1098 * System Configuration
1099 */
1100/*
1101 * for C218 BIOS initialization
1102 */
1103#define C218_ConfBase 0x800
1104#define C218_status (C218_ConfBase + 0) /* BIOS running status */
1105#define C218_diag (C218_ConfBase + 2) /* diagnostic status */
1106#define C218_key (C218_ConfBase + 4) /* WORD (0x218 for C218) */
1107#define C218DLoad_len (C218_ConfBase + 6) /* WORD */
1108#define C218check_sum (C218_ConfBase + 8) /* BYTE */
1109#define C218chksum_ok (C218_ConfBase + 0x0a) /* BYTE (1:ok) */
1110#define C218_TestRx (C218_ConfBase + 0x10) /* 8 bytes for 8 ports */
1111#define C218_TestTx (C218_ConfBase + 0x18) /* 8 bytes for 8 ports */
1112#define C218_RXerr (C218_ConfBase + 0x20) /* 8 bytes for 8 ports */
1113#define C218_ErrFlag (C218_ConfBase + 0x28) /* 8 bytes for 8 ports */
1114
1115#define C218_LoadBuf 0x0F00
1116#define C218_KeyCode 0x218
1117#define CP204J_KeyCode 0x204
1118
1119/*
1120 * for C320 BIOS initialization
1121 */
1122#define C320_ConfBase 0x800
1123#define C320_LoadBuf 0x0f00
1124#define STS_init 0x05 /* for C320_status */
1125
1126#define C320_status C320_ConfBase + 0 /* BIOS running status */
1127#define C320_diag C320_ConfBase + 2 /* diagnostic status */
1128#define C320_key C320_ConfBase + 4 /* WORD (0320H for C320) */
1129#define C320DLoad_len C320_ConfBase + 6 /* WORD */
1130#define C320check_sum C320_ConfBase + 8 /* WORD */
1131#define C320chksum_ok C320_ConfBase + 0x0a /* WORD (1:ok) */
1132#define C320bapi_len C320_ConfBase + 0x0c /* WORD */
1133#define C320UART_no C320_ConfBase + 0x0e /* WORD */
1134
1135#define C320_KeyCode 0x320
1136
1137#define FixPage_addr 0x0000 /* starting addr of static page */
1138#define DynPage_addr 0x2000 /* starting addr of dynamic page */
1139#define C218_start 0x3000 /* starting addr of C218 BIOS prg */
1140#define Control_reg 0x1ff0 /* select page and reset control */
1141#define HW_reset 0x80
1142
1143/*
1144 * Function Codes
1145 */
1146#define FC_CardReset 0x80
1147#define FC_ChannelReset 1 /* C320 firmware not supported */
1148#define FC_EnableCH 2
1149#define FC_DisableCH 3
1150#define FC_SetParam 4
1151#define FC_SetMode 5
1152#define FC_SetRate 6
1153#define FC_LineControl 7
1154#define FC_LineStatus 8
1155#define FC_XmitControl 9
1156#define FC_FlushQueue 10
1157#define FC_SendBreak 11
1158#define FC_StopBreak 12
1159#define FC_LoopbackON 13
1160#define FC_LoopbackOFF 14
1161#define FC_ClrIrqTable 15
1162#define FC_SendXon 16
1163#define FC_SetTermIrq 17 /* C320 firmware not supported */
1164#define FC_SetCntIrq 18 /* C320 firmware not supported */
1165#define FC_SetBreakIrq 19
1166#define FC_SetLineIrq 20
1167#define FC_SetFlowCtl 21
1168#define FC_GenIrq 22
1169#define FC_InCD180 23
1170#define FC_OutCD180 24
1171#define FC_InUARTreg 23
1172#define FC_OutUARTreg 24
1173#define FC_SetXonXoff 25
1174#define FC_OutCD180CCR 26
1175#define FC_ExtIQueue 27
1176#define FC_ExtOQueue 28
1177#define FC_ClrLineIrq 29
1178#define FC_HWFlowCtl 30
1179#define FC_GetClockRate 35
1180#define FC_SetBaud 36
1181#define FC_SetDataMode 41
1182#define FC_GetCCSR 43
1183#define FC_GetDataError 45
1184#define FC_RxControl 50
1185#define FC_ImmSend 51
1186#define FC_SetXonState 52
1187#define FC_SetXoffState 53
1188#define FC_SetRxFIFOTrig 54
1189#define FC_SetTxFIFOCnt 55
1190#define FC_UnixRate 56
1191#define FC_UnixResetTimer 57
1192
1193#define RxFIFOTrig1 0
1194#define RxFIFOTrig4 1
1195#define RxFIFOTrig8 2
1196#define RxFIFOTrig14 3
1197
1198/*
1199 * Dual-Ported RAM
1200 */
1201#define DRAM_global 0
1202#define INT_data (DRAM_global + 0)
1203#define Config_base (DRAM_global + 0x108)
1204
1205#define IRQindex (INT_data + 0)
1206#define IRQpending (INT_data + 4)
1207#define IRQtable (INT_data + 8)
1208
1209/*
1210 * Interrupt Status
1211 */
1212#define IntrRx 0x01 /* receiver data O.K. */
1213#define IntrTx 0x02 /* transmit buffer empty */
1214#define IntrFunc 0x04 /* function complete */
1215#define IntrBreak 0x08 /* received break */
1216#define IntrLine 0x10 /* line status change
1217 for transmitter */
1218#define IntrIntr 0x20 /* received INTR code */
1219#define IntrQuit 0x40 /* received QUIT code */
1220#define IntrEOF 0x80 /* received EOF code */
1221
1222#define IntrRxTrigger 	0x100	/* rx data count reaches trigger value */
1223#define IntrTxTrigger 0x200 /* tx data count below trigger value */
1224
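These per-port bits are what moxa_poll_port() reads from the board's interrupt table before acking it. A standalone decoder, reusing the values defined above purely for illustration:

#include <stdio.h>

#define IntrRx     0x01
#define IntrTx     0x02
#define IntrFunc   0x04
#define IntrBreak  0x08
#define IntrLine   0x10

static void decode_port_intr(unsigned short intr)
{
	if (intr == 0) {
		printf("idle\n");
		return;
	}
	if (intr & IntrRx)    printf("rx data ready\n");
	if (intr & IntrTx)    printf("tx buffer empty\n");
	if (intr & IntrFunc)  printf("function complete\n");
	if (intr & IntrBreak) printf("break received\n");
	if (intr & IntrLine)  printf("line (DCD) change\n");
}

int main(void)
{
	decode_port_intr(0x18);	/* break + line change */
	return 0;
}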
1225#define Magic_no (Config_base + 0)
1226#define Card_model_no (Config_base + 2)
1227#define Total_ports (Config_base + 4)
1228#define Module_cnt (Config_base + 8)
1229#define Module_no (Config_base + 10)
1230#define Timer_10ms (Config_base + 14)
1231#define Disable_IRQ (Config_base + 20)
1232#define TMS320_PORT1 (Config_base + 22)
1233#define TMS320_PORT2 (Config_base + 24)
1234#define TMS320_CLOCK (Config_base + 26)
1235
1236/*
1237 * DATA BUFFER in DRAM
1238 */
1239#define Extern_table 0x400 /* Base address of the external table
1240 (24 words * 64) total 3K bytes
1241 (24 words * 128) total 6K bytes */
1242#define Extern_size 0x60 /* 96 bytes */
1243#define RXrptr 0x00 /* read pointer for RX buffer */
1244#define RXwptr 0x02 /* write pointer for RX buffer */
1245#define TXrptr 0x04 /* read pointer for TX buffer */
1246#define TXwptr 0x06 /* write pointer for TX buffer */
1247#define HostStat 0x08 /* IRQ flag and general flag */
1248#define FlagStat 0x0A
1249#define FlowControl 0x0C /* B7 B6 B5 B4 B3 B2 B1 B0 */
1250 /* x x x x | | | | */
1251 /* | | | + CTS flow */
1252 /* | | +--- RTS flow */
1253 /* | +------ TX Xon/Xoff */
1254 /* +--------- RX Xon/Xoff */
1255#define Break_cnt 0x0E /* received break count */
1256#define CD180TXirq 0x10 /* if non-0: enable TX irq */
1257#define RX_mask 0x12
1258#define TX_mask 0x14
1259#define Ofs_rxb 0x16
1260#define Ofs_txb 0x18
1261#define Page_rxb 0x1A
1262#define Page_txb 0x1C
1263#define EndPage_rxb 0x1E
1264#define EndPage_txb 0x20
1265#define Data_error 0x22
1266#define RxTrigger 0x28
1267#define TxTrigger 0x2a
1268
1269#define rRXwptr 0x34
1270#define Low_water 0x36
1271
1272#define FuncCode 0x40
1273#define FuncArg 0x42
1274#define FuncArg1 0x44
1275
1276#define C218rx_size 0x2000 /* 8K bytes */
1277#define C218tx_size 0x8000 /* 32K bytes */
1278
1279#define C218rx_mask (C218rx_size - 1)
1280#define C218tx_mask (C218tx_size - 1)
1281
1282#define C320p8rx_size 0x2000
1283#define C320p8tx_size 0x8000
1284#define C320p8rx_mask (C320p8rx_size - 1)
1285#define C320p8tx_mask (C320p8tx_size - 1)
1286
1287#define C320p16rx_size 0x2000
1288#define C320p16tx_size 0x4000
1289#define C320p16rx_mask (C320p16rx_size - 1)
1290#define C320p16tx_mask (C320p16tx_size - 1)
1291
1292#define C320p24rx_size 0x2000
1293#define C320p24tx_size 0x2000
1294#define C320p24rx_mask (C320p24rx_size - 1)
1295#define C320p24tx_mask (C320p24tx_size - 1)
1296
1297#define C320p32rx_size 0x1000
1298#define C320p32tx_size 0x1000
1299#define C320p32rx_mask (C320p32rx_size - 1)
1300#define C320p32tx_mask (C320p32tx_size - 1)
1301
1302#define Page_size 0x2000
1303#define Page_mask (Page_size - 1)
1304#define C218rx_spage 3
1305#define C218tx_spage 4
1306#define C218rx_pageno 1
1307#define C218tx_pageno 4
1308#define C218buf_pageno 5
1309
1310#define C320p8rx_spage 3
1311#define C320p8tx_spage 4
1312#define C320p8rx_pgno 1
1313#define C320p8tx_pgno 4
1314#define C320p8buf_pgno 5
1315
1316#define C320p16rx_spage 3
1317#define C320p16tx_spage 4
1318#define C320p16rx_pgno 1
1319#define C320p16tx_pgno 2
1320#define C320p16buf_pgno 3
1321
1322#define C320p24rx_spage 3
1323#define C320p24tx_spage 4
1324#define C320p24rx_pgno 1
1325#define C320p24tx_pgno 1
1326#define C320p24buf_pgno 2
1327
1328#define C320p32rx_spage 3
1329#define C320p32tx_ofs C320p32rx_size
1330#define C320p32tx_spage 3
1331#define C320p32buf_pgno 1
1332
1333/*
1334 * Host Status
1335 */
1336#define WakeupRx 0x01
1337#define WakeupTx 0x02
1338#define WakeupBreak 0x08
1339#define WakeupLine 0x10
1340#define WakeupIntr 0x20
1341#define WakeupQuit 0x40
1342#define WakeupEOF 0x80 /* used in VTIME control */
1343#define WakeupRxTrigger 0x100
1344#define WakeupTxTrigger 0x200
1345/*
1346 * Flag status
1347 */
1348#define Rx_over 0x01
1349#define Xoff_state 0x02
1350#define Tx_flowOff 0x04
1351#define Tx_enable 0x08
1352#define CTS_state 0x10
1353#define DSR_state 0x20
1354#define DCD_state 0x80
1355/*
1356 * FlowControl
1357 */
1358#define CTS_FlowCtl 1
1359#define RTS_FlowCtl 2
1360#define Tx_FlowCtl 4
1361#define Rx_FlowCtl 8
1362#define IXM_IXANY 0x10
1363
1364#define LowWater 128
1365
1366#define DTR_ON 1
1367#define RTS_ON 2
1368#define CTS_ON 1
1369#define DSR_ON 2
1370#define DCD_ON 8
1371
1372/* mode definition */
1373#define MX_CS8 0x03
1374#define MX_CS7 0x02
1375#define MX_CS6 0x01
1376#define MX_CS5 0x00
1377
1378#define MX_STOP1 0x00
1379#define MX_STOP15 0x04
1380#define MX_STOP2 0x08
1381
1382#define MX_PARNONE 0x00
1383#define MX_PAREVEN 0x40
1384#define MX_PARODD 0xC0
1385
1386/*
1387 * Query
1388 */
1389
1390struct mon_str {
1391 int tick;
1392 int rxcnt[MAX_PORTS];
1393 int txcnt[MAX_PORTS];
1394};
1395
1396#define DCD_changed 0x01
1397#define DCD_oldstate 0x80
1398
1399static unsigned char moxaBuff[10240];
1400static int moxaLowWaterChk;
1401static int moxaCard;
1402static struct mon_str moxaLog;
1403static int moxaFuncTout = HZ / 2;
1404
1405static void moxafunc(void __iomem *, int, ushort);
1406static void moxa_wait_finish(void __iomem *);
1407static void moxa_low_water_check(void __iomem *);
1408static int moxaloadbios(int, unsigned char __user *, int);
1409static int moxafindcard(int);
1410static int moxaload320b(int, unsigned char __user *, int);
1411static int moxaloadcode(int, unsigned char __user *, int);
1412static int moxaloadc218(int, void __iomem *, int);
1413static int moxaloadc320(int, void __iomem *, int, int *);
1414
1415/***************************************************************************** 1587/*****************************************************************************
1416 * Driver level functions: * 1588 * Driver level functions: *
1417 * 1. MoxaDriverInit(void); *
1418 * 2. MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port); *
1419 * 3. MoxaDriverPoll(void); *
1420 *****************************************************************************/ 1589 *****************************************************************************/
1421void MoxaDriverInit(void)
1422{
1423 struct moxa_port *p;
1424 unsigned int i;
1425 1590
1426	moxaFuncTout = HZ / 2;	/* 500 milliseconds */		 1591static void MoxaPortFlushData(struct moxa_port *port, int mode)
1427 moxaCard = 0;
1428 moxaLog.tick = 0;
1429 moxaLowWaterChk = 0;
1430 for (i = 0; i < MAX_PORTS; i++) {
1431 p = &moxa_ports[i];
1432 p->chkPort = 0;
1433 p->lowChkFlag = 0;
1434 p->lineCtrl = 0;
1435 moxaLog.rxcnt[i] = 0;
1436 moxaLog.txcnt[i] = 0;
1437 }
1438}
1439
1440#define MOXA 0x400
1441#define MOXA_GET_IQUEUE (MOXA + 1) /* get input buffered count */
1442#define MOXA_GET_OQUEUE (MOXA + 2) /* get output buffered count */
1443#define MOXA_INIT_DRIVER (MOXA + 6) /* moxaCard=0 */
1444#define MOXA_LOAD_BIOS (MOXA + 9) /* download BIOS */
1445#define MOXA_FIND_BOARD     (MOXA + 10)     /* check whether a MOXA card exists */
1446#define MOXA_LOAD_C320B (MOXA + 11) /* download 320B firmware */
1447#define MOXA_LOAD_CODE (MOXA + 12) /* download firmware */
1448#define MOXA_GETDATACOUNT (MOXA + 23)
1449#define MOXA_GET_IOQUEUE (MOXA + 27)
1450#define MOXA_FLUSH_QUEUE (MOXA + 28)
1451#define MOXA_GET_CONF (MOXA + 35) /* configuration */
1452#define MOXA_GET_MAJOR (MOXA + 63)
1453#define MOXA_GET_CUMAJOR (MOXA + 64)
1454#define MOXA_GETMSTATUS (MOXA + 65)
1455
1456struct dl_str {
1457 char __user *buf;
1458 int len;
1459 int cardno;
1460};
1461
1462static struct dl_str dltmp;
1463
1464void MoxaPortFlushData(int port, int mode)
1465{ 1592{
1466 void __iomem *ofsAddr; 1593 void __iomem *ofsAddr;
1467 if ((mode < 0) || (mode > 2)) 1594 if (mode < 0 || mode > 2)
1468 return; 1595 return;
1469 ofsAddr = moxa_ports[port].tableAddr; 1596 ofsAddr = port->tableAddr;
1470 moxafunc(ofsAddr, FC_FlushQueue, mode); 1597 moxafunc(ofsAddr, FC_FlushQueue, mode);
1471 if (mode != 1) { 1598 if (mode != 1) {
1472 moxa_ports[port].lowChkFlag = 0; 1599 port->lowChkFlag = 0;
1473 moxa_low_water_check(ofsAddr); 1600 moxa_low_water_check(ofsAddr);
1474 } 1601 }
1475} 1602}
1476 1603
1477int MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port)
1478{
1479 int i;
1480 int status;
1481 int MoxaPortTxQueue(int), MoxaPortRxQueue(int);
1482 void __user *argp = (void __user *)arg;
1483
1484 if (port == MAX_PORTS) {
1485 if ((cmd != MOXA_GET_CONF) && (cmd != MOXA_INIT_DRIVER) &&
1486 (cmd != MOXA_LOAD_BIOS) && (cmd != MOXA_FIND_BOARD) && (cmd != MOXA_LOAD_C320B) &&
1487 (cmd != MOXA_LOAD_CODE) && (cmd != MOXA_GETDATACOUNT) &&
1488 (cmd != MOXA_GET_IOQUEUE) && (cmd != MOXA_GET_MAJOR) &&
1489 (cmd != MOXA_GET_CUMAJOR) && (cmd != MOXA_GETMSTATUS))
1490 return (-EINVAL);
1491 }
1492 switch (cmd) {
1493 case MOXA_GET_CONF:
1494 if(copy_to_user(argp, &moxa_boards, MAX_BOARDS *
1495 sizeof(struct moxa_board_conf)))
1496 return -EFAULT;
1497 return (0);
1498 case MOXA_INIT_DRIVER:
1499 if ((int) arg == 0x404)
1500 MoxaDriverInit();
1501 return (0);
1502 case MOXA_GETDATACOUNT:
1503 moxaLog.tick = jiffies;
1504 if(copy_to_user(argp, &moxaLog, sizeof(struct mon_str)))
1505 return -EFAULT;
1506 return (0);
1507 case MOXA_FLUSH_QUEUE:
1508 MoxaPortFlushData(port, arg);
1509 return (0);
1510 case MOXA_GET_IOQUEUE: {
1511 struct moxaq_str __user *argm = argp;
1512 struct moxaq_str tmp;
1513
1514 for (i = 0; i < MAX_PORTS; i++, argm++) {
1515 memset(&tmp, 0, sizeof(tmp));
1516 if (moxa_ports[i].chkPort) {
1517 tmp.inq = MoxaPortRxQueue(i);
1518 tmp.outq = MoxaPortTxQueue(i);
1519 }
1520 if (copy_to_user(argm, &tmp, sizeof(tmp)))
1521 return -EFAULT;
1522 }
1523 return (0);
1524 } case MOXA_GET_OQUEUE:
1525 i = MoxaPortTxQueue(port);
1526 return put_user(i, (unsigned long __user *)argp);
1527 case MOXA_GET_IQUEUE:
1528 i = MoxaPortRxQueue(port);
1529 return put_user(i, (unsigned long __user *)argp);
1530 case MOXA_GET_MAJOR:
1531 if(copy_to_user(argp, &ttymajor, sizeof(int)))
1532 return -EFAULT;
1533 return 0;
1534 case MOXA_GET_CUMAJOR:
1535 i = 0;
1536 if(copy_to_user(argp, &i, sizeof(int)))
1537 return -EFAULT;
1538 return 0;
1539 case MOXA_GETMSTATUS: {
1540 struct mxser_mstatus __user *argm = argp;
1541 struct mxser_mstatus tmp;
1542 struct moxa_port *p;
1543
1544 for (i = 0; i < MAX_PORTS; i++, argm++) {
1545 p = &moxa_ports[i];
1546 memset(&tmp, 0, sizeof(tmp));
1547 if (!p->chkPort) {
1548 goto copy;
1549 } else {
1550 status = MoxaPortLineStatus(p->port);
1551 if (status & 1)
1552 tmp.cts = 1;
1553 if (status & 2)
1554 tmp.dsr = 1;
1555 if (status & 4)
1556 tmp.dcd = 1;
1557 }
1558
1559 if (!p->tty || !p->tty->termios)
1560 tmp.cflag = p->cflag;
1561 else
1562 tmp.cflag = p->tty->termios->c_cflag;
1563copy:
1564 if (copy_to_user(argm, &tmp, sizeof(tmp)))
1565 return -EFAULT;
1566 }
1567 return 0;
1568 } default:
1569 return (-ENOIOCTLCMD);
1570 case MOXA_LOAD_BIOS:
1571 case MOXA_FIND_BOARD:
1572 case MOXA_LOAD_C320B:
1573 case MOXA_LOAD_CODE:
1574 if (!capable(CAP_SYS_RAWIO))
1575 return -EPERM;
1576 break;
1577 }
1578
1579 if(copy_from_user(&dltmp, argp, sizeof(struct dl_str)))
1580 return -EFAULT;
1581 if(dltmp.cardno < 0 || dltmp.cardno >= MAX_BOARDS || dltmp.len < 0)
1582 return -EINVAL;
1583
1584 switch(cmd)
1585 {
1586 case MOXA_LOAD_BIOS:
1587 i = moxaloadbios(dltmp.cardno, dltmp.buf, dltmp.len);
1588 return (i);
1589 case MOXA_FIND_BOARD:
1590 return moxafindcard(dltmp.cardno);
1591 case MOXA_LOAD_C320B:
1592 moxaload320b(dltmp.cardno, dltmp.buf, dltmp.len);
1593 default: /* to keep gcc happy */
1594 return (0);
1595 case MOXA_LOAD_CODE:
1596 i = moxaloadcode(dltmp.cardno, dltmp.buf, dltmp.len);
1597 if (i == -1)
1598 return (-EFAULT);
1599 return (i);
1600
1601 }
1602}
1603
1604int MoxaDriverPoll(void)
1605{
1606 struct moxa_board_conf *brd;
1607 register ushort temp;
1608 register int card;
1609 void __iomem *ofsAddr;
1610 void __iomem *ip;
1611 int port, p, ports;
1612
1613 if (moxaCard == 0)
1614 return (-1);
1615 for (card = 0; card < MAX_BOARDS; card++) {
1616 brd = &moxa_boards[card];
1617 if (brd->loadstat == 0)
1618 continue;
1619 if ((ports = brd->numPorts) == 0)
1620 continue;
1621 if (readb(brd->intPend) == 0xff) {
1622 ip = brd->intTable + readb(brd->intNdx);
1623 p = card * MAX_PORTS_PER_BOARD;
1624 ports <<= 1;
1625 for (port = 0; port < ports; port += 2, p++) {
1626 if ((temp = readw(ip + port)) != 0) {
1627 writew(0, ip + port);
1628 ofsAddr = moxa_ports[p].tableAddr;
1629 if (temp & IntrTx)
1630 writew(readw(ofsAddr + HostStat) & ~WakeupTx, ofsAddr + HostStat);
1631 if (temp & IntrBreak) {
1632 moxa_ports[p].breakCnt++;
1633 }
1634 if (temp & IntrLine) {
1635 if (readb(ofsAddr + FlagStat) & DCD_state) {
1636 if ((moxa_ports[p].DCDState & DCD_oldstate) == 0)
1637 moxa_ports[p].DCDState = (DCD_oldstate |
1638 DCD_changed);
1639 } else {
1640 if (moxa_ports[p].DCDState & DCD_oldstate)
1641 moxa_ports[p].DCDState = DCD_changed;
1642 }
1643 }
1644 }
1645 }
1646 writeb(0, brd->intPend);
1647 }
1648 if (moxaLowWaterChk) {
1649 p = card * MAX_PORTS_PER_BOARD;
1650 for (port = 0; port < ports; port++, p++) {
1651 if (moxa_ports[p].lowChkFlag) {
1652 moxa_ports[p].lowChkFlag = 0;
1653 ofsAddr = moxa_ports[p].tableAddr;
1654 moxa_low_water_check(ofsAddr);
1655 }
1656 }
1657 }
1658 }
1659 moxaLowWaterChk = 0;
1660 return (0);
1661}
1662
1663/*****************************************************************************
1664 * Card level function: *
1665 * 1. MoxaPortsOfCard(int cardno); *
1666 *****************************************************************************/
1667int MoxaPortsOfCard(int cardno)
1668{
1669
1670 if (moxa_boards[cardno].boardType == 0)
1671 return (0);
1672 return (moxa_boards[cardno].numPorts);
1673}
1674
1675/*****************************************************************************
1676 * Port level functions: *
1677 * 1. MoxaPortIsValid(int port); *
1678 * 2. MoxaPortEnable(int port); *
1679 * 3. MoxaPortDisable(int port); *
1680 * 4. MoxaPortGetMaxBaud(int port); *
1681 * 6. MoxaPortSetBaud(int port, long baud); *
1682 * 8. MoxaPortSetTermio(int port, unsigned char *termio); *
1683 * 9. MoxaPortGetLineOut(int port, int *dtrState, int *rtsState); *
1684 * 10. MoxaPortLineCtrl(int port, int dtrState, int rtsState); *
1685 * 11. MoxaPortFlowCtrl(int port, int rts, int cts, int rx, int tx,int xany); *
1686 * 12. MoxaPortLineStatus(int port); *
1687 * 13. MoxaPortDCDChange(int port); *
1688 * 14. MoxaPortDCDON(int port); *
1689 * 15. MoxaPortFlushData(int port, int mode); *
1690 * 16. MoxaPortWriteData(int port, unsigned char * buffer, int length); *
1691 * 17. MoxaPortReadData(int port, struct tty_struct *tty); *
1692 * 20. MoxaPortTxQueue(int port); *
1693 * 21. MoxaPortTxFree(int port); *
1694 * 22. MoxaPortRxQueue(int port); *
1695 * 24. MoxaPortTxDisable(int port); *
1696 * 25. MoxaPortTxEnable(int port); *
1697 * 27. MoxaPortResetBrkCnt(int port); *
1698 * 30. MoxaPortSendBreak(int port, int ticks); *
1699 *****************************************************************************/
1700/* 1604/*
1701 * Moxa Port Number Description: 1605 * Moxa Port Number Description:
1702 * 1606 *
@@ -1733,33 +1637,6 @@ int MoxaPortsOfCard(int cardno)
1733 * -ENOIOCTLCMD 1637 * -ENOIOCTLCMD
1734 * 1638 *
1735 * 1639 *
1736 * Function 3: Moxa driver polling process routine.
1737 * Syntax:
1738 * int MoxaDriverPoll(void);
1739 *
1740 *      return:  0       : polling O.K.
1741 *               -1      : no Moxa card present.
1742 *
1743 *
1744 * Function 4: Get the ports of this card.
1745 * Syntax:
1746 * int MoxaPortsOfCard(int cardno);
1747 *
1748 * int cardno : card number (0 - 3)
1749 *
1750 * return: 0 : this card is invalid
1751 * 8/16/24/32
1752 *
1753 *
1754 *      Function 5:     Check whether this port is valid.
1755 * Syntax:
1756 * int MoxaPortIsValid(int port);
1757 * int port : port number (0 - 127, ref port description)
1758 *
1759 * return: 0 : this port is invalid
1760 * 1 : this port is valid
1761 *
1762 *
1763 * Function 6: Enable this port to start Tx/Rx data. 1640 * Function 6: Enable this port to start Tx/Rx data.
1764 * Syntax: 1641 * Syntax:
1765 * void MoxaPortEnable(int port); 1642 * void MoxaPortEnable(int port);
@@ -1772,18 +1649,9 @@ int MoxaPortsOfCard(int cardno)
1772 * int port : port number (0 - 127) 1649 * int port : port number (0 - 127)
1773 * 1650 *
1774 * 1651 *
1775 *      Function 8:     Get the maximum available baud rate of this port.
1776 * Syntax:
1777 * long MoxaPortGetMaxBaud(int port);
1778 * int port : port number (0 - 127)
1779 *
1780 * return: 0 : this port is invalid
1781 * 38400/57600/115200 bps
1782 *
1783 *
1784 *      Function 10:    Set the baud rate of this port.				 1652 *      Function 10:    Set the baud rate of this port.
1785 * Syntax: 1653 * Syntax:
1786 * long MoxaPortSetBaud(int port, long baud); 1654 * speed_t MoxaPortSetBaud(int port, speed_t baud);
1787 * int port : port number (0 - 127) 1655 * int port : port number (0 - 127)
1788 * long baud : baud rate (50 - 115200) 1656 * long baud : baud rate (50 - 115200)
1789 * 1657 *
@@ -1850,25 +1718,6 @@ int MoxaPortsOfCard(int cardno)
1850 * Bit 2 - DCD state (0: off, 1: on) 1718 * Bit 2 - DCD state (0: off, 1: on)
1851 * 1719 *
1852 * 1720 *
1853 *      Function 17:    Check whether the DCD state has changed since the last
1854 *                      call to this function.
1855 * Syntax:
1856 * int MoxaPortDCDChange(int port);
1857 * int port : port number (0 - 127)
1858 *
1859 * return: 0 : no changed
1860 * 1 : DCD has changed
1861 *
1862 *
1863 *      Function 18:    Check whether the current DCD state is ON.
1864 * Syntax:
1865 * int MoxaPortDCDON(int port);
1866 * int port : port number (0 - 127)
1867 *
1868 * return: 0 : DCD off
1869 * 1 : DCD on
1870 *
1871 *
1872 * Function 19: Flush the Rx/Tx buffer data of this port. 1721 * Function 19: Flush the Rx/Tx buffer data of this port.
1873 * Syntax: 1722 * Syntax:
1874 * void MoxaPortFlushData(int port, int mode); 1723 * void MoxaPortFlushData(int port, int mode);
@@ -1942,40 +1791,20 @@ int MoxaPortsOfCard(int cardno)
1942 * return: 0 - .. : BREAK signal count 1791 * return: 0 - .. : BREAK signal count
1943 * 1792 *
1944 * 1793 *
1945 * Function 34: Send out a BREAK signal.
1946 * Syntax:
1947 * void MoxaPortSendBreak(int port, int ms100);
1948 * int port : port number (0 - 127)
1949 * int ms100 : break signal time interval.
1950 *                         unit: 100 milliseconds. If ms100 == 0, it will
1951 *                         send out an approximately 250 ms BREAK signal.
1952 *
1953 */ 1794 */
1954int MoxaPortIsValid(int port)
1955{
1956
1957 if (moxaCard == 0)
1958 return (0);
1959 if (moxa_ports[port].chkPort == 0)
1960 return (0);
1961 return (1);
1962}
1963 1795
1964void MoxaPortEnable(int port) 1796static void MoxaPortEnable(struct moxa_port *port)
1965{ 1797{
1966 void __iomem *ofsAddr; 1798 void __iomem *ofsAddr;
1967 int MoxaPortLineStatus(int); 1799 u16 lowwater = 512;
1968 short lowwater = 512;
1969 1800
1970 ofsAddr = moxa_ports[port].tableAddr; 1801 ofsAddr = port->tableAddr;
1971 writew(lowwater, ofsAddr + Low_water); 1802 writew(lowwater, ofsAddr + Low_water);
1972 moxa_ports[port].breakCnt = 0; 1803 if (MOXA_IS_320(port->board))
1973 if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) ||
1974 (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) {
1975 moxafunc(ofsAddr, FC_SetBreakIrq, 0); 1804 moxafunc(ofsAddr, FC_SetBreakIrq, 0);
1976 } else { 1805 else
1977 writew(readw(ofsAddr + HostStat) | WakeupBreak, ofsAddr + HostStat); 1806 writew(readw(ofsAddr + HostStat) | WakeupBreak,
1978 } 1807 ofsAddr + HostStat);
1979 1808
1980 moxafunc(ofsAddr, FC_SetLineIrq, Magic_code); 1809 moxafunc(ofsAddr, FC_SetLineIrq, Magic_code);
1981 moxafunc(ofsAddr, FC_FlushQueue, 2); 1810 moxafunc(ofsAddr, FC_FlushQueue, 2);
@@ -1984,9 +1813,9 @@ void MoxaPortEnable(int port)
1984 MoxaPortLineStatus(port); 1813 MoxaPortLineStatus(port);
1985} 1814}
1986 1815
1987void MoxaPortDisable(int port) 1816static void MoxaPortDisable(struct moxa_port *port)
1988{ 1817{
1989 void __iomem *ofsAddr = moxa_ports[port].tableAddr; 1818 void __iomem *ofsAddr = port->tableAddr;
1990 1819
1991 moxafunc(ofsAddr, FC_SetFlowCtl, 0); /* disable flow control */ 1820 moxafunc(ofsAddr, FC_SetFlowCtl, 0); /* disable flow control */
1992 moxafunc(ofsAddr, FC_ClrLineIrq, Magic_code); 1821 moxafunc(ofsAddr, FC_ClrLineIrq, Magic_code);
@@ -1994,49 +1823,32 @@ void MoxaPortDisable(int port)
1994 moxafunc(ofsAddr, FC_DisableCH, Magic_code); 1823 moxafunc(ofsAddr, FC_DisableCH, Magic_code);
1995} 1824}
1996 1825
1997long MoxaPortGetMaxBaud(int port) 1826static speed_t MoxaPortSetBaud(struct moxa_port *port, speed_t baud)
1998{
1999 if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) ||
2000 (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI))
2001 return (460800L);
2002 else
2003 return (921600L);
2004}
2005
2006
2007long MoxaPortSetBaud(int port, long baud)
2008{ 1827{
2009 void __iomem *ofsAddr; 1828 void __iomem *ofsAddr = port->tableAddr;
2010 long max, clock; 1829 unsigned int clock, val;
2011 unsigned int val; 1830 speed_t max;
2012 1831
2013 if ((baud < 50L) || ((max = MoxaPortGetMaxBaud(port)) == 0)) 1832 max = MOXA_IS_320(port->board) ? 460800 : 921600;
2014 return (0); 1833 if (baud < 50)
2015 ofsAddr = moxa_ports[port].tableAddr; 1834 return 0;
2016 if (baud > max) 1835 if (baud > max)
2017 baud = max; 1836 baud = max;
2018 if (max == 38400L) 1837 clock = 921600;
2019 clock = 614400L; /* for 9.8304 Mhz : max. 38400 bps */
2020 else if (max == 57600L)
2021 clock = 691200L; /* for 11.0592 Mhz : max. 57600 bps */
2022 else
2023 clock = 921600L; /* for 14.7456 Mhz : max. 115200 bps */
2024 val = clock / baud; 1838 val = clock / baud;
2025 moxafunc(ofsAddr, FC_SetBaud, val); 1839 moxafunc(ofsAddr, FC_SetBaud, val);
2026 baud = clock / val; 1840 baud = clock / val;
2027 moxa_ports[port].curBaud = baud; 1841 return baud;
2028 return (baud);
2029} 1842}
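The divisor arithmetic above rounds the requested rate to what the fixed 921600 clock base can actually produce (val = clock / baud, then baud = clock / val). A small standalone check of that rounding:

#include <stdio.h>

int main(void)
{
	unsigned int clock = 921600;
	unsigned int requested[] = { 38400, 57500, 115200 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int val  = clock / requested[i];	/* divisor sent via FC_SetBaud */
		unsigned int real = clock / val;		/* rate the firmware will run */
		printf("asked %6u -> divisor %3u -> actual %6u\n",
		       requested[i], val, real);
	}
	return 0;	/* 38400->38400, 57500->57600, 115200->115200 */
}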
2030 1843
2031int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud) 1844static int MoxaPortSetTermio(struct moxa_port *port, struct ktermios *termio,
1845 speed_t baud)
2032{ 1846{
2033 void __iomem *ofsAddr; 1847 void __iomem *ofsAddr;
2034 tcflag_t cflag; 1848 tcflag_t cflag;
2035 tcflag_t mode = 0; 1849 tcflag_t mode = 0;
2036 1850
2037 if (moxa_ports[port].chkPort == 0 || termio == 0) 1851 ofsAddr = port->tableAddr;
2038 return (-1);
2039 ofsAddr = moxa_ports[port].tableAddr;
2040 cflag = termio->c_cflag; /* termio->c_cflag */ 1852 cflag = termio->c_cflag; /* termio->c_cflag */
2041 1853
2042 mode = termio->c_cflag & CSIZE; 1854 mode = termio->c_cflag & CSIZE;
@@ -2065,13 +1877,11 @@ int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud)
2065 } else 1877 } else
2066 mode |= MX_PARNONE; 1878 mode |= MX_PARNONE;
2067 1879
2068 moxafunc(ofsAddr, FC_SetDataMode, (ushort) mode); 1880 moxafunc(ofsAddr, FC_SetDataMode, (u16)mode);
1881
1882 if (MOXA_IS_320(port->board) && baud >= 921600)
1883 return -1;
2069 1884
2070 if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) ||
2071 (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) {
2072 if (baud >= 921600L)
2073 return (-1);
2074 }
2075 baud = MoxaPortSetBaud(port, baud); 1885 baud = MoxaPortSetBaud(port, baud);
2076 1886
2077 if (termio->c_iflag & (IXON | IXOFF | IXANY)) { 1887 if (termio->c_iflag & (IXON | IXOFF | IXANY)) {
@@ -2081,51 +1891,37 @@ int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud)
2081 moxa_wait_finish(ofsAddr); 1891 moxa_wait_finish(ofsAddr);
2082 1892
2083 } 1893 }
2084 return (baud); 1894 return baud;
2085} 1895}
2086 1896
2087int MoxaPortGetLineOut(int port, int *dtrState, int *rtsState) 1897static int MoxaPortGetLineOut(struct moxa_port *port, int *dtrState,
1898 int *rtsState)
2088{ 1899{
1900 if (dtrState)
1901 *dtrState = !!(port->lineCtrl & DTR_ON);
1902 if (rtsState)
1903 *rtsState = !!(port->lineCtrl & RTS_ON);
2089 1904
2090 if (!MoxaPortIsValid(port)) 1905 return 0;
2091 return (-1);
2092 if (dtrState) {
2093 if (moxa_ports[port].lineCtrl & DTR_ON)
2094 *dtrState = 1;
2095 else
2096 *dtrState = 0;
2097 }
2098 if (rtsState) {
2099 if (moxa_ports[port].lineCtrl & RTS_ON)
2100 *rtsState = 1;
2101 else
2102 *rtsState = 0;
2103 }
2104 return (0);
2105} 1906}
2106 1907
2107void MoxaPortLineCtrl(int port, int dtr, int rts) 1908static void MoxaPortLineCtrl(struct moxa_port *port, int dtr, int rts)
2108{ 1909{
2109 void __iomem *ofsAddr; 1910 u8 mode = 0;
2110 int mode;
2111 1911
2112 ofsAddr = moxa_ports[port].tableAddr;
2113 mode = 0;
2114 if (dtr) 1912 if (dtr)
2115 mode |= DTR_ON; 1913 mode |= DTR_ON;
2116 if (rts) 1914 if (rts)
2117 mode |= RTS_ON; 1915 mode |= RTS_ON;
2118 moxa_ports[port].lineCtrl = mode; 1916 port->lineCtrl = mode;
2119 moxafunc(ofsAddr, FC_LineControl, mode); 1917 moxafunc(port->tableAddr, FC_LineControl, mode);
2120} 1918}
2121 1919
2122void MoxaPortFlowCtrl(int port, int rts, int cts, int txflow, int rxflow, int txany) 1920static void MoxaPortFlowCtrl(struct moxa_port *port, int rts, int cts,
1921 int txflow, int rxflow, int txany)
2123{ 1922{
2124 void __iomem *ofsAddr; 1923 int mode = 0;
2125 int mode;
2126 1924
2127 ofsAddr = moxa_ports[port].tableAddr;
2128 mode = 0;
2129 if (rts) 1925 if (rts)
2130 mode |= RTS_FlowCtl; 1926 mode |= RTS_FlowCtl;
2131 if (cts) 1927 if (cts)
@@ -2136,81 +1932,50 @@ void MoxaPortFlowCtrl(int port, int rts, int cts, int txflow, int rxflow, int tx
2136 mode |= Rx_FlowCtl; 1932 mode |= Rx_FlowCtl;
2137 if (txany) 1933 if (txany)
2138 mode |= IXM_IXANY; 1934 mode |= IXM_IXANY;
2139 moxafunc(ofsAddr, FC_SetFlowCtl, mode); 1935 moxafunc(port->tableAddr, FC_SetFlowCtl, mode);
2140} 1936}
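moxa_set_tty_param() feeds these five flags from termios: CRTSCTS turns on both hardware directions (rts = cts = 1 above) and the software directions follow IXON/IXOFF. The exact IXON/IXOFF lines fall outside this hunk, so the mapping below follows standard termios semantics and is only a sketch, reusing the FC_SetFlowCtl bit values from this driver:

#include <stdio.h>
#include <termios.h>

#define CTS_FlowCtl 1
#define RTS_FlowCtl 2
#define Tx_FlowCtl  4
#define Rx_FlowCtl  8
#define IXM_IXANY   0x10

static int flowctl_mode(tcflag_t cflag, tcflag_t iflag)
{
	int mode = 0;

	if (cflag & CRTSCTS)
		mode |= RTS_FlowCtl | CTS_FlowCtl;	/* hardware handshake */
	if (iflag & IXON)
		mode |= Tx_FlowCtl;			/* obey incoming XOFF */
	if (iflag & IXOFF)
		mode |= Rx_FlowCtl;			/* emit XOFF when RX fills */
	if (iflag & IXANY)
		mode |= IXM_IXANY;
	return mode;
}

int main(void)
{
	printf("CRTSCTS          -> 0x%02x\n", flowctl_mode(CRTSCTS, 0));
	printf("IXON|IXOFF|IXANY -> 0x%02x\n", flowctl_mode(0, IXON | IXOFF | IXANY));
	return 0;
}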
2141 1937
2142int MoxaPortLineStatus(int port) 1938static int MoxaPortLineStatus(struct moxa_port *port)
2143{ 1939{
2144 void __iomem *ofsAddr; 1940 void __iomem *ofsAddr;
2145 int val; 1941 int val;
2146 1942
2147 ofsAddr = moxa_ports[port].tableAddr; 1943 ofsAddr = port->tableAddr;
2148 if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) || 1944 if (MOXA_IS_320(port->board)) {
2149 (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) {
2150 moxafunc(ofsAddr, FC_LineStatus, 0); 1945 moxafunc(ofsAddr, FC_LineStatus, 0);
2151 val = readw(ofsAddr + FuncArg); 1946 val = readw(ofsAddr + FuncArg);
2152 } else { 1947 } else {
2153 val = readw(ofsAddr + FlagStat) >> 4; 1948 val = readw(ofsAddr + FlagStat) >> 4;
2154 } 1949 }
2155 val &= 0x0B; 1950 val &= 0x0B;
2156 if (val & 8) { 1951 if (val & 8)
2157 val |= 4; 1952 val |= 4;
2158 if ((moxa_ports[port].DCDState & DCD_oldstate) == 0) 1953 spin_lock_bh(&moxa_lock);
2159 moxa_ports[port].DCDState = (DCD_oldstate | DCD_changed); 1954 moxa_new_dcdstate(port, val & 8);
2160 } else { 1955 spin_unlock_bh(&moxa_lock);
2161 if (moxa_ports[port].DCDState & DCD_oldstate)
2162 moxa_ports[port].DCDState = DCD_changed;
2163 }
2164 val &= 7; 1956 val &= 7;
2165 return (val); 1957 return val;
2166}
2167
2168int MoxaPortDCDChange(int port)
2169{
2170 int n;
2171
2172 if (moxa_ports[port].chkPort == 0)
2173 return (0);
2174 n = moxa_ports[port].DCDState;
2175 moxa_ports[port].DCDState &= ~DCD_changed;
2176 n &= DCD_changed;
2177 return (n);
2178}
2179
2180int MoxaPortDCDON(int port)
2181{
2182 int n;
2183
2184 if (moxa_ports[port].chkPort == 0)
2185 return (0);
2186 if (moxa_ports[port].DCDState & DCD_oldstate)
2187 n = 1;
2188 else
2189 n = 0;
2190 return (n);
2191} 1958}
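The value returned here packs CTS into bit 0, DSR into bit 1 and DCD into bit 2 (the raw DCD flag at bit 3 is folded down before the final mask), which is exactly how moxa_tiocmget() at the top of this section unpacks it. For reference:

#include <stdio.h>

/* Layout produced by MoxaPortLineStatus(): bit0 CTS, bit1 DSR, bit2 DCD. */
static void print_line_status(int val)
{
	printf("CTS=%d DSR=%d DCD=%d\n",
	       !!(val & 1), !!(val & 2), !!(val & 4));
}

int main(void)
{
	print_line_status(0x05);	/* CTS and DCD asserted */
	return 0;
}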
2192 1959
2193int MoxaPortWriteData(int port, unsigned char * buffer, int len) 1960static int MoxaPortWriteData(struct moxa_port *port,
1961 const unsigned char *buffer, int len)
2194{ 1962{
2195 int c, total, i;
2196 ushort tail;
2197 int cnt;
2198 ushort head, tx_mask, spage, epage;
2199 ushort pageno, pageofs, bufhead;
2200 void __iomem *baseAddr, *ofsAddr, *ofs; 1963 void __iomem *baseAddr, *ofsAddr, *ofs;
1964 unsigned int c, total;
1965 u16 head, tail, tx_mask, spage, epage;
1966 u16 pageno, pageofs, bufhead;
2201 1967
2202 ofsAddr = moxa_ports[port].tableAddr; 1968 ofsAddr = port->tableAddr;
2203 baseAddr = moxa_boards[port / MAX_PORTS_PER_BOARD].basemem; 1969 baseAddr = port->board->basemem;
2204 tx_mask = readw(ofsAddr + TX_mask); 1970 tx_mask = readw(ofsAddr + TX_mask);
2205 spage = readw(ofsAddr + Page_txb); 1971 spage = readw(ofsAddr + Page_txb);
2206 epage = readw(ofsAddr + EndPage_txb); 1972 epage = readw(ofsAddr + EndPage_txb);
2207 tail = readw(ofsAddr + TXwptr); 1973 tail = readw(ofsAddr + TXwptr);
2208 head = readw(ofsAddr + TXrptr); 1974 head = readw(ofsAddr + TXrptr);
2209 c = (head > tail) ? (head - tail - 1) 1975 c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask);
2210 : (head - tail + tx_mask);
2211 if (c > len) 1976 if (c > len)
2212 c = len; 1977 c = len;
2213 moxaLog.txcnt[port] += c; 1978 moxaLog.txcnt[port->tty->index] += c;
2214 total = c; 1979 total = c;
2215 if (spage == epage) { 1980 if (spage == epage) {
2216 bufhead = readw(ofsAddr + Ofs_txb); 1981 bufhead = readw(ofsAddr + Ofs_txb);
@@ -2222,249 +1987,179 @@ int MoxaPortWriteData(int port, unsigned char * buffer, int len)
2222 len = tx_mask + 1 - tail; 1987 len = tx_mask + 1 - tail;
2223 len = (c > len) ? len : c; 1988 len = (c > len) ? len : c;
2224 ofs = baseAddr + DynPage_addr + bufhead + tail; 1989 ofs = baseAddr + DynPage_addr + bufhead + tail;
2225 for (i = 0; i < len; i++) 1990 memcpy_toio(ofs, buffer, len);
2226 writeb(*buffer++, ofs + i); 1991 buffer += len;
2227 tail = (tail + len) & tx_mask; 1992 tail = (tail + len) & tx_mask;
2228 c -= len; 1993 c -= len;
2229 } 1994 }
2230 writew(tail, ofsAddr + TXwptr);
2231 } else { 1995 } else {
2232 len = c;
2233 pageno = spage + (tail >> 13); 1996 pageno = spage + (tail >> 13);
2234 pageofs = tail & Page_mask; 1997 pageofs = tail & Page_mask;
2235 do { 1998 while (c > 0) {
2236 cnt = Page_size - pageofs; 1999 len = Page_size - pageofs;
2237 if (cnt > c) 2000 if (len > c)
2238 cnt = c; 2001 len = c;
2239 c -= cnt;
2240 writeb(pageno, baseAddr + Control_reg); 2002 writeb(pageno, baseAddr + Control_reg);
2241 ofs = baseAddr + DynPage_addr + pageofs; 2003 ofs = baseAddr + DynPage_addr + pageofs;
2242 for (i = 0; i < cnt; i++) 2004 memcpy_toio(ofs, buffer, len);
2243 writeb(*buffer++, ofs + i); 2005 buffer += len;
2244 if (c == 0) {
2245 writew((tail + len) & tx_mask, ofsAddr + TXwptr);
2246 break;
2247 }
2248 if (++pageno == epage) 2006 if (++pageno == epage)
2249 pageno = spage; 2007 pageno = spage;
2250 pageofs = 0; 2008 pageofs = 0;
2251 } while (1); 2009 c -= len;
2010 }
2011 tail = (tail + total) & tx_mask;
2252 } 2012 }
2013 writew(tail, ofsAddr + TXwptr);
2253 writeb(1, ofsAddr + CD180TXirq); /* start to send */ 2014 writeb(1, ofsAddr + CD180TXirq); /* start to send */
2254 return (total); 2015 return total;
2255} 2016}
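When the transmit ring spans several 8 KiB pages, the code banks the window in with Control_reg and addresses it through DynPage_addr; the page/offset split for a ring position is just a shift by 13 and a mask. A standalone illustration using the Page_size/Page_mask values from this driver (spage = 4 is an arbitrary example):

#include <stdio.h>

#define Page_size 0x2000		/* 8 KiB banked window */
#define Page_mask (Page_size - 1)

int main(void)
{
	unsigned int spage = 4;		/* example value of Page_txb */
	unsigned int tails[] = { 0x0000, 0x1fff, 0x2000, 0x5432 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int pageno = spage + (tails[i] >> 13);	/* 2^13 == Page_size */
		unsigned int ofs    = tails[i] & Page_mask;
		printf("tail 0x%04x -> page %u, offset 0x%04x\n",
		       tails[i], pageno, ofs);
	}
	return 0;
}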
2256 2017
2257int MoxaPortReadData(int port, struct tty_struct *tty) 2018static int MoxaPortReadData(struct moxa_port *port)
2258{ 2019{
2259 register ushort head, pageofs; 2020 struct tty_struct *tty = port->tty;
2260 int i, count, cnt, len, total, remain; 2021 unsigned char *dst;
2261 ushort tail, rx_mask, spage, epage;
2262 ushort pageno, bufhead;
2263 void __iomem *baseAddr, *ofsAddr, *ofs; 2022 void __iomem *baseAddr, *ofsAddr, *ofs;
2023 unsigned int count, len, total;
2024 u16 tail, rx_mask, spage, epage;
2025 u16 pageno, pageofs, bufhead, head;
2264 2026
2265 ofsAddr = moxa_ports[port].tableAddr; 2027 ofsAddr = port->tableAddr;
2266 baseAddr = moxa_boards[port / MAX_PORTS_PER_BOARD].basemem; 2028 baseAddr = port->board->basemem;
2267 head = readw(ofsAddr + RXrptr); 2029 head = readw(ofsAddr + RXrptr);
2268 tail = readw(ofsAddr + RXwptr); 2030 tail = readw(ofsAddr + RXwptr);
2269 rx_mask = readw(ofsAddr + RX_mask); 2031 rx_mask = readw(ofsAddr + RX_mask);
2270 spage = readw(ofsAddr + Page_rxb); 2032 spage = readw(ofsAddr + Page_rxb);
2271 epage = readw(ofsAddr + EndPage_rxb); 2033 epage = readw(ofsAddr + EndPage_rxb);
2272 count = (tail >= head) ? (tail - head) 2034 count = (tail >= head) ? (tail - head) : (tail - head + rx_mask + 1);
2273 : (tail - head + rx_mask + 1);
2274 if (count == 0) 2035 if (count == 0)
2275 return 0; 2036 return 0;
2276 2037
2277 total = count; 2038 total = count;
2278 remain = count - total; 2039 moxaLog.rxcnt[tty->index] += total;
2279 moxaLog.rxcnt[port] += total;
2280 count = total;
2281 if (spage == epage) { 2040 if (spage == epage) {
2282 bufhead = readw(ofsAddr + Ofs_rxb); 2041 bufhead = readw(ofsAddr + Ofs_rxb);
2283 writew(spage, baseAddr + Control_reg); 2042 writew(spage, baseAddr + Control_reg);
2284 while (count > 0) { 2043 while (count > 0) {
2285 if (tail >= head)
2286 len = tail - head;
2287 else
2288 len = rx_mask + 1 - head;
2289 len = (count > len) ? len : count;
2290 ofs = baseAddr + DynPage_addr + bufhead + head; 2044 ofs = baseAddr + DynPage_addr + bufhead + head;
2291 for (i = 0; i < len; i++) 2045 len = (tail >= head) ? (tail - head) :
2292 tty_insert_flip_char(tty, readb(ofs + i), TTY_NORMAL); 2046 (rx_mask + 1 - head);
2047 len = tty_prepare_flip_string(tty, &dst,
2048 min(len, count));
2049 memcpy_fromio(dst, ofs, len);
2293 head = (head + len) & rx_mask; 2050 head = (head + len) & rx_mask;
2294 count -= len; 2051 count -= len;
2295 } 2052 }
2296 writew(head, ofsAddr + RXrptr);
2297 } else { 2053 } else {
2298 len = count;
2299 pageno = spage + (head >> 13); 2054 pageno = spage + (head >> 13);
2300 pageofs = head & Page_mask; 2055 pageofs = head & Page_mask;
2301 do { 2056 while (count > 0) {
2302 cnt = Page_size - pageofs;
2303 if (cnt > count)
2304 cnt = count;
2305 count -= cnt;
2306 writew(pageno, baseAddr + Control_reg); 2057 writew(pageno, baseAddr + Control_reg);
2307 ofs = baseAddr + DynPage_addr + pageofs; 2058 ofs = baseAddr + DynPage_addr + pageofs;
2308 for (i = 0; i < cnt; i++) 2059 len = tty_prepare_flip_string(tty, &dst,
2309 tty_insert_flip_char(tty, readb(ofs + i), TTY_NORMAL); 2060 min(Page_size - pageofs, count));
2310 if (count == 0) { 2061 memcpy_fromio(dst, ofs, len);
2311 writew((head + len) & rx_mask, ofsAddr + RXrptr); 2062
2312 break; 2063 count -= len;
2313 } 2064 pageofs = (pageofs + len) & Page_mask;
2314 if (++pageno == epage) 2065 if (pageofs == 0 && ++pageno == epage)
2315 pageno = spage; 2066 pageno = spage;
2316 pageofs = 0; 2067 }
2317 } while (1); 2068 head = (head + total) & rx_mask;
2318 } 2069 }
2319 if ((readb(ofsAddr + FlagStat) & Xoff_state) && (remain < LowWater)) { 2070 writew(head, ofsAddr + RXrptr);
2071 if (readb(ofsAddr + FlagStat) & Xoff_state) {
2320 moxaLowWaterChk = 1; 2072 moxaLowWaterChk = 1;
2321 moxa_ports[port].lowChkFlag = 1; 2073 port->lowChkFlag = 1;
2322 } 2074 }
2323 return (total); 2075 return total;
2324} 2076}
2325 2077
2326 2078
2327int MoxaPortTxQueue(int port) 2079static int MoxaPortTxQueue(struct moxa_port *port)
2328{ 2080{
2329 void __iomem *ofsAddr; 2081 void __iomem *ofsAddr = port->tableAddr;
2330 ushort rptr, wptr, mask; 2082 u16 rptr, wptr, mask;
2331 int len;
2332 2083
2333 ofsAddr = moxa_ports[port].tableAddr;
2334 rptr = readw(ofsAddr + TXrptr); 2084 rptr = readw(ofsAddr + TXrptr);
2335 wptr = readw(ofsAddr + TXwptr); 2085 wptr = readw(ofsAddr + TXwptr);
2336 mask = readw(ofsAddr + TX_mask); 2086 mask = readw(ofsAddr + TX_mask);
2337 len = (wptr - rptr) & mask; 2087 return (wptr - rptr) & mask;
2338 return (len);
2339} 2088}
2340 2089
2341int MoxaPortTxFree(int port) 2090static int MoxaPortTxFree(struct moxa_port *port)
2342{ 2091{
2343 void __iomem *ofsAddr; 2092 void __iomem *ofsAddr = port->tableAddr;
2344 ushort rptr, wptr, mask; 2093 u16 rptr, wptr, mask;
2345 int len;
2346 2094
2347 ofsAddr = moxa_ports[port].tableAddr;
2348 rptr = readw(ofsAddr + TXrptr); 2095 rptr = readw(ofsAddr + TXrptr);
2349 wptr = readw(ofsAddr + TXwptr); 2096 wptr = readw(ofsAddr + TXwptr);
2350 mask = readw(ofsAddr + TX_mask); 2097 mask = readw(ofsAddr + TX_mask);
2351 len = mask - ((wptr - rptr) & mask); 2098 return mask - ((wptr - rptr) & mask);
2352 return (len);
2353} 2099}
2354 2100
2355int MoxaPortRxQueue(int port) 2101static int MoxaPortRxQueue(struct moxa_port *port)
2356{ 2102{
2357 void __iomem *ofsAddr; 2103 void __iomem *ofsAddr = port->tableAddr;
2358 ushort rptr, wptr, mask; 2104 u16 rptr, wptr, mask;
2359 int len;
2360 2105
2361 ofsAddr = moxa_ports[port].tableAddr;
2362 rptr = readw(ofsAddr + RXrptr); 2106 rptr = readw(ofsAddr + RXrptr);
2363 wptr = readw(ofsAddr + RXwptr); 2107 wptr = readw(ofsAddr + RXwptr);
2364 mask = readw(ofsAddr + RX_mask); 2108 mask = readw(ofsAddr + RX_mask);
2365 len = (wptr - rptr) & mask; 2109 return (wptr - rptr) & mask;
2366 return (len);
2367} 2110}
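All three queue helpers rely on the ring pointers wrapping at a power-of-two mask: occupancy is (wptr - rptr) & mask and free space is mask minus that, with one slot sacrificed so a full ring is distinguishable from an empty one (compare the head - tail - 1 in MoxaPortWriteData). A quick standalone check with a C218-sized TX ring:

#include <stdio.h>

int main(void)
{
	unsigned short mask = 0x7fff;			/* C218tx_size 0x8000 - 1 */
	unsigned short rptr = 0x7ff0, wptr = 0x0010;	/* writer has already wrapped */
	unsigned short used = (wptr - rptr) & mask;
	unsigned short free = mask - used;

	printf("used=%u free=%u\n", used, free);	/* used=32 free=32735 */
	return 0;
}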
2368 2111
2369 2112static void MoxaPortTxDisable(struct moxa_port *port)
2370void MoxaPortTxDisable(int port)
2371{ 2113{
2372 void __iomem *ofsAddr; 2114 moxafunc(port->tableAddr, FC_SetXoffState, Magic_code);
2373
2374 ofsAddr = moxa_ports[port].tableAddr;
2375 moxafunc(ofsAddr, FC_SetXoffState, Magic_code);
2376} 2115}
2377 2116
2378void MoxaPortTxEnable(int port) 2117static void MoxaPortTxEnable(struct moxa_port *port)
2379{ 2118{
2380 void __iomem *ofsAddr; 2119 moxafunc(port->tableAddr, FC_SetXonState, Magic_code);
2381
2382 ofsAddr = moxa_ports[port].tableAddr;
2383 moxafunc(ofsAddr, FC_SetXonState, Magic_code);
2384}
2385
2386
2387int MoxaPortResetBrkCnt(int port)
2388{
2389 ushort cnt;
2390 cnt = moxa_ports[port].breakCnt;
2391 moxa_ports[port].breakCnt = 0;
2392 return (cnt);
2393}
2394
2395
2396void MoxaPortSendBreak(int port, int ms100)
2397{
2398 void __iomem *ofsAddr;
2399
2400 ofsAddr = moxa_ports[port].tableAddr;
2401 if (ms100) {
2402 moxafunc(ofsAddr, FC_SendBreak, Magic_code);
2403 msleep(ms100 * 10);
2404 } else {
2405 moxafunc(ofsAddr, FC_SendBreak, Magic_code);
2406 msleep(250);
2407 }
2408 moxafunc(ofsAddr, FC_StopBreak, Magic_code);
2409} 2120}
2410 2121
2411static int moxa_get_serial_info(struct moxa_port *info, 2122static int moxa_get_serial_info(struct moxa_port *info,
2412 struct serial_struct __user *retinfo) 2123 struct serial_struct __user *retinfo)
2413{ 2124{
2414 struct serial_struct tmp; 2125 struct serial_struct tmp = {
2415 2126 .type = info->type,
2416 memset(&tmp, 0, sizeof(tmp)); 2127 .line = info->tty->index,
2417 tmp.type = info->type; 2128 .flags = info->asyncflags,
2418 tmp.line = info->port; 2129 .baud_base = 921600,
2419 tmp.port = 0; 2130 .close_delay = info->close_delay
2420 tmp.irq = 0; 2131 };
2421 tmp.flags = info->asyncflags; 2132 return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
2422 tmp.baud_base = 921600;
2423 tmp.close_delay = info->close_delay;
2424 tmp.closing_wait = info->closing_wait;
2425 tmp.custom_divisor = 0;
2426 tmp.hub6 = 0;
2427 if(copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
2428 return -EFAULT;
2429 return (0);
2430} 2133}
2431 2134
2432 2135
2433static int moxa_set_serial_info(struct moxa_port *info, 2136static int moxa_set_serial_info(struct moxa_port *info,
2434 struct serial_struct __user *new_info) 2137 struct serial_struct __user *new_info)
2435{ 2138{
2436 struct serial_struct new_serial; 2139 struct serial_struct new_serial;
2437 2140
2438 if(copy_from_user(&new_serial, new_info, sizeof(new_serial))) 2141 if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
2439 return -EFAULT; 2142 return -EFAULT;
2440 2143
2441 if ((new_serial.irq != 0) || 2144 if (new_serial.irq != 0 || new_serial.port != 0 ||
2442 (new_serial.port != 0) || 2145 new_serial.custom_divisor != 0 ||
2443// (new_serial.type != info->type) || 2146 new_serial.baud_base != 921600)
2444 (new_serial.custom_divisor != 0) || 2147 return -EPERM;
2445 (new_serial.baud_base != 921600))
2446 return (-EPERM);
2447 2148
2448 if (!capable(CAP_SYS_ADMIN)) { 2149 if (!capable(CAP_SYS_ADMIN)) {
2449 if (((new_serial.flags & ~ASYNC_USR_MASK) != 2150 if (((new_serial.flags & ~ASYNC_USR_MASK) !=
2450 (info->asyncflags & ~ASYNC_USR_MASK))) 2151 (info->asyncflags & ~ASYNC_USR_MASK)))
2451 return (-EPERM); 2152 return -EPERM;
2452 } else { 2153 } else
2453 info->close_delay = new_serial.close_delay * HZ / 100; 2154 info->close_delay = new_serial.close_delay * HZ / 100;
2454 info->closing_wait = new_serial.closing_wait * HZ / 100;
2455 }
2456 2155
2457 new_serial.flags = (new_serial.flags & ~ASYNC_FLAGS); 2156 new_serial.flags = (new_serial.flags & ~ASYNC_FLAGS);
2458 new_serial.flags |= (info->asyncflags & ASYNC_FLAGS); 2157 new_serial.flags |= (info->asyncflags & ASYNC_FLAGS);
2459 2158
2460 if (new_serial.type == PORT_16550A) { 2159 MoxaSetFifo(info, new_serial.type == PORT_16550A);
2461 MoxaSetFifo(info->port, 1);
2462 } else {
2463 MoxaSetFifo(info->port, 0);
2464 }
2465 2160
2466 info->type = new_serial.type; 2161 info->type = new_serial.type;
2467 return (0); 2162 return 0;
2468} 2163}
2469 2164
2470 2165
@@ -2472,374 +2167,10 @@ static int moxa_set_serial_info(struct moxa_port *info,
2472/***************************************************************************** 2167/*****************************************************************************
2473 * Static local functions: * 2168 * Static local functions: *
2474 *****************************************************************************/ 2169 *****************************************************************************/
2475static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg)
2476{
2477
2478 writew(arg, ofsAddr + FuncArg);
2479 writew(cmd, ofsAddr + FuncCode);
2480 moxa_wait_finish(ofsAddr);
2481}
2482
2483static void moxa_wait_finish(void __iomem *ofsAddr)
2484{
2485 unsigned long i, j;
2486
2487 i = jiffies;
2488 while (readw(ofsAddr + FuncCode) != 0) {
2489 j = jiffies;
2490 if ((j - i) > moxaFuncTout) {
2491 return;
2492 }
2493 }
2494}
2495
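[Editor's note] moxafunc() and moxa_wait_finish() above implement a simple command mailbox toward the card firmware: write the argument word, then the command code, then poll until the firmware clears the command word or a timeout (moxaFuncTout jiffies in the driver) expires. A hedged sketch of the same pattern; the mailbox struct and now_ms() helper are illustrative stand-ins for the FuncArg/FuncCode registers and the jiffies-based timeout, not driver API:

#include <stdint.h>
#include <time.h>

struct mailbox {
	volatile uint16_t func_code;    /* FuncCode: 0 means "done"  */
	volatile uint16_t func_arg;     /* FuncArg: command argument */
};

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Returns 0 on completion, -1 on timeout (the driver just gives up). */
static int mailbox_call(struct mailbox *mb, uint16_t cmd, uint16_t arg,
			long timeout_ms)
{
	long start = now_ms();

	mb->func_arg = arg;             /* argument first ...            */
	mb->func_code = cmd;            /* ... then the command triggers */
	while (mb->func_code != 0)
		if (now_ms() - start > timeout_ms)
			return -1;
	return 0;
}
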
2496static void moxa_low_water_check(void __iomem *ofsAddr)
2497{
2498 int len;
2499 ushort rptr, wptr, mask;
2500
2501 if (readb(ofsAddr + FlagStat) & Xoff_state) {
2502 rptr = readw(ofsAddr + RXrptr);
2503 wptr = readw(ofsAddr + RXwptr);
2504 mask = readw(ofsAddr + RX_mask);
2505 len = (wptr - rptr) & mask;
2506 if (len <= Low_water)
2507 moxafunc(ofsAddr, FC_SendXon, 0);
2508 }
2509}
2510
2511static int moxaloadbios(int cardno, unsigned char __user *tmp, int len)
2512{
2513 void __iomem *baseAddr;
2514 int i;
2515
2516 if(len < 0 || len > sizeof(moxaBuff))
2517 return -EINVAL;
2518 if(copy_from_user(moxaBuff, tmp, len))
2519 return -EFAULT;
2520 baseAddr = moxa_boards[cardno].basemem;
2521 writeb(HW_reset, baseAddr + Control_reg); /* reset */
2522 msleep(10);
2523 for (i = 0; i < 4096; i++)
2524 writeb(0, baseAddr + i); /* clear fix page */
2525 for (i = 0; i < len; i++)
2526 writeb(moxaBuff[i], baseAddr + i); /* download BIOS */
2527 writeb(0, baseAddr + Control_reg); /* restart */
2528 return (0);
2529}
2530
2531static int moxafindcard(int cardno)
2532{
2533 void __iomem *baseAddr;
2534 ushort tmp;
2535
2536 baseAddr = moxa_boards[cardno].basemem;
2537 switch (moxa_boards[cardno].boardType) {
2538 case MOXA_BOARD_C218_ISA:
2539 case MOXA_BOARD_C218_PCI:
2540 if ((tmp = readw(baseAddr + C218_key)) != C218_KeyCode) {
2541 return (-1);
2542 }
2543 break;
2544 case MOXA_BOARD_CP204J:
2545 if ((tmp = readw(baseAddr + C218_key)) != CP204J_KeyCode) {
2546 return (-1);
2547 }
2548 break;
2549 default:
2550 if ((tmp = readw(baseAddr + C320_key)) != C320_KeyCode) {
2551 return (-1);
2552 }
2553 if ((tmp = readw(baseAddr + C320_status)) != STS_init) {
2554 return (-2);
2555 }
2556 }
2557 return (0);
2558}
2559
2560static int moxaload320b(int cardno, unsigned char __user *tmp, int len)
2561{
2562 void __iomem *baseAddr;
2563 int i;
2564
2565 if(len < 0 || len > sizeof(moxaBuff))
2566 return -EINVAL;
2567 if(copy_from_user(moxaBuff, tmp, len))
2568 return -EFAULT;
2569 baseAddr = moxa_boards[cardno].basemem;
2570 writew(len - 7168 - 2, baseAddr + C320bapi_len);
2571 writeb(1, baseAddr + Control_reg); /* Select Page 1 */
2572 for (i = 0; i < 7168; i++)
2573 writeb(moxaBuff[i], baseAddr + DynPage_addr + i);
2574 writeb(2, baseAddr + Control_reg); /* Select Page 2 */
2575 for (i = 0; i < (len - 7168); i++)
2576 writeb(moxaBuff[i + 7168], baseAddr + DynPage_addr + i);
2577 return (0);
2578}
2579
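[Editor's note] moxaload320b() above pushes the C320 BIOS/API image through a single dynamic window: it records the API length (len - 7168 - 2) in C320bapi_len, copies the first 7168 bytes with Control_reg selecting page 1, and the remainder with page 2. A small sketch of that split under the same assumption of a full first page; select_page() and window are illustrative stand-ins for the Control_reg write and the DynPage_addr region:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PAGE1_BYTES 7168        /* portion loaded with Control_reg = 1 */

/* Split a firmware image across the two banked pages, as in
 * moxaload320b().  memcpy() stands in for the per-byte writeb() loop. */
static int load_c320_image(const uint8_t *img, size_t len,
			   void (*select_page)(int page), uint8_t *window)
{
	if (len < PAGE1_BYTES)          /* the driver assumes a full page 1 */
		return -1;

	select_page(1);
	memcpy(window, img, PAGE1_BYTES);
	select_page(2);
	memcpy(window, img + PAGE1_BYTES, len - PAGE1_BYTES);
	return 0;
}
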
2580static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
2581{
2582 void __iomem *baseAddr, *ofsAddr;
2583 int retval, port, i;
2584
2585 if(len < 0 || len > sizeof(moxaBuff))
2586 return -EINVAL;
2587 if(copy_from_user(moxaBuff, tmp, len))
2588 return -EFAULT;
2589 baseAddr = moxa_boards[cardno].basemem;
2590 switch (moxa_boards[cardno].boardType) {
2591 case MOXA_BOARD_C218_ISA:
2592 case MOXA_BOARD_C218_PCI:
2593 case MOXA_BOARD_CP204J:
2594 retval = moxaloadc218(cardno, baseAddr, len);
2595 if (retval)
2596 return (retval);
2597 port = cardno * MAX_PORTS_PER_BOARD;
2598 for (i = 0; i < moxa_boards[cardno].numPorts; i++, port++) {
2599 struct moxa_port *p = &moxa_ports[port];
2600
2601 p->chkPort = 1;
2602 p->curBaud = 9600L;
2603 p->DCDState = 0;
2604 p->tableAddr = baseAddr + Extern_table + Extern_size * i;
2605 ofsAddr = p->tableAddr;
2606 writew(C218rx_mask, ofsAddr + RX_mask);
2607 writew(C218tx_mask, ofsAddr + TX_mask);
2608 writew(C218rx_spage + i * C218buf_pageno, ofsAddr + Page_rxb);
2609 writew(readw(ofsAddr + Page_rxb) + C218rx_pageno, ofsAddr + EndPage_rxb);
2610
2611 writew(C218tx_spage + i * C218buf_pageno, ofsAddr + Page_txb);
2612 writew(readw(ofsAddr + Page_txb) + C218tx_pageno, ofsAddr + EndPage_txb);
2613
2614 }
2615 break;
2616 default:
2617 retval = moxaloadc320(cardno, baseAddr, len,
2618 &moxa_boards[cardno].numPorts);
2619 if (retval)
2620 return (retval);
2621 port = cardno * MAX_PORTS_PER_BOARD;
2622 for (i = 0; i < moxa_boards[cardno].numPorts; i++, port++) {
2623 struct moxa_port *p = &moxa_ports[port];
2624
2625 p->chkPort = 1;
2626 p->curBaud = 9600L;
2627 p->DCDState = 0;
2628 p->tableAddr = baseAddr + Extern_table + Extern_size * i;
2629 ofsAddr = p->tableAddr;
2630 if (moxa_boards[cardno].numPorts == 8) {
2631 writew(C320p8rx_mask, ofsAddr + RX_mask);
2632 writew(C320p8tx_mask, ofsAddr + TX_mask);
2633 writew(C320p8rx_spage + i * C320p8buf_pgno, ofsAddr + Page_rxb);
2634 writew(readw(ofsAddr + Page_rxb) + C320p8rx_pgno, ofsAddr + EndPage_rxb);
2635 writew(C320p8tx_spage + i * C320p8buf_pgno, ofsAddr + Page_txb);
2636 writew(readw(ofsAddr + Page_txb) + C320p8tx_pgno, ofsAddr + EndPage_txb);
2637
2638 } else if (moxa_boards[cardno].numPorts == 16) {
2639 writew(C320p16rx_mask, ofsAddr + RX_mask);
2640 writew(C320p16tx_mask, ofsAddr + TX_mask);
2641 writew(C320p16rx_spage + i * C320p16buf_pgno, ofsAddr + Page_rxb);
2642 writew(readw(ofsAddr + Page_rxb) + C320p16rx_pgno, ofsAddr + EndPage_rxb);
2643 writew(C320p16tx_spage + i * C320p16buf_pgno, ofsAddr + Page_txb);
2644 writew(readw(ofsAddr + Page_txb) + C320p16tx_pgno, ofsAddr + EndPage_txb);
2645
2646 } else if (moxa_boards[cardno].numPorts == 24) {
2647 writew(C320p24rx_mask, ofsAddr + RX_mask);
2648 writew(C320p24tx_mask, ofsAddr + TX_mask);
2649 writew(C320p24rx_spage + i * C320p24buf_pgno, ofsAddr + Page_rxb);
2650 writew(readw(ofsAddr + Page_rxb) + C320p24rx_pgno, ofsAddr + EndPage_rxb);
2651 writew(C320p24tx_spage + i * C320p24buf_pgno, ofsAddr + Page_txb);
2652 writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
2653 } else if (moxa_boards[cardno].numPorts == 32) {
2654 writew(C320p32rx_mask, ofsAddr + RX_mask);
2655 writew(C320p32tx_mask, ofsAddr + TX_mask);
2656 writew(C320p32tx_ofs, ofsAddr + Ofs_txb);
2657 writew(C320p32rx_spage + i * C320p32buf_pgno, ofsAddr + Page_rxb);
2658 writew(readb(ofsAddr + Page_rxb), ofsAddr + EndPage_rxb);
2659 writew(C320p32tx_spage + i * C320p32buf_pgno, ofsAddr + Page_txb);
2660 writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
2661 }
2662 }
2663 break;
2664 }
2665 moxa_boards[cardno].loadstat = 1;
2666 return (0);
2667}
2668
2669static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2670{
2671 char retry;
2672 int i, j, len1, len2;
2673 ushort usum, *ptr, keycode;
2674
2675 if (moxa_boards[cardno].boardType == MOXA_BOARD_CP204J)
2676 keycode = CP204J_KeyCode;
2677 else
2678 keycode = C218_KeyCode;
2679 usum = 0;
2680 len1 = len >> 1;
2681 ptr = (ushort *) moxaBuff;
2682 for (i = 0; i < len1; i++)
2683 usum += le16_to_cpu(*(ptr + i));
2684 retry = 0;
2685 do {
2686 len1 = len >> 1;
2687 j = 0;
2688 while (len1) {
2689 len2 = (len1 > 2048) ? 2048 : len1;
2690 len1 -= len2;
2691 for (i = 0; i < len2 << 1; i++)
2692 writeb(moxaBuff[i + j], baseAddr + C218_LoadBuf + i);
2693 j += i;
2694
2695 writew(len2, baseAddr + C218DLoad_len);
2696 writew(0, baseAddr + C218_key);
2697 for (i = 0; i < 100; i++) {
2698 if (readw(baseAddr + C218_key) == keycode)
2699 break;
2700 msleep(10);
2701 }
2702 if (readw(baseAddr + C218_key) != keycode) {
2703 return (-1);
2704 }
2705 }
2706 writew(0, baseAddr + C218DLoad_len);
2707 writew(usum, baseAddr + C218check_sum);
2708 writew(0, baseAddr + C218_key);
2709 for (i = 0; i < 100; i++) {
2710 if (readw(baseAddr + C218_key) == keycode)
2711 break;
2712 msleep(10);
2713 }
2714 retry++;
2715 } while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3));
2716 if (readb(baseAddr + C218chksum_ok) != 1) {
2717 return (-1);
2718 }
2719 writew(0, baseAddr + C218_key);
2720 for (i = 0; i < 100; i++) {
2721 if (readw(baseAddr + Magic_no) == Magic_code)
2722 break;
2723 msleep(10);
2724 }
2725 if (readw(baseAddr + Magic_no) != Magic_code) {
2726 return (-1);
2727 }
2728 writew(1, baseAddr + Disable_IRQ);
2729 writew(0, baseAddr + Magic_no);
2730 for (i = 0; i < 100; i++) {
2731 if (readw(baseAddr + Magic_no) == Magic_code)
2732 break;
2733 msleep(10);
2734 }
2735 if (readw(baseAddr + Magic_no) != Magic_code) {
2736 return (-1);
2737 }
2738 moxaCard = 1;
2739 moxa_boards[cardno].intNdx = baseAddr + IRQindex;
2740 moxa_boards[cardno].intPend = baseAddr + IRQpending;
2741 moxa_boards[cardno].intTable = baseAddr + IRQtable;
2742 return (0);
2743}
2744
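[Editor's note] moxaloadc218() above (and moxaloadc320() below) verify the download with a plain 16-bit sum over the little-endian words of the image, which the firmware recomputes and acknowledges through C218chksum_ok/C320chksum_ok. A minimal userspace version of that checksum; the function name is illustrative:

#include <stddef.h>
#include <stdint.h>

/* 16-bit firmware checksum: sum of the image's little-endian words,
 * wrapping mod 2^16; a trailing odd byte is ignored, as in the driver. */
static uint16_t fw_checksum(const uint8_t *buf, size_t len)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint16_t)(buf[i] | (buf[i + 1] << 8));
	return sum;
}
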
2745static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPorts)
2746{
2747 ushort usum;
2748 int i, j, wlen, len2, retry;
2749 ushort *uptr;
2750
2751 usum = 0;
2752 wlen = len >> 1;
2753 uptr = (ushort *) moxaBuff;
2754 for (i = 0; i < wlen; i++)
2755 usum += le16_to_cpu(uptr[i]);
2756 retry = 0;
2757 j = 0;
2758 do {
2759 while (wlen) {
2760 if (wlen > 2048)
2761 len2 = 2048;
2762 else
2763 len2 = wlen;
2764 wlen -= len2;
2765 len2 <<= 1;
2766 for (i = 0; i < len2; i++)
2767 writeb(moxaBuff[j + i], baseAddr + C320_LoadBuf + i);
2768 len2 >>= 1;
2769 j += i;
2770 writew(len2, baseAddr + C320DLoad_len);
2771 writew(0, baseAddr + C320_key);
2772 for (i = 0; i < 10; i++) {
2773 if (readw(baseAddr + C320_key) == C320_KeyCode)
2774 break;
2775 msleep(10);
2776 }
2777 if (readw(baseAddr + C320_key) != C320_KeyCode)
2778 return (-1);
2779 }
2780 writew(0, baseAddr + C320DLoad_len);
2781 writew(usum, baseAddr + C320check_sum);
2782 writew(0, baseAddr + C320_key);
2783 for (i = 0; i < 10; i++) {
2784 if (readw(baseAddr + C320_key) == C320_KeyCode)
2785 break;
2786 msleep(10);
2787 }
2788 retry++;
2789 } while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3));
2790 if (readb(baseAddr + C320chksum_ok) != 1)
2791 return (-1);
2792 writew(0, baseAddr + C320_key);
2793 for (i = 0; i < 600; i++) {
2794 if (readw(baseAddr + Magic_no) == Magic_code)
2795 break;
2796 msleep(10);
2797 }
2798 if (readw(baseAddr + Magic_no) != Magic_code)
2799 return (-100);
2800
2801 if (moxa_boards[cardno].busType == MOXA_BUS_TYPE_PCI) { /* ASIC board */
2802 writew(0x3800, baseAddr + TMS320_PORT1);
2803 writew(0x3900, baseAddr + TMS320_PORT2);
2804 writew(28499, baseAddr + TMS320_CLOCK);
2805 } else {
2806 writew(0x3200, baseAddr + TMS320_PORT1);
2807 writew(0x3400, baseAddr + TMS320_PORT2);
2808 writew(19999, baseAddr + TMS320_CLOCK);
2809 }
2810 writew(1, baseAddr + Disable_IRQ);
2811 writew(0, baseAddr + Magic_no);
2812 for (i = 0; i < 500; i++) {
2813 if (readw(baseAddr + Magic_no) == Magic_code)
2814 break;
2815 msleep(10);
2816 }
2817 if (readw(baseAddr + Magic_no) != Magic_code)
2818 return (-102);
2819
2820 j = readw(baseAddr + Module_cnt);
2821 if (j <= 0)
2822 return (-101);
2823 *numPorts = j * 8;
2824 writew(j, baseAddr + Module_no);
2825 writew(0, baseAddr + Magic_no);
2826 for (i = 0; i < 600; i++) {
2827 if (readw(baseAddr + Magic_no) == Magic_code)
2828 break;
2829 msleep(10);
2830 }
2831 if (readw(baseAddr + Magic_no) != Magic_code)
2832 return (-102);
2833 moxaCard = 1;
2834 moxa_boards[cardno].intNdx = baseAddr + IRQindex;
2835 moxa_boards[cardno].intPend = baseAddr + IRQpending;
2836 moxa_boards[cardno].intTable = baseAddr + IRQtable;
2837 return (0);
2838}
2839 2170
2840static void MoxaSetFifo(int port, int enable) 2171static void MoxaSetFifo(struct moxa_port *port, int enable)
2841{ 2172{
2842 void __iomem *ofsAddr = moxa_ports[port].tableAddr; 2173 void __iomem *ofsAddr = port->tableAddr;
2843 2174
2844 if (!enable) { 2175 if (!enable) {
2845 moxafunc(ofsAddr, FC_SetRxFIFOTrig, 0); 2176 moxafunc(ofsAddr, FC_SetRxFIFOTrig, 0);
diff --git a/drivers/char/moxa.h b/drivers/char/moxa.h
new file mode 100644
index 000000000000..87d16ce57be7
--- /dev/null
+++ b/drivers/char/moxa.h
@@ -0,0 +1,304 @@
1#ifndef MOXA_H_FILE
2#define MOXA_H_FILE
3
4#define MOXA 0x400
5#define MOXA_GET_IQUEUE (MOXA + 1) /* get input buffered count */
6#define MOXA_GET_OQUEUE (MOXA + 2) /* get output buffered count */
7#define MOXA_GETDATACOUNT (MOXA + 23)
8#define MOXA_GET_IOQUEUE (MOXA + 27)
9#define MOXA_FLUSH_QUEUE (MOXA + 28)
10#define MOXA_GETMSTATUS (MOXA + 65)
11
12/*
13 * System Configuration
14 */
15
16#define Magic_code 0x404
17
18/*
19 * for C218 BIOS initialization
20 */
21#define C218_ConfBase 0x800
22#define C218_status (C218_ConfBase + 0) /* BIOS running status */
23#define C218_diag (C218_ConfBase + 2) /* diagnostic status */
24#define C218_key (C218_ConfBase + 4) /* WORD (0x218 for C218) */
25#define C218DLoad_len (C218_ConfBase + 6) /* WORD */
26#define C218check_sum (C218_ConfBase + 8) /* BYTE */
27#define C218chksum_ok (C218_ConfBase + 0x0a) /* BYTE (1:ok) */
28#define C218_TestRx (C218_ConfBase + 0x10) /* 8 bytes for 8 ports */
29#define C218_TestTx (C218_ConfBase + 0x18) /* 8 bytes for 8 ports */
30#define C218_RXerr (C218_ConfBase + 0x20) /* 8 bytes for 8 ports */
31#define C218_ErrFlag (C218_ConfBase + 0x28) /* 8 bytes for 8 ports */
32
33#define C218_LoadBuf 0x0F00
34#define C218_KeyCode 0x218
35#define CP204J_KeyCode 0x204
36
37/*
38 * for C320 BIOS initialization
39 */
40#define C320_ConfBase 0x800
41#define C320_LoadBuf 0x0f00
42#define STS_init 0x05 /* for C320_status */
43
44#define C320_status C320_ConfBase + 0 /* BIOS running status */
45#define C320_diag C320_ConfBase + 2 /* diagnostic status */
46#define C320_key C320_ConfBase + 4 /* WORD (0320H for C320) */
47#define C320DLoad_len C320_ConfBase + 6 /* WORD */
48#define C320check_sum C320_ConfBase + 8 /* WORD */
49#define C320chksum_ok C320_ConfBase + 0x0a /* WORD (1:ok) */
50#define C320bapi_len C320_ConfBase + 0x0c /* WORD */
51#define C320UART_no C320_ConfBase + 0x0e /* WORD */
52
53#define C320_KeyCode 0x320
54
55#define FixPage_addr 0x0000 /* starting addr of static page */
56#define DynPage_addr 0x2000 /* starting addr of dynamic page */
57#define C218_start 0x3000 /* starting addr of C218 BIOS prg */
58#define Control_reg 0x1ff0 /* select page and reset control */
59#define HW_reset 0x80
60
61/*
62 * Function Codes
63 */
64#define FC_CardReset 0x80
65#define FC_ChannelReset 1 /* C320 firmware not supported */
66#define FC_EnableCH 2
67#define FC_DisableCH 3
68#define FC_SetParam 4
69#define FC_SetMode 5
70#define FC_SetRate 6
71#define FC_LineControl 7
72#define FC_LineStatus 8
73#define FC_XmitControl 9
74#define FC_FlushQueue 10
75#define FC_SendBreak 11
76#define FC_StopBreak 12
77#define FC_LoopbackON 13
78#define FC_LoopbackOFF 14
79#define FC_ClrIrqTable 15
80#define FC_SendXon 16
81#define FC_SetTermIrq 17 /* C320 firmware not supported */
82#define FC_SetCntIrq 18 /* C320 firmware not supported */
83#define FC_SetBreakIrq 19
84#define FC_SetLineIrq 20
85#define FC_SetFlowCtl 21
86#define FC_GenIrq 22
87#define FC_InCD180 23
88#define FC_OutCD180 24
89#define FC_InUARTreg 23
90#define FC_OutUARTreg 24
91#define FC_SetXonXoff 25
92#define FC_OutCD180CCR 26
93#define FC_ExtIQueue 27
94#define FC_ExtOQueue 28
95#define FC_ClrLineIrq 29
96#define FC_HWFlowCtl 30
97#define FC_GetClockRate 35
98#define FC_SetBaud 36
99#define FC_SetDataMode 41
100#define FC_GetCCSR 43
101#define FC_GetDataError 45
102#define FC_RxControl 50
103#define FC_ImmSend 51
104#define FC_SetXonState 52
105#define FC_SetXoffState 53
106#define FC_SetRxFIFOTrig 54
107#define FC_SetTxFIFOCnt 55
108#define FC_UnixRate 56
109#define FC_UnixResetTimer 57
110
111#define RxFIFOTrig1 0
112#define RxFIFOTrig4 1
113#define RxFIFOTrig8 2
114#define RxFIFOTrig14 3
115
116/*
117 * Dual-Ported RAM
118 */
119#define DRAM_global 0
120#define INT_data (DRAM_global + 0)
121#define Config_base (DRAM_global + 0x108)
122
123#define IRQindex (INT_data + 0)
124#define IRQpending (INT_data + 4)
125#define IRQtable (INT_data + 8)
126
127/*
128 * Interrupt Status
129 */
130#define IntrRx 0x01 /* receiver data O.K. */
131#define IntrTx 0x02 /* transmit buffer empty */
132#define IntrFunc 0x04 /* function complete */
133#define IntrBreak 0x08 /* received break */
134#define IntrLine 0x10 /* line status change
135 for transmitter */
136#define IntrIntr 0x20 /* received INTR code */
137#define IntrQuit 0x40 /* received QUIT code */
138#define IntrEOF 0x80 /* received EOF code */
139
140#define IntrRxTrigger 0x100 /* rx data count reach tigger value */
141#define IntrTxTrigger 0x200 /* tx data count below trigger value */
142
143#define Magic_no (Config_base + 0)
144#define Card_model_no (Config_base + 2)
145#define Total_ports (Config_base + 4)
146#define Module_cnt (Config_base + 8)
147#define Module_no (Config_base + 10)
148#define Timer_10ms (Config_base + 14)
149#define Disable_IRQ (Config_base + 20)
150#define TMS320_PORT1 (Config_base + 22)
151#define TMS320_PORT2 (Config_base + 24)
152#define TMS320_CLOCK (Config_base + 26)
153
154/*
155 * DATA BUFFER in DRAM
156 */
157#define Extern_table 0x400 /* Base address of the external table
158 (24 words * 64) total 3K bytes
159 (24 words * 128) total 6K bytes */
160#define Extern_size 0x60 /* 96 bytes */
161#define RXrptr 0x00 /* read pointer for RX buffer */
162#define RXwptr 0x02 /* write pointer for RX buffer */
163#define TXrptr 0x04 /* read pointer for TX buffer */
164#define TXwptr 0x06 /* write pointer for TX buffer */
165#define HostStat 0x08 /* IRQ flag and general flag */
166#define FlagStat 0x0A
167#define FlowControl 0x0C /* B7 B6 B5 B4 B3 B2 B1 B0 */
168 /* x x x x | | | | */
169 /* | | | + CTS flow */
170 /* | | +--- RTS flow */
171 /* | +------ TX Xon/Xoff */
172 /* +--------- RX Xon/Xoff */
173#define Break_cnt 0x0E /* received break count */
174#define CD180TXirq 0x10 /* if non-0: enable TX irq */
175#define RX_mask 0x12
176#define TX_mask 0x14
177#define Ofs_rxb 0x16
178#define Ofs_txb 0x18
179#define Page_rxb 0x1A
180#define Page_txb 0x1C
181#define EndPage_rxb 0x1E
182#define EndPage_txb 0x20
183#define Data_error 0x22
184#define RxTrigger 0x28
185#define TxTrigger 0x2a
186
187#define rRXwptr 0x34
188#define Low_water 0x36
189
190#define FuncCode 0x40
191#define FuncArg 0x42
192#define FuncArg1 0x44
193
194#define C218rx_size 0x2000 /* 8K bytes */
195#define C218tx_size 0x8000 /* 32K bytes */
196
197#define C218rx_mask (C218rx_size - 1)
198#define C218tx_mask (C218tx_size - 1)
199
200#define C320p8rx_size 0x2000
201#define C320p8tx_size 0x8000
202#define C320p8rx_mask (C320p8rx_size - 1)
203#define C320p8tx_mask (C320p8tx_size - 1)
204
205#define C320p16rx_size 0x2000
206#define C320p16tx_size 0x4000
207#define C320p16rx_mask (C320p16rx_size - 1)
208#define C320p16tx_mask (C320p16tx_size - 1)
209
210#define C320p24rx_size 0x2000
211#define C320p24tx_size 0x2000
212#define C320p24rx_mask (C320p24rx_size - 1)
213#define C320p24tx_mask (C320p24tx_size - 1)
214
215#define C320p32rx_size 0x1000
216#define C320p32tx_size 0x1000
217#define C320p32rx_mask (C320p32rx_size - 1)
218#define C320p32tx_mask (C320p32tx_size - 1)
219
220#define Page_size 0x2000U
221#define Page_mask (Page_size - 1)
222#define C218rx_spage 3
223#define C218tx_spage 4
224#define C218rx_pageno 1
225#define C218tx_pageno 4
226#define C218buf_pageno 5
227
228#define C320p8rx_spage 3
229#define C320p8tx_spage 4
230#define C320p8rx_pgno 1
231#define C320p8tx_pgno 4
232#define C320p8buf_pgno 5
233
234#define C320p16rx_spage 3
235#define C320p16tx_spage 4
236#define C320p16rx_pgno 1
237#define C320p16tx_pgno 2
238#define C320p16buf_pgno 3
239
240#define C320p24rx_spage 3
241#define C320p24tx_spage 4
242#define C320p24rx_pgno 1
243#define C320p24tx_pgno 1
244#define C320p24buf_pgno 2
245
246#define C320p32rx_spage 3
247#define C320p32tx_ofs C320p32rx_size
248#define C320p32tx_spage 3
249#define C320p32buf_pgno 1
250
251/*
252 * Host Status
253 */
254#define WakeupRx 0x01
255#define WakeupTx 0x02
256#define WakeupBreak 0x08
257#define WakeupLine 0x10
258#define WakeupIntr 0x20
259#define WakeupQuit 0x40
260#define WakeupEOF 0x80 /* used in VTIME control */
261#define WakeupRxTrigger 0x100
262#define WakeupTxTrigger 0x200
263/*
264 * Flag status
265 */
266#define Rx_over 0x01
267#define Xoff_state 0x02
268#define Tx_flowOff 0x04
269#define Tx_enable 0x08
270#define CTS_state 0x10
271#define DSR_state 0x20
272#define DCD_state 0x80
273/*
274 * FlowControl
275 */
276#define CTS_FlowCtl 1
277#define RTS_FlowCtl 2
278#define Tx_FlowCtl 4
279#define Rx_FlowCtl 8
280#define IXM_IXANY 0x10
281
282#define LowWater 128
283
284#define DTR_ON 1
285#define RTS_ON 2
286#define CTS_ON 1
287#define DSR_ON 2
288#define DCD_ON 8
289
290/* mode definition */
291#define MX_CS8 0x03
292#define MX_CS7 0x02
293#define MX_CS6 0x01
294#define MX_CS5 0x00
295
296#define MX_STOP1 0x00
297#define MX_STOP15 0x04
298#define MX_STOP2 0x08
299
300#define MX_PARNONE 0x00
301#define MX_PAREVEN 0x40
302#define MX_PARODD 0xC0
303
304#endif
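[Editor's note] The new moxa.h pins down the dual-ported RAM layout: each port owns a 0x60-byte descriptor starting at Extern_table, and moxaloadcode() above caches that address as tableAddr. A short sketch of the address computation using only the constants defined here; the helper name is illustrative and the result is a plain offset so the sketch stays independent of ioremap()/readw():

#include <stdint.h>

#define Extern_table 0x400      /* base of the per-port descriptor table */
#define Extern_size  0x60       /* one 96-byte descriptor per port       */
#define TXrptr       0x04       /* offsets inside a descriptor ...       */
#define TXwptr       0x06

/* tableAddr as set up in moxaloadcode(): board base + table base +
 * port index * descriptor size. */
static uint32_t port_table_offset(unsigned int port_index)
{
	return Extern_table + Extern_size * port_index;
}

/* e.g. the TX write pointer of port 3 lives at
 *      base + port_table_offset(3) + TXwptr */
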
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index ff146c2b08fd..fe2a95b5d3c0 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -180,7 +180,7 @@ mspec_close(struct vm_area_struct *vma)
180 my_page = vdata->maddr[index]; 180 my_page = vdata->maddr[index];
181 vdata->maddr[index] = 0; 181 vdata->maddr[index] = 0;
182 if (!mspec_zero_block(my_page, PAGE_SIZE)) 182 if (!mspec_zero_block(my_page, PAGE_SIZE))
183 uncached_free_page(my_page); 183 uncached_free_page(my_page, 1);
184 else 184 else
185 printk(KERN_WARNING "mspec_close(): " 185 printk(KERN_WARNING "mspec_close(): "
186 "failed to zero page %ld\n", my_page); 186 "failed to zero page %ld\n", my_page);
@@ -209,7 +209,7 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
209 index = (address - vdata->vm_start) >> PAGE_SHIFT; 209 index = (address - vdata->vm_start) >> PAGE_SHIFT;
210 maddr = (volatile unsigned long) vdata->maddr[index]; 210 maddr = (volatile unsigned long) vdata->maddr[index];
211 if (maddr == 0) { 211 if (maddr == 0) {
212 maddr = uncached_alloc_page(numa_node_id()); 212 maddr = uncached_alloc_page(numa_node_id(), 1);
213 if (maddr == 0) 213 if (maddr == 0)
214 return NOPFN_OOM; 214 return NOPFN_OOM;
215 215
@@ -218,7 +218,7 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
218 vdata->count++; 218 vdata->count++;
219 vdata->maddr[index] = maddr; 219 vdata->maddr[index] = maddr;
220 } else { 220 } else {
221 uncached_free_page(maddr); 221 uncached_free_page(maddr, 1);
222 maddr = vdata->maddr[index]; 222 maddr = vdata->maddr[index];
223 } 223 }
224 spin_unlock(&vdata->lock); 224 spin_unlock(&vdata->lock);
@@ -367,7 +367,7 @@ mspec_init(void)
367 int nasid; 367 int nasid;
368 unsigned long phys; 368 unsigned long phys;
369 369
370 scratch_page[nid] = uncached_alloc_page(nid); 370 scratch_page[nid] = uncached_alloc_page(nid, 1);
371 if (scratch_page[nid] == 0) 371 if (scratch_page[nid] == 0)
372 goto free_scratch_pages; 372 goto free_scratch_pages;
373 phys = __pa(scratch_page[nid]); 373 phys = __pa(scratch_page[nid]);
@@ -414,7 +414,7 @@ mspec_init(void)
414 free_scratch_pages: 414 free_scratch_pages:
415 for_each_node(nid) { 415 for_each_node(nid) {
416 if (scratch_page[nid] != 0) 416 if (scratch_page[nid] != 0)
417 uncached_free_page(scratch_page[nid]); 417 uncached_free_page(scratch_page[nid], 1);
418 } 418 }
419 return ret; 419 return ret;
420} 420}
@@ -431,7 +431,7 @@ mspec_exit(void)
431 431
432 for_each_node(nid) { 432 for_each_node(nid) {
433 if (scratch_page[nid] != 0) 433 if (scratch_page[nid] != 0)
434 uncached_free_page(scratch_page[nid]); 434 uncached_free_page(scratch_page[nid], 1);
435 } 435 }
436 } 436 }
437} 437}
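[Editor's note] The mspec hunks above track an interface change in the ia64 uncached allocator: uncached_alloc_page() and uncached_free_page() now take a page-count argument (always 1 here). A hedged sketch of paired use under the (nid, n_pages) signature the hunks imply; the header names and surrounding context are assumptions, and this is kernel code, not buildable standalone:

/* Sketch only: signatures inferred from the hunks above. */
#include <asm/uncached.h>       /* assumed location of the prototypes */
#include <linux/topology.h>     /* numa_node_id() */

static int touch_uncached_page(void)
{
	unsigned long addr = uncached_alloc_page(numa_node_id(), 1);

	if (addr == 0)                  /* 0 means allocation failed */
		return -1;
	/* ... use the uncached page ... */
	uncached_free_page(addr, 1);    /* free the same number of pages */
	return 0;
}
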
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 68c2e9234691..4b81a85c5b53 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -307,6 +307,200 @@ static unsigned char mxser_msr[MXSER_PORTS + 1];
307static struct mxser_mon_ext mon_data_ext; 307static struct mxser_mon_ext mon_data_ext;
308static int mxser_set_baud_method[MXSER_PORTS + 1]; 308static int mxser_set_baud_method[MXSER_PORTS + 1];
309 309
310static void mxser_enable_must_enchance_mode(unsigned long baseio)
311{
312 u8 oldlcr;
313 u8 efr;
314
315 oldlcr = inb(baseio + UART_LCR);
316 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
317
318 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
319 efr |= MOXA_MUST_EFR_EFRB_ENABLE;
320
321 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
322 outb(oldlcr, baseio + UART_LCR);
323}
324
325static void mxser_disable_must_enchance_mode(unsigned long baseio)
326{
327 u8 oldlcr;
328 u8 efr;
329
330 oldlcr = inb(baseio + UART_LCR);
331 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
332
333 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
334 efr &= ~MOXA_MUST_EFR_EFRB_ENABLE;
335
336 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
337 outb(oldlcr, baseio + UART_LCR);
338}
339
340static void mxser_set_must_xon1_value(unsigned long baseio, u8 value)
341{
342 u8 oldlcr;
343 u8 efr;
344
345 oldlcr = inb(baseio + UART_LCR);
346 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
347
348 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
349 efr &= ~MOXA_MUST_EFR_BANK_MASK;
350 efr |= MOXA_MUST_EFR_BANK0;
351
352 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
353 outb(value, baseio + MOXA_MUST_XON1_REGISTER);
354 outb(oldlcr, baseio + UART_LCR);
355}
356
357static void mxser_set_must_xoff1_value(unsigned long baseio, u8 value)
358{
359 u8 oldlcr;
360 u8 efr;
361
362 oldlcr = inb(baseio + UART_LCR);
363 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
364
365 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
366 efr &= ~MOXA_MUST_EFR_BANK_MASK;
367 efr |= MOXA_MUST_EFR_BANK0;
368
369 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
370 outb(value, baseio + MOXA_MUST_XOFF1_REGISTER);
371 outb(oldlcr, baseio + UART_LCR);
372}
373
374static void mxser_set_must_fifo_value(struct mxser_port *info)
375{
376 u8 oldlcr;
377 u8 efr;
378
379 oldlcr = inb(info->ioaddr + UART_LCR);
380 outb(MOXA_MUST_ENTER_ENCHANCE, info->ioaddr + UART_LCR);
381
382 efr = inb(info->ioaddr + MOXA_MUST_EFR_REGISTER);
383 efr &= ~MOXA_MUST_EFR_BANK_MASK;
384 efr |= MOXA_MUST_EFR_BANK1;
385
386 outb(efr, info->ioaddr + MOXA_MUST_EFR_REGISTER);
387 outb((u8)info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTH_REGISTER);
388 outb((u8)info->rx_trigger, info->ioaddr + MOXA_MUST_RBRTI_REGISTER);
389 outb((u8)info->rx_low_water, info->ioaddr + MOXA_MUST_RBRTL_REGISTER);
390 outb(oldlcr, info->ioaddr + UART_LCR);
391}
392
393static void mxser_set_must_enum_value(unsigned long baseio, u8 value)
394{
395 u8 oldlcr;
396 u8 efr;
397
398 oldlcr = inb(baseio + UART_LCR);
399 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
400
401 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
402 efr &= ~MOXA_MUST_EFR_BANK_MASK;
403 efr |= MOXA_MUST_EFR_BANK2;
404
405 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
406 outb(value, baseio + MOXA_MUST_ENUM_REGISTER);
407 outb(oldlcr, baseio + UART_LCR);
408}
409
410static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
411{
412 u8 oldlcr;
413 u8 efr;
414
415 oldlcr = inb(baseio + UART_LCR);
416 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
417
418 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
419 efr &= ~MOXA_MUST_EFR_BANK_MASK;
420 efr |= MOXA_MUST_EFR_BANK2;
421
422 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
423 *pId = inb(baseio + MOXA_MUST_HWID_REGISTER);
424 outb(oldlcr, baseio + UART_LCR);
425}
426
427static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio)
428{
429 u8 oldlcr;
430 u8 efr;
431
432 oldlcr = inb(baseio + UART_LCR);
433 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
434
435 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
436 efr &= ~MOXA_MUST_EFR_SF_MASK;
437
438 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
439 outb(oldlcr, baseio + UART_LCR);
440}
441
442static void mxser_enable_must_tx_software_flow_control(unsigned long baseio)
443{
444 u8 oldlcr;
445 u8 efr;
446
447 oldlcr = inb(baseio + UART_LCR);
448 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
449
450 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
451 efr &= ~MOXA_MUST_EFR_SF_TX_MASK;
452 efr |= MOXA_MUST_EFR_SF_TX1;
453
454 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
455 outb(oldlcr, baseio + UART_LCR);
456}
457
458static void mxser_disable_must_tx_software_flow_control(unsigned long baseio)
459{
460 u8 oldlcr;
461 u8 efr;
462
463 oldlcr = inb(baseio + UART_LCR);
464 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
465
466 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
467 efr &= ~MOXA_MUST_EFR_SF_TX_MASK;
468
469 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
470 outb(oldlcr, baseio + UART_LCR);
471}
472
473static void mxser_enable_must_rx_software_flow_control(unsigned long baseio)
474{
475 u8 oldlcr;
476 u8 efr;
477
478 oldlcr = inb(baseio + UART_LCR);
479 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
480
481 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
482 efr &= ~MOXA_MUST_EFR_SF_RX_MASK;
483 efr |= MOXA_MUST_EFR_SF_RX1;
484
485 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
486 outb(oldlcr, baseio + UART_LCR);
487}
488
489static void mxser_disable_must_rx_software_flow_control(unsigned long baseio)
490{
491 u8 oldlcr;
492 u8 efr;
493
494 oldlcr = inb(baseio + UART_LCR);
495 outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
496
497 efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
498 efr &= ~MOXA_MUST_EFR_SF_RX_MASK;
499
500 outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
501 outb(oldlcr, baseio + UART_LCR);
502}
503
310#ifdef CONFIG_PCI 504#ifdef CONFIG_PCI
311static int __devinit CheckIsMoxaMust(unsigned long io) 505static int __devinit CheckIsMoxaMust(unsigned long io)
312{ 506{
@@ -314,16 +508,16 @@ static int __devinit CheckIsMoxaMust(unsigned long io)
314 int i; 508 int i;
315 509
316 outb(0, io + UART_LCR); 510 outb(0, io + UART_LCR);
317 DISABLE_MOXA_MUST_ENCHANCE_MODE(io); 511 mxser_disable_must_enchance_mode(io);
318 oldmcr = inb(io + UART_MCR); 512 oldmcr = inb(io + UART_MCR);
319 outb(0, io + UART_MCR); 513 outb(0, io + UART_MCR);
320 SET_MOXA_MUST_XON1_VALUE(io, 0x11); 514 mxser_set_must_xon1_value(io, 0x11);
321 if ((hwid = inb(io + UART_MCR)) != 0) { 515 if ((hwid = inb(io + UART_MCR)) != 0) {
322 outb(oldmcr, io + UART_MCR); 516 outb(oldmcr, io + UART_MCR);
323 return MOXA_OTHER_UART; 517 return MOXA_OTHER_UART;
324 } 518 }
325 519
326 GET_MOXA_MUST_HARDWARE_ID(io, &hwid); 520 mxser_get_must_hardware_id(io, &hwid);
327 for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */ 521 for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */
328 if (hwid == Gpci_uart_info[i].type) 522 if (hwid == Gpci_uart_info[i].type)
329 return (int)hwid; 523 return (int)hwid;
@@ -494,10 +688,10 @@ static int mxser_set_baud(struct mxser_port *info, long newspd)
494 } else 688 } else
495 quot /= newspd; 689 quot /= newspd;
496 690
497 SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, quot); 691 mxser_set_must_enum_value(info->ioaddr, quot);
498 } else 692 } else
499#endif 693#endif
500 SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, 0); 694 mxser_set_must_enum_value(info->ioaddr, 0);
501 695
502 return 0; 696 return 0;
503} 697}
@@ -553,14 +747,14 @@ static int mxser_change_speed(struct mxser_port *info,
553 if (info->board->chip_flag) { 747 if (info->board->chip_flag) {
554 fcr = UART_FCR_ENABLE_FIFO; 748 fcr = UART_FCR_ENABLE_FIFO;
555 fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE; 749 fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
556 SET_MOXA_MUST_FIFO_VALUE(info); 750 mxser_set_must_fifo_value(info);
557 } else 751 } else
558 fcr = 0; 752 fcr = 0;
559 } else { 753 } else {
560 fcr = UART_FCR_ENABLE_FIFO; 754 fcr = UART_FCR_ENABLE_FIFO;
561 if (info->board->chip_flag) { 755 if (info->board->chip_flag) {
562 fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE; 756 fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
563 SET_MOXA_MUST_FIFO_VALUE(info); 757 mxser_set_must_fifo_value(info);
564 } else { 758 } else {
565 switch (info->rx_trigger) { 759 switch (info->rx_trigger) {
566 case 1: 760 case 1:
@@ -657,17 +851,21 @@ static int mxser_change_speed(struct mxser_port *info,
657 } 851 }
658 } 852 }
659 if (info->board->chip_flag) { 853 if (info->board->chip_flag) {
660 SET_MOXA_MUST_XON1_VALUE(info->ioaddr, START_CHAR(info->tty)); 854 mxser_set_must_xon1_value(info->ioaddr, START_CHAR(info->tty));
661 SET_MOXA_MUST_XOFF1_VALUE(info->ioaddr, STOP_CHAR(info->tty)); 855 mxser_set_must_xoff1_value(info->ioaddr, STOP_CHAR(info->tty));
662 if (I_IXON(info->tty)) { 856 if (I_IXON(info->tty)) {
663 ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr); 857 mxser_enable_must_rx_software_flow_control(
858 info->ioaddr);
664 } else { 859 } else {
665 DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr); 860 mxser_disable_must_rx_software_flow_control(
861 info->ioaddr);
666 } 862 }
667 if (I_IXOFF(info->tty)) { 863 if (I_IXOFF(info->tty)) {
668 ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr); 864 mxser_enable_must_tx_software_flow_control(
865 info->ioaddr);
669 } else { 866 } else {
670 DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr); 867 mxser_disable_must_tx_software_flow_control(
868 info->ioaddr);
671 } 869 }
672 } 870 }
673 871
@@ -927,6 +1125,27 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
927 return 0; 1125 return 0;
928} 1126}
929 1127
1128static void mxser_flush_buffer(struct tty_struct *tty)
1129{
1130 struct mxser_port *info = tty->driver_data;
1131 char fcr;
1132 unsigned long flags;
1133
1134
1135 spin_lock_irqsave(&info->slock, flags);
1136 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1137
1138 fcr = inb(info->ioaddr + UART_FCR);
1139 outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
1140 info->ioaddr + UART_FCR);
1141 outb(fcr, info->ioaddr + UART_FCR);
1142
1143 spin_unlock_irqrestore(&info->slock, flags);
1144
1145 tty_wakeup(tty);
1146}
1147
1148
930/* 1149/*
931 * This routine is called when the serial port gets closed. First, we 1150 * This routine is called when the serial port gets closed. First, we
932 * wait for the last remaining data to be sent. Then, we unlink its 1151 * wait for the last remaining data to be sent. Then, we unlink its
@@ -1013,9 +1232,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
1013 } 1232 }
1014 mxser_shutdown(info); 1233 mxser_shutdown(info);
1015 1234
1016 if (tty->driver->flush_buffer) 1235 mxser_flush_buffer(tty);
1017 tty->driver->flush_buffer(tty);
1018
1019 tty_ldisc_flush(tty); 1236 tty_ldisc_flush(tty);
1020 1237
1021 tty->closing = 0; 1238 tty->closing = 0;
@@ -1072,16 +1289,16 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
1072 return total; 1289 return total;
1073} 1290}
1074 1291
1075static void mxser_put_char(struct tty_struct *tty, unsigned char ch) 1292static int mxser_put_char(struct tty_struct *tty, unsigned char ch)
1076{ 1293{
1077 struct mxser_port *info = tty->driver_data; 1294 struct mxser_port *info = tty->driver_data;
1078 unsigned long flags; 1295 unsigned long flags;
1079 1296
1080 if (!info->xmit_buf) 1297 if (!info->xmit_buf)
1081 return; 1298 return 0;
1082 1299
1083 if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1) 1300 if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
1084 return; 1301 return 0;
1085 1302
1086 spin_lock_irqsave(&info->slock, flags); 1303 spin_lock_irqsave(&info->slock, flags);
1087 info->xmit_buf[info->xmit_head++] = ch; 1304 info->xmit_buf[info->xmit_head++] = ch;
@@ -1099,6 +1316,7 @@ static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
1099 spin_unlock_irqrestore(&info->slock, flags); 1316 spin_unlock_irqrestore(&info->slock, flags);
1100 } 1317 }
1101 } 1318 }
1319 return 1;
1102} 1320}
1103 1321
1104 1322
@@ -1142,26 +1360,6 @@ static int mxser_chars_in_buffer(struct tty_struct *tty)
1142 return info->xmit_cnt; 1360 return info->xmit_cnt;
1143} 1361}
1144 1362
1145static void mxser_flush_buffer(struct tty_struct *tty)
1146{
1147 struct mxser_port *info = tty->driver_data;
1148 char fcr;
1149 unsigned long flags;
1150
1151
1152 spin_lock_irqsave(&info->slock, flags);
1153 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1154
1155 fcr = inb(info->ioaddr + UART_FCR);
1156 outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
1157 info->ioaddr + UART_FCR);
1158 outb(fcr, info->ioaddr + UART_FCR);
1159
1160 spin_unlock_irqrestore(&info->slock, flags);
1161
1162 tty_wakeup(tty);
1163}
1164
1165/* 1363/*
1166 * ------------------------------------------------------------ 1364 * ------------------------------------------------------------
1167 * friends of mxser_ioctl() 1365 * friends of mxser_ioctl()
@@ -1460,6 +1658,7 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1460 struct mxser_port *port; 1658 struct mxser_port *port;
1461 int result, status; 1659 int result, status;
1462 unsigned int i, j; 1660 unsigned int i, j;
1661 int ret = 0;
1463 1662
1464 switch (cmd) { 1663 switch (cmd) {
1465 case MOXA_GET_MAJOR: 1664 case MOXA_GET_MAJOR:
@@ -1467,18 +1666,21 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1467 1666
1468 case MOXA_CHKPORTENABLE: 1667 case MOXA_CHKPORTENABLE:
1469 result = 0; 1668 result = 0;
1470 1669 lock_kernel();
1471 for (i = 0; i < MXSER_BOARDS; i++) 1670 for (i = 0; i < MXSER_BOARDS; i++)
1472 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) 1671 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++)
1473 if (mxser_boards[i].ports[j].ioaddr) 1672 if (mxser_boards[i].ports[j].ioaddr)
1474 result |= (1 << i); 1673 result |= (1 << i);
1475 1674 unlock_kernel();
1476 return put_user(result, (unsigned long __user *)argp); 1675 return put_user(result, (unsigned long __user *)argp);
1477 case MOXA_GETDATACOUNT: 1676 case MOXA_GETDATACOUNT:
1677 lock_kernel();
1478 if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log))) 1678 if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
1479 return -EFAULT; 1679 ret = -EFAULT;
1480 return 0; 1680 unlock_kernel();
1681 return ret;
1481 case MOXA_GETMSTATUS: 1682 case MOXA_GETMSTATUS:
1683 lock_kernel();
1482 for (i = 0; i < MXSER_BOARDS; i++) 1684 for (i = 0; i < MXSER_BOARDS; i++)
1483 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) { 1685 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
1484 port = &mxser_boards[i].ports[j]; 1686 port = &mxser_boards[i].ports[j];
@@ -1515,6 +1717,7 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1515 else 1717 else
1516 GMStatus[i].cts = 0; 1718 GMStatus[i].cts = 0;
1517 } 1719 }
1720 unlock_kernel();
1518 if (copy_to_user(argp, GMStatus, 1721 if (copy_to_user(argp, GMStatus,
1519 sizeof(struct mxser_mstatus) * MXSER_PORTS)) 1722 sizeof(struct mxser_mstatus) * MXSER_PORTS))
1520 return -EFAULT; 1723 return -EFAULT;
@@ -1524,7 +1727,8 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1524 unsigned long opmode; 1727 unsigned long opmode;
1525 unsigned cflag, iflag; 1728 unsigned cflag, iflag;
1526 1729
1527 for (i = 0; i < MXSER_BOARDS; i++) 1730 lock_kernel();
1731 for (i = 0; i < MXSER_BOARDS; i++) {
1528 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) { 1732 for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
1529 port = &mxser_boards[i].ports[j]; 1733 port = &mxser_boards[i].ports[j];
1530 if (!port->ioaddr) 1734 if (!port->ioaddr)
@@ -1589,13 +1793,14 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1589 mon_data_ext.iftype[i] = opmode; 1793 mon_data_ext.iftype[i] = opmode;
1590 1794
1591 } 1795 }
1592 if (copy_to_user(argp, &mon_data_ext, 1796 }
1593 sizeof(mon_data_ext))) 1797 unlock_kernel();
1594 return -EFAULT; 1798 if (copy_to_user(argp, &mon_data_ext,
1595 1799 sizeof(mon_data_ext)))
1596 return 0; 1800 return -EFAULT;
1597 1801 return 0;
1598 } default: 1802 }
1803 default:
1599 return -ENOIOCTLCMD; 1804 return -ENOIOCTLCMD;
1600 } 1805 }
1601 return 0; 1806 return 0;
@@ -1651,16 +1856,20 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1651 opmode != RS422_MODE && 1856 opmode != RS422_MODE &&
1652 opmode != RS485_4WIRE_MODE) 1857 opmode != RS485_4WIRE_MODE)
1653 return -EFAULT; 1858 return -EFAULT;
1859 lock_kernel();
1654 mask = ModeMask[p]; 1860 mask = ModeMask[p];
1655 shiftbit = p * 2; 1861 shiftbit = p * 2;
1656 val = inb(info->opmode_ioaddr); 1862 val = inb(info->opmode_ioaddr);
1657 val &= mask; 1863 val &= mask;
1658 val |= (opmode << shiftbit); 1864 val |= (opmode << shiftbit);
1659 outb(val, info->opmode_ioaddr); 1865 outb(val, info->opmode_ioaddr);
1866 unlock_kernel();
1660 } else { 1867 } else {
1868 lock_kernel();
1661 shiftbit = p * 2; 1869 shiftbit = p * 2;
1662 opmode = inb(info->opmode_ioaddr) >> shiftbit; 1870 opmode = inb(info->opmode_ioaddr) >> shiftbit;
1663 opmode &= OP_MODE_MASK; 1871 opmode &= OP_MODE_MASK;
1872 unlock_kernel();
1664 if (put_user(opmode, (int __user *)argp)) 1873 if (put_user(opmode, (int __user *)argp))
1665 return -EFAULT; 1874 return -EFAULT;
1666 } 1875 }
@@ -1687,19 +1896,18 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1687 tty_wait_until_sent(tty, 0); 1896 tty_wait_until_sent(tty, 0);
1688 mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4); 1897 mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
1689 return 0; 1898 return 0;
1690 case TIOCGSOFTCAR:
1691 return put_user(!!C_CLOCAL(tty), (unsigned long __user *)argp);
1692 case TIOCSSOFTCAR:
1693 if (get_user(arg, (unsigned long __user *)argp))
1694 return -EFAULT;
1695 tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
1696 return 0;
1697 case TIOCGSERIAL: 1899 case TIOCGSERIAL:
1698 return mxser_get_serial_info(info, argp); 1900 lock_kernel();
1901 retval = mxser_get_serial_info(info, argp);
1902 unlock_kernel();
1903 return retval;
1699 case TIOCSSERIAL: 1904 case TIOCSSERIAL:
1700 return mxser_set_serial_info(info, argp); 1905 lock_kernel();
1906 retval = mxser_set_serial_info(info, argp);
1907 unlock_kernel();
1908 return retval;
1701 case TIOCSERGETLSR: /* Get line status register */ 1909 case TIOCSERGETLSR: /* Get line status register */
1702 return mxser_get_lsr_info(info, argp); 1910 return mxser_get_lsr_info(info, argp);
1703 /* 1911 /*
1704 * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change 1912 * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
1705 * - mask passed in arg for lines of interest 1913 * - mask passed in arg for lines of interest
@@ -1746,24 +1954,27 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1746 case MOXA_HighSpeedOn: 1954 case MOXA_HighSpeedOn:
1747 return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp); 1955 return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
1748 case MOXA_SDS_RSTICOUNTER: 1956 case MOXA_SDS_RSTICOUNTER:
1957 lock_kernel();
1749 info->mon_data.rxcnt = 0; 1958 info->mon_data.rxcnt = 0;
1750 info->mon_data.txcnt = 0; 1959 info->mon_data.txcnt = 0;
1960 unlock_kernel();
1751 return 0; 1961 return 0;
1752 1962
1753 case MOXA_ASPP_OQUEUE:{ 1963 case MOXA_ASPP_OQUEUE:{
1754 int len, lsr; 1964 int len, lsr;
1755 1965
1966 lock_kernel();
1756 len = mxser_chars_in_buffer(tty); 1967 len = mxser_chars_in_buffer(tty);
1757
1758 lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT; 1968 lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT;
1759
1760 len += (lsr ? 0 : 1); 1969 len += (lsr ? 0 : 1);
1970 unlock_kernel();
1761 1971
1762 return put_user(len, (int __user *)argp); 1972 return put_user(len, (int __user *)argp);
1763 } 1973 }
1764 case MOXA_ASPP_MON: { 1974 case MOXA_ASPP_MON: {
1765 int mcr, status; 1975 int mcr, status;
1766 1976
1977 lock_kernel();
1767 status = mxser_get_msr(info->ioaddr, 1, tty->index); 1978 status = mxser_get_msr(info->ioaddr, 1, tty->index);
1768 mxser_check_modem_status(info, status); 1979 mxser_check_modem_status(info, status);
1769 1980
@@ -1782,7 +1993,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
1782 info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD; 1993 info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
1783 else 1994 else
1784 info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD; 1995 info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
1785 1996 unlock_kernel();
1786 if (copy_to_user(argp, &info->mon_data, 1997 if (copy_to_user(argp, &info->mon_data,
1787 sizeof(struct mxser_mon))) 1998 sizeof(struct mxser_mon)))
1788 return -EFAULT; 1999 return -EFAULT;
@@ -1925,7 +2136,8 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
1925 2136
1926 if (info->board->chip_flag) { 2137 if (info->board->chip_flag) {
1927 spin_lock_irqsave(&info->slock, flags); 2138 spin_lock_irqsave(&info->slock, flags);
1928 DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr); 2139 mxser_disable_must_rx_software_flow_control(
2140 info->ioaddr);
1929 spin_unlock_irqrestore(&info->slock, flags); 2141 spin_unlock_irqrestore(&info->slock, flags);
1930 } 2142 }
1931 2143
@@ -1979,6 +2191,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
1979 timeout, char_time); 2191 timeout, char_time);
1980 printk("jiff=%lu...", jiffies); 2192 printk("jiff=%lu...", jiffies);
1981#endif 2193#endif
2194 lock_kernel();
1982 while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) { 2195 while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
1983#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT 2196#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
1984 printk("lsr = %d (jiff=%lu)...", lsr, jiffies); 2197 printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
@@ -1990,6 +2203,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
1990 break; 2203 break;
1991 } 2204 }
1992 set_current_state(TASK_RUNNING); 2205 set_current_state(TASK_RUNNING);
2206 unlock_kernel();
1993 2207
1994#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT 2208#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
1995 printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); 2209 printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
@@ -2342,7 +2556,7 @@ static int __devinit mxser_initbrd(struct mxser_board *brd,
2342 2556
2343 /* Enhance mode enabled here */ 2557 /* Enhance mode enabled here */
2344 if (brd->chip_flag != MOXA_OTHER_UART) 2558 if (brd->chip_flag != MOXA_OTHER_UART)
2345 ENABLE_MOXA_MUST_ENCHANCE_MODE(info->ioaddr); 2559 mxser_enable_must_enchance_mode(info->ioaddr);
2346 2560
2347 info->flags = ASYNC_SHARE_IRQ; 2561 info->flags = ASYNC_SHARE_IRQ;
2348 info->type = brd->uart_type; 2562 info->type = brd->uart_type;
diff --git a/drivers/char/mxser.h b/drivers/char/mxser.h
index 844171115954..41878a69203d 100644
--- a/drivers/char/mxser.h
+++ b/drivers/char/mxser.h
@@ -147,141 +147,4 @@
147/* Rx software flow control mask */ 147/* Rx software flow control mask */
148#define MOXA_MUST_EFR_SF_RX_MASK 0x03 148#define MOXA_MUST_EFR_SF_RX_MASK 0x03
149 149
150#define ENABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
151 u8 __oldlcr, __efr; \
152 __oldlcr = inb((baseio)+UART_LCR); \
153 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
154 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
155 __efr |= MOXA_MUST_EFR_EFRB_ENABLE; \
156 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
157 outb(__oldlcr, (baseio)+UART_LCR); \
158} while (0)
159
160#define DISABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
161 u8 __oldlcr, __efr; \
162 __oldlcr = inb((baseio)+UART_LCR); \
163 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
164 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
165 __efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; \
166 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
167 outb(__oldlcr, (baseio)+UART_LCR); \
168} while (0)
169
170#define SET_MOXA_MUST_XON1_VALUE(baseio, Value) do { \
171 u8 __oldlcr, __efr; \
172 __oldlcr = inb((baseio)+UART_LCR); \
173 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
174 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
175 __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
176 __efr |= MOXA_MUST_EFR_BANK0; \
177 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
178 outb((u8)(Value), (baseio)+MOXA_MUST_XON1_REGISTER); \
179 outb(__oldlcr, (baseio)+UART_LCR); \
180} while (0)
181
182#define SET_MOXA_MUST_XOFF1_VALUE(baseio, Value) do { \
183 u8 __oldlcr, __efr; \
184 __oldlcr = inb((baseio)+UART_LCR); \
185 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
186 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
187 __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
188 __efr |= MOXA_MUST_EFR_BANK0; \
189 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
190 outb((u8)(Value), (baseio)+MOXA_MUST_XOFF1_REGISTER); \
191 outb(__oldlcr, (baseio)+UART_LCR); \
192} while (0)
193
194#define SET_MOXA_MUST_FIFO_VALUE(info) do { \
195 u8 __oldlcr, __efr; \
196 __oldlcr = inb((info)->ioaddr+UART_LCR); \
197 outb(MOXA_MUST_ENTER_ENCHANCE, (info)->ioaddr+UART_LCR);\
198 __efr = inb((info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
199 __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
200 __efr |= MOXA_MUST_EFR_BANK1; \
201 outb(__efr, (info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
202 outb((u8)((info)->rx_high_water), (info)->ioaddr+ \
203 MOXA_MUST_RBRTH_REGISTER); \
204 outb((u8)((info)->rx_trigger), (info)->ioaddr+ \
205 MOXA_MUST_RBRTI_REGISTER); \
206 outb((u8)((info)->rx_low_water), (info)->ioaddr+ \
207 MOXA_MUST_RBRTL_REGISTER); \
208 outb(__oldlcr, (info)->ioaddr+UART_LCR); \
209} while (0)
210
211#define SET_MOXA_MUST_ENUM_VALUE(baseio, Value) do { \
212 u8 __oldlcr, __efr; \
213 __oldlcr = inb((baseio)+UART_LCR); \
214 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
215 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
216 __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
217 __efr |= MOXA_MUST_EFR_BANK2; \
218 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
219 outb((u8)(Value), (baseio)+MOXA_MUST_ENUM_REGISTER); \
220 outb(__oldlcr, (baseio)+UART_LCR); \
221} while (0)
222
223#define GET_MOXA_MUST_HARDWARE_ID(baseio, pId) do { \
224 u8 __oldlcr, __efr; \
225 __oldlcr = inb((baseio)+UART_LCR); \
226 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
227 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
228 __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
229 __efr |= MOXA_MUST_EFR_BANK2; \
230 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
231 *pId = inb((baseio)+MOXA_MUST_HWID_REGISTER); \
232 outb(__oldlcr, (baseio)+UART_LCR); \
233} while (0)
234
235#define SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(baseio) do { \
236 u8 __oldlcr, __efr; \
237 __oldlcr = inb((baseio)+UART_LCR); \
238 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
239 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
240 __efr &= ~MOXA_MUST_EFR_SF_MASK; \
241 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
242 outb(__oldlcr, (baseio)+UART_LCR); \
243} while (0)
244
245#define ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
246 u8 __oldlcr, __efr; \
247 __oldlcr = inb((baseio)+UART_LCR); \
248 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
249 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
250 __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
251 __efr |= MOXA_MUST_EFR_SF_TX1; \
252 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
253 outb(__oldlcr, (baseio)+UART_LCR); \
254} while (0)
255
256#define DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
257 u8 __oldlcr, __efr; \
258 __oldlcr = inb((baseio)+UART_LCR); \
259 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
260 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
261 __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
262 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
263 outb(__oldlcr, (baseio)+UART_LCR); \
264} while (0)
265
266#define ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
267 u8 __oldlcr, __efr; \
268 __oldlcr = inb((baseio)+UART_LCR); \
269 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
270 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
271 __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
272 __efr |= MOXA_MUST_EFR_SF_RX1; \
273 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
274 outb(__oldlcr, (baseio)+UART_LCR); \
275} while (0)
276
277#define DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
278 u8 __oldlcr, __efr; \
279 __oldlcr = inb((baseio)+UART_LCR); \
280 outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
281 __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
282 __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
283 outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
284 outb(__oldlcr, (baseio)+UART_LCR); \
285} while (0)
286
287#endif 150#endif
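[Editor's note] The removed mxser.h macros and their replacement functions in mxser.c share one access pattern for the MOXA "MUST" UARTs: save LCR, write the enhanced-mode magic so the EFR becomes visible, select a register bank in EFR, program the banked register, restore LCR. A compact userspace model of that sequence; the register array, the magic/mask/bank values in main(), and the function name are all illustrative, not the driver's constants:

#include <stdint.h>
#include <stdio.h>

enum { LCR, EFR, BANKED_REG, NREGS };

struct must_uart {
	uint8_t reg[NREGS];
	uint8_t enhance_magic;  /* stands in for MOXA_MUST_ENTER_ENCHANCE */
	uint8_t bank_mask;      /* stands in for MOXA_MUST_EFR_BANK_MASK  */
};

static void must_write_banked(struct must_uart *u, uint8_t bank, uint8_t val)
{
	uint8_t oldlcr = u->reg[LCR];
	uint8_t efr;

	u->reg[LCR] = u->enhance_magic;         /* expose EFR              */
	efr = u->reg[EFR];
	efr &= ~u->bank_mask;                   /* clear old bank bits     */
	efr |= bank;                            /* select the wanted bank  */
	u->reg[EFR] = efr;
	u->reg[BANKED_REG] = val;               /* e.g. XON1, XOFF1, ENUM  */
	u->reg[LCR] = oldlcr;                   /* leave enhanced mode     */
}

int main(void)
{
	struct must_uart u = { .enhance_magic = 0xbf, .bank_mask = 0x30 };

	must_write_banked(&u, 0x10, 0x11);      /* e.g. set XON1 = DC1 */
	printf("EFR=0x%02x reg=0x%02x LCR=0x%02x\n",
	       u.reg[EFR], u.reg[BANKED_REG], u.reg[LCR]);
	return 0;
}
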
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 82bcfb9c839a..a35bfd7ee80e 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -342,12 +342,10 @@ static int n_hdlc_tty_open (struct tty_struct *tty)
342#endif 342#endif
343 343
344 /* Flush any pending characters in the driver and discipline. */ 344 /* Flush any pending characters in the driver and discipline. */
345
346 if (tty->ldisc.flush_buffer) 345 if (tty->ldisc.flush_buffer)
347 tty->ldisc.flush_buffer (tty); 346 tty->ldisc.flush_buffer(tty);
348 347
349 if (tty->driver->flush_buffer) 348 tty_driver_flush_buffer(tty);
350 tty->driver->flush_buffer (tty);
351 349
352 if (debuglevel >= DEBUG_LEVEL_INFO) 350 if (debuglevel >= DEBUG_LEVEL_INFO)
353 printk("%s(%d)n_hdlc_tty_open() success\n",__FILE__,__LINE__); 351 printk("%s(%d)n_hdlc_tty_open() success\n",__FILE__,__LINE__);
@@ -399,7 +397,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
399 397
400 /* Send the next block of data to device */ 398 /* Send the next block of data to device */
401 tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 399 tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
402 actual = tty->driver->write(tty, tbuf->buf, tbuf->count); 400 actual = tty->ops->write(tty, tbuf->buf, tbuf->count);
403 401
404 /* rollback was possible and has been done */ 402 /* rollback was possible and has been done */
405 if (actual == -ERESTARTSYS) { 403 if (actual == -ERESTARTSYS) {
@@ -501,7 +499,7 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
501 __FILE__,__LINE__, count); 499 __FILE__,__LINE__, count);
502 500
503 /* This can happen if stuff comes in on the backup tty */ 501 /* This can happen if stuff comes in on the backup tty */
504 if (n_hdlc == 0 || tty != n_hdlc->tty) 502 if (!n_hdlc || tty != n_hdlc->tty)
505 return; 503 return;
506 504
507 /* verify line is using HDLC discipline */ 505 /* verify line is using HDLC discipline */
@@ -578,26 +576,36 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
578 return -EFAULT; 576 return -EFAULT;
579 } 577 }
580 578
579 lock_kernel();
580
581 for (;;) { 581 for (;;) {
582 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) 582 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
583 unlock_kernel();
583 return -EIO; 584 return -EIO;
585 }
584 586
585 n_hdlc = tty2n_hdlc (tty); 587 n_hdlc = tty2n_hdlc (tty);
586 if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC || 588 if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
587 tty != n_hdlc->tty) 589 tty != n_hdlc->tty) {
590 unlock_kernel();
588 return 0; 591 return 0;
592 }
589 593
590 rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list); 594 rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
591 if (rbuf) 595 if (rbuf)
592 break; 596 break;
593 597
594 /* no data */ 598 /* no data */
595 if (file->f_flags & O_NONBLOCK) 599 if (file->f_flags & O_NONBLOCK) {
600 unlock_kernel();
596 return -EAGAIN; 601 return -EAGAIN;
602 }
597 603
598 interruptible_sleep_on (&tty->read_wait); 604 interruptible_sleep_on (&tty->read_wait);
599 if (signal_pending(current)) 605 if (signal_pending(current)) {
606 unlock_kernel();
600 return -EINTR; 607 return -EINTR;
608 }
601 } 609 }
602 610
603 if (rbuf->count > nr) 611 if (rbuf->count > nr)
@@ -618,7 +626,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
618 kfree(rbuf); 626 kfree(rbuf);
619 else 627 else
620 n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf); 628 n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf);
621 629 unlock_kernel();
622 return ret; 630 return ret;
623 631
624} /* end of n_hdlc_tty_read() */ 632} /* end of n_hdlc_tty_read() */
@@ -661,6 +669,8 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
661 count = maxframe; 669 count = maxframe;
662 } 670 }
663 671
672 lock_kernel();
673
664 add_wait_queue(&tty->write_wait, &wait); 674 add_wait_queue(&tty->write_wait, &wait);
665 set_current_state(TASK_INTERRUPTIBLE); 675 set_current_state(TASK_INTERRUPTIBLE);
666 676
@@ -695,7 +705,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
695 n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf); 705 n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
696 n_hdlc_send_frames(n_hdlc,tty); 706 n_hdlc_send_frames(n_hdlc,tty);
697 } 707 }
698 708 unlock_kernel();
699 return error; 709 return error;
700 710
701} /* end of n_hdlc_tty_write() */ 711} /* end of n_hdlc_tty_write() */
@@ -740,8 +750,7 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
740 750
741 case TIOCOUTQ: 751 case TIOCOUTQ:
742 /* get the pending tx byte count in the driver */ 752 /* get the pending tx byte count in the driver */
743 count = tty->driver->chars_in_buffer ? 753 count = tty_chars_in_buffer(tty);
744 tty->driver->chars_in_buffer(tty) : 0;
745 /* add size of next output frame in queue */ 754 /* add size of next output frame in queue */
746 spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); 755 spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
747 if (n_hdlc->tx_buf_list.head) 756 if (n_hdlc->tx_buf_list.head)
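
The n_hdlc hunks above follow the pattern of the rest of this series: direct tty->driver->X calls become core accessors (tty_driver_flush_buffer(), tty_chars_in_buffer(), tty->ops->write()), and the blocking read/write paths take the big kernel lock explicitly with lock_kernel()/unlock_kernel() so every return path drops it. The sketch below is schematic, not the driver's code; my_rx_buf and my_buf_get() are hypothetical stand-ins for the ldisc's buffer queue.

/*
 * Schematic only: a blocking ldisc read that takes the BKL itself and
 * releases it on every exit path, mirroring the n_hdlc_tty_read hunks.
 */
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/tty.h>

struct my_rx_buf;					/* hypothetical */
static struct my_rx_buf *my_buf_get(struct tty_struct *tty);	/* hypothetical */

static ssize_t my_ldisc_read(struct tty_struct *tty, struct file *file,
			     unsigned char __user *buf, size_t nr)
{
	struct my_rx_buf *rbuf;

	lock_kernel();
	for (;;) {
		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
			unlock_kernel();
			return -EIO;
		}
		rbuf = my_buf_get(tty);
		if (rbuf)
			break;
		if (file->f_flags & O_NONBLOCK) {
			unlock_kernel();
			return -EAGAIN;
		}
		interruptible_sleep_on(&tty->read_wait);
		if (signal_pending(current)) {
			unlock_kernel();
			return -EINTR;
		}
	}
	/* ... copy rbuf to user space here ... */
	unlock_kernel();
	return 0;
}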
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index 6b918b80f73e..902169062332 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -376,8 +376,9 @@ static void put_char(struct r3964_info *pInfo, unsigned char ch)
376 if (tty == NULL) 376 if (tty == NULL)
377 return; 377 return;
378 378
379 if (tty->driver->put_char) { 379 /* FIXME: put_char should not be called from an IRQ */
380 tty->driver->put_char(tty, ch); 380 if (tty->ops->put_char) {
381 tty->ops->put_char(tty, ch);
381 } 382 }
382 pInfo->bcc ^= ch; 383 pInfo->bcc ^= ch;
383} 384}
@@ -386,12 +387,9 @@ static void flush(struct r3964_info *pInfo)
386{ 387{
387 struct tty_struct *tty = pInfo->tty; 388 struct tty_struct *tty = pInfo->tty;
388 389
389 if (tty == NULL) 390 if (tty == NULL || tty->ops->flush_chars == NULL)
390 return; 391 return;
391 392 tty->ops->flush_chars(tty);
392 if (tty->driver->flush_chars) {
393 tty->driver->flush_chars(tty);
394 }
395} 393}
396 394
397static void trigger_transmit(struct r3964_info *pInfo) 395static void trigger_transmit(struct r3964_info *pInfo)
@@ -449,12 +447,11 @@ static void transmit_block(struct r3964_info *pInfo)
449 struct r3964_block_header *pBlock = pInfo->tx_first; 447 struct r3964_block_header *pBlock = pInfo->tx_first;
450 int room = 0; 448 int room = 0;
451 449
452 if ((tty == NULL) || (pBlock == NULL)) { 450 if (tty == NULL || pBlock == NULL) {
453 return; 451 return;
454 } 452 }
455 453
456 if (tty->driver->write_room) 454 room = tty_write_room(tty);
457 room = tty->driver->write_room(tty);
458 455
459 TRACE_PS("transmit_block %p, room %d, length %d", 456 TRACE_PS("transmit_block %p, room %d, length %d",
460 pBlock, room, pBlock->length); 457 pBlock, room, pBlock->length);
@@ -1075,12 +1072,15 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1075 1072
1076 TRACE_L("read()"); 1073 TRACE_L("read()");
1077 1074
1075 lock_kernel();
1076
1078 pClient = findClient(pInfo, task_pid(current)); 1077 pClient = findClient(pInfo, task_pid(current));
1079 if (pClient) { 1078 if (pClient) {
1080 pMsg = remove_msg(pInfo, pClient); 1079 pMsg = remove_msg(pInfo, pClient);
1081 if (pMsg == NULL) { 1080 if (pMsg == NULL) {
1082 /* no messages available. */ 1081 /* no messages available. */
1083 if (file->f_flags & O_NONBLOCK) { 1082 if (file->f_flags & O_NONBLOCK) {
1083 unlock_kernel();
1084 return -EAGAIN; 1084 return -EAGAIN;
1085 } 1085 }
1086 /* block until there is a message: */ 1086 /* block until there is a message: */
@@ -1090,8 +1090,10 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1090 1090
1091 /* If we still haven't got a message, we must have been signalled */ 1091 /* If we still haven't got a message, we must have been signalled */
1092 1092
1093 if (!pMsg) 1093 if (!pMsg) {
1094 unlock_kernel();
1094 return -EINTR; 1095 return -EINTR;
1096 }
1095 1097
1096 /* deliver msg to client process: */ 1098 /* deliver msg to client process: */
1097 theMsg.msg_id = pMsg->msg_id; 1099 theMsg.msg_id = pMsg->msg_id;
@@ -1102,12 +1104,15 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1102 kfree(pMsg); 1104 kfree(pMsg);
1103 TRACE_M("r3964_read - msg kfree %p", pMsg); 1105 TRACE_M("r3964_read - msg kfree %p", pMsg);
1104 1106
1105 if (copy_to_user(buf, &theMsg, count)) 1107 if (copy_to_user(buf, &theMsg, count)) {
1108 unlock_kernel();
1106 return -EFAULT; 1109 return -EFAULT;
1110 }
1107 1111
1108 TRACE_PS("read - return %d", count); 1112 TRACE_PS("read - return %d", count);
1109 return count; 1113 return count;
1110 } 1114 }
1115 unlock_kernel();
1111 return -EPERM; 1116 return -EPERM;
1112} 1117}
1113 1118
@@ -1156,6 +1161,8 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
1156 pHeader->locks = 0; 1161 pHeader->locks = 0;
1157 pHeader->owner = NULL; 1162 pHeader->owner = NULL;
1158 1163
1164 lock_kernel();
1165
1159 pClient = findClient(pInfo, task_pid(current)); 1166 pClient = findClient(pInfo, task_pid(current));
1160 if (pClient) { 1167 if (pClient) {
1161 pHeader->owner = pClient; 1168 pHeader->owner = pClient;
@@ -1173,6 +1180,8 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
1173 add_tx_queue(pInfo, pHeader); 1180 add_tx_queue(pInfo, pHeader);
1174 trigger_transmit(pInfo); 1181 trigger_transmit(pInfo);
1175 1182
1183 unlock_kernel();
1184
1176 return 0; 1185 return 0;
1177} 1186}
1178 1187
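
Both put_char() and transmit_block() above stop probing tty->driver for optional methods and call the tty core accessors instead. As a rough sketch of what those accessors do (the exact fallback values in tty_io.c may differ; treat them as illustrative), they dispatch through tty->ops and supply a sane default when the driver does not implement the hook:

/* Approximate shape of the accessors used above (illustrative only). */
#include <linux/tty.h>

int tty_write_room(struct tty_struct *tty)
{
	if (tty->ops->write_room)
		return tty->ops->write_room(tty);
	return 2048;		/* fallback value is illustrative */
}

int tty_chars_in_buffer(struct tty_struct *tty)
{
	if (tty->ops->chars_in_buffer)
		return tty->ops->chars_in_buffer(tty);
	return 0;
}

This keeps the NULL checks in one place instead of repeating the "if (tty->driver->write_room)" dance in every line discipline.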
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 0c09409fa45d..19105ec203f7 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -147,10 +147,8 @@ static void put_tty_queue(unsigned char c, struct tty_struct *tty)
147 147
148static void check_unthrottle(struct tty_struct *tty) 148static void check_unthrottle(struct tty_struct *tty)
149{ 149{
150 if (tty->count && 150 if (tty->count)
151 test_and_clear_bit(TTY_THROTTLED, &tty->flags) && 151 tty_unthrottle(tty);
152 tty->driver->unthrottle)
153 tty->driver->unthrottle(tty);
154} 152}
155 153
156/** 154/**
@@ -183,22 +181,24 @@ static void reset_buffer_flags(struct tty_struct *tty)
183 * at hangup) or when the N_TTY line discipline internally has to 181 * at hangup) or when the N_TTY line discipline internally has to
184 * clean the pending queue (for example some signals). 182 * clean the pending queue (for example some signals).
185 * 183 *
186 * FIXME: tty->ctrl_status is not spinlocked and relies on 184 * Locking: ctrl_lock
187 * lock_kernel() still.
188 */ 185 */
189 186
190static void n_tty_flush_buffer(struct tty_struct *tty) 187static void n_tty_flush_buffer(struct tty_struct *tty)
191{ 188{
189 unsigned long flags;
192 /* clear everything and unthrottle the driver */ 190 /* clear everything and unthrottle the driver */
193 reset_buffer_flags(tty); 191 reset_buffer_flags(tty);
194 192
195 if (!tty->link) 193 if (!tty->link)
196 return; 194 return;
197 195
196 spin_lock_irqsave(&tty->ctrl_lock, flags);
198 if (tty->link->packet) { 197 if (tty->link->packet) {
199 tty->ctrl_status |= TIOCPKT_FLUSHREAD; 198 tty->ctrl_status |= TIOCPKT_FLUSHREAD;
200 wake_up_interruptible(&tty->link->read_wait); 199 wake_up_interruptible(&tty->link->read_wait);
201 } 200 }
201 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
202} 202}
203 203
204/** 204/**
@@ -264,17 +264,18 @@ static inline int is_continuation(unsigned char c, struct tty_struct *tty)
264 * relevant in the world today. If you ever need them, add them here. 264 * relevant in the world today. If you ever need them, add them here.
265 * 265 *
266 * Called from both the receive and transmit sides and can be called 266 * Called from both the receive and transmit sides and can be called
267 * re-entrantly. Relies on lock_kernel() still. 267 * re-entrantly. Relies on lock_kernel() for tty->column state.
268 */ 268 */
269 269
270static int opost(unsigned char c, struct tty_struct *tty) 270static int opost(unsigned char c, struct tty_struct *tty)
271{ 271{
272 int space, spaces; 272 int space, spaces;
273 273
274 space = tty->driver->write_room(tty); 274 space = tty_write_room(tty);
275 if (!space) 275 if (!space)
276 return -1; 276 return -1;
277 277
278 lock_kernel();
278 if (O_OPOST(tty)) { 279 if (O_OPOST(tty)) {
279 switch (c) { 280 switch (c) {
280 case '\n': 281 case '\n':
@@ -283,7 +284,7 @@ static int opost(unsigned char c, struct tty_struct *tty)
283 if (O_ONLCR(tty)) { 284 if (O_ONLCR(tty)) {
284 if (space < 2) 285 if (space < 2)
285 return -1; 286 return -1;
286 tty->driver->put_char(tty, '\r'); 287 tty_put_char(tty, '\r');
287 tty->column = 0; 288 tty->column = 0;
288 } 289 }
289 tty->canon_column = tty->column; 290 tty->canon_column = tty->column;
@@ -305,7 +306,7 @@ static int opost(unsigned char c, struct tty_struct *tty)
305 if (space < spaces) 306 if (space < spaces)
306 return -1; 307 return -1;
307 tty->column += spaces; 308 tty->column += spaces;
308 tty->driver->write(tty, " ", spaces); 309 tty->ops->write(tty, " ", spaces);
309 return 0; 310 return 0;
310 } 311 }
311 tty->column += spaces; 312 tty->column += spaces;
@@ -322,7 +323,8 @@ static int opost(unsigned char c, struct tty_struct *tty)
322 break; 323 break;
323 } 324 }
324 } 325 }
325 tty->driver->put_char(tty, c); 326 tty_put_char(tty, c);
327 unlock_kernel();
326 return 0; 328 return 0;
327} 329}
328 330
@@ -337,7 +339,8 @@ static int opost(unsigned char c, struct tty_struct *tty)
337 * the simple cases normally found and helps to generate blocks of 339 * the simple cases normally found and helps to generate blocks of
338 * symbols for the console driver and thus improve performance. 340 * symbols for the console driver and thus improve performance.
339 * 341 *
340 * Called from write_chan under the tty layer write lock. 342 * Called from write_chan under the tty layer write lock. Relies
343 * on lock_kernel for the tty->column state.
341 */ 344 */
342 345
343static ssize_t opost_block(struct tty_struct *tty, 346static ssize_t opost_block(struct tty_struct *tty,
@@ -347,12 +350,13 @@ static ssize_t opost_block(struct tty_struct *tty,
347 int i; 350 int i;
348 const unsigned char *cp; 351 const unsigned char *cp;
349 352
350 space = tty->driver->write_room(tty); 353 space = tty_write_room(tty);
351 if (!space) 354 if (!space)
352 return 0; 355 return 0;
353 if (nr > space) 356 if (nr > space)
354 nr = space; 357 nr = space;
355 358
359 lock_kernel();
356 for (i = 0, cp = buf; i < nr; i++, cp++) { 360 for (i = 0, cp = buf; i < nr; i++, cp++) {
357 switch (*cp) { 361 switch (*cp) {
358 case '\n': 362 case '\n':
@@ -384,27 +388,15 @@ static ssize_t opost_block(struct tty_struct *tty,
384 } 388 }
385 } 389 }
386break_out: 390break_out:
387 if (tty->driver->flush_chars) 391 if (tty->ops->flush_chars)
388 tty->driver->flush_chars(tty); 392 tty->ops->flush_chars(tty);
389 i = tty->driver->write(tty, buf, i); 393 i = tty->ops->write(tty, buf, i);
394 unlock_kernel();
390 return i; 395 return i;
391} 396}
392 397
393 398
394/** 399/**
395 * put_char - write character to driver
396 * @c: character (or part of unicode symbol)
397 * @tty: terminal device
398 *
399 * Queue a byte to the driver layer for output
400 */
401
402static inline void put_char(unsigned char c, struct tty_struct *tty)
403{
404 tty->driver->put_char(tty, c);
405}
406
407/**
408 * echo_char - echo characters 400 * echo_char - echo characters
409 * @c: unicode byte to echo 401 * @c: unicode byte to echo
410 * @tty: terminal device 402 * @tty: terminal device
@@ -416,8 +408,8 @@ static inline void put_char(unsigned char c, struct tty_struct *tty)
416static void echo_char(unsigned char c, struct tty_struct *tty) 408static void echo_char(unsigned char c, struct tty_struct *tty)
417{ 409{
418 if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') { 410 if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') {
419 put_char('^', tty); 411 tty_put_char(tty, '^');
420 put_char(c ^ 0100, tty); 412 tty_put_char(tty, c ^ 0100);
421 tty->column += 2; 413 tty->column += 2;
422 } else 414 } else
423 opost(c, tty); 415 opost(c, tty);
@@ -426,7 +418,7 @@ static void echo_char(unsigned char c, struct tty_struct *tty)
426static inline void finish_erasing(struct tty_struct *tty) 418static inline void finish_erasing(struct tty_struct *tty)
427{ 419{
428 if (tty->erasing) { 420 if (tty->erasing) {
429 put_char('/', tty); 421 tty_put_char(tty, '/');
430 tty->column++; 422 tty->column++;
431 tty->erasing = 0; 423 tty->erasing = 0;
432 } 424 }
@@ -510,7 +502,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
510 if (L_ECHO(tty)) { 502 if (L_ECHO(tty)) {
511 if (L_ECHOPRT(tty)) { 503 if (L_ECHOPRT(tty)) {
512 if (!tty->erasing) { 504 if (!tty->erasing) {
513 put_char('\\', tty); 505 tty_put_char(tty, '\\');
514 tty->column++; 506 tty->column++;
515 tty->erasing = 1; 507 tty->erasing = 1;
516 } 508 }
@@ -518,7 +510,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
518 echo_char(c, tty); 510 echo_char(c, tty);
519 while (--cnt > 0) { 511 while (--cnt > 0) {
520 head = (head+1) & (N_TTY_BUF_SIZE-1); 512 head = (head+1) & (N_TTY_BUF_SIZE-1);
521 put_char(tty->read_buf[head], tty); 513 tty_put_char(tty, tty->read_buf[head]);
522 } 514 }
523 } else if (kill_type == ERASE && !L_ECHOE(tty)) { 515 } else if (kill_type == ERASE && !L_ECHOE(tty)) {
524 echo_char(ERASE_CHAR(tty), tty); 516 echo_char(ERASE_CHAR(tty), tty);
@@ -546,22 +538,22 @@ static void eraser(unsigned char c, struct tty_struct *tty)
546 /* Now backup to that column. */ 538 /* Now backup to that column. */
547 while (tty->column > col) { 539 while (tty->column > col) {
548 /* Can't use opost here. */ 540 /* Can't use opost here. */
549 put_char('\b', tty); 541 tty_put_char(tty, '\b');
550 if (tty->column > 0) 542 if (tty->column > 0)
551 tty->column--; 543 tty->column--;
552 } 544 }
553 } else { 545 } else {
554 if (iscntrl(c) && L_ECHOCTL(tty)) { 546 if (iscntrl(c) && L_ECHOCTL(tty)) {
555 put_char('\b', tty); 547 tty_put_char(tty, '\b');
556 put_char(' ', tty); 548 tty_put_char(tty, ' ');
557 put_char('\b', tty); 549 tty_put_char(tty, '\b');
558 if (tty->column > 0) 550 if (tty->column > 0)
559 tty->column--; 551 tty->column--;
560 } 552 }
561 if (!iscntrl(c) || L_ECHOCTL(tty)) { 553 if (!iscntrl(c) || L_ECHOCTL(tty)) {
562 put_char('\b', tty); 554 tty_put_char(tty, '\b');
563 put_char(' ', tty); 555 tty_put_char(tty, ' ');
564 put_char('\b', tty); 556 tty_put_char(tty, '\b');
565 if (tty->column > 0) 557 if (tty->column > 0)
566 tty->column--; 558 tty->column--;
567 } 559 }
@@ -592,8 +584,7 @@ static inline void isig(int sig, struct tty_struct *tty, int flush)
592 kill_pgrp(tty->pgrp, sig, 1); 584 kill_pgrp(tty->pgrp, sig, 1);
593 if (flush || !L_NOFLSH(tty)) { 585 if (flush || !L_NOFLSH(tty)) {
594 n_tty_flush_buffer(tty); 586 n_tty_flush_buffer(tty);
595 if (tty->driver->flush_buffer) 587 tty_driver_flush_buffer(tty);
596 tty->driver->flush_buffer(tty);
597 } 588 }
598} 589}
599 590
@@ -701,7 +692,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
701 692
702 if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && 693 if (tty->stopped && !tty->flow_stopped && I_IXON(tty) &&
703 ((I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty)) || 694 ((I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty)) ||
704 c == INTR_CHAR(tty) || c == QUIT_CHAR(tty))) 695 c == INTR_CHAR(tty) || c == QUIT_CHAR(tty) || c == SUSP_CHAR(tty)))
705 start_tty(tty); 696 start_tty(tty);
706 697
707 if (tty->closing) { 698 if (tty->closing) {
@@ -725,7 +716,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
725 tty->lnext = 0; 716 tty->lnext = 0;
726 if (L_ECHO(tty)) { 717 if (L_ECHO(tty)) {
727 if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { 718 if (tty->read_cnt >= N_TTY_BUF_SIZE-1) {
728 put_char('\a', tty); /* beep if no space */ 719 tty_put_char(tty, '\a'); /* beep if no space */
729 return; 720 return;
730 } 721 }
731 /* Record the column of first canon char. */ 722 /* Record the column of first canon char. */
@@ -739,13 +730,6 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
739 return; 730 return;
740 } 731 }
741 732
742 if (c == '\r') {
743 if (I_IGNCR(tty))
744 return;
745 if (I_ICRNL(tty))
746 c = '\n';
747 } else if (c == '\n' && I_INLCR(tty))
748 c = '\r';
749 if (I_IXON(tty)) { 733 if (I_IXON(tty)) {
750 if (c == START_CHAR(tty)) { 734 if (c == START_CHAR(tty)) {
751 start_tty(tty); 735 start_tty(tty);
@@ -756,6 +740,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
756 return; 740 return;
757 } 741 }
758 } 742 }
743
759 if (L_ISIG(tty)) { 744 if (L_ISIG(tty)) {
760 int signal; 745 int signal;
761 signal = SIGINT; 746 signal = SIGINT;
@@ -775,8 +760,7 @@ send_signal:
775 */ 760 */
776 if (!L_NOFLSH(tty)) { 761 if (!L_NOFLSH(tty)) {
777 n_tty_flush_buffer(tty); 762 n_tty_flush_buffer(tty);
778 if (tty->driver->flush_buffer) 763 tty_driver_flush_buffer(tty);
779 tty->driver->flush_buffer(tty);
780 } 764 }
781 if (L_ECHO(tty)) 765 if (L_ECHO(tty))
782 echo_char(c, tty); 766 echo_char(c, tty);
@@ -785,6 +769,15 @@ send_signal:
785 return; 769 return;
786 } 770 }
787 } 771 }
772
773 if (c == '\r') {
774 if (I_IGNCR(tty))
775 return;
776 if (I_ICRNL(tty))
777 c = '\n';
778 } else if (c == '\n' && I_INLCR(tty))
779 c = '\r';
780
788 if (tty->icanon) { 781 if (tty->icanon) {
789 if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) || 782 if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) ||
790 (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) { 783 (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) {
@@ -796,8 +789,8 @@ send_signal:
796 if (L_ECHO(tty)) { 789 if (L_ECHO(tty)) {
797 finish_erasing(tty); 790 finish_erasing(tty);
798 if (L_ECHOCTL(tty)) { 791 if (L_ECHOCTL(tty)) {
799 put_char('^', tty); 792 tty_put_char(tty, '^');
800 put_char('\b', tty); 793 tty_put_char(tty, '\b');
801 } 794 }
802 } 795 }
803 return; 796 return;
@@ -818,7 +811,7 @@ send_signal:
818 if (c == '\n') { 811 if (c == '\n') {
819 if (L_ECHO(tty) || L_ECHONL(tty)) { 812 if (L_ECHO(tty) || L_ECHONL(tty)) {
820 if (tty->read_cnt >= N_TTY_BUF_SIZE-1) 813 if (tty->read_cnt >= N_TTY_BUF_SIZE-1)
821 put_char('\a', tty); 814 tty_put_char(tty, '\a');
822 opost('\n', tty); 815 opost('\n', tty);
823 } 816 }
824 goto handle_newline; 817 goto handle_newline;
@@ -836,7 +829,7 @@ send_signal:
836 */ 829 */
837 if (L_ECHO(tty)) { 830 if (L_ECHO(tty)) {
838 if (tty->read_cnt >= N_TTY_BUF_SIZE-1) 831 if (tty->read_cnt >= N_TTY_BUF_SIZE-1)
839 put_char('\a', tty); 832 tty_put_char(tty, '\a');
840 /* Record the column of first canon char. */ 833 /* Record the column of first canon char. */
841 if (tty->canon_head == tty->read_head) 834 if (tty->canon_head == tty->read_head)
842 tty->canon_column = tty->column; 835 tty->canon_column = tty->column;
@@ -866,7 +859,7 @@ handle_newline:
866 finish_erasing(tty); 859 finish_erasing(tty);
867 if (L_ECHO(tty)) { 860 if (L_ECHO(tty)) {
868 if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { 861 if (tty->read_cnt >= N_TTY_BUF_SIZE-1) {
869 put_char('\a', tty); /* beep if no space */ 862 tty_put_char(tty, '\a'); /* beep if no space */
870 return; 863 return;
871 } 864 }
872 if (c == '\n') 865 if (c == '\n')
@@ -970,8 +963,8 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
970 break; 963 break;
971 } 964 }
972 } 965 }
973 if (tty->driver->flush_chars) 966 if (tty->ops->flush_chars)
974 tty->driver->flush_chars(tty); 967 tty->ops->flush_chars(tty);
975 } 968 }
976 969
977 n_tty_set_room(tty); 970 n_tty_set_room(tty);
@@ -987,12 +980,8 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
987 * mode. We don't want to throttle the driver if we're in 980 * mode. We don't want to throttle the driver if we're in
988 * canonical mode and don't have a newline yet! 981 * canonical mode and don't have a newline yet!
989 */ 982 */
990 if (tty->receive_room < TTY_THRESHOLD_THROTTLE) { 983 if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
991 /* check TTY_THROTTLED first so it indicates our state */ 984 tty_throttle(tty);
992 if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
993 tty->driver->throttle)
994 tty->driver->throttle(tty);
995 }
996} 985}
997 986
998int is_ignored(int sig) 987int is_ignored(int sig)
@@ -1076,6 +1065,9 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1076 tty->real_raw = 0; 1065 tty->real_raw = 0;
1077 } 1066 }
1078 n_tty_set_room(tty); 1067 n_tty_set_room(tty);
 1068 /* The termios change makes the tty ready for I/O */

1069 wake_up_interruptible(&tty->write_wait);
1070 wake_up_interruptible(&tty->read_wait);
1079} 1071}
1080 1072
1081/** 1073/**
@@ -1194,6 +1186,11 @@ extern ssize_t redirected_tty_write(struct file *, const char __user *,
1194 * Perform job control management checks on this file/tty descriptor 1186 * Perform job control management checks on this file/tty descriptor
1195 * and if appropriate send any needed signals and return a negative 1187 * and if appropriate send any needed signals and return a negative
1196 * error code if action should be taken. 1188 * error code if action should be taken.
1189 *
1190 * FIXME:
1191 * Locking: None - redirected write test is safe, testing
1192 * current->signal should possibly lock current->sighand
1193 * pgrp locking ?
1197 */ 1194 */
1198 1195
1199static int job_control(struct tty_struct *tty, struct file *file) 1196static int job_control(struct tty_struct *tty, struct file *file)
@@ -1246,6 +1243,7 @@ static ssize_t read_chan(struct tty_struct *tty, struct file *file,
1246 ssize_t size; 1243 ssize_t size;
1247 long timeout; 1244 long timeout;
1248 unsigned long flags; 1245 unsigned long flags;
1246 int packet;
1249 1247
1250do_it_again: 1248do_it_again:
1251 1249
@@ -1289,16 +1287,19 @@ do_it_again:
1289 if (mutex_lock_interruptible(&tty->atomic_read_lock)) 1287 if (mutex_lock_interruptible(&tty->atomic_read_lock))
1290 return -ERESTARTSYS; 1288 return -ERESTARTSYS;
1291 } 1289 }
1290 packet = tty->packet;
1292 1291
1293 add_wait_queue(&tty->read_wait, &wait); 1292 add_wait_queue(&tty->read_wait, &wait);
1294 while (nr) { 1293 while (nr) {
1295 /* First test for status change. */ 1294 /* First test for status change. */
1296 if (tty->packet && tty->link->ctrl_status) { 1295 if (packet && tty->link->ctrl_status) {
1297 unsigned char cs; 1296 unsigned char cs;
1298 if (b != buf) 1297 if (b != buf)
1299 break; 1298 break;
1299 spin_lock_irqsave(&tty->link->ctrl_lock, flags);
1300 cs = tty->link->ctrl_status; 1300 cs = tty->link->ctrl_status;
1301 tty->link->ctrl_status = 0; 1301 tty->link->ctrl_status = 0;
1302 spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
1302 if (tty_put_user(tty, cs, b++)) { 1303 if (tty_put_user(tty, cs, b++)) {
1303 retval = -EFAULT; 1304 retval = -EFAULT;
1304 b--; 1305 b--;
@@ -1333,6 +1334,7 @@ do_it_again:
1333 retval = -ERESTARTSYS; 1334 retval = -ERESTARTSYS;
1334 break; 1335 break;
1335 } 1336 }
1337 /* FIXME: does n_tty_set_room need locking ? */
1336 n_tty_set_room(tty); 1338 n_tty_set_room(tty);
1337 timeout = schedule_timeout(timeout); 1339 timeout = schedule_timeout(timeout);
1338 continue; 1340 continue;
@@ -1340,7 +1342,7 @@ do_it_again:
1340 __set_current_state(TASK_RUNNING); 1342 __set_current_state(TASK_RUNNING);
1341 1343
1342 /* Deal with packet mode. */ 1344 /* Deal with packet mode. */
1343 if (tty->packet && b == buf) { 1345 if (packet && b == buf) {
1344 if (tty_put_user(tty, TIOCPKT_DATA, b++)) { 1346 if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
1345 retval = -EFAULT; 1347 retval = -EFAULT;
1346 b--; 1348 b--;
@@ -1388,6 +1390,8 @@ do_it_again:
1388 break; 1390 break;
1389 } else { 1391 } else {
1390 int uncopied; 1392 int uncopied;
1393 /* The copy function takes the read lock and handles
1394 locking internally for this case */
1391 uncopied = copy_from_read_buf(tty, &b, &nr); 1395 uncopied = copy_from_read_buf(tty, &b, &nr);
1392 uncopied += copy_from_read_buf(tty, &b, &nr); 1396 uncopied += copy_from_read_buf(tty, &b, &nr);
1393 if (uncopied) { 1397 if (uncopied) {
@@ -1429,7 +1433,6 @@ do_it_again:
1429 goto do_it_again; 1433 goto do_it_again;
1430 1434
1431 n_tty_set_room(tty); 1435 n_tty_set_room(tty);
1432
1433 return retval; 1436 return retval;
1434} 1437}
1435 1438
@@ -1492,11 +1495,11 @@ static ssize_t write_chan(struct tty_struct *tty, struct file *file,
1492 break; 1495 break;
1493 b++; nr--; 1496 b++; nr--;
1494 } 1497 }
1495 if (tty->driver->flush_chars) 1498 if (tty->ops->flush_chars)
1496 tty->driver->flush_chars(tty); 1499 tty->ops->flush_chars(tty);
1497 } else { 1500 } else {
1498 while (nr > 0) { 1501 while (nr > 0) {
1499 c = tty->driver->write(tty, b, nr); 1502 c = tty->ops->write(tty, b, nr);
1500 if (c < 0) { 1503 if (c < 0) {
1501 retval = c; 1504 retval = c;
1502 goto break_out; 1505 goto break_out;
@@ -1533,11 +1536,6 @@ break_out:
1533 * 1536 *
1534 * This code must be sure never to sleep through a hangup. 1537 * This code must be sure never to sleep through a hangup.
1535 * Called without the kernel lock held - fine 1538 * Called without the kernel lock held - fine
1536 *
1537 * FIXME: if someone changes the VMIN or discipline settings for the
1538 * terminal while another process is in poll() the poll does not
1539 * recompute the new limits. Possibly set_termios should issue
1540 * a read wakeup to fix this bug.
1541 */ 1539 */
1542 1540
1543static unsigned int normal_poll(struct tty_struct *tty, struct file *file, 1541static unsigned int normal_poll(struct tty_struct *tty, struct file *file,
@@ -1561,9 +1559,9 @@ static unsigned int normal_poll(struct tty_struct *tty, struct file *file,
1561 else 1559 else
1562 tty->minimum_to_wake = 1; 1560 tty->minimum_to_wake = 1;
1563 } 1561 }
1564 if (!tty_is_writelocked(tty) && 1562 if (tty->ops->write && !tty_is_writelocked(tty) &&
1565 tty->driver->chars_in_buffer(tty) < WAKEUP_CHARS && 1563 tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
1566 tty->driver->write_room(tty) > 0) 1564 tty_write_room(tty) > 0)
1567 mask |= POLLOUT | POLLWRNORM; 1565 mask |= POLLOUT | POLLWRNORM;
1568 return mask; 1566 return mask;
1569} 1567}
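
The recurring n_tty theme above is replacing BKL assumptions with explicit locking: tty->ctrl_status is now updated under tty->ctrl_lock, read_chan() snapshots tty->packet once, and the open-coded throttle/unthrottle tests move into tty_throttle()/tty_unthrottle(). A minimal sketch of the ctrl_status discipline (writer sets a flag, reader consumes it atomically under the same spinlock) follows; ctrl_lock and ctrl_status are the real fields, the two functions are schematic.

#include <linux/spinlock.h>
#include <linux/tty.h>

/* Producer side: set a packet-mode status bit under ctrl_lock. */
static void set_ctrl_flag(struct tty_struct *tty, unsigned char bit)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->ctrl_lock, flags);
	tty->ctrl_status |= bit;
	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
	wake_up_interruptible(&tty->link->read_wait);
}

/* Consumer side: read-and-clear under the same lock, as read_chan() does. */
static unsigned char get_and_clear_ctrl(struct tty_struct *tty)
{
	unsigned long flags;
	unsigned char cs;

	spin_lock_irqsave(&tty->ctrl_lock, flags);
	cs = tty->ctrl_status;
	tty->ctrl_status = 0;
	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
	return cs;
}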
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 6a6843a0a674..66a0f931c66c 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -73,7 +73,7 @@ do { \
73 char tmp[P_BUF_SIZE]; \ 73 char tmp[P_BUF_SIZE]; \
74 snprintf(tmp, sizeof(tmp), ##args); \ 74 snprintf(tmp, sizeof(tmp), ##args); \
75 printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \ 75 printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \
76 __FUNCTION__, tmp); \ 76 __func__, tmp); \
77} while (0) 77} while (0)
78 78
79#define DBG1(args...) D_(0x01, ##args) 79#define DBG1(args...) D_(0x01, ##args)
@@ -1407,7 +1407,7 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1407 /* Find out what card type it is */ 1407 /* Find out what card type it is */
1408 nozomi_get_card_type(dc); 1408 nozomi_get_card_type(dc);
1409 1409
1410 dc->base_addr = ioremap(start, dc->card_type); 1410 dc->base_addr = ioremap_nocache(start, dc->card_type);
1411 if (!dc->base_addr) { 1411 if (!dc->base_addr) {
1412 dev_err(&pdev->dev, "Unable to map card MMIO\n"); 1412 dev_err(&pdev->dev, "Unable to map card MMIO\n");
1413 ret = -ENODEV; 1413 ret = -ENODEV;
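
Mapping the card registers with ioremap_nocache() rather than plain ioremap() makes the uncached requirement explicit for the MMIO window; on some architectures plain ioremap() happens to be uncached already, but device registers should not rely on that. A hedged sketch of the usual pattern, with made-up names for the helper and register offset:

#include <linux/io.h>
#include <linux/pci.h>

/* Illustrative only: map BAR0 uncached and poke one register. */
static void __iomem *map_card(struct pci_dev *pdev, unsigned long reg_offset)
{
	void __iomem *base;

	base = ioremap_nocache(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
	if (!base)
		return NULL;

	iowrite32(0, base + reg_offset);	/* always use the io accessors */
	return base;
}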
@@ -1724,6 +1724,8 @@ static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
1724 const struct ctrl_dl *ctrl_dl = &port->ctrl_dl; 1724 const struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
1725 const struct ctrl_ul *ctrl_ul = &port->ctrl_ul; 1725 const struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
1726 1726
1727 /* Note: these could change under us but it is not clear this
1728 matters if so */
1727 return (ctrl_ul->RTS ? TIOCM_RTS : 0) | 1729 return (ctrl_ul->RTS ? TIOCM_RTS : 0) |
1728 (ctrl_ul->DTR ? TIOCM_DTR : 0) | 1730 (ctrl_ul->DTR ? TIOCM_DTR : 0) |
1729 (ctrl_dl->DCD ? TIOCM_CAR : 0) | 1731 (ctrl_dl->DCD ? TIOCM_CAR : 0) |
@@ -1849,16 +1851,6 @@ static void ntty_throttle(struct tty_struct *tty)
1849 spin_unlock_irqrestore(&dc->spin_mutex, flags); 1851 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1850} 1852}
1851 1853
1852/* just to discard single character writes */
1853static void ntty_put_char(struct tty_struct *tty, unsigned char c)
1854{
1855 /*
1856 * card does not react correct when we write single chars
1857 * to the card, so we discard them
1858 */
1859 DBG2("PUT CHAR Function: %c", c);
1860}
1861
1862/* Returns number of chars in buffer, called by tty layer */ 1854/* Returns number of chars in buffer, called by tty layer */
1863static s32 ntty_chars_in_buffer(struct tty_struct *tty) 1855static s32 ntty_chars_in_buffer(struct tty_struct *tty)
1864{ 1856{
@@ -1892,7 +1884,6 @@ static const struct tty_operations tty_ops = {
1892 .unthrottle = ntty_unthrottle, 1884 .unthrottle = ntty_unthrottle,
1893 .throttle = ntty_throttle, 1885 .throttle = ntty_throttle,
1894 .chars_in_buffer = ntty_chars_in_buffer, 1886 .chars_in_buffer = ntty_chars_in_buffer,
1895 .put_char = ntty_put_char,
1896 .tiocmget = ntty_tiocmget, 1887 .tiocmget = ntty_tiocmget,
1897 .tiocmset = ntty_tiocmset, 1888 .tiocmset = ntty_tiocmset,
1898}; 1889};
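
Dropping ntty_put_char() works because the tty core now supplies the single-character fallback itself: tty_put_char() calls the driver's put_char() when present and otherwise routes the byte through ->write(). Roughly (exact return handling in tty_io.c may differ):

/* Approximate core fallback that makes a driver put_char optional. */
#include <linux/tty.h>

int tty_put_char(struct tty_struct *tty, unsigned char ch)
{
	if (tty->ops->put_char)
		return tty->ops->put_char(tty, ch);
	return tty->ops->write(tty, &ch, 1);
}

A driver that cannot usefully write single characters therefore just omits .put_char instead of stubbing it out with a discard routine.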
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 454d7324ba40..4a933d413423 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -53,7 +53,7 @@ module_param(pc_debug, int, 0600);
53#define DEBUGP(n, rdr, x, args...) do { \ 53#define DEBUGP(n, rdr, x, args...) do { \
54 if (pc_debug >= (n)) \ 54 if (pc_debug >= (n)) \
55 dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ 55 dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \
56 __FUNCTION__ , ## args); \ 56 __func__ , ## args); \
57 } while (0) 57 } while (0)
58#else 58#else
59#define DEBUGP(n, rdr, x, args...) 59#define DEBUGP(n, rdr, x, args...)
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 5f291bf739a6..035084c07329 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -47,7 +47,7 @@ module_param(pc_debug, int, 0600);
47#define DEBUGP(n, rdr, x, args...) do { \ 47#define DEBUGP(n, rdr, x, args...) do { \
48 if (pc_debug >= (n)) \ 48 if (pc_debug >= (n)) \
49 dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ 49 dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \
50 __FUNCTION__ , ##args); \ 50 __func__ , ##args); \
51 } while (0) 51 } while (0)
52#else 52#else
53#define DEBUGP(n, rdr, x, args...) 53#define DEBUGP(n, rdr, x, args...)
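
The __FUNCTION__ to __func__ conversions in these debug macros change nothing at run time: __func__ is the C99 predefined identifier, __FUNCTION__ the older GCC spelling the kernel has been phasing out. A tiny standalone example (using __VA_ARGS__ instead of the kernel's GCC-style named variadic arguments; the ## paste is the same GNU comma-swallowing extension the macros above use):

#include <stdio.h>

#define DBG(fmt, ...) \
	printf("%s: " fmt "\n", __func__, ##__VA_ARGS__)

static void probe(void)
{
	DBG("found %d readers", 1);	/* prints "probe: found 1 readers" */
}

int main(void)
{
	probe();
	return 0;
}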
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c
index 1f978ff87fa8..fa9d3c945f31 100644
--- a/drivers/char/pcmcia/ipwireless/hardware.c
+++ b/drivers/char/pcmcia/ipwireless/hardware.c
@@ -354,32 +354,6 @@ struct ipw_rx_packet {
354 unsigned int channel_idx; 354 unsigned int channel_idx;
355}; 355};
356 356
357#ifdef IPWIRELESS_STATE_DEBUG
358int ipwireless_dump_hardware_state(char *p, size_t limit,
359 struct ipw_hardware *hw)
360{
361 return snprintf(p, limit,
362 "debug: initializing=%d\n"
363 "debug: tx_ready=%d\n"
364 "debug: tx_queued=%d\n"
365 "debug: rx_ready=%d\n"
366 "debug: rx_bytes_queued=%d\n"
367 "debug: blocking_rx=%d\n"
368 "debug: removed=%d\n"
369 "debug: hardware.shutting_down=%d\n"
370 "debug: to_setup=%d\n",
371 hw->initializing,
372 hw->tx_ready,
373 hw->tx_queued,
374 hw->rx_ready,
375 hw->rx_bytes_queued,
376 hw->blocking_rx,
377 hw->removed,
378 hw->shutting_down,
379 hw->to_setup);
380}
381#endif
382
383static char *data_type(const unsigned char *buf, unsigned length) 357static char *data_type(const unsigned char *buf, unsigned length)
384{ 358{
385 struct nl_packet_header *hdr = (struct nl_packet_header *) buf; 359 struct nl_packet_header *hdr = (struct nl_packet_header *) buf;
diff --git a/drivers/char/pcmcia/ipwireless/hardware.h b/drivers/char/pcmcia/ipwireless/hardware.h
index c83190ffb0e7..19ce5eb266b1 100644
--- a/drivers/char/pcmcia/ipwireless/hardware.h
+++ b/drivers/char/pcmcia/ipwireless/hardware.h
@@ -58,7 +58,5 @@ void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
58 void *reboot_cb_data); 58 void *reboot_cb_data);
59void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw); 59void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw);
60void ipwireless_sleep(unsigned int tenths); 60void ipwireless_sleep(unsigned int tenths);
61int ipwireless_dump_hardware_state(char *p, size_t limit,
62 struct ipw_hardware *hw);
63 61
64#endif 62#endif
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c
index d793e68b3e0d..fe914d34f7f6 100644
--- a/drivers/char/pcmcia/ipwireless/network.c
+++ b/drivers/char/pcmcia/ipwireless/network.c
@@ -63,21 +63,6 @@ struct ipw_network {
63 struct work_struct work_go_offline; 63 struct work_struct work_go_offline;
64}; 64};
65 65
66
67#ifdef IPWIRELESS_STATE_DEBUG
68int ipwireless_dump_network_state(char *p, size_t limit,
69 struct ipw_network *network)
70{
71 return snprintf(p, limit,
72 "debug: ppp_blocked=%d\n"
73 "debug: outgoing_packets_queued=%d\n"
74 "debug: network.shutting_down=%d\n",
75 network->ppp_blocked,
76 network->outgoing_packets_queued,
77 network->shutting_down);
78}
79#endif
80
81static void notify_packet_sent(void *callback_data, unsigned int packet_length) 66static void notify_packet_sent(void *callback_data, unsigned int packet_length)
82{ 67{
83 struct ipw_network *network = callback_data; 68 struct ipw_network *network = callback_data;
diff --git a/drivers/char/pcmcia/ipwireless/network.h b/drivers/char/pcmcia/ipwireless/network.h
index b0e1e952fd14..ccacd26fc7ef 100644
--- a/drivers/char/pcmcia/ipwireless/network.h
+++ b/drivers/char/pcmcia/ipwireless/network.h
@@ -49,7 +49,4 @@ void ipwireless_ppp_close(struct ipw_network *net);
49int ipwireless_ppp_channel_index(struct ipw_network *net); 49int ipwireless_ppp_channel_index(struct ipw_network *net);
50int ipwireless_ppp_unit_number(struct ipw_network *net); 50int ipwireless_ppp_unit_number(struct ipw_network *net);
51 51
52int ipwireless_dump_network_state(char *p, size_t limit,
53 struct ipw_network *net);
54
55#endif 52#endif
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 583356426dfb..1dd0e992c83d 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -503,20 +503,9 @@ static void* mgslpc_get_text_ptr(void)
503 * The wrappers maintain line discipline references 503 * The wrappers maintain line discipline references
504 * while calling into the line discipline. 504 * while calling into the line discipline.
505 * 505 *
506 * ldisc_flush_buffer - flush line discipline receive buffers
507 * ldisc_receive_buf - pass receive data to line discipline 506 * ldisc_receive_buf - pass receive data to line discipline
508 */ 507 */
509 508
510static void ldisc_flush_buffer(struct tty_struct *tty)
511{
512 struct tty_ldisc *ld = tty_ldisc_ref(tty);
513 if (ld) {
514 if (ld->flush_buffer)
515 ld->flush_buffer(tty);
516 tty_ldisc_deref(ld);
517 }
518}
519
520static void ldisc_receive_buf(struct tty_struct *tty, 509static void ldisc_receive_buf(struct tty_struct *tty,
521 const __u8 *data, char *flags, int count) 510 const __u8 *data, char *flags, int count)
522{ 511{
@@ -1556,7 +1545,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info)
1556 1545
1557/* Add a character to the transmit buffer 1546/* Add a character to the transmit buffer
1558 */ 1547 */
1559static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch) 1548static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
1560{ 1549{
1561 MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; 1550 MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
1562 unsigned long flags; 1551 unsigned long flags;
@@ -1567,10 +1556,10 @@ static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
1567 } 1556 }
1568 1557
1569 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char")) 1558 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
1570 return; 1559 return 0;
1571 1560
1572 if (!info->tx_buf) 1561 if (!info->tx_buf)
1573 return; 1562 return 0;
1574 1563
1575 spin_lock_irqsave(&info->lock,flags); 1564 spin_lock_irqsave(&info->lock,flags);
1576 1565
@@ -1583,6 +1572,7 @@ static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
1583 } 1572 }
1584 1573
1585 spin_unlock_irqrestore(&info->lock,flags); 1574 spin_unlock_irqrestore(&info->lock,flags);
1575 return 1;
1586} 1576}
1587 1577
1588/* Enable transmitter so remaining characters in the 1578/* Enable transmitter so remaining characters in the
@@ -2467,10 +2457,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
2467 if (info->flags & ASYNC_INITIALIZED) 2457 if (info->flags & ASYNC_INITIALIZED)
2468 mgslpc_wait_until_sent(tty, info->timeout); 2458 mgslpc_wait_until_sent(tty, info->timeout);
2469 2459
2470 if (tty->driver->flush_buffer) 2460 mgslpc_flush_buffer(tty);
2471 tty->driver->flush_buffer(tty);
2472 2461
2473 ldisc_flush_buffer(tty); 2462 tty_ldisc_flush(tty);
2474 2463
2475 shutdown(info); 2464 shutdown(info);
2476 2465
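
Two API shifts show up in synclink_cs: put_char() now returns int (non-zero if the character was queued, 0 if it had to be dropped), matching the new tty_operations prototype, and the driver-private ldisc_flush_buffer() wrapper gives way to the core tty_ldisc_flush() helper. Below is a minimal sketch of the new put_char contract, with a hypothetical one-slot buffer standing in for a real transmit FIFO.

#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

/* Hypothetical driver state: a single pending byte. */
struct demo_port {
	spinlock_t	lock;
	bool		pending;
	unsigned char	byte;
};

/* New-style put_char: report whether the byte was accepted. */
static int demo_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct demo_port *p = tty->driver_data;
	unsigned long flags;
	int queued = 0;

	spin_lock_irqsave(&p->lock, flags);
	if (!p->pending) {
		p->byte = ch;
		p->pending = true;
		queued = 1;
	}
	spin_unlock_irqrestore(&p->lock, flags);
	return queued;		/* 0 means "try write()/flush later" */
}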
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 706ff34728f1..0a05c038ae6f 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -181,6 +181,7 @@ static int pty_set_lock(struct tty_struct *tty, int __user * arg)
181static void pty_flush_buffer(struct tty_struct *tty) 181static void pty_flush_buffer(struct tty_struct *tty)
182{ 182{
183 struct tty_struct *to = tty->link; 183 struct tty_struct *to = tty->link;
184 unsigned long flags;
184 185
185 if (!to) 186 if (!to)
186 return; 187 return;
@@ -189,8 +190,10 @@ static void pty_flush_buffer(struct tty_struct *tty)
189 to->ldisc.flush_buffer(to); 190 to->ldisc.flush_buffer(to);
190 191
191 if (to->packet) { 192 if (to->packet) {
193 spin_lock_irqsave(&tty->ctrl_lock, flags);
192 tty->ctrl_status |= TIOCPKT_FLUSHWRITE; 194 tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
193 wake_up_interruptible(&to->read_wait); 195 wake_up_interruptible(&to->read_wait);
196 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
194 } 197 }
195} 198}
196 199
@@ -251,6 +254,18 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
251static int legacy_count = CONFIG_LEGACY_PTY_COUNT; 254static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
252module_param(legacy_count, int, 0); 255module_param(legacy_count, int, 0);
253 256
257static const struct tty_operations pty_ops_bsd = {
258 .open = pty_open,
259 .close = pty_close,
260 .write = pty_write,
261 .write_room = pty_write_room,
262 .flush_buffer = pty_flush_buffer,
263 .chars_in_buffer = pty_chars_in_buffer,
264 .unthrottle = pty_unthrottle,
265 .set_termios = pty_set_termios,
266 .ioctl = pty_bsd_ioctl,
267};
268
254static void __init legacy_pty_init(void) 269static void __init legacy_pty_init(void)
255{ 270{
256 if (legacy_count <= 0) 271 if (legacy_count <= 0)
@@ -281,7 +296,6 @@ static void __init legacy_pty_init(void)
281 pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW; 296 pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW;
282 pty_driver->other = pty_slave_driver; 297 pty_driver->other = pty_slave_driver;
283 tty_set_operations(pty_driver, &pty_ops); 298 tty_set_operations(pty_driver, &pty_ops);
284 pty_driver->ioctl = pty_bsd_ioctl;
285 299
286 pty_slave_driver->owner = THIS_MODULE; 300 pty_slave_driver->owner = THIS_MODULE;
287 pty_slave_driver->driver_name = "pty_slave"; 301 pty_slave_driver->driver_name = "pty_slave";
@@ -374,6 +388,19 @@ static int pty_unix98_ioctl(struct tty_struct *tty, struct file *file,
374 return -ENOIOCTLCMD; 388 return -ENOIOCTLCMD;
375} 389}
376 390
391static const struct tty_operations pty_unix98_ops = {
392 .open = pty_open,
393 .close = pty_close,
394 .write = pty_write,
395 .write_room = pty_write_room,
396 .flush_buffer = pty_flush_buffer,
397 .chars_in_buffer = pty_chars_in_buffer,
398 .unthrottle = pty_unthrottle,
399 .set_termios = pty_set_termios,
400 .ioctl = pty_unix98_ioctl
401};
402
403
377static void __init unix98_pty_init(void) 404static void __init unix98_pty_init(void)
378{ 405{
379 ptm_driver = alloc_tty_driver(NR_UNIX98_PTY_MAX); 406 ptm_driver = alloc_tty_driver(NR_UNIX98_PTY_MAX);
@@ -400,8 +427,7 @@ static void __init unix98_pty_init(void)
400 ptm_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | 427 ptm_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW |
401 TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_DEVPTS_MEM; 428 TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_DEVPTS_MEM;
402 ptm_driver->other = pts_driver; 429 ptm_driver->other = pts_driver;
403 tty_set_operations(ptm_driver, &pty_ops); 430 tty_set_operations(ptm_driver, &pty_unix98_ops);
404 ptm_driver->ioctl = pty_unix98_ioctl;
405 431
406 pts_driver->owner = THIS_MODULE; 432 pts_driver->owner = THIS_MODULE;
407 pts_driver->driver_name = "pty_slave"; 433 pts_driver->driver_name = "pty_slave";
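
The pty change is the same consolidation seen elsewhere in this series: instead of installing a shared pty_ops table and then patching driver->ioctl afterwards, each flavour (BSD and Unix98) gets its own const struct tty_operations with the right .ioctl, and tty_set_operations() installs it in one step. The general registration shape, with hypothetical driver names and the other required tty_driver fields omitted, is roughly:

#include <linux/tty.h>
#include <linux/tty_driver.h>

static int demo_open(struct tty_struct *tty, struct file *filp);
static int demo_write(struct tty_struct *tty, const unsigned char *buf,
		      int count);
static int demo_ioctl(struct tty_struct *tty, struct file *file,
		      unsigned int cmd, unsigned long arg);

/* Hypothetical driver: every method lives in one const ops table. */
static const struct tty_operations demo_ops = {
	.open	= demo_open,
	.write	= demo_write,
	.ioctl	= demo_ioctl,
};

static int __init demo_init(void)
{
	struct tty_driver *drv = alloc_tty_driver(4);

	if (!drv)
		return -ENOMEM;
	/* name, major, termios defaults etc. would be set here */
	tty_set_operations(drv, &demo_ops);	/* no field patching afterwards */
	return tty_register_driver(drv);
}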
diff --git a/drivers/char/random.c b/drivers/char/random.c
index f43c89f7c449..0cf98bd4f2d2 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -272,7 +272,7 @@ static int random_write_wakeup_thresh = 128;
272 272
273static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28; 273static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
274 274
275static DEFINE_PER_CPU(int, trickle_count) = 0; 275static DEFINE_PER_CPU(int, trickle_count);
276 276
277/* 277/*
278 * A pool of size .poolwords is stirred with a primitive polynomial 278 * A pool of size .poolwords is stirred with a primitive polynomial
@@ -370,17 +370,19 @@ static struct poolinfo {
370 */ 370 */
371static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); 371static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
372static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); 372static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
373static struct fasync_struct *fasync;
373 374
374#if 0 375#if 0
375static int debug = 0; 376static int debug;
376module_param(debug, bool, 0644); 377module_param(debug, bool, 0644);
377#define DEBUG_ENT(fmt, arg...) do { if (debug) \ 378#define DEBUG_ENT(fmt, arg...) do { \
378 printk(KERN_DEBUG "random %04d %04d %04d: " \ 379 if (debug) \
379 fmt,\ 380 printk(KERN_DEBUG "random %04d %04d %04d: " \
380 input_pool.entropy_count,\ 381 fmt,\
381 blocking_pool.entropy_count,\ 382 input_pool.entropy_count,\
382 nonblocking_pool.entropy_count,\ 383 blocking_pool.entropy_count,\
383 ## arg); } while (0) 384 nonblocking_pool.entropy_count,\
385 ## arg); } while (0)
384#else 386#else
385#define DEBUG_ENT(fmt, arg...) do {} while (0) 387#define DEBUG_ENT(fmt, arg...) do {} while (0)
386#endif 388#endif
@@ -394,7 +396,7 @@ module_param(debug, bool, 0644);
394 396
395struct entropy_store; 397struct entropy_store;
396struct entropy_store { 398struct entropy_store {
397 /* mostly-read data: */ 399 /* read-only data: */
398 struct poolinfo *poolinfo; 400 struct poolinfo *poolinfo;
399 __u32 *pool; 401 __u32 *pool;
400 const char *name; 402 const char *name;
@@ -402,7 +404,7 @@ struct entropy_store {
402 struct entropy_store *pull; 404 struct entropy_store *pull;
403 405
404 /* read-write data: */ 406 /* read-write data: */
405 spinlock_t lock ____cacheline_aligned_in_smp; 407 spinlock_t lock;
406 unsigned add_ptr; 408 unsigned add_ptr;
407 int entropy_count; 409 int entropy_count;
408 int input_rotate; 410 int input_rotate;
@@ -438,25 +440,26 @@ static struct entropy_store nonblocking_pool = {
438}; 440};
439 441
440/* 442/*
441 * This function adds a byte into the entropy "pool". It does not 443 * This function adds bytes into the entropy "pool". It does not
442 * update the entropy estimate. The caller should call 444 * update the entropy estimate. The caller should call
443 * credit_entropy_store if this is appropriate. 445 * credit_entropy_bits if this is appropriate.
444 * 446 *
445 * The pool is stirred with a primitive polynomial of the appropriate 447 * The pool is stirred with a primitive polynomial of the appropriate
446 * degree, and then twisted. We twist by three bits at a time because 448 * degree, and then twisted. We twist by three bits at a time because
447 * it's cheap to do so and helps slightly in the expected case where 449 * it's cheap to do so and helps slightly in the expected case where
448 * the entropy is concentrated in the low-order bits. 450 * the entropy is concentrated in the low-order bits.
449 */ 451 */
450static void __add_entropy_words(struct entropy_store *r, const __u32 *in, 452static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
451 int nwords, __u32 out[16]) 453 int nbytes, __u8 out[64])
452{ 454{
453 static __u32 const twist_table[8] = { 455 static __u32 const twist_table[8] = {
454 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, 456 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
455 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; 457 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
456 unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5; 458 unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
457 int new_rotate, input_rotate; 459 int input_rotate;
458 int wordmask = r->poolinfo->poolwords - 1; 460 int wordmask = r->poolinfo->poolwords - 1;
459 __u32 w, next_w; 461 const char *bytes = in;
462 __u32 w;
460 unsigned long flags; 463 unsigned long flags;
461 464
462 /* Taps are constant, so we can load them without holding r->lock. */ 465 /* Taps are constant, so we can load them without holding r->lock. */
@@ -465,78 +468,76 @@ static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
465 tap3 = r->poolinfo->tap3; 468 tap3 = r->poolinfo->tap3;
466 tap4 = r->poolinfo->tap4; 469 tap4 = r->poolinfo->tap4;
467 tap5 = r->poolinfo->tap5; 470 tap5 = r->poolinfo->tap5;
468 next_w = *in++;
469 471
470 spin_lock_irqsave(&r->lock, flags); 472 spin_lock_irqsave(&r->lock, flags);
471 prefetch_range(r->pool, wordmask);
472 input_rotate = r->input_rotate; 473 input_rotate = r->input_rotate;
473 add_ptr = r->add_ptr; 474 i = r->add_ptr;
474 475
475 while (nwords--) { 476 /* mix one byte at a time to simplify size handling and churn faster */
476 w = rol32(next_w, input_rotate); 477 while (nbytes--) {
477 if (nwords > 0) 478 w = rol32(*bytes++, input_rotate & 31);
478 next_w = *in++; 479 i = (i - 1) & wordmask;
479 i = add_ptr = (add_ptr - 1) & wordmask;
480 /*
481 * Normally, we add 7 bits of rotation to the pool.
482 * At the beginning of the pool, add an extra 7 bits
483 * rotation, so that successive passes spread the
484 * input bits across the pool evenly.
485 */
486 new_rotate = input_rotate + 14;
487 if (i)
488 new_rotate = input_rotate + 7;
489 input_rotate = new_rotate & 31;
490 480
491 /* XOR in the various taps */ 481 /* XOR in the various taps */
482 w ^= r->pool[i];
492 w ^= r->pool[(i + tap1) & wordmask]; 483 w ^= r->pool[(i + tap1) & wordmask];
493 w ^= r->pool[(i + tap2) & wordmask]; 484 w ^= r->pool[(i + tap2) & wordmask];
494 w ^= r->pool[(i + tap3) & wordmask]; 485 w ^= r->pool[(i + tap3) & wordmask];
495 w ^= r->pool[(i + tap4) & wordmask]; 486 w ^= r->pool[(i + tap4) & wordmask];
496 w ^= r->pool[(i + tap5) & wordmask]; 487 w ^= r->pool[(i + tap5) & wordmask];
497 w ^= r->pool[i]; 488
489 /* Mix the result back in with a twist */
498 r->pool[i] = (w >> 3) ^ twist_table[w & 7]; 490 r->pool[i] = (w >> 3) ^ twist_table[w & 7];
491
492 /*
493 * Normally, we add 7 bits of rotation to the pool.
494 * At the beginning of the pool, add an extra 7 bits
495 * rotation, so that successive passes spread the
496 * input bits across the pool evenly.
497 */
498 input_rotate += i ? 7 : 14;
499 } 499 }
500 500
501 r->input_rotate = input_rotate; 501 r->input_rotate = input_rotate;
502 r->add_ptr = add_ptr; 502 r->add_ptr = i;
503 503
504 if (out) { 504 if (out)
505 for (i = 0; i < 16; i++) { 505 for (j = 0; j < 16; j++)
506 out[i] = r->pool[add_ptr]; 506 ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
507 add_ptr = (add_ptr - 1) & wordmask;
508 }
509 }
510 507
511 spin_unlock_irqrestore(&r->lock, flags); 508 spin_unlock_irqrestore(&r->lock, flags);
512} 509}
513 510
514static inline void add_entropy_words(struct entropy_store *r, const __u32 *in, 511static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
515 int nwords)
516{ 512{
517 __add_entropy_words(r, in, nwords, NULL); 513 mix_pool_bytes_extract(r, in, bytes, NULL);
518} 514}
519 515
520/* 516/*
521 * Credit (or debit) the entropy store with n bits of entropy 517 * Credit (or debit) the entropy store with n bits of entropy
522 */ 518 */
523static void credit_entropy_store(struct entropy_store *r, int nbits) 519static void credit_entropy_bits(struct entropy_store *r, int nbits)
524{ 520{
525 unsigned long flags; 521 unsigned long flags;
526 522
523 if (!nbits)
524 return;
525
527 spin_lock_irqsave(&r->lock, flags); 526 spin_lock_irqsave(&r->lock, flags);
528 527
529 if (r->entropy_count + nbits < 0) { 528 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
530 DEBUG_ENT("negative entropy/overflow (%d+%d)\n", 529 r->entropy_count += nbits;
531 r->entropy_count, nbits); 530 if (r->entropy_count < 0) {
531 DEBUG_ENT("negative entropy/overflow\n");
532 r->entropy_count = 0; 532 r->entropy_count = 0;
533 } else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) { 533 } else if (r->entropy_count > r->poolinfo->POOLBITS)
534 r->entropy_count = r->poolinfo->POOLBITS; 534 r->entropy_count = r->poolinfo->POOLBITS;
535 } else { 535
536 r->entropy_count += nbits; 536 /* should we wake readers? */
537 if (nbits) 537 if (r == &input_pool &&
538 DEBUG_ENT("added %d entropy credits to %s\n", 538 r->entropy_count >= random_read_wakeup_thresh) {
539 nbits, r->name); 539 wake_up_interruptible(&random_read_wait);
540 kill_fasync(&fasync, SIGIO, POLL_IN);
540 } 541 }
541 542
542 spin_unlock_irqrestore(&r->lock, flags); 543 spin_unlock_irqrestore(&r->lock, flags);
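
The rewritten mixer takes arbitrary byte strings instead of word arrays: each input byte is rotated, XORed with the pool word at the add pointer and at five tap offsets, then "twisted" by shifting right three bits and XORing in a constant selected by the low three bits. The standalone sketch below reproduces that inner loop against a small pool so it can be compiled and run in user space; the pool size and tap offsets are illustrative, not the kernel's poolinfo values.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POOLWORDS 32			/* illustrative pool size */
static uint32_t pool[POOLWORDS];

static uint32_t rol32(uint32_t w, unsigned s)
{
	return (w << (s & 31)) | (w >> ((32 - s) & 31));
}

/* Same twist constants as the patch above. */
static const uint32_t twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
};

static void mix_bytes(const void *in, size_t nbytes)
{
	static unsigned i, input_rotate;	/* persistent mixer state */
	const unsigned char *bytes = in;
	const unsigned wordmask = POOLWORDS - 1;
	/* illustrative tap offsets, not the kernel's poolinfo taps */
	const unsigned tap1 = 26, tap2 = 20, tap3 = 14, tap4 = 7, tap5 = 1;
	uint32_t w;

	while (nbytes--) {
		w = rol32(*bytes++, input_rotate & 31);
		i = (i - 1) & wordmask;

		/* XOR in the pool word at the add pointer and the taps */
		w ^= pool[i];
		w ^= pool[(i + tap1) & wordmask];
		w ^= pool[(i + tap2) & wordmask];
		w ^= pool[(i + tap3) & wordmask];
		w ^= pool[(i + tap4) & wordmask];
		w ^= pool[(i + tap5) & wordmask];

		/* mix the result back in with a 3-bit twist */
		pool[i] = (w >> 3) ^ twist_table[w & 7];

		/* extra rotation at the pool start spreads input evenly */
		input_rotate += i ? 7 : 14;
	}
}

int main(void)
{
	const char *sample = "some sample input";

	mix_bytes(sample, strlen(sample));
	printf("pool[0] = %08x\n", pool[0]);
	return 0;
}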
@@ -551,7 +552,7 @@ static void credit_entropy_store(struct entropy_store *r, int nbits)
551/* There is one of these per entropy source */ 552/* There is one of these per entropy source */
552struct timer_rand_state { 553struct timer_rand_state {
553 cycles_t last_time; 554 cycles_t last_time;
554 long last_delta,last_delta2; 555 long last_delta, last_delta2;
555 unsigned dont_count_entropy:1; 556 unsigned dont_count_entropy:1;
556}; 557};
557 558
@@ -586,7 +587,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
586 sample.jiffies = jiffies; 587 sample.jiffies = jiffies;
587 sample.cycles = get_cycles(); 588 sample.cycles = get_cycles();
588 sample.num = num; 589 sample.num = num;
589 add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4); 590 mix_pool_bytes(&input_pool, &sample, sizeof(sample));
590 591
591 /* 592 /*
592 * Calculate number of bits of randomness we probably added. 593 * Calculate number of bits of randomness we probably added.
@@ -620,13 +621,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
620 * Round down by 1 bit on general principles, 621 * Round down by 1 bit on general principles,
 621 * and limit entropy estimate to 12 bits. 622 * and limit entropy estimate to 12 bits.

622 */ 623 */
623 credit_entropy_store(&input_pool, 624 credit_entropy_bits(&input_pool,
624 min_t(int, fls(delta>>1), 11)); 625 min_t(int, fls(delta>>1), 11));
625 } 626 }
626
627 if(input_pool.entropy_count >= random_read_wakeup_thresh)
628 wake_up_interruptible(&random_read_wait);
629
630out: 627out:
631 preempt_enable(); 628 preempt_enable();
632} 629}
@@ -677,7 +674,7 @@ void add_disk_randomness(struct gendisk *disk)
677 * 674 *
678 *********************************************************************/ 675 *********************************************************************/
679 676
680static ssize_t extract_entropy(struct entropy_store *r, void * buf, 677static ssize_t extract_entropy(struct entropy_store *r, void *buf,
681 size_t nbytes, int min, int rsvd); 678 size_t nbytes, int min, int rsvd);
682 679
683/* 680/*
@@ -704,10 +701,10 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
704 "(%d of %d requested)\n", 701 "(%d of %d requested)\n",
705 r->name, bytes * 8, nbytes * 8, r->entropy_count); 702 r->name, bytes * 8, nbytes * 8, r->entropy_count);
706 703
707 bytes=extract_entropy(r->pull, tmp, bytes, 704 bytes = extract_entropy(r->pull, tmp, bytes,
708 random_read_wakeup_thresh / 8, rsvd); 705 random_read_wakeup_thresh / 8, rsvd);
709 add_entropy_words(r, tmp, (bytes + 3) / 4); 706 mix_pool_bytes(r, tmp, bytes);
710 credit_entropy_store(r, bytes*8); 707 credit_entropy_bits(r, bytes*8);
711 } 708 }
712} 709}
713 710
@@ -744,13 +741,15 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
744 if (r->limit && nbytes + reserved >= r->entropy_count / 8) 741 if (r->limit && nbytes + reserved >= r->entropy_count / 8)
745 nbytes = r->entropy_count/8 - reserved; 742 nbytes = r->entropy_count/8 - reserved;
746 743
747 if(r->entropy_count / 8 >= nbytes + reserved) 744 if (r->entropy_count / 8 >= nbytes + reserved)
748 r->entropy_count -= nbytes*8; 745 r->entropy_count -= nbytes*8;
749 else 746 else
750 r->entropy_count = reserved; 747 r->entropy_count = reserved;
751 748
752 if (r->entropy_count < random_write_wakeup_thresh) 749 if (r->entropy_count < random_write_wakeup_thresh) {
753 wake_up_interruptible(&random_write_wait); 750 wake_up_interruptible(&random_write_wait);
751 kill_fasync(&fasync, SIGIO, POLL_OUT);
752 }
754 } 753 }
755 754
756 DEBUG_ENT("debiting %d entropy credits from %s%s\n", 755 DEBUG_ENT("debiting %d entropy credits from %s%s\n",
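
Alongside the wakeups, the patch adds SIGIO notification: one fasync list is kicked with POLL_IN when the input pool crosses the read threshold and with POLL_OUT when entropy drops below the write threshold. The registration side is not visible in the hunks shown here, but it is presumably the usual file_operations ->fasync one-liner sketched below.

#include <linux/fs.h>

static struct fasync_struct *fasync;

/* Register/unregister the caller for SIGIO on /dev/random readiness. */
static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

/*
 * Producers then signal readers and writers, as the hunks above do:
 *	kill_fasync(&fasync, SIGIO, POLL_IN);	pool refilled
 *	kill_fasync(&fasync, SIGIO, POLL_OUT);	pool drained
 */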
@@ -764,45 +763,46 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
764static void extract_buf(struct entropy_store *r, __u8 *out) 763static void extract_buf(struct entropy_store *r, __u8 *out)
765{ 764{
766 int i; 765 int i;
767 __u32 data[16], buf[5 + SHA_WORKSPACE_WORDS]; 766 __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
767 __u8 extract[64];
768
769 /* Generate a hash across the pool, 16 words (512 bits) at a time */
770 sha_init(hash);
771 for (i = 0; i < r->poolinfo->poolwords; i += 16)
772 sha_transform(hash, (__u8 *)(r->pool + i), workspace);
768 773
769 sha_init(buf);
770 /* 774 /*
771 * As we hash the pool, we mix intermediate values of 775 * We mix the hash back into the pool to prevent backtracking
772 * the hash back into the pool. This eliminates 776 * attacks (where the attacker knows the state of the pool
773 * backtracking attacks (where the attacker knows 777 * plus the current outputs, and attempts to find previous
774 * the state of the pool plus the current outputs, and 778 * ouputs), unless the hash function can be inverted. By
775 * attempts to find previous ouputs), unless the hash 779 * mixing at least a SHA1 worth of hash data back, we make
776 * function can be inverted. 780 * brute-forcing the feedback as hard as brute-forcing the
781 * hash.
777 */ 782 */
778 for (i = 0; i < r->poolinfo->poolwords; i += 16) { 783 mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
779 /* hash blocks of 16 words = 512 bits */
780 sha_transform(buf, (__u8 *)(r->pool + i), buf + 5);
781 /* feed back portion of the resulting hash */
782 add_entropy_words(r, &buf[i % 5], 1);
783 }
784 784
785 /* 785 /*
786 * To avoid duplicates, we atomically extract a 786 * To avoid duplicates, we atomically extract a portion of the
787 * portion of the pool while mixing, and hash one 787 * pool while mixing, and hash one final time.
788 * final time.
789 */ 788 */
790 __add_entropy_words(r, &buf[i % 5], 1, data); 789 sha_transform(hash, extract, workspace);
791 sha_transform(buf, (__u8 *)data, buf + 5); 790 memset(extract, 0, sizeof(extract));
791 memset(workspace, 0, sizeof(workspace));
792 792
793 /* 793 /*
794 * In case the hash function has some recognizable 794 * In case the hash function has some recognizable output
795 * output pattern, we fold it in half. 795 * pattern, we fold it in half. Thus, we always feed back
796 * twice as much data as we output.
796 */ 797 */
797 798 hash[0] ^= hash[3];
798 buf[0] ^= buf[3]; 799 hash[1] ^= hash[4];
799 buf[1] ^= buf[4]; 800 hash[2] ^= rol32(hash[2], 16);
800 buf[2] ^= rol32(buf[2], 16); 801 memcpy(out, hash, EXTRACT_SIZE);
801 memcpy(out, buf, EXTRACT_SIZE); 802 memset(hash, 0, sizeof(hash));
802 memset(buf, 0, sizeof(buf));
803} 803}
804 804
805static ssize_t extract_entropy(struct entropy_store *r, void * buf, 805static ssize_t extract_entropy(struct entropy_store *r, void *buf,
806 size_t nbytes, int min, int reserved) 806 size_t nbytes, int min, int reserved)
807{ 807{
808 ssize_t ret = 0, i; 808 ssize_t ret = 0, i;
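The rewritten extract_buf() above hashes the whole pool with SHA-1, feeds the hash back through mix_pool_bytes_extract(), and then folds the final 20-byte digest in half so only EXTRACT_SIZE bytes ever leave the pool. A standalone sketch of just that folding step, assuming EXTRACT_SIZE is half a SHA-1 digest (10 bytes) and using a local rol32() helper; it mirrors the last few lines of the function rather than reproducing the kernel's SHA code:

#include <stdint.h>
#include <string.h>

#define EXTRACT_SIZE 10			/* assumed: half of a 20-byte SHA-1 digest */

/* Rotate left by s bits, 1 <= s <= 31. */
static uint32_t rol32(uint32_t w, unsigned int s)
{
	return (w << s) | (w >> (32 - s));
}

/* Fold a 5-word SHA-1 state in half, emit EXTRACT_SIZE output bytes,
 * then wipe the state so it cannot leak. */
static void fold_and_extract(uint32_t hash[5], uint8_t out[EXTRACT_SIZE])
{
	hash[0] ^= hash[3];
	hash[1] ^= hash[4];
	hash[2] ^= rol32(hash[2], 16);
	memcpy(out, hash, EXTRACT_SIZE);
	memset(hash, 0, 5 * sizeof(hash[0]));
}

The fold means an observer of the output still faces the full 160-bit hash state, while the pool has absorbed twice as much hash data as was ever emitted.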
@@ -872,7 +872,6 @@ void get_random_bytes(void *buf, int nbytes)
872{ 872{
873 extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); 873 extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
874} 874}
875
876EXPORT_SYMBOL(get_random_bytes); 875EXPORT_SYMBOL(get_random_bytes);
877 876
878/* 877/*
@@ -894,12 +893,11 @@ static void init_std_data(struct entropy_store *r)
894 spin_unlock_irqrestore(&r->lock, flags); 893 spin_unlock_irqrestore(&r->lock, flags);
895 894
896 now = ktime_get_real(); 895 now = ktime_get_real();
897 add_entropy_words(r, (__u32 *)&now, sizeof(now)/4); 896 mix_pool_bytes(r, &now, sizeof(now));
898 add_entropy_words(r, (__u32 *)utsname(), 897 mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
899 sizeof(*(utsname()))/4);
900} 898}
901 899
902static int __init rand_initialize(void) 900static int rand_initialize(void)
903{ 901{
904 init_std_data(&input_pool); 902 init_std_data(&input_pool);
905 init_std_data(&blocking_pool); 903 init_std_data(&blocking_pool);
@@ -940,7 +938,7 @@ void rand_initialize_disk(struct gendisk *disk)
940#endif 938#endif
941 939
942static ssize_t 940static ssize_t
943random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos) 941random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
944{ 942{
945 ssize_t n, retval = 0, count = 0; 943 ssize_t n, retval = 0, count = 0;
946 944
@@ -1002,8 +1000,7 @@ random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
1002} 1000}
1003 1001
1004static ssize_t 1002static ssize_t
1005urandom_read(struct file * file, char __user * buf, 1003urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1006 size_t nbytes, loff_t *ppos)
1007{ 1004{
1008 return extract_entropy_user(&nonblocking_pool, buf, nbytes); 1005 return extract_entropy_user(&nonblocking_pool, buf, nbytes);
1009} 1006}
@@ -1038,16 +1035,15 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1038 count -= bytes; 1035 count -= bytes;
1039 p += bytes; 1036 p += bytes;
1040 1037
1041 add_entropy_words(r, buf, (bytes + 3) / 4); 1038 mix_pool_bytes(r, buf, bytes);
1042 cond_resched(); 1039 cond_resched();
1043 } 1040 }
1044 1041
1045 return 0; 1042 return 0;
1046} 1043}
1047 1044
1048static ssize_t 1045static ssize_t random_write(struct file *file, const char __user *buffer,
1049random_write(struct file * file, const char __user * buffer, 1046 size_t count, loff_t *ppos)
1050 size_t count, loff_t *ppos)
1051{ 1047{
1052 size_t ret; 1048 size_t ret;
1053 struct inode *inode = file->f_path.dentry->d_inode; 1049 struct inode *inode = file->f_path.dentry->d_inode;
@@ -1064,9 +1060,7 @@ random_write(struct file * file, const char __user * buffer,
1064 return (ssize_t)count; 1060 return (ssize_t)count;
1065} 1061}
1066 1062
1067static int 1063static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1068random_ioctl(struct inode * inode, struct file * file,
1069 unsigned int cmd, unsigned long arg)
1070{ 1064{
1071 int size, ent_count; 1065 int size, ent_count;
1072 int __user *p = (int __user *)arg; 1066 int __user *p = (int __user *)arg;
@@ -1074,8 +1068,8 @@ random_ioctl(struct inode * inode, struct file * file,
1074 1068
1075 switch (cmd) { 1069 switch (cmd) {
1076 case RNDGETENTCNT: 1070 case RNDGETENTCNT:
1077 ent_count = input_pool.entropy_count; 1071 /* inherently racy, no point locking */
1078 if (put_user(ent_count, p)) 1072 if (put_user(input_pool.entropy_count, p))
1079 return -EFAULT; 1073 return -EFAULT;
1080 return 0; 1074 return 0;
1081 case RNDADDTOENTCNT: 1075 case RNDADDTOENTCNT:
@@ -1083,13 +1077,7 @@ random_ioctl(struct inode * inode, struct file * file,
1083 return -EPERM; 1077 return -EPERM;
1084 if (get_user(ent_count, p)) 1078 if (get_user(ent_count, p))
1085 return -EFAULT; 1079 return -EFAULT;
1086 credit_entropy_store(&input_pool, ent_count); 1080 credit_entropy_bits(&input_pool, ent_count);
1087 /*
1088 * Wake up waiting processes if we have enough
1089 * entropy.
1090 */
1091 if (input_pool.entropy_count >= random_read_wakeup_thresh)
1092 wake_up_interruptible(&random_read_wait);
1093 return 0; 1081 return 0;
1094 case RNDADDENTROPY: 1082 case RNDADDENTROPY:
1095 if (!capable(CAP_SYS_ADMIN)) 1083 if (!capable(CAP_SYS_ADMIN))
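With the explicit wake_up_interruptible() calls dropped, both RNDADDTOENTCNT here and RNDADDENTROPY in the next hunk rely on credit_entropy_bits() to wake readers once enough entropy has been credited. The userspace interface is unchanged; a hedged sketch of the privileged RNDADDENTROPY path (requires CAP_SYS_ADMIN; structure and request names as in <linux/random.h>, buffer size chosen arbitrarily):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/random.h>		/* RNDADDENTROPY, struct rand_pool_info */

/* Mix `len` bytes into the input pool and credit `bits` bits of entropy.
 * `fd` is an open descriptor on /dev/random. */
static int add_entropy(int fd, const void *data, int len, int bits)
{
	struct {
		struct rand_pool_info info;	/* header: entropy_count, buf_size */
		unsigned char buf[512];		/* backs the flexible buf[] member */
	} req;

	if (len < 0 || len > (int)sizeof(req.buf))
		return -1;
	req.info.entropy_count = bits;
	req.info.buf_size = len;
	memcpy(req.info.buf, data, len);
	return ioctl(fd, RNDADDENTROPY, &req.info);
}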
@@ -1104,39 +1092,45 @@ random_ioctl(struct inode * inode, struct file * file,
1104 size); 1092 size);
1105 if (retval < 0) 1093 if (retval < 0)
1106 return retval; 1094 return retval;
1107 credit_entropy_store(&input_pool, ent_count); 1095 credit_entropy_bits(&input_pool, ent_count);
1108 /*
1109 * Wake up waiting processes if we have enough
1110 * entropy.
1111 */
1112 if (input_pool.entropy_count >= random_read_wakeup_thresh)
1113 wake_up_interruptible(&random_read_wait);
1114 return 0; 1096 return 0;
1115 case RNDZAPENTCNT: 1097 case RNDZAPENTCNT:
1116 case RNDCLEARPOOL: 1098 case RNDCLEARPOOL:
1117 /* Clear the entropy pool counters. */ 1099 /* Clear the entropy pool counters. */
1118 if (!capable(CAP_SYS_ADMIN)) 1100 if (!capable(CAP_SYS_ADMIN))
1119 return -EPERM; 1101 return -EPERM;
1120 init_std_data(&input_pool); 1102 rand_initialize();
1121 init_std_data(&blocking_pool);
1122 init_std_data(&nonblocking_pool);
1123 return 0; 1103 return 0;
1124 default: 1104 default:
1125 return -EINVAL; 1105 return -EINVAL;
1126 } 1106 }
1127} 1107}
1128 1108
1109static int random_fasync(int fd, struct file *filp, int on)
1110{
1111 return fasync_helper(fd, filp, on, &fasync);
1112}
1113
1114static int random_release(struct inode *inode, struct file *filp)
1115{
1116 return fasync_helper(-1, filp, 0, &fasync);
1117}
1118
1129const struct file_operations random_fops = { 1119const struct file_operations random_fops = {
1130 .read = random_read, 1120 .read = random_read,
1131 .write = random_write, 1121 .write = random_write,
1132 .poll = random_poll, 1122 .poll = random_poll,
1133 .ioctl = random_ioctl, 1123 .unlocked_ioctl = random_ioctl,
1124 .fasync = random_fasync,
1125 .release = random_release,
1134}; 1126};
1135 1127
1136const struct file_operations urandom_fops = { 1128const struct file_operations urandom_fops = {
1137 .read = urandom_read, 1129 .read = urandom_read,
1138 .write = random_write, 1130 .write = random_write,
1139 .ioctl = random_ioctl, 1131 .unlocked_ioctl = random_ioctl,
1132 .fasync = random_fasync,
1133 .release = random_release,
1140}; 1134};
1141 1135
1142/*************************************************************** 1136/***************************************************************
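Because random_fops and urandom_fops now carry .fasync and .release handlers, a process can ask for SIGIO instead of sitting in poll()/select(). A minimal sketch of opting in from userspace; the handler body and the choice of /dev/random are illustrative, and which transitions raise SIGIO depends on where the driver calls kill_fasync() (for example POLL_OUT in account() above):

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_sigio(int sig)
{
	(void)sig;		/* set a flag; do the real work in the main loop */
}

/* Open /dev/random and arrange for SIGIO delivery via the new fasync hook. */
static int watch_random(void)
{
	int fd = open("/dev/random", O_RDWR);

	if (fd < 0)
		return -1;
	signal(SIGIO, on_sigio);
	fcntl(fd, F_SETOWN, getpid());				/* deliver SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	/* enable async notification */
	return fd;
}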
@@ -1157,7 +1151,6 @@ void generate_random_uuid(unsigned char uuid_out[16])
1157 /* Set the UUID variant to DCE */ 1151 /* Set the UUID variant to DCE */
1158 uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80; 1152 uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
1159} 1153}
1160
1161EXPORT_SYMBOL(generate_random_uuid); 1154EXPORT_SYMBOL(generate_random_uuid);
1162 1155
1163/******************************************************************** 1156/********************************************************************
@@ -1339,7 +1332,7 @@ ctl_table random_table[] = {
1339 1332
1340#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1333#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1341 1334
1342static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12]) 1335static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
1343{ 1336{
1344 __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; 1337 __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
1345 1338
@@ -1487,8 +1480,8 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
1487 */ 1480 */
1488 1481
1489 memcpy(hash, saddr, 16); 1482 memcpy(hash, saddr, 16);
1490 hash[4]=((__force u16)sport << 16) + (__force u16)dport; 1483 hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
1491 memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7); 1484 memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
1492 1485
1493 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; 1486 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
1494 seq += keyptr->count; 1487 seq += keyptr->count;
@@ -1538,10 +1531,10 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
1538 * Note that the words are placed into the starting vector, which is 1531 * Note that the words are placed into the starting vector, which is
1539 * then mixed with a partial MD4 over random data. 1532 * then mixed with a partial MD4 over random data.
1540 */ 1533 */
1541 hash[0]=(__force u32)saddr; 1534 hash[0] = (__force u32)saddr;
1542 hash[1]=(__force u32)daddr; 1535 hash[1] = (__force u32)daddr;
1543 hash[2]=((__force u16)sport << 16) + (__force u16)dport; 1536 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
1544 hash[3]=keyptr->secret[11]; 1537 hash[3] = keyptr->secret[11];
1545 1538
1546 seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK; 1539 seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
1547 seq += keyptr->count; 1540 seq += keyptr->count;
@@ -1556,10 +1549,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
1556 * Choosing a clock of 64 ns period is OK. (period of 274 s) 1549 * Choosing a clock of 64 ns period is OK. (period of 274 s)
1557 */ 1550 */
1558 seq += ktime_to_ns(ktime_get_real()) >> 6; 1551 seq += ktime_to_ns(ktime_get_real()) >> 6;
1559#if 0 1552
1560 printk("init_seq(%lx, %lx, %d, %d) = %d\n",
1561 saddr, daddr, sport, dport, seq);
1562#endif
1563 return seq; 1553 return seq;
1564} 1554}
1565 1555
@@ -1582,14 +1572,15 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
1582} 1572}
1583 1573
1584#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1574#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1585u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport) 1575u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
1576 __be16 dport)
1586{ 1577{
1587 struct keydata *keyptr = get_keyptr(); 1578 struct keydata *keyptr = get_keyptr();
1588 u32 hash[12]; 1579 u32 hash[12];
1589 1580
1590 memcpy(hash, saddr, 16); 1581 memcpy(hash, saddr, 16);
1591 hash[4] = (__force u32)dport; 1582 hash[4] = (__force u32)dport;
1592 memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7); 1583 memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
1593 1584
1594 return twothirdsMD4Transform((const __u32 *)daddr, hash); 1585 return twothirdsMD4Transform((const __u32 *)daddr, hash);
1595} 1586}
@@ -1617,13 +1608,9 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
1617 1608
1618 seq += ktime_to_ns(ktime_get_real()); 1609 seq += ktime_to_ns(ktime_get_real());
1619 seq &= (1ull << 48) - 1; 1610 seq &= (1ull << 48) - 1;
1620#if 0 1611
1621 printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
1622 saddr, daddr, sport, dport, seq);
1623#endif
1624 return seq; 1612 return seq;
1625} 1613}
1626
1627EXPORT_SYMBOL(secure_dccp_sequence_number); 1614EXPORT_SYMBOL(secure_dccp_sequence_number);
1628#endif 1615#endif
1629 1616
diff --git a/drivers/char/rio/cirrus.h b/drivers/char/rio/cirrus.h
index f4f837f86829..a03a538a3efb 100644
--- a/drivers/char/rio/cirrus.h
+++ b/drivers/char/rio/cirrus.h
@@ -43,83 +43,83 @@
43/* Bit fields for particular registers shared with driver */ 43/* Bit fields for particular registers shared with driver */
44 44
45/* COR1 - driver and RTA */ 45/* COR1 - driver and RTA */
46#define COR1_ODD 0x80 /* Odd parity */ 46#define RIOC_COR1_ODD 0x80 /* Odd parity */
47#define COR1_EVEN 0x00 /* Even parity */ 47#define RIOC_COR1_EVEN 0x00 /* Even parity */
48#define COR1_NOP 0x00 /* No parity */ 48#define RIOC_COR1_NOP 0x00 /* No parity */
49#define COR1_FORCE 0x20 /* Force parity */ 49#define RIOC_COR1_FORCE 0x20 /* Force parity */
50#define COR1_NORMAL 0x40 /* With parity */ 50#define RIOC_COR1_NORMAL 0x40 /* With parity */
51#define COR1_1STOP 0x00 /* 1 stop bit */ 51#define RIOC_COR1_1STOP 0x00 /* 1 stop bit */
52#define COR1_15STOP 0x04 /* 1.5 stop bits */ 52#define RIOC_COR1_15STOP 0x04 /* 1.5 stop bits */
53#define COR1_2STOP 0x08 /* 2 stop bits */ 53#define RIOC_COR1_2STOP 0x08 /* 2 stop bits */
54#define COR1_5BITS 0x00 /* 5 data bits */ 54#define RIOC_COR1_5BITS 0x00 /* 5 data bits */
55#define COR1_6BITS 0x01 /* 6 data bits */ 55#define RIOC_COR1_6BITS 0x01 /* 6 data bits */
56#define COR1_7BITS 0x02 /* 7 data bits */ 56#define RIOC_COR1_7BITS 0x02 /* 7 data bits */
57#define COR1_8BITS 0x03 /* 8 data bits */ 57#define RIOC_COR1_8BITS 0x03 /* 8 data bits */
58 58
59#define COR1_HOST 0xef /* Safe host bits */ 59#define RIOC_COR1_HOST 0xef /* Safe host bits */
60 60
61/* RTA only */ 61/* RTA only */
62#define COR1_CINPCK 0x00 /* Check parity of received characters */ 62#define RIOC_COR1_CINPCK 0x00 /* Check parity of received characters */
63#define COR1_CNINPCK 0x10 /* Don't check parity */ 63#define RIOC_COR1_CNINPCK 0x10 /* Don't check parity */
64 64
65/* COR2 bits for both RTA and driver use */ 65/* COR2 bits for both RTA and driver use */
66#define COR2_IXANY 0x80 /* IXANY - any character is XON */ 66#define RIOC_COR2_IXANY 0x80 /* IXANY - any character is XON */
67#define COR2_IXON 0x40 /* IXON - enable tx soft flowcontrol */ 67#define RIOC_COR2_IXON 0x40 /* IXON - enable tx soft flowcontrol */
68#define COR2_RTSFLOW 0x02 /* Enable tx hardware flow control */ 68#define RIOC_COR2_RTSFLOW 0x02 /* Enable tx hardware flow control */
69 69
70/* Additional driver bits */ 70/* Additional driver bits */
71#define COR2_HUPCL 0x20 /* Hang up on close */ 71#define RIOC_COR2_HUPCL 0x20 /* Hang up on close */
72#define COR2_CTSFLOW 0x04 /* Enable rx hardware flow control */ 72#define RIOC_COR2_CTSFLOW 0x04 /* Enable rx hardware flow control */
73#define COR2_IXOFF 0x01 /* Enable rx software flow control */ 73#define RIOC_COR2_IXOFF 0x01 /* Enable rx software flow control */
74#define COR2_DTRFLOW 0x08 /* Enable tx hardware flow control */ 74#define RIOC_COR2_DTRFLOW 0x08 /* Enable tx hardware flow control */
75 75
76/* RTA use only */ 76/* RTA use only */
77#define COR2_ETC 0x20 /* Embedded transmit options */ 77#define RIOC_COR2_ETC 0x20 /* Embedded transmit options */
78#define COR2_LOCAL 0x10 /* Local loopback mode */ 78#define RIOC_COR2_LOCAL 0x10 /* Local loopback mode */
79#define COR2_REMOTE 0x08 /* Remote loopback mode */ 79#define RIOC_COR2_REMOTE 0x08 /* Remote loopback mode */
80#define COR2_HOST 0xc2 /* Safe host bits */ 80#define RIOC_COR2_HOST 0xc2 /* Safe host bits */
81 81
82/* COR3 - RTA use only */ 82/* COR3 - RTA use only */
83#define COR3_SCDRNG 0x80 /* Enable special char detect for range */ 83#define RIOC_COR3_SCDRNG 0x80 /* Enable special char detect for range */
84#define COR3_SCD34 0x40 /* Special character detect for SCHR's 3 + 4 */ 84#define RIOC_COR3_SCD34 0x40 /* Special character detect for SCHR's 3 + 4 */
85#define COR3_FCT 0x20 /* Flow control transparency */ 85#define RIOC_COR3_FCT 0x20 /* Flow control transparency */
86#define COR3_SCD12 0x10 /* Special character detect for SCHR's 1 + 2 */ 86#define RIOC_COR3_SCD12 0x10 /* Special character detect for SCHR's 1 + 2 */
87#define COR3_FIFO12 0x0c /* 12 chars for receive FIFO threshold */ 87#define RIOC_COR3_FIFO12 0x0c /* 12 chars for receive FIFO threshold */
88#define COR3_FIFO10 0x0a /* 10 chars for receive FIFO threshold */ 88#define RIOC_COR3_FIFO10 0x0a /* 10 chars for receive FIFO threshold */
89#define COR3_FIFO8 0x08 /* 8 chars for receive FIFO threshold */ 89#define RIOC_COR3_FIFO8 0x08 /* 8 chars for receive FIFO threshold */
90#define COR3_FIFO6 0x06 /* 6 chars for receive FIFO threshold */ 90#define RIOC_COR3_FIFO6 0x06 /* 6 chars for receive FIFO threshold */
91 91
92#define COR3_THRESHOLD COR3_FIFO8 /* MUST BE LESS THAN MCOR_THRESHOLD */ 92#define RIOC_COR3_THRESHOLD RIOC_COR3_FIFO8 /* MUST BE LESS THAN MCOR_THRESHOLD */
93 93
94#define COR3_DEFAULT (COR3_FCT | COR3_THRESHOLD) 94#define RIOC_COR3_DEFAULT (RIOC_COR3_FCT | RIOC_COR3_THRESHOLD)
95 /* Default bits for COR3 */ 95 /* Default bits for COR3 */
96 96
97/* COR4 driver and RTA use */ 97/* COR4 driver and RTA use */
98#define COR4_IGNCR 0x80 /* Throw away CR's on input */ 98#define RIOC_COR4_IGNCR 0x80 /* Throw away CR's on input */
99#define COR4_ICRNL 0x40 /* Map CR -> NL on input */ 99#define RIOC_COR4_ICRNL 0x40 /* Map CR -> NL on input */
100#define COR4_INLCR 0x20 /* Map NL -> CR on input */ 100#define RIOC_COR4_INLCR 0x20 /* Map NL -> CR on input */
101#define COR4_IGNBRK 0x10 /* Ignore Break */ 101#define RIOC_COR4_IGNBRK 0x10 /* Ignore Break */
102#define COR4_NBRKINT 0x08 /* No interrupt on break (-BRKINT) */ 102#define RIOC_COR4_NBRKINT 0x08 /* No interrupt on break (-BRKINT) */
103#define COR4_RAISEMOD 0x01 /* Raise modem output lines on non-zero baud */ 103#define RIOC_COR4_RAISEMOD 0x01 /* Raise modem output lines on non-zero baud */
104 104
105 105
106/* COR4 driver only */ 106/* COR4 driver only */
107#define COR4_IGNPAR 0x04 /* IGNPAR (ignore characters with errors) */ 107#define RIOC_COR4_IGNPAR 0x04 /* IGNPAR (ignore characters with errors) */
108#define COR4_PARMRK 0x02 /* PARMRK */ 108#define RIOC_COR4_PARMRK 0x02 /* PARMRK */
109 109
110#define COR4_HOST 0xf8 /* Safe host bits */ 110#define RIOC_COR4_HOST 0xf8 /* Safe host bits */
111 111
112/* COR4 RTA only */ 112/* COR4 RTA only */
113#define COR4_CIGNPAR 0x02 /* Thrown away bad characters */ 113#define RIOC_COR4_CIGNPAR 0x02 /* Thrown away bad characters */
114#define COR4_CPARMRK 0x04 /* PARMRK characters */ 114#define RIOC_COR4_CPARMRK 0x04 /* PARMRK characters */
115#define COR4_CNPARMRK 0x03 /* Don't PARMRK */ 115#define RIOC_COR4_CNPARMRK 0x03 /* Don't PARMRK */
116 116
117/* COR5 driver and RTA use */ 117/* COR5 driver and RTA use */
118#define COR5_ISTRIP 0x80 /* Strip input chars to 7 bits */ 118#define RIOC_COR5_ISTRIP 0x80 /* Strip input chars to 7 bits */
119#define COR5_LNE 0x40 /* Enable LNEXT processing */ 119#define RIOC_COR5_LNE 0x40 /* Enable LNEXT processing */
120#define COR5_CMOE 0x20 /* Match good and errored characters */ 120#define RIOC_COR5_CMOE 0x20 /* Match good and errored characters */
121#define COR5_ONLCR 0x02 /* NL -> CR NL on output */ 121#define RIOC_COR5_ONLCR 0x02 /* NL -> CR NL on output */
122#define COR5_OCRNL 0x01 /* CR -> NL on output */ 122#define RIOC_COR5_OCRNL 0x01 /* CR -> NL on output */
123 123
124/* 124/*
125** Spare bits - these are not used in the CIRRUS registers, so we use 125** Spare bits - these are not used in the CIRRUS registers, so we use
@@ -128,86 +128,86 @@
128/* 128/*
129** tstop and tbusy indication 129** tstop and tbusy indication
130*/ 130*/
131#define COR5_TSTATE_ON 0x08 /* Turn on monitoring of tbusy and tstop */ 131#define RIOC_COR5_TSTATE_ON 0x08 /* Turn on monitoring of tbusy and tstop */
132#define COR5_TSTATE_OFF 0x04 /* Turn off monitoring of tbusy and tstop */ 132#define RIOC_COR5_TSTATE_OFF 0x04 /* Turn off monitoring of tbusy and tstop */
133/* 133/*
134** TAB3 134** TAB3
135*/ 135*/
136#define COR5_TAB3 0x10 /* TAB3 mode */ 136#define RIOC_COR5_TAB3 0x10 /* TAB3 mode */
137 137
138#define COR5_HOST 0xc3 /* Safe host bits */ 138#define RIOC_COR5_HOST 0xc3 /* Safe host bits */
139 139
140/* CCSR */ 140/* CCSR */
141#define CCSR_TXFLOFF 0x04 /* Tx is xoffed */ 141#define RIOC_CCSR_TXFLOFF 0x04 /* Tx is xoffed */
142 142
143/* MSVR1 */ 143/* MSVR1 */
144/* NB. DTR / CD swapped from Cirrus spec as the pins are also reversed on the 144/* NB. DTR / CD swapped from Cirrus spec as the pins are also reversed on the
145 RTA. This is because otherwise DCD would get lost on the 1 parallel / 3 145 RTA. This is because otherwise DCD would get lost on the 1 parallel / 3
146 serial option. 146 serial option.
147*/ 147*/
148#define MSVR1_CD 0x80 /* CD (DSR on Cirrus) */ 148#define RIOC_MSVR1_CD 0x80 /* CD (DSR on Cirrus) */
149#define MSVR1_RTS 0x40 /* RTS (CTS on Cirrus) */ 149#define RIOC_MSVR1_RTS 0x40 /* RTS (CTS on Cirrus) */
150#define MSVR1_RI 0x20 /* RI */ 150#define RIOC_MSVR1_RI 0x20 /* RI */
151#define MSVR1_DTR 0x10 /* DTR (CD on Cirrus) */ 151#define RIOC_MSVR1_DTR 0x10 /* DTR (CD on Cirrus) */
152#define MSVR1_CTS 0x01 /* CTS output pin (RTS on Cirrus) */ 152#define RIOC_MSVR1_CTS 0x01 /* CTS output pin (RTS on Cirrus) */
153/* Next two used to indicate state of tbusy and tstop to driver */ 153/* Next two used to indicate state of tbusy and tstop to driver */
154#define MSVR1_TSTOP 0x08 /* Set if port flow controlled */ 154#define RIOC_MSVR1_TSTOP 0x08 /* Set if port flow controlled */
155#define MSVR1_TEMPTY 0x04 /* Set if port tx buffer empty */ 155#define RIOC_MSVR1_TEMPTY 0x04 /* Set if port tx buffer empty */
156 156
157#define MSVR1_HOST 0xf3 /* The bits the host wants */ 157#define RIOC_MSVR1_HOST 0xf3 /* The bits the host wants */
158 158
159/* Defines for the subscripts of a CONFIG packet */ 159/* Defines for the subscripts of a CONFIG packet */
160#define CONFIG_COR1 1 /* Option register 1 */ 160#define RIOC_CONFIG_COR1 1 /* Option register 1 */
161#define CONFIG_COR2 2 /* Option register 2 */ 161#define RIOC_CONFIG_COR2 2 /* Option register 2 */
162#define CONFIG_COR4 3 /* Option register 4 */ 162#define RIOC_CONFIG_COR4 3 /* Option register 4 */
163#define CONFIG_COR5 4 /* Option register 5 */ 163#define RIOC_CONFIG_COR5 4 /* Option register 5 */
164#define CONFIG_TXXON 5 /* Tx XON character */ 164#define RIOC_CONFIG_TXXON 5 /* Tx XON character */
165#define CONFIG_TXXOFF 6 /* Tx XOFF character */ 165#define RIOC_CONFIG_TXXOFF 6 /* Tx XOFF character */
166#define CONFIG_RXXON 7 /* Rx XON character */ 166#define RIOC_CONFIG_RXXON 7 /* Rx XON character */
167#define CONFIG_RXXOFF 8 /* Rx XOFF character */ 167#define RIOC_CONFIG_RXXOFF 8 /* Rx XOFF character */
168#define CONFIG_LNEXT 9 /* LNEXT character */ 168#define RIOC_CONFIG_LNEXT 9 /* LNEXT character */
169#define CONFIG_TXBAUD 10 /* Tx baud rate */ 169#define RIOC_CONFIG_TXBAUD 10 /* Tx baud rate */
170#define CONFIG_RXBAUD 11 /* Rx baud rate */ 170#define RIOC_CONFIG_RXBAUD 11 /* Rx baud rate */
171 171
172#define PRE_EMPTIVE 0x80 /* Pre-emptive bit in command field */ 172#define RIOC_PRE_EMPTIVE 0x80 /* Pre-emptive bit in command field */
173 173
174/* Packet types going from Host to remote - with the exception of OPEN, MOPEN, 174/* Packet types going from Host to remote - with the exception of OPEN, MOPEN,
175 CONFIG, SBREAK and MEMDUMP the remaining bytes of the data array will not 175 CONFIG, SBREAK and MEMDUMP the remaining bytes of the data array will not
176 be used 176 be used
177*/ 177*/
178#define OPEN 0x00 /* Open a port */ 178#define RIOC_OPEN 0x00 /* Open a port */
179#define CONFIG 0x01 /* Configure a port */ 179#define RIOC_CONFIG 0x01 /* Configure a port */
180#define MOPEN 0x02 /* Modem open (block for DCD) */ 180#define RIOC_MOPEN 0x02 /* Modem open (block for DCD) */
181#define CLOSE 0x03 /* Close a port */ 181#define RIOC_CLOSE 0x03 /* Close a port */
182#define WFLUSH (0x04 | PRE_EMPTIVE) /* Write flush */ 182#define RIOC_WFLUSH (0x04 | RIOC_PRE_EMPTIVE) /* Write flush */
183#define RFLUSH (0x05 | PRE_EMPTIVE) /* Read flush */ 183#define RIOC_RFLUSH (0x05 | RIOC_PRE_EMPTIVE) /* Read flush */
184#define RESUME (0x06 | PRE_EMPTIVE) /* Resume if xoffed */ 184#define RIOC_RESUME (0x06 | RIOC_PRE_EMPTIVE) /* Resume if xoffed */
185#define SBREAK 0x07 /* Start break */ 185#define RIOC_SBREAK 0x07 /* Start break */
186#define EBREAK 0x08 /* End break */ 186#define RIOC_EBREAK 0x08 /* End break */
187#define SUSPEND (0x09 | PRE_EMPTIVE) /* Susp op (behave as tho xoffed) */ 187#define RIOC_SUSPEND (0x09 | RIOC_PRE_EMPTIVE) /* Susp op (behave as tho xoffed) */
188#define FCLOSE (0x0a | PRE_EMPTIVE) /* Force close */ 188#define RIOC_FCLOSE (0x0a | RIOC_PRE_EMPTIVE) /* Force close */
189#define XPRINT 0x0b /* Xprint packet */ 189#define RIOC_XPRINT 0x0b /* Xprint packet */
190#define MBIS (0x0c | PRE_EMPTIVE) /* Set modem lines */ 190#define RIOC_MBIS (0x0c | RIOC_PRE_EMPTIVE) /* Set modem lines */
191#define MBIC (0x0d | PRE_EMPTIVE) /* Clear modem lines */ 191#define RIOC_MBIC (0x0d | RIOC_PRE_EMPTIVE) /* Clear modem lines */
192#define MSET (0x0e | PRE_EMPTIVE) /* Set modem lines */ 192#define RIOC_MSET (0x0e | RIOC_PRE_EMPTIVE) /* Set modem lines */
193#define PCLOSE 0x0f /* Pseudo close - Leaves rx/tx enabled */ 193#define RIOC_PCLOSE 0x0f /* Pseudo close - Leaves rx/tx enabled */
194#define MGET (0x10 | PRE_EMPTIVE) /* Force update of modem status */ 194#define RIOC_MGET (0x10 | RIOC_PRE_EMPTIVE) /* Force update of modem status */
195#define MEMDUMP (0x11 | PRE_EMPTIVE) /* Send back mem from addr supplied */ 195#define RIOC_MEMDUMP (0x11 | RIOC_PRE_EMPTIVE) /* Send back mem from addr supplied */
196#define READ_REGISTER (0x12 | PRE_EMPTIVE) /* Read CD1400 register (debug) */ 196#define RIOC_READ_REGISTER (0x12 | RIOC_PRE_EMPTIVE) /* Read CD1400 register (debug) */
197 197
198/* "Command" packets going from remote to host COMPLETE and MODEM_STATUS 198/* "Command" packets going from remote to host COMPLETE and MODEM_STATUS
199 use data[4] / data[3] to indicate current state and modem status respectively 199 use data[4] / data[3] to indicate current state and modem status respectively
200*/ 200*/
201 201
202#define COMPLETE (0x20 | PRE_EMPTIVE) 202#define RIOC_COMPLETE (0x20 | RIOC_PRE_EMPTIVE)
203 /* Command complete */ 203 /* Command complete */
204#define BREAK_RECEIVED (0x21 | PRE_EMPTIVE) 204#define RIOC_BREAK_RECEIVED (0x21 | RIOC_PRE_EMPTIVE)
205 /* Break received */ 205 /* Break received */
206#define MODEM_STATUS (0x22 | PRE_EMPTIVE) 206#define RIOC_MODEM_STATUS (0x22 | RIOC_PRE_EMPTIVE)
207 /* Change in modem status */ 207 /* Change in modem status */
208 208
209/* "Command" packet that could go either way - handshake wake-up */ 209/* "Command" packet that could go either way - handshake wake-up */
210#define HANDSHAKE (0x23 | PRE_EMPTIVE) 210#define RIOC_HANDSHAKE (0x23 | RIOC_PRE_EMPTIVE)
211 /* Wake-up to HOST / RTA */ 211 /* Wake-up to HOST / RTA */
212 212
213#endif 213#endif
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 0ce96670f979..412777cd1e68 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -344,7 +344,7 @@ int rio_minor(struct tty_struct *tty)
344 344
345static int rio_set_real_termios(void *ptr) 345static int rio_set_real_termios(void *ptr)
346{ 346{
347 return RIOParam((struct Port *) ptr, CONFIG, 1, 1); 347 return RIOParam((struct Port *) ptr, RIOC_CONFIG, 1, 1);
348} 348}
349 349
350 350
@@ -487,7 +487,7 @@ static int rio_get_CD(void *ptr)
487 int rv; 487 int rv;
488 488
489 func_enter(); 489 func_enter();
490 rv = (PortP->ModemState & MSVR1_CD) != 0; 490 rv = (PortP->ModemState & RIOC_MSVR1_CD) != 0;
491 491
492 rio_dprintk(RIO_DEBUG_INIT, "Getting CD status: %d\n", rv); 492 rio_dprintk(RIO_DEBUG_INIT, "Getting CD status: %d\n", rv);
493 493
@@ -607,7 +607,8 @@ static int rio_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd
607 rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n"); 607 rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n");
608 rc = -EIO; 608 rc = -EIO;
609 } else { 609 } else {
610 if (RIOShortCommand(p, PortP, SBREAK, 2, 250) == RIO_FAIL) { 610 if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2, 250) ==
611 RIO_FAIL) {
611 rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n"); 612 rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
612 rc = -EIO; 613 rc = -EIO;
613 } 614 }
@@ -622,7 +623,8 @@ static int rio_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd
622 l = arg ? arg * 100 : 250; 623 l = arg ? arg * 100 : 250;
623 if (l > 255) 624 if (l > 255)
624 l = 255; 625 l = 255;
625 if (RIOShortCommand(p, PortP, SBREAK, 2, arg ? arg * 100 : 250) == RIO_FAIL) { 626 if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2,
627 arg ? arg * 100 : 250) == RIO_FAIL) {
626 rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n"); 628 rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
627 rc = -EIO; 629 rc = -EIO;
628 } 630 }
diff --git a/drivers/char/rio/rio_linux.h b/drivers/char/rio/rio_linux.h
index dc3f005614a3..7f26cd7c815e 100644
--- a/drivers/char/rio/rio_linux.h
+++ b/drivers/char/rio/rio_linux.h
@@ -186,9 +186,9 @@ static inline void *rio_memcpy_fromio(void *dest, void __iomem *source, int n)
186 186
187#ifdef DEBUG 187#ifdef DEBUG
188#define rio_dprintk(f, str...) do { if (rio_debug & f) printk (str);} while (0) 188#define rio_dprintk(f, str...) do { if (rio_debug & f) printk (str);} while (0)
189#define func_enter() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s\n", __FUNCTION__) 189#define func_enter() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s\n", __func__)
190#define func_exit() rio_dprintk (RIO_DEBUG_FLOW, "rio: exit %s\n", __FUNCTION__) 190#define func_exit() rio_dprintk (RIO_DEBUG_FLOW, "rio: exit %s\n", __func__)
191#define func_enter2() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s (port %d)\n",__FUNCTION__, port->line) 191#define func_enter2() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s (port %d)\n",__func__, port->line)
192#else 192#else
193#define rio_dprintk(f, str...) /* nothing */ 193#define rio_dprintk(f, str...) /* nothing */
194#define func_enter() 194#define func_enter()
diff --git a/drivers/char/rio/riocmd.c b/drivers/char/rio/riocmd.c
index bf36959fc121..7b96e0814887 100644
--- a/drivers/char/rio/riocmd.c
+++ b/drivers/char/rio/riocmd.c
@@ -417,7 +417,7 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc
417 PortP = p->RIOPortp[SysPort]; 417 PortP = p->RIOPortp[SysPort];
418 rio_spin_lock_irqsave(&PortP->portSem, flags); 418 rio_spin_lock_irqsave(&PortP->portSem, flags);
419 switch (readb(&PktCmdP->Command)) { 419 switch (readb(&PktCmdP->Command)) {
420 case BREAK_RECEIVED: 420 case RIOC_BREAK_RECEIVED:
421 rio_dprintk(RIO_DEBUG_CMD, "Received a break!\n"); 421 rio_dprintk(RIO_DEBUG_CMD, "Received a break!\n");
422 /* If the current line disc. is not multi-threading and 422 /* If the current line disc. is not multi-threading and
423 the current processor is not the default, reset rup_intr 423 the current processor is not the default, reset rup_intr
@@ -428,16 +428,16 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc
428 gs_got_break(&PortP->gs); 428 gs_got_break(&PortP->gs);
429 break; 429 break;
430 430
431 case COMPLETE: 431 case RIOC_COMPLETE:
432 rio_dprintk(RIO_DEBUG_CMD, "Command complete on phb %d host %Zd\n", readb(&PktCmdP->PhbNum), HostP - p->RIOHosts); 432 rio_dprintk(RIO_DEBUG_CMD, "Command complete on phb %d host %Zd\n", readb(&PktCmdP->PhbNum), HostP - p->RIOHosts);
433 subCommand = 1; 433 subCommand = 1;
434 switch (readb(&PktCmdP->SubCommand)) { 434 switch (readb(&PktCmdP->SubCommand)) {
435 case MEMDUMP: 435 case RIOC_MEMDUMP:
436 rio_dprintk(RIO_DEBUG_CMD, "Memory dump cmd (0x%x) from addr 0x%x\n", readb(&PktCmdP->SubCommand), readw(&PktCmdP->SubAddr)); 436 rio_dprintk(RIO_DEBUG_CMD, "Memory dump cmd (0x%x) from addr 0x%x\n", readb(&PktCmdP->SubCommand), readw(&PktCmdP->SubAddr));
437 break; 437 break;
438 case READ_REGISTER: 438 case RIOC_READ_REGISTER:
439 rio_dprintk(RIO_DEBUG_CMD, "Read register (0x%x)\n", readw(&PktCmdP->SubAddr)); 439 rio_dprintk(RIO_DEBUG_CMD, "Read register (0x%x)\n", readw(&PktCmdP->SubAddr));
440 p->CdRegister = (readb(&PktCmdP->ModemStatus) & MSVR1_HOST); 440 p->CdRegister = (readb(&PktCmdP->ModemStatus) & RIOC_MSVR1_HOST);
441 break; 441 break;
442 default: 442 default:
443 subCommand = 0; 443 subCommand = 0;
@@ -456,14 +456,15 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc
456 rio_dprintk(RIO_DEBUG_CMD, "No change\n"); 456 rio_dprintk(RIO_DEBUG_CMD, "No change\n");
457 457
458 /* FALLTHROUGH */ 458 /* FALLTHROUGH */
459 case MODEM_STATUS: 459 case RIOC_MODEM_STATUS:
460 /* 460 /*
461 ** Knock out the tbusy and tstop bits, as these are not relevant 461 ** Knock out the tbusy and tstop bits, as these are not relevant
462 ** to the check for modem status change (they're just there because 462 ** to the check for modem status change (they're just there because
463 ** it's a convenient place to put them!). 463 ** it's a convenient place to put them!).
464 */ 464 */
465 ReportedModemStatus = readb(&PktCmdP->ModemStatus); 465 ReportedModemStatus = readb(&PktCmdP->ModemStatus);
466 if ((PortP->ModemState & MSVR1_HOST) == (ReportedModemStatus & MSVR1_HOST)) { 466 if ((PortP->ModemState & RIOC_MSVR1_HOST) ==
467 (ReportedModemStatus & RIOC_MSVR1_HOST)) {
467 rio_dprintk(RIO_DEBUG_CMD, "Modem status unchanged 0x%x\n", PortP->ModemState); 468 rio_dprintk(RIO_DEBUG_CMD, "Modem status unchanged 0x%x\n", PortP->ModemState);
468 /* 469 /*
469 ** Update ModemState just in case tbusy or tstop states have 470 ** Update ModemState just in case tbusy or tstop states have
@@ -497,7 +498,7 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc
497 /* 498 /*
498 ** Is there a carrier? 499 ** Is there a carrier?
499 */ 500 */
500 if (PortP->ModemState & MSVR1_CD) { 501 if (PortP->ModemState & RIOC_MSVR1_CD) {
501 /* 502 /*
502 ** Has carrier just appeared? 503 ** Has carrier just appeared?
503 */ 504 */
@@ -691,7 +692,7 @@ void RIOPollHostCommands(struct rio_info *p, struct Host *HostP)
691 */ 692 */
692 rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags); 693 rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
693 FreeMe = RIOCommandRup(p, Rup, HostP, PacketP); 694 FreeMe = RIOCommandRup(p, Rup, HostP, PacketP);
694 if (readb(&PacketP->data[5]) == MEMDUMP) { 695 if (readb(&PacketP->data[5]) == RIOC_MEMDUMP) {
695 rio_dprintk(RIO_DEBUG_CMD, "Memdump from 0x%x complete\n", readw(&(PacketP->data[6]))); 696 rio_dprintk(RIO_DEBUG_CMD, "Memdump from 0x%x complete\n", readw(&(PacketP->data[6])));
696 rio_memcpy_fromio(p->RIOMemDump, &(PacketP->data[8]), 32); 697 rio_memcpy_fromio(p->RIOMemDump, &(PacketP->data[8]), 32);
697 } 698 }
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c
index d8eb2bcbe015..d65ceb9a434a 100644
--- a/drivers/char/rio/rioctrl.c
+++ b/drivers/char/rio/rioctrl.c
@@ -422,7 +422,8 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
422 } 422 }
423 423
424 rio_spin_lock_irqsave(&PortP->portSem, flags); 424 rio_spin_lock_irqsave(&PortP->portSem, flags);
425 if (RIOPreemptiveCmd(p, (p->RIOPortp[port]), RESUME) == RIO_FAIL) { 425 if (RIOPreemptiveCmd(p, (p->RIOPortp[port]), RIOC_RESUME) ==
426 RIO_FAIL) {
426 rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME failed\n"); 427 rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME failed\n");
427 rio_spin_unlock_irqrestore(&PortP->portSem, flags); 428 rio_spin_unlock_irqrestore(&PortP->portSem, flags);
428 return -EBUSY; 429 return -EBUSY;
@@ -636,7 +637,8 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
636 return -ENXIO; 637 return -ENXIO;
637 } 638 }
638 PortP = (p->RIOPortp[PortTty.port]); 639 PortP = (p->RIOPortp[PortTty.port]);
639 RIOParam(PortP, CONFIG, PortP->State & RIO_MODEM, OK_TO_SLEEP); 640 RIOParam(PortP, RIOC_CONFIG, PortP->State & RIO_MODEM,
641 OK_TO_SLEEP);
640 return retval; 642 return retval;
641 643
642 case RIO_SET_PORT_PARAMS: 644 case RIO_SET_PORT_PARAMS:
@@ -1247,7 +1249,7 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
1247 1249
1248 rio_spin_lock_irqsave(&PortP->portSem, flags); 1250 rio_spin_lock_irqsave(&PortP->portSem, flags);
1249 1251
1250 if (RIOPreemptiveCmd(p, PortP, MEMDUMP) == RIO_FAIL) { 1252 if (RIOPreemptiveCmd(p, PortP, RIOC_MEMDUMP) == RIO_FAIL) {
1251 rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP failed\n"); 1253 rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP failed\n");
1252 rio_spin_unlock_irqrestore(&PortP->portSem, flags); 1254 rio_spin_unlock_irqrestore(&PortP->portSem, flags);
1253 return -EBUSY; 1255 return -EBUSY;
@@ -1313,7 +1315,8 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
1313 1315
1314 rio_spin_lock_irqsave(&PortP->portSem, flags); 1316 rio_spin_lock_irqsave(&PortP->portSem, flags);
1315 1317
1316 if (RIOPreemptiveCmd(p, PortP, READ_REGISTER) == RIO_FAIL) { 1318 if (RIOPreemptiveCmd(p, PortP, RIOC_READ_REGISTER) ==
1319 RIO_FAIL) {
1317 rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER failed\n"); 1320 rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER failed\n");
1318 rio_spin_unlock_irqrestore(&PortP->portSem, flags); 1321 rio_spin_unlock_irqrestore(&PortP->portSem, flags);
1319 return -EBUSY; 1322 return -EBUSY;
@@ -1434,50 +1437,50 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
1434 PktCmdP->PhbNum = port; 1437 PktCmdP->PhbNum = port;
1435 1438
1436 switch (Cmd) { 1439 switch (Cmd) {
1437 case MEMDUMP: 1440 case RIOC_MEMDUMP:
1438 rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p " 1441 rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p "
1439 "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); 1442 "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
1440 PktCmdP->SubCommand = MEMDUMP; 1443 PktCmdP->SubCommand = RIOC_MEMDUMP;
1441 PktCmdP->SubAddr = SubCmd.Addr; 1444 PktCmdP->SubAddr = SubCmd.Addr;
1442 break; 1445 break;
1443 case FCLOSE: 1446 case RIOC_FCLOSE:
1444 rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", 1447 rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n",
1445 CmdBlkP); 1448 CmdBlkP);
1446 break; 1449 break;
1447 case READ_REGISTER: 1450 case RIOC_READ_REGISTER:
1448 rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) " 1451 rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) "
1449 "command blk %p\n", (int) SubCmd.Addr, CmdBlkP); 1452 "command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
1450 PktCmdP->SubCommand = READ_REGISTER; 1453 PktCmdP->SubCommand = RIOC_READ_REGISTER;
1451 PktCmdP->SubAddr = SubCmd.Addr; 1454 PktCmdP->SubAddr = SubCmd.Addr;
1452 break; 1455 break;
1453 case RESUME: 1456 case RIOC_RESUME:
1454 rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", 1457 rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n",
1455 CmdBlkP); 1458 CmdBlkP);
1456 break; 1459 break;
1457 case RFLUSH: 1460 case RIOC_RFLUSH:
1458 rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", 1461 rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n",
1459 CmdBlkP); 1462 CmdBlkP);
1460 CmdBlkP->PostFuncP = RIORFlushEnable; 1463 CmdBlkP->PostFuncP = RIORFlushEnable;
1461 break; 1464 break;
1462 case SUSPEND: 1465 case RIOC_SUSPEND:
1463 rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", 1466 rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n",
1464 CmdBlkP); 1467 CmdBlkP);
1465 break; 1468 break;
1466 1469
1467 case MGET: 1470 case RIOC_MGET:
1468 rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", 1471 rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n",
1469 CmdBlkP); 1472 CmdBlkP);
1470 break; 1473 break;
1471 1474
1472 case MSET: 1475 case RIOC_MSET:
1473 case MBIC: 1476 case RIOC_MBIC:
1474 case MBIS: 1477 case RIOC_MBIS:
1475 CmdBlkP->Packet.data[4] = (char) PortP->ModemLines; 1478 CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
1476 rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command " 1479 rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command "
1477 "blk %p\n", CmdBlkP); 1480 "blk %p\n", CmdBlkP);
1478 break; 1481 break;
1479 1482
1480 case WFLUSH: 1483 case RIOC_WFLUSH:
1481 /* 1484 /*
1482 ** If we have queued up the maximum number of Write flushes 1485 ** If we have queued up the maximum number of Write flushes
1483 ** allowed then we should not bother sending any more to the 1486 ** allowed then we should not bother sending any more to the
diff --git a/drivers/char/rio/riointr.c b/drivers/char/rio/riointr.c
index 4734e26e1ccd..ea21686c69a4 100644
--- a/drivers/char/rio/riointr.c
+++ b/drivers/char/rio/riointr.c
@@ -401,9 +401,8 @@ void RIOServiceHost(struct rio_info *p, struct Host *HostP)
401 PortP->InUse = NOT_INUSE; 401 PortP->InUse = NOT_INUSE;
402 402
403 rio_spin_unlock(&PortP->portSem); 403 rio_spin_unlock(&PortP->portSem);
404 if (RIOParam(PortP, OPEN, ((PortP->Cor2Copy & (COR2_RTSFLOW | COR2_CTSFLOW)) == (COR2_RTSFLOW | COR2_CTSFLOW)) ? 1 : 0, DONT_SLEEP) == RIO_FAIL) { 404 if (RIOParam(PortP, RIOC_OPEN, ((PortP->Cor2Copy & (RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW)) == (RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW)) ? 1 : 0, DONT_SLEEP) == RIO_FAIL)
405 continue; /* with next port */ 405 continue; /* with next port */
406 }
407 rio_spin_lock(&PortP->portSem); 406 rio_spin_lock(&PortP->portSem);
408 PortP->MagicFlags &= ~MAGIC_REBOOT; 407 PortP->MagicFlags &= ~MAGIC_REBOOT;
409 } 408 }
@@ -429,7 +428,7 @@ void RIOServiceHost(struct rio_info *p, struct Host *HostP)
429 */ 428 */
430 PktCmdP = (struct PktCmd __iomem *) &PacketP->data[0]; 429 PktCmdP = (struct PktCmd __iomem *) &PacketP->data[0];
431 430
432 writeb(WFLUSH, &PktCmdP->Command); 431 writeb(RIOC_WFLUSH, &PktCmdP->Command);
433 432
434 p = PortP->HostPort % (u16) PORTS_PER_RTA; 433 p = PortP->HostPort % (u16) PORTS_PER_RTA;
435 434
diff --git a/drivers/char/rio/rioparam.c b/drivers/char/rio/rioparam.c
index da276ed57b3f..4810b845cc21 100644
--- a/drivers/char/rio/rioparam.c
+++ b/drivers/char/rio/rioparam.c
@@ -177,7 +177,7 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
177 } 177 }
178 rio_spin_lock_irqsave(&PortP->portSem, flags); 178 rio_spin_lock_irqsave(&PortP->portSem, flags);
179 179
180 if (cmd == OPEN) { 180 if (cmd == RIOC_OPEN) {
181 /* 181 /*
182 ** If the port is set to store or lock the parameters, and it is 182 ** If the port is set to store or lock the parameters, and it is
183 ** paramed with OPEN, we want to restore the saved port termio, but 183 ** paramed with OPEN, we want to restore the saved port termio, but
@@ -241,50 +241,50 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
241 case CS5: 241 case CS5:
242 { 242 {
243 rio_dprintk(RIO_DEBUG_PARAM, "5 bit data\n"); 243 rio_dprintk(RIO_DEBUG_PARAM, "5 bit data\n");
244 Cor1 |= COR1_5BITS; 244 Cor1 |= RIOC_COR1_5BITS;
245 break; 245 break;
246 } 246 }
247 case CS6: 247 case CS6:
248 { 248 {
249 rio_dprintk(RIO_DEBUG_PARAM, "6 bit data\n"); 249 rio_dprintk(RIO_DEBUG_PARAM, "6 bit data\n");
250 Cor1 |= COR1_6BITS; 250 Cor1 |= RIOC_COR1_6BITS;
251 break; 251 break;
252 } 252 }
253 case CS7: 253 case CS7:
254 { 254 {
255 rio_dprintk(RIO_DEBUG_PARAM, "7 bit data\n"); 255 rio_dprintk(RIO_DEBUG_PARAM, "7 bit data\n");
256 Cor1 |= COR1_7BITS; 256 Cor1 |= RIOC_COR1_7BITS;
257 break; 257 break;
258 } 258 }
259 case CS8: 259 case CS8:
260 { 260 {
261 rio_dprintk(RIO_DEBUG_PARAM, "8 bit data\n"); 261 rio_dprintk(RIO_DEBUG_PARAM, "8 bit data\n");
262 Cor1 |= COR1_8BITS; 262 Cor1 |= RIOC_COR1_8BITS;
263 break; 263 break;
264 } 264 }
265 } 265 }
266 266
267 if (TtyP->termios->c_cflag & CSTOPB) { 267 if (TtyP->termios->c_cflag & CSTOPB) {
268 rio_dprintk(RIO_DEBUG_PARAM, "2 stop bits\n"); 268 rio_dprintk(RIO_DEBUG_PARAM, "2 stop bits\n");
269 Cor1 |= COR1_2STOP; 269 Cor1 |= RIOC_COR1_2STOP;
270 } else { 270 } else {
271 rio_dprintk(RIO_DEBUG_PARAM, "1 stop bit\n"); 271 rio_dprintk(RIO_DEBUG_PARAM, "1 stop bit\n");
272 Cor1 |= COR1_1STOP; 272 Cor1 |= RIOC_COR1_1STOP;
273 } 273 }
274 274
275 if (TtyP->termios->c_cflag & PARENB) { 275 if (TtyP->termios->c_cflag & PARENB) {
276 rio_dprintk(RIO_DEBUG_PARAM, "Enable parity\n"); 276 rio_dprintk(RIO_DEBUG_PARAM, "Enable parity\n");
277 Cor1 |= COR1_NORMAL; 277 Cor1 |= RIOC_COR1_NORMAL;
278 } else { 278 } else {
279 rio_dprintk(RIO_DEBUG_PARAM, "Disable parity\n"); 279 rio_dprintk(RIO_DEBUG_PARAM, "Disable parity\n");
280 Cor1 |= COR1_NOP; 280 Cor1 |= RIOC_COR1_NOP;
281 } 281 }
282 if (TtyP->termios->c_cflag & PARODD) { 282 if (TtyP->termios->c_cflag & PARODD) {
283 rio_dprintk(RIO_DEBUG_PARAM, "Odd parity\n"); 283 rio_dprintk(RIO_DEBUG_PARAM, "Odd parity\n");
284 Cor1 |= COR1_ODD; 284 Cor1 |= RIOC_COR1_ODD;
285 } else { 285 } else {
286 rio_dprintk(RIO_DEBUG_PARAM, "Even parity\n"); 286 rio_dprintk(RIO_DEBUG_PARAM, "Even parity\n");
287 Cor1 |= COR1_EVEN; 287 Cor1 |= RIOC_COR1_EVEN;
288 } 288 }
289 289
290 /* 290 /*
@@ -292,11 +292,11 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
292 */ 292 */
293 if (TtyP->termios->c_iflag & IXON) { 293 if (TtyP->termios->c_iflag & IXON) {
294 rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop output control\n"); 294 rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop output control\n");
295 Cor2 |= COR2_IXON; 295 Cor2 |= RIOC_COR2_IXON;
296 } else { 296 } else {
297 if (PortP->Config & RIO_IXON) { 297 if (PortP->Config & RIO_IXON) {
298 rio_dprintk(RIO_DEBUG_PARAM, "Force enable start/stop output control\n"); 298 rio_dprintk(RIO_DEBUG_PARAM, "Force enable start/stop output control\n");
299 Cor2 |= COR2_IXON; 299 Cor2 |= RIOC_COR2_IXON;
300 } else 300 } else
301 rio_dprintk(RIO_DEBUG_PARAM, "IXON has been disabled.\n"); 301 rio_dprintk(RIO_DEBUG_PARAM, "IXON has been disabled.\n");
302 } 302 }
@@ -304,29 +304,29 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
304 if (TtyP->termios->c_iflag & IXANY) { 304 if (TtyP->termios->c_iflag & IXANY) {
305 if (PortP->Config & RIO_IXANY) { 305 if (PortP->Config & RIO_IXANY) {
306 rio_dprintk(RIO_DEBUG_PARAM, "Enable any key to restart output\n"); 306 rio_dprintk(RIO_DEBUG_PARAM, "Enable any key to restart output\n");
307 Cor2 |= COR2_IXANY; 307 Cor2 |= RIOC_COR2_IXANY;
308 } else 308 } else
309 rio_dprintk(RIO_DEBUG_PARAM, "IXANY has been disabled due to sanity reasons.\n"); 309 rio_dprintk(RIO_DEBUG_PARAM, "IXANY has been disabled due to sanity reasons.\n");
310 } 310 }
311 311
312 if (TtyP->termios->c_iflag & IXOFF) { 312 if (TtyP->termios->c_iflag & IXOFF) {
313 rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop input control 2\n"); 313 rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop input control 2\n");
314 Cor2 |= COR2_IXOFF; 314 Cor2 |= RIOC_COR2_IXOFF;
315 } 315 }
316 316
317 if (TtyP->termios->c_cflag & HUPCL) { 317 if (TtyP->termios->c_cflag & HUPCL) {
318 rio_dprintk(RIO_DEBUG_PARAM, "Hangup on last close\n"); 318 rio_dprintk(RIO_DEBUG_PARAM, "Hangup on last close\n");
319 Cor2 |= COR2_HUPCL; 319 Cor2 |= RIOC_COR2_HUPCL;
320 } 320 }
321 321
322 if (C_CRTSCTS(TtyP)) { 322 if (C_CRTSCTS(TtyP)) {
323 rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control enabled\n"); 323 rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control enabled\n");
324 Cor2 |= COR2_CTSFLOW; 324 Cor2 |= RIOC_COR2_CTSFLOW;
325 Cor2 |= COR2_RTSFLOW; 325 Cor2 |= RIOC_COR2_RTSFLOW;
326 } else { 326 } else {
327 rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control disabled\n"); 327 rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control disabled\n");
328 Cor2 &= ~COR2_CTSFLOW; 328 Cor2 &= ~RIOC_COR2_CTSFLOW;
329 Cor2 &= ~COR2_RTSFLOW; 329 Cor2 &= ~RIOC_COR2_RTSFLOW;
330 } 330 }
331 331
332 332
@@ -341,36 +341,36 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
341 */ 341 */
342 if (TtyP->termios->c_iflag & IGNBRK) { 342 if (TtyP->termios->c_iflag & IGNBRK) {
343 rio_dprintk(RIO_DEBUG_PARAM, "Ignore break condition\n"); 343 rio_dprintk(RIO_DEBUG_PARAM, "Ignore break condition\n");
344 Cor4 |= COR4_IGNBRK; 344 Cor4 |= RIOC_COR4_IGNBRK;
345 } 345 }
346 if (!(TtyP->termios->c_iflag & BRKINT)) { 346 if (!(TtyP->termios->c_iflag & BRKINT)) {
347 rio_dprintk(RIO_DEBUG_PARAM, "Break generates NULL condition\n"); 347 rio_dprintk(RIO_DEBUG_PARAM, "Break generates NULL condition\n");
348 Cor4 |= COR4_NBRKINT; 348 Cor4 |= RIOC_COR4_NBRKINT;
349 } else { 349 } else {
350 rio_dprintk(RIO_DEBUG_PARAM, "Interrupt on break condition\n"); 350 rio_dprintk(RIO_DEBUG_PARAM, "Interrupt on break condition\n");
351 } 351 }
352 352
353 if (TtyP->termios->c_iflag & INLCR) { 353 if (TtyP->termios->c_iflag & INLCR) {
354 rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage return on input\n"); 354 rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage return on input\n");
355 Cor4 |= COR4_INLCR; 355 Cor4 |= RIOC_COR4_INLCR;
356 } 356 }
357 357
358 if (TtyP->termios->c_iflag & IGNCR) { 358 if (TtyP->termios->c_iflag & IGNCR) {
359 rio_dprintk(RIO_DEBUG_PARAM, "Ignore carriage return on input\n"); 359 rio_dprintk(RIO_DEBUG_PARAM, "Ignore carriage return on input\n");
360 Cor4 |= COR4_IGNCR; 360 Cor4 |= RIOC_COR4_IGNCR;
361 } 361 }
362 362
363 if (TtyP->termios->c_iflag & ICRNL) { 363 if (TtyP->termios->c_iflag & ICRNL) {
364 rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on input\n"); 364 rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on input\n");
365 Cor4 |= COR4_ICRNL; 365 Cor4 |= RIOC_COR4_ICRNL;
366 } 366 }
367 if (TtyP->termios->c_iflag & IGNPAR) { 367 if (TtyP->termios->c_iflag & IGNPAR) {
368 rio_dprintk(RIO_DEBUG_PARAM, "Ignore characters with parity errors\n"); 368 rio_dprintk(RIO_DEBUG_PARAM, "Ignore characters with parity errors\n");
369 Cor4 |= COR4_IGNPAR; 369 Cor4 |= RIOC_COR4_IGNPAR;
370 } 370 }
371 if (TtyP->termios->c_iflag & PARMRK) { 371 if (TtyP->termios->c_iflag & PARMRK) {
372 rio_dprintk(RIO_DEBUG_PARAM, "Mark parity errors\n"); 372 rio_dprintk(RIO_DEBUG_PARAM, "Mark parity errors\n");
373 Cor4 |= COR4_PARMRK; 373 Cor4 |= RIOC_COR4_PARMRK;
374 } 374 }
375 375
376 /* 376 /*
@@ -378,22 +378,22 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
378 ** on reception of a config packet. 378 ** on reception of a config packet.
379 ** The download code handles the zero baud condition. 379 ** The download code handles the zero baud condition.
380 */ 380 */
381 Cor4 |= COR4_RAISEMOD; 381 Cor4 |= RIOC_COR4_RAISEMOD;
382 382
383 /* 383 /*
384 ** COR 5 384 ** COR 5
385 */ 385 */
386 386
387 Cor5 = COR5_CMOE; 387 Cor5 = RIOC_COR5_CMOE;
388 388
389 /* 389 /*
390 ** Set to monitor tbusy/tstop (or not). 390 ** Set to monitor tbusy/tstop (or not).
391 */ 391 */
392 392
393 if (PortP->MonitorTstate) 393 if (PortP->MonitorTstate)
394 Cor5 |= COR5_TSTATE_ON; 394 Cor5 |= RIOC_COR5_TSTATE_ON;
395 else 395 else
396 Cor5 |= COR5_TSTATE_OFF; 396 Cor5 |= RIOC_COR5_TSTATE_OFF;
397 397
398 /* 398 /*
399 ** Could set LNE here if you wanted LNext processing. SVR4 will use it. 399 ** Could set LNE here if you wanted LNext processing. SVR4 will use it.
@@ -401,24 +401,24 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
401 if (TtyP->termios->c_iflag & ISTRIP) { 401 if (TtyP->termios->c_iflag & ISTRIP) {
402 rio_dprintk(RIO_DEBUG_PARAM, "Strip input characters\n"); 402 rio_dprintk(RIO_DEBUG_PARAM, "Strip input characters\n");
403 if (!(PortP->State & RIO_TRIAD_MODE)) { 403 if (!(PortP->State & RIO_TRIAD_MODE)) {
404 Cor5 |= COR5_ISTRIP; 404 Cor5 |= RIOC_COR5_ISTRIP;
405 } 405 }
406 } 406 }
407 407
408 if (TtyP->termios->c_oflag & ONLCR) { 408 if (TtyP->termios->c_oflag & ONLCR) {
409 rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage-return, newline on output\n"); 409 rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage-return, newline on output\n");
410 if (PortP->CookMode == COOK_MEDIUM) 410 if (PortP->CookMode == COOK_MEDIUM)
411 Cor5 |= COR5_ONLCR; 411 Cor5 |= RIOC_COR5_ONLCR;
412 } 412 }
413 if (TtyP->termios->c_oflag & OCRNL) { 413 if (TtyP->termios->c_oflag & OCRNL) {
414 rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on output\n"); 414 rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on output\n");
415 if (PortP->CookMode == COOK_MEDIUM) 415 if (PortP->CookMode == COOK_MEDIUM)
416 Cor5 |= COR5_OCRNL; 416 Cor5 |= RIOC_COR5_OCRNL;
417 } 417 }
418 if ((TtyP->termios->c_oflag & TABDLY) == TAB3) { 418 if ((TtyP->termios->c_oflag & TABDLY) == TAB3) {
419 rio_dprintk(RIO_DEBUG_PARAM, "Tab delay 3 set\n"); 419 rio_dprintk(RIO_DEBUG_PARAM, "Tab delay 3 set\n");
420 if (PortP->CookMode == COOK_MEDIUM) 420 if (PortP->CookMode == COOK_MEDIUM)
421 Cor5 |= COR5_TAB3; 421 Cor5 |= RIOC_COR5_TAB3;
422 } 422 }
423 423
424 /* 424 /*
diff --git a/drivers/char/rio/rioroute.c b/drivers/char/rio/rioroute.c
index 85091ff74d96..7a9df7dcf9a8 100644
--- a/drivers/char/rio/rioroute.c
+++ b/drivers/char/rio/rioroute.c
@@ -526,7 +526,7 @@ void RIOFixPhbs(struct rio_info *p, struct Host *HostP, unsigned int unit)
526 ** If RTA is not powered on, the tx packets will be 526 ** If RTA is not powered on, the tx packets will be
527 ** unset, so go no further. 527 ** unset, so go no further.
528 */ 528 */
529 if (PortP->TxStart == 0) { 529 if (!PortP->TxStart) {
530 rio_dprintk(RIO_DEBUG_ROUTE, "Tx pkts not set up yet\n"); 530 rio_dprintk(RIO_DEBUG_ROUTE, "Tx pkts not set up yet\n");
531 rio_spin_unlock_irqrestore(&PortP->portSem, flags); 531 rio_spin_unlock_irqrestore(&PortP->portSem, flags);
532 break; 532 break;
diff --git a/drivers/char/rio/riotty.c b/drivers/char/rio/riotty.c
index 1cb8580a161d..c99354843be1 100644
--- a/drivers/char/rio/riotty.c
+++ b/drivers/char/rio/riotty.c
@@ -211,7 +211,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
211 rio_dprintk(RIO_DEBUG_TTY, "Waiting for RIO_CLOSING to go away\n"); 211 rio_dprintk(RIO_DEBUG_TTY, "Waiting for RIO_CLOSING to go away\n");
212 if (repeat_this-- <= 0) { 212 if (repeat_this-- <= 0) {
213 rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n"); 213 rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
214 RIOPreemptiveCmd(p, PortP, FCLOSE); 214 RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
215 retval = -EINTR; 215 retval = -EINTR;
216 goto bombout; 216 goto bombout;
217 } 217 }
@@ -264,7 +264,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
264 here. If I read the docs correctly the "open" 264 here. If I read the docs correctly the "open"
265 command piggybacks the parameters immediately. 265 command piggybacks the parameters immediately.
266 -- REW */ 266 -- REW */
267 RIOParam(PortP, OPEN, 1, OK_TO_SLEEP); /* Open the port */ 267 RIOParam(PortP, RIOC_OPEN, 1, OK_TO_SLEEP); /* Open the port */
268 rio_spin_lock_irqsave(&PortP->portSem, flags); 268 rio_spin_lock_irqsave(&PortP->portSem, flags);
269 269
270 /* 270 /*
@@ -275,7 +275,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
275 rio_spin_unlock_irqrestore(&PortP->portSem, flags); 275 rio_spin_unlock_irqrestore(&PortP->portSem, flags);
276 if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) { 276 if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
277 rio_dprintk(RIO_DEBUG_TTY, "Waiting for open to finish broken by signal\n"); 277 rio_dprintk(RIO_DEBUG_TTY, "Waiting for open to finish broken by signal\n");
278 RIOPreemptiveCmd(p, PortP, FCLOSE); 278 RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
279 func_exit(); 279 func_exit();
280 return -EINTR; 280 return -EINTR;
281 } 281 }
@@ -297,7 +297,8 @@ int riotopen(struct tty_struct *tty, struct file *filp)
297 ** insert test for carrier here. -- ??? 297 ** insert test for carrier here. -- ???
298 ** I already see that test here. What's the deal? -- REW 298 ** I already see that test here. What's the deal? -- REW
299 */ 299 */
300 if ((PortP->gs.tty->termios->c_cflag & CLOCAL) || (PortP->ModemState & MSVR1_CD)) { 300 if ((PortP->gs.tty->termios->c_cflag & CLOCAL) ||
301 (PortP->ModemState & RIOC_MSVR1_CD)) {
301 rio_dprintk(RIO_DEBUG_TTY, "open(%d) Modem carr on\n", SysPort); 302 rio_dprintk(RIO_DEBUG_TTY, "open(%d) Modem carr on\n", SysPort);
302 /* 303 /*
303 tp->tm.c_state |= CARR_ON; 304 tp->tm.c_state |= CARR_ON;
@@ -325,7 +326,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
325 ** I think it's OK. -- REW 326 ** I think it's OK. -- REW
326 */ 327 */
327 rio_dprintk(RIO_DEBUG_TTY, "open(%d) sleeping for carr broken by signal\n", SysPort); 328 rio_dprintk(RIO_DEBUG_TTY, "open(%d) sleeping for carr broken by signal\n", SysPort);
328 RIOPreemptiveCmd(p, PortP, FCLOSE); 329 RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
329 /* 330 /*
330 tp->tm.c_state &= ~WOPEN; 331 tp->tm.c_state &= ~WOPEN;
331 */ 332 */
@@ -416,7 +417,7 @@ int riotclose(void *ptr)
416 */ 417 */
417 PortP->State &= ~RIO_MOPEN; 418 PortP->State &= ~RIO_MOPEN;
418 PortP->State &= ~RIO_CARR_ON; 419 PortP->State &= ~RIO_CARR_ON;
419 PortP->ModemState &= ~MSVR1_CD; 420 PortP->ModemState &= ~RIOC_MSVR1_CD;
420 /* 421 /*
421 ** If the device was open as both a Modem and a tty line 422 ** If the device was open as both a Modem and a tty line
422 ** then we need to wimp out here, as the port has not really 423 ** then we need to wimp out here, as the port has not really
@@ -453,7 +454,7 @@ int riotclose(void *ptr)
453 if (repeat_this-- <= 0) { 454 if (repeat_this-- <= 0) {
454 rv = -EINTR; 455 rv = -EINTR;
455 rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n"); 456 rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
456 RIOPreemptiveCmd(p, PortP, FCLOSE); 457 RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
457 goto close_end; 458 goto close_end;
458 } 459 }
459 rio_dprintk(RIO_DEBUG_TTY, "Calling timeout to flush in closing\n"); 460 rio_dprintk(RIO_DEBUG_TTY, "Calling timeout to flush in closing\n");
@@ -492,8 +493,8 @@ int riotclose(void *ptr)
492 /* Can't call RIOShortCommand with the port locked. */ 493 /* Can't call RIOShortCommand with the port locked. */
493 rio_spin_unlock_irqrestore(&PortP->portSem, flags); 494 rio_spin_unlock_irqrestore(&PortP->portSem, flags);
494 495
495 if (RIOShortCommand(p, PortP, CLOSE, 1, 0) == RIO_FAIL) { 496 if (RIOShortCommand(p, PortP, RIOC_CLOSE, 1, 0) == RIO_FAIL) {
496 RIOPreemptiveCmd(p, PortP, FCLOSE); 497 RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
497 rio_spin_lock_irqsave(&PortP->portSem, flags); 498 rio_spin_lock_irqsave(&PortP->portSem, flags);
498 goto close_end; 499 goto close_end;
499 } 500 }
@@ -503,7 +504,7 @@ int riotclose(void *ptr)
503 try--; 504 try--;
504 if (time_after(jiffies, end_time)) { 505 if (time_after(jiffies, end_time)) {
505 rio_dprintk(RIO_DEBUG_TTY, "Run out of tries - force the bugger shut!\n"); 506 rio_dprintk(RIO_DEBUG_TTY, "Run out of tries - force the bugger shut!\n");
506 RIOPreemptiveCmd(p, PortP, FCLOSE); 507 RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
507 break; 508 break;
508 } 509 }
509 rio_dprintk(RIO_DEBUG_TTY, "Close: PortState:ISOPEN is %d\n", PortP->PortState & PORT_ISOPEN); 510 rio_dprintk(RIO_DEBUG_TTY, "Close: PortState:ISOPEN is %d\n", PortP->PortState & PORT_ISOPEN);
@@ -515,14 +516,14 @@ int riotclose(void *ptr)
515 } 516 }
516 if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) { 517 if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
517 rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n"); 518 rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n");
518 RIOPreemptiveCmd(p, PortP, FCLOSE); 519 RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
519 break; 520 break;
520 } 521 }
521 } 522 }
522 rio_spin_lock_irqsave(&PortP->portSem, flags); 523 rio_spin_lock_irqsave(&PortP->portSem, flags);
523 rio_dprintk(RIO_DEBUG_TTY, "Close: try was %d on completion\n", try); 524 rio_dprintk(RIO_DEBUG_TTY, "Close: try was %d on completion\n", try);
524 525
525 /* RIOPreemptiveCmd(p, PortP, FCLOSE); */ 526 /* RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); */
526 527
527/* 528/*
528** 15.10.1998 ARG - ESIL 0761 part fix 529** 15.10.1998 ARG - ESIL 0761 part fix
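The riotty.c hunks above all make the same substitution: the driver's command and modem-status constants (OPEN, CLOSE, FCLOSE, MSVR1_CD) gain a RIOC_ prefix so they no longer collide with identically named identifiers elsewhere in the kernel. A minimal sketch of the naming pattern follows; the values and the enum form are illustrative assumptions, not taken from this patch, which only shows the renamed call sites.

    /* Sketch only: namespacing driver-private command codes.
     * Values are placeholders; the real ones live in the RIO headers. */
    enum rioc_cmd {
            RIOC_OPEN   = 0x00,     /* was OPEN   */
            RIOC_CLOSE  = 0x01,     /* was CLOSE  */
            RIOC_FCLOSE = 0x02,     /* was FCLOSE */
    };
    #define RIOC_MSVR1_CD  0x80     /* was MSVR1_CD */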
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 3f9d0a9ac36d..f073c710ab8d 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -4,9 +4,9 @@
4 * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com) 4 * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com)
5 * 5 *
6 * This code is loosely based on the Linux serial driver, written by 6 * This code is loosely based on the Linux serial driver, written by
7 * Linus Torvalds, Theodore T'so and others. The RISCom/8 card 7 * Linus Torvalds, Theodore T'so and others. The RISCom/8 card
8 * programming info was obtained from various drivers for other OSes 8 * programming info was obtained from various drivers for other OSes
9 * (FreeBSD, ISC, etc), but no source code from those drivers were 9 * (FreeBSD, ISC, etc), but no source code from those drivers were
10 * directly included in this driver. 10 * directly included in this driver.
11 * 11 *
12 * 12 *
@@ -33,7 +33,7 @@
33 33
34#include <linux/module.h> 34#include <linux/module.h>
35 35
36#include <asm/io.h> 36#include <linux/io.h>
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/sched.h> 38#include <linux/sched.h>
39#include <linux/ioport.h> 39#include <linux/ioport.h>
@@ -49,7 +49,7 @@
49#include <linux/tty_flip.h> 49#include <linux/tty_flip.h>
50#include <linux/spinlock.h> 50#include <linux/spinlock.h>
51 51
52#include <asm/uaccess.h> 52#include <linux/uaccess.h>
53 53
54#include "riscom8.h" 54#include "riscom8.h"
55#include "riscom8_reg.h" 55#include "riscom8_reg.h"
@@ -57,15 +57,15 @@
57/* Am I paranoid or not ? ;-) */ 57/* Am I paranoid or not ? ;-) */
58#define RISCOM_PARANOIA_CHECK 58#define RISCOM_PARANOIA_CHECK
59 59
60/* 60/*
61 * Crazy InteliCom/8 boards sometimes has swapped CTS & DSR signals. 61 * Crazy InteliCom/8 boards sometimes have swapped CTS & DSR signals.
62 * You can slightly speed up things by #undefing the following option, 62 * You can slightly speed up things by #undefing the following option,
63 * if you are REALLY sure that your board is correct one. 63 * if you are REALLY sure that your board is correct one.
64 */ 64 */
65 65
66#define RISCOM_BRAIN_DAMAGED_CTS 66#define RISCOM_BRAIN_DAMAGED_CTS
67 67
68/* 68/*
69 * The following defines are mostly for testing purposes. But if you need 69 * The following defines are mostly for testing purposes. But if you need
70 * some nice reporting in your syslog, you can define them also. 70 * some nice reporting in your syslog, you can define them also.
71 */ 71 */
@@ -112,7 +112,7 @@ static unsigned short rc_ioport[] = {
112#define RC_NIOPORT ARRAY_SIZE(rc_ioport) 112#define RC_NIOPORT ARRAY_SIZE(rc_ioport)
113 113
114 114
115static inline int rc_paranoia_check(struct riscom_port const * port, 115static int rc_paranoia_check(struct riscom_port const *port,
116 char *name, const char *routine) 116 char *name, const char *routine)
117{ 117{
118#ifdef RISCOM_PARANOIA_CHECK 118#ifdef RISCOM_PARANOIA_CHECK
@@ -134,52 +134,53 @@ static inline int rc_paranoia_check(struct riscom_port const * port,
134} 134}
135 135
136/* 136/*
137 * 137 *
138 * Service functions for RISCom/8 driver. 138 * Service functions for RISCom/8 driver.
139 * 139 *
140 */ 140 */
141 141
142/* Get board number from pointer */ 142/* Get board number from pointer */
143static inline int board_No (struct riscom_board const * bp) 143static inline int board_No(struct riscom_board const *bp)
144{ 144{
145 return bp - rc_board; 145 return bp - rc_board;
146} 146}
147 147
148/* Get port number from pointer */ 148/* Get port number from pointer */
149static inline int port_No (struct riscom_port const * port) 149static inline int port_No(struct riscom_port const *port)
150{ 150{
151 return RC_PORT(port - rc_port); 151 return RC_PORT(port - rc_port);
152} 152}
153 153
154/* Get pointer to board from pointer to port */ 154/* Get pointer to board from pointer to port */
155static inline struct riscom_board * port_Board(struct riscom_port const * port) 155static inline struct riscom_board *port_Board(struct riscom_port const *port)
156{ 156{
157 return &rc_board[RC_BOARD(port - rc_port)]; 157 return &rc_board[RC_BOARD(port - rc_port)];
158} 158}
159 159
160/* Input Byte from CL CD180 register */ 160/* Input Byte from CL CD180 register */
161static inline unsigned char rc_in(struct riscom_board const * bp, unsigned short reg) 161static inline unsigned char rc_in(struct riscom_board const *bp,
162 unsigned short reg)
162{ 163{
163 return inb(bp->base + RC_TO_ISA(reg)); 164 return inb(bp->base + RC_TO_ISA(reg));
164} 165}
165 166
166/* Output Byte to CL CD180 register */ 167/* Output Byte to CL CD180 register */
167static inline void rc_out(struct riscom_board const * bp, unsigned short reg, 168static inline void rc_out(struct riscom_board const *bp, unsigned short reg,
168 unsigned char val) 169 unsigned char val)
169{ 170{
170 outb(val, bp->base + RC_TO_ISA(reg)); 171 outb(val, bp->base + RC_TO_ISA(reg));
171} 172}
172 173
173/* Wait for Channel Command Register ready */ 174/* Wait for Channel Command Register ready */
174static inline void rc_wait_CCR(struct riscom_board const * bp) 175static void rc_wait_CCR(struct riscom_board const *bp)
175{ 176{
176 unsigned long delay; 177 unsigned long delay;
177 178
178 /* FIXME: need something more descriptive then 100000 :) */ 179 /* FIXME: need something more descriptive then 100000 :) */
179 for (delay = 100000; delay; delay--) 180 for (delay = 100000; delay; delay--)
180 if (!rc_in(bp, CD180_CCR)) 181 if (!rc_in(bp, CD180_CCR))
181 return; 182 return;
182 183
183 printk(KERN_INFO "rc%d: Timeout waiting for CCR.\n", board_No(bp)); 184 printk(KERN_INFO "rc%d: Timeout waiting for CCR.\n", board_No(bp));
184} 185}
185 186
@@ -187,11 +188,11 @@ static inline void rc_wait_CCR(struct riscom_board const * bp)
187 * RISCom/8 probe functions. 188 * RISCom/8 probe functions.
188 */ 189 */
189 190
190static inline int rc_request_io_range(struct riscom_board * const bp) 191static int rc_request_io_range(struct riscom_board * const bp)
191{ 192{
192 int i; 193 int i;
193 194
194 for (i = 0; i < RC_NIOPORT; i++) 195 for (i = 0; i < RC_NIOPORT; i++)
195 if (!request_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1, 196 if (!request_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1,
196 "RISCom/8")) { 197 "RISCom/8")) {
197 goto out_release; 198 goto out_release;
@@ -200,42 +201,42 @@ static inline int rc_request_io_range(struct riscom_board * const bp)
200out_release: 201out_release:
201 printk(KERN_INFO "rc%d: Skipping probe at 0x%03x. IO address in use.\n", 202 printk(KERN_INFO "rc%d: Skipping probe at 0x%03x. IO address in use.\n",
202 board_No(bp), bp->base); 203 board_No(bp), bp->base);
203 while(--i >= 0) 204 while (--i >= 0)
204 release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1); 205 release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
205 return 1; 206 return 1;
206} 207}
207 208
208static inline void rc_release_io_range(struct riscom_board * const bp) 209static void rc_release_io_range(struct riscom_board * const bp)
209{ 210{
210 int i; 211 int i;
211 212
212 for (i = 0; i < RC_NIOPORT; i++) 213 for (i = 0; i < RC_NIOPORT; i++)
213 release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1); 214 release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
214} 215}
215 216
216/* Reset and setup CD180 chip */ 217/* Reset and setup CD180 chip */
217static void __init rc_init_CD180(struct riscom_board const * bp) 218static void __init rc_init_CD180(struct riscom_board const *bp)
218{ 219{
219 unsigned long flags; 220 unsigned long flags;
220 221
221 spin_lock_irqsave(&riscom_lock, flags); 222 spin_lock_irqsave(&riscom_lock, flags);
222 223
223 rc_out(bp, RC_CTOUT, 0); /* Clear timeout */ 224 rc_out(bp, RC_CTOUT, 0); /* Clear timeout */
224 rc_wait_CCR(bp); /* Wait for CCR ready */ 225 rc_wait_CCR(bp); /* Wait for CCR ready */
225 rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */ 226 rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */
226 spin_unlock_irqrestore(&riscom_lock, flags); 227 spin_unlock_irqrestore(&riscom_lock, flags);
227 msleep(50); /* Delay 0.05 sec */ 228 msleep(50); /* Delay 0.05 sec */
228 spin_lock_irqsave(&riscom_lock, flags); 229 spin_lock_irqsave(&riscom_lock, flags);
229 rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */ 230 rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */
230 rc_out(bp, CD180_GICR, 0); /* Clear all bits */ 231 rc_out(bp, CD180_GICR, 0); /* Clear all bits */
231 rc_out(bp, CD180_PILR1, RC_ACK_MINT); /* Prio for modem intr */ 232 rc_out(bp, CD180_PILR1, RC_ACK_MINT); /* Prio for modem intr */
232 rc_out(bp, CD180_PILR2, RC_ACK_TINT); /* Prio for transmitter intr */ 233 rc_out(bp, CD180_PILR2, RC_ACK_TINT); /* Prio for tx intr */
233 rc_out(bp, CD180_PILR3, RC_ACK_RINT); /* Prio for receiver intr */ 234 rc_out(bp, CD180_PILR3, RC_ACK_RINT); /* Prio for rx intr */
234 235
235 /* Setting up prescaler. We need 4 ticks per 1 ms */ 236 /* Setting up prescaler. We need 4 ticks per 1 ms */
236 rc_out(bp, CD180_PPRH, (RC_OSCFREQ/(1000000/RISCOM_TPS)) >> 8); 237 rc_out(bp, CD180_PPRH, (RC_OSCFREQ/(1000000/RISCOM_TPS)) >> 8);
237 rc_out(bp, CD180_PPRL, (RC_OSCFREQ/(1000000/RISCOM_TPS)) & 0xff); 238 rc_out(bp, CD180_PPRL, (RC_OSCFREQ/(1000000/RISCOM_TPS)) & 0xff);
238 239
239 spin_unlock_irqrestore(&riscom_lock, flags); 240 spin_unlock_irqrestore(&riscom_lock, flags);
240} 241}
241 242
@@ -245,12 +246,12 @@ static int __init rc_probe(struct riscom_board *bp)
245 unsigned char val1, val2; 246 unsigned char val1, val2;
246 int irqs = 0; 247 int irqs = 0;
247 int retries; 248 int retries;
248 249
249 bp->irq = 0; 250 bp->irq = 0;
250 251
251 if (rc_request_io_range(bp)) 252 if (rc_request_io_range(bp))
252 return 1; 253 return 1;
253 254
254 /* Are the I/O ports here ? */ 255 /* Are the I/O ports here ? */
255 rc_out(bp, CD180_PPRL, 0x5a); 256 rc_out(bp, CD180_PPRL, 0x5a);
256 outb(0xff, 0x80); 257 outb(0xff, 0x80);
@@ -258,34 +259,34 @@ static int __init rc_probe(struct riscom_board *bp)
258 rc_out(bp, CD180_PPRL, 0xa5); 259 rc_out(bp, CD180_PPRL, 0xa5);
259 outb(0x00, 0x80); 260 outb(0x00, 0x80);
260 val2 = rc_in(bp, CD180_PPRL); 261 val2 = rc_in(bp, CD180_PPRL);
261 262
262 if ((val1 != 0x5a) || (val2 != 0xa5)) { 263 if ((val1 != 0x5a) || (val2 != 0xa5)) {
263 printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not found.\n", 264 printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not found.\n",
264 board_No(bp), bp->base); 265 board_No(bp), bp->base);
265 goto out_release; 266 goto out_release;
266 } 267 }
267 268
268 /* It's time to find IRQ for this board */ 269 /* It's time to find IRQ for this board */
269 for (retries = 0; retries < 5 && irqs <= 0; retries++) { 270 for (retries = 0; retries < 5 && irqs <= 0; retries++) {
270 irqs = probe_irq_on(); 271 irqs = probe_irq_on();
271 rc_init_CD180(bp); /* Reset CD180 chip */ 272 rc_init_CD180(bp); /* Reset CD180 chip */
272 rc_out(bp, CD180_CAR, 2); /* Select port 2 */ 273 rc_out(bp, CD180_CAR, 2); /* Select port 2 */
273 rc_wait_CCR(bp); 274 rc_wait_CCR(bp);
274 rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */ 275 rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */
275 rc_out(bp, CD180_IER, IER_TXRDY); /* Enable tx empty intr */ 276 rc_out(bp, CD180_IER, IER_TXRDY);/* Enable tx empty intr */
276 msleep(50); 277 msleep(50);
277 irqs = probe_irq_off(irqs); 278 irqs = probe_irq_off(irqs);
278 val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */ 279 val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */
279 val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */ 280 val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */
280 rc_init_CD180(bp); /* Reset CD180 again */ 281 rc_init_CD180(bp); /* Reset CD180 again */
281 282
282 if ((val1 & RC_BSR_TINT) || (val2 != (RC_ID | GIVR_IT_TX))) { 283 if ((val1 & RC_BSR_TINT) || (val2 != (RC_ID | GIVR_IT_TX))) {
283 printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not " 284 printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not "
284 "found.\n", board_No(bp), bp->base); 285 "found.\n", board_No(bp), bp->base);
285 goto out_release; 286 goto out_release;
286 } 287 }
287 } 288 }
288 289
289 if (irqs <= 0) { 290 if (irqs <= 0) {
290 printk(KERN_ERR "rc%d: Can't find IRQ for RISCom/8 board " 291 printk(KERN_ERR "rc%d: Can't find IRQ for RISCom/8 board "
291 "at 0x%03x.\n", board_No(bp), bp->base); 292 "at 0x%03x.\n", board_No(bp), bp->base);
@@ -293,113 +294,112 @@ static int __init rc_probe(struct riscom_board *bp)
293 } 294 }
294 bp->irq = irqs; 295 bp->irq = irqs;
295 bp->flags |= RC_BOARD_PRESENT; 296 bp->flags |= RC_BOARD_PRESENT;
296 297
297 printk(KERN_INFO "rc%d: RISCom/8 Rev. %c board detected at " 298 printk(KERN_INFO "rc%d: RISCom/8 Rev. %c board detected at "
298 "0x%03x, IRQ %d.\n", 299 "0x%03x, IRQ %d.\n",
299 board_No(bp), 300 board_No(bp),
300 (rc_in(bp, CD180_GFRCR) & 0x0f) + 'A', /* Board revision */ 301 (rc_in(bp, CD180_GFRCR) & 0x0f) + 'A', /* Board revision */
301 bp->base, bp->irq); 302 bp->base, bp->irq);
302 303
303 return 0; 304 return 0;
304out_release: 305out_release:
305 rc_release_io_range(bp); 306 rc_release_io_range(bp);
306 return 1; 307 return 1;
307} 308}
308 309
309/* 310/*
310 * 311 *
311 * Interrupt processing routines. 312 * Interrupt processing routines.
312 * 313 *
313 */ 314 */
314 315
315static inline struct riscom_port * rc_get_port(struct riscom_board const * bp, 316static struct riscom_port *rc_get_port(struct riscom_board const *bp,
316 unsigned char const * what) 317 unsigned char const *what)
317{ 318{
318 unsigned char channel; 319 unsigned char channel;
319 struct riscom_port * port; 320 struct riscom_port *port;
320 321
321 channel = rc_in(bp, CD180_GICR) >> GICR_CHAN_OFF; 322 channel = rc_in(bp, CD180_GICR) >> GICR_CHAN_OFF;
322 if (channel < CD180_NCH) { 323 if (channel < CD180_NCH) {
323 port = &rc_port[board_No(bp) * RC_NPORT + channel]; 324 port = &rc_port[board_No(bp) * RC_NPORT + channel];
324 if (port->flags & ASYNC_INITIALIZED) { 325 if (port->flags & ASYNC_INITIALIZED)
325 return port; 326 return port;
326 }
327 } 327 }
328 printk(KERN_ERR "rc%d: %s interrupt from invalid port %d\n", 328 printk(KERN_ERR "rc%d: %s interrupt from invalid port %d\n",
329 board_No(bp), what, channel); 329 board_No(bp), what, channel);
330 return NULL; 330 return NULL;
331} 331}
332 332
333static inline void rc_receive_exc(struct riscom_board const * bp) 333static void rc_receive_exc(struct riscom_board const *bp)
334{ 334{
335 struct riscom_port *port; 335 struct riscom_port *port;
336 struct tty_struct *tty; 336 struct tty_struct *tty;
337 unsigned char status; 337 unsigned char status;
338 unsigned char ch, flag; 338 unsigned char ch, flag;
339 339
340 if (!(port = rc_get_port(bp, "Receive"))) 340 port = rc_get_port(bp, "Receive");
341 if (port == NULL)
341 return; 342 return;
342 343
343 tty = port->tty; 344 tty = port->tty;
344 345
345#ifdef RC_REPORT_OVERRUN 346#ifdef RC_REPORT_OVERRUN
346 status = rc_in(bp, CD180_RCSR); 347 status = rc_in(bp, CD180_RCSR);
347 if (status & RCSR_OE) 348 if (status & RCSR_OE)
348 port->overrun++; 349 port->overrun++;
349 status &= port->mark_mask; 350 status &= port->mark_mask;
350#else 351#else
351 status = rc_in(bp, CD180_RCSR) & port->mark_mask; 352 status = rc_in(bp, CD180_RCSR) & port->mark_mask;
352#endif 353#endif
353 ch = rc_in(bp, CD180_RDR); 354 ch = rc_in(bp, CD180_RDR);
354 if (!status) { 355 if (!status)
355 return; 356 return;
356 }
357 if (status & RCSR_TOUT) { 357 if (status & RCSR_TOUT) {
358 printk(KERN_WARNING "rc%d: port %d: Receiver timeout. " 358 printk(KERN_WARNING "rc%d: port %d: Receiver timeout. "
359 "Hardware problems ?\n", 359 "Hardware problems ?\n",
360 board_No(bp), port_No(port)); 360 board_No(bp), port_No(port));
361 return; 361 return;
362 362
363 } else if (status & RCSR_BREAK) { 363 } else if (status & RCSR_BREAK) {
364 printk(KERN_INFO "rc%d: port %d: Handling break...\n", 364 printk(KERN_INFO "rc%d: port %d: Handling break...\n",
365 board_No(bp), port_No(port)); 365 board_No(bp), port_No(port));
366 flag = TTY_BREAK; 366 flag = TTY_BREAK;
367 if (port->flags & ASYNC_SAK) 367 if (port->flags & ASYNC_SAK)
368 do_SAK(tty); 368 do_SAK(tty);
369 369
370 } else if (status & RCSR_PE) 370 } else if (status & RCSR_PE)
371 flag = TTY_PARITY; 371 flag = TTY_PARITY;
372 372
373 else if (status & RCSR_FE) 373 else if (status & RCSR_FE)
374 flag = TTY_FRAME; 374 flag = TTY_FRAME;
375 375
376 else if (status & RCSR_OE) 376 else if (status & RCSR_OE)
377 flag = TTY_OVERRUN; 377 flag = TTY_OVERRUN;
378
379 else 378 else
380 flag = TTY_NORMAL; 379 flag = TTY_NORMAL;
381 380
382 tty_insert_flip_char(tty, ch, flag); 381 tty_insert_flip_char(tty, ch, flag);
383 tty_flip_buffer_push(tty); 382 tty_flip_buffer_push(tty);
384} 383}
385 384
386static inline void rc_receive(struct riscom_board const * bp) 385static void rc_receive(struct riscom_board const *bp)
387{ 386{
388 struct riscom_port *port; 387 struct riscom_port *port;
389 struct tty_struct *tty; 388 struct tty_struct *tty;
390 unsigned char count; 389 unsigned char count;
391 390
392 if (!(port = rc_get_port(bp, "Receive"))) 391 port = rc_get_port(bp, "Receive");
392 if (port == NULL)
393 return; 393 return;
394 394
395 tty = port->tty; 395 tty = port->tty;
396 396
397 count = rc_in(bp, CD180_RDCR); 397 count = rc_in(bp, CD180_RDCR);
398 398
399#ifdef RC_REPORT_FIFO 399#ifdef RC_REPORT_FIFO
400 port->hits[count > 8 ? 9 : count]++; 400 port->hits[count > 8 ? 9 : count]++;
401#endif 401#endif
402 402
403 while (count--) { 403 while (count--) {
404 if (tty_buffer_request_room(tty, 1) == 0) { 404 if (tty_buffer_request_room(tty, 1) == 0) {
405 printk(KERN_WARNING "rc%d: port %d: Working around " 405 printk(KERN_WARNING "rc%d: port %d: Working around "
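Several hunks in this file replace the old "assignment inside the condition" form with a plain assignment followed by an explicit NULL test, matching kernel coding style. The fragments below are lifted straight from the rc_get_port() call sites above, trimmed to show just the before/after pattern:

    /* Before: assignment buried in the condition. */
    if (!(port = rc_get_port(bp, "Receive")))
            return;

    /* After: assignment and test kept separate. */
    port = rc_get_port(bp, "Receive");
    if (port == NULL)
            return;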
@@ -412,26 +412,26 @@ static inline void rc_receive(struct riscom_board const * bp)
412 tty_flip_buffer_push(tty); 412 tty_flip_buffer_push(tty);
413} 413}
414 414
415static inline void rc_transmit(struct riscom_board const * bp) 415static void rc_transmit(struct riscom_board const *bp)
416{ 416{
417 struct riscom_port *port; 417 struct riscom_port *port;
418 struct tty_struct *tty; 418 struct tty_struct *tty;
419 unsigned char count; 419 unsigned char count;
420 420
421 421 port = rc_get_port(bp, "Transmit");
422 if (!(port = rc_get_port(bp, "Transmit"))) 422 if (port == NULL)
423 return; 423 return;
424 424
425 tty = port->tty; 425 tty = port->tty;
426 426
427 if (port->IER & IER_TXEMPTY) { 427 if (port->IER & IER_TXEMPTY) {
428 /* FIFO drained */ 428 /* FIFO drained */
429 rc_out(bp, CD180_CAR, port_No(port)); 429 rc_out(bp, CD180_CAR, port_No(port));
430 port->IER &= ~IER_TXEMPTY; 430 port->IER &= ~IER_TXEMPTY;
431 rc_out(bp, CD180_IER, port->IER); 431 rc_out(bp, CD180_IER, port->IER);
432 return; 432 return;
433 } 433 }
434 434
435 if ((port->xmit_cnt <= 0 && !port->break_length) 435 if ((port->xmit_cnt <= 0 && !port->break_length)
436 || tty->stopped || tty->hw_stopped) { 436 || tty->stopped || tty->hw_stopped) {
437 rc_out(bp, CD180_CAR, port_No(port)); 437 rc_out(bp, CD180_CAR, port_No(port));
@@ -439,7 +439,7 @@ static inline void rc_transmit(struct riscom_board const * bp)
439 rc_out(bp, CD180_IER, port->IER); 439 rc_out(bp, CD180_IER, port->IER);
440 return; 440 return;
441 } 441 }
442 442
443 if (port->break_length) { 443 if (port->break_length) {
444 if (port->break_length > 0) { 444 if (port->break_length > 0) {
445 if (port->COR2 & COR2_ETC) { 445 if (port->COR2 & COR2_ETC) {
@@ -451,7 +451,8 @@ static inline void rc_transmit(struct riscom_board const * bp)
451 rc_out(bp, CD180_TDR, CD180_C_ESC); 451 rc_out(bp, CD180_TDR, CD180_C_ESC);
452 rc_out(bp, CD180_TDR, CD180_C_DELAY); 452 rc_out(bp, CD180_TDR, CD180_C_DELAY);
453 rc_out(bp, CD180_TDR, count); 453 rc_out(bp, CD180_TDR, count);
454 if (!(port->break_length -= count)) 454 port->break_length -= count;
455 if (port->break_length == 0)
455 port->break_length--; 456 port->break_length--;
456 } else { 457 } else {
457 rc_out(bp, CD180_TDR, CD180_C_ESC); 458 rc_out(bp, CD180_TDR, CD180_C_ESC);
@@ -463,7 +464,7 @@ static inline void rc_transmit(struct riscom_board const * bp)
463 } 464 }
464 return; 465 return;
465 } 466 }
466 467
467 count = CD180_NFIFO; 468 count = CD180_NFIFO;
468 do { 469 do {
469 rc_out(bp, CD180_TDR, port->xmit_buf[port->xmit_tail++]); 470 rc_out(bp, CD180_TDR, port->xmit_buf[port->xmit_tail++]);
@@ -471,7 +472,7 @@ static inline void rc_transmit(struct riscom_board const * bp)
471 if (--port->xmit_cnt <= 0) 472 if (--port->xmit_cnt <= 0)
472 break; 473 break;
473 } while (--count > 0); 474 } while (--count > 0);
474 475
475 if (port->xmit_cnt <= 0) { 476 if (port->xmit_cnt <= 0) {
476 rc_out(bp, CD180_CAR, port_No(port)); 477 rc_out(bp, CD180_CAR, port_No(port));
477 port->IER &= ~IER_TXRDY; 478 port->IER &= ~IER_TXRDY;
@@ -481,25 +482,26 @@ static inline void rc_transmit(struct riscom_board const * bp)
481 tty_wakeup(tty); 482 tty_wakeup(tty);
482} 483}
483 484
484static inline void rc_check_modem(struct riscom_board const * bp) 485static void rc_check_modem(struct riscom_board const *bp)
485{ 486{
486 struct riscom_port *port; 487 struct riscom_port *port;
487 struct tty_struct *tty; 488 struct tty_struct *tty;
488 unsigned char mcr; 489 unsigned char mcr;
489 490
490 if (!(port = rc_get_port(bp, "Modem"))) 491 port = rc_get_port(bp, "Modem");
492 if (port == NULL)
491 return; 493 return;
492 494
493 tty = port->tty; 495 tty = port->tty;
494 496
495 mcr = rc_in(bp, CD180_MCR); 497 mcr = rc_in(bp, CD180_MCR);
496 if (mcr & MCR_CDCHG) { 498 if (mcr & MCR_CDCHG) {
497 if (rc_in(bp, CD180_MSVR) & MSVR_CD) 499 if (rc_in(bp, CD180_MSVR) & MSVR_CD)
498 wake_up_interruptible(&port->open_wait); 500 wake_up_interruptible(&port->open_wait);
499 else 501 else
500 tty_hangup(tty); 502 tty_hangup(tty);
501 } 503 }
502 504
503#ifdef RISCOM_BRAIN_DAMAGED_CTS 505#ifdef RISCOM_BRAIN_DAMAGED_CTS
504 if (mcr & MCR_CTSCHG) { 506 if (mcr & MCR_CTSCHG) {
505 if (rc_in(bp, CD180_MSVR) & MSVR_CTS) { 507 if (rc_in(bp, CD180_MSVR) & MSVR_CTS) {
@@ -526,13 +528,13 @@ static inline void rc_check_modem(struct riscom_board const * bp)
526 rc_out(bp, CD180_IER, port->IER); 528 rc_out(bp, CD180_IER, port->IER);
527 } 529 }
528#endif /* RISCOM_BRAIN_DAMAGED_CTS */ 530#endif /* RISCOM_BRAIN_DAMAGED_CTS */
529 531
530 /* Clear change bits */ 532 /* Clear change bits */
531 rc_out(bp, CD180_MCR, 0); 533 rc_out(bp, CD180_MCR, 0);
532} 534}
533 535
534/* The main interrupt processing routine */ 536/* The main interrupt processing routine */
535static irqreturn_t rc_interrupt(int dummy, void * dev_id) 537static irqreturn_t rc_interrupt(int dummy, void *dev_id)
536{ 538{
537 unsigned char status; 539 unsigned char status;
538 unsigned char ack; 540 unsigned char ack;
@@ -547,13 +549,11 @@ static irqreturn_t rc_interrupt(int dummy, void * dev_id)
547 (RC_BSR_TOUT | RC_BSR_TINT | 549 (RC_BSR_TOUT | RC_BSR_TINT |
548 RC_BSR_MINT | RC_BSR_RINT))) { 550 RC_BSR_MINT | RC_BSR_RINT))) {
549 handled = 1; 551 handled = 1;
550 if (status & RC_BSR_TOUT) 552 if (status & RC_BSR_TOUT)
551 printk(KERN_WARNING "rc%d: Got timeout. Hardware " 553 printk(KERN_WARNING "rc%d: Got timeout. Hardware "
552 "error?\n", board_No(bp)); 554 "error?\n", board_No(bp));
553
554 else if (status & RC_BSR_RINT) { 555 else if (status & RC_BSR_RINT) {
555 ack = rc_in(bp, RC_ACK_RINT); 556 ack = rc_in(bp, RC_ACK_RINT);
556
557 if (ack == (RC_ID | GIVR_IT_RCV)) 557 if (ack == (RC_ID | GIVR_IT_RCV))
558 rc_receive(bp); 558 rc_receive(bp);
559 else if (ack == (RC_ID | GIVR_IT_REXC)) 559 else if (ack == (RC_ID | GIVR_IT_REXC))
@@ -562,29 +562,23 @@ static irqreturn_t rc_interrupt(int dummy, void * dev_id)
562 printk(KERN_WARNING "rc%d: Bad receive ack " 562 printk(KERN_WARNING "rc%d: Bad receive ack "
563 "0x%02x.\n", 563 "0x%02x.\n",
564 board_No(bp), ack); 564 board_No(bp), ack);
565
566 } else if (status & RC_BSR_TINT) { 565 } else if (status & RC_BSR_TINT) {
567 ack = rc_in(bp, RC_ACK_TINT); 566 ack = rc_in(bp, RC_ACK_TINT);
568
569 if (ack == (RC_ID | GIVR_IT_TX)) 567 if (ack == (RC_ID | GIVR_IT_TX))
570 rc_transmit(bp); 568 rc_transmit(bp);
571 else 569 else
572 printk(KERN_WARNING "rc%d: Bad transmit ack " 570 printk(KERN_WARNING "rc%d: Bad transmit ack "
573 "0x%02x.\n", 571 "0x%02x.\n",
574 board_No(bp), ack); 572 board_No(bp), ack);
575
576 } else /* if (status & RC_BSR_MINT) */ { 573 } else /* if (status & RC_BSR_MINT) */ {
577 ack = rc_in(bp, RC_ACK_MINT); 574 ack = rc_in(bp, RC_ACK_MINT);
578 575 if (ack == (RC_ID | GIVR_IT_MODEM))
579 if (ack == (RC_ID | GIVR_IT_MODEM))
580 rc_check_modem(bp); 576 rc_check_modem(bp);
581 else 577 else
582 printk(KERN_WARNING "rc%d: Bad modem ack " 578 printk(KERN_WARNING "rc%d: Bad modem ack "
583 "0x%02x.\n", 579 "0x%02x.\n",
584 board_No(bp), ack); 580 board_No(bp), ack);
585 581 }
586 }
587
588 rc_out(bp, CD180_EOIR, 0); /* Mark end of interrupt */ 582 rc_out(bp, CD180_EOIR, 0); /* Mark end of interrupt */
589 rc_out(bp, RC_CTOUT, 0); /* Clear timeout flag */ 583 rc_out(bp, RC_CTOUT, 0); /* Clear timeout flag */
590 } 584 }
@@ -596,24 +590,24 @@ static irqreturn_t rc_interrupt(int dummy, void * dev_id)
596 */ 590 */
597 591
598/* Called with disabled interrupts */ 592/* Called with disabled interrupts */
599static int rc_setup_board(struct riscom_board * bp) 593static int rc_setup_board(struct riscom_board *bp)
600{ 594{
601 int error; 595 int error;
602 596
603 if (bp->flags & RC_BOARD_ACTIVE) 597 if (bp->flags & RC_BOARD_ACTIVE)
604 return 0; 598 return 0;
605 599
606 error = request_irq(bp->irq, rc_interrupt, IRQF_DISABLED, 600 error = request_irq(bp->irq, rc_interrupt, IRQF_DISABLED,
607 "RISCom/8", bp); 601 "RISCom/8", bp);
608 if (error) 602 if (error)
609 return error; 603 return error;
610 604
611 rc_out(bp, RC_CTOUT, 0); /* Just in case */ 605 rc_out(bp, RC_CTOUT, 0); /* Just in case */
612 bp->DTR = ~0; 606 bp->DTR = ~0;
613 rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */ 607 rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */
614 608
615 bp->flags |= RC_BOARD_ACTIVE; 609 bp->flags |= RC_BOARD_ACTIVE;
616 610
617 return 0; 611 return 0;
618} 612}
619 613
@@ -622,40 +616,40 @@ static void rc_shutdown_board(struct riscom_board *bp)
622{ 616{
623 if (!(bp->flags & RC_BOARD_ACTIVE)) 617 if (!(bp->flags & RC_BOARD_ACTIVE))
624 return; 618 return;
625 619
626 bp->flags &= ~RC_BOARD_ACTIVE; 620 bp->flags &= ~RC_BOARD_ACTIVE;
627 621
628 free_irq(bp->irq, NULL); 622 free_irq(bp->irq, NULL);
629 623
630 bp->DTR = ~0; 624 bp->DTR = ~0;
631 rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */ 625 rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */
632 626
633} 627}
634 628
635/* 629/*
636 * Setting up port characteristics. 630 * Setting up port characteristics.
637 * Must be called with disabled interrupts 631 * Must be called with disabled interrupts
638 */ 632 */
639static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port) 633static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
640{ 634{
641 struct tty_struct *tty; 635 struct tty_struct *tty = port->tty;
642 unsigned long baud; 636 unsigned long baud;
643 long tmp; 637 long tmp;
644 unsigned char cor1 = 0, cor3 = 0; 638 unsigned char cor1 = 0, cor3 = 0;
645 unsigned char mcor1 = 0, mcor2 = 0; 639 unsigned char mcor1 = 0, mcor2 = 0;
646 640
647 if (!(tty = port->tty) || !tty->termios) 641 if (tty == NULL || tty->termios == NULL)
648 return; 642 return;
649 643
650 port->IER = 0; 644 port->IER = 0;
651 port->COR2 = 0; 645 port->COR2 = 0;
652 port->MSVR = MSVR_RTS; 646 port->MSVR = MSVR_RTS;
653 647
654 baud = tty_get_baud_rate(tty); 648 baud = tty_get_baud_rate(tty);
655 649
656 /* Select port on the board */ 650 /* Select port on the board */
657 rc_out(bp, CD180_CAR, port_No(port)); 651 rc_out(bp, CD180_CAR, port_No(port));
658 652
659 if (!baud) { 653 if (!baud) {
660 /* Drop DTR & exit */ 654 /* Drop DTR & exit */
661 bp->DTR |= (1u << port_No(port)); 655 bp->DTR |= (1u << port_No(port));
@@ -666,69 +660,68 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
666 bp->DTR &= ~(1u << port_No(port)); 660 bp->DTR &= ~(1u << port_No(port));
667 rc_out(bp, RC_DTR, bp->DTR); 661 rc_out(bp, RC_DTR, bp->DTR);
668 } 662 }
669 663
670 /* 664 /*
671 * Now we must calculate some speed depended things 665 * Now we must calculate some speed depended things
672 */ 666 */
673 667
674 /* Set baud rate for port */ 668 /* Set baud rate for port */
675 tmp = (((RC_OSCFREQ + baud/2) / baud + 669 tmp = (((RC_OSCFREQ + baud/2) / baud +
676 CD180_TPC/2) / CD180_TPC); 670 CD180_TPC/2) / CD180_TPC);
677 671
678 rc_out(bp, CD180_RBPRH, (tmp >> 8) & 0xff); 672 rc_out(bp, CD180_RBPRH, (tmp >> 8) & 0xff);
679 rc_out(bp, CD180_TBPRH, (tmp >> 8) & 0xff); 673 rc_out(bp, CD180_TBPRH, (tmp >> 8) & 0xff);
680 rc_out(bp, CD180_RBPRL, tmp & 0xff); 674 rc_out(bp, CD180_RBPRL, tmp & 0xff);
681 rc_out(bp, CD180_TBPRL, tmp & 0xff); 675 rc_out(bp, CD180_TBPRL, tmp & 0xff);
682 676
683 baud = (baud + 5) / 10; /* Estimated CPS */ 677 baud = (baud + 5) / 10; /* Estimated CPS */
684 678
685 /* Two timer ticks seems enough to wakeup something like SLIP driver */ 679 /* Two timer ticks seems enough to wakeup something like SLIP driver */
686 tmp = ((baud + HZ/2) / HZ) * 2 - CD180_NFIFO; 680 tmp = ((baud + HZ/2) / HZ) * 2 - CD180_NFIFO;
687 port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ? 681 port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ?
688 SERIAL_XMIT_SIZE - 1 : tmp); 682 SERIAL_XMIT_SIZE - 1 : tmp);
689 683
690 /* Receiver timeout will be transmission time for 1.5 chars */ 684 /* Receiver timeout will be transmission time for 1.5 chars */
691 tmp = (RISCOM_TPS + RISCOM_TPS/2 + baud/2) / baud; 685 tmp = (RISCOM_TPS + RISCOM_TPS/2 + baud/2) / baud;
692 tmp = (tmp > 0xff) ? 0xff : tmp; 686 tmp = (tmp > 0xff) ? 0xff : tmp;
693 rc_out(bp, CD180_RTPR, tmp); 687 rc_out(bp, CD180_RTPR, tmp);
694 688
695 switch (C_CSIZE(tty)) { 689 switch (C_CSIZE(tty)) {
696 case CS5: 690 case CS5:
697 cor1 |= COR1_5BITS; 691 cor1 |= COR1_5BITS;
698 break; 692 break;
699 case CS6: 693 case CS6:
700 cor1 |= COR1_6BITS; 694 cor1 |= COR1_6BITS;
701 break; 695 break;
702 case CS7: 696 case CS7:
703 cor1 |= COR1_7BITS; 697 cor1 |= COR1_7BITS;
704 break; 698 break;
705 case CS8: 699 case CS8:
706 cor1 |= COR1_8BITS; 700 cor1 |= COR1_8BITS;
707 break; 701 break;
708 } 702 }
709 703 if (C_CSTOPB(tty))
710 if (C_CSTOPB(tty))
711 cor1 |= COR1_2SB; 704 cor1 |= COR1_2SB;
712 705
713 cor1 |= COR1_IGNORE; 706 cor1 |= COR1_IGNORE;
714 if (C_PARENB(tty)) { 707 if (C_PARENB(tty)) {
715 cor1 |= COR1_NORMPAR; 708 cor1 |= COR1_NORMPAR;
716 if (C_PARODD(tty)) 709 if (C_PARODD(tty))
717 cor1 |= COR1_ODDP; 710 cor1 |= COR1_ODDP;
718 if (I_INPCK(tty)) 711 if (I_INPCK(tty))
719 cor1 &= ~COR1_IGNORE; 712 cor1 &= ~COR1_IGNORE;
720 } 713 }
721 /* Set marking of some errors */ 714 /* Set marking of some errors */
722 port->mark_mask = RCSR_OE | RCSR_TOUT; 715 port->mark_mask = RCSR_OE | RCSR_TOUT;
723 if (I_INPCK(tty)) 716 if (I_INPCK(tty))
724 port->mark_mask |= RCSR_FE | RCSR_PE; 717 port->mark_mask |= RCSR_FE | RCSR_PE;
725 if (I_BRKINT(tty) || I_PARMRK(tty)) 718 if (I_BRKINT(tty) || I_PARMRK(tty))
726 port->mark_mask |= RCSR_BREAK; 719 port->mark_mask |= RCSR_BREAK;
727 if (I_IGNPAR(tty)) 720 if (I_IGNPAR(tty))
728 port->mark_mask &= ~(RCSR_FE | RCSR_PE); 721 port->mark_mask &= ~(RCSR_FE | RCSR_PE);
729 if (I_IGNBRK(tty)) { 722 if (I_IGNBRK(tty)) {
730 port->mark_mask &= ~RCSR_BREAK; 723 port->mark_mask &= ~RCSR_BREAK;
731 if (I_IGNPAR(tty)) 724 if (I_IGNPAR(tty))
732 /* Real raw mode. Ignore all */ 725 /* Real raw mode. Ignore all */
733 port->mark_mask &= ~RCSR_OE; 726 port->mark_mask &= ~RCSR_OE;
734 } 727 }
@@ -738,7 +731,8 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
738 port->IER |= IER_DSR | IER_CTS; 731 port->IER |= IER_DSR | IER_CTS;
739 mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD; 732 mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD;
740 mcor2 |= MCOR2_DSROD | MCOR2_CTSOD; 733 mcor2 |= MCOR2_DSROD | MCOR2_CTSOD;
741 tty->hw_stopped = !(rc_in(bp, CD180_MSVR) & (MSVR_CTS|MSVR_DSR)); 734 tty->hw_stopped = !(rc_in(bp, CD180_MSVR) &
735 (MSVR_CTS|MSVR_DSR));
742#else 736#else
743 port->COR2 |= COR2_CTSAE; 737 port->COR2 |= COR2_CTSAE;
744#endif 738#endif
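The baud-rate programming in rc_change_speed() divides the oscillator clock down to a 16-bit period value, adding half the divisor before each division so the result rounds to nearest instead of truncating. The standalone program below works one case through with illustrative constants; the real RC_OSCFREQ and CD180_TPC come from the driver headers, so the figures here are assumptions chosen only to show the rounding.

    #include <stdio.h>

    /* Assumed values, for illustration only. */
    #define RC_OSCFREQ 9830400UL   /* oscillator frequency, Hz */
    #define CD180_TPC  16UL        /* clock ticks per character */

    int main(void)
    {
            unsigned long baud = 9600;
            /* Same expression as rc_change_speed(): round-to-nearest
             * at each division instead of truncating. */
            unsigned long tmp = ((RC_OSCFREQ + baud / 2) / baud
                                 + CD180_TPC / 2) / CD180_TPC;

            printf("divisor for %lu baud = %lu\n", baud, tmp); /* prints 64 */
            return 0;
    }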
@@ -761,13 +755,13 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
761 mcor1 |= MCOR1_CDZD; 755 mcor1 |= MCOR1_CDZD;
762 mcor2 |= MCOR2_CDOD; 756 mcor2 |= MCOR2_CDOD;
763 } 757 }
764 758
765 if (C_CREAD(tty)) 759 if (C_CREAD(tty))
766 /* Enable receiver */ 760 /* Enable receiver */
767 port->IER |= IER_RXD; 761 port->IER |= IER_RXD;
768 762
769 /* Set input FIFO size (1-8 bytes) */ 763 /* Set input FIFO size (1-8 bytes) */
770 cor3 |= RISCOM_RXFIFO; 764 cor3 |= RISCOM_RXFIFO;
771 /* Setting up CD180 channel registers */ 765 /* Setting up CD180 channel registers */
772 rc_out(bp, CD180_COR1, cor1); 766 rc_out(bp, CD180_COR1, cor1);
773 rc_out(bp, CD180_COR2, port->COR2); 767 rc_out(bp, CD180_COR2, port->COR2);
@@ -791,36 +785,30 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
791static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port) 785static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port)
792{ 786{
793 unsigned long flags; 787 unsigned long flags;
794 788
795 if (port->flags & ASYNC_INITIALIZED) 789 if (port->flags & ASYNC_INITIALIZED)
796 return 0; 790 return 0;
797 791
798 if (!port->xmit_buf) { 792 if (!port->xmit_buf) {
799 /* We may sleep in get_zeroed_page() */ 793 /* We may sleep in get_zeroed_page() */
800 unsigned long tmp; 794 unsigned long tmp = get_zeroed_page(GFP_KERNEL);
801 795 if (tmp == 0)
802 if (!(tmp = get_zeroed_page(GFP_KERNEL)))
803 return -ENOMEM; 796 return -ENOMEM;
804 797 if (port->xmit_buf)
805 if (port->xmit_buf) {
806 free_page(tmp); 798 free_page(tmp);
807 return -ERESTARTSYS; 799 else
808 } 800 port->xmit_buf = (unsigned char *) tmp;
809 port->xmit_buf = (unsigned char *) tmp;
810 } 801 }
811
812 spin_lock_irqsave(&riscom_lock, flags); 802 spin_lock_irqsave(&riscom_lock, flags);
813 803
814 if (port->tty) 804 if (port->tty)
815 clear_bit(TTY_IO_ERROR, &port->tty->flags); 805 clear_bit(TTY_IO_ERROR, &port->tty->flags);
816 806 if (port->count == 1)
817 if (port->count == 1)
818 bp->count++; 807 bp->count++;
819
820 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; 808 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
821 rc_change_speed(bp, port); 809 rc_change_speed(bp, port);
822 port->flags |= ASYNC_INITIALIZED; 810 port->flags |= ASYNC_INITIALIZED;
823 811
824 spin_unlock_irqrestore(&riscom_lock, flags); 812 spin_unlock_irqrestore(&riscom_lock, flags);
825 return 0; 813 return 0;
826} 814}
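Because get_zeroed_page(GFP_KERNEL) may sleep, another opener can install xmit_buf in the meantime; the rewritten rc_setup_port() now simply frees the duplicate page and carries on, instead of bailing out with -ERESTARTSYS as the old code did. A condensed sketch of that allocate-then-recheck pattern (field names follow the driver; treat it as an illustration, not the exact function body):

    /* Sketch: sleeping allocation followed by a re-check, since another
     * opener may have set port->xmit_buf while we slept. */
    if (!port->xmit_buf) {
            unsigned long page = get_zeroed_page(GFP_KERNEL);

            if (!page)
                    return -ENOMEM;
            if (port->xmit_buf)                     /* lost the race: drop ours */
                    free_page(page);
            else
                    port->xmit_buf = (unsigned char *)page;
    }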
@@ -829,38 +817,39 @@ static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port)
829static void rc_shutdown_port(struct riscom_board *bp, struct riscom_port *port) 817static void rc_shutdown_port(struct riscom_board *bp, struct riscom_port *port)
830{ 818{
831 struct tty_struct *tty; 819 struct tty_struct *tty;
832 820
833 if (!(port->flags & ASYNC_INITIALIZED)) 821 if (!(port->flags & ASYNC_INITIALIZED))
834 return; 822 return;
835 823
836#ifdef RC_REPORT_OVERRUN 824#ifdef RC_REPORT_OVERRUN
837 printk(KERN_INFO "rc%d: port %d: Total %ld overruns were detected.\n", 825 printk(KERN_INFO "rc%d: port %d: Total %ld overruns were detected.\n",
838 board_No(bp), port_No(port), port->overrun); 826 board_No(bp), port_No(port), port->overrun);
839#endif 827#endif
840#ifdef RC_REPORT_FIFO 828#ifdef RC_REPORT_FIFO
841 { 829 {
842 int i; 830 int i;
843 831
844 printk(KERN_INFO "rc%d: port %d: FIFO hits [ ", 832 printk(KERN_INFO "rc%d: port %d: FIFO hits [ ",
845 board_No(bp), port_No(port)); 833 board_No(bp), port_No(port));
846 for (i = 0; i < 10; i++) { 834 for (i = 0; i < 10; i++)
847 printk("%ld ", port->hits[i]); 835 printk("%ld ", port->hits[i]);
848 }
849 printk("].\n"); 836 printk("].\n");
850 } 837 }
851#endif 838#endif
852 if (port->xmit_buf) { 839 if (port->xmit_buf) {
853 free_page((unsigned long) port->xmit_buf); 840 free_page((unsigned long) port->xmit_buf);
854 port->xmit_buf = NULL; 841 port->xmit_buf = NULL;
855 } 842 }
856 843
857 if (!(tty = port->tty) || C_HUPCL(tty)) { 844 tty = port->tty;
845
846 if (tty == NULL || C_HUPCL(tty)) {
858 /* Drop DTR */ 847 /* Drop DTR */
859 bp->DTR |= (1u << port_No(port)); 848 bp->DTR |= (1u << port_No(port));
860 rc_out(bp, RC_DTR, bp->DTR); 849 rc_out(bp, RC_DTR, bp->DTR);
861 } 850 }
862 851
863 /* Select port */ 852 /* Select port */
864 rc_out(bp, CD180_CAR, port_No(port)); 853 rc_out(bp, CD180_CAR, port_No(port));
865 /* Reset port */ 854 /* Reset port */
866 rc_wait_CCR(bp); 855 rc_wait_CCR(bp);
@@ -868,28 +857,26 @@ static void rc_shutdown_port(struct riscom_board *bp, struct riscom_port *port)
868 /* Disable all interrupts from this port */ 857 /* Disable all interrupts from this port */
869 port->IER = 0; 858 port->IER = 0;
870 rc_out(bp, CD180_IER, port->IER); 859 rc_out(bp, CD180_IER, port->IER);
871 860
872 if (tty) 861 if (tty)
873 set_bit(TTY_IO_ERROR, &tty->flags); 862 set_bit(TTY_IO_ERROR, &tty->flags);
874 port->flags &= ~ASYNC_INITIALIZED; 863 port->flags &= ~ASYNC_INITIALIZED;
875 864
876 if (--bp->count < 0) { 865 if (--bp->count < 0) {
877 printk(KERN_INFO "rc%d: rc_shutdown_port: " 866 printk(KERN_INFO "rc%d: rc_shutdown_port: "
878 "bad board count: %d\n", 867 "bad board count: %d\n",
879 board_No(bp), bp->count); 868 board_No(bp), bp->count);
880 bp->count = 0; 869 bp->count = 0;
881 } 870 }
882
883 /* 871 /*
884 * If this is the last opened port on the board 872 * If this is the last opened port on the board
885 * shutdown whole board 873 * shutdown whole board
886 */ 874 */
887 if (!bp->count) 875 if (!bp->count)
888 rc_shutdown_board(bp); 876 rc_shutdown_board(bp);
889} 877}
890 878
891 879static int block_til_ready(struct tty_struct *tty, struct file *filp,
892static int block_til_ready(struct tty_struct *tty, struct file * filp,
893 struct riscom_port *port) 880 struct riscom_port *port)
894{ 881{
895 DECLARE_WAITQUEUE(wait, current); 882 DECLARE_WAITQUEUE(wait, current);
@@ -921,7 +908,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
921 return 0; 908 return 0;
922 } 909 }
923 910
924 if (C_CLOCAL(tty)) 911 if (C_CLOCAL(tty))
925 do_clocal = 1; 912 do_clocal = 1;
926 913
927 /* 914 /*
@@ -959,7 +946,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
959 if (port->flags & ASYNC_HUP_NOTIFY) 946 if (port->flags & ASYNC_HUP_NOTIFY)
960 retval = -EAGAIN; 947 retval = -EAGAIN;
961 else 948 else
962 retval = -ERESTARTSYS; 949 retval = -ERESTARTSYS;
963 break; 950 break;
964 } 951 }
965 if (!(port->flags & ASYNC_CLOSING) && 952 if (!(port->flags & ASYNC_CLOSING) &&
@@ -978,50 +965,63 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
978 port->blocked_open--; 965 port->blocked_open--;
979 if (retval) 966 if (retval)
980 return retval; 967 return retval;
981 968
982 port->flags |= ASYNC_NORMAL_ACTIVE; 969 port->flags |= ASYNC_NORMAL_ACTIVE;
983 return 0; 970 return 0;
984} 971}
985 972
986static int rc_open(struct tty_struct * tty, struct file * filp) 973static int rc_open(struct tty_struct *tty, struct file *filp)
987{ 974{
988 int board; 975 int board;
989 int error; 976 int error;
990 struct riscom_port * port; 977 struct riscom_port *port;
991 struct riscom_board * bp; 978 struct riscom_board *bp;
992 979
993 board = RC_BOARD(tty->index); 980 board = RC_BOARD(tty->index);
994 if (board >= RC_NBOARD || !(rc_board[board].flags & RC_BOARD_PRESENT)) 981 if (board >= RC_NBOARD || !(rc_board[board].flags & RC_BOARD_PRESENT))
995 return -ENODEV; 982 return -ENODEV;
996 983
997 bp = &rc_board[board]; 984 bp = &rc_board[board];
998 port = rc_port + board * RC_NPORT + RC_PORT(tty->index); 985 port = rc_port + board * RC_NPORT + RC_PORT(tty->index);
999 if (rc_paranoia_check(port, tty->name, "rc_open")) 986 if (rc_paranoia_check(port, tty->name, "rc_open"))
1000 return -ENODEV; 987 return -ENODEV;
1001 988
1002 if ((error = rc_setup_board(bp))) 989 error = rc_setup_board(bp);
990 if (error)
1003 return error; 991 return error;
1004 992
1005 port->count++; 993 port->count++;
1006 tty->driver_data = port; 994 tty->driver_data = port;
1007 port->tty = tty; 995 port->tty = tty;
1008 996
1009 if ((error = rc_setup_port(bp, port))) 997 error = rc_setup_port(bp, port);
1010 return error; 998 if (error == 0)
1011 999 error = block_til_ready(tty, filp, port);
1012 if ((error = block_til_ready(tty, filp, port))) 1000 return error;
1013 return error;
1014
1015 return 0;
1016} 1001}
1017 1002
1018static void rc_close(struct tty_struct * tty, struct file * filp) 1003static void rc_flush_buffer(struct tty_struct *tty)
1004{
1005 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1006 unsigned long flags;
1007
1008 if (rc_paranoia_check(port, tty->name, "rc_flush_buffer"))
1009 return;
1010
1011 spin_lock_irqsave(&riscom_lock, flags);
1012 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
1013 spin_unlock_irqrestore(&riscom_lock, flags);
1014
1015 tty_wakeup(tty);
1016}
1017
1018static void rc_close(struct tty_struct *tty, struct file *filp)
1019{ 1019{
1020 struct riscom_port *port = (struct riscom_port *) tty->driver_data; 1020 struct riscom_port *port = (struct riscom_port *) tty->driver_data;
1021 struct riscom_board *bp; 1021 struct riscom_board *bp;
1022 unsigned long flags; 1022 unsigned long flags;
1023 unsigned long timeout; 1023 unsigned long timeout;
1024 1024
1025 if (!port || rc_paranoia_check(port, tty->name, "close")) 1025 if (!port || rc_paranoia_check(port, tty->name, "close"))
1026 return; 1026 return;
1027 1027
@@ -1029,7 +1029,7 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
1029 1029
1030 if (tty_hung_up_p(filp)) 1030 if (tty_hung_up_p(filp))
1031 goto out; 1031 goto out;
1032 1032
1033 bp = port_Board(port); 1033 bp = port_Board(port);
1034 if ((tty->count == 1) && (port->count != 1)) { 1034 if ((tty->count == 1) && (port->count != 1)) {
1035 printk(KERN_INFO "rc%d: rc_close: bad port count;" 1035 printk(KERN_INFO "rc%d: rc_close: bad port count;"
@@ -1047,7 +1047,7 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
1047 goto out; 1047 goto out;
1048 port->flags |= ASYNC_CLOSING; 1048 port->flags |= ASYNC_CLOSING;
1049 /* 1049 /*
1050 * Now we wait for the transmit buffer to clear; and we notify 1050 * Now we wait for the transmit buffer to clear; and we notify
1051 * the line discipline to only process XON/XOFF characters. 1051 * the line discipline to only process XON/XOFF characters.
1052 */ 1052 */
1053 tty->closing = 1; 1053 tty->closing = 1;
@@ -1070,24 +1070,22 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
1070 * has completely drained; this is especially 1070 * has completely drained; this is especially
1071 * important if there is a transmit FIFO! 1071 * important if there is a transmit FIFO!
1072 */ 1072 */
1073 timeout = jiffies+HZ; 1073 timeout = jiffies + HZ;
1074 while(port->IER & IER_TXEMPTY) { 1074 while (port->IER & IER_TXEMPTY) {
1075 msleep_interruptible(jiffies_to_msecs(port->timeout)); 1075 msleep_interruptible(jiffies_to_msecs(port->timeout));
1076 if (time_after(jiffies, timeout)) 1076 if (time_after(jiffies, timeout))
1077 break; 1077 break;
1078 } 1078 }
1079 } 1079 }
1080 rc_shutdown_port(bp, port); 1080 rc_shutdown_port(bp, port);
1081 if (tty->driver->flush_buffer) 1081 rc_flush_buffer(tty);
1082 tty->driver->flush_buffer(tty);
1083 tty_ldisc_flush(tty); 1082 tty_ldisc_flush(tty);
1084 1083
1085 tty->closing = 0; 1084 tty->closing = 0;
1086 port->tty = NULL; 1085 port->tty = NULL;
1087 if (port->blocked_open) { 1086 if (port->blocked_open) {
1088 if (port->close_delay) { 1087 if (port->close_delay)
1089 msleep_interruptible(jiffies_to_msecs(port->close_delay)); 1088 msleep_interruptible(jiffies_to_msecs(port->close_delay));
1090 }
1091 wake_up_interruptible(&port->open_wait); 1089 wake_up_interruptible(&port->open_wait);
1092 } 1090 }
1093 port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); 1091 port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
@@ -1097,17 +1095,17 @@ out:
1097 spin_unlock_irqrestore(&riscom_lock, flags); 1095 spin_unlock_irqrestore(&riscom_lock, flags);
1098} 1096}
1099 1097
1100static int rc_write(struct tty_struct * tty, 1098static int rc_write(struct tty_struct *tty,
1101 const unsigned char *buf, int count) 1099 const unsigned char *buf, int count)
1102{ 1100{
1103 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1101 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1104 struct riscom_board *bp; 1102 struct riscom_board *bp;
1105 int c, total = 0; 1103 int c, total = 0;
1106 unsigned long flags; 1104 unsigned long flags;
1107 1105
1108 if (rc_paranoia_check(port, tty->name, "rc_write")) 1106 if (rc_paranoia_check(port, tty->name, "rc_write"))
1109 return 0; 1107 return 0;
1110 1108
1111 bp = port_Board(port); 1109 bp = port_Board(port);
1112 1110
1113 if (!tty || !port->xmit_buf) 1111 if (!tty || !port->xmit_buf)
@@ -1144,38 +1142,41 @@ static int rc_write(struct tty_struct * tty,
1144 return total; 1142 return total;
1145} 1143}
1146 1144
1147static void rc_put_char(struct tty_struct * tty, unsigned char ch) 1145static int rc_put_char(struct tty_struct *tty, unsigned char ch)
1148{ 1146{
1149 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1147 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1150 unsigned long flags; 1148 unsigned long flags;
1149 int ret = 0;
1151 1150
1152 if (rc_paranoia_check(port, tty->name, "rc_put_char")) 1151 if (rc_paranoia_check(port, tty->name, "rc_put_char"))
1153 return; 1152 return 0;
1154 1153
1155 if (!tty || !port->xmit_buf) 1154 if (!tty || !port->xmit_buf)
1156 return; 1155 return 0;
1157 1156
1158 spin_lock_irqsave(&riscom_lock, flags); 1157 spin_lock_irqsave(&riscom_lock, flags);
1159 1158
1160 if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) 1159 if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
1161 goto out; 1160 goto out;
1162 1161
1163 port->xmit_buf[port->xmit_head++] = ch; 1162 port->xmit_buf[port->xmit_head++] = ch;
1164 port->xmit_head &= SERIAL_XMIT_SIZE - 1; 1163 port->xmit_head &= SERIAL_XMIT_SIZE - 1;
1165 port->xmit_cnt++; 1164 port->xmit_cnt++;
1165 ret = 1;
1166 1166
1167out: 1167out:
1168 spin_unlock_irqrestore(&riscom_lock, flags); 1168 spin_unlock_irqrestore(&riscom_lock, flags);
1169 return ret;
1169} 1170}
1170 1171
1171static void rc_flush_chars(struct tty_struct * tty) 1172static void rc_flush_chars(struct tty_struct *tty)
1172{ 1173{
1173 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1174 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1174 unsigned long flags; 1175 unsigned long flags;
1175 1176
1176 if (rc_paranoia_check(port, tty->name, "rc_flush_chars")) 1177 if (rc_paranoia_check(port, tty->name, "rc_flush_chars"))
1177 return; 1178 return;
1178 1179
1179 if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || 1180 if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
1180 !port->xmit_buf) 1181 !port->xmit_buf)
1181 return; 1182 return;
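rc_put_char() changes from void to int to match the tty_operations ->put_char() convention of reporting whether the character was actually queued: 1 when it was added to xmit_buf, 0 when the buffer was full or the sanity checks failed. The caller below is purely hypothetical (not from this patch) and only shows how that return value could be consumed:

    /* Hypothetical caller: fall back to the bulk write path when the
     * single-character fast path reports no room. Sketch only. */
    static void queue_char(struct tty_struct *tty, unsigned char ch)
    {
            if (rc_put_char(tty, ch) == 0)      /* 0: not queued */
                    rc_write(tty, &ch, 1);
    }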
@@ -1189,11 +1190,11 @@ static void rc_flush_chars(struct tty_struct * tty)
1189 spin_unlock_irqrestore(&riscom_lock, flags); 1190 spin_unlock_irqrestore(&riscom_lock, flags);
1190} 1191}
1191 1192
1192static int rc_write_room(struct tty_struct * tty) 1193static int rc_write_room(struct tty_struct *tty)
1193{ 1194{
1194 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1195 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1195 int ret; 1196 int ret;
1196 1197
1197 if (rc_paranoia_check(port, tty->name, "rc_write_room")) 1198 if (rc_paranoia_check(port, tty->name, "rc_write_room"))
1198 return 0; 1199 return 0;
1199 1200
@@ -1206,39 +1207,22 @@ static int rc_write_room(struct tty_struct * tty)
1206static int rc_chars_in_buffer(struct tty_struct *tty) 1207static int rc_chars_in_buffer(struct tty_struct *tty)
1207{ 1208{
1208 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1209 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1209 1210
1210 if (rc_paranoia_check(port, tty->name, "rc_chars_in_buffer")) 1211 if (rc_paranoia_check(port, tty->name, "rc_chars_in_buffer"))
1211 return 0; 1212 return 0;
1212
1213 return port->xmit_cnt;
1214}
1215
1216static void rc_flush_buffer(struct tty_struct *tty)
1217{
1218 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1219 unsigned long flags;
1220
1221 if (rc_paranoia_check(port, tty->name, "rc_flush_buffer"))
1222 return;
1223
1224 spin_lock_irqsave(&riscom_lock, flags);
1225
1226 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
1227 1213
1228 spin_unlock_irqrestore(&riscom_lock, flags); 1214 return port->xmit_cnt;
1229
1230 tty_wakeup(tty);
1231} 1215}
1232 1216
1233static int rc_tiocmget(struct tty_struct *tty, struct file *file) 1217static int rc_tiocmget(struct tty_struct *tty, struct file *file)
1234{ 1218{
1235 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1219 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1236 struct riscom_board * bp; 1220 struct riscom_board *bp;
1237 unsigned char status; 1221 unsigned char status;
1238 unsigned int result; 1222 unsigned int result;
1239 unsigned long flags; 1223 unsigned long flags;
1240 1224
1241 if (rc_paranoia_check(port, tty->name, __FUNCTION__)) 1225 if (rc_paranoia_check(port, tty->name, __func__))
1242 return -ENODEV; 1226 return -ENODEV;
1243 1227
1244 bp = port_Board(port); 1228 bp = port_Board(port);
@@ -1266,7 +1250,7 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file,
1266 unsigned long flags; 1250 unsigned long flags;
1267 struct riscom_board *bp; 1251 struct riscom_board *bp;
1268 1252
1269 if (rc_paranoia_check(port, tty->name, __FUNCTION__)) 1253 if (rc_paranoia_check(port, tty->name, __func__))
1270 return -ENODEV; 1254 return -ENODEV;
1271 1255
1272 bp = port_Board(port); 1256 bp = port_Board(port);
@@ -1292,11 +1276,11 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file,
1292 return 0; 1276 return 0;
1293} 1277}
1294 1278
1295static inline void rc_send_break(struct riscom_port * port, unsigned long length) 1279static void rc_send_break(struct riscom_port *port, unsigned long length)
1296{ 1280{
1297 struct riscom_board *bp = port_Board(port); 1281 struct riscom_board *bp = port_Board(port);
1298 unsigned long flags; 1282 unsigned long flags;
1299 1283
1300 spin_lock_irqsave(&riscom_lock, flags); 1284 spin_lock_irqsave(&riscom_lock, flags);
1301 1285
1302 port->break_length = RISCOM_TPS / HZ * length; 1286 port->break_length = RISCOM_TPS / HZ * length;
@@ -1312,17 +1296,17 @@ static inline void rc_send_break(struct riscom_port * port, unsigned long length
1312 spin_unlock_irqrestore(&riscom_lock, flags); 1296 spin_unlock_irqrestore(&riscom_lock, flags);
1313} 1297}
1314 1298
1315static inline int rc_set_serial_info(struct riscom_port * port, 1299static int rc_set_serial_info(struct riscom_port *port,
1316 struct serial_struct __user * newinfo) 1300 struct serial_struct __user *newinfo)
1317{ 1301{
1318 struct serial_struct tmp; 1302 struct serial_struct tmp;
1319 struct riscom_board *bp = port_Board(port); 1303 struct riscom_board *bp = port_Board(port);
1320 int change_speed; 1304 int change_speed;
1321 1305
1322 if (copy_from_user(&tmp, newinfo, sizeof(tmp))) 1306 if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
1323 return -EFAULT; 1307 return -EFAULT;
1324 1308
1325#if 0 1309#if 0
1326 if ((tmp.irq != bp->irq) || 1310 if ((tmp.irq != bp->irq) ||
1327 (tmp.port != bp->base) || 1311 (tmp.port != bp->base) ||
1328 (tmp.type != PORT_CIRRUS) || 1312 (tmp.type != PORT_CIRRUS) ||
@@ -1331,16 +1315,16 @@ static inline int rc_set_serial_info(struct riscom_port * port,
1331 (tmp.xmit_fifo_size != CD180_NFIFO) || 1315 (tmp.xmit_fifo_size != CD180_NFIFO) ||
1332 (tmp.flags & ~RISCOM_LEGAL_FLAGS)) 1316 (tmp.flags & ~RISCOM_LEGAL_FLAGS))
1333 return -EINVAL; 1317 return -EINVAL;
1334#endif 1318#endif
1335 1319
1336 change_speed = ((port->flags & ASYNC_SPD_MASK) != 1320 change_speed = ((port->flags & ASYNC_SPD_MASK) !=
1337 (tmp.flags & ASYNC_SPD_MASK)); 1321 (tmp.flags & ASYNC_SPD_MASK));
1338 1322
1339 if (!capable(CAP_SYS_ADMIN)) { 1323 if (!capable(CAP_SYS_ADMIN)) {
1340 if ((tmp.close_delay != port->close_delay) || 1324 if ((tmp.close_delay != port->close_delay) ||
1341 (tmp.closing_wait != port->closing_wait) || 1325 (tmp.closing_wait != port->closing_wait) ||
1342 ((tmp.flags & ~ASYNC_USR_MASK) != 1326 ((tmp.flags & ~ASYNC_USR_MASK) !=
1343 (port->flags & ~ASYNC_USR_MASK))) 1327 (port->flags & ~ASYNC_USR_MASK)))
1344 return -EPERM; 1328 return -EPERM;
1345 port->flags = ((port->flags & ~ASYNC_USR_MASK) | 1329 port->flags = ((port->flags & ~ASYNC_USR_MASK) |
1346 (tmp.flags & ASYNC_USR_MASK)); 1330 (tmp.flags & ASYNC_USR_MASK));
@@ -1360,12 +1344,12 @@ static inline int rc_set_serial_info(struct riscom_port * port,
1360 return 0; 1344 return 0;
1361} 1345}
1362 1346
1363static inline int rc_get_serial_info(struct riscom_port * port, 1347static int rc_get_serial_info(struct riscom_port *port,
1364 struct serial_struct __user *retinfo) 1348 struct serial_struct __user *retinfo)
1365{ 1349{
1366 struct serial_struct tmp; 1350 struct serial_struct tmp;
1367 struct riscom_board *bp = port_Board(port); 1351 struct riscom_board *bp = port_Board(port);
1368 1352
1369 memset(&tmp, 0, sizeof(tmp)); 1353 memset(&tmp, 0, sizeof(tmp));
1370 tmp.type = PORT_CIRRUS; 1354 tmp.type = PORT_CIRRUS;
1371 tmp.line = port - rc_port; 1355 tmp.line = port - rc_port;
@@ -1379,19 +1363,18 @@ static inline int rc_get_serial_info(struct riscom_port * port,
1379 return copy_to_user(retinfo, &tmp, sizeof(tmp)) ? -EFAULT : 0; 1363 return copy_to_user(retinfo, &tmp, sizeof(tmp)) ? -EFAULT : 0;
1380} 1364}
1381 1365
1382static int rc_ioctl(struct tty_struct * tty, struct file * filp, 1366static int rc_ioctl(struct tty_struct *tty, struct file *filp,
1383 unsigned int cmd, unsigned long arg) 1367 unsigned int cmd, unsigned long arg)
1384
1385{ 1368{
1386 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1369 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1387 void __user *argp = (void __user *)arg; 1370 void __user *argp = (void __user *)arg;
1388 int retval; 1371 int retval = 0;
1389 1372
1390 if (rc_paranoia_check(port, tty->name, "rc_ioctl")) 1373 if (rc_paranoia_check(port, tty->name, "rc_ioctl"))
1391 return -ENODEV; 1374 return -ENODEV;
1392 1375
1393 switch (cmd) { 1376 switch (cmd) {
1394 case TCSBRK: /* SVID version: non-zero arg --> no break */ 1377 case TCSBRK: /* SVID version: non-zero arg --> no break */
1395 retval = tty_check_change(tty); 1378 retval = tty_check_change(tty);
1396 if (retval) 1379 if (retval)
1397 return retval; 1380 return retval;
@@ -1399,45 +1382,40 @@ static int rc_ioctl(struct tty_struct * tty, struct file * filp,
1399 if (!arg) 1382 if (!arg)
1400 rc_send_break(port, HZ/4); /* 1/4 second */ 1383 rc_send_break(port, HZ/4); /* 1/4 second */
1401 break; 1384 break;
1402 case TCSBRKP: /* support for POSIX tcsendbreak() */ 1385 case TCSBRKP: /* support for POSIX tcsendbreak() */
1403 retval = tty_check_change(tty); 1386 retval = tty_check_change(tty);
1404 if (retval) 1387 if (retval)
1405 return retval; 1388 return retval;
1406 tty_wait_until_sent(tty, 0); 1389 tty_wait_until_sent(tty, 0);
1407 rc_send_break(port, arg ? arg*(HZ/10) : HZ/4); 1390 rc_send_break(port, arg ? arg*(HZ/10) : HZ/4);
1408 break; 1391 break;
1409 case TIOCGSOFTCAR: 1392 case TIOCGSERIAL:
1410 return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned __user *)argp); 1393 lock_kernel();
1411 case TIOCSSOFTCAR: 1394 retval = rc_get_serial_info(port, argp);
1412 if (get_user(arg,(unsigned __user *) argp)) 1395 unlock_kernel();
1413 return -EFAULT;
1414 tty->termios->c_cflag =
1415 ((tty->termios->c_cflag & ~CLOCAL) |
1416 (arg ? CLOCAL : 0));
1417 break; 1396 break;
1418 case TIOCGSERIAL: 1397 case TIOCSSERIAL:
1419 return rc_get_serial_info(port, argp); 1398 lock_kernel();
1420 case TIOCSSERIAL: 1399 retval = rc_set_serial_info(port, argp);
1421 return rc_set_serial_info(port, argp); 1400 unlock_kernel();
1422 default: 1401 break;
1423 return -ENOIOCTLCMD; 1402 default:
1403 retval = -ENOIOCTLCMD;
1424 } 1404 }
1425 return 0; 1405 return retval;
1426} 1406}
1427 1407
1428static void rc_throttle(struct tty_struct * tty) 1408static void rc_throttle(struct tty_struct *tty)
1429{ 1409{
1430 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1410 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1431 struct riscom_board *bp; 1411 struct riscom_board *bp;
1432 unsigned long flags; 1412 unsigned long flags;
1433 1413
1434 if (rc_paranoia_check(port, tty->name, "rc_throttle")) 1414 if (rc_paranoia_check(port, tty->name, "rc_throttle"))
1435 return; 1415 return;
1436
1437 bp = port_Board(port); 1416 bp = port_Board(port);
1438 1417
1439 spin_lock_irqsave(&riscom_lock, flags); 1418 spin_lock_irqsave(&riscom_lock, flags);
1440
1441 port->MSVR &= ~MSVR_RTS; 1419 port->MSVR &= ~MSVR_RTS;
1442 rc_out(bp, CD180_CAR, port_No(port)); 1420 rc_out(bp, CD180_CAR, port_No(port));
1443 if (I_IXOFF(tty)) { 1421 if (I_IXOFF(tty)) {
@@ -1446,23 +1424,20 @@ static void rc_throttle(struct tty_struct * tty)
1446 rc_wait_CCR(bp); 1424 rc_wait_CCR(bp);
1447 } 1425 }
1448 rc_out(bp, CD180_MSVR, port->MSVR); 1426 rc_out(bp, CD180_MSVR, port->MSVR);
1449
1450 spin_unlock_irqrestore(&riscom_lock, flags); 1427 spin_unlock_irqrestore(&riscom_lock, flags);
1451} 1428}
1452 1429
1453static void rc_unthrottle(struct tty_struct * tty) 1430static void rc_unthrottle(struct tty_struct *tty)
1454{ 1431{
1455 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1432 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1456 struct riscom_board *bp; 1433 struct riscom_board *bp;
1457 unsigned long flags; 1434 unsigned long flags;
1458 1435
1459 if (rc_paranoia_check(port, tty->name, "rc_unthrottle")) 1436 if (rc_paranoia_check(port, tty->name, "rc_unthrottle"))
1460 return; 1437 return;
1461
1462 bp = port_Board(port); 1438 bp = port_Board(port);
1463
1464 spin_lock_irqsave(&riscom_lock, flags);
1465 1439
1440 spin_lock_irqsave(&riscom_lock, flags);
1466 port->MSVR |= MSVR_RTS; 1441 port->MSVR |= MSVR_RTS;
1467 rc_out(bp, CD180_CAR, port_No(port)); 1442 rc_out(bp, CD180_CAR, port_No(port));
1468 if (I_IXOFF(tty)) { 1443 if (I_IXOFF(tty)) {
@@ -1471,62 +1446,58 @@ static void rc_unthrottle(struct tty_struct * tty)
1471 rc_wait_CCR(bp); 1446 rc_wait_CCR(bp);
1472 } 1447 }
1473 rc_out(bp, CD180_MSVR, port->MSVR); 1448 rc_out(bp, CD180_MSVR, port->MSVR);
1474
1475 spin_unlock_irqrestore(&riscom_lock, flags); 1449 spin_unlock_irqrestore(&riscom_lock, flags);
1476} 1450}
1477 1451
1478static void rc_stop(struct tty_struct * tty) 1452static void rc_stop(struct tty_struct *tty)
1479{ 1453{
1480 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1454 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1481 struct riscom_board *bp; 1455 struct riscom_board *bp;
1482 unsigned long flags; 1456 unsigned long flags;
1483 1457
1484 if (rc_paranoia_check(port, tty->name, "rc_stop")) 1458 if (rc_paranoia_check(port, tty->name, "rc_stop"))
1485 return; 1459 return;
1486 1460
1487 bp = port_Board(port); 1461 bp = port_Board(port);
1488
1489 spin_lock_irqsave(&riscom_lock, flags);
1490 1462
1463 spin_lock_irqsave(&riscom_lock, flags);
1491 port->IER &= ~IER_TXRDY; 1464 port->IER &= ~IER_TXRDY;
1492 rc_out(bp, CD180_CAR, port_No(port)); 1465 rc_out(bp, CD180_CAR, port_No(port));
1493 rc_out(bp, CD180_IER, port->IER); 1466 rc_out(bp, CD180_IER, port->IER);
1494
1495 spin_unlock_irqrestore(&riscom_lock, flags); 1467 spin_unlock_irqrestore(&riscom_lock, flags);
1496} 1468}
1497 1469
1498static void rc_start(struct tty_struct * tty) 1470static void rc_start(struct tty_struct *tty)
1499{ 1471{
1500 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1472 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1501 struct riscom_board *bp; 1473 struct riscom_board *bp;
1502 unsigned long flags; 1474 unsigned long flags;
1503 1475
1504 if (rc_paranoia_check(port, tty->name, "rc_start")) 1476 if (rc_paranoia_check(port, tty->name, "rc_start"))
1505 return; 1477 return;
1506 1478
1507 bp = port_Board(port); 1479 bp = port_Board(port);
1508 1480
1509 spin_lock_irqsave(&riscom_lock, flags); 1481 spin_lock_irqsave(&riscom_lock, flags);
1510 1482
1511 if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) { 1483 if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) {
1512 port->IER |= IER_TXRDY; 1484 port->IER |= IER_TXRDY;
1513 rc_out(bp, CD180_CAR, port_No(port)); 1485 rc_out(bp, CD180_CAR, port_No(port));
1514 rc_out(bp, CD180_IER, port->IER); 1486 rc_out(bp, CD180_IER, port->IER);
1515 } 1487 }
1516
1517 spin_unlock_irqrestore(&riscom_lock, flags); 1488 spin_unlock_irqrestore(&riscom_lock, flags);
1518} 1489}
1519 1490
1520static void rc_hangup(struct tty_struct * tty) 1491static void rc_hangup(struct tty_struct *tty)
1521{ 1492{
1522 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1493 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1523 struct riscom_board *bp; 1494 struct riscom_board *bp;
1524 1495
1525 if (rc_paranoia_check(port, tty->name, "rc_hangup")) 1496 if (rc_paranoia_check(port, tty->name, "rc_hangup"))
1526 return; 1497 return;
1527 1498
1528 bp = port_Board(port); 1499 bp = port_Board(port);
1529 1500
1530 rc_shutdown_port(bp, port); 1501 rc_shutdown_port(bp, port);
1531 port->count = 0; 1502 port->count = 0;
1532 port->flags &= ~ASYNC_NORMAL_ACTIVE; 1503 port->flags &= ~ASYNC_NORMAL_ACTIVE;
@@ -1534,17 +1505,14 @@ static void rc_hangup(struct tty_struct * tty)
1534 wake_up_interruptible(&port->open_wait); 1505 wake_up_interruptible(&port->open_wait);
1535} 1506}
1536 1507
1537static void rc_set_termios(struct tty_struct * tty, struct ktermios * old_termios) 1508static void rc_set_termios(struct tty_struct *tty,
1509 struct ktermios *old_termios)
1538{ 1510{
1539 struct riscom_port *port = (struct riscom_port *)tty->driver_data; 1511 struct riscom_port *port = (struct riscom_port *)tty->driver_data;
1540 unsigned long flags; 1512 unsigned long flags;
1541 1513
1542 if (rc_paranoia_check(port, tty->name, "rc_set_termios")) 1514 if (rc_paranoia_check(port, tty->name, "rc_set_termios"))
1543 return; 1515 return;
1544
1545 if (tty->termios->c_cflag == old_termios->c_cflag &&
1546 tty->termios->c_iflag == old_termios->c_iflag)
1547 return;
1548 1516
1549 spin_lock_irqsave(&riscom_lock, flags); 1517 spin_lock_irqsave(&riscom_lock, flags);
1550 rc_change_speed(port_Board(port), port); 1518 rc_change_speed(port_Board(port), port);
@@ -1583,9 +1551,9 @@ static int __init rc_init_drivers(void)
1583 int i; 1551 int i;
1584 1552
1585 riscom_driver = alloc_tty_driver(RC_NBOARD * RC_NPORT); 1553 riscom_driver = alloc_tty_driver(RC_NBOARD * RC_NPORT);
1586 if (!riscom_driver) 1554 if (!riscom_driver)
1587 return -ENOMEM; 1555 return -ENOMEM;
1588 1556
1589 riscom_driver->owner = THIS_MODULE; 1557 riscom_driver->owner = THIS_MODULE;
1590 riscom_driver->name = "ttyL"; 1558 riscom_driver->name = "ttyL";
1591 riscom_driver->major = RISCOM8_NORMAL_MAJOR; 1559 riscom_driver->major = RISCOM8_NORMAL_MAJOR;
@@ -1598,23 +1566,21 @@ static int __init rc_init_drivers(void)
1598 riscom_driver->init_termios.c_ospeed = 9600; 1566 riscom_driver->init_termios.c_ospeed = 9600;
1599 riscom_driver->flags = TTY_DRIVER_REAL_RAW; 1567 riscom_driver->flags = TTY_DRIVER_REAL_RAW;
1600 tty_set_operations(riscom_driver, &riscom_ops); 1568 tty_set_operations(riscom_driver, &riscom_ops);
1601 if ((error = tty_register_driver(riscom_driver))) { 1569 error = tty_register_driver(riscom_driver);
1570 if (error != 0) {
1602 put_tty_driver(riscom_driver); 1571 put_tty_driver(riscom_driver);
1603 printk(KERN_ERR "rc: Couldn't register RISCom/8 driver, " 1572 printk(KERN_ERR "rc: Couldn't register RISCom/8 driver, "
1604 "error = %d\n", 1573 "error = %d\n", error);
1605 error);
1606 return 1; 1574 return 1;
1607 } 1575 }
1608
1609 memset(rc_port, 0, sizeof(rc_port)); 1576 memset(rc_port, 0, sizeof(rc_port));
1610 for (i = 0; i < RC_NPORT * RC_NBOARD; i++) { 1577 for (i = 0; i < RC_NPORT * RC_NBOARD; i++) {
1611 rc_port[i].magic = RISCOM8_MAGIC; 1578 rc_port[i].magic = RISCOM8_MAGIC;
1612 rc_port[i].close_delay = 50 * HZ/100; 1579 rc_port[i].close_delay = 50 * HZ / 100;
1613 rc_port[i].closing_wait = 3000 * HZ/100; 1580 rc_port[i].closing_wait = 3000 * HZ / 100;
1614 init_waitqueue_head(&rc_port[i].open_wait); 1581 init_waitqueue_head(&rc_port[i].open_wait);
1615 init_waitqueue_head(&rc_port[i].close_wait); 1582 init_waitqueue_head(&rc_port[i].close_wait);
1616 } 1583 }
1617
1618 return 0; 1584 return 0;
1619} 1585}
1620 1586
@@ -1627,13 +1593,13 @@ static void rc_release_drivers(void)
1627#ifndef MODULE 1593#ifndef MODULE
1628/* 1594/*
1629 * Called at boot time. 1595 * Called at boot time.
1630 * 1596 *
1631 * You can specify IO base for up to RC_NBOARD cards, 1597 * You can specify IO base for up to RC_NBOARD cards,
1632 * using line "riscom8=0xiobase1,0xiobase2,.." at LILO prompt. 1598 * using line "riscom8=0xiobase1,0xiobase2,.." at LILO prompt.
1633 * Note that there will be no probing at default 1599 * Note that there will be no probing at default
1634 * addresses in this case. 1600 * addresses in this case.
1635 * 1601 *
1636 */ 1602 */
1637static int __init riscom8_setup(char *str) 1603static int __init riscom8_setup(char *str)
1638{ 1604{
1639 int ints[RC_NBOARD]; 1605 int ints[RC_NBOARD];
@@ -1644,7 +1610,7 @@ static int __init riscom8_setup(char *str)
1644 for (i = 0; i < RC_NBOARD; i++) { 1610 for (i = 0; i < RC_NBOARD; i++) {
1645 if (i < ints[0]) 1611 if (i < ints[0])
1646 rc_board[i].base = ints[i+1]; 1612 rc_board[i].base = ints[i+1];
1647 else 1613 else
1648 rc_board[i].base = 0; 1614 rc_board[i].base = 0;
1649 } 1615 }
1650 return 1; 1616 return 1;
@@ -1659,8 +1625,8 @@ static char banner[] __initdata =
1659static char no_boards_msg[] __initdata = 1625static char no_boards_msg[] __initdata =
1660 KERN_INFO "rc: No RISCom/8 boards detected.\n"; 1626 KERN_INFO "rc: No RISCom/8 boards detected.\n";
1661 1627
1662/* 1628/*
1663 * This routine must be called by kernel at boot time 1629 * This routine must be called by kernel at boot time
1664 */ 1630 */
1665static int __init riscom8_init(void) 1631static int __init riscom8_init(void)
1666{ 1632{
@@ -1669,13 +1635,12 @@ static int __init riscom8_init(void)
1669 1635
1670 printk(banner); 1636 printk(banner);
1671 1637
1672 if (rc_init_drivers()) 1638 if (rc_init_drivers())
1673 return -EIO; 1639 return -EIO;
1674 1640
1675 for (i = 0; i < RC_NBOARD; i++) 1641 for (i = 0; i < RC_NBOARD; i++)
1676 if (rc_board[i].base && !rc_probe(&rc_board[i])) 1642 if (rc_board[i].base && !rc_probe(&rc_board[i]))
1677 found++; 1643 found++;
1678
1679 if (!found) { 1644 if (!found) {
1680 rc_release_drivers(); 1645 rc_release_drivers();
1681 printk(no_boards_msg); 1646 printk(no_boards_msg);
@@ -1702,13 +1667,13 @@ MODULE_LICENSE("GPL");
1702 * by specifying "iobase=0xXXX iobase1=0xXXX ..." as insmod parameter. 1667 * by specifying "iobase=0xXXX iobase1=0xXXX ..." as insmod parameter.
1703 * 1668 *
1704 */ 1669 */
1705static int __init riscom8_init_module (void) 1670static int __init riscom8_init_module(void)
1706{ 1671{
1707#ifdef MODULE 1672#ifdef MODULE
1708 int i; 1673 int i;
1709 1674
1710 if (iobase || iobase1 || iobase2 || iobase3) { 1675 if (iobase || iobase1 || iobase2 || iobase3) {
1711 for(i = 0; i < RC_NBOARD; i++) 1676 for (i = 0; i < RC_NBOARD; i++)
1712 rc_board[i].base = 0; 1677 rc_board[i].base = 0;
1713 } 1678 }
1714 1679
@@ -1724,18 +1689,17 @@ static int __init riscom8_init_module (void)
1724 1689
1725 return riscom8_init(); 1690 return riscom8_init();
1726} 1691}
1727 1692
1728static void __exit riscom8_exit_module (void) 1693static void __exit riscom8_exit_module(void)
1729{ 1694{
1730 int i; 1695 int i;
1731 1696
1732 rc_release_drivers(); 1697 rc_release_drivers();
1733 for (i = 0; i < RC_NBOARD; i++) 1698 for (i = 0; i < RC_NBOARD; i++)
1734 if (rc_board[i].flags & RC_BOARD_PRESENT) 1699 if (rc_board[i].flags & RC_BOARD_PRESENT)
1735 rc_release_io_range(&rc_board[i]); 1700 rc_release_io_range(&rc_board[i]);
1736 1701
1737} 1702}
1738 1703
1739module_init(riscom8_init_module); 1704module_init(riscom8_init_module);
1740module_exit(riscom8_exit_module); 1705module_exit(riscom8_exit_module);
1741
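
For reference, a minimal sketch of the single-exit ioctl shape rc_ioctl settles into above, with the serial_struct paths taken under the Big Kernel Lock. The my_* names are placeholder stubs, not symbols from this driver; the soft-carrier cases are absent because TIOCGSOFTCAR/TIOCSSOFTCAR are handled generically by the tty core.

#include <linux/tty.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>

/* Placeholder stubs standing in for the driver's serial_struct helpers. */
static int my_get_serial_info(void *port, void __user *argp) { return 0; }
static int my_set_serial_info(void *port, void __user *argp) { return 0; }

static int my_ioctl(struct tty_struct *tty, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        int retval = 0;

        switch (cmd) {
        case TIOCGSERIAL:
                lock_kernel();          /* serial_struct state is still BKL-protected */
                retval = my_get_serial_info(tty->driver_data, argp);
                unlock_kernel();
                break;
        case TIOCSSERIAL:
                lock_kernel();
                retval = my_set_serial_info(tty->driver_data, argp);
                unlock_kernel();
                break;
        default:
                retval = -ENOIOCTLCMD; /* unknown commands fall back to the tty core */
        }
        return retval;
}
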
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index f585bc8579e9..743dc80a9325 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -449,7 +449,8 @@ static void rp_do_transmit(struct r_port *info)
449 while (1) { 449 while (1) {
450 if (tty->stopped || tty->hw_stopped) 450 if (tty->stopped || tty->hw_stopped)
451 break; 451 break;
452 c = min(info->xmit_fifo_room, min(info->xmit_cnt, XMIT_BUF_SIZE - info->xmit_tail)); 452 c = min(info->xmit_fifo_room, info->xmit_cnt);
453 c = min(c, XMIT_BUF_SIZE - info->xmit_tail);
453 if (c <= 0 || info->xmit_fifo_room <= 0) 454 if (c <= 0 || info->xmit_fifo_room <= 0)
454 break; 455 break;
455 sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2); 456 sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2);
@@ -1433,29 +1434,38 @@ static int rp_ioctl(struct tty_struct *tty, struct file *file,
1433{ 1434{
1434 struct r_port *info = (struct r_port *) tty->driver_data; 1435 struct r_port *info = (struct r_port *) tty->driver_data;
1435 void __user *argp = (void __user *)arg; 1436 void __user *argp = (void __user *)arg;
1437 int ret = 0;
1436 1438
1437 if (cmd != RCKP_GET_PORTS && rocket_paranoia_check(info, "rp_ioctl")) 1439 if (cmd != RCKP_GET_PORTS && rocket_paranoia_check(info, "rp_ioctl"))
1438 return -ENXIO; 1440 return -ENXIO;
1439 1441
1442 lock_kernel();
1443
1440 switch (cmd) { 1444 switch (cmd) {
1441 case RCKP_GET_STRUCT: 1445 case RCKP_GET_STRUCT:
1442 if (copy_to_user(argp, info, sizeof (struct r_port))) 1446 if (copy_to_user(argp, info, sizeof (struct r_port)))
1443 return -EFAULT; 1447 ret = -EFAULT;
1444 return 0; 1448 break;
1445 case RCKP_GET_CONFIG: 1449 case RCKP_GET_CONFIG:
1446 return get_config(info, argp); 1450 ret = get_config(info, argp);
1451 break;
1447 case RCKP_SET_CONFIG: 1452 case RCKP_SET_CONFIG:
1448 return set_config(info, argp); 1453 ret = set_config(info, argp);
1454 break;
1449 case RCKP_GET_PORTS: 1455 case RCKP_GET_PORTS:
1450 return get_ports(info, argp); 1456 ret = get_ports(info, argp);
1457 break;
1451 case RCKP_RESET_RM2: 1458 case RCKP_RESET_RM2:
1452 return reset_rm2(info, argp); 1459 ret = reset_rm2(info, argp);
1460 break;
1453 case RCKP_GET_VERSION: 1461 case RCKP_GET_VERSION:
1454 return get_version(info, argp); 1462 ret = get_version(info, argp);
1463 break;
1455 default: 1464 default:
1456 return -ENOIOCTLCMD; 1465 ret = -ENOIOCTLCMD;
1457 } 1466 }
1458 return 0; 1467 unlock_kernel();
1468 return ret;
1459} 1469}
1460 1470
1461static void rp_send_xchar(struct tty_struct *tty, char ch) 1471static void rp_send_xchar(struct tty_struct *tty, char ch)
@@ -1575,6 +1585,7 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
1575 jiffies); 1585 jiffies);
1576 printk(KERN_INFO "cps=%d...\n", info->cps); 1586 printk(KERN_INFO "cps=%d...\n", info->cps);
1577#endif 1587#endif
1588 lock_kernel();
1578 while (1) { 1589 while (1) {
1579 txcnt = sGetTxCnt(cp); 1590 txcnt = sGetTxCnt(cp);
1580 if (!txcnt) { 1591 if (!txcnt) {
@@ -1602,6 +1613,7 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
1602 break; 1613 break;
1603 } 1614 }
1604 __set_current_state(TASK_RUNNING); 1615 __set_current_state(TASK_RUNNING);
1616 unlock_kernel();
1605#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT 1617#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
1606 printk(KERN_INFO "txcnt = %d (jiff=%lu)...done\n", txcnt, jiffies); 1618 printk(KERN_INFO "txcnt = %d (jiff=%lu)...done\n", txcnt, jiffies);
1607#endif 1619#endif
@@ -1651,14 +1663,14 @@ static void rp_hangup(struct tty_struct *tty)
1651 * writing routines will write directly to transmit FIFO. 1663 * writing routines will write directly to transmit FIFO.
1652 * Write buffer and counters protected by spinlocks 1664 * Write buffer and counters protected by spinlocks
1653 */ 1665 */
1654static void rp_put_char(struct tty_struct *tty, unsigned char ch) 1666static int rp_put_char(struct tty_struct *tty, unsigned char ch)
1655{ 1667{
1656 struct r_port *info = (struct r_port *) tty->driver_data; 1668 struct r_port *info = (struct r_port *) tty->driver_data;
1657 CHANNEL_t *cp; 1669 CHANNEL_t *cp;
1658 unsigned long flags; 1670 unsigned long flags;
1659 1671
1660 if (rocket_paranoia_check(info, "rp_put_char")) 1672 if (rocket_paranoia_check(info, "rp_put_char"))
1661 return; 1673 return 0;
1662 1674
1663 /* 1675 /*
1664 * Grab the port write mutex, locking out other processes that try to 1676 * Grab the port write mutex, locking out other processes that try to
@@ -1687,6 +1699,7 @@ static void rp_put_char(struct tty_struct *tty, unsigned char ch)
1687 } 1699 }
1688 spin_unlock_irqrestore(&info->slock, flags); 1700 spin_unlock_irqrestore(&info->slock, flags);
1689 mutex_unlock(&info->write_mtx); 1701 mutex_unlock(&info->write_mtx);
1702 return 1;
1690} 1703}
1691 1704
1692/* 1705/*
@@ -1749,10 +1762,10 @@ static int rp_write(struct tty_struct *tty,
1749 1762
1750 /* Write remaining data into the port's xmit_buf */ 1763 /* Write remaining data into the port's xmit_buf */
1751 while (1) { 1764 while (1) {
1752 if (!info->tty) /* Seemingly obligatory check... */ 1765 if (!info->tty) /* Seemingly obligatory check... */
1753 goto end; 1766 goto end;
1754 1767 c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1);
1755 c = min(count, min(XMIT_BUF_SIZE - info->xmit_cnt - 1, XMIT_BUF_SIZE - info->xmit_head)); 1768 c = min(c, XMIT_BUF_SIZE - info->xmit_head);
1756 if (c <= 0) 1769 if (c <= 0)
1757 break; 1770 break;
1758 1771
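
The nested min() in rp_do_transmit above is unrolled into two calls; a standalone sketch of the same clamp, with XMIT_BUF_SIZE chosen only for illustration:

#include <linux/kernel.h>               /* min() */

#define XMIT_BUF_SIZE 4096              /* illustrative circular-buffer size */

/* Clamp a transmit chunk to FIFO space, queued bytes, and the wrap point. */
static int tx_chunk(int fifo_room, int xmit_cnt, int xmit_tail)
{
        int c = min(fifo_room, xmit_cnt);               /* no more than fits or is queued */

        c = min(c, XMIT_BUF_SIZE - xmit_tail);          /* stop at the buffer wrap */
        return c;
}

Splitting the expression keeps each min() on one line and each comparison between plain ints, which min() type-checks.
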
diff --git a/drivers/char/rocket_int.h b/drivers/char/rocket_int.h
index b01d38125a8f..143cc432fdb2 100644
--- a/drivers/char/rocket_int.h
+++ b/drivers/char/rocket_int.h
@@ -55,7 +55,7 @@ static inline void sOutW(unsigned short port, unsigned short value)
55 55
56static inline void out32(unsigned short port, Byte_t *p) 56static inline void out32(unsigned short port, Byte_t *p)
57{ 57{
58 u32 value = le32_to_cpu(get_unaligned((__le32 *)p)); 58 u32 value = get_unaligned_le32(p);
59#ifdef ROCKET_DEBUG_IO 59#ifdef ROCKET_DEBUG_IO
60 printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value); 60 printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value);
61#endif 61#endif
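
out32() above switches to the combined get_unaligned_le32() accessor; a minimal sketch of the same read:

#include <linux/kernel.h>
#include <asm/unaligned.h>

/* Read a little-endian 32-bit value from a possibly unaligned buffer. */
static u32 read_le32(const unsigned char *p)
{
        /* One call replaces le32_to_cpu(get_unaligned((__le32 *)p)). */
        return get_unaligned_le32(p);
}
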
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index e2ec2ee4cf79..5f80a9dff573 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -1069,10 +1069,8 @@ no_irq:
1069 } 1069 }
1070 1070
1071#ifdef CONFIG_PROC_FS 1071#ifdef CONFIG_PROC_FS
1072 ent = create_proc_entry("driver/rtc", 0, NULL); 1072 ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops);
1073 if (ent) 1073 if (!ent)
1074 ent->proc_fops = &rtc_proc_fops;
1075 else
1076 printk(KERN_WARNING "rtc: Failed to register with procfs.\n"); 1074 printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
1077#endif 1075#endif
1078 1076
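
The rtc.c hunk above replaces create_proc_entry() plus a later proc_fops assignment with a single proc_create() call; a minimal sketch of that registration pattern, with hypothetical names throughout:

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_proc_show(struct seq_file *m, void *v)
{
        seq_puts(m, "demo\n");
        return 0;
}

static int my_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, my_proc_show, NULL);
}

static const struct file_operations my_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = my_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init my_proc_init(void)
{
        /*
         * proc_create() publishes the entry together with its fops, so
         * there is no window where the file exists without operations,
         * unlike create_proc_entry() followed by a proc_fops assignment.
         */
        if (!proc_create("driver/my_demo", 0, NULL, &my_proc_fops))
                return -ENOMEM;
        return 0;
}
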
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index df8cd0ca97eb..3b23270eaa65 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -1060,7 +1060,7 @@ static void config_setup(struct cyclades_port *info)
1060 1060
1061} /* config_setup */ 1061} /* config_setup */
1062 1062
1063static void cy_put_char(struct tty_struct *tty, unsigned char ch) 1063static int cy_put_char(struct tty_struct *tty, unsigned char ch)
1064{ 1064{
1065 struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; 1065 struct cyclades_port *info = (struct cyclades_port *)tty->driver_data;
1066 unsigned long flags; 1066 unsigned long flags;
@@ -1070,21 +1070,22 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch)
1070#endif 1070#endif
1071 1071
1072 if (serial_paranoia_check(info, tty->name, "cy_put_char")) 1072 if (serial_paranoia_check(info, tty->name, "cy_put_char"))
1073 return; 1073 return 0;
1074 1074
1075 if (!info->xmit_buf) 1075 if (!info->xmit_buf)
1076 return; 1076 return 0;
1077 1077
1078 local_irq_save(flags); 1078 local_irq_save(flags);
1079 if (info->xmit_cnt >= PAGE_SIZE - 1) { 1079 if (info->xmit_cnt >= PAGE_SIZE - 1) {
1080 local_irq_restore(flags); 1080 local_irq_restore(flags);
1081 return; 1081 return 0;
1082 } 1082 }
1083 1083
1084 info->xmit_buf[info->xmit_head++] = ch; 1084 info->xmit_buf[info->xmit_head++] = ch;
1085 info->xmit_head &= PAGE_SIZE - 1; 1085 info->xmit_head &= PAGE_SIZE - 1;
1086 info->xmit_cnt++; 1086 info->xmit_cnt++;
1087 local_irq_restore(flags); 1087 local_irq_restore(flags);
1088 return 1;
1088} /* cy_put_char */ 1089} /* cy_put_char */
1089 1090
1090static void cy_flush_chars(struct tty_struct *tty) 1091static void cy_flush_chars(struct tty_struct *tty)
@@ -1539,6 +1540,8 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
1539 printk("cy_ioctl %s, cmd = %x arg = %lx\n", tty->name, cmd, arg); /* */ 1540 printk("cy_ioctl %s, cmd = %x arg = %lx\n", tty->name, cmd, arg); /* */
1540#endif 1541#endif
1541 1542
1543 lock_kernel();
1544
1542 switch (cmd) { 1545 switch (cmd) {
1543 case CYGETMON: 1546 case CYGETMON:
1544 ret_val = get_mon_info(info, argp); 1547 ret_val = get_mon_info(info, argp);
@@ -1584,18 +1587,6 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
1584 break; 1587 break;
1585 1588
1586/* The following commands are incompletely implemented!!! */ 1589/* The following commands are incompletely implemented!!! */
1587 case TIOCGSOFTCAR:
1588 ret_val =
1589 put_user(C_CLOCAL(tty) ? 1 : 0,
1590 (unsigned long __user *)argp);
1591 break;
1592 case TIOCSSOFTCAR:
1593 ret_val = get_user(val, (unsigned long __user *)argp);
1594 if (ret_val)
1595 break;
1596 tty->termios->c_cflag =
1597 ((tty->termios->c_cflag & ~CLOCAL) | (val ? CLOCAL : 0));
1598 break;
1599 case TIOCGSERIAL: 1590 case TIOCGSERIAL:
1600 ret_val = get_serial_info(info, argp); 1591 ret_val = get_serial_info(info, argp);
1601 break; 1592 break;
@@ -1605,6 +1596,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
1605 default: 1596 default:
1606 ret_val = -ENOIOCTLCMD; 1597 ret_val = -ENOIOCTLCMD;
1607 } 1598 }
1599 unlock_kernel();
1608 1600
1609#ifdef SERIAL_DEBUG_OTHER 1601#ifdef SERIAL_DEBUG_OTHER
1610 printk("cy_ioctl done\n"); 1602 printk("cy_ioctl done\n");
@@ -1683,8 +1675,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
1683 if (info->flags & ASYNC_INITIALIZED) 1675 if (info->flags & ASYNC_INITIALIZED)
1684 tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */ 1676 tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
1685 shutdown(info); 1677 shutdown(info);
1686 if (tty->driver->flush_buffer) 1678 cy_flush_buffer(tty);
1687 tty->driver->flush_buffer(tty);
1688 tty_ldisc_flush(tty); 1679 tty_ldisc_flush(tty);
1689 info->tty = NULL; 1680 info->tty = NULL;
1690 if (info->blocked_open) { 1681 if (info->blocked_open) {
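
cy_put_char() above now returns int, as do the other put_char methods touched in this series: 1 if the byte was queued, 0 if it was dropped. A minimal sketch of the new contract, using a hypothetical port structure:

#include <linux/tty.h>
#include <linux/spinlock.h>

#define MY_XMIT_SIZE 4096                       /* illustrative power-of-two buffer */

struct my_port {                                /* hypothetical driver state */
        spinlock_t lock;
        unsigned char *xmit_buf;
        int xmit_head;
        int xmit_cnt;
};

static int my_put_char(struct tty_struct *tty, unsigned char ch)
{
        struct my_port *port = tty->driver_data;
        unsigned long flags;
        int ret = 0;

        if (!port || !port->xmit_buf)
                return 0;                       /* nothing queued */

        spin_lock_irqsave(&port->lock, flags);
        if (port->xmit_cnt < MY_XMIT_SIZE - 1) {
                port->xmit_buf[port->xmit_head++] = ch;
                port->xmit_head &= MY_XMIT_SIZE - 1;
                port->xmit_cnt++;
                ret = 1;                        /* byte accepted */
        }
        spin_unlock_irqrestore(&port->lock, flags);
        return ret;
}
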
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index b9c1dba6bd01..8fe099a41065 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -80,7 +80,7 @@ scdrv_open(struct inode *inode, struct file *file)
80 sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); 80 sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
81 if (sd == NULL) { 81 if (sd == NULL) {
82 printk("%s: couldn't allocate subchannel data\n", 82 printk("%s: couldn't allocate subchannel data\n",
83 __FUNCTION__); 83 __func__);
84 return -ENOMEM; 84 return -ENOMEM;
85 } 85 }
86 86
@@ -90,7 +90,7 @@ scdrv_open(struct inode *inode, struct file *file)
90 90
91 if (sd->sd_subch < 0) { 91 if (sd->sd_subch < 0) {
92 kfree(sd); 92 kfree(sd);
93 printk("%s: couldn't allocate subchannel\n", __FUNCTION__); 93 printk("%s: couldn't allocate subchannel\n", __func__);
94 return -EBUSY; 94 return -EBUSY;
95 } 95 }
96 96
@@ -110,7 +110,7 @@ scdrv_open(struct inode *inode, struct file *file)
110 if (rv) { 110 if (rv) {
111 ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch); 111 ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
112 kfree(sd); 112 kfree(sd);
113 printk("%s: irq request failed (%d)\n", __FUNCTION__, rv); 113 printk("%s: irq request failed (%d)\n", __func__, rv);
114 return -EBUSY; 114 return -EBUSY;
115 } 115 }
116 116
@@ -215,7 +215,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
215 */ 215 */
216 if (count < len) { 216 if (count < len) {
217 pr_debug("%s: only accepting %d of %d bytes\n", 217 pr_debug("%s: only accepting %d of %d bytes\n",
218 __FUNCTION__, (int) count, len); 218 __func__, (int) count, len);
219 } 219 }
220 len = min((int) count, len); 220 len = min((int) count, len);
221 if (copy_to_user(buf, sd->sd_rb, len)) 221 if (copy_to_user(buf, sd->sd_rb, len))
@@ -384,7 +384,7 @@ scdrv_init(void)
384 if (alloc_chrdev_region(&first_dev, 0, num_cnodes, 384 if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
385 SYSCTL_BASENAME) < 0) { 385 SYSCTL_BASENAME) < 0) {
386 printk("%s: failed to register SN system controller device\n", 386 printk("%s: failed to register SN system controller device\n",
387 __FUNCTION__); 387 __func__);
388 return -ENODEV; 388 return -ENODEV;
389 } 389 }
390 snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME); 390 snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);
@@ -403,7 +403,7 @@ scdrv_init(void)
403 GFP_KERNEL); 403 GFP_KERNEL);
404 if (!scd) { 404 if (!scd) {
405 printk("%s: failed to allocate device info" 405 printk("%s: failed to allocate device info"
406 "for %s/%s\n", __FUNCTION__, 406 "for %s/%s\n", __func__,
407 SYSCTL_BASENAME, devname); 407 SYSCTL_BASENAME, devname);
408 continue; 408 continue;
409 } 409 }
@@ -412,7 +412,7 @@ scdrv_init(void)
412 scd->scd_nasid = cnodeid_to_nasid(cnode); 412 scd->scd_nasid = cnodeid_to_nasid(cnode);
413 if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) { 413 if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
414 printk("%s: failed to allocate driver buffer" 414 printk("%s: failed to allocate driver buffer"
415 "(%s%s)\n", __FUNCTION__, 415 "(%s%s)\n", __func__,
416 SYSCTL_BASENAME, devname); 416 SYSCTL_BASENAME, devname);
417 kfree(scd); 417 kfree(scd);
418 continue; 418 continue;
@@ -424,7 +424,7 @@ scdrv_init(void)
424 ("%s: failed to initialize SAL for" 424 ("%s: failed to initialize SAL for"
425 " system controller communication" 425 " system controller communication"
426 " (%s/%s): outdated PROM?\n", 426 " (%s/%s): outdated PROM?\n",
427 __FUNCTION__, SYSCTL_BASENAME, devname); 427 __func__, SYSCTL_BASENAME, devname);
428 kfree(scd); 428 kfree(scd);
429 kfree(salbuf); 429 kfree(salbuf);
430 continue; 430 continue;
@@ -435,7 +435,7 @@ scdrv_init(void)
435 if (cdev_add(&scd->scd_cdev, dev, 1)) { 435 if (cdev_add(&scd->scd_cdev, dev, 1)) {
436 printk("%s: failed to register system" 436 printk("%s: failed to register system"
437 " controller device (%s%s)\n", 437 " controller device (%s%s)\n",
438 __FUNCTION__, SYSCTL_BASENAME, devname); 438 __func__, SYSCTL_BASENAME, devname);
439 kfree(scd); 439 kfree(scd);
440 kfree(salbuf); 440 kfree(salbuf);
441 continue; 441 continue;
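
The snsc.c hunks above only swap the GCC-specific __FUNCTION__ for the standard C99 __func__ identifier; usage is otherwise identical:

#include <linux/kernel.h>

static void my_trace(void)
{
        printk(KERN_DEBUG "%s: entered\n", __func__);   /* __func__ is C99 */
}
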
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
index 1b75b0b7d542..53b3d44f8c06 100644
--- a/drivers/char/snsc_event.c
+++ b/drivers/char/snsc_event.c
@@ -63,16 +63,13 @@ static int
63scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) 63scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
64{ 64{
65 char *desc_end; 65 char *desc_end;
66 __be32 from_buf;
67 66
68 /* record event source address */ 67 /* record event source address */
69 from_buf = get_unaligned((__be32 *)event); 68 *src = get_unaligned_be32(event);
70 *src = be32_to_cpup(&from_buf);
71 event += 4; /* move on to event code */ 69 event += 4; /* move on to event code */
72 70
73 /* record the system controller's event code */ 71 /* record the system controller's event code */
74 from_buf = get_unaligned((__be32 *)event); 72 *code = get_unaligned_be32(event);
75 *code = be32_to_cpup(&from_buf);
76 event += 4; /* move on to event arguments */ 73 event += 4; /* move on to event arguments */
77 74
78 /* how many arguments are in the packet? */ 75 /* how many arguments are in the packet? */
@@ -86,8 +83,7 @@ scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
86 /* not an integer argument, so give up */ 83 /* not an integer argument, so give up */
87 return -1; 84 return -1;
88 } 85 }
89 from_buf = get_unaligned((__be32 *)event); 86 *esp_code = get_unaligned_be32(event);
90 *esp_code = be32_to_cpup(&from_buf);
91 event += 4; 87 event += 4;
92 88
93 /* parse out the event description */ 89 /* parse out the event description */
@@ -275,7 +271,7 @@ scdrv_event_init(struct sysctl_data_s *scd)
275 event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); 271 event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
276 if (event_sd == NULL) { 272 if (event_sd == NULL) {
277 printk(KERN_WARNING "%s: couldn't allocate subchannel info" 273 printk(KERN_WARNING "%s: couldn't allocate subchannel info"
278 " for event monitoring\n", __FUNCTION__); 274 " for event monitoring\n", __func__);
279 return; 275 return;
280 } 276 }
281 277
@@ -289,7 +285,7 @@ scdrv_event_init(struct sysctl_data_s *scd)
289 if (event_sd->sd_subch < 0) { 285 if (event_sd->sd_subch < 0) {
290 kfree(event_sd); 286 kfree(event_sd);
291 printk(KERN_WARNING "%s: couldn't open event subchannel\n", 287 printk(KERN_WARNING "%s: couldn't open event subchannel\n",
292 __FUNCTION__); 288 __func__);
293 return; 289 return;
294 } 290 }
295 291
@@ -299,7 +295,7 @@ scdrv_event_init(struct sysctl_data_s *scd)
299 "system controller events", event_sd); 295 "system controller events", event_sd);
300 if (rv) { 296 if (rv) {
301 printk(KERN_WARNING "%s: irq request failed (%d)\n", 297 printk(KERN_WARNING "%s: irq request failed (%d)\n",
302 __FUNCTION__, rv); 298 __func__, rv);
303 ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch); 299 ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
304 kfree(event_sd); 300 kfree(event_sd);
305 return; 301 return;
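
scdrv_parse_event() above drops the __be32 temporary and explicit be32_to_cpup() in favour of get_unaligned_be32(); a condensed sketch of the same field extraction:

#include <linux/kernel.h>
#include <asm/unaligned.h>

/* Pull two consecutive big-endian 32-bit fields out of an unaligned buffer. */
static void parse_two_be32(const char *event, int *src, int *code)
{
        *src = get_unaligned_be32(event);       /* was get_unaligned() + be32_to_cpup() */
        event += 4;
        *code = get_unaligned_be32(event);
}
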
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index c03ad164c39a..58533de59027 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -506,7 +506,7 @@ static struct sonypi_device {
506 while (--n && (command)) \ 506 while (--n && (command)) \
507 udelay(1); \ 507 udelay(1); \
508 if (!n && (verbose || !quiet)) \ 508 if (!n && (verbose || !quiet)) \
509 printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __FUNCTION__, __LINE__); \ 509 printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \
510} 510}
511 511
512#ifdef CONFIG_ACPI 512#ifdef CONFIG_ACPI
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 4b5b5b78acb4..2ee4d9893757 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -131,8 +131,8 @@ static int sx_rxfifo = SPECIALIX_RXFIFO;
131#define SX_DEBUG_FIFO 0x0800 131#define SX_DEBUG_FIFO 0x0800
132 132
133 133
134#define func_enter() dprintk (SX_DEBUG_FLOW, "io8: enter %s\n",__FUNCTION__) 134#define func_enter() dprintk (SX_DEBUG_FLOW, "io8: enter %s\n",__func__)
135#define func_exit() dprintk (SX_DEBUG_FLOW, "io8: exit %s\n", __FUNCTION__) 135#define func_exit() dprintk (SX_DEBUG_FLOW, "io8: exit %s\n", __func__)
136 136
137#define jiffies_from_ms(a) ((((a) * HZ)/1000)+1) 137#define jiffies_from_ms(a) ((((a) * HZ)/1000)+1)
138 138
@@ -874,7 +874,7 @@ static irqreturn_t sx_interrupt(int dummy, void *dev_id)
874 874
875 spin_lock_irqsave(&bp->lock, flags); 875 spin_lock_irqsave(&bp->lock, flags);
876 876
877 dprintk (SX_DEBUG_FLOW, "enter %s port %d room: %ld\n", __FUNCTION__, port_No(sx_get_port(bp, "INT")), SERIAL_XMIT_SIZE - sx_get_port(bp, "ITN")->xmit_cnt - 1); 877 dprintk (SX_DEBUG_FLOW, "enter %s port %d room: %ld\n", __func__, port_No(sx_get_port(bp, "INT")), SERIAL_XMIT_SIZE - sx_get_port(bp, "ITN")->xmit_cnt - 1);
878 if (!(bp->flags & SX_BOARD_ACTIVE)) { 878 if (!(bp->flags & SX_BOARD_ACTIVE)) {
879 dprintk (SX_DEBUG_IRQ, "sx: False interrupt. irq %d.\n", bp->irq); 879 dprintk (SX_DEBUG_IRQ, "sx: False interrupt. irq %d.\n", bp->irq);
880 spin_unlock_irqrestore(&bp->lock, flags); 880 spin_unlock_irqrestore(&bp->lock, flags);
@@ -1504,6 +1504,27 @@ static int sx_open(struct tty_struct * tty, struct file * filp)
1504 return 0; 1504 return 0;
1505} 1505}
1506 1506
1507static void sx_flush_buffer(struct tty_struct *tty)
1508{
1509 struct specialix_port *port = (struct specialix_port *)tty->driver_data;
1510 unsigned long flags;
1511 struct specialix_board * bp;
1512
1513 func_enter();
1514
1515 if (sx_paranoia_check(port, tty->name, "sx_flush_buffer")) {
1516 func_exit();
1517 return;
1518 }
1519
1520 bp = port_Board(port);
1521 spin_lock_irqsave(&port->lock, flags);
1522 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
1523 spin_unlock_irqrestore(&port->lock, flags);
1524 tty_wakeup(tty);
1525
1526 func_exit();
1527}
1507 1528
1508static void sx_close(struct tty_struct * tty, struct file * filp) 1529static void sx_close(struct tty_struct * tty, struct file * filp)
1509{ 1530{
@@ -1597,8 +1618,7 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
1597 } 1618 }
1598 1619
1599 sx_shutdown_port(bp, port); 1620 sx_shutdown_port(bp, port);
1600 if (tty->driver->flush_buffer) 1621 sx_flush_buffer(tty);
1601 tty->driver->flush_buffer(tty);
1602 tty_ldisc_flush(tty); 1622 tty_ldisc_flush(tty);
1603 spin_lock_irqsave(&port->lock, flags); 1623 spin_lock_irqsave(&port->lock, flags);
1604 tty->closing = 0; 1624 tty->closing = 0;
@@ -1670,7 +1690,7 @@ static int sx_write(struct tty_struct * tty,
1670} 1690}
1671 1691
1672 1692
1673static void sx_put_char(struct tty_struct * tty, unsigned char ch) 1693static int sx_put_char(struct tty_struct * tty, unsigned char ch)
1674{ 1694{
1675 struct specialix_port *port = (struct specialix_port *)tty->driver_data; 1695 struct specialix_port *port = (struct specialix_port *)tty->driver_data;
1676 unsigned long flags; 1696 unsigned long flags;
@@ -1680,12 +1700,12 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch)
1680 1700
1681 if (sx_paranoia_check(port, tty->name, "sx_put_char")) { 1701 if (sx_paranoia_check(port, tty->name, "sx_put_char")) {
1682 func_exit(); 1702 func_exit();
1683 return; 1703 return 0;
1684 } 1704 }
1685 dprintk (SX_DEBUG_TX, "check tty: %p %p\n", tty, port->xmit_buf); 1705 dprintk (SX_DEBUG_TX, "check tty: %p %p\n", tty, port->xmit_buf);
1686 if (!port->xmit_buf) { 1706 if (!port->xmit_buf) {
1687 func_exit(); 1707 func_exit();
1688 return; 1708 return 0;
1689 } 1709 }
1690 bp = port_Board(port); 1710 bp = port_Board(port);
1691 spin_lock_irqsave(&port->lock, flags); 1711 spin_lock_irqsave(&port->lock, flags);
@@ -1695,7 +1715,7 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch)
1695 spin_unlock_irqrestore(&port->lock, flags); 1715 spin_unlock_irqrestore(&port->lock, flags);
1696 dprintk (SX_DEBUG_TX, "Exit size\n"); 1716 dprintk (SX_DEBUG_TX, "Exit size\n");
1697 func_exit(); 1717 func_exit();
1698 return; 1718 return 0;
1699 } 1719 }
1700 dprintk (SX_DEBUG_TX, "Handle xmit: %p %p\n", port, port->xmit_buf); 1720 dprintk (SX_DEBUG_TX, "Handle xmit: %p %p\n", port, port->xmit_buf);
1701 port->xmit_buf[port->xmit_head++] = ch; 1721 port->xmit_buf[port->xmit_head++] = ch;
@@ -1704,6 +1724,7 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch)
1704 spin_unlock_irqrestore(&port->lock, flags); 1724 spin_unlock_irqrestore(&port->lock, flags);
1705 1725
1706 func_exit(); 1726 func_exit();
1727 return 1;
1707} 1728}
1708 1729
1709 1730
@@ -1770,28 +1791,6 @@ static int sx_chars_in_buffer(struct tty_struct *tty)
1770} 1791}
1771 1792
1772 1793
1773static void sx_flush_buffer(struct tty_struct *tty)
1774{
1775 struct specialix_port *port = (struct specialix_port *)tty->driver_data;
1776 unsigned long flags;
1777 struct specialix_board * bp;
1778
1779 func_enter();
1780
1781 if (sx_paranoia_check(port, tty->name, "sx_flush_buffer")) {
1782 func_exit();
1783 return;
1784 }
1785
1786 bp = port_Board(port);
1787 spin_lock_irqsave(&port->lock, flags);
1788 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
1789 spin_unlock_irqrestore(&port->lock, flags);
1790 tty_wakeup(tty);
1791
1792 func_exit();
1793}
1794
1795 1794
1796static int sx_tiocmget(struct tty_struct *tty, struct file *file) 1795static int sx_tiocmget(struct tty_struct *tty, struct file *file)
1797{ 1796{
@@ -1803,7 +1802,7 @@ static int sx_tiocmget(struct tty_struct *tty, struct file *file)
1803 1802
1804 func_enter(); 1803 func_enter();
1805 1804
1806 if (sx_paranoia_check(port, tty->name, __FUNCTION__)) { 1805 if (sx_paranoia_check(port, tty->name, __func__)) {
1807 func_exit(); 1806 func_exit();
1808 return -ENODEV; 1807 return -ENODEV;
1809 } 1808 }
@@ -1845,7 +1844,7 @@ static int sx_tiocmset(struct tty_struct *tty, struct file *file,
1845 1844
1846 func_enter(); 1845 func_enter();
1847 1846
1848 if (sx_paranoia_check(port, tty->name, __FUNCTION__)) { 1847 if (sx_paranoia_check(port, tty->name, __func__)) {
1849 func_exit(); 1848 func_exit();
1850 return -ENODEV; 1849 return -ENODEV;
1851 } 1850 }
@@ -1922,29 +1921,13 @@ static inline int sx_set_serial_info(struct specialix_port * port,
1922 int change_speed; 1921 int change_speed;
1923 1922
1924 func_enter(); 1923 func_enter();
1925 /* 1924
1926 if (!access_ok(VERIFY_READ, (void *) newinfo, sizeof(tmp))) {
1927 func_exit();
1928 return -EFAULT;
1929 }
1930 */
1931 if (copy_from_user(&tmp, newinfo, sizeof(tmp))) { 1925 if (copy_from_user(&tmp, newinfo, sizeof(tmp))) {
1932 func_enter(); 1926 func_enter();
1933 return -EFAULT; 1927 return -EFAULT;
1934 } 1928 }
1935 1929
1936#if 0 1930 lock_kernel();
1937 if ((tmp.irq != bp->irq) ||
1938 (tmp.port != bp->base) ||
1939 (tmp.type != PORT_CIRRUS) ||
1940 (tmp.baud_base != (SX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC) ||
1941 (tmp.custom_divisor != 0) ||
1942 (tmp.xmit_fifo_size != CD186x_NFIFO) ||
1943 (tmp.flags & ~SPECIALIX_LEGAL_FLAGS)) {
1944 func_exit();
1945 return -EINVAL;
1946 }
1947#endif
1948 1931
1949 change_speed = ((port->flags & ASYNC_SPD_MASK) != 1932 change_speed = ((port->flags & ASYNC_SPD_MASK) !=
1950 (tmp.flags & ASYNC_SPD_MASK)); 1933 (tmp.flags & ASYNC_SPD_MASK));
@@ -1956,6 +1939,7 @@ static inline int sx_set_serial_info(struct specialix_port * port,
1956 ((tmp.flags & ~ASYNC_USR_MASK) != 1939 ((tmp.flags & ~ASYNC_USR_MASK) !=
1957 (port->flags & ~ASYNC_USR_MASK))) { 1940 (port->flags & ~ASYNC_USR_MASK))) {
1958 func_exit(); 1941 func_exit();
1942 unlock_kernel();
1959 return -EPERM; 1943 return -EPERM;
1960 } 1944 }
1961 port->flags = ((port->flags & ~ASYNC_USR_MASK) | 1945 port->flags = ((port->flags & ~ASYNC_USR_MASK) |
@@ -1972,6 +1956,7 @@ static inline int sx_set_serial_info(struct specialix_port * port,
1972 sx_change_speed(bp, port); 1956 sx_change_speed(bp, port);
1973 } 1957 }
1974 func_exit(); 1958 func_exit();
1959 unlock_kernel();
1975 return 0; 1960 return 0;
1976} 1961}
1977 1962
@@ -1984,12 +1969,8 @@ static inline int sx_get_serial_info(struct specialix_port * port,
1984 1969
1985 func_enter(); 1970 func_enter();
1986 1971
1987 /*
1988 if (!access_ok(VERIFY_WRITE, (void *) retinfo, sizeof(tmp)))
1989 return -EFAULT;
1990 */
1991
1992 memset(&tmp, 0, sizeof(tmp)); 1972 memset(&tmp, 0, sizeof(tmp));
1973 lock_kernel();
1993 tmp.type = PORT_CIRRUS; 1974 tmp.type = PORT_CIRRUS;
1994 tmp.line = port - sx_port; 1975 tmp.line = port - sx_port;
1995 tmp.port = bp->base; 1976 tmp.port = bp->base;
@@ -2000,6 +1981,7 @@ static inline int sx_get_serial_info(struct specialix_port * port,
2000 tmp.closing_wait = port->closing_wait * HZ/100; 1981 tmp.closing_wait = port->closing_wait * HZ/100;
2001 tmp.custom_divisor = port->custom_divisor; 1982 tmp.custom_divisor = port->custom_divisor;
2002 tmp.xmit_fifo_size = CD186x_NFIFO; 1983 tmp.xmit_fifo_size = CD186x_NFIFO;
1984 unlock_kernel();
2003 if (copy_to_user(retinfo, &tmp, sizeof(tmp))) { 1985 if (copy_to_user(retinfo, &tmp, sizeof(tmp))) {
2004 func_exit(); 1986 func_exit();
2005 return -EFAULT; 1987 return -EFAULT;
@@ -2045,23 +2027,6 @@ static int sx_ioctl(struct tty_struct * tty, struct file * filp,
2045 sx_send_break(port, arg ? arg*(HZ/10) : HZ/4); 2027 sx_send_break(port, arg ? arg*(HZ/10) : HZ/4);
2046 func_exit(); 2028 func_exit();
2047 return 0; 2029 return 0;
2048 case TIOCGSOFTCAR:
2049 if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)argp)) {
2050 func_exit();
2051 return -EFAULT;
2052 }
2053 func_exit();
2054 return 0;
2055 case TIOCSSOFTCAR:
2056 if (get_user(arg, (unsigned long __user *) argp)) {
2057 func_exit();
2058 return -EFAULT;
2059 }
2060 tty->termios->c_cflag =
2061 ((tty->termios->c_cflag & ~CLOCAL) |
2062 (arg ? CLOCAL : 0));
2063 func_exit();
2064 return 0;
2065 case TIOCGSERIAL: 2030 case TIOCGSERIAL:
2066 func_exit(); 2031 func_exit();
2067 return sx_get_serial_info(port, argp); 2032 return sx_get_serial_info(port, argp);
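
In specialix.c above, sx_flush_buffer() moves ahead of sx_close() so close can call it directly instead of indirecting through tty->driver->flush_buffer; a minimal sketch of that shape, with hypothetical names and the buffer reset elided:

#include <linux/tty.h>

static void my_flush_buffer(struct tty_struct *tty)
{
        /* a real driver resets its circular-buffer indices under the port lock here */
        tty_wakeup(tty);
}

static void my_close_tail(struct tty_struct *tty)
{
        /* the driver knows its own flush routine, so no method-pointer hop is needed */
        my_flush_buffer(tty);
        tty_ldisc_flush(tty);
}
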
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 874aaa08e956..d17be10c5d21 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -875,6 +875,7 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout)
875 timeout = HZ; 875 timeout = HZ;
876 tend = jiffies + timeout; 876 tend = jiffies + timeout;
877 877
878 lock_kernel();
878 while (stl_datastate(portp)) { 879 while (stl_datastate(portp)) {
879 if (signal_pending(current)) 880 if (signal_pending(current))
880 break; 881 break;
@@ -882,6 +883,7 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout)
882 if (time_after_eq(jiffies, tend)) 883 if (time_after_eq(jiffies, tend))
883 break; 884 break;
884 } 885 }
886 unlock_kernel();
885} 887}
886 888
887/*****************************************************************************/ 889/*****************************************************************************/
@@ -1273,18 +1275,9 @@ static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd
1273 1275
1274 rc = 0; 1276 rc = 0;
1275 1277
1278 lock_kernel();
1279
1276 switch (cmd) { 1280 switch (cmd) {
1277 case TIOCGSOFTCAR:
1278 rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
1279 (unsigned __user *) argp);
1280 break;
1281 case TIOCSSOFTCAR:
1282 if (get_user(ival, (unsigned int __user *) arg))
1283 return -EFAULT;
1284 tty->termios->c_cflag =
1285 (tty->termios->c_cflag & ~CLOCAL) |
1286 (ival ? CLOCAL : 0);
1287 break;
1288 case TIOCGSERIAL: 1281 case TIOCGSERIAL:
1289 rc = stl_getserial(portp, argp); 1282 rc = stl_getserial(portp, argp);
1290 break; 1283 break;
@@ -1308,7 +1301,7 @@ static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd
1308 rc = -ENOIOCTLCMD; 1301 rc = -ENOIOCTLCMD;
1309 break; 1302 break;
1310 } 1303 }
1311 1304 unlock_kernel();
1312 return rc; 1305 return rc;
1313} 1306}
1314 1307
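
stl_waituntilsent() above gains lock_kernel()/unlock_kernel() around its polling loop; a sketch of a bounded wait loop of that shape — the loop body and poll interval here are assumptions, only the locking and exit conditions mirror the hunk:

#include <linux/tty.h>
#include <linux/smp_lock.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

/* Hypothetical hardware query: non-zero while data is still in flight. */
static int my_tx_busy(struct tty_struct *tty)
{
        return 0;
}

static void my_wait_until_sent(struct tty_struct *tty, int timeout)
{
        unsigned long tend;

        if (timeout == 0)
                timeout = HZ;
        tend = jiffies + timeout;

        lock_kernel();                          /* hardware state still relies on the BKL */
        while (my_tx_busy(tty)) {
                if (signal_pending(current))
                        break;
                msleep_interruptible(20);       /* assumed poll interval */
                if (time_after_eq(jiffies, tend))
                        break;
        }
        unlock_kernel();
}
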
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index a6e1c9ba1217..f39f6fd89350 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -384,11 +384,11 @@ static struct real_driver sx_real_driver = {
384#define sx_dprintk(f, str...) /* nothing */ 384#define sx_dprintk(f, str...) /* nothing */
385#endif 385#endif
386 386
387#define func_enter() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s\n",__FUNCTION__) 387#define func_enter() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s\n",__func__)
388#define func_exit() sx_dprintk(SX_DEBUG_FLOW, "sx: exit %s\n",__FUNCTION__) 388#define func_exit() sx_dprintk(SX_DEBUG_FLOW, "sx: exit %s\n",__func__)
389 389
390#define func_enter2() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s (port %d)\n", \ 390#define func_enter2() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s (port %d)\n", \
391 __FUNCTION__, port->line) 391 __func__, port->line)
392 392
393/* 393/*
394 * Firmware loader driver specific routines 394 * Firmware loader driver specific routines
@@ -1574,7 +1574,7 @@ static void sx_close(void *ptr)
1574 sx_dprintk(SX_DEBUG_CLOSE, "WARNING port count:%d\n", 1574 sx_dprintk(SX_DEBUG_CLOSE, "WARNING port count:%d\n",
1575 port->gs.count); 1575 port->gs.count);
1576 /*printk("%s SETTING port count to zero: %p count: %d\n", 1576 /*printk("%s SETTING port count to zero: %p count: %d\n",
1577 __FUNCTION__, port, port->gs.count); 1577 __func__, port, port->gs.count);
1578 port->gs.count = 0;*/ 1578 port->gs.count = 0;*/
1579 } 1579 }
1580 1580
@@ -1844,6 +1844,7 @@ static void sx_break(struct tty_struct *tty, int flag)
1844 int rv; 1844 int rv;
1845 1845
1846 func_enter(); 1846 func_enter();
1847 lock_kernel();
1847 1848
1848 if (flag) 1849 if (flag)
1849 rv = sx_send_command(port, HS_START, -1, HS_IDLE_BREAK); 1850 rv = sx_send_command(port, HS_START, -1, HS_IDLE_BREAK);
@@ -1852,7 +1853,7 @@ static void sx_break(struct tty_struct *tty, int flag)
1852 if (rv != 1) 1853 if (rv != 1)
1853 printk(KERN_ERR "sx: couldn't send break (%x).\n", 1854 printk(KERN_ERR "sx: couldn't send break (%x).\n",
1854 read_sx_byte(port->board, CHAN_OFFSET(port, hi_hstat))); 1855 read_sx_byte(port->board, CHAN_OFFSET(port, hi_hstat)));
1855 1856 unlock_kernel();
1856 func_exit(); 1857 func_exit();
1857} 1858}
1858 1859
@@ -1888,23 +1889,12 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp,
1888 int rc; 1889 int rc;
1889 struct sx_port *port = tty->driver_data; 1890 struct sx_port *port = tty->driver_data;
1890 void __user *argp = (void __user *)arg; 1891 void __user *argp = (void __user *)arg;
1891 int ival;
1892 1892
1893 /* func_enter2(); */ 1893 /* func_enter2(); */
1894 1894
1895 rc = 0; 1895 rc = 0;
1896 lock_kernel();
1896 switch (cmd) { 1897 switch (cmd) {
1897 case TIOCGSOFTCAR:
1898 rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
1899 (unsigned __user *)argp);
1900 break;
1901 case TIOCSSOFTCAR:
1902 if ((rc = get_user(ival, (unsigned __user *)argp)) == 0) {
1903 tty->termios->c_cflag =
1904 (tty->termios->c_cflag & ~CLOCAL) |
1905 (ival ? CLOCAL : 0);
1906 }
1907 break;
1908 case TIOCGSERIAL: 1898 case TIOCGSERIAL:
1909 rc = gs_getserial(&port->gs, argp); 1899 rc = gs_getserial(&port->gs, argp);
1910 break; 1900 break;
@@ -1915,6 +1905,7 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp,
1915 rc = -ENOIOCTLCMD; 1905 rc = -ENOIOCTLCMD;
1916 break; 1906 break;
1917 } 1907 }
1908 unlock_kernel();
1918 1909
1919 /* func_exit(); */ 1910 /* func_exit(); */
1920 return rc; 1911 return rc;
@@ -2549,7 +2540,7 @@ static int __devinit sx_eisa_probe(struct device *dev)
2549 goto err_flag; 2540 goto err_flag;
2550 } 2541 }
2551 board->base2 = 2542 board->base2 =
2552 board->base = ioremap(board->hw_base, SI2_EISA_WINDOW_LEN); 2543 board->base = ioremap_nocache(board->hw_base, SI2_EISA_WINDOW_LEN);
2553 if (!board->base) { 2544 if (!board->base) {
2554 dev_err(dev, "can't remap memory\n"); 2545 dev_err(dev, "can't remap memory\n");
2555 goto err_reg; 2546 goto err_reg;
@@ -2626,7 +2617,7 @@ static void __devinit fix_sx_pci(struct pci_dev *pdev, struct sx_board *board)
2626 2617
2627 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &hwbase); 2618 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &hwbase);
2628 hwbase &= PCI_BASE_ADDRESS_MEM_MASK; 2619 hwbase &= PCI_BASE_ADDRESS_MEM_MASK;
2629 rebase = ioremap(hwbase, 0x80); 2620 rebase = ioremap_nocache(hwbase, 0x80);
2630 t = readl(rebase + CNTRL_REG_OFFSET); 2621 t = readl(rebase + CNTRL_REG_OFFSET);
2631 if (t != CNTRL_REG_GOODVALUE) { 2622 if (t != CNTRL_REG_GOODVALUE) {
2632 printk(KERN_DEBUG "sx: performing cntrl reg fix: %08x -> " 2623 printk(KERN_DEBUG "sx: performing cntrl reg fix: %08x -> "
@@ -2770,7 +2761,7 @@ static int __init sx_init(void)
2770 if (!request_region(board->hw_base, board->hw_len, "sx")) 2761 if (!request_region(board->hw_base, board->hw_len, "sx"))
2771 continue; 2762 continue;
2772 board->base2 = 2763 board->base2 =
2773 board->base = ioremap(board->hw_base, board->hw_len); 2764 board->base = ioremap_nocache(board->hw_base, board->hw_len);
2774 if (!board->base) 2765 if (!board->base)
2775 goto err_sx_reg; 2766 goto err_sx_reg;
2776 board->flags &= ~SX_BOARD_TYPE; 2767 board->flags &= ~SX_BOARD_TYPE;
@@ -2794,7 +2785,7 @@ err_sx_reg:
2794 if (!request_region(board->hw_base, board->hw_len, "sx")) 2785 if (!request_region(board->hw_base, board->hw_len, "sx"))
2795 continue; 2786 continue;
2796 board->base2 = 2787 board->base2 =
2797 board->base = ioremap(board->hw_base, board->hw_len); 2788 board->base = ioremap_nocache(board->hw_base, board->hw_len);
2798 if (!board->base) 2789 if (!board->base)
2799 goto err_si_reg; 2790 goto err_si_reg;
2800 board->flags &= ~SX_BOARD_TYPE; 2791 board->flags &= ~SX_BOARD_TYPE;
@@ -2817,7 +2808,7 @@ err_si_reg:
2817 if (!request_region(board->hw_base, board->hw_len, "sx")) 2808 if (!request_region(board->hw_base, board->hw_len, "sx"))
2818 continue; 2809 continue;
2819 board->base2 = 2810 board->base2 =
2820 board->base = ioremap(board->hw_base, board->hw_len); 2811 board->base = ioremap_nocache(board->hw_base, board->hw_len);
2821 if (!board->base) 2812 if (!board->base)
2822 goto err_si1_reg; 2813 goto err_si1_reg;
2823 board->flags &= ~SX_BOARD_TYPE; 2814 board->flags &= ~SX_BOARD_TYPE;
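
The sx.c hunks above convert the register-window mappings from ioremap() to ioremap_nocache(); a minimal sketch of mapping and unmapping such a window:

#include <linux/io.h>

/* Map a device register window; the nocache variant makes the uncached intent explicit. */
static void __iomem *my_map_regs(unsigned long phys, unsigned long len)
{
        return ioremap_nocache(phys, len);
}

static void my_unmap_regs(void __iomem *base)
{
        iounmap(base);
}
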
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index fadab1d9510f..ac5080df2565 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -2026,34 +2026,35 @@ static void mgsl_change_params(struct mgsl_struct *info)
2026 * 2026 *
2027 * Return Value: None 2027 * Return Value: None
2028 */ 2028 */
2029static void mgsl_put_char(struct tty_struct *tty, unsigned char ch) 2029static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2030{ 2030{
2031 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2031 struct mgsl_struct *info = tty->driver_data;
2032 unsigned long flags; 2032 unsigned long flags;
2033 int ret = 0;
2033 2034
2034 if ( debug_level >= DEBUG_LEVEL_INFO ) { 2035 if (debug_level >= DEBUG_LEVEL_INFO) {
2035 printk( "%s(%d):mgsl_put_char(%d) on %s\n", 2036 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2036 __FILE__,__LINE__,ch,info->device_name); 2037 __FILE__, __LINE__, ch, info->device_name);
2037 } 2038 }
2038 2039
2039 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) 2040 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2040 return; 2041 return 0;
2041 2042
2042 if (!tty || !info->xmit_buf) 2043 if (!tty || !info->xmit_buf)
2043 return; 2044 return 0;
2044 2045
2045 spin_lock_irqsave(&info->irq_spinlock,flags); 2046 spin_lock_irqsave(&info->irq_spinlock, flags);
2046
2047 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2048 2047
2048 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2049 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) { 2049 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2050 info->xmit_buf[info->xmit_head++] = ch; 2050 info->xmit_buf[info->xmit_head++] = ch;
2051 info->xmit_head &= SERIAL_XMIT_SIZE-1; 2051 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2052 info->xmit_cnt++; 2052 info->xmit_cnt++;
2053 ret = 1;
2053 } 2054 }
2054 } 2055 }
2055 2056 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2056 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2057 return ret;
2057 2058
2058} /* end of mgsl_put_char() */ 2059} /* end of mgsl_put_char() */
2059 2060
@@ -2942,6 +2943,7 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2942 unsigned int cmd, unsigned long arg) 2943 unsigned int cmd, unsigned long arg)
2943{ 2944{
2944 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; 2945 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2946 int ret;
2945 2947
2946 if (debug_level >= DEBUG_LEVEL_INFO) 2948 if (debug_level >= DEBUG_LEVEL_INFO)
2947 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__, 2949 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
@@ -2956,7 +2958,10 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2956 return -EIO; 2958 return -EIO;
2957 } 2959 }
2958 2960
2959 return mgsl_ioctl_common(info, cmd, arg); 2961 lock_kernel();
2962 ret = mgsl_ioctl_common(info, cmd, arg);
2963 unlock_kernel();
2964 return ret;
2960} 2965}
2961 2966
2962static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg) 2967static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
@@ -3153,8 +3158,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
3153 if (info->flags & ASYNC_INITIALIZED) 3158 if (info->flags & ASYNC_INITIALIZED)
3154 mgsl_wait_until_sent(tty, info->timeout); 3159 mgsl_wait_until_sent(tty, info->timeout);
3155 3160
3156 if (tty->driver->flush_buffer) 3161 mgsl_flush_buffer(tty);
3157 tty->driver->flush_buffer(tty);
3158 3162
3159 tty_ldisc_flush(tty); 3163 tty_ldisc_flush(tty);
3160 3164
@@ -3217,7 +3221,8 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3217 * interval should also be less than the timeout. 3221 * interval should also be less than the timeout.
3218 * Note: use tight timings here to satisfy the NIST-PCTS. 3222 * Note: use tight timings here to satisfy the NIST-PCTS.
3219 */ 3223 */
3220 3224
3225 lock_kernel();
3221 if ( info->params.data_rate ) { 3226 if ( info->params.data_rate ) {
3222 char_time = info->timeout/(32 * 5); 3227 char_time = info->timeout/(32 * 5);
3223 if (!char_time) 3228 if (!char_time)
@@ -3247,6 +3252,7 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3247 break; 3252 break;
3248 } 3253 }
3249 } 3254 }
3255 unlock_kernel();
3250 3256
3251exit: 3257exit:
3252 if (debug_level >= DEBUG_LEVEL_INFO) 3258 if (debug_level >= DEBUG_LEVEL_INFO)
@@ -4144,7 +4150,8 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
4144 } 4150 }
4145 info->lcr_mem_requested = true; 4151 info->lcr_mem_requested = true;
4146 4152
4147 info->memory_base = ioremap(info->phys_memory_base,0x40000); 4153 info->memory_base = ioremap_nocache(info->phys_memory_base,
4154 0x40000);
4148 if (!info->memory_base) { 4155 if (!info->memory_base) {
4149 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n", 4156 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
4150 __FILE__,__LINE__,info->device_name, info->phys_memory_base ); 4157 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
@@ -4157,12 +4164,14 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
4157 goto errout; 4164 goto errout;
4158 } 4165 }
4159 4166
4160 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset; 4167 info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4168 PAGE_SIZE);
4161 if (!info->lcr_base) { 4169 if (!info->lcr_base) {
4162 printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n", 4170 printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n",
4163 __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); 4171 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4164 goto errout; 4172 goto errout;
4165 } 4173 }
4174 info->lcr_base += info->lcr_offset;
4166 4175
4167 } else { 4176 } else {
4168 /* claim DMA channel */ 4177 /* claim DMA channel */
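Note on the mgsl_put_char() conversion above: the tty core now expects put_char() to return an int that is non-zero when the byte was actually queued, so callers can fall back to the regular write() path instead of silently losing data. A minimal sketch of the pattern follows; example_port, tx_buf and example_put_char are illustrative names, not code from this patch.

#include <linux/tty.h>
#include <linux/spinlock.h>

struct example_port {                   /* illustrative driver state */
        spinlock_t      lock;
        unsigned char   tx_buf[4096];
        unsigned int    tx_count;
};

static int example_put_char(struct tty_struct *tty, unsigned char ch)
{
        struct example_port *port = tty->driver_data;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&port->lock, flags);
        if (port->tx_count < sizeof(port->tx_buf)) {
                port->tx_buf[port->tx_count++] = ch;
                ret = 1;                /* byte accepted */
        }
        spin_unlock_irqrestore(&port->lock, flags);
        return ret;                     /* 0: buffer full, caller retries via write() */
}

Initialising ret to 0 matters here: the buffer-full path must report "not queued" rather than whatever happens to be on the stack.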
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index f3d8d72e5ea4..2001b0e52dc6 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -151,7 +151,7 @@ static void hangup(struct tty_struct *tty);
151static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); 151static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
152 152
153static int write(struct tty_struct *tty, const unsigned char *buf, int count); 153static int write(struct tty_struct *tty, const unsigned char *buf, int count);
154static void put_char(struct tty_struct *tty, unsigned char ch); 154static int put_char(struct tty_struct *tty, unsigned char ch);
155static void send_xchar(struct tty_struct *tty, char ch); 155static void send_xchar(struct tty_struct *tty, char ch);
156static void wait_until_sent(struct tty_struct *tty, int timeout); 156static void wait_until_sent(struct tty_struct *tty, int timeout);
157static int write_room(struct tty_struct *tty); 157static int write_room(struct tty_struct *tty);
@@ -771,8 +771,7 @@ static void close(struct tty_struct *tty, struct file *filp)
771 771
772 if (info->flags & ASYNC_INITIALIZED) 772 if (info->flags & ASYNC_INITIALIZED)
773 wait_until_sent(tty, info->timeout); 773 wait_until_sent(tty, info->timeout);
774 if (tty->driver->flush_buffer) 774 flush_buffer(tty);
775 tty->driver->flush_buffer(tty);
776 tty_ldisc_flush(tty); 775 tty_ldisc_flush(tty);
777 776
778 shutdown(info); 777 shutdown(info);
@@ -913,20 +912,24 @@ cleanup:
913 return ret; 912 return ret;
914} 913}
915 914
916static void put_char(struct tty_struct *tty, unsigned char ch) 915static int put_char(struct tty_struct *tty, unsigned char ch)
917{ 916{
918 struct slgt_info *info = tty->driver_data; 917 struct slgt_info *info = tty->driver_data;
919 unsigned long flags; 918 unsigned long flags;
919 int ret;
920 920
921 if (sanity_check(info, tty->name, "put_char")) 921 if (sanity_check(info, tty->name, "put_char"))
922 return; 922 return 0;
923 DBGINFO(("%s put_char(%d)\n", info->device_name, ch)); 923 DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
924 if (!info->tx_buf) 924 if (!info->tx_buf)
925 return; 925 return 0;
926 spin_lock_irqsave(&info->lock,flags); 926 spin_lock_irqsave(&info->lock,flags);
927 if (!info->tx_active && (info->tx_count < info->max_frame_size)) 927 if (!info->tx_active && (info->tx_count < info->max_frame_size)) {
928 info->tx_buf[info->tx_count++] = ch; 928 info->tx_buf[info->tx_count++] = ch;
929 ret = 1;
930 }
929 spin_unlock_irqrestore(&info->lock,flags); 931 spin_unlock_irqrestore(&info->lock,flags);
932 return ret;
930} 933}
931 934
932static void send_xchar(struct tty_struct *tty, char ch) 935static void send_xchar(struct tty_struct *tty, char ch)
@@ -967,6 +970,8 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
967 * Note: use tight timings here to satisfy the NIST-PCTS. 970 * Note: use tight timings here to satisfy the NIST-PCTS.
968 */ 971 */
969 972
973 lock_kernel();
974
970 if (info->params.data_rate) { 975 if (info->params.data_rate) {
971 char_time = info->timeout/(32 * 5); 976 char_time = info->timeout/(32 * 5);
972 if (!char_time) 977 if (!char_time)
@@ -984,6 +989,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
984 if (timeout && time_after(jiffies, orig_jiffies + timeout)) 989 if (timeout && time_after(jiffies, orig_jiffies + timeout))
985 break; 990 break;
986 } 991 }
992 unlock_kernel();
987 993
988exit: 994exit:
989 DBGINFO(("%s wait_until_sent exit\n", info->device_name)); 995 DBGINFO(("%s wait_until_sent exit\n", info->device_name));
@@ -1097,6 +1103,7 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1097 struct serial_icounter_struct __user *p_cuser; /* user space */ 1103 struct serial_icounter_struct __user *p_cuser; /* user space */
1098 unsigned long flags; 1104 unsigned long flags;
1099 void __user *argp = (void __user *)arg; 1105 void __user *argp = (void __user *)arg;
1106 int ret;
1100 1107
1101 if (sanity_check(info, tty->name, "ioctl")) 1108 if (sanity_check(info, tty->name, "ioctl"))
1102 return -ENODEV; 1109 return -ENODEV;
@@ -1108,37 +1115,54 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1108 return -EIO; 1115 return -EIO;
1109 } 1116 }
1110 1117
1118 lock_kernel();
1119
1111 switch (cmd) { 1120 switch (cmd) {
1112 case MGSL_IOCGPARAMS: 1121 case MGSL_IOCGPARAMS:
1113 return get_params(info, argp); 1122 ret = get_params(info, argp);
1123 break;
1114 case MGSL_IOCSPARAMS: 1124 case MGSL_IOCSPARAMS:
1115 return set_params(info, argp); 1125 ret = set_params(info, argp);
1126 break;
1116 case MGSL_IOCGTXIDLE: 1127 case MGSL_IOCGTXIDLE:
1117 return get_txidle(info, argp); 1128 ret = get_txidle(info, argp);
1129 break;
1118 case MGSL_IOCSTXIDLE: 1130 case MGSL_IOCSTXIDLE:
1119 return set_txidle(info, (int)arg); 1131 ret = set_txidle(info, (int)arg);
1132 break;
1120 case MGSL_IOCTXENABLE: 1133 case MGSL_IOCTXENABLE:
1121 return tx_enable(info, (int)arg); 1134 ret = tx_enable(info, (int)arg);
1135 break;
1122 case MGSL_IOCRXENABLE: 1136 case MGSL_IOCRXENABLE:
1123 return rx_enable(info, (int)arg); 1137 ret = rx_enable(info, (int)arg);
1138 break;
1124 case MGSL_IOCTXABORT: 1139 case MGSL_IOCTXABORT:
1125 return tx_abort(info); 1140 ret = tx_abort(info);
1141 break;
1126 case MGSL_IOCGSTATS: 1142 case MGSL_IOCGSTATS:
1127 return get_stats(info, argp); 1143 ret = get_stats(info, argp);
1144 break;
1128 case MGSL_IOCWAITEVENT: 1145 case MGSL_IOCWAITEVENT:
1129 return wait_mgsl_event(info, argp); 1146 ret = wait_mgsl_event(info, argp);
1147 break;
1130 case TIOCMIWAIT: 1148 case TIOCMIWAIT:
1131 return modem_input_wait(info,(int)arg); 1149 ret = modem_input_wait(info,(int)arg);
1150 break;
1132 case MGSL_IOCGIF: 1151 case MGSL_IOCGIF:
1133 return get_interface(info, argp); 1152 ret = get_interface(info, argp);
1153 break;
1134 case MGSL_IOCSIF: 1154 case MGSL_IOCSIF:
1135 return set_interface(info,(int)arg); 1155 ret = set_interface(info,(int)arg);
1156 break;
1136 case MGSL_IOCSGPIO: 1157 case MGSL_IOCSGPIO:
1137 return set_gpio(info, argp); 1158 ret = set_gpio(info, argp);
1159 break;
1138 case MGSL_IOCGGPIO: 1160 case MGSL_IOCGGPIO:
1139 return get_gpio(info, argp); 1161 ret = get_gpio(info, argp);
1162 break;
1140 case MGSL_IOCWAITGPIO: 1163 case MGSL_IOCWAITGPIO:
1141 return wait_gpio(info, argp); 1164 ret = wait_gpio(info, argp);
1165 break;
1142 case TIOCGICOUNT: 1166 case TIOCGICOUNT:
1143 spin_lock_irqsave(&info->lock,flags); 1167 spin_lock_irqsave(&info->lock,flags);
1144 cnow = info->icount; 1168 cnow = info->icount;
@@ -1155,12 +1179,14 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1155 put_user(cnow.parity, &p_cuser->parity) || 1179 put_user(cnow.parity, &p_cuser->parity) ||
1156 put_user(cnow.brk, &p_cuser->brk) || 1180 put_user(cnow.brk, &p_cuser->brk) ||
1157 put_user(cnow.buf_overrun, &p_cuser->buf_overrun)) 1181 put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
1158 return -EFAULT; 1182 ret = -EFAULT;
1159 return 0; 1183 ret = 0;
1184 break;
1160 default: 1185 default:
1161 return -ENOIOCTLCMD; 1186 ret = -ENOIOCTLCMD;
1162 } 1187 }
1163 return 0; 1188 unlock_kernel();
1189 return ret;
1164} 1190}
1165 1191
1166/* 1192/*
@@ -3324,7 +3350,7 @@ static int claim_resources(struct slgt_info *info)
3324 else 3350 else
3325 info->reg_addr_requested = true; 3351 info->reg_addr_requested = true;
3326 3352
3327 info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE); 3353 info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
3328 if (!info->reg_addr) { 3354 if (!info->reg_addr) {
3329 DBGERR(("%s cant map device registers, addr=%08X\n", 3355 DBGERR(("%s cant map device registers, addr=%08X\n",
3330 info->device_name, info->phys_reg_addr)); 3356 info->device_name, info->phys_reg_addr));
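The ioremap() to ioremap_nocache() switches in synclink.c and synclink_gt.c make the uncached requirement of the register and shared-memory windows explicit instead of relying on ioremap()'s default behaviour. A hedged sketch of the mapping helper; example_map_regs, res_start and res_len are illustrative placeholders for the device resource.

#include <linux/io.h>

static void __iomem *example_map_regs(unsigned long res_start,
                                      unsigned long res_len)
{
        void __iomem *regs = ioremap_nocache(res_start, res_len);

        if (!regs)
                return NULL;            /* caller treats this as an allocation failure */
        /* MMIO still goes through the accessors, e.g. readl()/writel(). */
        return regs;                    /* balance with iounmap() on teardown */
}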
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index e98c3e6f8216..bec54866e0bb 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -519,7 +519,7 @@ static void hangup(struct tty_struct *tty);
519static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); 519static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
520 520
521static int write(struct tty_struct *tty, const unsigned char *buf, int count); 521static int write(struct tty_struct *tty, const unsigned char *buf, int count);
522static void put_char(struct tty_struct *tty, unsigned char ch); 522static int put_char(struct tty_struct *tty, unsigned char ch);
523static void send_xchar(struct tty_struct *tty, char ch); 523static void send_xchar(struct tty_struct *tty, char ch);
524static void wait_until_sent(struct tty_struct *tty, int timeout); 524static void wait_until_sent(struct tty_struct *tty, int timeout);
525static int write_room(struct tty_struct *tty); 525static int write_room(struct tty_struct *tty);
@@ -862,8 +862,7 @@ static void close(struct tty_struct *tty, struct file *filp)
862 if (info->flags & ASYNC_INITIALIZED) 862 if (info->flags & ASYNC_INITIALIZED)
863 wait_until_sent(tty, info->timeout); 863 wait_until_sent(tty, info->timeout);
864 864
865 if (tty->driver->flush_buffer) 865 flush_buffer(tty);
866 tty->driver->flush_buffer(tty);
867 866
868 tty_ldisc_flush(tty); 867 tty_ldisc_flush(tty);
869 868
@@ -1046,10 +1045,11 @@ cleanup:
1046 1045
1047/* Add a character to the transmit buffer. 1046/* Add a character to the transmit buffer.
1048 */ 1047 */
1049static void put_char(struct tty_struct *tty, unsigned char ch) 1048static int put_char(struct tty_struct *tty, unsigned char ch)
1050{ 1049{
1051 SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; 1050 SLMP_INFO *info = (SLMP_INFO *)tty->driver_data;
1052 unsigned long flags; 1051 unsigned long flags;
1052 int ret = 0;
1053 1053
1054 if ( debug_level >= DEBUG_LEVEL_INFO ) { 1054 if ( debug_level >= DEBUG_LEVEL_INFO ) {
1055 printk( "%s(%d):%s put_char(%d)\n", 1055 printk( "%s(%d):%s put_char(%d)\n",
@@ -1057,10 +1057,10 @@ static void put_char(struct tty_struct *tty, unsigned char ch)
1057 } 1057 }
1058 1058
1059 if (sanity_check(info, tty->name, "put_char")) 1059 if (sanity_check(info, tty->name, "put_char"))
1060 return; 1060 return 0;
1061 1061
1062 if (!info->tx_buf) 1062 if (!info->tx_buf)
1063 return; 1063 return 0;
1064 1064
1065 spin_lock_irqsave(&info->lock,flags); 1065 spin_lock_irqsave(&info->lock,flags);
1066 1066
@@ -1072,10 +1072,12 @@ static void put_char(struct tty_struct *tty, unsigned char ch)
1072 if (info->tx_put >= info->max_frame_size) 1072 if (info->tx_put >= info->max_frame_size)
1073 info->tx_put -= info->max_frame_size; 1073 info->tx_put -= info->max_frame_size;
1074 info->tx_count++; 1074 info->tx_count++;
1075 ret = 1;
1075 } 1076 }
1076 } 1077 }
1077 1078
1078 spin_unlock_irqrestore(&info->lock,flags); 1079 spin_unlock_irqrestore(&info->lock,flags);
1080 return ret;
1079} 1081}
1080 1082
1081/* Send a high-priority XON/XOFF character 1083/* Send a high-priority XON/XOFF character
@@ -1119,6 +1121,8 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
1119 if (sanity_check(info, tty->name, "wait_until_sent")) 1121 if (sanity_check(info, tty->name, "wait_until_sent"))
1120 return; 1122 return;
1121 1123
1124 lock_kernel();
1125
1122 if (!(info->flags & ASYNC_INITIALIZED)) 1126 if (!(info->flags & ASYNC_INITIALIZED))
1123 goto exit; 1127 goto exit;
1124 1128
@@ -1161,6 +1165,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
1161 } 1165 }
1162 1166
1163exit: 1167exit:
1168 unlock_kernel();
1164 if (debug_level >= DEBUG_LEVEL_INFO) 1169 if (debug_level >= DEBUG_LEVEL_INFO)
1165 printk("%s(%d):%s wait_until_sent() exit\n", 1170 printk("%s(%d):%s wait_until_sent() exit\n",
1166 __FILE__,__LINE__, info->device_name ); 1171 __FILE__,__LINE__, info->device_name );
@@ -1176,6 +1181,7 @@ static int write_room(struct tty_struct *tty)
1176 if (sanity_check(info, tty->name, "write_room")) 1181 if (sanity_check(info, tty->name, "write_room"))
1177 return 0; 1182 return 0;
1178 1183
1184 lock_kernel();
1179 if (info->params.mode == MGSL_MODE_HDLC) { 1185 if (info->params.mode == MGSL_MODE_HDLC) {
1180 ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE; 1186 ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
1181 } else { 1187 } else {
@@ -1183,6 +1189,7 @@ static int write_room(struct tty_struct *tty)
1183 if (ret < 0) 1189 if (ret < 0)
1184 ret = 0; 1190 ret = 0;
1185 } 1191 }
1192 unlock_kernel();
1186 1193
1187 if (debug_level >= DEBUG_LEVEL_INFO) 1194 if (debug_level >= DEBUG_LEVEL_INFO)
1188 printk("%s(%d):%s write_room()=%d\n", 1195 printk("%s(%d):%s write_room()=%d\n",
@@ -1303,7 +1310,7 @@ static void tx_release(struct tty_struct *tty)
1303 * 1310 *
1304 * Return Value: 0 if success, otherwise error code 1311 * Return Value: 0 if success, otherwise error code
1305 */ 1312 */
1306static int ioctl(struct tty_struct *tty, struct file *file, 1313static int do_ioctl(struct tty_struct *tty, struct file *file,
1307 unsigned int cmd, unsigned long arg) 1314 unsigned int cmd, unsigned long arg)
1308{ 1315{
1309 SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; 1316 SLMP_INFO *info = (SLMP_INFO *)tty->driver_data;
@@ -1393,6 +1400,16 @@ static int ioctl(struct tty_struct *tty, struct file *file,
1393 return 0; 1400 return 0;
1394} 1401}
1395 1402
1403static int ioctl(struct tty_struct *tty, struct file *file,
1404 unsigned int cmd, unsigned long arg)
1405{
1406 int ret;
1407 lock_kernel();
1408 ret = do_ioctl(tty, file, cmd, arg);
1409 unlock_kernel();
1410 return ret;
1411}
1412
1396/* 1413/*
1397 * /proc fs routines.... 1414 * /proc fs routines....
1398 */ 1415 */
@@ -3626,7 +3643,8 @@ static int claim_resources(SLMP_INFO *info)
3626 else 3643 else
3627 info->sca_statctrl_requested = true; 3644 info->sca_statctrl_requested = true;
3628 3645
3629 info->memory_base = ioremap(info->phys_memory_base,SCA_MEM_SIZE); 3646 info->memory_base = ioremap_nocache(info->phys_memory_base,
3647 SCA_MEM_SIZE);
3630 if (!info->memory_base) { 3648 if (!info->memory_base) {
3631 printk( "%s(%d):%s Cant map shared memory, MemAddr=%08X\n", 3649 printk( "%s(%d):%s Cant map shared memory, MemAddr=%08X\n",
3632 __FILE__,__LINE__,info->device_name, info->phys_memory_base ); 3650 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
@@ -3634,7 +3652,7 @@ static int claim_resources(SLMP_INFO *info)
3634 goto errout; 3652 goto errout;
3635 } 3653 }
3636 3654
3637 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE); 3655 info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE);
3638 if (!info->lcr_base) { 3656 if (!info->lcr_base) {
3639 printk( "%s(%d):%s Cant map LCR memory, MemAddr=%08X\n", 3657 printk( "%s(%d):%s Cant map LCR memory, MemAddr=%08X\n",
3640 __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); 3658 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
@@ -3643,7 +3661,7 @@ static int claim_resources(SLMP_INFO *info)
3643 } 3661 }
3644 info->lcr_base += info->lcr_offset; 3662 info->lcr_base += info->lcr_offset;
3645 3663
3646 info->sca_base = ioremap(info->phys_sca_base,PAGE_SIZE); 3664 info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE);
3647 if (!info->sca_base) { 3665 if (!info->sca_base) {
3648 printk( "%s(%d):%s Cant map SCA memory, MemAddr=%08X\n", 3666 printk( "%s(%d):%s Cant map SCA memory, MemAddr=%08X\n",
3649 __FILE__,__LINE__,info->device_name, info->phys_sca_base ); 3667 __FILE__,__LINE__,info->device_name, info->phys_sca_base );
@@ -3652,7 +3670,8 @@ static int claim_resources(SLMP_INFO *info)
3652 } 3670 }
3653 info->sca_base += info->sca_offset; 3671 info->sca_base += info->sca_offset;
3654 3672
3655 info->statctrl_base = ioremap(info->phys_statctrl_base,PAGE_SIZE); 3673 info->statctrl_base = ioremap_nocache(info->phys_statctrl_base,
3674 PAGE_SIZE);
3656 if (!info->statctrl_base) { 3675 if (!info->statctrl_base) {
3657 printk( "%s(%d):%s Cant map SCA Status/Control memory, MemAddr=%08X\n", 3676 printk( "%s(%d):%s Cant map SCA Status/Control memory, MemAddr=%08X\n",
3658 __FILE__,__LINE__,info->device_name, info->phys_statctrl_base ); 3677 __FILE__,__LINE__,info->device_name, info->phys_statctrl_base );
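The lock_kernel()/unlock_kernel() calls added to the synclink ioctl and wait_until_sent paths push the BKL out of the tty core and into the drivers that still depend on it; synclinkmp does this by renaming its handler to do_ioctl() and adding a thin locked wrapper. A sketch of that shim pattern, where do_example_ioctl() and example_ioctl() are illustrative stand-ins for the driver's own functions:

#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/tty.h>

static int do_example_ioctl(struct tty_struct *tty, struct file *file,
                            unsigned int cmd, unsigned long arg)
{
        return -ENOIOCTLCMD;            /* stand-in for the existing handler */
}

static int example_ioctl(struct tty_struct *tty, struct file *file,
                         unsigned int cmd, unsigned long arg)
{
        int ret;

        lock_kernel();                  /* keep BKL semantics until the driver is audited */
        ret = do_example_ioctl(tty, file, cmd, arg);
        unlock_kernel();
        return ret;
}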
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 1ade193c9128..9e9bad8bdcf4 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -196,6 +196,48 @@ static struct sysrq_key_op sysrq_showlocks_op = {
196#define sysrq_showlocks_op (*(struct sysrq_key_op *)0) 196#define sysrq_showlocks_op (*(struct sysrq_key_op *)0)
197#endif 197#endif
198 198
199#ifdef CONFIG_SMP
200static DEFINE_SPINLOCK(show_lock);
201
202static void showacpu(void *dummy)
203{
204 unsigned long flags;
205
206 /* Idle CPUs have no interesting backtrace. */
207 if (idle_cpu(smp_processor_id()))
208 return;
209
210 spin_lock_irqsave(&show_lock, flags);
211 printk(KERN_INFO "CPU%d:\n", smp_processor_id());
212 show_stack(NULL, NULL);
213 spin_unlock_irqrestore(&show_lock, flags);
214}
215
216static void sysrq_showregs_othercpus(struct work_struct *dummy)
217{
218 smp_call_function(showacpu, NULL, 0, 0);
219}
220
221static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
222
223static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
224{
225 struct pt_regs *regs = get_irq_regs();
226 if (regs) {
227 printk(KERN_INFO "CPU%d:\n", smp_processor_id());
228 show_regs(regs);
229 }
230 schedule_work(&sysrq_showallcpus);
231}
232
233static struct sysrq_key_op sysrq_showallcpus_op = {
234 .handler = sysrq_handle_showallcpus,
235 .help_msg = "aLlcpus",
236 .action_msg = "Show backtrace of all active CPUs",
237 .enable_mask = SYSRQ_ENABLE_DUMP,
238};
239#endif
240
199static void sysrq_handle_showregs(int key, struct tty_struct *tty) 241static void sysrq_handle_showregs(int key, struct tty_struct *tty)
200{ 242{
201 struct pt_regs *regs = get_irq_regs(); 243 struct pt_regs *regs = get_irq_regs();
@@ -340,7 +382,11 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
340 &sysrq_kill_op, /* i */ 382 &sysrq_kill_op, /* i */
341 NULL, /* j */ 383 NULL, /* j */
342 &sysrq_SAK_op, /* k */ 384 &sysrq_SAK_op, /* k */
385#ifdef CONFIG_SMP
386 &sysrq_showallcpus_op, /* l */
387#else
343 NULL, /* l */ 388 NULL, /* l */
389#endif
344 &sysrq_showmem_op, /* m */ 390 &sysrq_showmem_op, /* m */
345 &sysrq_unrt_op, /* n */ 391 &sysrq_unrt_op, /* n */
346 /* o: This will often be registered as 'Off' at init time */ 392 /* o: This will often be registered as 'Off' at init time */
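The new SysRq-l handler above prints the local CPU's registers immediately but defers the cross-CPU backtraces to a workqueue, because smp_call_function() may not be called from the interrupt context the SysRq key arrives in. A sketch of that defer pattern only; example_on_each_cpu and example_work are illustrative names.

#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/kernel.h>

static void example_on_each_cpu(void *unused)
{
        pr_info("CPU%d alive\n", smp_processor_id());
}

static void example_work_fn(struct work_struct *unused)
{
        /* same 4-argument smp_call_function() form the patch uses */
        smp_call_function(example_on_each_cpu, NULL, 0, 0);
}

static DECLARE_WORK(example_work, example_work_fn);

/* from interrupt context: schedule_work(&example_work); */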
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index ce5ebe3b168f..663cd15d7c78 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -426,7 +426,7 @@ static int tosh_probe(void)
426 int i,major,minor,day,year,month,flag; 426 int i,major,minor,day,year,month,flag;
427 unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 }; 427 unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
428 SMMRegisters regs; 428 SMMRegisters regs;
429 void __iomem *bios = ioremap(0xf0000, 0x10000); 429 void __iomem *bios = ioremap_cache(0xf0000, 0x10000);
430 430
431 if (!bios) 431 if (!bios)
432 return -ENOMEM; 432 return -ENOMEM;
@@ -520,12 +520,11 @@ static int __init toshiba_init(void)
520 { 520 {
521 struct proc_dir_entry *pde; 521 struct proc_dir_entry *pde;
522 522
523 pde = create_proc_entry("toshiba", 0, NULL); 523 pde = proc_create("toshiba", 0, NULL, &proc_toshiba_fops);
524 if (!pde) { 524 if (!pde) {
525 misc_deregister(&tosh_device); 525 misc_deregister(&tosh_device);
526 return -ENOMEM; 526 return -ENOMEM;
527 } 527 }
528 pde->proc_fops = &proc_toshiba_fops;
529 } 528 }
530#endif 529#endif
531 530
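The toshiba change replaces the two-step create_proc_entry() plus pde->proc_fops assignment with proc_create(), which registers the entry and its file operations atomically and closes the window where the /proc file exists without a handler. A minimal sketch; example_proc_fops and example_proc_init are illustrative.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>

static const struct file_operations example_proc_fops = {
        .owner  = THIS_MODULE,          /* a real driver also fills in .open/.read */
};

static int __init example_proc_init(void)
{
        if (!proc_create("example", 0, NULL, &example_proc_fops))
                return -ENOMEM;
        return 0;
}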
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 8f3f7620f95a..3738cfa209ff 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -23,7 +23,7 @@ if TCG_TPM
23 23
24config TCG_TIS 24config TCG_TIS
25 tristate "TPM Interface Specification 1.2 Interface" 25 tristate "TPM Interface Specification 1.2 Interface"
26 depends on PNPACPI 26 depends on PNP
27 ---help--- 27 ---help---
28 If you have a TPM security chip that is compliant with the 28 If you have a TPM security chip that is compliant with the
29 TCG TIS 1.2 TPM specification say Yes and it will be accessible 29 TCG TIS 1.2 TPM specification say Yes and it will be accessible
@@ -32,7 +32,6 @@ config TCG_TIS
32 32
33config TCG_NSC 33config TCG_NSC
34 tristate "National Semiconductor TPM Interface" 34 tristate "National Semiconductor TPM Interface"
35 depends on PNPACPI
36 ---help--- 35 ---help---
37 If you have a TPM security chip from National Semiconductor 36 If you have a TPM security chip from National Semiconductor
38 say Yes and it will be accessible from within Linux. To 37 say Yes and it will be accessible from within Linux. To
@@ -48,7 +47,7 @@ config TCG_ATMEL
48 47
49config TCG_INFINEON 48config TCG_INFINEON
50 tristate "Infineon Technologies TPM Interface" 49 tristate "Infineon Technologies TPM Interface"
51 depends on PNPACPI 50 depends on PNP
52 ---help--- 51 ---help---
53 If you have a TPM security chip from Infineon Technologies 52 If you have a TPM security chip from Infineon Technologies
54 (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it 53 (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 6313326bc41f..ab18c1e7b115 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -264,7 +264,7 @@ static const struct tpm_vendor_specific tpm_nsc = {
264 264
265static struct platform_device *pdev = NULL; 265static struct platform_device *pdev = NULL;
266 266
267static void __devexit tpm_nsc_remove(struct device *dev) 267static void tpm_nsc_remove(struct device *dev)
268{ 268{
269 struct tpm_chip *chip = dev_get_drvdata(dev); 269 struct tpm_chip *chip = dev_get_drvdata(dev);
270 if ( chip ) { 270 if ( chip ) {
diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c
index 7722466e052f..3582f43345a8 100644
--- a/drivers/char/tty_audit.c
+++ b/drivers/char/tty_audit.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/audit.h> 12#include <linux/audit.h>
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/fdtable.h>
14#include <linux/tty.h> 15#include <linux/tty.h>
15 16
16struct tty_audit_buf { 17struct tty_audit_buf {
@@ -92,7 +93,7 @@ static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid,
92 get_task_comm(name, tsk); 93 get_task_comm(name, tsk);
93 audit_log_untrustedstring(ab, name); 94 audit_log_untrustedstring(ab, name);
94 audit_log_format(ab, " data="); 95 audit_log_format(ab, " data=");
95 audit_log_n_untrustedstring(ab, buf->valid, buf->data); 96 audit_log_n_untrustedstring(ab, buf->data, buf->valid);
96 audit_log_end(ab); 97 audit_log_end(ab);
97 } 98 }
98 buf->valid = 0; 99 buf->valid = 0;
@@ -151,14 +152,9 @@ void tty_audit_fork(struct signal_struct *sig)
151/** 152/**
152 * tty_audit_push_task - Flush task's pending audit data 153 * tty_audit_push_task - Flush task's pending audit data
153 */ 154 */
154void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid) 155void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid)
155{ 156{
156 struct tty_audit_buf *buf; 157 struct tty_audit_buf *buf;
157 /* FIXME I think this is correct. Check against netlink once that is
158 * I really need to read this code more closely. But that's for
159 * another patch.
160 */
161 unsigned int sessionid = audit_get_sessionid(tsk);
162 158
163 spin_lock_irq(&tsk->sighand->siglock); 159 spin_lock_irq(&tsk->sighand->siglock);
164 buf = tsk->signal->tty_audit_buf; 160 buf = tsk->signal->tty_audit_buf;
@@ -238,6 +234,10 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
238 if (unlikely(size == 0)) 234 if (unlikely(size == 0))
239 return; 235 return;
240 236
237 if (tty->driver->type == TTY_DRIVER_TYPE_PTY
238 && tty->driver->subtype == PTY_TYPE_MASTER)
239 return;
240
241 buf = tty_audit_buf_get(tty); 241 buf = tty_audit_buf_get(tty);
242 if (!buf) 242 if (!buf)
243 return; 243 return;
@@ -300,53 +300,3 @@ void tty_audit_push(struct tty_struct *tty)
300 tty_audit_buf_put(buf); 300 tty_audit_buf_put(buf);
301 } 301 }
302} 302}
303
304/**
305 * tty_audit_opening - A TTY is being opened.
306 *
307 * As a special hack, tasks that close all their TTYs and open new ones
308 * are assumed to be system daemons (e.g. getty) and auditing is
309 * automatically disabled for them.
310 */
311void tty_audit_opening(void)
312{
313 int disable;
314
315 disable = 1;
316 spin_lock_irq(&current->sighand->siglock);
317 if (current->signal->audit_tty == 0)
318 disable = 0;
319 spin_unlock_irq(&current->sighand->siglock);
320 if (!disable)
321 return;
322
323 task_lock(current);
324 if (current->files) {
325 struct fdtable *fdt;
326 unsigned i;
327
328 /*
329 * We don't take a ref to the file, so we must hold ->file_lock
330 * instead.
331 */
332 spin_lock(&current->files->file_lock);
333 fdt = files_fdtable(current->files);
334 for (i = 0; i < fdt->max_fds; i++) {
335 struct file *filp;
336
337 filp = fcheck_files(current->files, i);
338 if (filp && is_tty(filp)) {
339 disable = 0;
340 break;
341 }
342 }
343 spin_unlock(&current->files->file_lock);
344 }
345 task_unlock(current);
346 if (!disable)
347 return;
348
349 spin_lock_irq(&current->sighand->siglock);
350 current->signal->audit_tty = 0;
351 spin_unlock_irq(&current->sighand->siglock);
352}
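Three things change in tty_audit.c: the audit_log_n_untrustedstring() call now passes the buffer pointer before the length, the session id is supplied by the caller of tty_audit_push_task() instead of being looked up locally, and data flowing through a PTY master is no longer recorded. A hedged sketch of the corrected logging call; example_tty_audit_log() and its arguments are illustrative, not code from the patch.

#include <linux/audit.h>
#include <linux/gfp.h>

static void example_tty_audit_log(const char *data, size_t len)
{
        struct audit_buffer *ab;

        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
        if (!ab)
                return;
        audit_log_format(ab, " data=");
        /* argument order after this patch: buffer first, length second */
        audit_log_n_untrustedstring(ab, data, len);
        audit_log_end(ab);
}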
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 4d3c7018f0c3..49c1a2267a55 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -78,6 +78,7 @@
78#include <linux/tty_flip.h> 78#include <linux/tty_flip.h>
79#include <linux/devpts_fs.h> 79#include <linux/devpts_fs.h>
80#include <linux/file.h> 80#include <linux/file.h>
81#include <linux/fdtable.h>
81#include <linux/console.h> 82#include <linux/console.h>
82#include <linux/timer.h> 83#include <linux/timer.h>
83#include <linux/ctype.h> 84#include <linux/ctype.h>
@@ -91,7 +92,6 @@
91#include <linux/module.h> 92#include <linux/module.h>
92#include <linux/smp_lock.h> 93#include <linux/smp_lock.h>
93#include <linux/device.h> 94#include <linux/device.h>
94#include <linux/idr.h>
95#include <linux/wait.h> 95#include <linux/wait.h>
96#include <linux/bitops.h> 96#include <linux/bitops.h>
97#include <linux/delay.h> 97#include <linux/delay.h>
@@ -137,9 +137,6 @@ EXPORT_SYMBOL(tty_mutex);
137 137
138#ifdef CONFIG_UNIX98_PTYS 138#ifdef CONFIG_UNIX98_PTYS
139extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */ 139extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
140extern int pty_limit; /* Config limit on Unix98 ptys */
141static DEFINE_IDR(allocated_ptys);
142static DEFINE_MUTEX(allocated_ptys_lock);
143static int ptmx_open(struct inode *, struct file *); 140static int ptmx_open(struct inode *, struct file *);
144#endif 141#endif
145 142
@@ -152,8 +149,7 @@ ssize_t redirected_tty_write(struct file *, const char __user *,
152static unsigned int tty_poll(struct file *, poll_table *); 149static unsigned int tty_poll(struct file *, poll_table *);
153static int tty_open(struct inode *, struct file *); 150static int tty_open(struct inode *, struct file *);
154static int tty_release(struct inode *, struct file *); 151static int tty_release(struct inode *, struct file *);
155int tty_ioctl(struct inode *inode, struct file *file, 152long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
156 unsigned int cmd, unsigned long arg);
157#ifdef CONFIG_COMPAT 153#ifdef CONFIG_COMPAT
158static long tty_compat_ioctl(struct file *file, unsigned int cmd, 154static long tty_compat_ioctl(struct file *file, unsigned int cmd,
159 unsigned long arg); 155 unsigned long arg);
@@ -1109,8 +1105,8 @@ restart:
1109 a reference to the old ldisc. If we ended up flipping back 1105 a reference to the old ldisc. If we ended up flipping back
1110 to the existing ldisc we have two references to it */ 1106 to the existing ldisc we have two references to it */
1111 1107
1112 if (tty->ldisc.num != o_ldisc.num && tty->driver->set_ldisc) 1108 if (tty->ldisc.num != o_ldisc.num && tty->ops->set_ldisc)
1113 tty->driver->set_ldisc(tty); 1109 tty->ops->set_ldisc(tty);
1114 1110
1115 tty_ldisc_put(o_ldisc.num); 1111 tty_ldisc_put(o_ldisc.num);
1116 1112
@@ -1180,11 +1176,10 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
1180 if (*str == ',') 1176 if (*str == ',')
1181 str++; 1177 str++;
1182 if (*str == '\0') 1178 if (*str == '\0')
1183 str = 0; 1179 str = NULL;
1184
1185 if (tty_line >= 0 && tty_line <= p->num && p->poll_init &&
1186 !p->poll_init(p, tty_line, str)) {
1187 1180
1181 if (tty_line >= 0 && tty_line <= p->num && p->ops &&
1182 p->ops->poll_init && !p->ops->poll_init(p, tty_line, str)) {
1188 res = p; 1183 res = p;
1189 *line = tty_line; 1184 *line = tty_line;
1190 break; 1185 break;
@@ -1205,26 +1200,37 @@ EXPORT_SYMBOL_GPL(tty_find_polling_driver);
1205 * not in the foreground, send a SIGTTOU. If the signal is blocked or 1200 * not in the foreground, send a SIGTTOU. If the signal is blocked or
1206 * ignored, go ahead and perform the operation. (POSIX 7.2) 1201 * ignored, go ahead and perform the operation. (POSIX 7.2)
1207 * 1202 *
1208 * Locking: none 1203 * Locking: ctrl_lock
1209 */ 1204 */
1210 1205
1211int tty_check_change(struct tty_struct *tty) 1206int tty_check_change(struct tty_struct *tty)
1212{ 1207{
1208 unsigned long flags;
1209 int ret = 0;
1210
1213 if (current->signal->tty != tty) 1211 if (current->signal->tty != tty)
1214 return 0; 1212 return 0;
1213
1214 spin_lock_irqsave(&tty->ctrl_lock, flags);
1215
1215 if (!tty->pgrp) { 1216 if (!tty->pgrp) {
1216 printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n"); 1217 printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n");
1217 return 0; 1218 goto out;
1218 } 1219 }
1219 if (task_pgrp(current) == tty->pgrp) 1220 if (task_pgrp(current) == tty->pgrp)
1220 return 0; 1221 goto out;
1221 if (is_ignored(SIGTTOU)) 1222 if (is_ignored(SIGTTOU))
1222 return 0; 1223 goto out;
1223 if (is_current_pgrp_orphaned()) 1224 if (is_current_pgrp_orphaned()) {
1224 return -EIO; 1225 ret = -EIO;
1226 goto out;
1227 }
1225 kill_pgrp(task_pgrp(current), SIGTTOU, 1); 1228 kill_pgrp(task_pgrp(current), SIGTTOU, 1);
1226 set_thread_flag(TIF_SIGPENDING); 1229 set_thread_flag(TIF_SIGPENDING);
1227 return -ERESTARTSYS; 1230 ret = -ERESTARTSYS;
1231out:
1232 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1233 return ret;
1228} 1234}
1229 1235
1230EXPORT_SYMBOL(tty_check_change); 1236EXPORT_SYMBOL(tty_check_change);
@@ -1247,8 +1253,8 @@ static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait)
1247 return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM; 1253 return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
1248} 1254}
1249 1255
1250static int hung_up_tty_ioctl(struct inode *inode, struct file *file, 1256static long hung_up_tty_ioctl(struct file *file, unsigned int cmd,
1251 unsigned int cmd, unsigned long arg) 1257 unsigned long arg)
1252{ 1258{
1253 return cmd == TIOCSPGRP ? -ENOTTY : -EIO; 1259 return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
1254} 1260}
@@ -1264,7 +1270,7 @@ static const struct file_operations tty_fops = {
1264 .read = tty_read, 1270 .read = tty_read,
1265 .write = tty_write, 1271 .write = tty_write,
1266 .poll = tty_poll, 1272 .poll = tty_poll,
1267 .ioctl = tty_ioctl, 1273 .unlocked_ioctl = tty_ioctl,
1268 .compat_ioctl = tty_compat_ioctl, 1274 .compat_ioctl = tty_compat_ioctl,
1269 .open = tty_open, 1275 .open = tty_open,
1270 .release = tty_release, 1276 .release = tty_release,
@@ -1277,7 +1283,7 @@ static const struct file_operations ptmx_fops = {
1277 .read = tty_read, 1283 .read = tty_read,
1278 .write = tty_write, 1284 .write = tty_write,
1279 .poll = tty_poll, 1285 .poll = tty_poll,
1280 .ioctl = tty_ioctl, 1286 .unlocked_ioctl = tty_ioctl,
1281 .compat_ioctl = tty_compat_ioctl, 1287 .compat_ioctl = tty_compat_ioctl,
1282 .open = ptmx_open, 1288 .open = ptmx_open,
1283 .release = tty_release, 1289 .release = tty_release,
@@ -1290,7 +1296,7 @@ static const struct file_operations console_fops = {
1290 .read = tty_read, 1296 .read = tty_read,
1291 .write = redirected_tty_write, 1297 .write = redirected_tty_write,
1292 .poll = tty_poll, 1298 .poll = tty_poll,
1293 .ioctl = tty_ioctl, 1299 .unlocked_ioctl = tty_ioctl,
1294 .compat_ioctl = tty_compat_ioctl, 1300 .compat_ioctl = tty_compat_ioctl,
1295 .open = tty_open, 1301 .open = tty_open,
1296 .release = tty_release, 1302 .release = tty_release,
@@ -1302,7 +1308,7 @@ static const struct file_operations hung_up_tty_fops = {
1302 .read = hung_up_tty_read, 1308 .read = hung_up_tty_read,
1303 .write = hung_up_tty_write, 1309 .write = hung_up_tty_write,
1304 .poll = hung_up_tty_poll, 1310 .poll = hung_up_tty_poll,
1305 .ioctl = hung_up_tty_ioctl, 1311 .unlocked_ioctl = hung_up_tty_ioctl,
1306 .compat_ioctl = hung_up_tty_compat_ioctl, 1312 .compat_ioctl = hung_up_tty_compat_ioctl,
1307 .release = tty_release, 1313 .release = tty_release,
1308}; 1314};
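tty_io.c moves its file_operations from .ioctl to .unlocked_ioctl (including the hung-up stub above), so the VFS no longer wraps the handler in the BKL; the handler drops the inode argument, returns long, and takes whatever locking it still needs explicitly, which is why lock_kernel() shows up around tiocsetd() and the f_flags update later in this file. A sketch of the prototype difference only, with illustrative names.

#include <linux/fs.h>

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
                                   unsigned long arg)
{
        /* the inode, if needed, is file->f_path.dentry->d_inode */
        return -ENOTTY;
}

static const struct file_operations example_fops = {
        .unlocked_ioctl = example_unlocked_ioctl,
};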
@@ -1404,6 +1410,7 @@ static void do_tty_hangup(struct work_struct *work)
1404 struct task_struct *p; 1410 struct task_struct *p;
1405 struct tty_ldisc *ld; 1411 struct tty_ldisc *ld;
1406 int closecount = 0, n; 1412 int closecount = 0, n;
1413 unsigned long flags;
1407 1414
1408 if (!tty) 1415 if (!tty)
1409 return; 1416 return;
@@ -1441,8 +1448,7 @@ static void do_tty_hangup(struct work_struct *work)
1441 /* We may have no line discipline at this point */ 1448 /* We may have no line discipline at this point */
1442 if (ld->flush_buffer) 1449 if (ld->flush_buffer)
1443 ld->flush_buffer(tty); 1450 ld->flush_buffer(tty);
1444 if (tty->driver->flush_buffer) 1451 tty_driver_flush_buffer(tty);
1445 tty->driver->flush_buffer(tty);
1446 if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) && 1452 if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
1447 ld->write_wakeup) 1453 ld->write_wakeup)
1448 ld->write_wakeup(tty); 1454 ld->write_wakeup(tty);
@@ -1480,19 +1486,24 @@ static void do_tty_hangup(struct work_struct *work)
1480 __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p); 1486 __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
1481 __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p); 1487 __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
1482 put_pid(p->signal->tty_old_pgrp); /* A noop */ 1488 put_pid(p->signal->tty_old_pgrp); /* A noop */
1489 spin_lock_irqsave(&tty->ctrl_lock, flags);
1483 if (tty->pgrp) 1490 if (tty->pgrp)
1484 p->signal->tty_old_pgrp = get_pid(tty->pgrp); 1491 p->signal->tty_old_pgrp = get_pid(tty->pgrp);
1492 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1485 spin_unlock_irq(&p->sighand->siglock); 1493 spin_unlock_irq(&p->sighand->siglock);
1486 } while_each_pid_task(tty->session, PIDTYPE_SID, p); 1494 } while_each_pid_task(tty->session, PIDTYPE_SID, p);
1487 } 1495 }
1488 read_unlock(&tasklist_lock); 1496 read_unlock(&tasklist_lock);
1489 1497
1498 spin_lock_irqsave(&tty->ctrl_lock, flags);
1490 tty->flags = 0; 1499 tty->flags = 0;
1491 put_pid(tty->session); 1500 put_pid(tty->session);
1492 put_pid(tty->pgrp); 1501 put_pid(tty->pgrp);
1493 tty->session = NULL; 1502 tty->session = NULL;
1494 tty->pgrp = NULL; 1503 tty->pgrp = NULL;
1495 tty->ctrl_status = 0; 1504 tty->ctrl_status = 0;
1505 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1506
1496 /* 1507 /*
1497 * If one of the devices matches a console pointer, we 1508 * If one of the devices matches a console pointer, we
1498 * cannot just call hangup() because that will cause 1509 * cannot just call hangup() because that will cause
@@ -1500,11 +1511,11 @@ static void do_tty_hangup(struct work_struct *work)
1500 * So we just call close() the right number of times. 1511 * So we just call close() the right number of times.
1501 */ 1512 */
1502 if (cons_filp) { 1513 if (cons_filp) {
1503 if (tty->driver->close) 1514 if (tty->ops->close)
1504 for (n = 0; n < closecount; n++) 1515 for (n = 0; n < closecount; n++)
1505 tty->driver->close(tty, cons_filp); 1516 tty->ops->close(tty, cons_filp);
1506 } else if (tty->driver->hangup) 1517 } else if (tty->ops->hangup)
1507 (tty->driver->hangup)(tty); 1518 (tty->ops->hangup)(tty);
1508 /* 1519 /*
1509 * We don't want to have driver/ldisc interactions beyond 1520 * We don't want to have driver/ldisc interactions beyond
1510 * the ones we did here. The driver layer expects no 1521 * the ones we did here. The driver layer expects no
@@ -1626,16 +1637,17 @@ void disassociate_ctty(int on_exit)
1626 struct tty_struct *tty; 1637 struct tty_struct *tty;
1627 struct pid *tty_pgrp = NULL; 1638 struct pid *tty_pgrp = NULL;
1628 1639
1629 lock_kernel();
1630 1640
1631 mutex_lock(&tty_mutex); 1641 mutex_lock(&tty_mutex);
1632 tty = get_current_tty(); 1642 tty = get_current_tty();
1633 if (tty) { 1643 if (tty) {
1634 tty_pgrp = get_pid(tty->pgrp); 1644 tty_pgrp = get_pid(tty->pgrp);
1635 mutex_unlock(&tty_mutex); 1645 mutex_unlock(&tty_mutex);
1646 lock_kernel();
1636 /* XXX: here we race, there is nothing protecting tty */ 1647 /* XXX: here we race, there is nothing protecting tty */
1637 if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) 1648 if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
1638 tty_vhangup(tty); 1649 tty_vhangup(tty);
1650 unlock_kernel();
1639 } else if (on_exit) { 1651 } else if (on_exit) {
1640 struct pid *old_pgrp; 1652 struct pid *old_pgrp;
1641 spin_lock_irq(&current->sighand->siglock); 1653 spin_lock_irq(&current->sighand->siglock);
@@ -1648,7 +1660,6 @@ void disassociate_ctty(int on_exit)
1648 put_pid(old_pgrp); 1660 put_pid(old_pgrp);
1649 } 1661 }
1650 mutex_unlock(&tty_mutex); 1662 mutex_unlock(&tty_mutex);
1651 unlock_kernel();
1652 return; 1663 return;
1653 } 1664 }
1654 if (tty_pgrp) { 1665 if (tty_pgrp) {
@@ -1667,10 +1678,13 @@ void disassociate_ctty(int on_exit)
1667 /* It is possible that do_tty_hangup has free'd this tty */ 1678 /* It is possible that do_tty_hangup has free'd this tty */
1668 tty = get_current_tty(); 1679 tty = get_current_tty();
1669 if (tty) { 1680 if (tty) {
1681 unsigned long flags;
1682 spin_lock_irqsave(&tty->ctrl_lock, flags);
1670 put_pid(tty->session); 1683 put_pid(tty->session);
1671 put_pid(tty->pgrp); 1684 put_pid(tty->pgrp);
1672 tty->session = NULL; 1685 tty->session = NULL;
1673 tty->pgrp = NULL; 1686 tty->pgrp = NULL;
1687 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1674 } else { 1688 } else {
1675#ifdef TTY_DEBUG_HANGUP 1689#ifdef TTY_DEBUG_HANGUP
1676 printk(KERN_DEBUG "error attempted to write to tty [0x%p]" 1690 printk(KERN_DEBUG "error attempted to write to tty [0x%p]"
@@ -1683,7 +1697,6 @@ void disassociate_ctty(int on_exit)
1683 read_lock(&tasklist_lock); 1697 read_lock(&tasklist_lock);
1684 session_clear_tty(task_session(current)); 1698 session_clear_tty(task_session(current));
1685 read_unlock(&tasklist_lock); 1699 read_unlock(&tasklist_lock);
1686 unlock_kernel();
1687} 1700}
1688 1701
1689/** 1702/**
@@ -1693,8 +1706,10 @@ void disassociate_ctty(int on_exit)
1693void no_tty(void) 1706void no_tty(void)
1694{ 1707{
1695 struct task_struct *tsk = current; 1708 struct task_struct *tsk = current;
1709 lock_kernel();
1696 if (tsk->signal->leader) 1710 if (tsk->signal->leader)
1697 disassociate_ctty(0); 1711 disassociate_ctty(0);
1712 unlock_kernel();
1698 proc_clear_tty(tsk); 1713 proc_clear_tty(tsk);
1699} 1714}
1700 1715
@@ -1714,21 +1729,26 @@ void no_tty(void)
1714 * but not always. 1729 * but not always.
1715 * 1730 *
1716 * Locking: 1731 * Locking:
1717 * Broken. Relies on BKL which is unsafe here. 1732 * Uses the tty control lock internally
1718 */ 1733 */
1719 1734
1720void stop_tty(struct tty_struct *tty) 1735void stop_tty(struct tty_struct *tty)
1721{ 1736{
1722 if (tty->stopped) 1737 unsigned long flags;
1738 spin_lock_irqsave(&tty->ctrl_lock, flags);
1739 if (tty->stopped) {
1740 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1723 return; 1741 return;
1742 }
1724 tty->stopped = 1; 1743 tty->stopped = 1;
1725 if (tty->link && tty->link->packet) { 1744 if (tty->link && tty->link->packet) {
1726 tty->ctrl_status &= ~TIOCPKT_START; 1745 tty->ctrl_status &= ~TIOCPKT_START;
1727 tty->ctrl_status |= TIOCPKT_STOP; 1746 tty->ctrl_status |= TIOCPKT_STOP;
1728 wake_up_interruptible(&tty->link->read_wait); 1747 wake_up_interruptible(&tty->link->read_wait);
1729 } 1748 }
1730 if (tty->driver->stop) 1749 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1731 (tty->driver->stop)(tty); 1750 if (tty->ops->stop)
1751 (tty->ops->stop)(tty);
1732} 1752}
1733 1753
1734EXPORT_SYMBOL(stop_tty); 1754EXPORT_SYMBOL(stop_tty);
@@ -1743,21 +1763,26 @@ EXPORT_SYMBOL(stop_tty);
1743 * driver start method is invoked and the line discipline woken. 1763 * driver start method is invoked and the line discipline woken.
1744 * 1764 *
1745 * Locking: 1765 * Locking:
1746 * Broken. Relies on BKL which is unsafe here. 1766 * ctrl_lock
1747 */ 1767 */
1748 1768
1749void start_tty(struct tty_struct *tty) 1769void start_tty(struct tty_struct *tty)
1750{ 1770{
1751 if (!tty->stopped || tty->flow_stopped) 1771 unsigned long flags;
1772 spin_lock_irqsave(&tty->ctrl_lock, flags);
1773 if (!tty->stopped || tty->flow_stopped) {
1774 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1752 return; 1775 return;
1776 }
1753 tty->stopped = 0; 1777 tty->stopped = 0;
1754 if (tty->link && tty->link->packet) { 1778 if (tty->link && tty->link->packet) {
1755 tty->ctrl_status &= ~TIOCPKT_STOP; 1779 tty->ctrl_status &= ~TIOCPKT_STOP;
1756 tty->ctrl_status |= TIOCPKT_START; 1780 tty->ctrl_status |= TIOCPKT_START;
1757 wake_up_interruptible(&tty->link->read_wait); 1781 wake_up_interruptible(&tty->link->read_wait);
1758 } 1782 }
1759 if (tty->driver->start) 1783 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1760 (tty->driver->start)(tty); 1784 if (tty->ops->start)
1785 (tty->ops->start)(tty);
1761 /* If we have a running line discipline it may need kicking */ 1786 /* If we have a running line discipline it may need kicking */
1762 tty_wakeup(tty); 1787 tty_wakeup(tty);
1763} 1788}
@@ -1775,10 +1800,8 @@ EXPORT_SYMBOL(start_tty);
1775 * for hung up devices before calling the line discipline method. 1800 * for hung up devices before calling the line discipline method.
1776 * 1801 *
1777 * Locking: 1802 * Locking:
1778 * Locks the line discipline internally while needed 1803 * Locks the line discipline internally while needed. Multiple
1779 * For historical reasons the line discipline read method is 1804 * read calls may be outstanding in parallel.
1780 * invoked under the BKL. This will go away in time so do not rely on it
1781 * in new code. Multiple read calls may be outstanding in parallel.
1782 */ 1805 */
1783 1806
1784static ssize_t tty_read(struct file *file, char __user *buf, size_t count, 1807static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
@@ -1799,13 +1822,11 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
1799 /* We want to wait for the line discipline to sort out in this 1822 /* We want to wait for the line discipline to sort out in this
1800 situation */ 1823 situation */
1801 ld = tty_ldisc_ref_wait(tty); 1824 ld = tty_ldisc_ref_wait(tty);
1802 lock_kernel();
1803 if (ld->read) 1825 if (ld->read)
1804 i = (ld->read)(tty, file, buf, count); 1826 i = (ld->read)(tty, file, buf, count);
1805 else 1827 else
1806 i = -EIO; 1828 i = -EIO;
1807 tty_ldisc_deref(ld); 1829 tty_ldisc_deref(ld);
1808 unlock_kernel();
1809 if (i > 0) 1830 if (i > 0)
1810 inode->i_atime = current_fs_time(inode->i_sb); 1831 inode->i_atime = current_fs_time(inode->i_sb);
1811 return i; 1832 return i;
@@ -1893,9 +1914,7 @@ static inline ssize_t do_tty_write(
1893 ret = -EFAULT; 1914 ret = -EFAULT;
1894 if (copy_from_user(tty->write_buf, buf, size)) 1915 if (copy_from_user(tty->write_buf, buf, size))
1895 break; 1916 break;
1896 lock_kernel();
1897 ret = write(tty, file, tty->write_buf, size); 1917 ret = write(tty, file, tty->write_buf, size);
1898 unlock_kernel();
1899 if (ret <= 0) 1918 if (ret <= 0)
1900 break; 1919 break;
1901 written += ret; 1920 written += ret;
@@ -1948,10 +1967,13 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
1948 tty = (struct tty_struct *)file->private_data; 1967 tty = (struct tty_struct *)file->private_data;
1949 if (tty_paranoia_check(tty, inode, "tty_write")) 1968 if (tty_paranoia_check(tty, inode, "tty_write"))
1950 return -EIO; 1969 return -EIO;
1951 if (!tty || !tty->driver->write || 1970 if (!tty || !tty->ops->write ||
1952 (test_bit(TTY_IO_ERROR, &tty->flags))) 1971 (test_bit(TTY_IO_ERROR, &tty->flags)))
1953 return -EIO; 1972 return -EIO;
1954 1973 /* Short term debug to catch buggy drivers */
1974 if (tty->ops->write_room == NULL)
1975 printk(KERN_ERR "tty driver %s lacks a write_room method.\n",
1976 tty->driver->name);
1955 ld = tty_ldisc_ref_wait(tty); 1977 ld = tty_ldisc_ref_wait(tty);
1956 if (!ld->write) 1978 if (!ld->write)
1957 ret = -EIO; 1979 ret = -EIO;
@@ -2098,6 +2120,7 @@ static int init_dev(struct tty_driver *driver, int idx,
2098 goto fail_no_mem; 2120 goto fail_no_mem;
2099 initialize_tty_struct(tty); 2121 initialize_tty_struct(tty);
2100 tty->driver = driver; 2122 tty->driver = driver;
2123 tty->ops = driver->ops;
2101 tty->index = idx; 2124 tty->index = idx;
2102 tty_line_name(driver, idx, tty->name); 2125 tty_line_name(driver, idx, tty->name);
2103 2126
@@ -2128,6 +2151,7 @@ static int init_dev(struct tty_driver *driver, int idx,
2128 goto free_mem_out; 2151 goto free_mem_out;
2129 initialize_tty_struct(o_tty); 2152 initialize_tty_struct(o_tty);
2130 o_tty->driver = driver->other; 2153 o_tty->driver = driver->other;
2154 o_tty->ops = driver->ops;
2131 o_tty->index = idx; 2155 o_tty->index = idx;
2132 tty_line_name(driver->other, idx, o_tty->name); 2156 tty_line_name(driver->other, idx, o_tty->name);
2133 2157
@@ -2432,8 +2456,8 @@ static void release_dev(struct file *filp)
2432 } 2456 }
2433 } 2457 }
2434#endif 2458#endif
2435 if (tty->driver->close) 2459 if (tty->ops->close)
2436 tty->driver->close(tty, filp); 2460 tty->ops->close(tty, filp);
2437 2461
2438 /* 2462 /*
2439 * Sanity check: if tty->count is going to zero, there shouldn't be 2463 * Sanity check: if tty->count is going to zero, there shouldn't be
@@ -2612,15 +2636,9 @@ static void release_dev(struct file *filp)
2612 */ 2636 */
2613 release_tty(tty, idx); 2637 release_tty(tty, idx);
2614 2638
2615#ifdef CONFIG_UNIX98_PTYS
2616 /* Make this pty number available for reallocation */ 2639 /* Make this pty number available for reallocation */
2617 if (devpts) { 2640 if (devpts)
2618 mutex_lock(&allocated_ptys_lock); 2641 devpts_kill_index(idx);
2619 idr_remove(&allocated_ptys, idx);
2620 mutex_unlock(&allocated_ptys_lock);
2621 }
2622#endif
2623
2624} 2642}
2625 2643
2626/** 2644/**
@@ -2716,8 +2734,8 @@ got_driver:
2716 printk(KERN_DEBUG "opening %s...", tty->name); 2734 printk(KERN_DEBUG "opening %s...", tty->name);
2717#endif 2735#endif
2718 if (!retval) { 2736 if (!retval) {
2719 if (tty->driver->open) 2737 if (tty->ops->open)
2720 retval = tty->driver->open(tty, filp); 2738 retval = tty->ops->open(tty, filp);
2721 else 2739 else
2722 retval = -ENODEV; 2740 retval = -ENODEV;
2723 } 2741 }
@@ -2755,7 +2773,6 @@ got_driver:
2755 __proc_set_tty(current, tty); 2773 __proc_set_tty(current, tty);
2756 spin_unlock_irq(&current->sighand->siglock); 2774 spin_unlock_irq(&current->sighand->siglock);
2757 mutex_unlock(&tty_mutex); 2775 mutex_unlock(&tty_mutex);
2758 tty_audit_opening();
2759 return 0; 2776 return 0;
2760} 2777}
2761 2778
@@ -2777,29 +2794,13 @@ static int ptmx_open(struct inode *inode, struct file *filp)
2777 struct tty_struct *tty; 2794 struct tty_struct *tty;
2778 int retval; 2795 int retval;
2779 int index; 2796 int index;
2780 int idr_ret;
2781 2797
2782 nonseekable_open(inode, filp); 2798 nonseekable_open(inode, filp);
2783 2799
2784 /* find a device that is not in use. */ 2800 /* find a device that is not in use. */
2785 mutex_lock(&allocated_ptys_lock); 2801 index = devpts_new_index();
2786 if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) { 2802 if (index < 0)
2787 mutex_unlock(&allocated_ptys_lock); 2803 return index;
2788 return -ENOMEM;
2789 }
2790 idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
2791 if (idr_ret < 0) {
2792 mutex_unlock(&allocated_ptys_lock);
2793 if (idr_ret == -EAGAIN)
2794 return -ENOMEM;
2795 return -EIO;
2796 }
2797 if (index >= pty_limit) {
2798 idr_remove(&allocated_ptys, index);
2799 mutex_unlock(&allocated_ptys_lock);
2800 return -EIO;
2801 }
2802 mutex_unlock(&allocated_ptys_lock);
2803 2804
2804 mutex_lock(&tty_mutex); 2805 mutex_lock(&tty_mutex);
2805 retval = init_dev(ptm_driver, index, &tty); 2806 retval = init_dev(ptm_driver, index, &tty);
@@ -2812,23 +2813,19 @@ static int ptmx_open(struct inode *inode, struct file *filp)
2812 filp->private_data = tty; 2813 filp->private_data = tty;
2813 file_move(filp, &tty->tty_files); 2814 file_move(filp, &tty->tty_files);
2814 2815
2815 retval = -ENOMEM; 2816 retval = devpts_pty_new(tty->link);
2816 if (devpts_pty_new(tty->link)) 2817 if (retval)
2817 goto out1; 2818 goto out1;
2818 2819
2819 check_tty_count(tty, "tty_open"); 2820 check_tty_count(tty, "ptmx_open");
2820 retval = ptm_driver->open(tty, filp); 2821 retval = ptm_driver->ops->open(tty, filp);
2821 if (!retval) { 2822 if (!retval)
2822 tty_audit_opening();
2823 return 0; 2823 return 0;
2824 }
2825out1: 2824out1:
2826 release_dev(filp); 2825 release_dev(filp);
2827 return retval; 2826 return retval;
2828out: 2827out:
2829 mutex_lock(&allocated_ptys_lock); 2828 devpts_kill_index(index);
2830 idr_remove(&allocated_ptys, index);
2831 mutex_unlock(&allocated_ptys_lock);
2832 return retval; 2829 return retval;
2833} 2830}
2834#endif 2831#endif
@@ -2885,6 +2882,7 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
2885static int tty_fasync(int fd, struct file *filp, int on) 2882static int tty_fasync(int fd, struct file *filp, int on)
2886{ 2883{
2887 struct tty_struct *tty; 2884 struct tty_struct *tty;
2885 unsigned long flags;
2888 int retval; 2886 int retval;
2889 2887
2890 tty = (struct tty_struct *)filp->private_data; 2888 tty = (struct tty_struct *)filp->private_data;
@@ -2900,6 +2898,7 @@ static int tty_fasync(int fd, struct file *filp, int on)
2900 struct pid *pid; 2898 struct pid *pid;
2901 if (!waitqueue_active(&tty->read_wait)) 2899 if (!waitqueue_active(&tty->read_wait))
2902 tty->minimum_to_wake = 1; 2900 tty->minimum_to_wake = 1;
2901 spin_lock_irqsave(&tty->ctrl_lock, flags);
2903 if (tty->pgrp) { 2902 if (tty->pgrp) {
2904 pid = tty->pgrp; 2903 pid = tty->pgrp;
2905 type = PIDTYPE_PGID; 2904 type = PIDTYPE_PGID;
@@ -2907,6 +2906,7 @@ static int tty_fasync(int fd, struct file *filp, int on)
2907 pid = task_pid(current); 2906 pid = task_pid(current);
2908 type = PIDTYPE_PID; 2907 type = PIDTYPE_PID;
2909 } 2908 }
2909 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
2910 retval = __f_setown(filp, pid, type, 0); 2910 retval = __f_setown(filp, pid, type, 0);
2911 if (retval) 2911 if (retval)
2912 return retval; 2912 return retval;
@@ -2992,6 +2992,8 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
2992 struct winsize __user *arg) 2992 struct winsize __user *arg)
2993{ 2993{
2994 struct winsize tmp_ws; 2994 struct winsize tmp_ws;
2995 struct pid *pgrp, *rpgrp;
2996 unsigned long flags;
2995 2997
2996 if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) 2998 if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
2997 return -EFAULT; 2999 return -EFAULT;
@@ -3009,10 +3011,21 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
3009 } 3011 }
3010 } 3012 }
3011#endif 3013#endif
3012 if (tty->pgrp) 3014 /* Get the PID values and reference them so we can
3013 kill_pgrp(tty->pgrp, SIGWINCH, 1); 3015 avoid holding the tty ctrl lock while sending signals */
3014 if ((real_tty->pgrp != tty->pgrp) && real_tty->pgrp) 3016 spin_lock_irqsave(&tty->ctrl_lock, flags);
3015 kill_pgrp(real_tty->pgrp, SIGWINCH, 1); 3017 pgrp = get_pid(tty->pgrp);
3018 rpgrp = get_pid(real_tty->pgrp);
3019 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
3020
3021 if (pgrp)
3022 kill_pgrp(pgrp, SIGWINCH, 1);
3023 if (rpgrp != pgrp && rpgrp)
3024 kill_pgrp(rpgrp, SIGWINCH, 1);
3025
3026 put_pid(pgrp);
3027 put_pid(rpgrp);
3028
3016 tty->winsize = tmp_ws; 3029 tty->winsize = tmp_ws;
3017 real_tty->winsize = tmp_ws; 3030 real_tty->winsize = tmp_ws;
3018done: 3031done:
@@ -3073,10 +3086,13 @@ static int fionbio(struct file *file, int __user *p)
3073 if (get_user(nonblock, p)) 3086 if (get_user(nonblock, p))
3074 return -EFAULT; 3087 return -EFAULT;
3075 3088
3089 /* file->f_flags is still BKL protected in the fs layer - vomit */
3090 lock_kernel();
3076 if (nonblock) 3091 if (nonblock)
3077 file->f_flags |= O_NONBLOCK; 3092 file->f_flags |= O_NONBLOCK;
3078 else 3093 else
3079 file->f_flags &= ~O_NONBLOCK; 3094 file->f_flags &= ~O_NONBLOCK;
3095 unlock_kernel();
3080 return 0; 3096 return 0;
3081} 3097}
3082 3098
@@ -3134,6 +3150,27 @@ unlock:
3134} 3150}
3135 3151
3136/** 3152/**
3153 * tty_get_pgrp - return a ref counted pgrp pid
3154 * @tty: tty to read
3155 *
3156 * Returns a refcounted instance of the pid struct for the process
3157 * group controlling the tty.
3158 */
3159
3160struct pid *tty_get_pgrp(struct tty_struct *tty)
3161{
3162 unsigned long flags;
3163 struct pid *pgrp;
3164
3165 spin_lock_irqsave(&tty->ctrl_lock, flags);
3166 pgrp = get_pid(tty->pgrp);
3167 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
3168
3169 return pgrp;
3170}
3171EXPORT_SYMBOL_GPL(tty_get_pgrp);
3172
3173/**
3137 * tiocgpgrp - get process group 3174 * tiocgpgrp - get process group
3138 * @tty: tty passed by user 3175 * @tty: tty passed by user
3139 * @real_tty: tty side of the tty pased by the user if a pty else the tty 3176 * @real_tty: tty side of the tty pased by the user if a pty else the tty
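tty_get_pgrp(), added above, is the preferred way to read tty->pgrp: it holds ctrl_lock only long enough to take a counted reference, so the caller can sleep or deliver signals without a spinlock held and then drop the reference with put_pid(). A usage sketch mirroring what tiocswinsz now does; example_notify_foreground and the SIGWINCH delivery are illustrative.

#include <linux/tty.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/signal.h>

static void example_notify_foreground(struct tty_struct *tty)
{
        struct pid *pgrp = tty_get_pgrp(tty);   /* counted reference, may be NULL */

        if (pgrp)
                kill_pgrp(pgrp, SIGWINCH, 1);   /* no ctrl_lock held here */
        put_pid(pgrp);                          /* put_pid(NULL) is a no-op */
}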
@@ -3147,13 +3184,18 @@ unlock:
3147 3184
3148static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 3185static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
3149{ 3186{
3187 struct pid *pid;
3188 int ret;
3150 /* 3189 /*
3151 * (tty == real_tty) is a cheap way of 3190 * (tty == real_tty) is a cheap way of
3152 * testing if the tty is NOT a master pty. 3191 * testing if the tty is NOT a master pty.
3153 */ 3192 */
3154 if (tty == real_tty && current->signal->tty != real_tty) 3193 if (tty == real_tty && current->signal->tty != real_tty)
3155 return -ENOTTY; 3194 return -ENOTTY;
3156 return put_user(pid_vnr(real_tty->pgrp), p); 3195 pid = tty_get_pgrp(real_tty);
3196 ret = put_user(pid_vnr(pid), p);
3197 put_pid(pid);
3198 return ret;
3157} 3199}
3158 3200
3159/** 3201/**
@@ -3165,7 +3207,7 @@ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
3165 * Set the process group of the tty to the session passed. Only 3207 * Set the process group of the tty to the session passed. Only
3166 * permitted where the tty session is our session. 3208 * permitted where the tty session is our session.
3167 * 3209 *
3168 * Locking: None 3210 * Locking: RCU, ctrl lock
3169 */ 3211 */
3170 3212
3171static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 3213static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
@@ -3173,6 +3215,7 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
3173 struct pid *pgrp; 3215 struct pid *pgrp;
3174 pid_t pgrp_nr; 3216 pid_t pgrp_nr;
3175 int retval = tty_check_change(real_tty); 3217 int retval = tty_check_change(real_tty);
3218 unsigned long flags;
3176 3219
3177 if (retval == -EIO) 3220 if (retval == -EIO)
3178 return -ENOTTY; 3221 return -ENOTTY;
@@ -3195,8 +3238,10 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
3195 if (session_of_pgrp(pgrp) != task_session(current)) 3238 if (session_of_pgrp(pgrp) != task_session(current))
3196 goto out_unlock; 3239 goto out_unlock;
3197 retval = 0; 3240 retval = 0;
3241 spin_lock_irqsave(&tty->ctrl_lock, flags);
3198 put_pid(real_tty->pgrp); 3242 put_pid(real_tty->pgrp);
3199 real_tty->pgrp = get_pid(pgrp); 3243 real_tty->pgrp = get_pid(pgrp);
3244 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
3200out_unlock: 3245out_unlock:
3201 rcu_read_unlock(); 3246 rcu_read_unlock();
3202 return retval; 3247 return retval;
@@ -3240,10 +3285,16 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _
3240static int tiocsetd(struct tty_struct *tty, int __user *p) 3285static int tiocsetd(struct tty_struct *tty, int __user *p)
3241{ 3286{
3242 int ldisc; 3287 int ldisc;
3288 int ret;
3243 3289
3244 if (get_user(ldisc, p)) 3290 if (get_user(ldisc, p))
3245 return -EFAULT; 3291 return -EFAULT;
3246 return tty_set_ldisc(tty, ldisc); 3292
3293 lock_kernel();
3294 ret = tty_set_ldisc(tty, ldisc);
3295 unlock_kernel();
3296
3297 return ret;
3247} 3298}
3248 3299
3249/** 3300/**
@@ -3263,18 +3314,18 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
3263{ 3314{
3264 if (tty_write_lock(tty, 0) < 0) 3315 if (tty_write_lock(tty, 0) < 0)
3265 return -EINTR; 3316 return -EINTR;
3266 tty->driver->break_ctl(tty, -1); 3317 tty->ops->break_ctl(tty, -1);
3267 if (!signal_pending(current)) 3318 if (!signal_pending(current))
3268 msleep_interruptible(duration); 3319 msleep_interruptible(duration);
3269 tty->driver->break_ctl(tty, 0); 3320 tty->ops->break_ctl(tty, 0);
3270 tty_write_unlock(tty); 3321 tty_write_unlock(tty);
3271	if (signal_pending(current)) 3322	if (signal_pending(current))
3272 return -EINTR; 3323 return -EINTR;
3273 return 0; 3324 return 0;
3274} 3325}
3275 3326
3276/** 3327/**
3277 * tiocmget - get modem status 3328 * tty_tiocmget - get modem status
3278 * @tty: tty device 3329 * @tty: tty device
3279 * @file: user file pointer 3330 * @file: user file pointer
3280 * @p: pointer to result 3331 * @p: pointer to result
@@ -3289,8 +3340,8 @@ static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p
3289{ 3340{
3290 int retval = -EINVAL; 3341 int retval = -EINVAL;
3291 3342
3292 if (tty->driver->tiocmget) { 3343 if (tty->ops->tiocmget) {
3293 retval = tty->driver->tiocmget(tty, file); 3344 retval = tty->ops->tiocmget(tty, file);
3294 3345
3295 if (retval >= 0) 3346 if (retval >= 0)
3296 retval = put_user(retval, p); 3347 retval = put_user(retval, p);
@@ -3299,7 +3350,7 @@ static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p
3299} 3350}
3300 3351
3301/** 3352/**
3302 * tiocmset - set modem status 3353 * tty_tiocmset - set modem status
3303 * @tty: tty device 3354 * @tty: tty device
3304 * @file: user file pointer 3355 * @file: user file pointer
3305 * @cmd: command - clear bits, set bits or set all 3356 * @cmd: command - clear bits, set bits or set all
@@ -3316,7 +3367,7 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
3316{ 3367{
3317 int retval = -EINVAL; 3368 int retval = -EINVAL;
3318 3369
3319 if (tty->driver->tiocmset) { 3370 if (tty->ops->tiocmset) {
3320 unsigned int set, clear, val; 3371 unsigned int set, clear, val;
3321 3372
3322 retval = get_user(val, p); 3373 retval = get_user(val, p);
@@ -3340,7 +3391,7 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
3340 set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; 3391 set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
3341 clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; 3392 clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
3342 3393
3343 retval = tty->driver->tiocmset(tty, file, set, clear); 3394 retval = tty->ops->tiocmset(tty, file, set, clear);
3344 } 3395 }
3345 return retval; 3396 return retval;
3346} 3397}
@@ -3348,20 +3399,18 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
3348/* 3399/*
3349 * Split this up, as gcc can choke on it otherwise.. 3400 * Split this up, as gcc can choke on it otherwise..
3350 */ 3401 */
3351int tty_ioctl(struct inode *inode, struct file *file, 3402long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3352 unsigned int cmd, unsigned long arg)
3353{ 3403{
3354 struct tty_struct *tty, *real_tty; 3404 struct tty_struct *tty, *real_tty;
3355 void __user *p = (void __user *)arg; 3405 void __user *p = (void __user *)arg;
3356 int retval; 3406 int retval;
3357 struct tty_ldisc *ld; 3407 struct tty_ldisc *ld;
3408 struct inode *inode = file->f_dentry->d_inode;
3358 3409
3359 tty = (struct tty_struct *)file->private_data; 3410 tty = (struct tty_struct *)file->private_data;
3360 if (tty_paranoia_check(tty, inode, "tty_ioctl")) 3411 if (tty_paranoia_check(tty, inode, "tty_ioctl"))
3361 return -EINVAL; 3412 return -EINVAL;
3362 3413
3363 /* CHECKME: is this safe as one end closes ? */
3364
3365 real_tty = tty; 3414 real_tty = tty;
3366 if (tty->driver->type == TTY_DRIVER_TYPE_PTY && 3415 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
3367 tty->driver->subtype == PTY_TYPE_MASTER) 3416 tty->driver->subtype == PTY_TYPE_MASTER)
@@ -3370,21 +3419,28 @@ int tty_ioctl(struct inode *inode, struct file *file,
3370 /* 3419 /*
3371 * Break handling by driver 3420 * Break handling by driver
3372 */ 3421 */
3373 if (!tty->driver->break_ctl) { 3422
3423 retval = -EINVAL;
3424
3425 if (!tty->ops->break_ctl) {
3374 switch (cmd) { 3426 switch (cmd) {
3375 case TIOCSBRK: 3427 case TIOCSBRK:
3376 case TIOCCBRK: 3428 case TIOCCBRK:
3377 if (tty->driver->ioctl) 3429 if (tty->ops->ioctl)
3378 return tty->driver->ioctl(tty, file, cmd, arg); 3430 retval = tty->ops->ioctl(tty, file, cmd, arg);
3379 return -EINVAL; 3431 if (retval != -EINVAL && retval != -ENOIOCTLCMD)
3432 printk(KERN_WARNING "tty: driver %s needs updating to use break_ctl\n", tty->driver->name);
3433 return retval;
3380 3434
3381 /* These two ioctl's always return success; even if */ 3435 /* These two ioctl's always return success; even if */
3382 /* the driver doesn't support them. */ 3436 /* the driver doesn't support them. */
3383 case TCSBRK: 3437 case TCSBRK:
3384 case TCSBRKP: 3438 case TCSBRKP:
3385 if (!tty->driver->ioctl) 3439 if (!tty->ops->ioctl)
3386 return 0; 3440 return 0;
3387 retval = tty->driver->ioctl(tty, file, cmd, arg); 3441 retval = tty->ops->ioctl(tty, file, cmd, arg);
3442 if (retval != -EINVAL && retval != -ENOIOCTLCMD)
3443 printk(KERN_WARNING "tty: driver %s needs updating to use break_ctl\n", tty->driver->name);
3388 if (retval == -ENOIOCTLCMD) 3444 if (retval == -ENOIOCTLCMD)
3389 retval = 0; 3445 retval = 0;
3390 return retval; 3446 return retval;
@@ -3442,7 +3498,6 @@ int tty_ioctl(struct inode *inode, struct file *file,
3442 case TIOCGSID: 3498 case TIOCGSID:
3443 return tiocgsid(tty, real_tty, p); 3499 return tiocgsid(tty, real_tty, p);
3444 case TIOCGETD: 3500 case TIOCGETD:
3445 /* FIXME: check this is ok */
3446 return put_user(tty->ldisc.num, (int __user *)p); 3501 return put_user(tty->ldisc.num, (int __user *)p);
3447 case TIOCSETD: 3502 case TIOCSETD:
3448 return tiocsetd(tty, p); 3503 return tiocsetd(tty, p);
@@ -3454,11 +3509,13 @@ int tty_ioctl(struct inode *inode, struct file *file,
3454 * Break handling 3509 * Break handling
3455 */ 3510 */
3456 case TIOCSBRK: /* Turn break on, unconditionally */ 3511 case TIOCSBRK: /* Turn break on, unconditionally */
3457 tty->driver->break_ctl(tty, -1); 3512 if (tty->ops->break_ctl)
3513 tty->ops->break_ctl(tty, -1);
3458 return 0; 3514 return 0;
3459 3515
3460 case TIOCCBRK: /* Turn break off, unconditionally */ 3516 case TIOCCBRK: /* Turn break off, unconditionally */
3461 tty->driver->break_ctl(tty, 0); 3517 if (tty->ops->break_ctl)
3518 tty->ops->break_ctl(tty, 0);
3462 return 0; 3519 return 0;
3463 case TCSBRK: /* SVID version: non-zero arg --> no break */ 3520 case TCSBRK: /* SVID version: non-zero arg --> no break */
3464 /* non-zero arg means wait for all output data 3521 /* non-zero arg means wait for all output data
@@ -3487,8 +3544,8 @@ int tty_ioctl(struct inode *inode, struct file *file,
3487 } 3544 }
3488 break; 3545 break;
3489 } 3546 }
3490 if (tty->driver->ioctl) { 3547 if (tty->ops->ioctl) {
3491 retval = (tty->driver->ioctl)(tty, file, cmd, arg); 3548 retval = (tty->ops->ioctl)(tty, file, cmd, arg);
3492 if (retval != -ENOIOCTLCMD) 3549 if (retval != -ENOIOCTLCMD)
3493 return retval; 3550 return retval;
3494 } 3551 }
@@ -3515,8 +3572,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
3515 if (tty_paranoia_check(tty, inode, "tty_ioctl")) 3572 if (tty_paranoia_check(tty, inode, "tty_ioctl"))
3516 return -EINVAL; 3573 return -EINVAL;
3517 3574
3518 if (tty->driver->compat_ioctl) { 3575 if (tty->ops->compat_ioctl) {
3519 retval = (tty->driver->compat_ioctl)(tty, file, cmd, arg); 3576 retval = (tty->ops->compat_ioctl)(tty, file, cmd, arg);
3520 if (retval != -ENOIOCTLCMD) 3577 if (retval != -ENOIOCTLCMD)
3521 return retval; 3578 return retval;
3522 } 3579 }
@@ -3566,8 +3623,7 @@ void __do_SAK(struct tty_struct *tty)
3566 3623
3567 tty_ldisc_flush(tty); 3624 tty_ldisc_flush(tty);
3568 3625
3569 if (tty->driver->flush_buffer) 3626 tty_driver_flush_buffer(tty);
3570 tty->driver->flush_buffer(tty);
3571 3627
3572 read_lock(&tasklist_lock); 3628 read_lock(&tasklist_lock);
3573 /* Kill the entire session */ 3629 /* Kill the entire session */
@@ -3773,19 +3829,32 @@ static void initialize_tty_struct(struct tty_struct *tty)
3773 mutex_init(&tty->atomic_read_lock); 3829 mutex_init(&tty->atomic_read_lock);
3774 mutex_init(&tty->atomic_write_lock); 3830 mutex_init(&tty->atomic_write_lock);
3775 spin_lock_init(&tty->read_lock); 3831 spin_lock_init(&tty->read_lock);
3832 spin_lock_init(&tty->ctrl_lock);
3776 INIT_LIST_HEAD(&tty->tty_files); 3833 INIT_LIST_HEAD(&tty->tty_files);
3777 INIT_WORK(&tty->SAK_work, do_SAK_work); 3834 INIT_WORK(&tty->SAK_work, do_SAK_work);
3778} 3835}
3779 3836
3780/* 3837/**
3781 * The default put_char routine if the driver did not define one. 3838 * tty_put_char - write one character to a tty
3839 * @tty: tty
3840 * @ch: character
3841 *
3842 * Write one byte to the tty using the provided put_char method
3843 * if present. Returns the number of characters successfully output.
3844 *
3845 * Note: the specific put_char operation in the driver layer may go
3846 * away soon. Don't call it directly, use this method
3782 */ 3847 */
3783 3848
3784static void tty_default_put_char(struct tty_struct *tty, unsigned char ch) 3849int tty_put_char(struct tty_struct *tty, unsigned char ch)
3785{ 3850{
3786 tty->driver->write(tty, &ch, 1); 3851 if (tty->ops->put_char)
3852 return tty->ops->put_char(tty, ch);
3853 return tty->ops->write(tty, &ch, 1);
3787} 3854}
3788 3855
3856EXPORT_SYMBOL_GPL(tty_put_char);
3857
3789static struct class *tty_class; 3858static struct class *tty_class;
3790 3859
3791/** 3860/**
@@ -3868,37 +3937,8 @@ void put_tty_driver(struct tty_driver *driver)
3868void tty_set_operations(struct tty_driver *driver, 3937void tty_set_operations(struct tty_driver *driver,
3869 const struct tty_operations *op) 3938 const struct tty_operations *op)
3870{ 3939{
3871 driver->open = op->open; 3940 driver->ops = op;
3872 driver->close = op->close; 3941};
3873 driver->write = op->write;
3874 driver->put_char = op->put_char;
3875 driver->flush_chars = op->flush_chars;
3876 driver->write_room = op->write_room;
3877 driver->chars_in_buffer = op->chars_in_buffer;
3878 driver->ioctl = op->ioctl;
3879 driver->compat_ioctl = op->compat_ioctl;
3880 driver->set_termios = op->set_termios;
3881 driver->throttle = op->throttle;
3882 driver->unthrottle = op->unthrottle;
3883 driver->stop = op->stop;
3884 driver->start = op->start;
3885 driver->hangup = op->hangup;
3886 driver->break_ctl = op->break_ctl;
3887 driver->flush_buffer = op->flush_buffer;
3888 driver->set_ldisc = op->set_ldisc;
3889 driver->wait_until_sent = op->wait_until_sent;
3890 driver->send_xchar = op->send_xchar;
3891 driver->read_proc = op->read_proc;
3892 driver->write_proc = op->write_proc;
3893 driver->tiocmget = op->tiocmget;
3894 driver->tiocmset = op->tiocmset;
3895#ifdef CONFIG_CONSOLE_POLL
3896 driver->poll_init = op->poll_init;
3897 driver->poll_get_char = op->poll_get_char;
3898 driver->poll_put_char = op->poll_put_char;
3899#endif
3900}
3901
3902 3942
3903EXPORT_SYMBOL(alloc_tty_driver); 3943EXPORT_SYMBOL(alloc_tty_driver);
3904EXPORT_SYMBOL(put_tty_driver); 3944EXPORT_SYMBOL(put_tty_driver);
@@ -3961,9 +4001,6 @@ int tty_register_driver(struct tty_driver *driver)
3961 return error; 4001 return error;
3962 } 4002 }
3963 4003
3964 if (!driver->put_char)
3965 driver->put_char = tty_default_put_char;
3966
3967 mutex_lock(&tty_mutex); 4004 mutex_lock(&tty_mutex);
3968 list_add(&driver->tty_drivers, &tty_drivers); 4005 list_add(&driver->tty_drivers, &tty_drivers);
3969 mutex_unlock(&tty_mutex); 4006 mutex_unlock(&tty_mutex);
@@ -4039,14 +4076,19 @@ void proc_clear_tty(struct task_struct *p)
4039} 4076}
4040EXPORT_SYMBOL(proc_clear_tty); 4077EXPORT_SYMBOL(proc_clear_tty);
4041 4078
4079/* Called under the sighand lock */
4080
4042static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty) 4081static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
4043{ 4082{
4044 if (tty) { 4083 if (tty) {
4045 /* We should not have a session or pgrp to here but.... */ 4084 unsigned long flags;
4085 /* We should not have a session or pgrp to put here but.... */
4086 spin_lock_irqsave(&tty->ctrl_lock, flags);
4046 put_pid(tty->session); 4087 put_pid(tty->session);
4047 put_pid(tty->pgrp); 4088 put_pid(tty->pgrp);
4048 tty->session = get_pid(task_session(tsk));
4049 tty->pgrp = get_pid(task_pgrp(tsk)); 4089 tty->pgrp = get_pid(task_pgrp(tsk));
4090 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
4091 tty->session = get_pid(task_session(tsk));
4050 } 4092 }
4051 put_pid(tsk->signal->tty_old_pgrp); 4093 put_pid(tsk->signal->tty_old_pgrp);
4052 tsk->signal->tty = tty; 4094 tsk->signal->tty = tty;
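
A minimal usage sketch, not taken from the patch: how a caller might use the tty_get_pgrp() helper added in the tty_io.c hunks above instead of dereferencing tty->pgrp directly. It assumes the 2.6.26-era tty/pid interfaces shown in this diff; the function name below is invented for illustration.

/*
 * Sketch only, assuming the tty_get_pgrp()/put_pid()/kill_pgrp() API
 * used in the hunks above; example_signal_fg_pgrp() is hypothetical.
 */
#include <linux/tty.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/signal.h>

static void example_signal_fg_pgrp(struct tty_struct *tty)
{
	/* tty_get_pgrp() takes tty->ctrl_lock and returns a counted reference */
	struct pid *pgrp = tty_get_pgrp(tty);

	if (pgrp)
		kill_pgrp(pgrp, SIGWINCH, 1);	/* signal the foreground group */
	put_pid(pgrp);				/* put_pid(NULL) is a no-op */
}
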
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index f95a80b2265f..b1a757a5ee27 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/bitops.h> 22#include <linux/bitops.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/smp_lock.h>
24 25
25#include <asm/io.h> 26#include <asm/io.h>
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
@@ -39,6 +40,50 @@
39#define TERMIOS_OLD 8 40#define TERMIOS_OLD 8
40 41
41 42
43int tty_chars_in_buffer(struct tty_struct *tty)
44{
45 if (tty->ops->chars_in_buffer)
46 return tty->ops->chars_in_buffer(tty);
47 else
48 return 0;
49}
50
51EXPORT_SYMBOL(tty_chars_in_buffer);
52
53int tty_write_room(struct tty_struct *tty)
54{
55 if (tty->ops->write_room)
56 return tty->ops->write_room(tty);
57 return 2048;
58}
59
60EXPORT_SYMBOL(tty_write_room);
61
62void tty_driver_flush_buffer(struct tty_struct *tty)
63{
64 if (tty->ops->flush_buffer)
65 tty->ops->flush_buffer(tty);
66}
67
68EXPORT_SYMBOL(tty_driver_flush_buffer);
69
70void tty_throttle(struct tty_struct *tty)
71{
72 /* check TTY_THROTTLED first so it indicates our state */
73 if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
74 tty->ops->throttle)
75 tty->ops->throttle(tty);
76}
77EXPORT_SYMBOL(tty_throttle);
78
79void tty_unthrottle(struct tty_struct *tty)
80{
81 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
82 tty->ops->unthrottle)
83 tty->ops->unthrottle(tty);
84}
85EXPORT_SYMBOL(tty_unthrottle);
86
42/** 87/**
43 * tty_wait_until_sent - wait for I/O to finish 88 * tty_wait_until_sent - wait for I/O to finish
44 * @tty: tty we are waiting for 89 * @tty: tty we are waiting for
@@ -57,15 +102,13 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
57 102
58 printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty, buf)); 103 printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty, buf));
59#endif 104#endif
60 if (!tty->driver->chars_in_buffer)
61 return;
62 if (!timeout) 105 if (!timeout)
63 timeout = MAX_SCHEDULE_TIMEOUT; 106 timeout = MAX_SCHEDULE_TIMEOUT;
64 if (wait_event_interruptible_timeout(tty->write_wait, 107 if (wait_event_interruptible_timeout(tty->write_wait,
65 !tty->driver->chars_in_buffer(tty), timeout) < 0) 108 !tty_chars_in_buffer(tty), timeout) >= 0) {
66 return; 109 if (tty->ops->wait_until_sent)
67 if (tty->driver->wait_until_sent) 110 tty->ops->wait_until_sent(tty, timeout);
68 tty->driver->wait_until_sent(tty, timeout); 111 }
69} 112}
70EXPORT_SYMBOL(tty_wait_until_sent); 113EXPORT_SYMBOL(tty_wait_until_sent);
71 114
@@ -393,8 +436,9 @@ EXPORT_SYMBOL(tty_termios_hw_change);
393static void change_termios(struct tty_struct *tty, struct ktermios *new_termios) 436static void change_termios(struct tty_struct *tty, struct ktermios *new_termios)
394{ 437{
395 int canon_change; 438 int canon_change;
396 struct ktermios old_termios = *tty->termios; 439 struct ktermios old_termios;
397 struct tty_ldisc *ld; 440 struct tty_ldisc *ld;
441 unsigned long flags;
398 442
399 /* 443 /*
400 * Perform the actual termios internal changes under lock. 444 * Perform the actual termios internal changes under lock.
@@ -404,7 +448,7 @@ static void change_termios(struct tty_struct *tty, struct ktermios *new_termios)
404 /* FIXME: we need to decide on some locking/ordering semantics 448 /* FIXME: we need to decide on some locking/ordering semantics
405 for the set_termios notification eventually */ 449 for the set_termios notification eventually */
406 mutex_lock(&tty->termios_mutex); 450 mutex_lock(&tty->termios_mutex);
407 451 old_termios = *tty->termios;
408 *tty->termios = *new_termios; 452 *tty->termios = *new_termios;
409 unset_locked_termios(tty->termios, &old_termios, tty->termios_locked); 453 unset_locked_termios(tty->termios, &old_termios, tty->termios_locked);
410 canon_change = (old_termios.c_lflag ^ tty->termios->c_lflag) & ICANON; 454 canon_change = (old_termios.c_lflag ^ tty->termios->c_lflag) & ICANON;
@@ -429,17 +473,19 @@ static void change_termios(struct tty_struct *tty, struct ktermios *new_termios)
429 STOP_CHAR(tty) == '\023' && 473 STOP_CHAR(tty) == '\023' &&
430 START_CHAR(tty) == '\021'); 474 START_CHAR(tty) == '\021');
431 if (old_flow != new_flow) { 475 if (old_flow != new_flow) {
476 spin_lock_irqsave(&tty->ctrl_lock, flags);
432 tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP); 477 tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
433 if (new_flow) 478 if (new_flow)
434 tty->ctrl_status |= TIOCPKT_DOSTOP; 479 tty->ctrl_status |= TIOCPKT_DOSTOP;
435 else 480 else
436 tty->ctrl_status |= TIOCPKT_NOSTOP; 481 tty->ctrl_status |= TIOCPKT_NOSTOP;
482 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
437 wake_up_interruptible(&tty->link->read_wait); 483 wake_up_interruptible(&tty->link->read_wait);
438 } 484 }
439 } 485 }
440 486
441 if (tty->driver->set_termios) 487 if (tty->ops->set_termios)
442 (*tty->driver->set_termios)(tty, &old_termios); 488 (*tty->ops->set_termios)(tty, &old_termios);
443 else 489 else
444 tty_termios_copy_hw(tty->termios, &old_termios); 490 tty_termios_copy_hw(tty->termios, &old_termios);
445 491
@@ -474,7 +520,9 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
474 if (retval) 520 if (retval)
475 return retval; 521 return retval;
476 522
523 mutex_lock(&tty->termios_mutex);
477 memcpy(&tmp_termios, tty->termios, sizeof(struct ktermios)); 524 memcpy(&tmp_termios, tty->termios, sizeof(struct ktermios));
525 mutex_unlock(&tty->termios_mutex);
478 526
479 if (opt & TERMIOS_TERMIO) { 527 if (opt & TERMIOS_TERMIO) {
480 if (user_termio_to_kernel_termios(&tmp_termios, 528 if (user_termio_to_kernel_termios(&tmp_termios,
@@ -660,12 +708,14 @@ static int get_tchars(struct tty_struct *tty, struct tchars __user *tchars)
660{ 708{
661 struct tchars tmp; 709 struct tchars tmp;
662 710
711 mutex_lock(&tty->termios_mutex);
663 tmp.t_intrc = tty->termios->c_cc[VINTR]; 712 tmp.t_intrc = tty->termios->c_cc[VINTR];
664 tmp.t_quitc = tty->termios->c_cc[VQUIT]; 713 tmp.t_quitc = tty->termios->c_cc[VQUIT];
665 tmp.t_startc = tty->termios->c_cc[VSTART]; 714 tmp.t_startc = tty->termios->c_cc[VSTART];
666 tmp.t_stopc = tty->termios->c_cc[VSTOP]; 715 tmp.t_stopc = tty->termios->c_cc[VSTOP];
667 tmp.t_eofc = tty->termios->c_cc[VEOF]; 716 tmp.t_eofc = tty->termios->c_cc[VEOF];
668 tmp.t_brkc = tty->termios->c_cc[VEOL2]; /* what is brkc anyway? */ 717 tmp.t_brkc = tty->termios->c_cc[VEOL2]; /* what is brkc anyway? */
718 mutex_unlock(&tty->termios_mutex);
669 return copy_to_user(tchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; 719 return copy_to_user(tchars, &tmp, sizeof(tmp)) ? -EFAULT : 0;
670} 720}
671 721
@@ -675,12 +725,14 @@ static int set_tchars(struct tty_struct *tty, struct tchars __user *tchars)
675 725
676 if (copy_from_user(&tmp, tchars, sizeof(tmp))) 726 if (copy_from_user(&tmp, tchars, sizeof(tmp)))
677 return -EFAULT; 727 return -EFAULT;
728 mutex_lock(&tty->termios_mutex);
678 tty->termios->c_cc[VINTR] = tmp.t_intrc; 729 tty->termios->c_cc[VINTR] = tmp.t_intrc;
679 tty->termios->c_cc[VQUIT] = tmp.t_quitc; 730 tty->termios->c_cc[VQUIT] = tmp.t_quitc;
680 tty->termios->c_cc[VSTART] = tmp.t_startc; 731 tty->termios->c_cc[VSTART] = tmp.t_startc;
681 tty->termios->c_cc[VSTOP] = tmp.t_stopc; 732 tty->termios->c_cc[VSTOP] = tmp.t_stopc;
682 tty->termios->c_cc[VEOF] = tmp.t_eofc; 733 tty->termios->c_cc[VEOF] = tmp.t_eofc;
683 tty->termios->c_cc[VEOL2] = tmp.t_brkc; /* what is brkc anyway? */ 734 tty->termios->c_cc[VEOL2] = tmp.t_brkc; /* what is brkc anyway? */
735 mutex_unlock(&tty->termios_mutex);
684 return 0; 736 return 0;
685} 737}
686#endif 738#endif
@@ -690,6 +742,7 @@ static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
690{ 742{
691 struct ltchars tmp; 743 struct ltchars tmp;
692 744
745 mutex_lock(&tty->termios_mutex);
693 tmp.t_suspc = tty->termios->c_cc[VSUSP]; 746 tmp.t_suspc = tty->termios->c_cc[VSUSP];
694 /* what is dsuspc anyway? */ 747 /* what is dsuspc anyway? */
695 tmp.t_dsuspc = tty->termios->c_cc[VSUSP]; 748 tmp.t_dsuspc = tty->termios->c_cc[VSUSP];
@@ -698,6 +751,7 @@ static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
698 tmp.t_flushc = tty->termios->c_cc[VEOL2]; 751 tmp.t_flushc = tty->termios->c_cc[VEOL2];
699 tmp.t_werasc = tty->termios->c_cc[VWERASE]; 752 tmp.t_werasc = tty->termios->c_cc[VWERASE];
700 tmp.t_lnextc = tty->termios->c_cc[VLNEXT]; 753 tmp.t_lnextc = tty->termios->c_cc[VLNEXT];
754 mutex_unlock(&tty->termios_mutex);
701 return copy_to_user(ltchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; 755 return copy_to_user(ltchars, &tmp, sizeof(tmp)) ? -EFAULT : 0;
702} 756}
703 757
@@ -708,6 +762,7 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
708 if (copy_from_user(&tmp, ltchars, sizeof(tmp))) 762 if (copy_from_user(&tmp, ltchars, sizeof(tmp)))
709 return -EFAULT; 763 return -EFAULT;
710 764
765 mutex_lock(&tty->termios_mutex);
711 tty->termios->c_cc[VSUSP] = tmp.t_suspc; 766 tty->termios->c_cc[VSUSP] = tmp.t_suspc;
712 /* what is dsuspc anyway? */ 767 /* what is dsuspc anyway? */
713 tty->termios->c_cc[VEOL2] = tmp.t_dsuspc; 768 tty->termios->c_cc[VEOL2] = tmp.t_dsuspc;
@@ -716,6 +771,7 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
716 tty->termios->c_cc[VEOL2] = tmp.t_flushc; 771 tty->termios->c_cc[VEOL2] = tmp.t_flushc;
717 tty->termios->c_cc[VWERASE] = tmp.t_werasc; 772 tty->termios->c_cc[VWERASE] = tmp.t_werasc;
718 tty->termios->c_cc[VLNEXT] = tmp.t_lnextc; 773 tty->termios->c_cc[VLNEXT] = tmp.t_lnextc;
774 mutex_unlock(&tty->termios_mutex);
719 return 0; 775 return 0;
720} 776}
721#endif 777#endif
@@ -732,8 +788,8 @@ static int send_prio_char(struct tty_struct *tty, char ch)
732{ 788{
733 int was_stopped = tty->stopped; 789 int was_stopped = tty->stopped;
734 790
735 if (tty->driver->send_xchar) { 791 if (tty->ops->send_xchar) {
736 tty->driver->send_xchar(tty, ch); 792 tty->ops->send_xchar(tty, ch);
737 return 0; 793 return 0;
738 } 794 }
739 795
@@ -742,7 +798,7 @@ static int send_prio_char(struct tty_struct *tty, char ch)
742 798
743 if (was_stopped) 799 if (was_stopped)
744 start_tty(tty); 800 start_tty(tty);
745 tty->driver->write(tty, &ch, 1); 801 tty->ops->write(tty, &ch, 1);
746 if (was_stopped) 802 if (was_stopped)
747 stop_tty(tty); 803 stop_tty(tty);
748 tty_write_unlock(tty); 804 tty_write_unlock(tty);
@@ -750,6 +806,33 @@ static int send_prio_char(struct tty_struct *tty, char ch)
750} 806}
751 807
752/** 808/**
809 * tty_change_softcar - carrier change ioctl helper
810 * @tty: tty to update
811 * @arg: enable/disable CLOCAL
812 *
813 * Perform a change to the CLOCAL state and call into the driver
814 * layer to make it visible. All done with the termios mutex
815 */
816
817static int tty_change_softcar(struct tty_struct *tty, int arg)
818{
819 int ret = 0;
820 int bit = arg ? CLOCAL : 0;
821 struct ktermios old;
822
823 mutex_lock(&tty->termios_mutex);
824 old = *tty->termios;
825 tty->termios->c_cflag &= ~CLOCAL;
826 tty->termios->c_cflag |= bit;
827 if (tty->ops->set_termios)
828 tty->ops->set_termios(tty, &old);
829 if ((tty->termios->c_cflag & CLOCAL) != bit)
830 ret = -EINVAL;
831 mutex_unlock(&tty->termios_mutex);
832 return ret;
833}
834
835/**
753 * tty_mode_ioctl - mode related ioctls 836 * tty_mode_ioctl - mode related ioctls
754 * @tty: tty for the ioctl 837 * @tty: tty for the ioctl
755 * @file: file pointer for the tty 838 * @file: file pointer for the tty
@@ -859,12 +942,7 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
859 case TIOCSSOFTCAR: 942 case TIOCSSOFTCAR:
860 if (get_user(arg, (unsigned int __user *) arg)) 943 if (get_user(arg, (unsigned int __user *) arg))
861 return -EFAULT; 944 return -EFAULT;
862 mutex_lock(&tty->termios_mutex); 945 return tty_change_softcar(tty, arg);
863 tty->termios->c_cflag =
864 ((tty->termios->c_cflag & ~CLOCAL) |
865 (arg ? CLOCAL : 0));
866 mutex_unlock(&tty->termios_mutex);
867 return 0;
868 default: 946 default:
869 return -ENOIOCTLCMD; 947 return -ENOIOCTLCMD;
870 } 948 }
@@ -889,8 +967,7 @@ int tty_perform_flush(struct tty_struct *tty, unsigned long arg)
889 ld->flush_buffer(tty); 967 ld->flush_buffer(tty);
890 /* fall through */ 968 /* fall through */
891 case TCOFLUSH: 969 case TCOFLUSH:
892 if (tty->driver->flush_buffer) 970 tty_driver_flush_buffer(tty);
893 tty->driver->flush_buffer(tty);
894 break; 971 break;
895 default: 972 default:
896 tty_ldisc_deref(ld); 973 tty_ldisc_deref(ld);
@@ -905,6 +982,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file,
905 unsigned int cmd, unsigned long arg) 982 unsigned int cmd, unsigned long arg)
906{ 983{
907 struct tty_struct *real_tty; 984 struct tty_struct *real_tty;
985 unsigned long flags;
908 int retval; 986 int retval;
909 987
910 if (tty->driver->type == TTY_DRIVER_TYPE_PTY && 988 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
@@ -946,9 +1024,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file,
946 case TCFLSH: 1024 case TCFLSH:
947 return tty_perform_flush(tty, arg); 1025 return tty_perform_flush(tty, arg);
948 case TIOCOUTQ: 1026 case TIOCOUTQ:
949 return put_user(tty->driver->chars_in_buffer ? 1027 return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
950 tty->driver->chars_in_buffer(tty) : 0,
951 (int __user *) arg);
952 case TIOCINQ: 1028 case TIOCINQ:
953 retval = tty->read_cnt; 1029 retval = tty->read_cnt;
954 if (L_ICANON(tty)) 1030 if (L_ICANON(tty))
@@ -963,6 +1039,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file,
963 return -ENOTTY; 1039 return -ENOTTY;
964 if (get_user(pktmode, (int __user *) arg)) 1040 if (get_user(pktmode, (int __user *) arg))
965 return -EFAULT; 1041 return -EFAULT;
1042 spin_lock_irqsave(&tty->ctrl_lock, flags);
966 if (pktmode) { 1043 if (pktmode) {
967 if (!tty->packet) { 1044 if (!tty->packet) {
968 tty->packet = 1; 1045 tty->packet = 1;
@@ -970,6 +1047,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file,
970 } 1047 }
971 } else 1048 } else
972 tty->packet = 0; 1049 tty->packet = 0;
1050 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
973 return 0; 1051 return 0;
974 } 1052 }
975 default: 1053 default:
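
A short sketch of the NULL-safe wrappers introduced at the top of tty_ioctl.c above (tty_chars_in_buffer(), tty_write_room(), tty_driver_flush_buffer(), tty_throttle()). The function names below are invented for illustration and do not exist in the kernel.

#include <linux/tty.h>
#include <asm/uaccess.h>

static int example_tiocoutq(struct tty_struct *tty, int __user *p)
{
	/* Replaces the old open-coded "tty->driver->chars_in_buffer ? ... : 0" */
	return put_user(tty_chars_in_buffer(tty), p);
}

static void example_pause_input(struct tty_struct *tty)
{
	/* Sets TTY_THROTTLED first, then calls the driver method if present */
	tty_throttle(tty);
	/* tty_driver_flush_buffer() is a no-op when flush_buffer is absent */
	tty_driver_flush_buffer(tty);
}
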
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c
index 8de6b95aeb84..3d3e1c2b310f 100644
--- a/drivers/char/viocons.c
+++ b/drivers/char/viocons.c
@@ -628,13 +628,13 @@ static int viotty_write(struct tty_struct *tty, const unsigned char *buf,
628/* 628/*
629 * TTY put_char method 629 * TTY put_char method
630 */ 630 */
631static void viotty_put_char(struct tty_struct *tty, unsigned char ch) 631static int viotty_put_char(struct tty_struct *tty, unsigned char ch)
632{ 632{
633 struct port_info *pi; 633 struct port_info *pi;
634 634
635 pi = get_port_data(tty); 635 pi = get_port_data(tty);
636 if (pi == NULL) 636 if (pi == NULL)
637 return; 637 return 0;
638 638
639 /* This will append '\r' as well if the char is '\n' */ 639 /* This will append '\r' as well if the char is '\n' */
640 if (viochar_is_console(pi)) 640 if (viochar_is_console(pi))
@@ -642,6 +642,7 @@ static void viotty_put_char(struct tty_struct *tty, unsigned char ch)
642 642
643 if (viopath_isactive(pi->lp)) 643 if (viopath_isactive(pi->lp))
644 internal_write(pi, &ch, 1); 644 internal_write(pi, &ch, 1);
645 return 1;
645} 646}
646 647
647/* 648/*
@@ -704,8 +705,11 @@ static int viotty_ioctl(struct tty_struct *tty, struct file *file,
704 case KDSKBLED: 705 case KDSKBLED:
705 return 0; 706 return 0;
706 } 707 }
707 708 /* FIXME: WTF is this being called for ??? */
708 return n_tty_ioctl(tty, file, cmd, arg); 709 lock_kernel();
710 ret = n_tty_ioctl(tty, file, cmd, arg);
711 unlock_kernel();
712 return ret;
709} 713}
710 714
711/* 715/*
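
The viocons hunk above follows the tree-wide change of the put_char operation from void to int (the number of characters queued). A hedged sketch of the same conversion on an invented driver, shown only to illustrate the new return convention:

#include <linux/tty.h>

/* Invented private data and queue helper, for illustration only. */
struct example_port {
	unsigned char	buf[64];
	unsigned int	head;
};

static int example_queue_byte(struct example_port *port, unsigned char ch)
{
	if (port->head >= sizeof(port->buf))
		return 0;			/* buffer full: nothing queued */
	port->buf[port->head++] = ch;
	return 1;				/* one character queued */
}

/* New-style put_char: report how many characters were accepted (0 or 1). */
static int example_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct example_port *port = tty->driver_data;

	if (!port)
		return 0;
	return example_queue_byte(port, ch);
}
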
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index db7a731e2362..58aad63831f4 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -249,6 +249,7 @@ static int proc_viotape_open(struct inode *inode, struct file *file)
249} 249}
250 250
251static const struct file_operations proc_viotape_operations = { 251static const struct file_operations proc_viotape_operations = {
252 .owner = THIS_MODULE,
252 .open = proc_viotape_open, 253 .open = proc_viotape_open,
253 .read = seq_read, 254 .read = seq_read,
254 .llseek = seq_lseek, 255 .llseek = seq_lseek,
@@ -915,7 +916,6 @@ static struct vio_driver viotape_driver = {
915int __init viotap_init(void) 916int __init viotap_init(void)
916{ 917{
917 int ret; 918 int ret;
918 struct proc_dir_entry *e;
919 919
920 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 920 if (!firmware_has_feature(FW_FEATURE_ISERIES))
921 return -ENODEV; 921 return -ENODEV;
@@ -968,11 +968,8 @@ int __init viotap_init(void)
968 if (ret) 968 if (ret)
969 goto unreg_class; 969 goto unreg_class;
970 970
971 e = create_proc_entry("iSeries/viotape", S_IFREG|S_IRUGO, NULL); 971 proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
972 if (e) { 972 &proc_viotape_operations);
973 e->owner = THIS_MODULE;
974 e->proc_fops = &proc_viotape_operations;
975 }
976 973
977 return 0; 974 return 0;
978 975
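
The viotape hunk above replaces create_proc_entry() plus manual field assignment with a single proc_create() call, with module ownership moving into the file_operations table. A minimal sketch of the same pattern, using invented names:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner	 = THIS_MODULE,			/* was e->owner = THIS_MODULE */
	.open	 = example_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __init example_proc_init(void)
{
	/* One call registers the entry and attaches the fops */
	if (!proc_create("driver/example", S_IFREG | S_IRUGO, NULL,
			 &example_proc_fops))
		return -ENOMEM;
	return 0;
}
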
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index df4c3ead9e2b..e458b08139af 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -301,7 +301,7 @@ static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
301 d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); 301 d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
302 s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr)); 302 s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
303 scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row); 303 scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
304 scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char, 304 scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_scrl_erase_char,
305 vc->vc_size_row * nr); 305 vc->vc_size_row * nr);
306} 306}
307 307
@@ -319,7 +319,7 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
319 s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); 319 s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
320 step = vc->vc_cols * nr; 320 step = vc->vc_cols * nr;
321 scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row); 321 scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
322 scr_memsetw(s, vc->vc_video_erase_char, 2 * step); 322 scr_memsetw(s, vc->vc_scrl_erase_char, 2 * step);
323} 323}
324 324
325static void do_update_region(struct vc_data *vc, unsigned long start, int count) 325static void do_update_region(struct vc_data *vc, unsigned long start, int count)
@@ -400,7 +400,7 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
400 * Bit 7 : blink 400 * Bit 7 : blink
401 */ 401 */
402 { 402 {
403 u8 a = vc->vc_color; 403 u8 a = _color;
404 if (!vc->vc_can_do_color) 404 if (!vc->vc_can_do_color)
405 return _intensity | 405 return _intensity |
406 (_italic ? 2 : 0) | 406 (_italic ? 2 : 0) |
@@ -434,6 +434,7 @@ static void update_attr(struct vc_data *vc)
434 vc->vc_blink, vc->vc_underline, 434 vc->vc_blink, vc->vc_underline,
435 vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic); 435 vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic);
436 vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' '; 436 vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' ';
437 vc->vc_scrl_erase_char = (build_attr(vc, vc->vc_def_color, 1, false, false, false, false) << 8) | ' ';
437} 438}
438 439
439/* Note: inverting the screen twice should revert to the original state */ 440/* Note: inverting the screen twice should revert to the original state */
@@ -908,15 +909,21 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
908 909
909 if (vc->vc_tty) { 910 if (vc->vc_tty) {
910 struct winsize ws, *cws = &vc->vc_tty->winsize; 911 struct winsize ws, *cws = &vc->vc_tty->winsize;
912 unsigned long flags;
911 913
912 memset(&ws, 0, sizeof(ws)); 914 memset(&ws, 0, sizeof(ws));
913 ws.ws_row = vc->vc_rows; 915 ws.ws_row = vc->vc_rows;
914 ws.ws_col = vc->vc_cols; 916 ws.ws_col = vc->vc_cols;
915 ws.ws_ypixel = vc->vc_scan_lines; 917 ws.ws_ypixel = vc->vc_scan_lines;
918
919 mutex_lock(&vc->vc_tty->termios_mutex);
920 spin_lock_irqsave(&vc->vc_tty->ctrl_lock, flags);
916 if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col) && 921 if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col) &&
917 vc->vc_tty->pgrp) 922 vc->vc_tty->pgrp)
918 kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1); 923 kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1);
924 spin_unlock_irqrestore(&vc->vc_tty->ctrl_lock, flags);
919 *cws = ws; 925 *cws = ws;
926 mutex_unlock(&vc->vc_tty->termios_mutex);
920 } 927 }
921 928
922 if (CON_IS_VISIBLE(vc)) 929 if (CON_IS_VISIBLE(vc))
@@ -2540,6 +2547,9 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2540 if (get_user(type, p)) 2547 if (get_user(type, p))
2541 return -EFAULT; 2548 return -EFAULT;
2542 ret = 0; 2549 ret = 0;
2550
2551 lock_kernel();
2552
2543 switch (type) 2553 switch (type)
2544 { 2554 {
2545 case TIOCL_SETSEL: 2555 case TIOCL_SETSEL:
@@ -2559,7 +2569,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2559 ret = sel_loadlut(p); 2569 ret = sel_loadlut(p);
2560 break; 2570 break;
2561 case TIOCL_GETSHIFTSTATE: 2571 case TIOCL_GETSHIFTSTATE:
2562 2572
2563 /* 2573 /*
2564 * Make it possible to react to Shift+Mousebutton. 2574 * Make it possible to react to Shift+Mousebutton.
2565 * Note that 'shift_state' is an undocumented 2575 * Note that 'shift_state' is an undocumented
@@ -2614,6 +2624,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2614 ret = -EINVAL; 2624 ret = -EINVAL;
2615 break; 2625 break;
2616 } 2626 }
2627 unlock_kernel();
2617 return ret; 2628 return ret;
2618} 2629}
2619 2630
@@ -2631,11 +2642,11 @@ static int con_write(struct tty_struct *tty, const unsigned char *buf, int count
2631 return retval; 2642 return retval;
2632} 2643}
2633 2644
2634static void con_put_char(struct tty_struct *tty, unsigned char ch) 2645static int con_put_char(struct tty_struct *tty, unsigned char ch)
2635{ 2646{
2636 if (in_interrupt()) 2647 if (in_interrupt())
2637 return; /* n_r3964 calls put_char() from interrupt context */ 2648 return 0; /* n_r3964 calls put_char() from interrupt context */
2638 do_con_write(tty, &ch, 1); 2649 return do_con_write(tty, &ch, 1);
2639} 2650}
2640 2651
2641static int con_write_room(struct tty_struct *tty) 2652static int con_write_room(struct tty_struct *tty)
@@ -3828,7 +3839,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
3828 goto out; 3839 goto out;
3829 3840
3830 c = (font.width+7)/8 * 32 * font.charcount; 3841 c = (font.width+7)/8 * 32 * font.charcount;
3831 3842
3832 if (op->data && font.charcount > op->charcount) 3843 if (op->data && font.charcount > op->charcount)
3833 rc = -ENOSPC; 3844 rc = -ENOSPC;
3834 if (!(op->flags & KD_FONT_FLAG_OLD)) { 3845 if (!(op->flags & KD_FONT_FLAG_OLD)) {
@@ -3993,6 +4004,7 @@ u16 screen_glyph(struct vc_data *vc, int offset)
3993 c |= 0x100; 4004 c |= 0x100;
3994 return c; 4005 return c;
3995} 4006}
4007EXPORT_SYMBOL_GPL(screen_glyph);
3996 4008
3997/* used by vcs - note the word offset */ 4009/* used by vcs - note the word offset */
3998unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed) 4010unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
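
The vt.c hunks above (like the tty_io.c changes earlier in this diff) put every direct use of tty->pgrp under the new tty->ctrl_lock. A brief sketch of that locking idiom, mirroring what vc_resize() does above; the caller is invented for illustration.

#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/signal.h>

static void example_notify_winch(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->ctrl_lock, flags);
	if (tty->pgrp)
		kill_pgrp(tty->pgrp, SIGWINCH, 1);	/* pgrp is stable under ctrl_lock */
	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
}
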
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index e6f89e8b9258..3211afd9d57e 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -373,11 +373,17 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
373 unsigned char ucval; 373 unsigned char ucval;
374 void __user *up = (void __user *)arg; 374 void __user *up = (void __user *)arg;
375 int i, perm; 375 int i, perm;
376 376 int ret = 0;
377
377 console = vc->vc_num; 378 console = vc->vc_num;
378 379
379 if (!vc_cons_allocated(console)) /* impossible? */ 380 lock_kernel();
380 return -ENOIOCTLCMD; 381
382 if (!vc_cons_allocated(console)) { /* impossible? */
383 ret = -ENOIOCTLCMD;
384 goto out;
385 }
386
381 387
382 /* 388 /*
383 * To have permissions to do most of the vt ioctls, we either have 389 * To have permissions to do most of the vt ioctls, we either have
@@ -391,15 +397,15 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
391 switch (cmd) { 397 switch (cmd) {
392 case KIOCSOUND: 398 case KIOCSOUND:
393 if (!perm) 399 if (!perm)
394 return -EPERM; 400 goto eperm;
395 if (arg) 401 if (arg)
396 arg = CLOCK_TICK_RATE / arg; 402 arg = CLOCK_TICK_RATE / arg;
397 kd_mksound(arg, 0); 403 kd_mksound(arg, 0);
398 return 0; 404 break;
399 405
400 case KDMKTONE: 406 case KDMKTONE:
401 if (!perm) 407 if (!perm)
402 return -EPERM; 408 goto eperm;
403 { 409 {
404 unsigned int ticks, count; 410 unsigned int ticks, count;
405 411
@@ -412,7 +418,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
412 if (count) 418 if (count)
413 count = CLOCK_TICK_RATE / count; 419 count = CLOCK_TICK_RATE / count;
414 kd_mksound(count, ticks); 420 kd_mksound(count, ticks);
415 return 0; 421 break;
416 } 422 }
417 423
418 case KDGKBTYPE: 424 case KDGKBTYPE:
@@ -435,14 +441,18 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
435 * KDADDIO and KDDELIO may be able to add ports beyond what 441 * KDADDIO and KDDELIO may be able to add ports beyond what
436 * we reject here, but to be safe... 442 * we reject here, but to be safe...
437 */ 443 */
438 if (arg < GPFIRST || arg > GPLAST) 444 if (arg < GPFIRST || arg > GPLAST) {
439 return -EINVAL; 445 ret = -EINVAL;
440 return sys_ioperm(arg, 1, (cmd == KDADDIO)) ? -ENXIO : 0; 446 break;
447 }
448 ret = sys_ioperm(arg, 1, (cmd == KDADDIO)) ? -ENXIO : 0;
449 break;
441 450
442 case KDENABIO: 451 case KDENABIO:
443 case KDDISABIO: 452 case KDDISABIO:
444 return sys_ioperm(GPFIRST, GPNUM, 453 ret = sys_ioperm(GPFIRST, GPNUM,
445 (cmd == KDENABIO)) ? -ENXIO : 0; 454 (cmd == KDENABIO)) ? -ENXIO : 0;
455 break;
446#endif 456#endif
447 457
448 /* Linux m68k/i386 interface for setting the keyboard delay/repeat rate */ 458 /* Linux m68k/i386 interface for setting the keyboard delay/repeat rate */
@@ -450,19 +460,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
450 case KDKBDREP: 460 case KDKBDREP:
451 { 461 {
452 struct kbd_repeat kbrep; 462 struct kbd_repeat kbrep;
453 int err;
454 463
455 if (!capable(CAP_SYS_TTY_CONFIG)) 464 if (!capable(CAP_SYS_TTY_CONFIG))
456 return -EPERM; 465 goto eperm;
457 466
458 if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat))) 467 if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat))) {
459 return -EFAULT; 468 ret = -EFAULT;
460 err = kbd_rate(&kbrep); 469 break;
461 if (err) 470 }
462 return err; 471 ret = kbd_rate(&kbrep);
472 if (ret)
473 break;
463 if (copy_to_user(up, &kbrep, sizeof(struct kbd_repeat))) 474 if (copy_to_user(up, &kbrep, sizeof(struct kbd_repeat)))
464 return -EFAULT; 475 ret = -EFAULT;
465 return 0; 476 break;
466 } 477 }
467 478
468 case KDSETMODE: 479 case KDSETMODE:
@@ -475,7 +486,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
475 * need to restore their engine state. --BenH 486 * need to restore their engine state. --BenH
476 */ 487 */
477 if (!perm) 488 if (!perm)
478 return -EPERM; 489 goto eperm;
479 switch (arg) { 490 switch (arg) {
480 case KD_GRAPHICS: 491 case KD_GRAPHICS:
481 break; 492 break;
@@ -485,13 +496,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
485 case KD_TEXT: 496 case KD_TEXT:
486 break; 497 break;
487 default: 498 default:
488 return -EINVAL; 499 ret = -EINVAL;
500 goto out;
489 } 501 }
490 if (vc->vc_mode == (unsigned char) arg) 502 if (vc->vc_mode == (unsigned char) arg)
491 return 0; 503 break;
492 vc->vc_mode = (unsigned char) arg; 504 vc->vc_mode = (unsigned char) arg;
493 if (console != fg_console) 505 if (console != fg_console)
494 return 0; 506 break;
495 /* 507 /*
496 * explicitly blank/unblank the screen if switching modes 508 * explicitly blank/unblank the screen if switching modes
497 */ 509 */
@@ -501,7 +513,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
501 else 513 else
502 do_blank_screen(1); 514 do_blank_screen(1);
503 release_console_sem(); 515 release_console_sem();
504 return 0; 516 break;
505 517
506 case KDGETMODE: 518 case KDGETMODE:
507 ucval = vc->vc_mode; 519 ucval = vc->vc_mode;
@@ -513,11 +525,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
513 * these work like a combination of mmap and KDENABIO. 525 * these work like a combination of mmap and KDENABIO.
514 * this could be easily finished. 526 * this could be easily finished.
515 */ 527 */
516 return -EINVAL; 528 ret = -EINVAL;
529 break;
517 530
518 case KDSKBMODE: 531 case KDSKBMODE:
519 if (!perm) 532 if (!perm)
520 return -EPERM; 533 goto eperm;
521 switch(arg) { 534 switch(arg) {
522 case K_RAW: 535 case K_RAW:
523 kbd->kbdmode = VC_RAW; 536 kbd->kbdmode = VC_RAW;
@@ -534,10 +547,11 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
534 compute_shiftstate(); 547 compute_shiftstate();
535 break; 548 break;
536 default: 549 default:
537 return -EINVAL; 550 ret = -EINVAL;
551 goto out;
538 } 552 }
539 tty_ldisc_flush(tty); 553 tty_ldisc_flush(tty);
540 return 0; 554 break;
541 555
542 case KDGKBMODE: 556 case KDGKBMODE:
543 ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW : 557 ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW :
@@ -557,28 +571,32 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
557 set_vc_kbd_mode(kbd, VC_META); 571 set_vc_kbd_mode(kbd, VC_META);
558 break; 572 break;
559 default: 573 default:
560 return -EINVAL; 574 ret = -EINVAL;
561 } 575 }
562 return 0; 576 break;
563 577
564 case KDGKBMETA: 578 case KDGKBMETA:
565 ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT); 579 ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
566 setint: 580 setint:
567 return put_user(ucval, (int __user *)arg); 581 ret = put_user(ucval, (int __user *)arg);
582 break;
568 583
569 case KDGETKEYCODE: 584 case KDGETKEYCODE:
570 case KDSETKEYCODE: 585 case KDSETKEYCODE:
571 if(!capable(CAP_SYS_TTY_CONFIG)) 586 if(!capable(CAP_SYS_TTY_CONFIG))
572 perm=0; 587 perm = 0;
573 return do_kbkeycode_ioctl(cmd, up, perm); 588 ret = do_kbkeycode_ioctl(cmd, up, perm);
589 break;
574 590
575 case KDGKBENT: 591 case KDGKBENT:
576 case KDSKBENT: 592 case KDSKBENT:
577 return do_kdsk_ioctl(cmd, up, perm, kbd); 593 ret = do_kdsk_ioctl(cmd, up, perm, kbd);
594 break;
578 595
579 case KDGKBSENT: 596 case KDGKBSENT:
580 case KDSKBSENT: 597 case KDSKBSENT:
581 return do_kdgkb_ioctl(cmd, up, perm); 598 ret = do_kdgkb_ioctl(cmd, up, perm);
599 break;
582 600
583 case KDGKBDIACR: 601 case KDGKBDIACR:
584 { 602 {
@@ -586,26 +604,31 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
586 struct kbdiacr diacr; 604 struct kbdiacr diacr;
587 int i; 605 int i;
588 606
589 if (put_user(accent_table_size, &a->kb_cnt)) 607 if (put_user(accent_table_size, &a->kb_cnt)) {
590 return -EFAULT; 608 ret = -EFAULT;
609 break;
610 }
591 for (i = 0; i < accent_table_size; i++) { 611 for (i = 0; i < accent_table_size; i++) {
592 diacr.diacr = conv_uni_to_8bit(accent_table[i].diacr); 612 diacr.diacr = conv_uni_to_8bit(accent_table[i].diacr);
593 diacr.base = conv_uni_to_8bit(accent_table[i].base); 613 diacr.base = conv_uni_to_8bit(accent_table[i].base);
594 diacr.result = conv_uni_to_8bit(accent_table[i].result); 614 diacr.result = conv_uni_to_8bit(accent_table[i].result);
595 if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr))) 615 if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr))) {
596 return -EFAULT; 616 ret = -EFAULT;
617 break;
618 }
597 } 619 }
598 return 0; 620 break;
599 } 621 }
600 case KDGKBDIACRUC: 622 case KDGKBDIACRUC:
601 { 623 {
602 struct kbdiacrsuc __user *a = up; 624 struct kbdiacrsuc __user *a = up;
603 625
604 if (put_user(accent_table_size, &a->kb_cnt)) 626 if (put_user(accent_table_size, &a->kb_cnt))
605 return -EFAULT; 627 ret = -EFAULT;
606 if (copy_to_user(a->kbdiacruc, accent_table, accent_table_size*sizeof(struct kbdiacruc))) 628 else if (copy_to_user(a->kbdiacruc, accent_table,
607 return -EFAULT; 629 accent_table_size*sizeof(struct kbdiacruc)))
608 return 0; 630 ret = -EFAULT;
631 break;
609 } 632 }
610 633
611 case KDSKBDIACR: 634 case KDSKBDIACR:
@@ -616,20 +639,26 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
616 int i; 639 int i;
617 640
618 if (!perm) 641 if (!perm)
619 return -EPERM; 642 goto eperm;
620 if (get_user(ct,&a->kb_cnt)) 643 if (get_user(ct,&a->kb_cnt)) {
621 return -EFAULT; 644 ret = -EFAULT;
622 if (ct >= MAX_DIACR) 645 break;
623 return -EINVAL; 646 }
647 if (ct >= MAX_DIACR) {
648 ret = -EINVAL;
649 break;
650 }
624 accent_table_size = ct; 651 accent_table_size = ct;
625 for (i = 0; i < ct; i++) { 652 for (i = 0; i < ct; i++) {
626 if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr))) 653 if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr))) {
627 return -EFAULT; 654 ret = -EFAULT;
655 break;
656 }
628 accent_table[i].diacr = conv_8bit_to_uni(diacr.diacr); 657 accent_table[i].diacr = conv_8bit_to_uni(diacr.diacr);
629 accent_table[i].base = conv_8bit_to_uni(diacr.base); 658 accent_table[i].base = conv_8bit_to_uni(diacr.base);
630 accent_table[i].result = conv_8bit_to_uni(diacr.result); 659 accent_table[i].result = conv_8bit_to_uni(diacr.result);
631 } 660 }
632 return 0; 661 break;
633 } 662 }
634 663
635 case KDSKBDIACRUC: 664 case KDSKBDIACRUC:
@@ -638,15 +667,19 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
638 unsigned int ct; 667 unsigned int ct;
639 668
640 if (!perm) 669 if (!perm)
641 return -EPERM; 670 goto eperm;
642 if (get_user(ct,&a->kb_cnt)) 671 if (get_user(ct,&a->kb_cnt)) {
643 return -EFAULT; 672 ret = -EFAULT;
644 if (ct >= MAX_DIACR) 673 break;
645 return -EINVAL; 674 }
675 if (ct >= MAX_DIACR) {
676 ret = -EINVAL;
677 break;
678 }
646 accent_table_size = ct; 679 accent_table_size = ct;
647 if (copy_from_user(accent_table, a->kbdiacruc, ct*sizeof(struct kbdiacruc))) 680 if (copy_from_user(accent_table, a->kbdiacruc, ct*sizeof(struct kbdiacruc)))
648 return -EFAULT; 681 ret = -EFAULT;
649 return 0; 682 break;
650 } 683 }
651 684
652 /* the ioctls below read/set the flags usually shown in the leds */ 685 /* the ioctls below read/set the flags usually shown in the leds */
@@ -657,26 +690,29 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
657 690
658 case KDSKBLED: 691 case KDSKBLED:
659 if (!perm) 692 if (!perm)
660 return -EPERM; 693 goto eperm;
661 if (arg & ~0x77) 694 if (arg & ~0x77) {
662 return -EINVAL; 695 ret = -EINVAL;
696 break;
697 }
663 kbd->ledflagstate = (arg & 7); 698 kbd->ledflagstate = (arg & 7);
664 kbd->default_ledflagstate = ((arg >> 4) & 7); 699 kbd->default_ledflagstate = ((arg >> 4) & 7);
665 set_leds(); 700 set_leds();
666 return 0; 701 break;
667 702
668 /* the ioctls below only set the lights, not the functions */ 703 /* the ioctls below only set the lights, not the functions */
669 /* for those, see KDGKBLED and KDSKBLED above */ 704 /* for those, see KDGKBLED and KDSKBLED above */
670 case KDGETLED: 705 case KDGETLED:
671 ucval = getledstate(); 706 ucval = getledstate();
672 setchar: 707 setchar:
673 return put_user(ucval, (char __user *)arg); 708 ret = put_user(ucval, (char __user *)arg);
709 break;
674 710
675 case KDSETLED: 711 case KDSETLED:
676 if (!perm) 712 if (!perm)
677 return -EPERM; 713 goto eperm;
678 setledstate(kbd, arg); 714 setledstate(kbd, arg);
679 return 0; 715 break;
680 716
681 /* 717 /*
682 * A process can indicate its willingness to accept signals 718 * A process can indicate its willingness to accept signals
@@ -688,16 +724,17 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
688 case KDSIGACCEPT: 724 case KDSIGACCEPT:
689 { 725 {
690 if (!perm || !capable(CAP_KILL)) 726 if (!perm || !capable(CAP_KILL))
691 return -EPERM; 727 goto eperm;
692 if (!valid_signal(arg) || arg < 1 || arg == SIGKILL) 728 if (!valid_signal(arg) || arg < 1 || arg == SIGKILL)
693 return -EINVAL; 729 ret = -EINVAL;
694 730 else {
695 spin_lock_irq(&vt_spawn_con.lock); 731 spin_lock_irq(&vt_spawn_con.lock);
696 put_pid(vt_spawn_con.pid); 732 put_pid(vt_spawn_con.pid);
697 vt_spawn_con.pid = get_pid(task_pid(current)); 733 vt_spawn_con.pid = get_pid(task_pid(current));
698 vt_spawn_con.sig = arg; 734 vt_spawn_con.sig = arg;
699 spin_unlock_irq(&vt_spawn_con.lock); 735 spin_unlock_irq(&vt_spawn_con.lock);
700 return 0; 736 }
737 break;
701 } 738 }
702 739
703 case VT_SETMODE: 740 case VT_SETMODE:
@@ -705,11 +742,15 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
705 struct vt_mode tmp; 742 struct vt_mode tmp;
706 743
707 if (!perm) 744 if (!perm)
708 return -EPERM; 745 goto eperm;
709 if (copy_from_user(&tmp, up, sizeof(struct vt_mode))) 746 if (copy_from_user(&tmp, up, sizeof(struct vt_mode))) {
710 return -EFAULT; 747 ret = -EFAULT;
711 if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) 748 goto out;
712 return -EINVAL; 749 }
750 if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) {
751 ret = -EINVAL;
752 goto out;
753 }
713 acquire_console_sem(); 754 acquire_console_sem();
714 vc->vt_mode = tmp; 755 vc->vt_mode = tmp;
715 /* the frsig is ignored, so we set it to 0 */ 756 /* the frsig is ignored, so we set it to 0 */
@@ -719,7 +760,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
719 /* no switch is required -- saw@shade.msu.ru */ 760 /* no switch is required -- saw@shade.msu.ru */
720 vc->vt_newvt = -1; 761 vc->vt_newvt = -1;
721 release_console_sem(); 762 release_console_sem();
722 return 0; 763 break;
723 } 764 }
724 765
725 case VT_GETMODE: 766 case VT_GETMODE:
@@ -732,7 +773,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
732 release_console_sem(); 773 release_console_sem();
733 774
734 rc = copy_to_user(up, &tmp, sizeof(struct vt_mode)); 775 rc = copy_to_user(up, &tmp, sizeof(struct vt_mode));
735 return rc ? -EFAULT : 0; 776 if (rc)
777 ret = -EFAULT;
778 break;
736 } 779 }
737 780
738 /* 781 /*
@@ -746,12 +789,16 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
746 unsigned short state, mask; 789 unsigned short state, mask;
747 790
748 if (put_user(fg_console + 1, &vtstat->v_active)) 791 if (put_user(fg_console + 1, &vtstat->v_active))
749 return -EFAULT; 792 ret = -EFAULT;
750 state = 1; /* /dev/tty0 is always open */ 793 else {
751 for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask; ++i, mask <<= 1) 794 state = 1; /* /dev/tty0 is always open */
752 if (VT_IS_IN_USE(i)) 795 for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask;
753 state |= mask; 796 ++i, mask <<= 1)
754 return put_user(state, &vtstat->v_state); 797 if (VT_IS_IN_USE(i))
798 state |= mask;
799 ret = put_user(state, &vtstat->v_state);
800 }
801 break;
755 } 802 }
756 803
757 /* 804 /*
@@ -771,27 +818,31 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
771 */ 818 */
772 case VT_ACTIVATE: 819 case VT_ACTIVATE:
773 if (!perm) 820 if (!perm)
774 return -EPERM; 821 goto eperm;
775 if (arg == 0 || arg > MAX_NR_CONSOLES) 822 if (arg == 0 || arg > MAX_NR_CONSOLES)
776 return -ENXIO; 823 ret = -ENXIO;
777 arg--; 824 else {
778 acquire_console_sem(); 825 arg--;
779 i = vc_allocate(arg); 826 acquire_console_sem();
780 release_console_sem(); 827 ret = vc_allocate(arg);
781 if (i) 828 release_console_sem();
782 return i; 829 if (ret)
783 set_console(arg); 830 break;
784 return 0; 831 set_console(arg);
832 }
833 break;
785 834
786 /* 835 /*
787 * wait until the specified VT has been activated 836 * wait until the specified VT has been activated
788 */ 837 */
789 case VT_WAITACTIVE: 838 case VT_WAITACTIVE:
790 if (!perm) 839 if (!perm)
791 return -EPERM; 840 goto eperm;
792 if (arg == 0 || arg > MAX_NR_CONSOLES) 841 if (arg == 0 || arg > MAX_NR_CONSOLES)
793 return -ENXIO; 842 ret = -ENXIO;
794 return vt_waitactive(arg-1); 843 else
844 ret = vt_waitactive(arg - 1);
845 break;
795 846
796 /* 847 /*
797 * If a vt is under process control, the kernel will not switch to it 848 * If a vt is under process control, the kernel will not switch to it
@@ -805,10 +856,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
805 */ 856 */
806 case VT_RELDISP: 857 case VT_RELDISP:
807 if (!perm) 858 if (!perm)
808 return -EPERM; 859 goto eperm;
809 if (vc->vt_mode.mode != VT_PROCESS)
810 return -EINVAL;
811 860
861 if (vc->vt_mode.mode != VT_PROCESS) {
862 ret = -EINVAL;
863 break;
864 }
812 /* 865 /*
813 * Switching-from response 866 * Switching-from response
814 */ 867 */
@@ -829,10 +882,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
829 int newvt; 882 int newvt;
830 newvt = vc->vt_newvt; 883 newvt = vc->vt_newvt;
831 vc->vt_newvt = -1; 884 vc->vt_newvt = -1;
832 i = vc_allocate(newvt); 885 ret = vc_allocate(newvt);
833 if (i) { 886 if (ret) {
834 release_console_sem(); 887 release_console_sem();
835 return i; 888 break;
836 } 889 }
837 /* 890 /*
838 * When we actually do the console switch, 891 * When we actually do the console switch,
@@ -841,31 +894,27 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
841 */ 894 */
842 complete_change_console(vc_cons[newvt].d); 895 complete_change_console(vc_cons[newvt].d);
843 } 896 }
844 } 897 } else {
845 898 /*
846 /* 899 * Switched-to response
847 * Switched-to response 900 */
848 */
849 else
850 {
851 /* 901 /*
852 * If it's just an ACK, ignore it 902 * If it's just an ACK, ignore it
853 */ 903 */
854 if (arg != VT_ACKACQ) { 904 if (arg != VT_ACKACQ)
855 release_console_sem(); 905 ret = -EINVAL;
856 return -EINVAL;
857 }
858 } 906 }
859 release_console_sem(); 907 release_console_sem();
860 908 break;
861 return 0;
862 909
863 /* 910 /*
864 * Disallocate memory associated to VT (but leave VT1) 911 * Disallocate memory associated to VT (but leave VT1)
865 */ 912 */
866 case VT_DISALLOCATE: 913 case VT_DISALLOCATE:
867 if (arg > MAX_NR_CONSOLES) 914 if (arg > MAX_NR_CONSOLES) {
868 return -ENXIO; 915 ret = -ENXIO;
916 break;
917 }
869 if (arg == 0) { 918 if (arg == 0) {
870 /* deallocate all unused consoles, but leave 0 */ 919 /* deallocate all unused consoles, but leave 0 */
871 acquire_console_sem(); 920 acquire_console_sem();
@@ -877,14 +926,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
877 /* deallocate a single console, if possible */ 926 /* deallocate a single console, if possible */
878 arg--; 927 arg--;
879 if (VT_BUSY(arg)) 928 if (VT_BUSY(arg))
880 return -EBUSY; 929 ret = -EBUSY;
881 if (arg) { /* leave 0 */ 930 else if (arg) { /* leave 0 */
882 acquire_console_sem(); 931 acquire_console_sem();
883 vc_deallocate(arg); 932 vc_deallocate(arg);
884 release_console_sem(); 933 release_console_sem();
885 } 934 }
886 } 935 }
887 return 0; 936 break;
888 937
889 case VT_RESIZE: 938 case VT_RESIZE:
890 { 939 {
@@ -893,21 +942,21 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
893 942
894 ushort ll,cc; 943 ushort ll,cc;
895 if (!perm) 944 if (!perm)
896 return -EPERM; 945 goto eperm;
897 if (get_user(ll, &vtsizes->v_rows) || 946 if (get_user(ll, &vtsizes->v_rows) ||
898 get_user(cc, &vtsizes->v_cols)) 947 get_user(cc, &vtsizes->v_cols))
899 return -EFAULT; 948 ret = -EFAULT;
900 949 else {
901 for (i = 0; i < MAX_NR_CONSOLES; i++) { 950 for (i = 0; i < MAX_NR_CONSOLES; i++) {
902 vc = vc_cons[i].d; 951 vc = vc_cons[i].d;
903 952
904 if (vc) { 953 if (vc) {
905 vc->vc_resize_user = 1; 954 vc->vc_resize_user = 1;
906 vc_lock_resize(vc_cons[i].d, cc, ll); 955 vc_lock_resize(vc_cons[i].d, cc, ll);
956 }
907 } 957 }
908 } 958 }
909 959 break;
910 return 0;
911 } 960 }
912 961
913 case VT_RESIZEX: 962 case VT_RESIZEX:
@@ -915,10 +964,13 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
915 struct vt_consize __user *vtconsize = up; 964 struct vt_consize __user *vtconsize = up;
916 ushort ll,cc,vlin,clin,vcol,ccol; 965 ushort ll,cc,vlin,clin,vcol,ccol;
917 if (!perm) 966 if (!perm)
918 return -EPERM; 967 goto eperm;
919 if (!access_ok(VERIFY_READ, vtconsize, 968 if (!access_ok(VERIFY_READ, vtconsize,
920 sizeof(struct vt_consize))) 969 sizeof(struct vt_consize))) {
921 return -EFAULT; 970 ret = -EFAULT;
971 break;
972 }
973 /* FIXME: Should check the copies properly */
922 __get_user(ll, &vtconsize->v_rows); 974 __get_user(ll, &vtconsize->v_rows);
923 __get_user(cc, &vtconsize->v_cols); 975 __get_user(cc, &vtconsize->v_cols);
924 __get_user(vlin, &vtconsize->v_vlin); 976 __get_user(vlin, &vtconsize->v_vlin);
@@ -928,21 +980,28 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
928 vlin = vlin ? vlin : vc->vc_scan_lines; 980 vlin = vlin ? vlin : vc->vc_scan_lines;
929 if (clin) { 981 if (clin) {
930 if (ll) { 982 if (ll) {
931 if (ll != vlin/clin) 983 if (ll != vlin/clin) {
932 return -EINVAL; /* Parameters don't add up */ 984 /* Parameters don't add up */
985 ret = -EINVAL;
986 break;
987 }
933 } else 988 } else
934 ll = vlin/clin; 989 ll = vlin/clin;
935 } 990 }
936 if (vcol && ccol) { 991 if (vcol && ccol) {
937 if (cc) { 992 if (cc) {
938 if (cc != vcol/ccol) 993 if (cc != vcol/ccol) {
939 return -EINVAL; 994 ret = -EINVAL;
995 break;
996 }
940 } else 997 } else
941 cc = vcol/ccol; 998 cc = vcol/ccol;
942 } 999 }
943 1000
944 if (clin > 32) 1001 if (clin > 32) {
945 return -EINVAL; 1002 ret = -EINVAL;
1003 break;
1004 }
946 1005
947 for (i = 0; i < MAX_NR_CONSOLES; i++) { 1006 for (i = 0; i < MAX_NR_CONSOLES; i++) {
948 if (!vc_cons[i].d) 1007 if (!vc_cons[i].d)
@@ -956,19 +1015,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
956 vc_resize(vc_cons[i].d, cc, ll); 1015 vc_resize(vc_cons[i].d, cc, ll);
957 release_console_sem(); 1016 release_console_sem();
958 } 1017 }
959 return 0; 1018 break;
960 } 1019 }
961 1020
962 case PIO_FONT: { 1021 case PIO_FONT: {
963 if (!perm) 1022 if (!perm)
964 return -EPERM; 1023 goto eperm;
965 op.op = KD_FONT_OP_SET; 1024 op.op = KD_FONT_OP_SET;
966 op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */ 1025 op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */
967 op.width = 8; 1026 op.width = 8;
968 op.height = 0; 1027 op.height = 0;
969 op.charcount = 256; 1028 op.charcount = 256;
970 op.data = up; 1029 op.data = up;
971 return con_font_op(vc_cons[fg_console].d, &op); 1030 ret = con_font_op(vc_cons[fg_console].d, &op);
1031 break;
972 } 1032 }
973 1033
974 case GIO_FONT: { 1034 case GIO_FONT: {
@@ -978,100 +1038,124 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
978 op.height = 32; 1038 op.height = 32;
979 op.charcount = 256; 1039 op.charcount = 256;
980 op.data = up; 1040 op.data = up;
981 return con_font_op(vc_cons[fg_console].d, &op); 1041 ret = con_font_op(vc_cons[fg_console].d, &op);
1042 break;
982 } 1043 }
983 1044
984 case PIO_CMAP: 1045 case PIO_CMAP:
985 if (!perm) 1046 if (!perm)
986 return -EPERM; 1047 ret = -EPERM;
987 return con_set_cmap(up); 1048 else
1049 ret = con_set_cmap(up);
1050 break;
988 1051
989 case GIO_CMAP: 1052 case GIO_CMAP:
990 return con_get_cmap(up); 1053 ret = con_get_cmap(up);
1054 break;
991 1055
992 case PIO_FONTX: 1056 case PIO_FONTX:
993 case GIO_FONTX: 1057 case GIO_FONTX:
994 return do_fontx_ioctl(cmd, up, perm, &op); 1058 ret = do_fontx_ioctl(cmd, up, perm, &op);
1059 break;
995 1060
996 case PIO_FONTRESET: 1061 case PIO_FONTRESET:
997 { 1062 {
998 if (!perm) 1063 if (!perm)
999 return -EPERM; 1064 goto eperm;
1000 1065
1001#ifdef BROKEN_GRAPHICS_PROGRAMS 1066#ifdef BROKEN_GRAPHICS_PROGRAMS
1002 /* With BROKEN_GRAPHICS_PROGRAMS defined, the default 1067 /* With BROKEN_GRAPHICS_PROGRAMS defined, the default
1003 font is not saved. */ 1068 font is not saved. */
1004 return -ENOSYS; 1069 ret = -ENOSYS;
1070 break;
1005#else 1071#else
1006 { 1072 {
1007 op.op = KD_FONT_OP_SET_DEFAULT; 1073 op.op = KD_FONT_OP_SET_DEFAULT;
1008 op.data = NULL; 1074 op.data = NULL;
1009 i = con_font_op(vc_cons[fg_console].d, &op); 1075 ret = con_font_op(vc_cons[fg_console].d, &op);
1010 if (i) 1076 if (ret)
1011 return i; 1077 break;
1012 con_set_default_unimap(vc_cons[fg_console].d); 1078 con_set_default_unimap(vc_cons[fg_console].d);
1013 return 0; 1079 break;
1014 } 1080 }
1015#endif 1081#endif
1016 } 1082 }
1017 1083
1018 case KDFONTOP: { 1084 case KDFONTOP: {
1019 if (copy_from_user(&op, up, sizeof(op))) 1085 if (copy_from_user(&op, up, sizeof(op))) {
1020 return -EFAULT; 1086 ret = -EFAULT;
1087 break;
1088 }
1021 if (!perm && op.op != KD_FONT_OP_GET) 1089 if (!perm && op.op != KD_FONT_OP_GET)
1022 return -EPERM; 1090 goto eperm;
1023 i = con_font_op(vc, &op); 1091 ret = con_font_op(vc, &op);
1024 if (i) return i; 1092 if (ret)
1093 break;
1025 if (copy_to_user(up, &op, sizeof(op))) 1094 if (copy_to_user(up, &op, sizeof(op)))
1026 return -EFAULT; 1095 ret = -EFAULT;
1027 return 0; 1096 break;
1028 } 1097 }
1029 1098
1030 case PIO_SCRNMAP: 1099 case PIO_SCRNMAP:
1031 if (!perm) 1100 if (!perm)
1032 return -EPERM; 1101 ret = -EPERM;
1033 return con_set_trans_old(up); 1102 else
1103 ret = con_set_trans_old(up);
1104 break;
1034 1105
1035 case GIO_SCRNMAP: 1106 case GIO_SCRNMAP:
1036 return con_get_trans_old(up); 1107 ret = con_get_trans_old(up);
1108 break;
1037 1109
1038 case PIO_UNISCRNMAP: 1110 case PIO_UNISCRNMAP:
1039 if (!perm) 1111 if (!perm)
1040 return -EPERM; 1112 ret = -EPERM;
1041 return con_set_trans_new(up); 1113 else
1114 ret = con_set_trans_new(up);
1115 break;
1042 1116
1043 case GIO_UNISCRNMAP: 1117 case GIO_UNISCRNMAP:
1044 return con_get_trans_new(up); 1118 ret = con_get_trans_new(up);
1119 break;
1045 1120
1046 case PIO_UNIMAPCLR: 1121 case PIO_UNIMAPCLR:
1047 { struct unimapinit ui; 1122 { struct unimapinit ui;
1048 if (!perm) 1123 if (!perm)
1049 return -EPERM; 1124 goto eperm;
1050 i = copy_from_user(&ui, up, sizeof(struct unimapinit)); 1125 ret = copy_from_user(&ui, up, sizeof(struct unimapinit));
1051 if (i) return -EFAULT; 1126 if (!ret)
1052 con_clear_unimap(vc, &ui); 1127 con_clear_unimap(vc, &ui);
1053 return 0; 1128 break;
1054 } 1129 }
1055 1130
1056 case PIO_UNIMAP: 1131 case PIO_UNIMAP:
1057 case GIO_UNIMAP: 1132 case GIO_UNIMAP:
1058 return do_unimap_ioctl(cmd, up, perm, vc); 1133 ret = do_unimap_ioctl(cmd, up, perm, vc);
1134 break;
1059 1135
1060 case VT_LOCKSWITCH: 1136 case VT_LOCKSWITCH:
1061 if (!capable(CAP_SYS_TTY_CONFIG)) 1137 if (!capable(CAP_SYS_TTY_CONFIG))
1062 return -EPERM; 1138 goto eperm;
1063 vt_dont_switch = 1; 1139 vt_dont_switch = 1;
1064 return 0; 1140 break;
1065 case VT_UNLOCKSWITCH: 1141 case VT_UNLOCKSWITCH:
1066 if (!capable(CAP_SYS_TTY_CONFIG)) 1142 if (!capable(CAP_SYS_TTY_CONFIG))
1067 return -EPERM; 1143 goto eperm;
1068 vt_dont_switch = 0; 1144 vt_dont_switch = 0;
1069 return 0; 1145 break;
1070 case VT_GETHIFONTMASK: 1146 case VT_GETHIFONTMASK:
1071 return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg); 1147 ret = put_user(vc->vc_hi_font_mask,
1148 (unsigned short __user *)arg);
1149 break;
1072 default: 1150 default:
1073 return -ENOIOCTLCMD; 1151 ret = -ENOIOCTLCMD;
1074 } 1152 }
1153out:
1154 unlock_kernel();
1155 return ret;
1156eperm:
1157 ret = -EPERM;
1158 goto out;
1075} 1159}
1076 1160
1077/* 1161/*
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index dfe6907ae15b..3edf1fc12963 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -623,8 +623,8 @@ static int __devinit hwicap_setup(struct device *dev, int id,
623 623
624 if (!request_mem_region(drvdata->mem_start, 624 if (!request_mem_region(drvdata->mem_start,
625 drvdata->mem_size, DRIVER_NAME)) { 625 drvdata->mem_size, DRIVER_NAME)) {
626 dev_err(dev, "Couldn't lock memory region at %p\n", 626 dev_err(dev, "Couldn't lock memory region at %Lx\n",
627 (void *)regs_res->start); 627 regs_res->start);
628 retval = -EBUSY; 628 retval = -EBUSY;
629 goto failed1; 629 goto failed1;
630 } 630 }
@@ -643,7 +643,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
643 mutex_init(&drvdata->sem); 643 mutex_init(&drvdata->sem);
644 drvdata->is_open = 0; 644 drvdata->is_open = 0;
645 645
646 dev_info(dev, "ioremap %lx to %p with size %x\n", 646 dev_info(dev, "ioremap %lx to %p with size %Lx\n",
647 (unsigned long int)drvdata->mem_start, 647 (unsigned long int)drvdata->mem_start,
648 drvdata->base_address, drvdata->mem_size); 648 drvdata->base_address, drvdata->mem_size);
649 649
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index c159ae64eeb2..5f076aef74fa 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -69,6 +69,15 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
69 the frequency statically to the highest frequency supported by 69 the frequency statically to the highest frequency supported by
70 the CPU. 70 the CPU.
71 71
72config CPU_FREQ_DEFAULT_GOV_POWERSAVE
73 bool "powersave"
74 depends on EMBEDDED
75 select CPU_FREQ_GOV_POWERSAVE
76 help
77 Use the CPUFreq governor 'powersave' as default. This sets
78 the frequency statically to the lowest frequency supported by
79 the CPU.
80
72config CPU_FREQ_DEFAULT_GOV_USERSPACE 81config CPU_FREQ_DEFAULT_GOV_USERSPACE
73 bool "userspace" 82 bool "userspace"
74 select CPU_FREQ_GOV_USERSPACE 83 select CPU_FREQ_GOV_USERSPACE
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 35a26a3e5f68..7fce038fa57e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -118,9 +118,11 @@ static void handle_update(struct work_struct *work);
118static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); 118static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
119static struct srcu_notifier_head cpufreq_transition_notifier_list; 119static struct srcu_notifier_head cpufreq_transition_notifier_list;
120 120
121static bool init_cpufreq_transition_notifier_list_called;
121static int __init init_cpufreq_transition_notifier_list(void) 122static int __init init_cpufreq_transition_notifier_list(void)
122{ 123{
123 srcu_init_notifier_head(&cpufreq_transition_notifier_list); 124 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
125 init_cpufreq_transition_notifier_list_called = true;
124 return 0; 126 return 0;
125} 127}
126pure_initcall(init_cpufreq_transition_notifier_list); 128pure_initcall(init_cpufreq_transition_notifier_list);
@@ -216,7 +218,7 @@ static void cpufreq_debug_disable_ratelimit(void)
216} 218}
217 219
218void cpufreq_debug_printk(unsigned int type, const char *prefix, 220void cpufreq_debug_printk(unsigned int type, const char *prefix,
219 const char *fmt, ...) 221 const char *fmt, ...)
220{ 222{
221 char s[256]; 223 char s[256];
222 va_list args; 224 va_list args;
@@ -378,7 +380,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor)
378/** 380/**
379 * cpufreq_parse_governor - parse a governor string 381 * cpufreq_parse_governor - parse a governor string
380 */ 382 */
381static int cpufreq_parse_governor (char *str_governor, unsigned int *policy, 383static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
382 struct cpufreq_governor **governor) 384 struct cpufreq_governor **governor)
383{ 385{
384 int err = -EINVAL; 386 int err = -EINVAL;
@@ -446,7 +448,7 @@ extern struct sysdev_class cpu_sysdev_class;
446 448
447#define show_one(file_name, object) \ 449#define show_one(file_name, object) \
448static ssize_t show_##file_name \ 450static ssize_t show_##file_name \
449(struct cpufreq_policy * policy, char *buf) \ 451(struct cpufreq_policy *policy, char *buf) \
450{ \ 452{ \
451 return sprintf (buf, "%u\n", policy->object); \ 453 return sprintf (buf, "%u\n", policy->object); \
452} 454}
@@ -465,7 +467,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
465 */ 467 */
466#define store_one(file_name, object) \ 468#define store_one(file_name, object) \
467static ssize_t store_##file_name \ 469static ssize_t store_##file_name \
468(struct cpufreq_policy * policy, const char *buf, size_t count) \ 470(struct cpufreq_policy *policy, const char *buf, size_t count) \
469{ \ 471{ \
470 unsigned int ret = -EINVAL; \ 472 unsigned int ret = -EINVAL; \
471 struct cpufreq_policy new_policy; \ 473 struct cpufreq_policy new_policy; \
@@ -490,8 +492,8 @@ store_one(scaling_max_freq,max);
490/** 492/**
491 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware 493 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
492 */ 494 */
493static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, 495static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
494 char *buf) 496 char *buf)
495{ 497{
496 unsigned int cur_freq = __cpufreq_get(policy->cpu); 498 unsigned int cur_freq = __cpufreq_get(policy->cpu);
497 if (!cur_freq) 499 if (!cur_freq)
@@ -503,8 +505,7 @@ static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
503/** 505/**
504 * show_scaling_governor - show the current policy for the specified CPU 506 * show_scaling_governor - show the current policy for the specified CPU
505 */ 507 */
506static ssize_t show_scaling_governor (struct cpufreq_policy * policy, 508static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
507 char *buf)
508{ 509{
509 if(policy->policy == CPUFREQ_POLICY_POWERSAVE) 510 if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
510 return sprintf(buf, "powersave\n"); 511 return sprintf(buf, "powersave\n");
@@ -519,8 +520,8 @@ static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
519/** 520/**
520 * store_scaling_governor - store policy for the specified CPU 521 * store_scaling_governor - store policy for the specified CPU
521 */ 522 */
522static ssize_t store_scaling_governor (struct cpufreq_policy * policy, 523static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
523 const char *buf, size_t count) 524 const char *buf, size_t count)
524{ 525{
525 unsigned int ret = -EINVAL; 526 unsigned int ret = -EINVAL;
526 char str_governor[16]; 527 char str_governor[16];
@@ -554,7 +555,7 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
554/** 555/**
555 * show_scaling_driver - show the cpufreq driver currently loaded 556 * show_scaling_driver - show the cpufreq driver currently loaded
556 */ 557 */
557static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf) 558static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
558{ 559{
559 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name); 560 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
560} 561}
@@ -562,8 +563,8 @@ static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
562/** 563/**
563 * show_scaling_available_governors - show the available CPUfreq governors 564 * show_scaling_available_governors - show the available CPUfreq governors
564 */ 565 */
565static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy, 566static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
566 char *buf) 567 char *buf)
567{ 568{
568 ssize_t i = 0; 569 ssize_t i = 0;
569 struct cpufreq_governor *t; 570 struct cpufreq_governor *t;
@@ -582,15 +583,13 @@ out:
582 i += sprintf(&buf[i], "\n"); 583 i += sprintf(&buf[i], "\n");
583 return i; 584 return i;
584} 585}
585/** 586
586 * show_affected_cpus - show the CPUs affected by each transition 587static ssize_t show_cpus(cpumask_t mask, char *buf)
587 */
588static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
589{ 588{
590 ssize_t i = 0; 589 ssize_t i = 0;
591 unsigned int cpu; 590 unsigned int cpu;
592 591
593 for_each_cpu_mask(cpu, policy->cpus) { 592 for_each_cpu_mask(cpu, mask) {
594 if (i) 593 if (i)
595 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 594 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
596 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 595 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -601,8 +600,27 @@ static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
601 return i; 600 return i;
602} 601}
603 602
603/**
604 * show_related_cpus - show the CPUs affected by each transition even if
605 * hw coordination is in use
606 */
607static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
608{
609 if (cpus_empty(policy->related_cpus))
610 return show_cpus(policy->cpus, buf);
611 return show_cpus(policy->related_cpus, buf);
612}
613
614/**
615 * show_affected_cpus - show the CPUs affected by each transition
616 */
617static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
618{
619 return show_cpus(policy->cpus, buf);
620}
621
604static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, 622static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
605 const char *buf, size_t count) 623 const char *buf, size_t count)
606{ 624{
607 unsigned int freq = 0; 625 unsigned int freq = 0;
608 unsigned int ret; 626 unsigned int ret;
@@ -645,18 +663,20 @@ define_one_ro(cpuinfo_max_freq);
645define_one_ro(scaling_available_governors); 663define_one_ro(scaling_available_governors);
646define_one_ro(scaling_driver); 664define_one_ro(scaling_driver);
647define_one_ro(scaling_cur_freq); 665define_one_ro(scaling_cur_freq);
666define_one_ro(related_cpus);
648define_one_ro(affected_cpus); 667define_one_ro(affected_cpus);
649define_one_rw(scaling_min_freq); 668define_one_rw(scaling_min_freq);
650define_one_rw(scaling_max_freq); 669define_one_rw(scaling_max_freq);
651define_one_rw(scaling_governor); 670define_one_rw(scaling_governor);
652define_one_rw(scaling_setspeed); 671define_one_rw(scaling_setspeed);
653 672
654static struct attribute * default_attrs[] = { 673static struct attribute *default_attrs[] = {
655 &cpuinfo_min_freq.attr, 674 &cpuinfo_min_freq.attr,
656 &cpuinfo_max_freq.attr, 675 &cpuinfo_max_freq.attr,
657 &scaling_min_freq.attr, 676 &scaling_min_freq.attr,
658 &scaling_max_freq.attr, 677 &scaling_max_freq.attr,
659 &affected_cpus.attr, 678 &affected_cpus.attr,
679 &related_cpus.attr,
660 &scaling_governor.attr, 680 &scaling_governor.attr,
661 &scaling_driver.attr, 681 &scaling_driver.attr,
662 &scaling_available_governors.attr, 682 &scaling_available_governors.attr,
@@ -667,10 +687,10 @@ static struct attribute * default_attrs[] = {
667#define to_policy(k) container_of(k,struct cpufreq_policy,kobj) 687#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
668#define to_attr(a) container_of(a,struct freq_attr,attr) 688#define to_attr(a) container_of(a,struct freq_attr,attr)
669 689
670static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) 690static ssize_t show(struct kobject *kobj, struct attribute *attr ,char *buf)
671{ 691{
672 struct cpufreq_policy * policy = to_policy(kobj); 692 struct cpufreq_policy *policy = to_policy(kobj);
673 struct freq_attr * fattr = to_attr(attr); 693 struct freq_attr *fattr = to_attr(attr);
674 ssize_t ret = -EINVAL; 694 ssize_t ret = -EINVAL;
675 policy = cpufreq_cpu_get(policy->cpu); 695 policy = cpufreq_cpu_get(policy->cpu);
676 if (!policy) 696 if (!policy)
@@ -691,11 +711,11 @@ no_policy:
691 return ret; 711 return ret;
692} 712}
693 713
694static ssize_t store(struct kobject * kobj, struct attribute * attr, 714static ssize_t store(struct kobject *kobj, struct attribute *attr,
695 const char * buf, size_t count) 715 const char *buf, size_t count)
696{ 716{
697 struct cpufreq_policy * policy = to_policy(kobj); 717 struct cpufreq_policy *policy = to_policy(kobj);
698 struct freq_attr * fattr = to_attr(attr); 718 struct freq_attr *fattr = to_attr(attr);
699 ssize_t ret = -EINVAL; 719 ssize_t ret = -EINVAL;
700 policy = cpufreq_cpu_get(policy->cpu); 720 policy = cpufreq_cpu_get(policy->cpu);
701 if (!policy) 721 if (!policy)
@@ -716,9 +736,9 @@ no_policy:
716 return ret; 736 return ret;
717} 737}
718 738
719static void cpufreq_sysfs_release(struct kobject * kobj) 739static void cpufreq_sysfs_release(struct kobject *kobj)
720{ 740{
721 struct cpufreq_policy * policy = to_policy(kobj); 741 struct cpufreq_policy *policy = to_policy(kobj);
722 dprintk("last reference is dropped\n"); 742 dprintk("last reference is dropped\n");
723 complete(&policy->kobj_unregister); 743 complete(&policy->kobj_unregister);
724} 744}
@@ -740,7 +760,7 @@ static struct kobj_type ktype_cpufreq = {
740 * 760 *
741 * Adds the cpufreq interface for a CPU device. 761 * Adds the cpufreq interface for a CPU device.
742 */ 762 */
743static int cpufreq_add_dev (struct sys_device * sys_dev) 763static int cpufreq_add_dev(struct sys_device *sys_dev)
744{ 764{
745 unsigned int cpu = sys_dev->id; 765 unsigned int cpu = sys_dev->id;
746 int ret = 0; 766 int ret = 0;
@@ -800,7 +820,6 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
800 ret = cpufreq_driver->init(policy); 820 ret = cpufreq_driver->init(policy);
801 if (ret) { 821 if (ret) {
802 dprintk("initialization failed\n"); 822 dprintk("initialization failed\n");
803 unlock_policy_rwsem_write(cpu);
804 goto err_out; 823 goto err_out;
805 } 824 }
806 policy->user_policy.min = policy->cpuinfo.min_freq; 825 policy->user_policy.min = policy->cpuinfo.min_freq;
@@ -823,7 +842,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
823 /* check for existing affected CPUs. They may not be aware 842 /* check for existing affected CPUs. They may not be aware
824 * of it due to CPU Hotplug. 843 * of it due to CPU Hotplug.
825 */ 844 */
826 managed_policy = cpufreq_cpu_get(j); 845 managed_policy = cpufreq_cpu_get(j);
827 if (unlikely(managed_policy)) { 846 if (unlikely(managed_policy)) {
828 847
829 /* Set proper policy_cpu */ 848 /* Set proper policy_cpu */
@@ -842,14 +861,11 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
842 ret = sysfs_create_link(&sys_dev->kobj, 861 ret = sysfs_create_link(&sys_dev->kobj,
843 &managed_policy->kobj, 862 &managed_policy->kobj,
844 "cpufreq"); 863 "cpufreq");
845 if (ret) { 864 if (ret)
846 unlock_policy_rwsem_write(cpu);
847 goto err_out_driver_exit; 865 goto err_out_driver_exit;
848 }
849 866
850 cpufreq_debug_enable_ratelimit(); 867 cpufreq_debug_enable_ratelimit();
851 ret = 0; 868 ret = 0;
852 unlock_policy_rwsem_write(cpu);
853 goto err_out_driver_exit; /* call driver->exit() */ 869 goto err_out_driver_exit; /* call driver->exit() */
854 } 870 }
855 } 871 }
@@ -859,33 +875,26 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
859 /* prepare interface data */ 875 /* prepare interface data */
860 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj, 876 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
861 "cpufreq"); 877 "cpufreq");
862 if (ret) { 878 if (ret)
863 unlock_policy_rwsem_write(cpu);
864 goto err_out_driver_exit; 879 goto err_out_driver_exit;
865 } 880
866 /* set up files for this cpu device */ 881 /* set up files for this cpu device */
867 drv_attr = cpufreq_driver->attr; 882 drv_attr = cpufreq_driver->attr;
868 while ((drv_attr) && (*drv_attr)) { 883 while ((drv_attr) && (*drv_attr)) {
869 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 884 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
870 if (ret) { 885 if (ret)
871 unlock_policy_rwsem_write(cpu);
872 goto err_out_driver_exit; 886 goto err_out_driver_exit;
873 }
874 drv_attr++; 887 drv_attr++;
875 } 888 }
876 if (cpufreq_driver->get){ 889 if (cpufreq_driver->get) {
877 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 890 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
878 if (ret) { 891 if (ret)
879 unlock_policy_rwsem_write(cpu);
880 goto err_out_driver_exit; 892 goto err_out_driver_exit;
881 }
882 } 893 }
883 if (cpufreq_driver->target){ 894 if (cpufreq_driver->target) {
884 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 895 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
885 if (ret) { 896 if (ret)
886 unlock_policy_rwsem_write(cpu);
887 goto err_out_driver_exit; 897 goto err_out_driver_exit;
888 }
889 } 898 }
890 899
891 spin_lock_irqsave(&cpufreq_driver_lock, flags); 900 spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -907,10 +916,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
907 cpu_sys_dev = get_cpu_sysdev(j); 916 cpu_sys_dev = get_cpu_sysdev(j);
908 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, 917 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
909 "cpufreq"); 918 "cpufreq");
910 if (ret) { 919 if (ret)
911 unlock_policy_rwsem_write(cpu);
912 goto err_out_unregister; 920 goto err_out_unregister;
913 }
914 } 921 }
915 922
916 policy->governor = NULL; /* to assure that the starting sequence is 923 policy->governor = NULL; /* to assure that the starting sequence is
@@ -950,6 +957,7 @@ err_out_driver_exit:
950 cpufreq_driver->exit(policy); 957 cpufreq_driver->exit(policy);
951 958
952err_out: 959err_out:
960 unlock_policy_rwsem_write(cpu);
953 kfree(policy); 961 kfree(policy);
954 962
955nomem_out: 963nomem_out:
@@ -967,7 +975,7 @@ module_out:
967 * Caller should already have policy_rwsem in write mode for this CPU. 975 * Caller should already have policy_rwsem in write mode for this CPU.
968 * This routine frees the rwsem before returning. 976 * This routine frees the rwsem before returning.
969 */ 977 */
970static int __cpufreq_remove_dev (struct sys_device * sys_dev) 978static int __cpufreq_remove_dev(struct sys_device *sys_dev)
971{ 979{
972 unsigned int cpu = sys_dev->id; 980 unsigned int cpu = sys_dev->id;
973 unsigned long flags; 981 unsigned long flags;
@@ -1071,7 +1079,7 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
1071} 1079}
1072 1080
1073 1081
1074static int cpufreq_remove_dev (struct sys_device * sys_dev) 1082static int cpufreq_remove_dev(struct sys_device *sys_dev)
1075{ 1083{
1076 unsigned int cpu = sys_dev->id; 1084 unsigned int cpu = sys_dev->id;
1077 int retval; 1085 int retval;
@@ -1138,7 +1146,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
1138 cpufreq_cpu_put(policy); 1146 cpufreq_cpu_put(policy);
1139 } 1147 }
1140 1148
1141 return (ret_freq); 1149 return ret_freq;
1142} 1150}
1143EXPORT_SYMBOL(cpufreq_quick_get); 1151EXPORT_SYMBOL(cpufreq_quick_get);
1144 1152
@@ -1149,7 +1157,7 @@ static unsigned int __cpufreq_get(unsigned int cpu)
1149 unsigned int ret_freq = 0; 1157 unsigned int ret_freq = 0;
1150 1158
1151 if (!cpufreq_driver->get) 1159 if (!cpufreq_driver->get)
1152 return (ret_freq); 1160 return ret_freq;
1153 1161
1154 ret_freq = cpufreq_driver->get(cpu); 1162 ret_freq = cpufreq_driver->get(cpu);
1155 1163
@@ -1163,7 +1171,7 @@ static unsigned int __cpufreq_get(unsigned int cpu)
1163 } 1171 }
1164 } 1172 }
1165 1173
1166 return (ret_freq); 1174 return ret_freq;
1167} 1175}
1168 1176
1169/** 1177/**
@@ -1190,7 +1198,7 @@ unsigned int cpufreq_get(unsigned int cpu)
1190out_policy: 1198out_policy:
1191 cpufreq_cpu_put(policy); 1199 cpufreq_cpu_put(policy);
1192out: 1200out:
1193 return (ret_freq); 1201 return ret_freq;
1194} 1202}
1195EXPORT_SYMBOL(cpufreq_get); 1203EXPORT_SYMBOL(cpufreq_get);
1196 1204
@@ -1199,7 +1207,7 @@ EXPORT_SYMBOL(cpufreq_get);
1199 * cpufreq_suspend - let the low level driver prepare for suspend 1207 * cpufreq_suspend - let the low level driver prepare for suspend
1200 */ 1208 */
1201 1209
1202static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg) 1210static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1203{ 1211{
1204 int cpu = sysdev->id; 1212 int cpu = sysdev->id;
1205 int ret = 0; 1213 int ret = 0;
@@ -1221,22 +1229,18 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
1221 return -EINVAL; 1229 return -EINVAL;
1222 1230
1223 /* only handle each CPU group once */ 1231 /* only handle each CPU group once */
1224 if (unlikely(cpu_policy->cpu != cpu)) { 1232 if (unlikely(cpu_policy->cpu != cpu))
1225 cpufreq_cpu_put(cpu_policy); 1233 goto out;
1226 return 0;
1227 }
1228 1234
1229 if (cpufreq_driver->suspend) { 1235 if (cpufreq_driver->suspend) {
1230 ret = cpufreq_driver->suspend(cpu_policy, pmsg); 1236 ret = cpufreq_driver->suspend(cpu_policy, pmsg);
1231 if (ret) { 1237 if (ret) {
1232 printk(KERN_ERR "cpufreq: suspend failed in ->suspend " 1238 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1233 "step on CPU %u\n", cpu_policy->cpu); 1239 "step on CPU %u\n", cpu_policy->cpu);
1234 cpufreq_cpu_put(cpu_policy); 1240 goto out;
1235 return ret;
1236 } 1241 }
1237 } 1242 }
1238 1243
1239
1240 if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS) 1244 if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
1241 goto out; 1245 goto out;
1242 1246
@@ -1270,7 +1274,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
1270 1274
1271out: 1275out:
1272 cpufreq_cpu_put(cpu_policy); 1276 cpufreq_cpu_put(cpu_policy);
1273 return 0; 1277 return ret;
1274} 1278}
1275 1279
1276/** 1280/**
@@ -1281,7 +1285,7 @@ out:
1281 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are 1285 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
1282 * restored. 1286 * restored.
1283 */ 1287 */
1284static int cpufreq_resume(struct sys_device * sysdev) 1288static int cpufreq_resume(struct sys_device *sysdev)
1285{ 1289{
1286 int cpu = sysdev->id; 1290 int cpu = sysdev->id;
1287 int ret = 0; 1291 int ret = 0;
@@ -1302,18 +1306,15 @@ static int cpufreq_resume(struct sys_device * sysdev)
1302 return -EINVAL; 1306 return -EINVAL;
1303 1307
1304 /* only handle each CPU group once */ 1308 /* only handle each CPU group once */
1305 if (unlikely(cpu_policy->cpu != cpu)) { 1309 if (unlikely(cpu_policy->cpu != cpu))
1306 cpufreq_cpu_put(cpu_policy); 1310 goto fail;
1307 return 0;
1308 }
1309 1311
1310 if (cpufreq_driver->resume) { 1312 if (cpufreq_driver->resume) {
1311 ret = cpufreq_driver->resume(cpu_policy); 1313 ret = cpufreq_driver->resume(cpu_policy);
1312 if (ret) { 1314 if (ret) {
1313 printk(KERN_ERR "cpufreq: resume failed in ->resume " 1315 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1314 "step on CPU %u\n", cpu_policy->cpu); 1316 "step on CPU %u\n", cpu_policy->cpu);
1315 cpufreq_cpu_put(cpu_policy); 1317 goto fail;
1316 return ret;
1317 } 1318 }
1318 } 1319 }
1319 1320
@@ -1353,6 +1354,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
1353 1354
1354out: 1355out:
1355 schedule_work(&cpu_policy->update); 1356 schedule_work(&cpu_policy->update);
1357fail:
1356 cpufreq_cpu_put(cpu_policy); 1358 cpufreq_cpu_put(cpu_policy);
1357 return ret; 1359 return ret;
1358} 1360}
@@ -1386,6 +1388,8 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1386{ 1388{
1387 int ret; 1389 int ret;
1388 1390
1391 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1392
1389 switch (list) { 1393 switch (list) {
1390 case CPUFREQ_TRANSITION_NOTIFIER: 1394 case CPUFREQ_TRANSITION_NOTIFIER:
1391 ret = srcu_notifier_chain_register( 1395 ret = srcu_notifier_chain_register(
@@ -1848,7 +1852,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1848 cpufreq_debug_enable_ratelimit(); 1852 cpufreq_debug_enable_ratelimit();
1849 } 1853 }
1850 1854
1851 return (ret); 1855 return ret;
1852} 1856}
1853EXPORT_SYMBOL_GPL(cpufreq_register_driver); 1857EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1854 1858
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 13fe06b94b0a..88d2f44fba48 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -35,12 +35,12 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
35 return 0; 35 return 0;
36} 36}
37 37
38static struct cpufreq_governor cpufreq_gov_powersave = { 38struct cpufreq_governor cpufreq_gov_powersave = {
39 .name = "powersave", 39 .name = "powersave",
40 .governor = cpufreq_governor_powersave, 40 .governor = cpufreq_governor_powersave,
41 .owner = THIS_MODULE, 41 .owner = THIS_MODULE,
42}; 42};
43 43EXPORT_SYMBOL(cpufreq_gov_powersave);
44 44
45static int __init cpufreq_gov_powersave_init(void) 45static int __init cpufreq_gov_powersave_init(void)
46{ 46{
@@ -58,5 +58,9 @@ MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
58MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'"); 58MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60 60
61#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
62fs_initcall(cpufreq_gov_powersave_init);
63#else
61module_init(cpufreq_gov_powersave_init); 64module_init(cpufreq_gov_powersave_init);
65#endif
62module_exit(cpufreq_gov_powersave_exit); 66module_exit(cpufreq_gov_powersave_exit);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 070421a5480e..ae70d63a8b26 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -114,7 +114,7 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
114 stat->freq_table[i]); 114 stat->freq_table[i]);
115 } 115 }
116 if (len >= PAGE_SIZE) 116 if (len >= PAGE_SIZE)
117 return len; 117 return PAGE_SIZE;
118 118
119 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 119 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
120 120
@@ -131,8 +131,12 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
131 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", 131 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
132 stat->trans_table[i*stat->max_state+j]); 132 stat->trans_table[i*stat->max_state+j]);
133 } 133 }
134 if (len >= PAGE_SIZE)
135 break;
134 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 136 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
135 } 137 }
138 if (len >= PAGE_SIZE)
139 return PAGE_SIZE;
136 return len; 140 return len;
137} 141}
138CPUFREQ_STATDEVICE_ATTR(trans_table,0444,show_trans_table); 142CPUFREQ_STATDEVICE_ATTR(trans_table,0444,show_trans_table);
@@ -284,7 +288,7 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
284 if (!stat) 288 if (!stat)
285 return 0; 289 return 0;
286 290
287 old_index = freq_table_get_index(stat, freq->old); 291 old_index = stat->last_index;
288 new_index = freq_table_get_index(stat, freq->new); 292 new_index = freq_table_get_index(stat, freq->new);
289 293
290 cpufreq_stats_update(freq->cpu); 294 cpufreq_stats_update(freq->cpu);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 2b382990fe58..6e6c3c4aea6b 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -67,7 +67,7 @@ config EDAC_E7XXX
67 E7205, E7500, E7501 and E7505 server chipsets. 67 E7205, E7500, E7501 and E7505 server chipsets.
68 68
69config EDAC_E752X 69config EDAC_E752X
70 tristate "Intel e752x (e7520, e7525, e7320)" 70 tristate "Intel e752x (e7520, e7525, e7320) and 3100"
71 depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG 71 depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG
72 help 72 help
73 Support for error detection and correction on the Intel 73 Support for error detection and correction on the Intel
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f22075410591..2b95f1a3edfc 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -17,6 +17,7 @@
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/pci_ids.h> 18#include <linux/pci_ids.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/edac.h>
20#include "edac_core.h" 21#include "edac_core.h"
21 22
22#define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ 23#define AMD76X_REVISION " Ver: 2.0.2 " __DATE__
@@ -344,6 +345,9 @@ static struct pci_driver amd76x_driver = {
344 345
345static int __init amd76x_init(void) 346static int __init amd76x_init(void)
346{ 347{
348 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
349 opstate_init();
350
347 return pci_register_driver(&amd76x_driver); 351 return pci_register_driver(&amd76x_driver);
348} 352}
349 353
@@ -358,3 +362,6 @@ module_exit(amd76x_exit);
358MODULE_LICENSE("GPL"); 362MODULE_LICENSE("GPL");
359MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); 363MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
360MODULE_DESCRIPTION("MC support for AMD 76x memory controllers"); 364MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
365
366module_param(edac_op_state, int, 0444);
367MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 6eb434749cd5..c94a0eb492cb 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -29,6 +29,7 @@
29#define EDAC_MOD_STR "e752x_edac" 29#define EDAC_MOD_STR "e752x_edac"
30 30
31static int force_function_unhide; 31static int force_function_unhide;
32static int sysbus_parity = -1;
32 33
33static struct edac_pci_ctl_info *e752x_pci; 34static struct edac_pci_ctl_info *e752x_pci;
34 35
@@ -62,6 +63,14 @@ static struct edac_pci_ctl_info *e752x_pci;
62#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 63#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
63#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ 64#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
64 65
66#ifndef PCI_DEVICE_ID_INTEL_3100_0
67#define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
68#endif /* PCI_DEVICE_ID_INTEL_3100_0 */
69
70#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
71#define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
72#endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
73
65#define E752X_NR_CSROWS 8 /* number of csrows */ 74#define E752X_NR_CSROWS 8 /* number of csrows */
66 75
67/* E752X register addresses - device 0 function 0 */ 76/* E752X register addresses - device 0 function 0 */
@@ -152,6 +161,12 @@ static struct edac_pci_ctl_info *e752x_pci;
152 /* error syndrome register (16b) */ 161 /* error syndrome register (16b) */
153#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ 162#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
154 163
164/* 3100 IMCH specific register addresses - device 0 function 1 */
165#define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
166#define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
167#define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
168#define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
169
155/* ICH5R register addresses - device 30 function 0 */ 170/* ICH5R register addresses - device 30 function 0 */
156#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ 171#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
157#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ 172#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
@@ -160,7 +175,8 @@ static struct edac_pci_ctl_info *e752x_pci;
160enum e752x_chips { 175enum e752x_chips {
161 E7520 = 0, 176 E7520 = 0,
162 E7525 = 1, 177 E7525 = 1,
163 E7320 = 2 178 E7320 = 2,
179 I3100 = 3
164}; 180};
165 181
166struct e752x_pvt { 182struct e752x_pvt {
@@ -185,8 +201,10 @@ struct e752x_dev_info {
185struct e752x_error_info { 201struct e752x_error_info {
186 u32 ferr_global; 202 u32 ferr_global;
187 u32 nerr_global; 203 u32 nerr_global;
188 u8 hi_ferr; 204 u32 nsi_ferr; /* 3100 only */
189 u8 hi_nerr; 205 u32 nsi_nerr; /* 3100 only */
206 u8 hi_ferr; /* all but 3100 */
207 u8 hi_nerr; /* all but 3100 */
190 u16 sysbus_ferr; 208 u16 sysbus_ferr;
191 u16 sysbus_nerr; 209 u16 sysbus_nerr;
192 u8 buf_ferr; 210 u8 buf_ferr;
@@ -215,6 +233,10 @@ static const struct e752x_dev_info e752x_devs[] = {
215 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, 233 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
216 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, 234 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
217 .ctl_name = "E7320"}, 235 .ctl_name = "E7320"},
236 [I3100] = {
237 .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
238 .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
239 .ctl_name = "3100"},
218}; 240};
219 241
220static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, 242static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
@@ -402,7 +424,7 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
402static char *global_message[11] = { 424static char *global_message[11] = {
403 "PCI Express C1", "PCI Express C", "PCI Express B1", 425 "PCI Express C1", "PCI Express C", "PCI Express B1",
404 "PCI Express B", "PCI Express A1", "PCI Express A", 426 "PCI Express B", "PCI Express A1", "PCI Express A",
405 "DMA Controler", "HUB Interface", "System Bus", 427 "DMA Controler", "HUB or NS Interface", "System Bus",
406 "DRAM Controler", "Internal Buffer" 428 "DRAM Controler", "Internal Buffer"
407}; 429};
408 430
@@ -455,6 +477,63 @@ static inline void hub_error(int fatal, u8 errors, int *error_found,
455 do_hub_error(fatal, errors); 477 do_hub_error(fatal, errors);
456} 478}
457 479
480#define NSI_FATAL_MASK 0x0c080081
481#define NSI_NON_FATAL_MASK 0x23a0ba64
482#define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
483
484static char *nsi_message[30] = {
485 "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
486 "", /* reserved */
487 "NSI Parity Error", /* bit 2, non-fatal */
488 "", /* reserved */
489 "", /* reserved */
490 "Correctable Error Message", /* bit 5, non-fatal */
491 "Non-Fatal Error Message", /* bit 6, non-fatal */
492 "Fatal Error Message", /* bit 7, fatal */
493 "", /* reserved */
494 "Receiver Error", /* bit 9, non-fatal */
495 "", /* reserved */
496 "Bad TLP", /* bit 11, non-fatal */
497 "Bad DLLP", /* bit 12, non-fatal */
498 "REPLAY_NUM Rollover", /* bit 13, non-fatal */
499 "", /* reserved */
500 "Replay Timer Timeout", /* bit 15, non-fatal */
501 "", /* reserved */
502 "", /* reserved */
503 "", /* reserved */
504 "Data Link Protocol Error", /* bit 19, fatal */
505 "", /* reserved */
506 "Poisoned TLP", /* bit 21, non-fatal */
507 "", /* reserved */
508 "Completion Timeout", /* bit 23, non-fatal */
509 "Completer Abort", /* bit 24, non-fatal */
510 "Unexpected Completion", /* bit 25, non-fatal */
511 "Receiver Overflow", /* bit 26, fatal */
512 "Malformed TLP", /* bit 27, fatal */
513 "", /* reserved */
514 "Unsupported Request" /* bit 29, non-fatal */
515};
516
517static void do_nsi_error(int fatal, u32 errors)
518{
519 int i;
520
521 for (i = 0; i < 30; i++) {
522 if (errors & (1 << i))
523 printk(KERN_WARNING "%sError %s\n",
524 fatal_message[fatal], nsi_message[i]);
525 }
526}
527
528static inline void nsi_error(int fatal, u32 errors, int *error_found,
529 int handle_error)
530{
531 *error_found = 1;
532
533 if (handle_error)
534 do_nsi_error(fatal, errors);
535}
536
458static char *membuf_message[4] = { 537static char *membuf_message[4] = {
459 "Internal PMWB to DRAM parity", 538 "Internal PMWB to DRAM parity",
460 "Internal PMWB to System Bus Parity", 539 "Internal PMWB to System Bus Parity",
@@ -546,6 +625,31 @@ static void e752x_check_hub_interface(struct e752x_error_info *info,
546 } 625 }
547} 626}
548 627
628static void e752x_check_ns_interface(struct e752x_error_info *info,
629 int *error_found, int handle_error)
630{
631 u32 stat32;
632
633 stat32 = info->nsi_ferr;
634 if (stat32 & NSI_ERR_MASK) { /* Error, so process */
635 if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */
636 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
637 handle_error);
638 if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
639 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
640 handle_error);
641 }
642 stat32 = info->nsi_nerr;
643 if (stat32 & NSI_ERR_MASK) {
644 if (stat32 & NSI_FATAL_MASK)
645 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
646 handle_error);
647 if (stat32 & NSI_NON_FATAL_MASK)
648 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
649 handle_error);
650 }
651}
652
549static void e752x_check_sysbus(struct e752x_error_info *info, 653static void e752x_check_sysbus(struct e752x_error_info *info,
550 int *error_found, int handle_error) 654 int *error_found, int handle_error)
551{ 655{
@@ -653,7 +757,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
653 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); 757 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
654 758
655 if (info->ferr_global) { 759 if (info->ferr_global) {
656 pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr); 760 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
761 pci_read_config_dword(dev, I3100_NSI_FERR,
762 &info->nsi_ferr);
763 info->hi_ferr = 0;
764 } else {
765 pci_read_config_byte(dev, E752X_HI_FERR,
766 &info->hi_ferr);
767 info->nsi_ferr = 0;
768 }
657 pci_read_config_word(dev, E752X_SYSBUS_FERR, 769 pci_read_config_word(dev, E752X_SYSBUS_FERR,
658 &info->sysbus_ferr); 770 &info->sysbus_ferr);
659 pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); 771 pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
@@ -669,10 +781,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
669 pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, 781 pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
670 &info->dram_retr_add); 782 &info->dram_retr_add);
671 783
784 /* ignore the reserved bits just in case */
672 if (info->hi_ferr & 0x7f) 785 if (info->hi_ferr & 0x7f)
673 pci_write_config_byte(dev, E752X_HI_FERR, 786 pci_write_config_byte(dev, E752X_HI_FERR,
674 info->hi_ferr); 787 info->hi_ferr);
675 788
789 if (info->nsi_ferr & NSI_ERR_MASK)
790 pci_write_config_dword(dev, I3100_NSI_FERR,
791 info->nsi_ferr);
792
676 if (info->sysbus_ferr) 793 if (info->sysbus_ferr)
677 pci_write_config_word(dev, E752X_SYSBUS_FERR, 794 pci_write_config_word(dev, E752X_SYSBUS_FERR,
678 info->sysbus_ferr); 795 info->sysbus_ferr);
@@ -692,7 +809,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
692 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global); 809 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
693 810
694 if (info->nerr_global) { 811 if (info->nerr_global) {
695 pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr); 812 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
813 pci_read_config_dword(dev, I3100_NSI_NERR,
814 &info->nsi_nerr);
815 info->hi_nerr = 0;
816 } else {
817 pci_read_config_byte(dev, E752X_HI_NERR,
818 &info->hi_nerr);
819 info->nsi_nerr = 0;
820 }
696 pci_read_config_word(dev, E752X_SYSBUS_NERR, 821 pci_read_config_word(dev, E752X_SYSBUS_NERR,
697 &info->sysbus_nerr); 822 &info->sysbus_nerr);
698 pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); 823 pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
@@ -706,6 +831,10 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
706 pci_write_config_byte(dev, E752X_HI_NERR, 831 pci_write_config_byte(dev, E752X_HI_NERR,
707 info->hi_nerr); 832 info->hi_nerr);
708 833
834 if (info->nsi_nerr & NSI_ERR_MASK)
835 pci_write_config_dword(dev, I3100_NSI_NERR,
836 info->nsi_nerr);
837
709 if (info->sysbus_nerr) 838 if (info->sysbus_nerr)
710 pci_write_config_word(dev, E752X_SYSBUS_NERR, 839 pci_write_config_word(dev, E752X_SYSBUS_NERR,
711 info->sysbus_nerr); 840 info->sysbus_nerr);
@@ -750,6 +879,7 @@ static int e752x_process_error_info(struct mem_ctl_info *mci,
750 global_error(0, stat32, &error_found, handle_errors); 879 global_error(0, stat32, &error_found, handle_errors);
751 880
752 e752x_check_hub_interface(info, &error_found, handle_errors); 881 e752x_check_hub_interface(info, &error_found, handle_errors);
882 e752x_check_ns_interface(info, &error_found, handle_errors);
753 e752x_check_sysbus(info, &error_found, handle_errors); 883 e752x_check_sysbus(info, &error_found, handle_errors);
754 e752x_check_membuf(info, &error_found, handle_errors); 884 e752x_check_membuf(info, &error_found, handle_errors);
755 e752x_check_dram(mci, info, &error_found, handle_errors); 885 e752x_check_dram(mci, info, &error_found, handle_errors);
@@ -920,15 +1050,53 @@ fail:
920 return 1; 1050 return 1;
921} 1051}
922 1052
1053/* Setup system bus parity mask register.
1054 * Sysbus parity supported on:
1055 * e7320/e7520/e7525 + Xeon
1056 * i3100 + Xeon/Celeron
1057 * Sysbus parity not supported on:
1058 * i3100 + Pentium M/Celeron M/Core Duo/Core2 Duo
1059 */
1060static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
1061{
1062 char *cpu_id = cpu_data(0).x86_model_id;
1063 struct pci_dev *dev = pvt->dev_d0f1;
1064 int enable = 1;
1065
1066 /* Allow module parameter override, else see if CPU supports parity */
1067 if (sysbus_parity != -1) {
1068 enable = sysbus_parity;
1069 } else if (cpu_id[0] &&
1070 ((strstr(cpu_id, "Pentium") && strstr(cpu_id, " M ")) ||
1071 (strstr(cpu_id, "Celeron") && strstr(cpu_id, " M ")) ||
1072 (strstr(cpu_id, "Core") && strstr(cpu_id, "Duo")))) {
1073 e752x_printk(KERN_INFO, "System Bus Parity not "
1074 "supported by CPU, disabling\n");
1075 enable = 0;
1076 }
1077
1078 if (enable)
1079 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
1080 else
1081 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
1082}
1083
923static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) 1084static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
924{ 1085{
925 struct pci_dev *dev; 1086 struct pci_dev *dev;
926 1087
927 dev = pvt->dev_d0f1; 1088 dev = pvt->dev_d0f1;
928 /* Turn off error disable & SMI in case the BIOS turned it on */ 1089 /* Turn off error disable & SMI in case the BIOS turned it on */
929 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); 1090 if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
930 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); 1091 pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
931 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00); 1092 pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
1093 } else {
1094 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
1095 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
1096 }
1097
1098 e752x_init_sysbus_parity_mask(pvt);
1099
932 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); 1100 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
933 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); 1101 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
934 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); 1102 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
@@ -949,16 +1117,6 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
949 debugf0("%s(): mci\n", __func__); 1117 debugf0("%s(): mci\n", __func__);
950 debugf0("Starting Probe1\n"); 1118 debugf0("Starting Probe1\n");
951 1119
952 /* make sure error reporting method is sane */
953 switch (edac_op_state) {
954 case EDAC_OPSTATE_POLL:
955 case EDAC_OPSTATE_NMI:
956 break;
957 default:
958 edac_op_state = EDAC_OPSTATE_POLL;
959 break;
960 }
961
962 /* check to see if device 0 function 1 is enabled; if it isn't, we 1120 /* check to see if device 0 function 1 is enabled; if it isn't, we
963 * assume the BIOS has reserved it for a reason and is expecting 1121 * assume the BIOS has reserved it for a reason and is expecting
964 * exclusive access, we take care not to violate that assumption and 1122 * exclusive access, we take care not to violate that assumption and
@@ -985,8 +1143,9 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
985 1143
986 debugf3("%s(): init mci\n", __func__); 1144 debugf3("%s(): init mci\n", __func__);
987 mci->mtype_cap = MEM_FLAG_RDDR; 1145 mci->mtype_cap = MEM_FLAG_RDDR;
988 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | 1146 /* 3100 IMCH supports SECDEC only */
989 EDAC_FLAG_S4ECD4ED; 1147 mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
1148 (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
990 /* FIXME - what if different memory types are in different csrows? */ 1149 /* FIXME - what if different memory types are in different csrows? */
991 mci->mod_name = EDAC_MOD_STR; 1150 mci->mod_name = EDAC_MOD_STR;
992 mci->mod_ver = E752X_REVISION; 1151 mci->mod_ver = E752X_REVISION;
@@ -1018,7 +1177,10 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1018 e752x_init_csrows(mci, pdev, ddrcsr); 1177 e752x_init_csrows(mci, pdev, ddrcsr);
1019 e752x_init_mem_map_table(pdev, pvt); 1178 e752x_init_mem_map_table(pdev, pvt);
1020 1179
1021 mci->edac_cap |= EDAC_FLAG_NONE; 1180 if (dev_idx == I3100)
1181 mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
1182 else
1183 mci->edac_cap |= EDAC_FLAG_NONE;
1022 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 1184 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
1023 1185
1024 /* load the top of low memory, remap base, and remap limit vars */ 1186 /* load the top of low memory, remap base, and remap limit vars */
@@ -1110,6 +1272,9 @@ static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
1110 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1272 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1111 E7320}, 1273 E7320},
1112 { 1274 {
1275 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1276 I3100},
1277 {
1113 0, 1278 0,
1114 } /* 0 terminated list. */ 1279 } /* 0 terminated list. */
1115}; 1280};
@@ -1128,6 +1293,10 @@ static int __init e752x_init(void)
1128 int pci_rc; 1293 int pci_rc;
1129 1294
1130 debugf3("%s()\n", __func__); 1295 debugf3("%s()\n", __func__);
1296
1297 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1298 opstate_init();
1299
1131 pci_rc = pci_register_driver(&e752x_driver); 1300 pci_rc = pci_register_driver(&e752x_driver);
1132 return (pci_rc < 0) ? pci_rc : 0; 1301 return (pci_rc < 0) ? pci_rc : 0;
1133} 1302}
@@ -1143,10 +1312,15 @@ module_exit(e752x_exit);
1143 1312
1144MODULE_LICENSE("GPL"); 1313MODULE_LICENSE("GPL");
1145MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n"); 1314MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
1146MODULE_DESCRIPTION("MC support for Intel e752x memory controllers"); 1315MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
1147 1316
1148module_param(force_function_unhide, int, 0444); 1317module_param(force_function_unhide, int, 0444);
1149MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:" 1318MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
1150 " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access"); 1319 " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
1320
1151module_param(edac_op_state, int, 0444); 1321module_param(edac_op_state, int, 0444);
1152MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 1322MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1323
1324module_param(sysbus_parity, int, 0444);
1325MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
1326 " 1=enable system bus parity checking, default=auto-detect");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 96ecc4926641..c7d11cc4e21a 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -414,16 +414,6 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
414 414
415 debugf0("%s(): mci\n", __func__); 415 debugf0("%s(): mci\n", __func__);
416 416
417 /* make sure error reporting method is sane */
418 switch (edac_op_state) {
419 case EDAC_OPSTATE_POLL:
420 case EDAC_OPSTATE_NMI:
421 break;
422 default:
423 edac_op_state = EDAC_OPSTATE_POLL;
424 break;
425 }
426
427 pci_read_config_dword(pdev, E7XXX_DRC, &drc); 417 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
428 418
429 drc_chan = dual_channel_active(drc, dev_idx); 419 drc_chan = dual_channel_active(drc, dev_idx);
@@ -565,6 +555,9 @@ static struct pci_driver e7xxx_driver = {
565 555
566static int __init e7xxx_init(void) 556static int __init e7xxx_init(void)
567{ 557{
558 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
559 opstate_init();
560
568 return pci_register_driver(&e7xxx_driver); 561 return pci_register_driver(&e7xxx_driver);
569} 562}
570 563
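The edac hunks above and below all follow one pattern: the per-driver switch that sanity-checked edac_op_state inside each probe routine is removed, and each module's init routine instead calls a shared opstate_init() before registering its PCI driver. A minimal sketch of what such a helper presumably does, reconstructed only from the switch statements being deleted here (not taken from the actual edac core source):

/* Sketch only, reconstructed from the removed per-driver checks:
 * clamp edac_op_state to a supported error-reporting mode. */
void opstate_init(void)
{
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_NMI:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_POLL;
		break;
	}
}

Since edac_op_state is exposed as a module parameter in each driver, the check only needs to run once per module load rather than once per probed device, which is why it moves from probe() to init().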
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index a9aa845dbe74..b27b13c5eb5a 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -97,7 +97,7 @@ extern int edac_debug_level;
97#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \ 97#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
98 PCI_DEVICE_ID_ ## vend ## _ ## dev 98 PCI_DEVICE_ID_ ## vend ## _ ## dev
99 99
100#define dev_name(dev) (dev)->dev_name 100#define edac_dev_name(dev) (dev)->dev_name
101 101
102/* memory devices */ 102/* memory devices */
103enum dev_type { 103enum dev_type {
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index b9552bc03dea..5fcd3d89c75d 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -36,7 +36,7 @@
36 * is protected by the 'device_ctls_mutex' lock 36 * is protected by the 'device_ctls_mutex' lock
37 */ 37 */
38static DEFINE_MUTEX(device_ctls_mutex); 38static DEFINE_MUTEX(device_ctls_mutex);
39static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list); 39static LIST_HEAD(edac_device_list);
40 40
41#ifdef CONFIG_EDAC_DEBUG 41#ifdef CONFIG_EDAC_DEBUG
42static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) 42static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
@@ -333,7 +333,7 @@ static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
333fail0: 333fail0:
334 edac_printk(KERN_WARNING, EDAC_MC, 334 edac_printk(KERN_WARNING, EDAC_MC,
335 "%s (%s) %s %s already assigned %d\n", 335 "%s (%s) %s %s already assigned %d\n",
336 rover->dev->bus_id, dev_name(rover), 336 rover->dev->bus_id, edac_dev_name(rover),
337 rover->mod_name, rover->ctl_name, rover->dev_idx); 337 rover->mod_name, rover->ctl_name, rover->dev_idx);
338 return 1; 338 return 1;
339 339
@@ -375,37 +375,6 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info
375 wait_for_completion(&edac_device->removal_complete); 375 wait_for_completion(&edac_device->removal_complete);
376} 376}
377 377
378/**
379 * edac_device_find
380 * Search for a edac_device_ctl_info structure whose index is 'idx'.
381 *
382 * If found, return a pointer to the structure.
383 * Else return NULL.
384 *
385 * Caller must hold device_ctls_mutex.
386 */
387struct edac_device_ctl_info *edac_device_find(int idx)
388{
389 struct list_head *item;
390 struct edac_device_ctl_info *edac_dev;
391
392 /* Iterate over list, looking for exact match of ID */
393 list_for_each(item, &edac_device_list) {
394 edac_dev = list_entry(item, struct edac_device_ctl_info, link);
395
396 if (edac_dev->dev_idx >= idx) {
397 if (edac_dev->dev_idx == idx)
398 return edac_dev;
399
400 /* not on list, so terminate early */
401 break;
402 }
403 }
404
405 return NULL;
406}
407EXPORT_SYMBOL_GPL(edac_device_find);
408
409/* 378/*
410 * edac_device_workq_function 379 * edac_device_workq_function
411 * performs the operation scheduled by a workq request 380 * performs the operation scheduled by a workq request
@@ -569,7 +538,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
569 "'%s': DEV '%s' (%s)\n", 538 "'%s': DEV '%s' (%s)\n",
570 edac_dev->mod_name, 539 edac_dev->mod_name,
571 edac_dev->ctl_name, 540 edac_dev->ctl_name,
572 dev_name(edac_dev), 541 edac_dev_name(edac_dev),
573 edac_op_state_to_string(edac_dev->op_state)); 542 edac_op_state_to_string(edac_dev->op_state));
574 543
575 mutex_unlock(&device_ctls_mutex); 544 mutex_unlock(&device_ctls_mutex);
@@ -630,7 +599,7 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
630 edac_printk(KERN_INFO, EDAC_MC, 599 edac_printk(KERN_INFO, EDAC_MC,
631 "Removed device %d for %s %s: DEV %s\n", 600 "Removed device %d for %s %s: DEV %s\n",
632 edac_dev->dev_idx, 601 edac_dev->dev_idx,
633 edac_dev->mod_name, edac_dev->ctl_name, dev_name(edac_dev)); 602 edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));
634 603
635 return edac_dev; 604 return edac_dev;
636} 605}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 063a1bffe38b..d110392d48f4 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -36,7 +36,7 @@
36 36
37/* lock to memory controller's control array */ 37/* lock to memory controller's control array */
38static DEFINE_MUTEX(mem_ctls_mutex); 38static DEFINE_MUTEX(mem_ctls_mutex);
39static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); 39static LIST_HEAD(mc_devices);
40 40
41#ifdef CONFIG_EDAC_DEBUG 41#ifdef CONFIG_EDAC_DEBUG
42 42
@@ -402,7 +402,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
402fail0: 402fail0:
403 edac_printk(KERN_WARNING, EDAC_MC, 403 edac_printk(KERN_WARNING, EDAC_MC,
404 "%s (%s) %s %s already assigned %d\n", p->dev->bus_id, 404 "%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
405 dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); 405 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
406 return 1; 406 return 1;
407 407
408fail1: 408fail1:
@@ -517,7 +517,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
517 517
518 /* Report action taken */ 518 /* Report action taken */
519 edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':" 519 edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
520 " DEV %s\n", mci->mod_name, mci->ctl_name, dev_name(mci)); 520 " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
521 521
522 mutex_unlock(&mem_ctls_mutex); 522 mutex_unlock(&mem_ctls_mutex);
523 return 0; 523 return 0;
@@ -565,7 +565,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
565 565
566 edac_printk(KERN_INFO, EDAC_MC, 566 edac_printk(KERN_INFO, EDAC_MC,
567 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, 567 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
568 mci->mod_name, mci->ctl_name, dev_name(mci)); 568 mci->mod_name, mci->ctl_name, edac_dev_name(mci));
569 569
570 return mci; 570 return mci;
571} 571}
@@ -886,24 +886,3 @@ void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
886 mci->csrows[csrow].channels[channel].ce_count++; 886 mci->csrows[csrow].channels[channel].ce_count++;
887} 887}
888EXPORT_SYMBOL(edac_mc_handle_fbd_ce); 888EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
889
890/*
891 * Iterate over all MC instances and check for ECC, et al, errors
892 */
893void edac_check_mc_devices(void)
894{
895 struct list_head *item;
896 struct mem_ctl_info *mci;
897
898 debugf3("%s()\n", __func__);
899 mutex_lock(&mem_ctls_mutex);
900
901 list_for_each(item, &mc_devices) {
902 mci = list_entry(item, struct mem_ctl_info, link);
903
904 if (mci->edac_check != NULL)
905 mci->edac_check(mci);
906 }
907
908 mutex_unlock(&mem_ctls_mutex);
909}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index cbc419c8ebc1..233d4798c3aa 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -27,7 +27,6 @@ extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci);
27extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci); 27extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
28extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); 28extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
29extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); 29extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
30extern void edac_check_mc_devices(void);
31extern int edac_get_log_ue(void); 30extern int edac_get_log_ue(void);
32extern int edac_get_log_ce(void); 31extern int edac_get_log_ce(void);
33extern int edac_get_panic_on_ue(void); 32extern int edac_get_panic_on_ue(void);
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 32be43576a8e..22ec9d5d4312 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -29,7 +29,7 @@
29#include "edac_module.h" 29#include "edac_module.h"
30 30
31static DEFINE_MUTEX(edac_pci_ctls_mutex); 31static DEFINE_MUTEX(edac_pci_ctls_mutex);
32static struct list_head edac_pci_list = LIST_HEAD_INIT(edac_pci_list); 32static LIST_HEAD(edac_pci_list);
33 33
34/* 34/*
35 * edac_pci_alloc_ctl_info 35 * edac_pci_alloc_ctl_info
@@ -150,7 +150,7 @@ static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
150fail0: 150fail0:
151 edac_printk(KERN_WARNING, EDAC_PCI, 151 edac_printk(KERN_WARNING, EDAC_PCI,
152 "%s (%s) %s %s already assigned %d\n", 152 "%s (%s) %s %s already assigned %d\n",
153 rover->dev->bus_id, dev_name(rover), 153 rover->dev->bus_id, edac_dev_name(rover),
154 rover->mod_name, rover->ctl_name, rover->pci_idx); 154 rover->mod_name, rover->ctl_name, rover->pci_idx);
155 return 1; 155 return 1;
156 156
@@ -189,6 +189,9 @@ static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
189 wait_for_completion(&pci->complete); 189 wait_for_completion(&pci->complete);
190} 190}
191 191
192#if 0
 193/* Older code, but might be used in the future */
194
192/* 195/*
193 * edac_pci_find() 196 * edac_pci_find()
194 * Search for an edac_pci_ctl_info structure whose index is 'idx' 197 * Search for an edac_pci_ctl_info structure whose index is 'idx'
@@ -219,6 +222,7 @@ struct edac_pci_ctl_info *edac_pci_find(int idx)
219 return NULL; 222 return NULL;
220} 223}
221EXPORT_SYMBOL_GPL(edac_pci_find); 224EXPORT_SYMBOL_GPL(edac_pci_find);
225#endif
222 226
223/* 227/*
224 * edac_pci_workq_function() 228 * edac_pci_workq_function()
@@ -356,7 +360,7 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
356 " DEV '%s' (%s)\n", 360 " DEV '%s' (%s)\n",
357 pci->mod_name, 361 pci->mod_name,
358 pci->ctl_name, 362 pci->ctl_name,
359 dev_name(pci), edac_op_state_to_string(pci->op_state)); 363 edac_dev_name(pci), edac_op_state_to_string(pci->op_state));
360 364
361 mutex_unlock(&edac_pci_ctls_mutex); 365 mutex_unlock(&edac_pci_ctls_mutex);
362 return 0; 366 return 0;
@@ -411,7 +415,7 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
411 415
412 edac_printk(KERN_INFO, EDAC_PCI, 416 edac_printk(KERN_INFO, EDAC_PCI,
413 "Removed device %d for %s %s: DEV %s\n", 417 "Removed device %d for %s %s: DEV %s\n",
414 pci->pci_idx, pci->mod_name, pci->ctl_name, dev_name(pci)); 418 pci->pci_idx, pci->mod_name, pci->ctl_name, edac_dev_name(pci));
415 419
416 return pci; 420 return pci;
417} 421}
@@ -422,7 +426,7 @@ EXPORT_SYMBOL_GPL(edac_pci_del_device);
422 * 426 *
423 * a Generic parity check API 427 * a Generic parity check API
424 */ 428 */
425void edac_pci_generic_check(struct edac_pci_ctl_info *pci) 429static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
426{ 430{
427 debugf4("%s()\n", __func__); 431 debugf4("%s()\n", __func__);
428 edac_pci_do_parity_check(); 432 edac_pci_do_parity_check();
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 71c3195d3704..2c1fa1bb6df2 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -37,17 +37,17 @@ int edac_pci_get_check_errors(void)
37 return check_pci_errors; 37 return check_pci_errors;
38} 38}
39 39
40int edac_pci_get_log_pe(void) 40static int edac_pci_get_log_pe(void)
41{ 41{
42 return edac_pci_log_pe; 42 return edac_pci_log_pe;
43} 43}
44 44
45int edac_pci_get_log_npe(void) 45static int edac_pci_get_log_npe(void)
46{ 46{
47 return edac_pci_log_npe; 47 return edac_pci_log_npe;
48} 48}
49 49
50int edac_pci_get_panic_on_pe(void) 50static int edac_pci_get_panic_on_pe(void)
51{ 51{
52 return edac_pci_panic_on_pe; 52 return edac_pci_panic_on_pe;
53} 53}
@@ -197,7 +197,8 @@ error_out:
197 * 197 *
198 * unregister the kobj for the EDAC PCI instance 198 * unregister the kobj for the EDAC PCI instance
199 */ 199 */
200void edac_pci_unregister_sysfs_instance_kobj(struct edac_pci_ctl_info *pci) 200static void edac_pci_unregister_sysfs_instance_kobj(
201 struct edac_pci_ctl_info *pci)
201{ 202{
202 debugf0("%s()\n", __func__); 203 debugf0("%s()\n", __func__);
203 204
@@ -337,7 +338,7 @@ static struct kobj_type ktype_edac_pci_main_kobj = {
337 * setup the sysfs for EDAC PCI attributes 338 * setup the sysfs for EDAC PCI attributes
338 * assumes edac_class has already been initialized 339 * assumes edac_class has already been initialized
339 */ 340 */
340int edac_pci_main_kobj_setup(void) 341static int edac_pci_main_kobj_setup(void)
341{ 342{
342 int err; 343 int err;
343 struct sysdev_class *edac_class; 344 struct sysdev_class *edac_class;
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 5d4292811c14..6c9a0f2a593c 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -326,15 +326,6 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
326 return -ENODEV; 326 return -ENODEV;
327 } 327 }
328 328
329 switch (edac_op_state) {
330 case EDAC_OPSTATE_POLL:
331 case EDAC_OPSTATE_NMI:
332 break;
333 default:
334 edac_op_state = EDAC_OPSTATE_POLL;
335 break;
336 }
337
338 c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */ 329 c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */
339 c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */ 330 c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */
340 c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */ 331 c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */
@@ -503,6 +494,10 @@ static int __init i3000_init(void)
503 int pci_rc; 494 int pci_rc;
504 495
505 debugf3("MC: %s()\n", __func__); 496 debugf3("MC: %s()\n", __func__);
497
498 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
499 opstate_init();
500
506 pci_rc = pci_register_driver(&i3000_driver); 501 pci_rc = pci_register_driver(&i3000_driver);
507 if (pci_rc < 0) 502 if (pci_rc < 0)
508 goto fail0; 503 goto fail0;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 5a852017c17a..4a16b5b61cfb 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1286,16 +1286,6 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1286 if (PCI_FUNC(pdev->devfn) != 0) 1286 if (PCI_FUNC(pdev->devfn) != 0)
1287 return -ENODEV; 1287 return -ENODEV;
1288 1288
1289 /* make sure error reporting method is sane */
1290 switch (edac_op_state) {
1291 case EDAC_OPSTATE_POLL:
1292 case EDAC_OPSTATE_NMI:
1293 break;
1294 default:
1295 edac_op_state = EDAC_OPSTATE_POLL;
1296 break;
1297 }
1298
1299 /* Ask the devices for the number of CSROWS and CHANNELS so 1289 /* Ask the devices for the number of CSROWS and CHANNELS so
1300 * that we can calculate the memory resources, etc 1290 * that we can calculate the memory resources, etc
1301 * 1291 *
@@ -1478,6 +1468,9 @@ static int __init i5000_init(void)
1478 1468
1479 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1469 debugf2("MC: " __FILE__ ": %s()\n", __func__);
1480 1470
1471 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1472 opstate_init();
1473
1481 pci_rc = pci_register_driver(&i5000_driver); 1474 pci_rc = pci_register_driver(&i5000_driver);
1482 1475
1483 return (pci_rc < 0) ? pci_rc : 0; 1476 return (pci_rc < 0) ? pci_rc : 0;
@@ -1501,5 +1494,6 @@ MODULE_AUTHOR
1501 ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>"); 1494 ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
1502MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " 1495MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
1503 I5000_REVISION); 1496 I5000_REVISION);
1497
1504module_param(edac_op_state, int, 0444); 1498module_param(edac_op_state, int, 0444);
1505MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); 1499MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 83bfe37c4bbb..c5305e3ee434 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -29,6 +29,7 @@
29 29
30#include <linux/slab.h> 30#include <linux/slab.h>
31 31
32#include <linux/edac.h>
32#include "edac_core.h" 33#include "edac_core.h"
33 34
34#define I82443_REVISION "0.1" 35#define I82443_REVISION "0.1"
@@ -386,6 +387,9 @@ static struct pci_driver i82443bxgx_edacmc_driver = {
386 387
387static int __init i82443bxgx_edacmc_init(void) 388static int __init i82443bxgx_edacmc_init(void)
388{ 389{
390 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
391 opstate_init();
392
389 return pci_register_driver(&i82443bxgx_edacmc_driver); 393 return pci_register_driver(&i82443bxgx_edacmc_driver);
390} 394}
391 395
@@ -400,3 +404,6 @@ module_exit(i82443bxgx_edacmc_exit);
400MODULE_LICENSE("GPL"); 404MODULE_LICENSE("GPL");
401MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD"); 405MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD");
402MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers"); 406MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
407
408module_param(edac_op_state, int, 0444);
409MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index f5ecd2c4d813..c0088ba9672b 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -14,6 +14,7 @@
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/pci_ids.h> 15#include <linux/pci_ids.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/edac.h>
17#include "edac_core.h" 18#include "edac_core.h"
18 19
19#define I82860_REVISION " Ver: 2.0.2 " __DATE__ 20#define I82860_REVISION " Ver: 2.0.2 " __DATE__
@@ -294,6 +295,9 @@ static int __init i82860_init(void)
294 295
295 debugf3("%s()\n", __func__); 296 debugf3("%s()\n", __func__);
296 297
298 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
299 opstate_init();
300
297 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) 301 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
298 goto fail0; 302 goto fail0;
299 303
@@ -345,3 +349,6 @@ MODULE_LICENSE("GPL");
345MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " 349MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
346 "Ben Woodard <woodard@redhat.com>"); 350 "Ben Woodard <woodard@redhat.com>");
347MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); 351MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
352
353module_param(edac_op_state, int, 0444);
354MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 031abadc439a..e43bdc43a1bf 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -18,6 +18,7 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/pci_ids.h> 19#include <linux/pci_ids.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/edac.h>
21#include "edac_core.h" 22#include "edac_core.h"
22 23
23#define I82875P_REVISION " Ver: 2.0.2 " __DATE__ 24#define I82875P_REVISION " Ver: 2.0.2 " __DATE__
@@ -393,6 +394,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
393 struct i82875p_error_info discard; 394 struct i82875p_error_info discard;
394 395
395 debugf0("%s()\n", __func__); 396 debugf0("%s()\n", __func__);
397
396 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); 398 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
397 399
398 if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) 400 if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
@@ -532,6 +534,10 @@ static int __init i82875p_init(void)
532 int pci_rc; 534 int pci_rc;
533 535
534 debugf3("%s()\n", __func__); 536 debugf3("%s()\n", __func__);
537
538 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
539 opstate_init();
540
535 pci_rc = pci_register_driver(&i82875p_driver); 541 pci_rc = pci_register_driver(&i82875p_driver);
536 542
537 if (pci_rc < 0) 543 if (pci_rc < 0)
@@ -586,3 +592,6 @@ module_exit(i82875p_exit);
586MODULE_LICENSE("GPL"); 592MODULE_LICENSE("GPL");
587MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); 593MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
588MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); 594MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
595
596module_param(edac_op_state, int, 0444);
597MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0ee888456932..2eed3ea2cf62 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -14,7 +14,7 @@
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/pci_ids.h> 15#include <linux/pci_ids.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17 17#include <linux/edac.h>
18#include "edac_core.h" 18#include "edac_core.h"
19 19
20#define I82975X_REVISION " Ver: 1.0.0 " __DATE__ 20#define I82975X_REVISION " Ver: 1.0.0 " __DATE__
@@ -611,6 +611,9 @@ static int __init i82975x_init(void)
611 611
612 debugf3("%s()\n", __func__); 612 debugf3("%s()\n", __func__);
613 613
614 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
615 opstate_init();
616
614 pci_rc = pci_register_driver(&i82975x_driver); 617 pci_rc = pci_register_driver(&i82975x_driver);
615 if (pci_rc < 0) 618 if (pci_rc < 0)
616 goto fail0; 619 goto fail0;
@@ -664,3 +667,6 @@ module_exit(i82975x_exit);
664MODULE_LICENSE("GPL"); 667MODULE_LICENSE("GPL");
665MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>"); 668MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
666MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); 669MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
670
671module_param(edac_op_state, int, 0444);
672MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 90320917be28..8e6b91bd2e99 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -26,6 +26,7 @@
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/pci_ids.h> 27#include <linux/pci_ids.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/edac.h>
29#include "edac_core.h" 30#include "edac_core.h"
30 31
31#define MODULE_NAME "pasemi_edac" 32#define MODULE_NAME "pasemi_edac"
@@ -284,6 +285,9 @@ static struct pci_driver pasemi_edac_driver = {
284 285
285static int __init pasemi_edac_init(void) 286static int __init pasemi_edac_init(void)
286{ 287{
288 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
289 opstate_init();
290
287 return pci_register_driver(&pasemi_edac_driver); 291 return pci_register_driver(&pasemi_edac_driver);
288} 292}
289 293
@@ -298,3 +302,6 @@ module_exit(pasemi_edac_exit);
298MODULE_LICENSE("GPL"); 302MODULE_LICENSE("GPL");
299MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); 303MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
300MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller"); 304MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller");
305module_param(edac_op_state, int, 0444);
306MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
307
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index e25f712f2dc3..9900675e9598 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -20,6 +20,7 @@
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/pci_ids.h> 21#include <linux/pci_ids.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/edac.h>
23#include "edac_core.h" 24#include "edac_core.h"
24 25
25#define R82600_REVISION " Ver: 2.0.2 " __DATE__ 26#define R82600_REVISION " Ver: 2.0.2 " __DATE__
@@ -393,6 +394,9 @@ static struct pci_driver r82600_driver = {
393 394
394static int __init r82600_init(void) 395static int __init r82600_init(void)
395{ 396{
397 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
398 opstate_init();
399
396 return pci_register_driver(&r82600_driver); 400 return pci_register_driver(&r82600_driver);
397} 401}
398 402
@@ -412,3 +416,6 @@ MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
412module_param(disable_hardware_scrub, bool, 0644); 416module_param(disable_hardware_scrub, bool, 0644);
413MODULE_PARM_DESC(disable_hardware_scrub, 417MODULE_PARM_DESC(disable_hardware_scrub,
414 "If set, disable the chipset's automatic scrub for CEs"); 418 "If set, disable the chipset's automatic scrub for CEs");
419
420module_param(edac_op_state, int, 0444);
421MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 2a999373863e..b2458bb8e9ca 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -784,7 +784,7 @@ static void sbp2_release_target(struct kref *kref)
784 kfree(lu); 784 kfree(lu);
785 } 785 }
786 scsi_remove_host(shost); 786 scsi_remove_host(shost);
787 fw_notify("released %s\n", tgt->bus_id); 787 fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
788 788
789 fw_unit_put(tgt->unit); 789 fw_unit_put(tgt->unit);
790 scsi_host_put(shost); 790 scsi_host_put(shost);
@@ -1487,7 +1487,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1487 if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0) 1487 if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
1488 goto out; 1488 goto out;
1489 1489
1490 memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd)); 1490 memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);
1491 1491
1492 orb->base.callback = complete_command_orb; 1492 orb->base.callback = complete_command_orb;
1493 orb->base.request_bus = 1493 orb->base.request_bus =
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 40ffd767647d..dc2cec6127d1 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -17,6 +17,15 @@ config EDD
17 obscure configurations. Most disk controller BIOS vendors do 17 obscure configurations. Most disk controller BIOS vendors do
18 not yet implement this feature. 18 not yet implement this feature.
19 19
20config EDD_OFF
21 bool "Sets default behavior for EDD detection to off"
22 depends on EDD
23 default n
24 help
25 Say Y if you want EDD disabled by default, even though it is compiled into the
26 kernel. Say N if you want EDD enabled by default. EDD can be dynamically set
27 using the kernel parameter 'edd={on|skipmbr|off}'.
28
20config EFI_VARS 29config EFI_VARS
21 tristate "EFI Variable Support via sysfs" 30 tristate "EFI Variable Support via sysfs"
22 depends on EFI 31 depends on EFI
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index f235940719e7..25918f7dfd0f 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -63,7 +63,7 @@ static void smi_data_buf_free(void)
63 return; 63 return;
64 64
65 dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", 65 dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
66 __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size); 66 __func__, smi_data_buf_phys_addr, smi_data_buf_size);
67 67
68 dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf, 68 dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf,
69 smi_data_buf_handle); 69 smi_data_buf_handle);
@@ -92,7 +92,7 @@ static int smi_data_buf_realloc(unsigned long size)
92 if (!buf) { 92 if (!buf) {
93 dev_dbg(&dcdbas_pdev->dev, 93 dev_dbg(&dcdbas_pdev->dev,
94 "%s: failed to allocate memory size %lu\n", 94 "%s: failed to allocate memory size %lu\n",
95 __FUNCTION__, size); 95 __func__, size);
96 return -ENOMEM; 96 return -ENOMEM;
97 } 97 }
98 /* memory zeroed by dma_alloc_coherent */ 98 /* memory zeroed by dma_alloc_coherent */
@@ -110,7 +110,7 @@ static int smi_data_buf_realloc(unsigned long size)
110 smi_data_buf_size = size; 110 smi_data_buf_size = size;
111 111
112 dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", 112 dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
113 __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size); 113 __func__, smi_data_buf_phys_addr, smi_data_buf_size);
114 114
115 return 0; 115 return 0;
116} 116}
@@ -258,7 +258,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
258 258
259 if (smi_cmd->magic != SMI_CMD_MAGIC) { 259 if (smi_cmd->magic != SMI_CMD_MAGIC) {
260 dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n", 260 dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
261 __FUNCTION__); 261 __func__);
262 return -EBADR; 262 return -EBADR;
263 } 263 }
264 264
@@ -267,7 +267,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
267 set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); 267 set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
268 if (smp_processor_id() != 0) { 268 if (smp_processor_id() != 0) {
269 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", 269 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
270 __FUNCTION__); 270 __func__);
271 ret = -EBUSY; 271 ret = -EBUSY;
272 goto out; 272 goto out;
273 } 273 }
@@ -428,7 +428,7 @@ static int host_control_smi(void)
428 428
429 default: 429 default:
430 dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n", 430 dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
431 __FUNCTION__, host_control_smi_type); 431 __func__, host_control_smi_type);
432 return -ENOSYS; 432 return -ENOSYS;
433 } 433 }
434 434
@@ -456,13 +456,13 @@ static void dcdbas_host_control(void)
456 host_control_action = HC_ACTION_NONE; 456 host_control_action = HC_ACTION_NONE;
457 457
458 if (!smi_data_buf) { 458 if (!smi_data_buf) {
459 dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __FUNCTION__); 459 dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
460 return; 460 return;
461 } 461 }
462 462
463 if (smi_data_buf_size < sizeof(struct apm_cmd)) { 463 if (smi_data_buf_size < sizeof(struct apm_cmd)) {
464 dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n", 464 dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
465 __FUNCTION__); 465 __func__);
466 return; 466 return;
467 } 467 }
468 468
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 477a3d0e3caf..6a8b1e037e07 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -123,7 +123,7 @@ static int create_packet(void *data, size_t length)
123 if (!newpacket) { 123 if (!newpacket) {
124 printk(KERN_WARNING 124 printk(KERN_WARNING
125 "dell_rbu:%s: failed to allocate new " 125 "dell_rbu:%s: failed to allocate new "
126 "packet\n", __FUNCTION__); 126 "packet\n", __func__);
127 retval = -ENOMEM; 127 retval = -ENOMEM;
128 spin_lock(&rbu_data.lock); 128 spin_lock(&rbu_data.lock);
129 goto out_noalloc; 129 goto out_noalloc;
@@ -152,7 +152,7 @@ static int create_packet(void *data, size_t length)
152 printk(KERN_WARNING 152 printk(KERN_WARNING
153 "dell_rbu:%s: failed to allocate " 153 "dell_rbu:%s: failed to allocate "
154 "invalid_addr_packet_array \n", 154 "invalid_addr_packet_array \n",
155 __FUNCTION__); 155 __func__);
156 retval = -ENOMEM; 156 retval = -ENOMEM;
157 spin_lock(&rbu_data.lock); 157 spin_lock(&rbu_data.lock);
158 goto out_alloc_packet; 158 goto out_alloc_packet;
@@ -164,7 +164,7 @@ static int create_packet(void *data, size_t length)
164 if (!packet_data_temp_buf) { 164 if (!packet_data_temp_buf) {
165 printk(KERN_WARNING 165 printk(KERN_WARNING
166 "dell_rbu:%s: failed to allocate new " 166 "dell_rbu:%s: failed to allocate new "
167 "packet\n", __FUNCTION__); 167 "packet\n", __func__);
168 retval = -ENOMEM; 168 retval = -ENOMEM;
169 spin_lock(&rbu_data.lock); 169 spin_lock(&rbu_data.lock);
170 goto out_alloc_packet_array; 170 goto out_alloc_packet_array;
@@ -416,7 +416,7 @@ static int img_update_realloc(unsigned long size)
416 */ 416 */
417 if ((size != 0) && (rbu_data.image_update_buffer == NULL)) { 417 if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
418 printk(KERN_ERR "dell_rbu:%s: corruption " 418 printk(KERN_ERR "dell_rbu:%s: corruption "
419 "check failed\n", __FUNCTION__); 419 "check failed\n", __func__);
420 return -EINVAL; 420 return -EINVAL;
421 } 421 }
422 /* 422 /*
@@ -642,7 +642,7 @@ static ssize_t write_rbu_image_type(struct kobject *kobj,
642 if (req_firm_rc) { 642 if (req_firm_rc) {
643 printk(KERN_ERR 643 printk(KERN_ERR
644 "dell_rbu:%s request_firmware_nowait" 644 "dell_rbu:%s request_firmware_nowait"
645 " failed %d\n", __FUNCTION__, rc); 645 " failed %d\n", __func__, rc);
646 rc = -EIO; 646 rc = -EIO;
647 } else 647 } else
648 rbu_data.entry_created = 1; 648 rbu_data.entry_created = 1;
@@ -718,7 +718,7 @@ static int __init dcdrbu_init(void)
718 if (IS_ERR(rbu_device)) { 718 if (IS_ERR(rbu_device)) {
719 printk(KERN_ERR 719 printk(KERN_ERR
720 "dell_rbu:%s:platform_device_register_simple " 720 "dell_rbu:%s:platform_device_register_simple "
721 "failed\n", __FUNCTION__); 721 "failed\n", __func__);
722 return PTR_ERR(rbu_device); 722 return PTR_ERR(rbu_device);
723 } 723 }
724 724
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index d0e5fa4ea51b..11f17440fea6 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -58,7 +58,7 @@ void __init reserve_ibft_region(void)
58 unsigned int len = 0; 58 unsigned int len = 0;
59 void *virt; 59 void *virt;
60 60
61 ibft_addr = 0; 61 ibft_addr = NULL;
62 62
63 for (pos = IBFT_START; pos < IBFT_END; pos += 16) { 63 for (pos = IBFT_START; pos < IBFT_END; pos += 16) {
64 /* The table can't be inside the VGA BIOS reserved space, 64 /* The table can't be inside the VGA BIOS reserved space,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 24c62b848bf9..7f138c6195ff 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -382,7 +382,7 @@ fail:
382 spin_unlock_irqrestore(&gpio_lock, flags); 382 spin_unlock_irqrestore(&gpio_lock, flags);
383 if (status) 383 if (status)
384 pr_debug("%s: gpio-%d status %d\n", 384 pr_debug("%s: gpio-%d status %d\n",
385 __FUNCTION__, gpio, status); 385 __func__, gpio, status);
386 return status; 386 return status;
387} 387}
388EXPORT_SYMBOL_GPL(gpio_direction_input); 388EXPORT_SYMBOL_GPL(gpio_direction_input);
@@ -420,7 +420,7 @@ fail:
420 spin_unlock_irqrestore(&gpio_lock, flags); 420 spin_unlock_irqrestore(&gpio_lock, flags);
421 if (status) 421 if (status)
422 pr_debug("%s: gpio-%d status %d\n", 422 pr_debug("%s: gpio-%d status %d\n",
423 __FUNCTION__, gpio, status); 423 __func__, gpio, status);
424 return status; 424 return status;
425} 425}
426EXPORT_SYMBOL_GPL(gpio_direction_output); 426EXPORT_SYMBOL_GPL(gpio_direction_output);
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index e0e0af536108..93f916720b13 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -23,21 +23,19 @@
23#define PCA953X_INVERT 2 23#define PCA953X_INVERT 2
24#define PCA953X_DIRECTION 3 24#define PCA953X_DIRECTION 3
25 25
26/* This is temporary - in 2.6.26 i2c_driver_data should replace it. */ 26static const struct i2c_device_id pca953x_id[] = {
27struct pca953x_desc {
28 char name[I2C_NAME_SIZE];
29 unsigned long driver_data;
30};
31
32static const struct pca953x_desc pca953x_descs[] = {
33 { "pca9534", 8, }, 27 { "pca9534", 8, },
34 { "pca9535", 16, }, 28 { "pca9535", 16, },
35 { "pca9536", 4, }, 29 { "pca9536", 4, },
36 { "pca9537", 4, }, 30 { "pca9537", 4, },
37 { "pca9538", 8, }, 31 { "pca9538", 8, },
38 { "pca9539", 16, }, 32 { "pca9539", 16, },
33 { "pca9555", 16, },
34 { "pca9557", 8, },
39 /* REVISIT several pca955x parts should work here too */ 35 /* REVISIT several pca955x parts should work here too */
36 { }
40}; 37};
38MODULE_DEVICE_TABLE(i2c, pca953x_id);
41 39
42struct pca953x_chip { 40struct pca953x_chip {
43 unsigned gpio_start; 41 unsigned gpio_start;
@@ -192,26 +190,17 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
192 gc->owner = THIS_MODULE; 190 gc->owner = THIS_MODULE;
193} 191}
194 192
195static int __devinit pca953x_probe(struct i2c_client *client) 193static int __devinit pca953x_probe(struct i2c_client *client,
194 const struct i2c_device_id *id)
196{ 195{
197 struct pca953x_platform_data *pdata; 196 struct pca953x_platform_data *pdata;
198 struct pca953x_chip *chip; 197 struct pca953x_chip *chip;
199 int ret, i; 198 int ret;
200 const struct pca953x_desc *id = NULL;
201 199
202 pdata = client->dev.platform_data; 200 pdata = client->dev.platform_data;
203 if (pdata == NULL) 201 if (pdata == NULL)
204 return -ENODEV; 202 return -ENODEV;
205 203
206 /* this loop vanishes when we get i2c_device_id */
207 for (i = 0; i < ARRAY_SIZE(pca953x_descs); i++)
208 if (!strcmp(pca953x_descs[i].name, client->name)) {
209 id = pca953x_descs + i;
210 break;
211 }
212 if (!id)
213 return -ENODEV;
214
215 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); 204 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
216 if (chip == NULL) 205 if (chip == NULL)
217 return -ENOMEM; 206 return -ENOMEM;
@@ -291,6 +280,7 @@ static struct i2c_driver pca953x_driver = {
291 }, 280 },
292 .probe = pca953x_probe, 281 .probe = pca953x_probe,
293 .remove = pca953x_remove, 282 .remove = pca953x_remove,
283 .id_table = pca953x_id,
294}; 284};
295 285
296static int __init pca953x_init(void) 286static int __init pca953x_init(void)
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c
index 1106aa15ac79..aa6cc8b2a2bc 100644
--- a/drivers/gpio/pcf857x.c
+++ b/drivers/gpio/pcf857x.c
@@ -26,6 +26,21 @@
26#include <asm/gpio.h> 26#include <asm/gpio.h>
27 27
28 28
29static const struct i2c_device_id pcf857x_id[] = {
30 { "pcf8574", 8 },
31 { "pca8574", 8 },
32 { "pca9670", 8 },
33 { "pca9672", 8 },
34 { "pca9674", 8 },
35 { "pcf8575", 16 },
36 { "pca8575", 16 },
37 { "pca9671", 16 },
38 { "pca9673", 16 },
39 { "pca9675", 16 },
40 { }
41};
42MODULE_DEVICE_TABLE(i2c, pcf857x_id);
43
29/* 44/*
30 * The pcf857x, pca857x, and pca967x chips only expose one read and one 45 * The pcf857x, pca857x, and pca967x chips only expose one read and one
31 * write register. Writing a "one" bit (to match the reset state) lets 46 * write register. Writing a "one" bit (to match the reset state) lets
@@ -142,7 +157,8 @@ static void pcf857x_set16(struct gpio_chip *chip, unsigned offset, int value)
142 157
143/*-------------------------------------------------------------------------*/ 158/*-------------------------------------------------------------------------*/
144 159
145static int pcf857x_probe(struct i2c_client *client) 160static int pcf857x_probe(struct i2c_client *client,
161 const struct i2c_device_id *id)
146{ 162{
147 struct pcf857x_platform_data *pdata; 163 struct pcf857x_platform_data *pdata;
148 struct pcf857x *gpio; 164 struct pcf857x *gpio;
@@ -172,13 +188,8 @@ static int pcf857x_probe(struct i2c_client *client)
172 * 188 *
173 * NOTE: we don't distinguish here between *4 and *4a parts. 189 * NOTE: we don't distinguish here between *4 and *4a parts.
174 */ 190 */
175 if (strcmp(client->name, "pcf8574") == 0 191 gpio->chip.ngpio = id->driver_data;
176 || strcmp(client->name, "pca8574") == 0 192 if (gpio->chip.ngpio == 8) {
177 || strcmp(client->name, "pca9670") == 0
178 || strcmp(client->name, "pca9672") == 0
179 || strcmp(client->name, "pca9674") == 0
180 ) {
181 gpio->chip.ngpio = 8;
182 gpio->chip.direction_input = pcf857x_input8; 193 gpio->chip.direction_input = pcf857x_input8;
183 gpio->chip.get = pcf857x_get8; 194 gpio->chip.get = pcf857x_get8;
184 gpio->chip.direction_output = pcf857x_output8; 195 gpio->chip.direction_output = pcf857x_output8;
@@ -198,13 +209,7 @@ static int pcf857x_probe(struct i2c_client *client)
198 * 209 *
199 * NOTE: we don't distinguish here between '75 and '75c parts. 210 * NOTE: we don't distinguish here between '75 and '75c parts.
200 */ 211 */
201 } else if (strcmp(client->name, "pcf8575") == 0 212 } else if (gpio->chip.ngpio == 16) {
202 || strcmp(client->name, "pca8575") == 0
203 || strcmp(client->name, "pca9671") == 0
204 || strcmp(client->name, "pca9673") == 0
205 || strcmp(client->name, "pca9675") == 0
206 ) {
207 gpio->chip.ngpio = 16;
208 gpio->chip.direction_input = pcf857x_input16; 213 gpio->chip.direction_input = pcf857x_input16;
209 gpio->chip.get = pcf857x_get16; 214 gpio->chip.get = pcf857x_get16;
210 gpio->chip.direction_output = pcf857x_output16; 215 gpio->chip.direction_output = pcf857x_output16;
@@ -313,6 +318,7 @@ static struct i2c_driver pcf857x_driver = {
313 }, 318 },
314 .probe = pcf857x_probe, 319 .probe = pcf857x_probe,
315 .remove = pcf857x_remove, 320 .remove = pcf857x_remove,
321 .id_table = pcf857x_id,
316}; 322};
317 323
318static int __init pcf857x_init(void) 324static int __init pcf857x_init(void)
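The two GPIO expander drivers above (and the f75375 hwmon driver further down) are converted from matching on client->name strings to the new-style i2c binding: an i2c_device_id table carries per-part data in driver_data (here, the number of GPIO lines), and the i2c core passes the matched entry to probe(). A minimal sketch of the pattern with hypothetical names, assuming only the i2c_device_id layout and probe() signature visible in the diff:

/* Illustrative only; the "foo" names are placeholders, not a real driver. */
static const struct i2c_device_id foo_id[] = {
	{ "foo8",  8 },			/* driver_data = GPIO line count */
	{ "foo16", 16 },
	{ }				/* zero-terminated */
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	unsigned ngpio = id->driver_data;	/* no strcmp() on client->name */

	/* ... configure the chip for 'ngpio' lines ... */
	return 0;
}

static struct i2c_driver foo_driver = {
	.driver   = { .name = "foo" },
	.probe    = foo_probe,
	.id_table = foo_id,
};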
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e03c67dd3e63..f43d6d3cf2fa 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -606,7 +606,7 @@ static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
606 case 2: 606 case 2:
607 if ((end - start) < 2) 607 if ((end - start) < 2)
608 return NULL; 608 return NULL;
609 item->data.u16 = le16_to_cpu(get_unaligned((__le16*)start)); 609 item->data.u16 = get_unaligned_le16(start);
610 start = (__u8 *)((__le16 *)start + 1); 610 start = (__u8 *)((__le16 *)start + 1);
611 return start; 611 return start;
612 612
@@ -614,7 +614,7 @@ static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
614 item->size++; 614 item->size++;
615 if ((end - start) < 4) 615 if ((end - start) < 4)
616 return NULL; 616 return NULL;
617 item->data.u32 = le32_to_cpu(get_unaligned((__le32*)start)); 617 item->data.u32 = get_unaligned_le32(start);
618 start = (__u8 *)((__le32 *)start + 1); 618 start = (__u8 *)((__le32 *)start + 1);
619 return start; 619 return start;
620 } 620 }
@@ -765,7 +765,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
765 765
766 report += offset >> 3; /* adjust byte index */ 766 report += offset >> 3; /* adjust byte index */
767 offset &= 7; /* now only need bit offset into one byte */ 767 offset &= 7; /* now only need bit offset into one byte */
768 x = le64_to_cpu(get_unaligned((__le64 *) report)); 768 x = get_unaligned_le64(report);
769 x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */ 769 x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */
770 return (u32) x; 770 return (u32) x;
771} 771}
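The hid-core hunk above swaps the open-coded unaligned little-endian loads for the get_unaligned_le16/le32/le64 helpers. Both spellings read the same bytes; a minimal equivalence sketch, assuming the helpers come from <asm/unaligned.h> as the surrounding code implies:

/* Old and new spellings of the same unaligned little-endian 16-bit load. */
static u16 load_le16_old(const __u8 *p)
{
	return le16_to_cpu(get_unaligned((__le16 *)p));
}

static u16 load_le16_new(const __u8 *p)
{
	return get_unaligned_le16(p);	/* same value, less casting */
}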
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index ed71a8bc70dc..5c8b6e0ff47c 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -224,7 +224,7 @@ static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind)
224 if (in_data & 0xF000) { 224 if (in_data & 0xF000) {
225 printk(KERN_DEBUG 225 printk(KERN_DEBUG
226 "%s : Doesn't look like an ads7828 device\n", 226 "%s : Doesn't look like an ads7828 device\n",
227 __FUNCTION__); 227 __func__);
228 goto exit_free; 228 goto exit_free;
229 } 229 }
230 } 230 }
diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c
index 9587869bdba0..c1009d6f9796 100644
--- a/drivers/hwmon/adt7473.c
+++ b/drivers/hwmon/adt7473.c
@@ -422,18 +422,14 @@ static ssize_t show_volt(struct device *dev, struct device_attribute *devattr,
422 * number in the range -128 to 127, or as an unsigned number that must 422 * number in the range -128 to 127, or as an unsigned number that must
423 * be offset by 64. 423 * be offset by 64.
424 */ 424 */
425static int decode_temp(struct adt7473_data *data, u8 raw) 425static int decode_temp(u8 twos_complement, u8 raw)
426{ 426{
427 if (data->temp_twos_complement) 427 return twos_complement ? (s8)raw : raw - 64;
428 return (s8)raw;
429 return raw - 64;
430} 428}
431 429
432static u8 encode_temp(struct adt7473_data *data, int cooked) 430static u8 encode_temp(u8 twos_complement, int cooked)
433{ 431{
434 if (data->temp_twos_complement) 432 return twos_complement ? cooked & 0xFF : cooked + 64;
435 return (cooked & 0xFF);
436 return cooked + 64;
437} 433}
438 434
439static ssize_t show_temp_min(struct device *dev, 435static ssize_t show_temp_min(struct device *dev,
@@ -442,8 +438,9 @@ static ssize_t show_temp_min(struct device *dev,
442{ 438{
443 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 439 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
444 struct adt7473_data *data = adt7473_update_device(dev); 440 struct adt7473_data *data = adt7473_update_device(dev);
445 return sprintf(buf, "%d\n", 441 return sprintf(buf, "%d\n", 1000 * decode_temp(
446 1000 * decode_temp(data, data->temp_min[attr->index])); 442 data->temp_twos_complement,
443 data->temp_min[attr->index]));
447} 444}
448 445
449static ssize_t set_temp_min(struct device *dev, 446static ssize_t set_temp_min(struct device *dev,
@@ -455,7 +452,7 @@ static ssize_t set_temp_min(struct device *dev,
455 struct i2c_client *client = to_i2c_client(dev); 452 struct i2c_client *client = to_i2c_client(dev);
456 struct adt7473_data *data = i2c_get_clientdata(client); 453 struct adt7473_data *data = i2c_get_clientdata(client);
457 int temp = simple_strtol(buf, NULL, 10) / 1000; 454 int temp = simple_strtol(buf, NULL, 10) / 1000;
458 temp = encode_temp(data, temp); 455 temp = encode_temp(data->temp_twos_complement, temp);
459 456
460 mutex_lock(&data->lock); 457 mutex_lock(&data->lock);
461 data->temp_min[attr->index] = temp; 458 data->temp_min[attr->index] = temp;
@@ -472,8 +469,9 @@ static ssize_t show_temp_max(struct device *dev,
472{ 469{
473 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 470 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
474 struct adt7473_data *data = adt7473_update_device(dev); 471 struct adt7473_data *data = adt7473_update_device(dev);
475 return sprintf(buf, "%d\n", 472 return sprintf(buf, "%d\n", 1000 * decode_temp(
476 1000 * decode_temp(data, data->temp_max[attr->index])); 473 data->temp_twos_complement,
474 data->temp_max[attr->index]));
477} 475}
478 476
479static ssize_t set_temp_max(struct device *dev, 477static ssize_t set_temp_max(struct device *dev,
@@ -485,7 +483,7 @@ static ssize_t set_temp_max(struct device *dev,
485 struct i2c_client *client = to_i2c_client(dev); 483 struct i2c_client *client = to_i2c_client(dev);
486 struct adt7473_data *data = i2c_get_clientdata(client); 484 struct adt7473_data *data = i2c_get_clientdata(client);
487 int temp = simple_strtol(buf, NULL, 10) / 1000; 485 int temp = simple_strtol(buf, NULL, 10) / 1000;
488 temp = encode_temp(data, temp); 486 temp = encode_temp(data->temp_twos_complement, temp);
489 487
490 mutex_lock(&data->lock); 488 mutex_lock(&data->lock);
491 data->temp_max[attr->index] = temp; 489 data->temp_max[attr->index] = temp;
@@ -501,8 +499,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
501{ 499{
502 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 500 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
503 struct adt7473_data *data = adt7473_update_device(dev); 501 struct adt7473_data *data = adt7473_update_device(dev);
504 return sprintf(buf, "%d\n", 502 return sprintf(buf, "%d\n", 1000 * decode_temp(
505 1000 * decode_temp(data, data->temp[attr->index])); 503 data->temp_twos_complement,
504 data->temp[attr->index]));
506} 505}
507 506
508static ssize_t show_fan_min(struct device *dev, 507static ssize_t show_fan_min(struct device *dev,
@@ -671,8 +670,9 @@ static ssize_t show_temp_tmax(struct device *dev,
671{ 670{
672 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 671 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
673 struct adt7473_data *data = adt7473_update_device(dev); 672 struct adt7473_data *data = adt7473_update_device(dev);
674 return sprintf(buf, "%d\n", 673 return sprintf(buf, "%d\n", 1000 * decode_temp(
675 1000 * decode_temp(data, data->temp_tmax[attr->index])); 674 data->temp_twos_complement,
675 data->temp_tmax[attr->index]));
676} 676}
677 677
678static ssize_t set_temp_tmax(struct device *dev, 678static ssize_t set_temp_tmax(struct device *dev,
@@ -684,7 +684,7 @@ static ssize_t set_temp_tmax(struct device *dev,
684 struct i2c_client *client = to_i2c_client(dev); 684 struct i2c_client *client = to_i2c_client(dev);
685 struct adt7473_data *data = i2c_get_clientdata(client); 685 struct adt7473_data *data = i2c_get_clientdata(client);
686 int temp = simple_strtol(buf, NULL, 10) / 1000; 686 int temp = simple_strtol(buf, NULL, 10) / 1000;
687 temp = encode_temp(data, temp); 687 temp = encode_temp(data->temp_twos_complement, temp);
688 688
689 mutex_lock(&data->lock); 689 mutex_lock(&data->lock);
690 data->temp_tmax[attr->index] = temp; 690 data->temp_tmax[attr->index] = temp;
@@ -701,8 +701,9 @@ static ssize_t show_temp_tmin(struct device *dev,
701{ 701{
702 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 702 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
703 struct adt7473_data *data = adt7473_update_device(dev); 703 struct adt7473_data *data = adt7473_update_device(dev);
704 return sprintf(buf, "%d\n", 704 return sprintf(buf, "%d\n", 1000 * decode_temp(
705 1000 * decode_temp(data, data->temp_tmin[attr->index])); 705 data->temp_twos_complement,
706 data->temp_tmin[attr->index]));
706} 707}
707 708
708static ssize_t set_temp_tmin(struct device *dev, 709static ssize_t set_temp_tmin(struct device *dev,
@@ -714,7 +715,7 @@ static ssize_t set_temp_tmin(struct device *dev,
714 struct i2c_client *client = to_i2c_client(dev); 715 struct i2c_client *client = to_i2c_client(dev);
715 struct adt7473_data *data = i2c_get_clientdata(client); 716 struct adt7473_data *data = i2c_get_clientdata(client);
716 int temp = simple_strtol(buf, NULL, 10) / 1000; 717 int temp = simple_strtol(buf, NULL, 10) / 1000;
717 temp = encode_temp(data, temp); 718 temp = encode_temp(data->temp_twos_complement, temp);
718 719
719 mutex_lock(&data->lock); 720 mutex_lock(&data->lock);
720 data->temp_tmin[attr->index] = temp; 721 data->temp_tmin[attr->index] = temp;
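The adt7473 changes above pass only the two's-complement flag into decode_temp()/encode_temp() rather than the whole data struct. As the comment in the hunk notes, a raw register byte is either a signed value (-128..127) or an unsigned value offset by 64; a short worked example against the decode_temp() shown above:

/* Worked example for raw = 0x28 (decimal 40), using decode_temp() as defined above:
 *   decode_temp(1, 0x28) -> (s8)0x28  ==  40   (two's-complement mode,  40 degC)
 *   decode_temp(0, 0x28) -> 0x28 - 64 == -24   (offset-by-64 mode,     -24 degC)
 */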
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 84712a22acea..fe2eea4d799b 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -953,12 +953,8 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 value)
953static void asb100_init_client(struct i2c_client *client) 953static void asb100_init_client(struct i2c_client *client)
954{ 954{
955 struct asb100_data *data = i2c_get_clientdata(client); 955 struct asb100_data *data = i2c_get_clientdata(client);
956 int vid = 0;
957 956
958 vid = asb100_read_value(client, ASB100_REG_VID_FANDIV) & 0x0f;
959 vid |= (asb100_read_value(client, ASB100_REG_CHIPID) & 0x01) << 4;
960 data->vrm = vid_which_vrm(); 957 data->vrm = vid_which_vrm();
961 vid = vid_from_reg(vid, data->vrm);
962 958
963 /* Start monitoring */ 959 /* Start monitoring */
964 asb100_write_value(client, ASB100_REG_CONFIG, 960 asb100_write_value(client, ASB100_REG_CONFIG,
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 1464338e4e11..dc1f30e432ea 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -117,7 +117,8 @@ struct f75375_data {
117static int f75375_attach_adapter(struct i2c_adapter *adapter); 117static int f75375_attach_adapter(struct i2c_adapter *adapter);
118static int f75375_detect(struct i2c_adapter *adapter, int address, int kind); 118static int f75375_detect(struct i2c_adapter *adapter, int address, int kind);
119static int f75375_detach_client(struct i2c_client *client); 119static int f75375_detach_client(struct i2c_client *client);
120static int f75375_probe(struct i2c_client *client); 120static int f75375_probe(struct i2c_client *client,
121 const struct i2c_device_id *id);
121static int f75375_remove(struct i2c_client *client); 122static int f75375_remove(struct i2c_client *client);
122 123
123static struct i2c_driver f75375_legacy_driver = { 124static struct i2c_driver f75375_legacy_driver = {
@@ -128,12 +129,20 @@ static struct i2c_driver f75375_legacy_driver = {
128 .detach_client = f75375_detach_client, 129 .detach_client = f75375_detach_client,
129}; 130};
130 131
132static const struct i2c_device_id f75375_id[] = {
133 { "f75373", f75373 },
134 { "f75375", f75375 },
135 { }
136};
137MODULE_DEVICE_TABLE(i2c, f75375_id);
138
131static struct i2c_driver f75375_driver = { 139static struct i2c_driver f75375_driver = {
132 .driver = { 140 .driver = {
133 .name = "f75375", 141 .name = "f75375",
134 }, 142 },
135 .probe = f75375_probe, 143 .probe = f75375_probe,
136 .remove = f75375_remove, 144 .remove = f75375_remove,
145 .id_table = f75375_id,
137}; 146};
138 147
139static inline int f75375_read8(struct i2c_client *client, u8 reg) 148static inline int f75375_read8(struct i2c_client *client, u8 reg)
@@ -628,7 +637,8 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
628 637
629} 638}
630 639
631static int f75375_probe(struct i2c_client *client) 640static int f75375_probe(struct i2c_client *client,
641 const struct i2c_device_id *id)
632{ 642{
633 struct f75375_data *data = i2c_get_clientdata(client); 643 struct f75375_data *data = i2c_get_clientdata(client);
634 struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data; 644 struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data;
@@ -643,15 +653,7 @@ static int f75375_probe(struct i2c_client *client)
643 i2c_set_clientdata(client, data); 653 i2c_set_clientdata(client, data);
644 data->client = client; 654 data->client = client;
645 mutex_init(&data->update_lock); 655 mutex_init(&data->update_lock);
646 656 data->kind = id->driver_data;
647 if (strcmp(client->name, "f75375") == 0)
648 data->kind = f75375;
649 else if (strcmp(client->name, "f75373") == 0)
650 data->kind = f75373;
651 else {
652 dev_err(&client->dev, "Unsupported device: %s\n", client->name);
653 return -ENODEV;
654 }
655 657
656 if ((err = sysfs_create_group(&client->dev.kobj, &f75375_group))) 658 if ((err = sysfs_create_group(&client->dev.kobj, &f75375_group)))
657 goto exit_free; 659 goto exit_free;
@@ -712,6 +714,7 @@ static int f75375_detect(struct i2c_adapter *adapter, int address, int kind)
712 u8 version = 0; 714 u8 version = 0;
713 int err = 0; 715 int err = 0;
714 const char *name = ""; 716 const char *name = "";
717 struct i2c_device_id id;
715 718
716 if (!(client = kzalloc(sizeof(*client), GFP_KERNEL))) { 719 if (!(client = kzalloc(sizeof(*client), GFP_KERNEL))) {
717 err = -ENOMEM; 720 err = -ENOMEM;
@@ -748,7 +751,9 @@ static int f75375_detect(struct i2c_adapter *adapter, int address, int kind)
748 if ((err = i2c_attach_client(client))) 751 if ((err = i2c_attach_client(client)))
749 goto exit_free; 752 goto exit_free;
750 753
751 if ((err = f75375_probe(client)) < 0) 754 strlcpy(id.name, name, I2C_NAME_SIZE);
755 id.driver_data = kind;
756 if ((err = f75375_probe(client, &id)) < 0)
752 goto exit_detach; 757 goto exit_detach;
753 758
754 return 0; 759 return 0;
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 115f4090b98e..fa7696905154 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -248,7 +248,7 @@ static int lm75_detach_client(struct i2c_client *client)
248 248
249/* All registers are word-sized, except for the configuration register. 249/* All registers are word-sized, except for the configuration register.
250 LM75 uses a high-byte first convention, which is exactly opposite to 250 LM75 uses a high-byte first convention, which is exactly opposite to
251 the usual practice. */ 251 the SMBus standard. */
252static int lm75_read_value(struct i2c_client *client, u8 reg) 252static int lm75_read_value(struct i2c_client *client, u8 reg)
253{ 253{
254 if (reg == LM75_REG_CONF) 254 if (reg == LM75_REG_CONF)
@@ -257,9 +257,6 @@ static int lm75_read_value(struct i2c_client *client, u8 reg)
257 return swab16(i2c_smbus_read_word_data(client, reg)); 257 return swab16(i2c_smbus_read_word_data(client, reg));
258} 258}
259 259
260/* All registers are word-sized, except for the configuration register.
261 LM75 uses a high-byte first convention, which is exactly opposite to
262 the usual practice. */
263static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value) 260static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
264{ 261{
265 if (reg == LM75_REG_CONF) 262 if (reg == LM75_REG_CONF)
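A brief illustration of the byte swap that lm75_read_value() keeps performing (the helper name and values are purely illustrative; assumes swab16() from <linux/swab.h>): SMBus delivers the low byte first while the LM75 sends the high byte first, so a register holding 0x1234 arrives from i2c_smbus_read_word_data() as 0x3412.

static inline u16 lm75_swap_sketch(u16 smbus_word)
{
	return swab16(smbus_word);	/* 0x3412 -> 0x1234 */
}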
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index f61d8f4185b2..eb03544c731c 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -335,11 +335,23 @@ exit:
335static int __init smsc47b397_find(unsigned short *addr) 335static int __init smsc47b397_find(unsigned short *addr)
336{ 336{
337 u8 id, rev; 337 u8 id, rev;
338 char *name;
338 339
339 superio_enter(); 340 superio_enter();
340 id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID); 341 id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
341 342
342 if ((id != 0x6f) && (id != 0x81) && (id != 0x85)) { 343 switch(id) {
344 case 0x81:
345 name = "SCH5307-NS";
346 break;
347 case 0x6f:
348 name = "LPC47B397-NC";
349 break;
350 case 0x85:
351 case 0x8c:
352 name = "SCH5317";
353 break;
354 default:
343 superio_exit(); 355 superio_exit();
344 return -ENODEV; 356 return -ENODEV;
345 } 357 }
@@ -352,8 +364,7 @@ static int __init smsc47b397_find(unsigned short *addr)
352 364
353 printk(KERN_INFO DRVNAME ": found SMSC %s " 365 printk(KERN_INFO DRVNAME ": found SMSC %s "
354 "(base address 0x%04x, revision %u)\n", 366 "(base address 0x%04x, revision %u)\n",
355 id == 0x81 ? "SCH5307-NS" : id == 0x85 ? "SCH5317" : 367 name, *addr, rev);
356 "LPC47B397-NC", *addr, rev);
357 368
358 superio_exit(); 369 superio_exit();
359 return 0; 370 return 0;
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index ee35af93b574..ed3c019b78c7 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1024,10 +1024,9 @@ static struct sensor_device_attribute_2 w83793_vid[] = {
1024 SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0), 1024 SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0),
1025 SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1), 1025 SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1),
1026}; 1026};
1027static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm);
1027 1028
1028static struct sensor_device_attribute_2 sda_single_files[] = { 1029static struct sensor_device_attribute_2 sda_single_files[] = {
1029 SENSOR_ATTR_2(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm,
1030 NOT_USED, NOT_USED),
1031 SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep, 1030 SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
1032 store_chassis_clear, ALARM_STATUS, 30), 1031 store_chassis_clear, ALARM_STATUS, 30),
1033 SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable, 1032 SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
@@ -1080,6 +1079,7 @@ static int w83793_detach_client(struct i2c_client *client)
1080 1079
1081 for (i = 0; i < ARRAY_SIZE(w83793_vid); i++) 1080 for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
1082 device_remove_file(dev, &w83793_vid[i].dev_attr); 1081 device_remove_file(dev, &w83793_vid[i].dev_attr);
1082 device_remove_file(dev, &dev_attr_vrm);
1083 1083
1084 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++) 1084 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
1085 device_remove_file(dev, &w83793_left_fan[i].dev_attr); 1085 device_remove_file(dev, &w83793_left_fan[i].dev_attr);
@@ -1282,7 +1282,6 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1282 /* Initialize the chip */ 1282 /* Initialize the chip */
1283 w83793_init_client(client); 1283 w83793_init_client(client);
1284 1284
1285 data->vrm = vid_which_vrm();
1286 /* 1285 /*
1287 Only fan 1-5 has their own input pins, 1286 Only fan 1-5 has their own input pins,
1288 Pwm 1-3 has their own pins 1287 Pwm 1-3 has their own pins
@@ -1293,7 +1292,9 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1293 val = w83793_read_value(client, W83793_REG_FANIN_CTRL); 1292 val = w83793_read_value(client, W83793_REG_FANIN_CTRL);
1294 1293
1295 /* check the function of pins 49-56 */ 1294 /* check the function of pins 49-56 */
1296 if (!(tmp & 0x80)) { 1295 if (tmp & 0x80) {
1296 data->has_vid |= 0x2; /* has VIDB */
1297 } else {
1297 data->has_pwm |= 0x18; /* pwm 4,5 */ 1298 data->has_pwm |= 0x18; /* pwm 4,5 */
1298 if (val & 0x01) { /* fan 6 */ 1299 if (val & 0x01) { /* fan 6 */
1299 data->has_fan |= 0x20; 1300 data->has_fan |= 0x20;
@@ -1309,13 +1310,15 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1309 } 1310 }
1310 } 1311 }
1311 1312
1313 /* check the function of pins 37-40 */
1314 if (!(tmp & 0x29))
1315 data->has_vid |= 0x1; /* has VIDA */
1312 if (0x08 == (tmp & 0x0c)) { 1316 if (0x08 == (tmp & 0x0c)) {
1313 if (val & 0x08) /* fan 9 */ 1317 if (val & 0x08) /* fan 9 */
1314 data->has_fan |= 0x100; 1318 data->has_fan |= 0x100;
1315 if (val & 0x10) /* fan 10 */ 1319 if (val & 0x10) /* fan 10 */
1316 data->has_fan |= 0x200; 1320 data->has_fan |= 0x200;
1317 } 1321 }
1318
1319 if (0x20 == (tmp & 0x30)) { 1322 if (0x20 == (tmp & 0x30)) {
1320 if (val & 0x20) /* fan 11 */ 1323 if (val & 0x20) /* fan 11 */
1321 data->has_fan |= 0x400; 1324 data->has_fan |= 0x400;
@@ -1359,13 +1362,6 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1359 if (tmp & 0x02) 1362 if (tmp & 0x02)
1360 data->has_temp |= 0x20; 1363 data->has_temp |= 0x20;
1361 1364
1362 /* Detect the VID usage and ignore unused input */
1363 tmp = w83793_read_value(client, W83793_REG_MFC);
1364 if (!(tmp & 0x29))
1365 data->has_vid |= 0x1; /* has VIDA */
1366 if (tmp & 0x80)
1367 data->has_vid |= 0x2; /* has VIDB */
1368
1369 /* Register sysfs hooks */ 1365 /* Register sysfs hooks */
1370 for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) { 1366 for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) {
1371 err = device_create_file(dev, 1367 err = device_create_file(dev,
@@ -1381,6 +1377,12 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1381 if (err) 1377 if (err)
1382 goto exit_remove; 1378 goto exit_remove;
1383 } 1379 }
1380 if (data->has_vid) {
1381 data->vrm = vid_which_vrm();
1382 err = device_create_file(dev, &dev_attr_vrm);
1383 if (err)
1384 goto exit_remove;
1385 }
1384 1386
1385 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) { 1387 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
1386 err = device_create_file(dev, &sda_single_files[i].dev_attr); 1388 err = device_create_file(dev, &sda_single_files[i].dev_attr);
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 77f2d482888b..52e268e25dab 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -301,8 +301,8 @@ static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval)
301 msleep(i); 301 msleep(i);
302 } 302 }
303 303
304 dev_err(&client->dev, "Couldn't read value from register 0x%02x. " 304 dev_err(&client->dev, "Couldn't read value from register 0x%02x.\n",
305 "Please report.\n", reg); 305 reg);
306 return defval; 306 return defval;
307} 307}
308 308
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
index e5e96c817566..c38a0a112208 100644
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard 2 * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard
3 * 3 *
4 * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org> 4 * Copyright (C) 2004, 2008 Jean Delvare <khali@linux-fr.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -231,7 +231,8 @@ ERROR2:
231 kfree(s4882_adapter); 231 kfree(s4882_adapter);
232 s4882_adapter = NULL; 232 s4882_adapter = NULL;
233ERROR1: 233ERROR1:
234 i2c_del_adapter(&amd756_smbus); 234 /* Restore physical bus */
235 i2c_add_adapter(&amd756_smbus);
235ERROR0: 236ERROR0:
236 return error; 237 return error;
237} 238}
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 9bbe96cef719..fdc9ad805e35 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -38,7 +38,6 @@
38#include <linux/ioport.h> 38#include <linux/ioport.h>
39#include <linux/i2c.h> 39#include <linux/i2c.h>
40#include <linux/init.h> 40#include <linux/init.h>
41#include <linux/apm_bios.h>
42#include <linux/dmi.h> 41#include <linux/dmi.h>
43#include <asm/io.h> 42#include <asm/io.h>
44 43
@@ -223,7 +222,7 @@ static int piix4_transaction(void)
223 dev_err(&piix4_adapter.dev, "Failed! (%02x)\n", temp); 222 dev_err(&piix4_adapter.dev, "Failed! (%02x)\n", temp);
224 return -1; 223 return -1;
225 } else { 224 } else {
226 dev_dbg(&piix4_adapter.dev, "Successfull!\n"); 225 dev_dbg(&piix4_adapter.dev, "Successful!\n");
227 } 226 }
228 } 227 }
229 228
@@ -343,12 +342,7 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
343 342
344 343
345 switch (size) { 344 switch (size) {
346 case PIIX4_BYTE: /* Where is the result put? I assume here it is in 345 case PIIX4_BYTE:
347 SMBHSTDAT0 but it might just as well be in the
348 SMBHSTCMD. No clue in the docs */
349
350 data->byte = inb_p(SMBHSTDAT0);
351 break;
352 case PIIX4_BYTE_DATA: 346 case PIIX4_BYTE_DATA:
353 data->byte = inb_p(SMBHSTDAT0); 347 data->byte = inb_p(SMBHSTDAT0);
354 break; 348 break;
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 283769cecee2..9ca8f9155f95 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -238,7 +238,7 @@ static int sis5595_transaction(struct i2c_adapter *adap)
238 dev_dbg(&adap->dev, "Failed! (%02x)\n", temp); 238 dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
239 return -1; 239 return -1;
240 } else { 240 } else {
241 dev_dbg(&adap->dev, "Successfull!\n"); 241 dev_dbg(&adap->dev, "Successful!\n");
242 } 242 }
243 } 243 }
244 244
@@ -316,14 +316,8 @@ static s32 sis5595_access(struct i2c_adapter *adap, u16 addr,
316 } 316 }
317 size = (size == I2C_SMBUS_PROC_CALL) ? SIS5595_PROC_CALL : SIS5595_WORD_DATA; 317 size = (size == I2C_SMBUS_PROC_CALL) ? SIS5595_PROC_CALL : SIS5595_WORD_DATA;
318 break; 318 break;
319/*
320 case I2C_SMBUS_BLOCK_DATA:
321 printk(KERN_WARNING "sis5595.o: Block data not yet implemented!\n");
322 return -1;
323 break;
324*/
325 default: 319 default:
326 printk(KERN_WARNING "sis5595.o: Unsupported transaction %d\n", size); 320 dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
327 return -1; 321 return -1;
328 } 322 }
329 323
@@ -338,9 +332,7 @@ static s32 sis5595_access(struct i2c_adapter *adap, u16 addr,
338 332
339 333
340 switch (size) { 334 switch (size) {
341 case SIS5595_BYTE: /* Where is the result put? I assume here it is in 335 case SIS5595_BYTE:
342 SMB_DATA but it might just as well be in the
343 SMB_CMD. No clue in the docs */
344 case SIS5595_BYTE_DATA: 336 case SIS5595_BYTE_DATA:
345 data->byte = sis5595_read(SMB_BYTE); 337 data->byte = sis5595_read(SMB_BYTE);
346 break; 338 break;
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 5fd734f99ee9..3765dd7f450f 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -136,7 +136,7 @@ static int sis630_transaction_start(struct i2c_adapter *adap, int size, u8 *oldc
136 dev_dbg(&adap->dev, "Failed! (%02x)\n", temp); 136 dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
137 return -1; 137 return -1;
138 } else { 138 } else {
139 dev_dbg(&adap->dev, "Successfull!\n"); 139 dev_dbg(&adap->dev, "Successful!\n");
140 } 140 }
141 } 141 }
142 142
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
index c2a9f8c94f5e..d08eeec53913 100644
--- a/drivers/i2c/busses/i2c-stub.c
+++ b/drivers/i2c/busses/i2c-stub.c
@@ -33,7 +33,7 @@
33static unsigned short chip_addr[MAX_CHIPS]; 33static unsigned short chip_addr[MAX_CHIPS];
34module_param_array(chip_addr, ushort, NULL, S_IRUGO); 34module_param_array(chip_addr, ushort, NULL, S_IRUGO);
35MODULE_PARM_DESC(chip_addr, 35MODULE_PARM_DESC(chip_addr,
36 "Chip addresses (up to 10, between 0x03 and 0x77)\n"); 36 "Chip addresses (up to 10, between 0x03 and 0x77)");
37 37
38struct stub_chip { 38struct stub_chip {
39 u8 pointer; 39 u8 pointer;
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 1b0cfd5472fd..de9db49e54d9 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -51,7 +51,6 @@ struct taos_data {
51/* TAOS TSL2550 EVM */ 51/* TAOS TSL2550 EVM */
52static struct i2c_board_info tsl2550_info = { 52static struct i2c_board_info tsl2550_info = {
53 I2C_BOARD_INFO("tsl2550", 0x39), 53 I2C_BOARD_INFO("tsl2550", 0x39),
54 .type = "tsl2550",
55}; 54};
56 55
57/* Instantiate i2c devices based on the adapter name */ 56/* Instantiate i2c devices based on the adapter name */
@@ -59,7 +58,7 @@ static struct i2c_client *taos_instantiate_device(struct i2c_adapter *adapter)
59{ 58{
60 if (!strncmp(adapter->name, "TAOS TSL2550 EVM", 16)) { 59 if (!strncmp(adapter->name, "TAOS TSL2550 EVM", 16)) {
61 dev_info(&adapter->dev, "Instantiating device %s at 0x%02x\n", 60 dev_info(&adapter->dev, "Instantiating device %s at 0x%02x\n",
62 tsl2550_info.driver_name, tsl2550_info.addr); 61 tsl2550_info.type, tsl2550_info.addr);
63 return i2c_new_device(adapter, &tsl2550_info); 62 return i2c_new_device(adapter, &tsl2550_info);
64 } 63 }
65 64
diff --git a/drivers/i2c/chips/ds1682.c b/drivers/i2c/chips/ds1682.c
index 9e94542c18a2..23be4d42cb02 100644
--- a/drivers/i2c/chips/ds1682.c
+++ b/drivers/i2c/chips/ds1682.c
@@ -200,7 +200,8 @@ static struct bin_attribute ds1682_eeprom_attr = {
200/* 200/*
201 * Called when a ds1682 device is matched with this driver 201 * Called when a ds1682 device is matched with this driver
202 */ 202 */
203static int ds1682_probe(struct i2c_client *client) 203static int ds1682_probe(struct i2c_client *client,
204 const struct i2c_device_id *id)
204{ 205{
205 int rc; 206 int rc;
206 207
@@ -234,12 +235,19 @@ static int ds1682_remove(struct i2c_client *client)
234 return 0; 235 return 0;
235} 236}
236 237
238static const struct i2c_device_id ds1682_id[] = {
239 { "ds1682", 0 },
240 { }
241};
242MODULE_DEVICE_TABLE(i2c, ds1682_id);
243
237static struct i2c_driver ds1682_driver = { 244static struct i2c_driver ds1682_driver = {
238 .driver = { 245 .driver = {
239 .name = "ds1682", 246 .name = "ds1682",
240 }, 247 },
241 .probe = ds1682_probe, 248 .probe = ds1682_probe,
242 .remove = ds1682_remove, 249 .remove = ds1682_remove,
250 .id_table = ds1682_id,
243}; 251};
244 252
245static int __init ds1682_init(void) 253static int __init ds1682_init(void)
diff --git a/drivers/i2c/chips/menelaus.c b/drivers/i2c/chips/menelaus.c
index 2dea0123a958..b36db1797c11 100644
--- a/drivers/i2c/chips/menelaus.c
+++ b/drivers/i2c/chips/menelaus.c
@@ -1149,7 +1149,8 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
1149 1149
1150static struct i2c_driver menelaus_i2c_driver; 1150static struct i2c_driver menelaus_i2c_driver;
1151 1151
1152static int menelaus_probe(struct i2c_client *client) 1152static int menelaus_probe(struct i2c_client *client,
1153 const struct i2c_device_id *id)
1153{ 1154{
1154 struct menelaus_chip *menelaus; 1155 struct menelaus_chip *menelaus;
1155 int rev = 0, val; 1156 int rev = 0, val;
@@ -1242,12 +1243,19 @@ static int __exit menelaus_remove(struct i2c_client *client)
1242 return 0; 1243 return 0;
1243} 1244}
1244 1245
1246static const struct i2c_device_id menelaus_id[] = {
1247 { "menelaus", 0 },
1248 { }
1249};
1250MODULE_DEVICE_TABLE(i2c, menelaus_id);
1251
1245static struct i2c_driver menelaus_i2c_driver = { 1252static struct i2c_driver menelaus_i2c_driver = {
1246 .driver = { 1253 .driver = {
1247 .name = DRIVER_NAME, 1254 .name = DRIVER_NAME,
1248 }, 1255 },
1249 .probe = menelaus_probe, 1256 .probe = menelaus_probe,
1250 .remove = __exit_p(menelaus_remove), 1257 .remove = __exit_p(menelaus_remove),
1258 .id_table = menelaus_id,
1251}; 1259};
1252 1260
1253static int __init menelaus_init(void) 1261static int __init menelaus_init(void)
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index b67f69c2e7f3..85949685191b 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -64,7 +64,6 @@ static struct i2c_driver tps65010_driver;
64 * as part of board setup by a bootloader. 64 * as part of board setup by a bootloader.
65 */ 65 */
66enum tps_model { 66enum tps_model {
67 TPS_UNKNOWN = 0,
68 TPS65010, 67 TPS65010,
69 TPS65011, 68 TPS65011,
70 TPS65012, 69 TPS65012,
@@ -527,11 +526,13 @@ static int __exit tps65010_remove(struct i2c_client *client)
527 flush_scheduled_work(); 526 flush_scheduled_work();
528 debugfs_remove(tps->file); 527 debugfs_remove(tps->file);
529 kfree(tps); 528 kfree(tps);
529 i2c_set_clientdata(client, NULL);
530 the_tps = NULL; 530 the_tps = NULL;
531 return 0; 531 return 0;
532} 532}
533 533
534static int tps65010_probe(struct i2c_client *client) 534static int tps65010_probe(struct i2c_client *client,
535 const struct i2c_device_id *id)
535{ 536{
536 struct tps65010 *tps; 537 struct tps65010 *tps;
537 int status; 538 int status;
@@ -552,20 +553,7 @@ static int tps65010_probe(struct i2c_client *client)
552 mutex_init(&tps->lock); 553 mutex_init(&tps->lock);
553 INIT_DELAYED_WORK(&tps->work, tps65010_work); 554 INIT_DELAYED_WORK(&tps->work, tps65010_work);
554 tps->client = client; 555 tps->client = client;
555 556 tps->model = id->driver_data;
556 if (strcmp(client->name, "tps65010") == 0)
557 tps->model = TPS65010;
558 else if (strcmp(client->name, "tps65011") == 0)
559 tps->model = TPS65011;
560 else if (strcmp(client->name, "tps65012") == 0)
561 tps->model = TPS65012;
562 else if (strcmp(client->name, "tps65013") == 0)
563 tps->model = TPS65013;
564 else {
565 dev_warn(&client->dev, "unknown chip '%s'\n", client->name);
566 status = -ENODEV;
567 goto fail1;
568 }
569 557
570 /* the IRQ is active low, but many gpio lines can't support that 558 /* the IRQ is active low, but many gpio lines can't support that
571 * so this driver uses falling-edge triggers instead. 559 * so this driver uses falling-edge triggers instead.
@@ -594,9 +582,6 @@ static int tps65010_probe(struct i2c_client *client)
594 case TPS65012: 582 case TPS65012:
595 tps->por = 1; 583 tps->por = 1;
596 break; 584 break;
597 case TPS_UNKNOWN:
598 printk(KERN_WARNING "%s: unknown TPS chip\n", DRIVER_NAME);
599 break;
600 /* else CHGCONFIG.POR is replaced by AUA, enabling a WAIT mode */ 585 /* else CHGCONFIG.POR is replaced by AUA, enabling a WAIT mode */
601 } 586 }
602 tps->chgconf = i2c_smbus_read_byte_data(client, TPS_CHGCONFIG); 587 tps->chgconf = i2c_smbus_read_byte_data(client, TPS_CHGCONFIG);
@@ -615,6 +600,7 @@ static int tps65010_probe(struct i2c_client *client)
615 i2c_smbus_read_byte_data(client, TPS_DEFGPIO), 600 i2c_smbus_read_byte_data(client, TPS_DEFGPIO),
616 i2c_smbus_read_byte_data(client, TPS_MASK3)); 601 i2c_smbus_read_byte_data(client, TPS_MASK3));
617 602
603 i2c_set_clientdata(client, tps);
618 the_tps = tps; 604 the_tps = tps;
619 605
620#if defined(CONFIG_USB_GADGET) && !defined(CONFIG_USB_OTG) 606#if defined(CONFIG_USB_GADGET) && !defined(CONFIG_USB_OTG)
@@ -682,12 +668,22 @@ fail1:
682 return status; 668 return status;
683} 669}
684 670
671static const struct i2c_device_id tps65010_id[] = {
672 { "tps65010", TPS65010 },
673 { "tps65011", TPS65011 },
674 { "tps65012", TPS65012 },
675 { "tps65013", TPS65013 },
676 { }
677};
678MODULE_DEVICE_TABLE(i2c, tps65010_id);
679
685static struct i2c_driver tps65010_driver = { 680static struct i2c_driver tps65010_driver = {
686 .driver = { 681 .driver = {
687 .name = "tps65010", 682 .name = "tps65010",
688 }, 683 },
689 .probe = tps65010_probe, 684 .probe = tps65010_probe,
690 .remove = __exit_p(tps65010_remove), 685 .remove = __exit_p(tps65010_remove),
686 .id_table = tps65010_id,
691}; 687};
692 688
693/*-------------------------------------------------------------------------*/ 689/*-------------------------------------------------------------------------*/
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
index a10fd2791a69..1a9cc135219f 100644
--- a/drivers/i2c/chips/tsl2550.c
+++ b/drivers/i2c/chips/tsl2550.c
@@ -364,7 +364,8 @@ static int tsl2550_init_client(struct i2c_client *client)
364 */ 364 */
365 365
366static struct i2c_driver tsl2550_driver; 366static struct i2c_driver tsl2550_driver;
367static int __devinit tsl2550_probe(struct i2c_client *client) 367static int __devinit tsl2550_probe(struct i2c_client *client,
368 const struct i2c_device_id *id)
368{ 369{
369 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 370 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
370 struct tsl2550_data *data; 371 struct tsl2550_data *data;
@@ -451,6 +452,12 @@ static int tsl2550_resume(struct i2c_client *client)
451 452
452#endif /* CONFIG_PM */ 453#endif /* CONFIG_PM */
453 454
455static const struct i2c_device_id tsl2550_id[] = {
456 { "tsl2550", 0 },
457 { }
458};
459MODULE_DEVICE_TABLE(i2c, tsl2550_id);
460
454static struct i2c_driver tsl2550_driver = { 461static struct i2c_driver tsl2550_driver = {
455 .driver = { 462 .driver = {
456 .name = TSL2550_DRV_NAME, 463 .name = TSL2550_DRV_NAME,
@@ -460,6 +467,7 @@ static struct i2c_driver tsl2550_driver = {
460 .resume = tsl2550_resume, 467 .resume = tsl2550_resume,
461 .probe = tsl2550_probe, 468 .probe = tsl2550_probe,
462 .remove = __devexit_p(tsl2550_remove), 469 .remove = __devexit_p(tsl2550_remove),
470 .id_table = tsl2550_id,
463}; 471};
464 472
465static int __init tsl2550_init(void) 473static int __init tsl2550_init(void)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6c7fa8d53c0e..26384daccb96 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -48,6 +48,17 @@ static DEFINE_IDR(i2c_adapter_idr);
48 48
49/* ------------------------------------------------------------------------- */ 49/* ------------------------------------------------------------------------- */
50 50
51static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
52 const struct i2c_client *client)
53{
54 while (id->name[0]) {
55 if (strcmp(client->name, id->name) == 0)
56 return id;
57 id++;
58 }
59 return NULL;
60}
61
51static int i2c_device_match(struct device *dev, struct device_driver *drv) 62static int i2c_device_match(struct device *dev, struct device_driver *drv)
52{ 63{
53 struct i2c_client *client = to_i2c_client(dev); 64 struct i2c_client *client = to_i2c_client(dev);
@@ -59,6 +70,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
59 if (!is_newstyle_driver(driver)) 70 if (!is_newstyle_driver(driver))
60 return 0; 71 return 0;
61 72
73 /* match on an id table if there is one */
74 if (driver->id_table)
75 return i2c_match_id(driver->id_table, client) != NULL;
76
62 /* new style drivers use the same kind of driver matching policy 77 /* new style drivers use the same kind of driver matching policy
63 * as platform devices or SPI: compare device and driver IDs. 78 * as platform devices or SPI: compare device and driver IDs.
64 */ 79 */
@@ -73,11 +88,17 @@ static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
73 struct i2c_client *client = to_i2c_client(dev); 88 struct i2c_client *client = to_i2c_client(dev);
74 89
75 /* by definition, legacy drivers can't hotplug */ 90 /* by definition, legacy drivers can't hotplug */
76 if (dev->driver || !client->driver_name) 91 if (dev->driver)
77 return 0; 92 return 0;
78 93
79 if (add_uevent_var(env, "MODALIAS=%s", client->driver_name)) 94 if (client->driver_name[0]) {
80 return -ENOMEM; 95 if (add_uevent_var(env, "MODALIAS=%s", client->driver_name))
96 return -ENOMEM;
97 } else {
98 if (add_uevent_var(env, "MODALIAS=%s%s",
99 I2C_MODULE_PREFIX, client->name))
100 return -ENOMEM;
101 }
81 dev_dbg(dev, "uevent\n"); 102 dev_dbg(dev, "uevent\n");
82 return 0; 103 return 0;
83} 104}
@@ -90,13 +111,19 @@ static int i2c_device_probe(struct device *dev)
90{ 111{
91 struct i2c_client *client = to_i2c_client(dev); 112 struct i2c_client *client = to_i2c_client(dev);
92 struct i2c_driver *driver = to_i2c_driver(dev->driver); 113 struct i2c_driver *driver = to_i2c_driver(dev->driver);
114 const struct i2c_device_id *id;
93 int status; 115 int status;
94 116
95 if (!driver->probe) 117 if (!driver->probe)
96 return -ENODEV; 118 return -ENODEV;
97 client->driver = driver; 119 client->driver = driver;
98 dev_dbg(dev, "probe\n"); 120 dev_dbg(dev, "probe\n");
99 status = driver->probe(client); 121
122 if (driver->id_table)
123 id = i2c_match_id(driver->id_table, client);
124 else
125 id = NULL;
126 status = driver->probe(client, id);
100 if (status) 127 if (status)
101 client->driver = NULL; 128 client->driver = NULL;
102 return status; 129 return status;
@@ -179,9 +206,9 @@ static ssize_t show_client_name(struct device *dev, struct device_attribute *att
179static ssize_t show_modalias(struct device *dev, struct device_attribute *attr, char *buf) 206static ssize_t show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
180{ 207{
181 struct i2c_client *client = to_i2c_client(dev); 208 struct i2c_client *client = to_i2c_client(dev);
182 return client->driver_name 209 return client->driver_name[0]
183 ? sprintf(buf, "%s\n", client->driver_name) 210 ? sprintf(buf, "%s\n", client->driver_name)
184 : 0; 211 : sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
185} 212}
186 213
187static struct device_attribute i2c_dev_attrs[] = { 214static struct device_attribute i2c_dev_attrs[] = {
@@ -300,15 +327,21 @@ void i2c_unregister_device(struct i2c_client *client)
300EXPORT_SYMBOL_GPL(i2c_unregister_device); 327EXPORT_SYMBOL_GPL(i2c_unregister_device);
301 328
302 329
303static int dummy_nop(struct i2c_client *client) 330static int dummy_probe(struct i2c_client *client,
331 const struct i2c_device_id *id)
332{
333 return 0;
334}
335
336static int dummy_remove(struct i2c_client *client)
304{ 337{
305 return 0; 338 return 0;
306} 339}
307 340
308static struct i2c_driver dummy_driver = { 341static struct i2c_driver dummy_driver = {
309 .driver.name = "dummy", 342 .driver.name = "dummy",
310 .probe = dummy_nop, 343 .probe = dummy_probe,
311 .remove = dummy_nop, 344 .remove = dummy_remove,
312}; 345};
313 346
314/** 347/**
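Taken together, the i2c-core changes above let a new-style driver match purely on its id table, receive the matched entry in probe(), and have its module autoloaded from the MODALIAS uevent built from I2C_MODULE_PREFIX plus the client name (the prefix expands to "i2c:" in this kernel series, so a "tsl2550" client reports MODALIAS=i2c:tsl2550). A minimal sketch with hypothetical names, mirroring the driver conversions elsewhere in this patch:

static const struct i2c_device_id foo_id[] = {
	{ "foo",   0 },		/* base chip */
	{ "foo-b", 1 },		/* variant, told apart via driver_data */
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);	/* provides the i2c:foo / i2c:foo-b aliases */

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	/* id is the i2c_match_id() result; NULL only for legacy bindings */
	unsigned long variant = id ? id->driver_data : 0;

	dev_info(&client->dev, "probing variant %lu\n", variant);
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

static struct i2c_driver foo_driver = {
	.driver   = { .name = "foo" },
	.probe    = foo_probe,
	.remove   = foo_remove,
	.id_table = foo_id,
};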
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 3f9e10001e19..f702f9152ce6 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -862,40 +862,6 @@ config BLK_DEV_IDE_BAST
862 Say Y here if you want to support the onboard IDE channels on the 862 Say Y here if you want to support the onboard IDE channels on the
863 Simtec BAST or the Thorcom VR1000 863 Simtec BAST or the Thorcom VR1000
864 864
865config ETRAX_IDE
866 tristate "ETRAX IDE support"
867 depends on CRIS && BROKEN
868 select BLK_DEV_IDEDMA
869 help
870 Enables the ETRAX IDE driver.
871
872 You can't use parallel ports or SCSI ports at the same time.
873
874config ETRAX_IDE_DELAY
875 int "Delay for drives to regain consciousness"
876 depends on ETRAX_IDE && ETRAX_ARCH_V10
877 default 15
878 help
879 Number of seconds to wait for IDE drives to spin up after an IDE
880 reset.
881
882choice
883 prompt "IDE reset pin"
884 depends on ETRAX_IDE && ETRAX_ARCH_V10
885 default ETRAX_IDE_PB7_RESET
886
887config ETRAX_IDE_PB7_RESET
888 bool "Port_PB_Bit_7"
889 help
890 IDE reset on pin 7 on port B
891
892config ETRAX_IDE_G27_RESET
893 bool "Port_G_Bit_27"
894 help
895 IDE reset on pin 27 on port G
896
897endchoice
898
899config IDE_H8300 865config IDE_H8300
900 tristate "H8300 IDE support" 866 tristate "H8300 IDE support"
901 depends on H8300 867 depends on H8300
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 571544c37bb2..f94b679b611e 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -35,7 +35,7 @@ ifeq ($(CONFIG_BLK_DEV_CMD640), y)
35 obj-y += cmd640-core.o 35 obj-y += cmd640-core.o
36endif 36endif
37 37
38obj-$(CONFIG_BLK_DEV_IDE) += cris/ ppc/ 38obj-$(CONFIG_BLK_DEV_IDE) += ppc/
39obj-$(CONFIG_IDE_H8300) += h8300/ 39obj-$(CONFIG_IDE_H8300) += h8300/
40obj-$(CONFIG_IDE_GENERIC) += ide-generic.o 40obj-$(CONFIG_IDE_GENERIC) += ide-generic.o
41obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o 41obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 65038ca35e10..061456914ca3 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -483,7 +483,7 @@ static const struct ide_port_info icside_v6_port_info __initdata = {
483 .init_dma = icside_dma_off_init, 483 .init_dma = icside_dma_off_init,
484 .port_ops = &icside_v6_no_dma_port_ops, 484 .port_ops = &icside_v6_no_dma_port_ops,
485 .dma_ops = &icside_v6_dma_ops, 485 .dma_ops = &icside_v6_dma_ops,
486 .host_flags = IDE_HFLAG_SERIALIZE, 486 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
487 .mwdma_mask = ATA_MWDMA2, 487 .mwdma_mask = ATA_MWDMA2,
488 .swdma_mask = ATA_SWDMA2, 488 .swdma_mask = ATA_SWDMA2,
489}; 489};
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index aaf32541622d..96378ebfb31f 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -342,6 +342,7 @@ static const struct ide_port_ops palm_bk3710_ports_ops = {
342static const struct ide_port_info __devinitdata palm_bk3710_port_info = { 342static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
343 .init_dma = palm_bk3710_init_dma, 343 .init_dma = palm_bk3710_init_dma,
344 .port_ops = &palm_bk3710_ports_ops, 344 .port_ops = &palm_bk3710_ports_ops,
345 .host_flags = IDE_HFLAG_MMIO,
345 .pio_mask = ATA_PIO4, 346 .pio_mask = ATA_PIO4,
346 .udma_mask = ATA_UDMA4, /* (input clk 99MHz) */ 347 .udma_mask = ATA_UDMA4, /* (input clk 99MHz) */
347 .mwdma_mask = ATA_MWDMA2, 348 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index babc1a5e128d..1747b2358775 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -53,6 +53,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
53 53
54 ide_init_port_hw(hwif, &hw); 54 ide_init_port_hw(hwif, &hw);
55 55
56 hwif->host_flags = IDE_HFLAG_MMIO;
56 default_hwif_mmiops(hwif); 57 default_hwif_mmiops(hwif);
57 58
58 idx[0] = hwif->index; 59 idx[0] = hwif->index;
diff --git a/drivers/ide/cris/Makefile b/drivers/ide/cris/Makefile
deleted file mode 100644
index 20b95960531f..000000000000
--- a/drivers/ide/cris/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1EXTRA_CFLAGS += -Idrivers/ide
2
3obj-$(CONFIG_IDE_ETRAX) += ide-cris.o
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
deleted file mode 100644
index 9df26855bc05..000000000000
--- a/drivers/ide/cris/ide-cris.c
+++ /dev/null
@@ -1,1086 +0,0 @@
1/*
2 * Etrax specific IDE functions, like init and PIO-mode setting etc.
3 * Almost the entire ide.c is used for the rest of the Etrax ATA driver.
4 * Copyright (c) 2000-2005 Axis Communications AB
5 *
6 * Authors: Bjorn Wesen (initial version)
7 * Mikael Starvik (crisv32 port)
8 */
9
10/* Regarding DMA:
11 *
12 * There are two forms of DMA - "DMA handshaking" between the interface and the drive,
13 * and DMA between the memory and the interface. We can ALWAYS use the latter, since it's
 14 * something built into the Etrax. However, only some drives support DMA-mode handshaking
 15 * on the ATA bus. The normal PC driver and Triton interface disable memory-interface DMA when the
16 * device can't do DMA handshaking for some stupid reason. We don't need to do that.
17 */
18
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/timer.h>
22#include <linux/mm.h>
23#include <linux/interrupt.h>
24#include <linux/delay.h>
25#include <linux/blkdev.h>
26#include <linux/hdreg.h>
27#include <linux/ide.h>
28#include <linux/init.h>
29
30#include <asm/io.h>
31#include <asm/dma.h>
32
33/* number of DMA descriptors */
34#define MAX_DMA_DESCRS 64
35
36/* number of times to retry busy-flags when reading/writing IDE-registers
37 * this can't be too high because a hung harddisk might cause the watchdog
38 * to trigger (sometimes INB and OUTB are called with irq's disabled)
39 */
40
41#define IDE_REGISTER_TIMEOUT 300
42
43#define LOWDB(x)
44#define D(x)
45
46enum /* Transfer types */
47{
48 TYPE_PIO,
49 TYPE_DMA,
50 TYPE_UDMA
51};
52
53/* CRISv32 specifics */
54#ifdef CONFIG_ETRAX_ARCH_V32
55#include <asm/arch/hwregs/ata_defs.h>
56#include <asm/arch/hwregs/dma_defs.h>
57#include <asm/arch/hwregs/dma.h>
58#include <asm/arch/pinmux.h>
59
60#define ATA_UDMA2_CYC 2
61#define ATA_UDMA2_DVS 3
62#define ATA_UDMA1_CYC 2
63#define ATA_UDMA1_DVS 4
64#define ATA_UDMA0_CYC 4
65#define ATA_UDMA0_DVS 6
66#define ATA_DMA2_STROBE 7
67#define ATA_DMA2_HOLD 1
68#define ATA_DMA1_STROBE 8
69#define ATA_DMA1_HOLD 3
70#define ATA_DMA0_STROBE 25
71#define ATA_DMA0_HOLD 19
72#define ATA_PIO4_SETUP 3
73#define ATA_PIO4_STROBE 7
74#define ATA_PIO4_HOLD 1
75#define ATA_PIO3_SETUP 3
76#define ATA_PIO3_STROBE 9
77#define ATA_PIO3_HOLD 3
78#define ATA_PIO2_SETUP 3
79#define ATA_PIO2_STROBE 13
80#define ATA_PIO2_HOLD 5
81#define ATA_PIO1_SETUP 5
82#define ATA_PIO1_STROBE 23
83#define ATA_PIO1_HOLD 9
84#define ATA_PIO0_SETUP 9
85#define ATA_PIO0_STROBE 39
86#define ATA_PIO0_HOLD 9
87
88int
89cris_ide_ack_intr(ide_hwif_t* hwif)
90{
91 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
92 hwif->io_ports.data_addr);
93 REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
94 return 1;
95}
96
97static inline int
98cris_ide_busy(void)
99{
100 reg_ata_rs_stat_data stat_data;
101 stat_data = REG_RD(ata, regi_ata, rs_stat_data);
102 return stat_data.busy;
103}
104
105static inline int
106cris_ide_ready(void)
107{
108 return !cris_ide_busy();
109}
110
111static inline int
112cris_ide_data_available(unsigned short* data)
113{
114 reg_ata_rs_stat_data stat_data;
115 stat_data = REG_RD(ata, regi_ata, rs_stat_data);
116 *data = stat_data.data;
117 return stat_data.dav;
118}
119
120static void
121cris_ide_write_command(unsigned long command)
122{
123 REG_WR_INT(ata, regi_ata, rw_ctrl2, command); /* write data to the drive's register */
124}
125
126static void
127cris_ide_set_speed(int type, int setup, int strobe, int hold)
128{
129 reg_ata_rw_ctrl0 ctrl0 = REG_RD(ata, regi_ata, rw_ctrl0);
130 reg_ata_rw_ctrl1 ctrl1 = REG_RD(ata, regi_ata, rw_ctrl1);
131
132 if (type == TYPE_PIO) {
133 ctrl0.pio_setup = setup;
134 ctrl0.pio_strb = strobe;
135 ctrl0.pio_hold = hold;
136 } else if (type == TYPE_DMA) {
137 ctrl0.dma_strb = strobe;
138 ctrl0.dma_hold = hold;
139 } else if (type == TYPE_UDMA) {
140 ctrl1.udma_tcyc = setup;
141 ctrl1.udma_tdvs = strobe;
142 }
143 REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
144 REG_WR(ata, regi_ata, rw_ctrl1, ctrl1);
145}
146
147static unsigned long
148cris_ide_base_address(int bus)
149{
150 reg_ata_rw_ctrl2 ctrl2 = {0};
151 ctrl2.sel = bus;
152 return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
153}
154
155static unsigned long
156cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
157{
158 reg_ata_rw_ctrl2 ctrl2 = {0};
159 ctrl2.addr = addr;
160 ctrl2.cs1 = cs1;
161 ctrl2.cs0 = cs0;
162 return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
163}
164
165static __init void
166cris_ide_reset(unsigned val)
167{
168 reg_ata_rw_ctrl0 ctrl0 = {0};
169 ctrl0.rst = val ? regk_ata_active : regk_ata_inactive;
170 REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
171}
172
173static __init void
174cris_ide_init(void)
175{
176 reg_ata_rw_ctrl0 ctrl0 = {0};
177 reg_ata_rw_intr_mask intr_mask = {0};
178
179 ctrl0.en = regk_ata_yes;
180 REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
181
182 intr_mask.bus0 = regk_ata_yes;
183 intr_mask.bus1 = regk_ata_yes;
184 intr_mask.bus2 = regk_ata_yes;
185 intr_mask.bus3 = regk_ata_yes;
186
187 REG_WR(ata, regi_ata, rw_intr_mask, intr_mask);
188
189 crisv32_request_dma(2, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
190 crisv32_request_dma(3, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
191
192 crisv32_pinmux_alloc_fixed(pinmux_ata);
193 crisv32_pinmux_alloc_fixed(pinmux_ata0);
194 crisv32_pinmux_alloc_fixed(pinmux_ata1);
195 crisv32_pinmux_alloc_fixed(pinmux_ata2);
196 crisv32_pinmux_alloc_fixed(pinmux_ata3);
197
198 DMA_RESET(regi_dma2);
199 DMA_ENABLE(regi_dma2);
200 DMA_RESET(regi_dma3);
201 DMA_ENABLE(regi_dma3);
202
203 DMA_WR_CMD (regi_dma2, regk_dma_set_w_size2);
204 DMA_WR_CMD (regi_dma3, regk_dma_set_w_size2);
205}
206
207static dma_descr_context mycontext __attribute__ ((__aligned__(32)));
208
209#define cris_dma_descr_type dma_descr_data
210#define cris_pio_read regk_ata_rd
211#define cris_ultra_mask 0x7
212#define MAX_DESCR_SIZE 0xffffffffUL
213
214static unsigned long
215cris_ide_get_reg(unsigned long reg)
216{
217 return (reg & 0x0e000000) >> 25;
218}
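/*
 * Worked example, inferred from the mask above rather than from hardware
 * documentation: 0x0e000000 selects bits 25..27, which is where
 * cris_ide_reg_addr() packs the ATA register address, so
 * cris_ide_get_reg() simply recovers that address.  For the
 * status/command register the result is 7, which is what cris_ide_inw()
 * checks for further down when it fakes a BUSY status on a timeout.
 */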
219
220static void
221cris_ide_fill_descriptor(cris_dma_descr_type *d, void* buf, unsigned int len, int last)
222{
223 d->buf = (char*)virt_to_phys(buf);
224 d->after = d->buf + len;
225 d->eol = last;
226}
227
228static void
229cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type,int len)
230{
231 ide_hwif_t *hwif = drive->hwif;
232
233 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
234 hwif->io_ports.data_addr);
235 reg_ata_rw_trf_cnt trf_cnt = {0};
236
237 mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
238 mycontext.saved_data_buf = d->buf;
239 /* start the dma channel */
240 DMA_START_CONTEXT(dir ? regi_dma3 : regi_dma2, virt_to_phys(&mycontext));
241
242 /* initiate a multi word dma read using PIO handshaking */
243 trf_cnt.cnt = len >> 1;
244 /* Due to a "feature" the transfer count has to be one extra word for UDMA. */
245 if (type == TYPE_UDMA)
246 trf_cnt.cnt++;
247 REG_WR(ata, regi_ata, rw_trf_cnt, trf_cnt);
248
249 ctrl2.rw = dir ? regk_ata_rd : regk_ata_wr;
250 ctrl2.trf_mode = regk_ata_dma;
251 ctrl2.hsh = type == TYPE_PIO ? regk_ata_pio :
252 type == TYPE_DMA ? regk_ata_dma : regk_ata_udma;
253 ctrl2.multi = regk_ata_yes;
254 ctrl2.dma_size = regk_ata_word;
255 REG_WR(ata, regi_ata, rw_ctrl2, ctrl2);
256}
257
258static void
259cris_ide_wait_dma(int dir)
260{
261 reg_dma_rw_stat status;
262 do
263 {
264 status = REG_RD(dma, dir ? regi_dma3 : regi_dma2, rw_stat);
265 } while(status.list_state != regk_dma_data_at_eol);
266}
267
268static int cris_dma_test_irq(ide_drive_t *drive)
269{
270 ide_hwif_t *hwif = drive->hwif;
271 int intr = REG_RD_INT(ata, regi_ata, r_intr);
272
273 reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
274 hwif->io_ports.data_addr);
275
276 return intr & (1 << ctrl2.sel) ? 1 : 0;
277}
278
279static void cris_ide_initialize_dma(int dir)
280{
281}
282
283#else
284/* CRISv10 specifics */
285#include <asm/arch/svinto.h>
286#include <asm/arch/io_interface_mux.h>
287
288/* PIO timing (in R_ATA_CONFIG)
289 *
290 * _____________________________
291 * ADDRESS : ________/
292 *
293 * _______________
294 * DIOR : ____________/ \__________
295 *
296 * _______________
297 * DATA : XXXXXXXXXXXXXXXX_______________XXXXXXXX
298 *
299 *
 300 * DIOR is unbuffered while address and data are buffered.
 301 * This creates two problems:
 302 * 1. The DIOR pulse is too early (because it is unbuffered)
303 * 2. The rise time of DIOR is long
304 *
305 * There are at least three different plausible solutions
306 * 1. Use a pad capable of larger currents in Etrax
307 * 2. Use an external buffer
308 * 3. Make the strobe pulse longer
309 *
310 * Some of the strobe timings below are modified to compensate
311 * for this. This implies a slight performance decrease.
312 *
313 * THIS SHOULD NEVER BE CHANGED!
314 *
 315 * TODO: Is this still true for the latest LX boards?
316 */
317
318#define ATA_UDMA2_CYC 0 /* No UDMA supported, just to make it compile. */
319#define ATA_UDMA2_DVS 0
320#define ATA_UDMA1_CYC 0
321#define ATA_UDMA1_DVS 0
322#define ATA_UDMA0_CYC 0
323#define ATA_UDMA0_DVS 0
324#define ATA_DMA2_STROBE 4
325#define ATA_DMA2_HOLD 0
326#define ATA_DMA1_STROBE 4
327#define ATA_DMA1_HOLD 1
328#define ATA_DMA0_STROBE 12
329#define ATA_DMA0_HOLD 9
330#define ATA_PIO4_SETUP 1
331#define ATA_PIO4_STROBE 5
332#define ATA_PIO4_HOLD 0
333#define ATA_PIO3_SETUP 1
334#define ATA_PIO3_STROBE 5
335#define ATA_PIO3_HOLD 1
336#define ATA_PIO2_SETUP 1
337#define ATA_PIO2_STROBE 6
338#define ATA_PIO2_HOLD 2
339#define ATA_PIO1_SETUP 2
340#define ATA_PIO1_STROBE 11
341#define ATA_PIO1_HOLD 4
342#define ATA_PIO0_SETUP 4
343#define ATA_PIO0_STROBE 19
344#define ATA_PIO0_HOLD 4
345
346int
347cris_ide_ack_intr(ide_hwif_t* hwif)
348{
349 return 1;
350}
351
352static inline int
353cris_ide_busy(void)
354{
355 return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy) ;
356}
357
358static inline int
359cris_ide_ready(void)
360{
361 return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy) ;
362}
363
364static inline int
365cris_ide_data_available(unsigned short* data)
366{
367 unsigned long status = *R_ATA_STATUS_DATA;
368 *data = (unsigned short)status;
369 return status & IO_MASK(R_ATA_STATUS_DATA, dav);
370}
371
372static void
373cris_ide_write_command(unsigned long command)
374{
375 *R_ATA_CTRL_DATA = command;
376}
377
378static void
379cris_ide_set_speed(int type, int setup, int strobe, int hold)
380{
381 static int pio_setup = ATA_PIO4_SETUP;
382 static int pio_strobe = ATA_PIO4_STROBE;
383 static int pio_hold = ATA_PIO4_HOLD;
384 static int dma_strobe = ATA_DMA2_STROBE;
385 static int dma_hold = ATA_DMA2_HOLD;
386
387 if (type == TYPE_PIO) {
388 pio_setup = setup;
389 pio_strobe = strobe;
390 pio_hold = hold;
391 } else if (type == TYPE_DMA) {
392 dma_strobe = strobe;
393 dma_hold = hold;
394 }
395 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
396 IO_FIELD( R_ATA_CONFIG, dma_strobe, dma_strobe ) |
397 IO_FIELD( R_ATA_CONFIG, dma_hold, dma_hold ) |
398 IO_FIELD( R_ATA_CONFIG, pio_setup, pio_setup ) |
399 IO_FIELD( R_ATA_CONFIG, pio_strobe, pio_strobe ) |
400 IO_FIELD( R_ATA_CONFIG, pio_hold, pio_hold ) );
401}
402
403static unsigned long
404cris_ide_base_address(int bus)
405{
406 return IO_FIELD(R_ATA_CTRL_DATA, sel, bus);
407}
408
409static unsigned long
410cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
411{
412 return IO_FIELD(R_ATA_CTRL_DATA, addr, addr) |
413 IO_FIELD(R_ATA_CTRL_DATA, cs0, cs0) |
414 IO_FIELD(R_ATA_CTRL_DATA, cs1, cs1);
415}
416
417static __init void
418cris_ide_reset(unsigned val)
419{
420#ifdef CONFIG_ETRAX_IDE_G27_RESET
421 REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, val);
422#endif
423#ifdef CONFIG_ETRAX_IDE_PB7_RESET
424 port_pb_dir_shadow = port_pb_dir_shadow |
425 IO_STATE(R_PORT_PB_DIR, dir7, output);
426 *R_PORT_PB_DIR = port_pb_dir_shadow;
427 REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 7, val);
428#endif
429}
430
431static __init void
432cris_ide_init(void)
433{
434 volatile unsigned int dummy;
435
436 *R_ATA_CTRL_DATA = 0;
437 *R_ATA_TRANSFER_CNT = 0;
438 *R_ATA_CONFIG = 0;
439
440 if (cris_request_io_interface(if_ata, "ETRAX100LX IDE")) {
441 printk(KERN_CRIT "ide: Failed to get IO interface\n");
442 return;
443 } else if (cris_request_dma(ATA_TX_DMA_NBR,
444 "ETRAX100LX IDE TX",
445 DMA_VERBOSE_ON_ERROR,
446 dma_ata)) {
447 cris_free_io_interface(if_ata);
448 printk(KERN_CRIT "ide: Failed to get Tx DMA channel\n");
449 return;
450 } else if (cris_request_dma(ATA_RX_DMA_NBR,
451 "ETRAX100LX IDE RX",
452 DMA_VERBOSE_ON_ERROR,
453 dma_ata)) {
454 cris_free_dma(ATA_TX_DMA_NBR, "ETRAX100LX IDE Tx");
455 cris_free_io_interface(if_ata);
456 printk(KERN_CRIT "ide: Failed to get Rx DMA channel\n");
457 return;
458 }
459
460 /* make a dummy read to set the ata controller in a proper state */
461 dummy = *R_ATA_STATUS_DATA;
462
463 *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ));
464 *R_ATA_CTRL_DATA = ( IO_STATE( R_ATA_CTRL_DATA, rw, read) |
465 IO_FIELD( R_ATA_CTRL_DATA, addr, 1 ) );
466
 467 while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); /* wait until the busy flag clears */
468
469 *R_IRQ_MASK0_SET = ( IO_STATE( R_IRQ_MASK0_SET, ata_irq0, set ) |
470 IO_STATE( R_IRQ_MASK0_SET, ata_irq1, set ) |
471 IO_STATE( R_IRQ_MASK0_SET, ata_irq2, set ) |
472 IO_STATE( R_IRQ_MASK0_SET, ata_irq3, set ) );
473
474 /* reset the dma channels we will use */
475
476 RESET_DMA(ATA_TX_DMA_NBR);
477 RESET_DMA(ATA_RX_DMA_NBR);
478 WAIT_DMA(ATA_TX_DMA_NBR);
479 WAIT_DMA(ATA_RX_DMA_NBR);
480}
481
482#define cris_dma_descr_type etrax_dma_descr
483#define cris_pio_read IO_STATE(R_ATA_CTRL_DATA, rw, read)
484#define cris_ultra_mask 0x0
485#define MAX_DESCR_SIZE 0x10000UL
486
487static unsigned long
488cris_ide_get_reg(unsigned long reg)
489{
490 return (reg & 0x0e000000) >> 25;
491}
492
493static void
494cris_ide_fill_descriptor(cris_dma_descr_type *d, void* buf, unsigned int len, int last)
495{
496 d->buf = virt_to_phys(buf);
497 d->sw_len = len == MAX_DESCR_SIZE ? 0 : len;
498 if (last)
499 d->ctrl |= d_eol;
500}
501
502static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir, int type, int len)
503{
504 unsigned long cmd;
505
506 if (dir) {
507 /* need to do this before RX DMA due to a chip bug
508 * it is enough to just flush the part of the cache that
509 * corresponds to the buffers we start, but since HD transfers
510 * usually are more than 8 kB, it is easier to optimize for the
 511 * normal case and just flush the entire cache. It's the only
512 * way to be sure! (OB movie quote)
513 */
514 flush_etrax_cache();
515 *R_DMA_CH3_FIRST = virt_to_phys(d);
516 *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, start);
517
518 } else {
519 *R_DMA_CH2_FIRST = virt_to_phys(d);
520 *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, start);
521 }
522
523 /* initiate a multi word dma read using DMA handshaking */
524
525 *R_ATA_TRANSFER_CNT =
526 IO_FIELD(R_ATA_TRANSFER_CNT, count, len >> 1);
527
528 cmd = dir ? IO_STATE(R_ATA_CTRL_DATA, rw, read) : IO_STATE(R_ATA_CTRL_DATA, rw, write);
529 cmd |= type == TYPE_PIO ? IO_STATE(R_ATA_CTRL_DATA, handsh, pio) :
530 IO_STATE(R_ATA_CTRL_DATA, handsh, dma);
531 *R_ATA_CTRL_DATA =
532 cmd |
533 IO_FIELD(R_ATA_CTRL_DATA, data,
534 drive->hwif->io_ports.data_addr) |
535 IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
536 IO_STATE(R_ATA_CTRL_DATA, multi, on) |
537 IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
538}
539
540static void
541cris_ide_wait_dma(int dir)
542{
543 if (dir)
544 WAIT_DMA(ATA_RX_DMA_NBR);
545 else
546 WAIT_DMA(ATA_TX_DMA_NBR);
547}
548
549static int cris_dma_test_irq(ide_drive_t *drive)
550{
551 int intr = *R_IRQ_MASK0_RD;
552 int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel,
553 drive->hwif->io_ports.data_addr);
554
555 return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0;
556}
557
558
559static void cris_ide_initialize_dma(int dir)
560{
561 if (dir)
562 {
 563 RESET_DMA(ATA_RX_DMA_NBR); /* sometimes the DMA channel gets stuck so we need to do this */
564 WAIT_DMA(ATA_RX_DMA_NBR);
565 }
566 else
567 {
 568 RESET_DMA(ATA_TX_DMA_NBR); /* sometimes the DMA channel gets stuck so we need to do this */
569 WAIT_DMA(ATA_TX_DMA_NBR);
570 }
571}
572
573#endif
574
575void
576cris_ide_outw(unsigned short data, unsigned long reg) {
577 int timeleft;
578
579 LOWDB(printk("ow: data 0x%x, reg 0x%x\n", data, reg));
580
 581 /* note that timeouts are not handled here: we stop waiting, but we don't
582 * really notify anybody.
583 */
584
585 timeleft = IDE_REGISTER_TIMEOUT;
586 /* wait for busy flag */
587 do {
588 timeleft--;
589 } while(timeleft && cris_ide_busy());
590
591 /*
592 * Fall through at a timeout, so the ongoing command will be
593 * aborted by the write below, which is expected to be a dummy
594 * command to the command register. This happens when a faulty
595 * drive times out on a command. See comment on timeout in
596 * INB.
597 */
598 if(!timeleft)
599 printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data);
600
601 cris_ide_write_command(reg|data); /* write data to the drive's register */
602
603 timeleft = IDE_REGISTER_TIMEOUT;
604 /* wait for transmitter ready */
605 do {
606 timeleft--;
607 } while(timeleft && !cris_ide_ready());
608}
609
610void
611cris_ide_outb(unsigned char data, unsigned long reg)
612{
613 cris_ide_outw(data, reg);
614}
615
616void
617cris_ide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port)
618{
619 cris_ide_outw(addr, port);
620}
621
622unsigned short
623cris_ide_inw(unsigned long reg) {
624 int timeleft;
625 unsigned short val;
626
627 timeleft = IDE_REGISTER_TIMEOUT;
628 /* wait for busy flag */
629 do {
630 timeleft--;
631 } while(timeleft && cris_ide_busy());
632
633 if(!timeleft) {
634 /*
635 * If we're asked to read the status register, like for
636 * example when a command does not complete for an
637 * extended time, but the ATA interface is stuck in a
638 * busy state at the *ETRAX* ATA interface level (as has
639 * happened repeatedly with at least one bad disk), then
640 * the best thing to do is to pretend that we read
641 * "busy" in the status register, so the IDE driver will
642 * time-out, abort the ongoing command and perform a
643 * reset sequence. Note that the subsequent OUT_BYTE
644 * call will also timeout on busy, but as long as the
645 * write is still performed, everything will be fine.
646 */
647 if (cris_ide_get_reg(reg) == 7)
648 return BUSY_STAT;
649 else
650 /* For other rare cases we assume 0 is good enough. */
651 return 0;
652 }
653
654 cris_ide_write_command(reg | cris_pio_read);
655
656 timeleft = IDE_REGISTER_TIMEOUT;
657 /* wait for available */
658 do {
659 timeleft--;
660 } while(timeleft && !cris_ide_data_available(&val));
661
662 if(!timeleft)
663 return 0;
664
665 LOWDB(printk("inb: 0x%x from reg 0x%x\n", val & 0xff, reg));
666
667 return val;
668}
669
670unsigned char
671cris_ide_inb(unsigned long reg)
672{
673 return (unsigned char)cris_ide_inw(reg);
674}
675
676static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
677static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
678static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
679static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
680
681static void cris_dma_host_set(ide_drive_t *drive, int on)
682{
683}
684
685static void cris_set_pio_mode(ide_drive_t *drive, const u8 pio)
686{
687 int setup, strobe, hold;
688
689 switch(pio)
690 {
691 case 0:
692 setup = ATA_PIO0_SETUP;
693 strobe = ATA_PIO0_STROBE;
694 hold = ATA_PIO0_HOLD;
695 break;
696 case 1:
697 setup = ATA_PIO1_SETUP;
698 strobe = ATA_PIO1_STROBE;
699 hold = ATA_PIO1_HOLD;
700 break;
701 case 2:
702 setup = ATA_PIO2_SETUP;
703 strobe = ATA_PIO2_STROBE;
704 hold = ATA_PIO2_HOLD;
705 break;
706 case 3:
707 setup = ATA_PIO3_SETUP;
708 strobe = ATA_PIO3_STROBE;
709 hold = ATA_PIO3_HOLD;
710 break;
711 case 4:
712 setup = ATA_PIO4_SETUP;
713 strobe = ATA_PIO4_STROBE;
714 hold = ATA_PIO4_HOLD;
715 break;
716 default:
717 return;
718 }
719
720 cris_ide_set_speed(TYPE_PIO, setup, strobe, hold);
721}
722
723static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
724{
725 int cyc = 0, dvs = 0, strobe = 0, hold = 0;
726
727 switch(speed)
728 {
729 case XFER_UDMA_0:
730 cyc = ATA_UDMA0_CYC;
731 dvs = ATA_UDMA0_DVS;
732 break;
733 case XFER_UDMA_1:
734 cyc = ATA_UDMA1_CYC;
735 dvs = ATA_UDMA1_DVS;
736 break;
737 case XFER_UDMA_2:
738 cyc = ATA_UDMA2_CYC;
739 dvs = ATA_UDMA2_DVS;
740 break;
741 case XFER_MW_DMA_0:
742 strobe = ATA_DMA0_STROBE;
743 hold = ATA_DMA0_HOLD;
744 break;
745 case XFER_MW_DMA_1:
746 strobe = ATA_DMA1_STROBE;
747 hold = ATA_DMA1_HOLD;
748 break;
749 case XFER_MW_DMA_2:
750 strobe = ATA_DMA2_STROBE;
751 hold = ATA_DMA2_HOLD;
752 break;
753 }
754
755 if (speed >= XFER_UDMA_0)
756 cris_ide_set_speed(TYPE_UDMA, cyc, dvs, 0);
757 else
758 cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
759}
760
761static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base)
762{
763 int i;
764
765 memset(hw, 0, sizeof(*hw));
766
767 for (i = 0; i <= 7; i++)
768 hw->io_ports_array[i] = base + cris_ide_reg_addr(i, 0, 1);
769
770 /*
771 * the IDE control register is at ATA address 6,
772 * with CS1 active instead of CS0
773 */
774 hw->io_ports.ctl_addr = base + cris_ide_reg_addr(6, 1, 0);
775
776 hw->irq = ide_default_irq(0);
777 hw->ack_intr = cris_ide_ack_intr;
778}
779
780static const struct ide_port_ops cris_port_ops = {
781 .set_pio_mode = cris_set_pio_mode,
782 .set_dma_mode = cris_set_dma_mode,
783};
784
785static const struct ide_dma_ops cris_dma_ops;
786
787static const struct ide_port_info cris_port_info __initdata = {
788 .chipset = ide_etrax100,
789 .port_ops = &cris_port_ops,
790 .dma_ops = &cris_dma_ops,
791 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
792 IDE_HFLAG_NO_DMA, /* no SFF-style DMA */
793 .pio_mask = ATA_PIO4,
794 .udma_mask = cris_ultra_mask,
795 .mwdma_mask = ATA_MWDMA2,
796};
797
798static int __init init_e100_ide(void)
799{
800 hw_regs_t hw;
801 int h;
802 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
803
804 printk("ide: ETRAX FS built-in ATA DMA controller\n");
805
806 for (h = 0; h < 4; h++) {
807 ide_hwif_t *hwif = NULL;
808
809 cris_setup_ports(&hw, cris_ide_base_address(h));
810
811 hwif = ide_find_port();
812 if (hwif == NULL)
813 continue;
814 ide_init_port_data(hwif, hwif->index);
815 ide_init_port_hw(hwif, &hw);
816
817 hwif->ata_input_data = &cris_ide_input_data;
818 hwif->ata_output_data = &cris_ide_output_data;
819 hwif->atapi_input_bytes = &cris_atapi_input_bytes;
820 hwif->atapi_output_bytes = &cris_atapi_output_bytes;
821 hwif->OUTB = &cris_ide_outb;
822 hwif->OUTW = &cris_ide_outw;
823 hwif->OUTBSYNC = &cris_ide_outbsync;
824 hwif->INB = &cris_ide_inb;
825 hwif->INW = &cris_ide_inw;
826 hwif->cbl = ATA_CBL_PATA40;
827
828 idx[h] = hwif->index;
829 }
830
831 /* Reset pulse */
832 cris_ide_reset(0);
833 udelay(25);
834 cris_ide_reset(1);
835
836 cris_ide_init();
837
838 cris_ide_set_speed(TYPE_PIO, ATA_PIO4_SETUP, ATA_PIO4_STROBE, ATA_PIO4_HOLD);
839 cris_ide_set_speed(TYPE_DMA, 0, ATA_DMA2_STROBE, ATA_DMA2_HOLD);
840 cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0);
841
842 ide_device_add(idx, &cris_port_info);
843
844 return 0;
845}
846
847static cris_dma_descr_type mydescr __attribute__ ((__aligned__(16)));
848
849/*
850 * The following routines are mainly used by the ATAPI drivers.
851 *
852 * These routines will round up any request for an odd number of bytes,
853 * so if an odd bytecount is specified, be sure that there's at least one
854 * extra byte allocated for the buffer.
855 */
856static void
857cris_atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
858{
859 D(printk("atapi_input_bytes, buffer 0x%x, count %d\n",
860 buffer, bytecount));
861
862 if(bytecount & 1) {
863 printk("warning, odd bytecount in cdrom_in_bytes = %d.\n", bytecount);
864 bytecount++; /* to round off */
865 }
866
867 /* setup DMA and start transfer */
868
869 cris_ide_fill_descriptor(&mydescr, buffer, bytecount, 1);
870 cris_ide_start_dma(drive, &mydescr, 1, TYPE_PIO, bytecount);
871
872 /* wait for completion */
873 LED_DISK_READ(1);
874 cris_ide_wait_dma(1);
875 LED_DISK_READ(0);
876}
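/*
 * Caller-side sketch for the rounding rule documented above these ATAPI
 * helpers (illustrative only: the helper name is hypothetical and
 * kmalloc() assumes <linux/slab.h>).  Because an odd bytecount is rounded
 * up to the next even value, the buffer must include one spare byte when
 * the requested length is odd.
 */
static void *cris_atapi_alloc_sketch(unsigned int bytecount)
{
	/* ALIGN(bytecount, 2) reserves the extra byte for odd lengths */
	return kmalloc(ALIGN(bytecount, 2), GFP_KERNEL);
}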
877
878static void
879cris_atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
880{
881 D(printk("atapi_output_bytes, buffer 0x%x, count %d\n",
882 buffer, bytecount));
883
884 if(bytecount & 1) {
885 printk("odd bytecount %d in atapi_out_bytes!\n", bytecount);
886 bytecount++;
887 }
888
889 cris_ide_fill_descriptor(&mydescr, buffer, bytecount, 1);
890 cris_ide_start_dma(drive, &mydescr, 0, TYPE_PIO, bytecount);
891
892 /* wait for completion */
893
894 LED_DISK_WRITE(1);
895 LED_DISK_READ(1);
896 cris_ide_wait_dma(0);
897 LED_DISK_WRITE(0);
898}
899
900/*
901 * This is used for most PIO data transfers *from* the IDE interface
902 */
903static void
904cris_ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
905{
906 cris_atapi_input_bytes(drive, buffer, wcount << 2);
907}
908
909/*
910 * This is used for most PIO data transfers *to* the IDE interface
911 */
912static void
913cris_ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
914{
915 cris_atapi_output_bytes(drive, buffer, wcount << 2);
916}
917
 918/* we only have one DMA channel on the chip for ATA, so we can keep these static */
919static cris_dma_descr_type ata_descrs[MAX_DMA_DESCRS] __attribute__ ((__aligned__(16)));
920static unsigned int ata_tot_size;
921
922/*
923 * cris_ide_build_dmatable() prepares a dma request.
924 * Returns 0 if all went okay, returns 1 otherwise.
925 */
926static int cris_ide_build_dmatable (ide_drive_t *drive)
927{
928 ide_hwif_t *hwif = drive->hwif;
929 struct scatterlist* sg;
930 struct request *rq = drive->hwif->hwgroup->rq;
931 unsigned long size, addr;
932 unsigned int count = 0;
933 int i = 0;
934
935 sg = hwif->sg_table;
936
937 ata_tot_size = 0;
938
939 ide_map_sg(drive, rq);
940 i = hwif->sg_nents;
941
942 while(i) {
943 /*
944 * Determine addr and size of next buffer area. We assume that
945 * individual virtual buffers are always composed linearly in
946 * physical memory. For example, we assume that any 8kB buffer
947 * is always composed of two adjacent physical 4kB pages rather
948 * than two possibly non-adjacent physical 4kB pages.
949 */
950 /* group sequential buffers into one large buffer */
951 addr = sg_phys(sg);
952 size = sg_dma_len(sg);
953 while (--i) {
954 sg = sg_next(sg);
955 if ((addr + size) != sg_phys(sg))
956 break;
957 size += sg_dma_len(sg);
958 }
959
960 /* did we run out of descriptors? */
961
962 if(count >= MAX_DMA_DESCRS) {
963 printk("%s: too few DMA descriptors\n", drive->name);
964 return 1;
965 }
966
967 /* however, this case is more difficult - rw_trf_cnt cannot be more
968 than 65536 words per transfer, so in that case we need to either
969 1) use a DMA interrupt to re-trigger rw_trf_cnt and continue with
970 the descriptors, or
971 2) simply do the request here, and get dma_intr to only ide_end_request on
972 those blocks that were actually set up for transfer.
973 */
974
975 if(ata_tot_size + size > 131072) {
976 printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size);
977 return 1;
978 }
979
980 /* If size > MAX_DESCR_SIZE it has to be split into new descriptors. Since we
981 don't handle size > 131072, only one split is necessary */
982
983 if(size > MAX_DESCR_SIZE) {
984 cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, MAX_DESCR_SIZE, 0);
985 count++;
986 ata_tot_size += MAX_DESCR_SIZE;
987 size -= MAX_DESCR_SIZE;
988 addr += MAX_DESCR_SIZE;
989 }
990
991 cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, size,i ? 0 : 1);
992 count++;
993 ata_tot_size += size;
994 }
995
996 if (count) {
997 /* return and say all is ok */
998 return 0;
999 }
1000
1001 printk("%s: empty DMA table?\n", drive->name);
1002 return 1; /* let the PIO routines handle this weirdness */
1003}
1004
1005/*
1006 * cris_dma_intr() is the handler for disk read/write DMA interrupts
1007 */
1008static ide_startstop_t cris_dma_intr (ide_drive_t *drive)
1009{
1010 LED_DISK_READ(0);
1011 LED_DISK_WRITE(0);
1012
1013 return ide_dma_intr(drive);
1014}
1015
1016/*
1017 * The functions below initiate/abort DMA read/write operations on a drive.
1018 *
1019 * The caller is assumed to have selected the drive and programmed the drive's
1020 * sector address using CHS or LBA. All that remains is to prepare for DMA
1021 * and then issue the actual read/write DMA/PIO command to the drive.
1022 *
1023 * For ATAPI devices, we just prepare for DMA and return. The caller should
1024 * then issue the packet command to the drive and call us again with
1025 * cris_dma_start afterwards.
1026 *
1027 * Returns 0 if all went well.
1028 * Returns 1 if DMA read/write could not be started, in which case
1029 * the caller should revert to PIO for the current request.
1030 */
1031
1032static int cris_dma_end(ide_drive_t *drive)
1033{
1034 drive->waiting_for_dma = 0;
1035 return 0;
1036}
1037
1038static int cris_dma_setup(ide_drive_t *drive)
1039{
1040 struct request *rq = drive->hwif->hwgroup->rq;
1041
1042 cris_ide_initialize_dma(!rq_data_dir(rq));
1043 if (cris_ide_build_dmatable (drive)) {
1044 ide_map_sg(drive, rq);
1045 return 1;
1046 }
1047
1048 drive->waiting_for_dma = 1;
1049 return 0;
1050}
1051
1052static void cris_dma_exec_cmd(ide_drive_t *drive, u8 command)
1053{
1054 ide_execute_command(drive, command, &cris_dma_intr, WAIT_CMD, NULL);
1055}
1056
1057static void cris_dma_start(ide_drive_t *drive)
1058{
1059 struct request *rq = drive->hwif->hwgroup->rq;
1060 int writing = rq_data_dir(rq);
1061 int type = TYPE_DMA;
1062
1063 if (drive->current_speed >= XFER_UDMA_0)
1064 type = TYPE_UDMA;
1065
1066 cris_ide_start_dma(drive, &ata_descrs[0], writing ? 0 : 1, type, ata_tot_size);
1067
1068 if (writing) {
1069 LED_DISK_WRITE(1);
1070 } else {
1071 LED_DISK_READ(1);
1072 }
1073}
1074
1075static const struct ide_dma_ops cris_dma_ops = {
1076 .dma_host_set = cris_dma_host_set,
1077 .dma_setup = cris_dma_setup,
1078 .dma_exec_cmd = cris_dma_exec_cmd,
1079 .dma_start = cris_dma_start,
1080 .dma_end = cris_dma_end,
1081 .dma_test_irq = cris_dma_test_irq,
1082};
1083
1084module_init(init_e100_ide);
1085
1086MODULE_LICENSE("GPL");
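For readers following cris_ide_build_dmatable() above, the coalesce-and-split idea can be shown outside the driver. The sketch below is illustrative only: the seg/descr types, names and DESCR_MAX value are stand-ins rather than the driver's real structures, and the split handling is generalised to a loop, whereas the driver needs at most one split because it rejects requests above 131072 bytes.

struct seg   { unsigned long addr; unsigned long len; };           /* stand-in for a scatterlist entry   */
struct descr { unsigned long addr; unsigned long len; int eol; };  /* stand-in for cris_dma_descr_type   */

#define DESCR_MAX 65536UL    /* illustrative per-descriptor cap */

/* Coalesce physically adjacent segments, split oversized runs, and mark the
 * last descriptor as end-of-list.  Returns the descriptor count, or -1 if
 * the table is too small. */
static int build_table(const struct seg *sg, int nents,
                       struct descr *d, int max_descrs)
{
        int count = 0;

        while (nents) {
                unsigned long addr = sg->addr;
                unsigned long size = sg->len;

                /* group sequential buffers into one large run */
                while (--nents) {
                        sg++;
                        if (addr + size != sg->addr)
                                break;
                        size += sg->len;
                }

                /* split any run larger than one descriptor can carry */
                while (size > DESCR_MAX) {
                        if (count >= max_descrs)
                                return -1;
                        d[count++] = (struct descr){ addr, DESCR_MAX, 0 };
                        addr += DESCR_MAX;
                        size -= DESCR_MAX;
                }

                if (count >= max_descrs)
                        return -1;
                d[count++] = (struct descr){ addr, size, nents ? 0 : 1 };
        }
        return count;
}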
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index fd23f12e17aa..ecf53bb0d2aa 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -42,6 +42,91 @@ static u16 mm_inw(unsigned long a)
42 return r; 42 return r;
43} 43}
44 44
45static void h8300_tf_load(ide_drive_t *drive, ide_task_t *task)
46{
47 ide_hwif_t *hwif = drive->hwif;
48 struct ide_io_ports *io_ports = &hwif->io_ports;
49 struct ide_taskfile *tf = &task->tf;
50 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
51
52 if (task->tf_flags & IDE_TFLAG_FLAGGED)
53 HIHI = 0xFF;
54
55 ide_set_irq(drive, 1);
56
57 if (task->tf_flags & IDE_TFLAG_OUT_DATA)
58 mm_outw((tf->hob_data << 8) | tf->data, io_ports->data_addr);
59
60 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
61 outb(tf->hob_feature, io_ports->feature_addr);
62 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
63 outb(tf->hob_nsect, io_ports->nsect_addr);
64 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
65 outb(tf->hob_lbal, io_ports->lbal_addr);
66 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
67 outb(tf->hob_lbam, io_ports->lbam_addr);
68 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
69 outb(tf->hob_lbah, io_ports->lbah_addr);
70
71 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
72 outb(tf->feature, io_ports->feature_addr);
73 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
74 outb(tf->nsect, io_ports->nsect_addr);
75 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
76 outb(tf->lbal, io_ports->lbal_addr);
77 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
78 outb(tf->lbam, io_ports->lbam_addr);
79 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
80 outb(tf->lbah, io_ports->lbah_addr);
81
82 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
83 outb((tf->device & HIHI) | drive->select.all,
84 io_ports->device_addr);
85}
86
87static void h8300_tf_read(ide_drive_t *drive, ide_task_t *task)
88{
89 ide_hwif_t *hwif = drive->hwif;
90 struct ide_io_ports *io_ports = &hwif->io_ports;
91 struct ide_taskfile *tf = &task->tf;
92
93 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
94 u16 data = mm_inw(io_ports->data_addr);
95
96 tf->data = data & 0xff;
97 tf->hob_data = (data >> 8) & 0xff;
98 }
99
100 /* be sure we're looking at the low order bits */
101 outb(drive->ctl & ~0x80, io_ports->ctl_addr);
102
103 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
104 tf->nsect = inb(io_ports->nsect_addr);
105 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
106 tf->lbal = inb(io_ports->lbal_addr);
107 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
108 tf->lbam = inb(io_ports->lbam_addr);
109 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
110 tf->lbah = inb(io_ports->lbah_addr);
111 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
112 tf->device = inb(io_ports->device_addr);
113
114 if (task->tf_flags & IDE_TFLAG_LBA48) {
115 outb(drive->ctl | 0x80, io_ports->ctl_addr);
116
117 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
118 tf->hob_feature = inb(io_ports->feature_addr);
119 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
120 tf->hob_nsect = inb(io_ports->nsect_addr);
121 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
122 tf->hob_lbal = inb(io_ports->lbal_addr);
123 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
124 tf->hob_lbam = inb(io_ports->lbam_addr);
125 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
126 tf->hob_lbah = inb(io_ports->lbah_addr);
127 }
128}
129
45static void mm_outsw(unsigned long addr, void *buf, u32 len) 130static void mm_outsw(unsigned long addr, void *buf, u32 len)
46{ 131{
47 unsigned short *bp = (unsigned short *)buf; 132 unsigned short *bp = (unsigned short *)buf;
@@ -56,6 +141,18 @@ static void mm_insw(unsigned long addr, void *buf, u32 len)
56 *bp = bswap(*(volatile u16 *)addr); 141 *bp = bswap(*(volatile u16 *)addr);
57} 142}
58 143
144static void h8300_input_data(ide_drive_t *drive, struct request *rq,
145 void *buf, unsigned int len)
146{
147 mm_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
148}
149
150static void h8300_output_data(ide_drive_t *drive, struct request *rq,
151 void *buf, unsigned int len)
152{
153 mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
154}
155
59#define H8300_IDE_GAP (2) 156#define H8300_IDE_GAP (2)
60 157
61static inline void hw_setup(hw_regs_t *hw) 158static inline void hw_setup(hw_regs_t *hw)
@@ -74,12 +171,11 @@ static inline void hwif_setup(ide_hwif_t *hwif)
74{ 171{
75 default_hwif_iops(hwif); 172 default_hwif_iops(hwif);
76 173
77 hwif->OUTW = mm_outw; 174 hwif->tf_load = h8300_tf_load;
78 hwif->OUTSW = mm_outsw; 175 hwif->tf_read = h8300_tf_read;
79 hwif->INW = mm_inw; 176
80 hwif->INSW = mm_insw; 177 hwif->input_data = h8300_input_data;
81 hwif->OUTSL = NULL; 178 hwif->output_data = h8300_output_data;
82 hwif->INSL = NULL;
83} 179}
84 180
85static int __init h8300_ide_init(void) 181static int __init h8300_ide_init(void)
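The ide-h8300 hunk above is the template for converting a port driver to the new per-hwif hooks. As a rough reference only, a hypothetical driver's setup would look like the sketch below; the myport_* callbacks are placeholders and are not part of this patch.

#include <linux/ide.h>

/* Placeholder callbacks -- a real driver supplies its own, as ide-h8300 does above. */
static void myport_tf_load(ide_drive_t *drive, ide_task_t *task);
static void myport_tf_read(ide_drive_t *drive, ide_task_t *task);
static void myport_input_data(ide_drive_t *drive, struct request *rq,
                              void *buf, unsigned int len);
static void myport_output_data(ide_drive_t *drive, struct request *rq,
                               void *buf, unsigned int len);

static inline void myport_hwif_setup(ide_hwif_t *hwif)
{
        default_hwif_iops(hwif);                 /* keep the stock INB/OUTB helpers    */

        hwif->tf_load     = myport_tf_load;      /* program the taskfile registers     */
        hwif->tf_read     = myport_tf_read;      /* read them back after completion    */
        hwif->input_data  = myport_input_data;   /* PIO in  -- length is now in bytes  */
        hwif->output_data = myport_output_data;  /* PIO out -- length is now in bytes  */
}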
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index b34fd2bde96f..68e7f19dc036 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -142,7 +142,6 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
142{ 142{
143 unsigned long sector; 143 unsigned long sector;
144 unsigned long bio_sectors; 144 unsigned long bio_sectors;
145 unsigned long valid;
146 struct cdrom_info *info = drive->driver_data; 145 struct cdrom_info *info = drive->driver_data;
147 146
148 if (!cdrom_log_sense(drive, failed_command, sense)) 147 if (!cdrom_log_sense(drive, failed_command, sense))
@@ -173,17 +172,13 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
173 (sense->information[2] << 8) | 172 (sense->information[2] << 8) |
174 (sense->information[3]); 173 (sense->information[3]);
175 174
176 bio_sectors = bio_sectors(failed_command->bio);
177 if (bio_sectors < 4)
178 bio_sectors = 4;
179 if (drive->queue->hardsect_size == 2048) 175 if (drive->queue->hardsect_size == 2048)
180 /* device sector size is 2K */ 176 /* device sector size is 2K */
181 sector <<= 2; 177 sector <<= 2;
178
179 bio_sectors = max(bio_sectors(failed_command->bio), 4U);
182 sector &= ~(bio_sectors - 1); 180 sector &= ~(bio_sectors - 1);
183 valid = (sector - failed_command->sector) << 9;
184 181
185 if (valid < 0)
186 valid = 0;
187 if (sector < get_capacity(info->disk) && 182 if (sector < get_capacity(info->disk) &&
188 drive->probed_capacity - sector < 4 * 75) 183 drive->probed_capacity - sector < 4 * 75)
189 set_capacity(info->disk, sector); 184 set_capacity(info->disk, sector);
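The reordered sense-data fix-up above is easier to follow with concrete numbers. The values below are invented for illustration and assume a 2048-byte device sector and a bio covering 8 x 512-byte sectors; the rounding mask behaves as intended because bio_sectors is a power of two here.

#include <stdio.h>

int main(void)
{
        unsigned long sector      = 0x1235;        /* LBA from the sense data, in 2 KiB units   */
        unsigned long bio_sectors = 8 > 4 ? 8 : 4; /* max(bio_sectors(failed_command->bio), 4U) */

        sector <<= 2;                              /* device sector size is 2 KiB -> 512 B units */
        sector &= ~(bio_sectors - 1);              /* round down to the bio's first sector       */

        printf("failed sector rounded to %#lx\n", sector);   /* prints 0x48d0 */
        return 0;
}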
@@ -555,14 +550,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
555 ATAPI_WAIT_PC, cdrom_timer_expiry); 550 ATAPI_WAIT_PC, cdrom_timer_expiry);
556 return ide_started; 551 return ide_started;
557 } else { 552 } else {
558 unsigned long flags; 553 ide_execute_pkt_cmd(drive);
559
560 /* packet command */
561 spin_lock_irqsave(&ide_lock, flags);
562 hwif->OUTBSYNC(drive, WIN_PACKETCMD,
563 hwif->io_ports.command_addr);
564 ndelay(400);
565 spin_unlock_irqrestore(&ide_lock, flags);
566 554
567 return (*handler) (drive); 555 return (*handler) (drive);
568 } 556 }
@@ -613,7 +601,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
613 cmd_len = ATAPI_MIN_CDB_BYTES; 601 cmd_len = ATAPI_MIN_CDB_BYTES;
614 602
615 /* send the command to the device */ 603 /* send the command to the device */
616 HWIF(drive)->atapi_output_bytes(drive, rq->cmd, cmd_len); 604 hwif->output_data(drive, NULL, rq->cmd, cmd_len);
617 605
618 /* start the DMA if need be */ 606 /* start the DMA if need be */
619 if (info->dma) 607 if (info->dma)
@@ -629,7 +617,7 @@ static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len)
629{ 617{
630 while (len > 0) { 618 while (len > 0) {
631 int dum = 0; 619 int dum = 0;
632 xf(drive, &dum, sizeof(dum)); 620 xf(drive, NULL, &dum, sizeof(dum));
633 len -= sizeof(dum); 621 len -= sizeof(dum);
634 } 622 }
635} 623}
@@ -639,7 +627,7 @@ static void ide_cd_drain_data(ide_drive_t *drive, int nsects)
639 while (nsects > 0) { 627 while (nsects > 0) {
640 static char dum[SECTOR_SIZE]; 628 static char dum[SECTOR_SIZE];
641 629
642 drive->hwif->atapi_input_bytes(drive, dum, sizeof(dum)); 630 drive->hwif->input_data(drive, NULL, dum, sizeof(dum));
643 nsects--; 631 nsects--;
644 } 632 }
645} 633}
@@ -666,7 +654,7 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
666 printk(KERN_ERR "%s: %s: wrong transfer direction!\n", 654 printk(KERN_ERR "%s: %s: wrong transfer direction!\n",
667 drive->name, __func__); 655 drive->name, __func__);
668 656
669 xf = rw ? hwif->atapi_output_bytes : hwif->atapi_input_bytes; 657 xf = rw ? hwif->output_data : hwif->input_data;
670 ide_cd_pad_transfer(drive, xf, len); 658 ide_cd_pad_transfer(drive, xf, len);
671 } else if (rw == 0 && ireason == 1) { 659 } else if (rw == 0 && ireason == 1) {
672 /* 660 /*
@@ -794,7 +782,7 @@ static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
794 782
795 sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS); 783 sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS);
796 784
797 memset(rq->cmd, 0, sizeof(rq->cmd)); 785 memset(rq->cmd, 0, BLK_MAX_CDB);
798 rq->cmd[0] = GPCMD_SEEK; 786 rq->cmd[0] = GPCMD_SEEK;
799 put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]); 787 put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
800 788
@@ -1019,10 +1007,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1019 1007
1020 if (ireason == 0) { 1008 if (ireason == 0) {
1021 write = 1; 1009 write = 1;
1022 xferfunc = HWIF(drive)->atapi_output_bytes; 1010 xferfunc = hwif->output_data;
1023 } else { 1011 } else {
1024 write = 0; 1012 write = 0;
1025 xferfunc = HWIF(drive)->atapi_input_bytes; 1013 xferfunc = hwif->input_data;
1026 } 1014 }
1027 1015
1028 /* transfer data */ 1016 /* transfer data */
@@ -1061,7 +1049,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1061 if (blen > thislen) 1049 if (blen > thislen)
1062 blen = thislen; 1050 blen = thislen;
1063 1051
1064 xferfunc(drive, ptr, blen); 1052 xferfunc(drive, NULL, ptr, blen);
1065 1053
1066 thislen -= blen; 1054 thislen -= blen;
1067 len -= blen; 1055 len -= blen;
@@ -1706,7 +1694,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1706 long block = (long)rq->hard_sector / (hard_sect >> 9); 1694 long block = (long)rq->hard_sector / (hard_sect >> 9);
1707 unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9); 1695 unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
1708 1696
1709 memset(rq->cmd, 0, sizeof(rq->cmd)); 1697 memset(rq->cmd, 0, BLK_MAX_CDB);
1710 1698
1711 if (rq_data_dir(rq) == READ) 1699 if (rq_data_dir(rq) == READ)
1712 rq->cmd[0] = GPCMD_READ_10; 1700 rq->cmd[0] = GPCMD_READ_10;
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index 6ed7ca071331..6490a2dea96b 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -326,7 +326,7 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
326 326
327 printk(KERN_ERR " The failed \"%s\" packet command " 327 printk(KERN_ERR " The failed \"%s\" packet command "
328 "was: \n \"", s); 328 "was: \n \"", s);
329 for (i = 0; i < sizeof(failed_command->cmd); i++) 329 for (i = 0; i < BLK_MAX_CDB; i++)
330 printk(KERN_CONT "%02x ", failed_command->cmd[i]); 330 printk(KERN_CONT "%02x ", failed_command->cmd[i]);
331 printk(KERN_CONT "\"\n"); 331 printk(KERN_CONT "\"\n");
332 } 332 }
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index c352cf27b6e7..653b1ade13d3 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -464,9 +464,10 @@ int ide_dma_setup(ide_drive_t *drive)
464 464
465 /* PRD table */ 465 /* PRD table */
466 if (hwif->mmio) 466 if (hwif->mmio)
467 writel(hwif->dmatable_dma, (void __iomem *)hwif->dma_prdtable); 467 writel(hwif->dmatable_dma,
468 (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
468 else 469 else
469 outl(hwif->dmatable_dma, hwif->dma_prdtable); 470 outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
470 471
471 /* specify r/w */ 472 /* specify r/w */
472 hwif->OUTB(reading, hwif->dma_command); 473 hwif->OUTB(reading, hwif->dma_command);
@@ -858,14 +859,8 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
858 859
859 if (!hwif->dma_command) 860 if (!hwif->dma_command)
860 hwif->dma_command = hwif->dma_base + 0; 861 hwif->dma_command = hwif->dma_base + 0;
861 if (!hwif->dma_vendor1)
862 hwif->dma_vendor1 = hwif->dma_base + 1;
863 if (!hwif->dma_status) 862 if (!hwif->dma_status)
864 hwif->dma_status = hwif->dma_base + 2; 863 hwif->dma_status = hwif->dma_base + 2;
865 if (!hwif->dma_vendor3)
866 hwif->dma_vendor3 = hwif->dma_base + 3;
867 if (!hwif->dma_prdtable)
868 hwif->dma_prdtable = hwif->dma_base + 4;
869 864
870 hwif->dma_ops = &sff_dma_ops; 865 hwif->dma_ops = &sff_dma_ops;
871} 866}
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 489079b8ed03..f05fbc2bd7a8 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -231,6 +231,7 @@ static int idefloppy_end_request(ide_drive_t *drive, int uptodate, int nsecs)
231static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, 231static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
232 unsigned int bcount, int direction) 232 unsigned int bcount, int direction)
233{ 233{
234 ide_hwif_t *hwif = drive->hwif;
234 struct request *rq = pc->rq; 235 struct request *rq = pc->rq;
235 struct req_iterator iter; 236 struct req_iterator iter;
236 struct bio_vec *bvec; 237 struct bio_vec *bvec;
@@ -246,9 +247,9 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
246 247
247 data = bvec_kmap_irq(bvec, &flags); 248 data = bvec_kmap_irq(bvec, &flags);
248 if (direction) 249 if (direction)
249 drive->hwif->atapi_output_bytes(drive, data, count); 250 hwif->output_data(drive, NULL, data, count);
250 else 251 else
251 drive->hwif->atapi_input_bytes(drive, data, count); 252 hwif->input_data(drive, NULL, data, count);
252 bvec_kunmap_irq(data, &flags); 253 bvec_kunmap_irq(data, &flags);
253 254
254 bcount -= count; 255 bcount -= count;
@@ -261,10 +262,7 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
261 if (bcount) { 262 if (bcount) {
262 printk(KERN_ERR "%s: leftover data in %s, bcount == %d\n", 263 printk(KERN_ERR "%s: leftover data in %s, bcount == %d\n",
263 drive->name, __func__, bcount); 264 drive->name, __func__, bcount);
264 if (direction) 265 ide_pad_transfer(drive, direction, bcount);
265 ide_atapi_write_zeros(drive, bcount);
266 else
267 ide_atapi_discard_data(drive, bcount);
268 } 266 }
269} 267}
270 268
@@ -490,7 +488,7 @@ static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
490 printk(KERN_ERR "ide-floppy: The floppy wants " 488 printk(KERN_ERR "ide-floppy: The floppy wants "
491 "to send us more data than expected " 489 "to send us more data than expected "
492 "- discarding data\n"); 490 "- discarding data\n");
493 ide_atapi_discard_data(drive, bcount); 491 ide_pad_transfer(drive, 0, bcount);
494 492
495 ide_set_handler(drive, 493 ide_set_handler(drive,
496 &idefloppy_pc_intr, 494 &idefloppy_pc_intr,
@@ -503,12 +501,12 @@ static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
503 } 501 }
504 } 502 }
505 if (pc->flags & PC_FLAG_WRITING) 503 if (pc->flags & PC_FLAG_WRITING)
506 xferfunc = hwif->atapi_output_bytes; 504 xferfunc = hwif->output_data;
507 else 505 else
508 xferfunc = hwif->atapi_input_bytes; 506 xferfunc = hwif->input_data;
509 507
510 if (pc->buf) 508 if (pc->buf)
511 xferfunc(drive, pc->cur_pos, bcount); 509 xferfunc(drive, NULL, pc->cur_pos, bcount);
512 else 510 else
513 ide_floppy_io_buffers(drive, pc, bcount, 511 ide_floppy_io_buffers(drive, pc, bcount,
514 !!(pc->flags & PC_FLAG_WRITING)); 512 !!(pc->flags & PC_FLAG_WRITING));
@@ -548,8 +546,10 @@ static ide_startstop_t idefloppy_transfer_pc(ide_drive_t *drive)
548 546
549 /* Set the interrupt routine */ 547 /* Set the interrupt routine */
550 ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL); 548 ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL);
549
551 /* Send the actual packet */ 550 /* Send the actual packet */
552 HWIF(drive)->atapi_output_bytes(drive, floppy->pc->c, 12); 551 hwif->output_data(drive, NULL, floppy->pc->c, 12);
552
553 return ide_started; 553 return ide_started;
554} 554}
555 555
@@ -569,7 +569,8 @@ static int idefloppy_transfer_pc2(ide_drive_t *drive)
569 idefloppy_floppy_t *floppy = drive->driver_data; 569 idefloppy_floppy_t *floppy = drive->driver_data;
570 570
571 /* Send the actual packet */ 571 /* Send the actual packet */
572 HWIF(drive)->atapi_output_bytes(drive, floppy->pc->c, 12); 572 drive->hwif->output_data(drive, NULL, floppy->pc->c, 12);
573
573 /* Timeout for the packet command */ 574 /* Timeout for the packet command */
574 return IDEFLOPPY_WAIT_CMD; 575 return IDEFLOPPY_WAIT_CMD;
575} 576}
@@ -692,7 +693,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
692 return ide_started; 693 return ide_started;
693 } else { 694 } else {
694 /* Issue the packet command */ 695 /* Issue the packet command */
695 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr); 696 ide_execute_pkt_cmd(drive);
696 return (*pkt_xfer_routine) (drive); 697 return (*pkt_xfer_routine) (drive);
697 } 698 }
698} 699}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 3a2d8930d17f..696525342e9a 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -295,49 +295,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
295 spin_unlock_irqrestore(&ide_lock, flags); 295 spin_unlock_irqrestore(&ide_lock, flags);
296} 296}
297 297
298void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
299{
300 ide_hwif_t *hwif = drive->hwif;
301 struct ide_io_ports *io_ports = &hwif->io_ports;
302 struct ide_taskfile *tf = &task->tf;
303
304 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
305 u16 data = hwif->INW(io_ports->data_addr);
306
307 tf->data = data & 0xff;
308 tf->hob_data = (data >> 8) & 0xff;
309 }
310
311 /* be sure we're looking at the low order bits */
312 hwif->OUTB(drive->ctl & ~0x80, io_ports->ctl_addr);
313
314 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
315 tf->nsect = hwif->INB(io_ports->nsect_addr);
316 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
317 tf->lbal = hwif->INB(io_ports->lbal_addr);
318 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
319 tf->lbam = hwif->INB(io_ports->lbam_addr);
320 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
321 tf->lbah = hwif->INB(io_ports->lbah_addr);
322 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
323 tf->device = hwif->INB(io_ports->device_addr);
324
325 if (task->tf_flags & IDE_TFLAG_LBA48) {
326 hwif->OUTB(drive->ctl | 0x80, io_ports->ctl_addr);
327
328 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
329 tf->hob_feature = hwif->INB(io_ports->feature_addr);
330 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
331 tf->hob_nsect = hwif->INB(io_ports->nsect_addr);
332 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
333 tf->hob_lbal = hwif->INB(io_ports->lbal_addr);
334 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
335 tf->hob_lbam = hwif->INB(io_ports->lbam_addr);
336 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
337 tf->hob_lbah = hwif->INB(io_ports->lbah_addr);
338 }
339}
340
341/** 298/**
342 * ide_end_drive_cmd - end an explicit drive command 299 * ide_end_drive_cmd - end an explicit drive command
343 * @drive: command 300 * @drive: command
@@ -373,7 +330,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
373 tf->error = err; 330 tf->error = err;
374 tf->status = stat; 331 tf->status = stat;
375 332
376 ide_tf_read(drive, task); 333 drive->hwif->tf_read(drive, task);
377 334
378 if (task->tf_flags & IDE_TFLAG_DYN) 335 if (task->tf_flags & IDE_TFLAG_DYN)
379 kfree(task); 336 kfree(task);
@@ -422,7 +379,7 @@ static void try_to_flush_leftover_data (ide_drive_t *drive)
422 u32 wcount = (i > 16) ? 16 : i; 379 u32 wcount = (i > 16) ? 16 : i;
423 380
424 i -= wcount; 381 i -= wcount;
425 HWIF(drive)->ata_input_data(drive, buffer, wcount); 382 drive->hwif->input_data(drive, NULL, buffer, wcount * 4);
426 } 383 }
427} 384}
428 385
@@ -502,7 +459,8 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
502 459
503 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 460 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
504 /* force an abort */ 461 /* force an abort */
505 hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr); 462 hwif->OUTBSYNC(drive, WIN_IDLEIMMEDIATE,
463 hwif->io_ports.command_addr);
506 464
507 if (rq->errors >= ERROR_MAX) { 465 if (rq->errors >= ERROR_MAX) {
508 ide_kill_rq(drive, rq); 466 ide_kill_rq(drive, rq);
@@ -1592,8 +1550,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1592 1550
1593void ide_init_drive_cmd (struct request *rq) 1551void ide_init_drive_cmd (struct request *rq)
1594{ 1552{
1595 memset(rq, 0, sizeof(*rq)); 1553 blk_rq_init(NULL, rq);
1596 rq->ref_count = 1;
1597} 1554}
1598 1555
1599EXPORT_SYMBOL(ide_init_drive_cmd); 1556EXPORT_SYMBOL(ide_init_drive_cmd);
@@ -1679,7 +1636,23 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
1679 task.tf.lbam = bcount & 0xff; 1636 task.tf.lbam = bcount & 0xff;
1680 task.tf.lbah = (bcount >> 8) & 0xff; 1637 task.tf.lbah = (bcount >> 8) & 0xff;
1681 1638
1682 ide_tf_load(drive, &task); 1639 ide_tf_dump(drive->name, &task.tf);
1640 drive->hwif->tf_load(drive, &task);
1683} 1641}
1684 1642
1685EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); 1643EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
1644
1645void ide_pad_transfer(ide_drive_t *drive, int write, int len)
1646{
1647 ide_hwif_t *hwif = drive->hwif;
1648 u8 buf[4] = { 0 };
1649
1650 while (len > 0) {
1651 if (write)
1652 hwif->output_data(drive, NULL, buf, min(4, len));
1653 else
1654 hwif->input_data(drive, NULL, buf, min(4, len));
1655 len -= 4;
1656 }
1657}
1658EXPORT_SYMBOL_GPL(ide_pad_transfer);
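ide_pad_transfer() above replaces the old ide_atapi_discard_data()/ide_atapi_write_zeros() pair; the ide-floppy and ide-tape hunks later in this patch convert their call sites to it. A typical use looks like the sketch below (drive, write and bcount come from the surrounding interrupt-handler context); the helper drains or zero-pads the transfer in chunks of at most 4 bytes through a small scratch buffer.

/* Illustrative call sites for the new helper (mirrors the ide-floppy conversion). */
if (write)
        ide_pad_transfer(drive, 1, bcount);   /* device wants more data: pad the write with zeros */
else
        ide_pad_transfer(drive, 0, bcount);   /* device sent extra data: drain and discard it     */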
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 5425d3038ec2..57d9a9a79a6f 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -37,21 +37,6 @@ static u8 ide_inb (unsigned long port)
37 return (u8) inb(port); 37 return (u8) inb(port);
38} 38}
39 39
40static u16 ide_inw (unsigned long port)
41{
42 return (u16) inw(port);
43}
44
45static void ide_insw (unsigned long port, void *addr, u32 count)
46{
47 insw(port, addr, count);
48}
49
50static void ide_insl (unsigned long port, void *addr, u32 count)
51{
52 insl(port, addr, count);
53}
54
55static void ide_outb (u8 val, unsigned long port) 40static void ide_outb (u8 val, unsigned long port)
56{ 41{
57 outb(val, port); 42 outb(val, port);
@@ -62,32 +47,11 @@ static void ide_outbsync (ide_drive_t *drive, u8 addr, unsigned long port)
62 outb(addr, port); 47 outb(addr, port);
63} 48}
64 49
65static void ide_outw (u16 val, unsigned long port)
66{
67 outw(val, port);
68}
69
70static void ide_outsw (unsigned long port, void *addr, u32 count)
71{
72 outsw(port, addr, count);
73}
74
75static void ide_outsl (unsigned long port, void *addr, u32 count)
76{
77 outsl(port, addr, count);
78}
79
80void default_hwif_iops (ide_hwif_t *hwif) 50void default_hwif_iops (ide_hwif_t *hwif)
81{ 51{
82 hwif->OUTB = ide_outb; 52 hwif->OUTB = ide_outb;
83 hwif->OUTBSYNC = ide_outbsync; 53 hwif->OUTBSYNC = ide_outbsync;
84 hwif->OUTW = ide_outw;
85 hwif->OUTSW = ide_outsw;
86 hwif->OUTSL = ide_outsl;
87 hwif->INB = ide_inb; 54 hwif->INB = ide_inb;
88 hwif->INW = ide_inw;
89 hwif->INSW = ide_insw;
90 hwif->INSL = ide_insl;
91} 55}
92 56
93/* 57/*
@@ -99,21 +63,6 @@ static u8 ide_mm_inb (unsigned long port)
99 return (u8) readb((void __iomem *) port); 63 return (u8) readb((void __iomem *) port);
100} 64}
101 65
102static u16 ide_mm_inw (unsigned long port)
103{
104 return (u16) readw((void __iomem *) port);
105}
106
107static void ide_mm_insw (unsigned long port, void *addr, u32 count)
108{
109 __ide_mm_insw((void __iomem *) port, addr, count);
110}
111
112static void ide_mm_insl (unsigned long port, void *addr, u32 count)
113{
114 __ide_mm_insl((void __iomem *) port, addr, count);
115}
116
117static void ide_mm_outb (u8 value, unsigned long port) 66static void ide_mm_outb (u8 value, unsigned long port)
118{ 67{
119 writeb(value, (void __iomem *) port); 68 writeb(value, (void __iomem *) port);
@@ -124,34 +73,13 @@ static void ide_mm_outbsync (ide_drive_t *drive, u8 value, unsigned long port)
124 writeb(value, (void __iomem *) port); 73 writeb(value, (void __iomem *) port);
125} 74}
126 75
127static void ide_mm_outw (u16 value, unsigned long port)
128{
129 writew(value, (void __iomem *) port);
130}
131
132static void ide_mm_outsw (unsigned long port, void *addr, u32 count)
133{
134 __ide_mm_outsw((void __iomem *) port, addr, count);
135}
136
137static void ide_mm_outsl (unsigned long port, void *addr, u32 count)
138{
139 __ide_mm_outsl((void __iomem *) port, addr, count);
140}
141
142void default_hwif_mmiops (ide_hwif_t *hwif) 76void default_hwif_mmiops (ide_hwif_t *hwif)
143{ 77{
144 hwif->OUTB = ide_mm_outb; 78 hwif->OUTB = ide_mm_outb;
145 /* Most systems will need to override OUTBSYNC, alas however 79 /* Most systems will need to override OUTBSYNC, alas however
146 this one is controller specific! */ 80 this one is controller specific! */
147 hwif->OUTBSYNC = ide_mm_outbsync; 81 hwif->OUTBSYNC = ide_mm_outbsync;
148 hwif->OUTW = ide_mm_outw;
149 hwif->OUTSW = ide_mm_outsw;
150 hwif->OUTSL = ide_mm_outsl;
151 hwif->INB = ide_mm_inb; 82 hwif->INB = ide_mm_inb;
152 hwif->INW = ide_mm_inw;
153 hwif->INSW = ide_mm_insw;
154 hwif->INSL = ide_mm_insl;
155} 83}
156 84
157EXPORT_SYMBOL(default_hwif_mmiops); 85EXPORT_SYMBOL(default_hwif_mmiops);
@@ -175,6 +103,123 @@ void SELECT_MASK (ide_drive_t *drive, int mask)
175 port_ops->maskproc(drive, mask); 103 port_ops->maskproc(drive, mask);
176} 104}
177 105
106static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
107{
108 ide_hwif_t *hwif = drive->hwif;
109 struct ide_io_ports *io_ports = &hwif->io_ports;
110 struct ide_taskfile *tf = &task->tf;
111 void (*tf_outb)(u8 addr, unsigned long port);
112 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
113 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
114
115 if (mmio)
116 tf_outb = ide_mm_outb;
117 else
118 tf_outb = ide_outb;
119
120 if (task->tf_flags & IDE_TFLAG_FLAGGED)
121 HIHI = 0xFF;
122
123 ide_set_irq(drive, 1);
124
125 if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
126 SELECT_MASK(drive, 0);
127
128 if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
129 u16 data = (tf->hob_data << 8) | tf->data;
130
131 if (mmio)
132 writew(data, (void __iomem *)io_ports->data_addr);
133 else
134 outw(data, io_ports->data_addr);
135 }
136
137 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
138 tf_outb(tf->hob_feature, io_ports->feature_addr);
139 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
140 tf_outb(tf->hob_nsect, io_ports->nsect_addr);
141 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
142 tf_outb(tf->hob_lbal, io_ports->lbal_addr);
143 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
144 tf_outb(tf->hob_lbam, io_ports->lbam_addr);
145 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
146 tf_outb(tf->hob_lbah, io_ports->lbah_addr);
147
148 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
149 tf_outb(tf->feature, io_ports->feature_addr);
150 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
151 tf_outb(tf->nsect, io_ports->nsect_addr);
152 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
153 tf_outb(tf->lbal, io_ports->lbal_addr);
154 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
155 tf_outb(tf->lbam, io_ports->lbam_addr);
156 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
157 tf_outb(tf->lbah, io_ports->lbah_addr);
158
159 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
160 tf_outb((tf->device & HIHI) | drive->select.all,
161 io_ports->device_addr);
162}
163
164static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
165{
166 ide_hwif_t *hwif = drive->hwif;
167 struct ide_io_ports *io_ports = &hwif->io_ports;
168 struct ide_taskfile *tf = &task->tf;
169 void (*tf_outb)(u8 addr, unsigned long port);
170 u8 (*tf_inb)(unsigned long port);
171 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
172
173 if (mmio) {
174 tf_outb = ide_mm_outb;
175 tf_inb = ide_mm_inb;
176 } else {
177 tf_outb = ide_outb;
178 tf_inb = ide_inb;
179 }
180
181 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
182 u16 data;
183
184 if (mmio)
185 data = readw((void __iomem *)io_ports->data_addr);
186 else
187 data = inw(io_ports->data_addr);
188
189 tf->data = data & 0xff;
190 tf->hob_data = (data >> 8) & 0xff;
191 }
192
193 /* be sure we're looking at the low order bits */
194 tf_outb(drive->ctl & ~0x80, io_ports->ctl_addr);
195
196 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
197 tf->nsect = tf_inb(io_ports->nsect_addr);
198 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
199 tf->lbal = tf_inb(io_ports->lbal_addr);
200 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
201 tf->lbam = tf_inb(io_ports->lbam_addr);
202 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
203 tf->lbah = tf_inb(io_ports->lbah_addr);
204 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
205 tf->device = tf_inb(io_ports->device_addr);
206
207 if (task->tf_flags & IDE_TFLAG_LBA48) {
208 tf_outb(drive->ctl | 0x80, io_ports->ctl_addr);
209
210 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
211 tf->hob_feature = tf_inb(io_ports->feature_addr);
212 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
213 tf->hob_nsect = tf_inb(io_ports->nsect_addr);
214 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
215 tf->hob_lbal = tf_inb(io_ports->lbal_addr);
216 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
217 tf->hob_lbam = tf_inb(io_ports->lbam_addr);
218 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
219 tf->hob_lbah = tf_inb(io_ports->lbah_addr);
220 }
221}
222
178/* 223/*
179 * Some localbus EIDE interfaces require a special access sequence 224 * Some localbus EIDE interfaces require a special access sequence
180 * when using 32-bit I/O instructions to transfer data. We call this 225 * when using 32-bit I/O instructions to transfer data. We call this
@@ -182,109 +227,112 @@ void SELECT_MASK (ide_drive_t *drive, int mask)
182 * of the sector count register location, with interrupts disabled 227 * of the sector count register location, with interrupts disabled
183 * to ensure that the reads all happen together. 228 * to ensure that the reads all happen together.
184 */ 229 */
185static void ata_vlb_sync(ide_drive_t *drive, unsigned long port) 230static void ata_vlb_sync(unsigned long port)
186{ 231{
187 (void) HWIF(drive)->INB(port); 232 (void)inb(port);
188 (void) HWIF(drive)->INB(port); 233 (void)inb(port);
189 (void) HWIF(drive)->INB(port); 234 (void)inb(port);
190} 235}
191 236
192/* 237/*
193 * This is used for most PIO data transfers *from* the IDE interface 238 * This is used for most PIO data transfers *from* the IDE interface
239 *
240 * These routines will round up any request for an odd number of bytes,
241 * so if an odd len is specified, be sure that there's at least one
242 * extra byte allocated for the buffer.
194 */ 243 */
195static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount) 244static void ata_input_data(ide_drive_t *drive, struct request *rq,
245 void *buf, unsigned int len)
196{ 246{
197 ide_hwif_t *hwif = drive->hwif; 247 ide_hwif_t *hwif = drive->hwif;
198 struct ide_io_ports *io_ports = &hwif->io_ports; 248 struct ide_io_ports *io_ports = &hwif->io_ports;
249 unsigned long data_addr = io_ports->data_addr;
199 u8 io_32bit = drive->io_32bit; 250 u8 io_32bit = drive->io_32bit;
251 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
252
253 len++;
200 254
201 if (io_32bit) { 255 if (io_32bit) {
202 if (io_32bit & 2) { 256 unsigned long uninitialized_var(flags);
203 unsigned long flags;
204 257
258 if ((io_32bit & 2) && !mmio) {
205 local_irq_save(flags); 259 local_irq_save(flags);
206 ata_vlb_sync(drive, io_ports->nsect_addr); 260 ata_vlb_sync(io_ports->nsect_addr);
207 hwif->INSL(io_ports->data_addr, buffer, wcount); 261 }
262
263 if (mmio)
264 __ide_mm_insl((void __iomem *)data_addr, buf, len / 4);
265 else
266 insl(data_addr, buf, len / 4);
267
268 if ((io_32bit & 2) && !mmio)
208 local_irq_restore(flags); 269 local_irq_restore(flags);
209 } else 270
210 hwif->INSL(io_ports->data_addr, buffer, wcount); 271 if ((len & 3) >= 2) {
211 } else 272 if (mmio)
212 hwif->INSW(io_ports->data_addr, buffer, wcount << 1); 273 __ide_mm_insw((void __iomem *)data_addr,
274 (u8 *)buf + (len & ~3), 1);
275 else
276 insw(data_addr, (u8 *)buf + (len & ~3), 1);
277 }
278 } else {
279 if (mmio)
280 __ide_mm_insw((void __iomem *)data_addr, buf, len / 2);
281 else
282 insw(data_addr, buf, len / 2);
283 }
213} 284}
214 285
215/* 286/*
216 * This is used for most PIO data transfers *to* the IDE interface 287 * This is used for most PIO data transfers *to* the IDE interface
217 */ 288 */
218static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount) 289static void ata_output_data(ide_drive_t *drive, struct request *rq,
290 void *buf, unsigned int len)
219{ 291{
220 ide_hwif_t *hwif = drive->hwif; 292 ide_hwif_t *hwif = drive->hwif;
221 struct ide_io_ports *io_ports = &hwif->io_ports; 293 struct ide_io_ports *io_ports = &hwif->io_ports;
294 unsigned long data_addr = io_ports->data_addr;
222 u8 io_32bit = drive->io_32bit; 295 u8 io_32bit = drive->io_32bit;
296 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
223 297
224 if (io_32bit) { 298 if (io_32bit) {
225 if (io_32bit & 2) { 299 unsigned long uninitialized_var(flags);
226 unsigned long flags;
227 300
301 if ((io_32bit & 2) && !mmio) {
228 local_irq_save(flags); 302 local_irq_save(flags);
229 ata_vlb_sync(drive, io_ports->nsect_addr); 303 ata_vlb_sync(io_ports->nsect_addr);
230 hwif->OUTSL(io_ports->data_addr, buffer, wcount); 304 }
231 local_irq_restore(flags);
232 } else
233 hwif->OUTSL(io_ports->data_addr, buffer, wcount);
234 } else
235 hwif->OUTSW(io_ports->data_addr, buffer, wcount << 1);
236}
237
238/*
239 * The following routines are mainly used by the ATAPI drivers.
240 *
241 * These routines will round up any request for an odd number of bytes,
242 * so if an odd bytecount is specified, be sure that there's at least one
243 * extra byte allocated for the buffer.
244 */
245
246static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
247{
248 ide_hwif_t *hwif = HWIF(drive);
249 305
250 ++bytecount; 306 if (mmio)
251#if defined(CONFIG_ATARI) || defined(CONFIG_Q40) 307 __ide_mm_outsl((void __iomem *)data_addr, buf, len / 4);
252 if (MACH_IS_ATARI || MACH_IS_Q40) { 308 else
253 /* Atari has a byte-swapped IDE interface */ 309 outsl(data_addr, buf, len / 4);
254 insw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
255 return;
256 }
257#endif /* CONFIG_ATARI || CONFIG_Q40 */
258 hwif->ata_input_data(drive, buffer, bytecount / 4);
259 if ((bytecount & 0x03) >= 2)
260 hwif->INSW(hwif->io_ports.data_addr,
261 (u8 *)buffer + (bytecount & ~0x03), 1);
262}
263 310
264static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount) 311 if ((io_32bit & 2) && !mmio)
265{ 312 local_irq_restore(flags);
266 ide_hwif_t *hwif = HWIF(drive);
267 313
268 ++bytecount; 314 if ((len & 3) >= 2) {
269#if defined(CONFIG_ATARI) || defined(CONFIG_Q40) 315 if (mmio)
270 if (MACH_IS_ATARI || MACH_IS_Q40) { 316 __ide_mm_outsw((void __iomem *)data_addr,
271 /* Atari has a byte-swapped IDE interface */ 317 (u8 *)buf + (len & ~3), 1);
272 outsw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2); 318 else
273 return; 319 outsw(data_addr, (u8 *)buf + (len & ~3), 1);
320 }
321 } else {
322 if (mmio)
323 __ide_mm_outsw((void __iomem *)data_addr, buf, len / 2);
324 else
325 outsw(data_addr, buf, len / 2);
274 } 326 }
275#endif /* CONFIG_ATARI || CONFIG_Q40 */
276 hwif->ata_output_data(drive, buffer, bytecount / 4);
277 if ((bytecount & 0x03) >= 2)
278 hwif->OUTSW(hwif->io_ports.data_addr,
279 (u8 *)buffer + (bytecount & ~0x03), 1);
280} 327}
281 328
282void default_hwif_transport(ide_hwif_t *hwif) 329void default_hwif_transport(ide_hwif_t *hwif)
283{ 330{
284 hwif->ata_input_data = ata_input_data; 331 hwif->tf_load = ide_tf_load;
285 hwif->ata_output_data = ata_output_data; 332 hwif->tf_read = ide_tf_read;
286 hwif->atapi_input_bytes = atapi_input_bytes; 333
287 hwif->atapi_output_bytes = atapi_output_bytes; 334 hwif->input_data = ata_input_data;
335 hwif->output_data = ata_output_data;
288} 336}
289 337
290void ide_fix_driveid (struct hd_driveid *id) 338void ide_fix_driveid (struct hd_driveid *id)
@@ -577,6 +625,8 @@ static const struct drive_list_entry ivb_list[] = {
577 { "TSSTcorp CDDVDW SH-S202J" , "SB01" }, 625 { "TSSTcorp CDDVDW SH-S202J" , "SB01" },
578 { "TSSTcorp CDDVDW SH-S202N" , "SB00" }, 626 { "TSSTcorp CDDVDW SH-S202N" , "SB00" },
579 { "TSSTcorp CDDVDW SH-S202N" , "SB01" }, 627 { "TSSTcorp CDDVDW SH-S202N" , "SB01" },
628 { "TSSTcorp CDDVDW SH-S202H" , "SB00" },
629 { "TSSTcorp CDDVDW SH-S202H" , "SB01" },
580 { NULL , NULL } 630 { NULL , NULL }
581}; 631};
582 632
@@ -641,7 +691,7 @@ int ide_driveid_update(ide_drive_t *drive)
641 SELECT_MASK(drive, 1); 691 SELECT_MASK(drive, 1);
642 ide_set_irq(drive, 1); 692 ide_set_irq(drive, 1);
643 msleep(50); 693 msleep(50);
644 hwif->OUTB(WIN_IDENTIFY, hwif->io_ports.command_addr); 694 hwif->OUTBSYNC(drive, WIN_IDENTIFY, hwif->io_ports.command_addr);
645 timeout = jiffies + WAIT_WORSTCASE; 695 timeout = jiffies + WAIT_WORSTCASE;
646 do { 696 do {
647 if (time_after(jiffies, timeout)) { 697 if (time_after(jiffies, timeout)) {
@@ -668,7 +718,7 @@ int ide_driveid_update(ide_drive_t *drive)
668 local_irq_restore(flags); 718 local_irq_restore(flags);
669 return 0; 719 return 0;
670 } 720 }
671 hwif->ata_input_data(drive, id, SECTOR_WORDS); 721 hwif->input_data(drive, NULL, id, SECTOR_SIZE);
672 (void)ide_read_status(drive); /* clear drive IRQ */ 722 (void)ide_read_status(drive); /* clear drive IRQ */
673 local_irq_enable(); 723 local_irq_enable();
674 local_irq_restore(flags); 724 local_irq_restore(flags);
@@ -849,9 +899,19 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
849 ndelay(400); 899 ndelay(400);
850 spin_unlock_irqrestore(&ide_lock, flags); 900 spin_unlock_irqrestore(&ide_lock, flags);
851} 901}
852
853EXPORT_SYMBOL(ide_execute_command); 902EXPORT_SYMBOL(ide_execute_command);
854 903
904void ide_execute_pkt_cmd(ide_drive_t *drive)
905{
906 ide_hwif_t *hwif = drive->hwif;
907 unsigned long flags;
908
909 spin_lock_irqsave(&ide_lock, flags);
910 hwif->OUTBSYNC(drive, WIN_PACKETCMD, hwif->io_ports.command_addr);
911 ndelay(400);
912 spin_unlock_irqrestore(&ide_lock, flags);
913}
914EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
855 915
856/* needed below */ 916/* needed below */
857static ide_startstop_t do_reset1 (ide_drive_t *, int); 917static ide_startstop_t do_reset1 (ide_drive_t *, int);
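The new ata_input_data()/ata_output_data() above take a length in bytes and keep the old ATAPI round-up behaviour. A quick worked example of the arithmetic, using an invented length and plain C:

#include <stdio.h>

int main(void)
{
        unsigned int len = 13;                 /* odd ATAPI byte count requested by the caller */

        len++;                                 /* 14: odd lengths are rounded up               */
        unsigned int dwords = len / 4;         /* 3 insl()/outsl() transfers  = 12 bytes       */
        unsigned int tail   = (len & 3) >= 2;  /* 1 trailing insw()/outsw()   =  2 more bytes  */

        printf("%u bytes moved for a 13-byte request\n",
               dwords * 4 + tail * 2);         /* 14 bytes: the buffer needs one spare byte    */
        return 0;
}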
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 6f04ea3e93a8..47af80df6872 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -487,7 +487,7 @@ static void ide_dump_sector(ide_drive_t *drive)
487 else 487 else
488 task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; 488 task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
489 489
490 ide_tf_read(drive, &task); 490 drive->hwif->tf_read(drive, &task);
491 491
492 if (lba48 || (tf->device & ATA_LBA)) 492 if (lba48 || (tf->device & ATA_LBA))
493 printk(", LBAsect=%llu", 493 printk(", LBAsect=%llu",
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 862f02603f9b..34b0d4f26b58 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -124,7 +124,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
124 124
125 id = drive->id; 125 id = drive->id;
126 /* read 512 bytes of id info */ 126 /* read 512 bytes of id info */
127 hwif->ata_input_data(drive, id, SECTOR_WORDS); 127 hwif->input_data(drive, NULL, id, SECTOR_SIZE);
128 128
129 drive->id_read = 1; 129 drive->id_read = 1;
130 local_irq_enable(); 130 local_irq_enable();
@@ -293,7 +293,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
293 hwif->OUTB(0, io_ports->feature_addr); 293 hwif->OUTB(0, io_ports->feature_addr);
294 294
295 /* ask drive for ID */ 295 /* ask drive for ID */
296 hwif->OUTB(cmd, io_ports->command_addr); 296 hwif->OUTBSYNC(drive, cmd, io_ports->command_addr);
297 297
298 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; 298 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
299 timeout += jiffies; 299 timeout += jiffies;
@@ -480,7 +480,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
480 msleep(50); 480 msleep(50);
481 hwif->OUTB(drive->select.all, io_ports->device_addr); 481 hwif->OUTB(drive->select.all, io_ports->device_addr);
482 msleep(50); 482 msleep(50);
483 hwif->OUTB(WIN_SRST, io_ports->command_addr); 483 hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr);
484 (void)ide_busy_sleep(hwif); 484 (void)ide_busy_sleep(hwif);
485 rc = try_to_identify(drive, cmd); 485 rc = try_to_identify(drive, cmd);
486 } 486 }
@@ -516,7 +516,7 @@ static void enable_nest (ide_drive_t *drive)
516 printk("%s: enabling %s -- ", hwif->name, drive->id->model); 516 printk("%s: enabling %s -- ", hwif->name, drive->id->model);
517 SELECT_DRIVE(drive); 517 SELECT_DRIVE(drive);
518 msleep(50); 518 msleep(50);
519 hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr); 519 hwif->OUTBSYNC(drive, EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
520 520
521 if (ide_busy_sleep(hwif)) { 521 if (ide_busy_sleep(hwif)) {
522 printk(KERN_CONT "failed (timeout)\n"); 522 printk(KERN_CONT "failed (timeout)\n");
@@ -1347,19 +1347,14 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1347 (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS)) 1347 (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
1348 hwif->irq = port ? 15 : 14; 1348 hwif->irq = port ? 15 : 14;
1349 1349
1350 hwif->host_flags = d->host_flags; 1350 /* ->host_flags may be set by ->init_iops (or even earlier...) */
1351 hwif->host_flags |= d->host_flags;
1351 hwif->pio_mask = d->pio_mask; 1352 hwif->pio_mask = d->pio_mask;
1352 1353
1353 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */ 1354 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
1354 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0) 1355 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
1355 hwif->port_ops = d->port_ops; 1356 hwif->port_ops = d->port_ops;
1356 1357
1357 if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
1358 ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) {
1359 if (hwif->mate)
1360 hwif->mate->serialized = hwif->serialized = 1;
1361 }
1362
1363 hwif->swdma_mask = d->swdma_mask; 1358 hwif->swdma_mask = d->swdma_mask;
1364 hwif->mwdma_mask = d->mwdma_mask; 1359 hwif->mwdma_mask = d->mwdma_mask;
1365 hwif->ultra_mask = d->udma_mask; 1360 hwif->ultra_mask = d->udma_mask;
@@ -1381,6 +1376,12 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1381 hwif->dma_ops = d->dma_ops; 1376 hwif->dma_ops = d->dma_ops;
1382 } 1377 }
1383 1378
1379 if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
1380 ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) {
1381 if (hwif->mate)
1382 hwif->mate->serialized = hwif->serialized = 1;
1383 }
1384
1384 if (d->host_flags & IDE_HFLAG_RQSIZE_256) 1385 if (d->host_flags & IDE_HFLAG_RQSIZE_256)
1385 hwif->rqsize = 256; 1386 hwif->rqsize = 256;
1386 1387
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 7b2f3815a838..8d6ad812a014 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -822,6 +822,7 @@ static int ide_drivers_open(struct inode *inode, struct file *file)
822} 822}
823 823
824static const struct file_operations ide_drivers_operations = { 824static const struct file_operations ide_drivers_operations = {
825 .owner = THIS_MODULE,
825 .open = ide_drivers_open, 826 .open = ide_drivers_open,
826 .read = seq_read, 827 .read = seq_read,
827 .llseek = seq_lseek, 828 .llseek = seq_lseek,
@@ -830,16 +831,12 @@ static const struct file_operations ide_drivers_operations = {
830 831
831void proc_ide_create(void) 832void proc_ide_create(void)
832{ 833{
833 struct proc_dir_entry *entry;
834
835 proc_ide_root = proc_mkdir("ide", NULL); 834 proc_ide_root = proc_mkdir("ide", NULL);
836 835
837 if (!proc_ide_root) 836 if (!proc_ide_root)
838 return; 837 return;
839 838
840 entry = create_proc_entry("drivers", 0, proc_ide_root); 839 proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations);
841 if (entry)
842 entry->proc_fops = &ide_drivers_operations;
843} 840}
844 841
845void proc_ide_destroy(void) 842void proc_ide_destroy(void)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 29870c415110..1e1f26331a24 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -395,13 +395,13 @@ static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
395 if (bh == NULL) { 395 if (bh == NULL) {
396 printk(KERN_ERR "ide-tape: bh == NULL in " 396 printk(KERN_ERR "ide-tape: bh == NULL in "
397 "idetape_input_buffers\n"); 397 "idetape_input_buffers\n");
398 ide_atapi_discard_data(drive, bcount); 398 ide_pad_transfer(drive, 0, bcount);
399 return; 399 return;
400 } 400 }
401 count = min( 401 count = min(
402 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)), 402 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
403 bcount); 403 bcount);
404 HWIF(drive)->atapi_input_bytes(drive, bh->b_data + 404 drive->hwif->input_data(drive, NULL, bh->b_data +
405 atomic_read(&bh->b_count), count); 405 atomic_read(&bh->b_count), count);
406 bcount -= count; 406 bcount -= count;
407 atomic_add(count, &bh->b_count); 407 atomic_add(count, &bh->b_count);
@@ -427,7 +427,7 @@ static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
427 return; 427 return;
428 } 428 }
429 count = min((unsigned int)pc->b_count, (unsigned int)bcount); 429 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
430 HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count); 430 drive->hwif->output_data(drive, NULL, pc->b_data, count);
431 bcount -= count; 431 bcount -= count;
432 pc->b_data += count; 432 pc->b_data += count;
433 pc->b_count -= count; 433 pc->b_count -= count;
@@ -662,7 +662,7 @@ static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
662 662
663static void idetape_init_rq(struct request *rq, u8 cmd) 663static void idetape_init_rq(struct request *rq, u8 cmd)
664{ 664{
665 memset(rq, 0, sizeof(*rq)); 665 blk_rq_init(NULL, rq);
666 rq->cmd_type = REQ_TYPE_SPECIAL; 666 rq->cmd_type = REQ_TYPE_SPECIAL;
667 rq->cmd[0] = cmd; 667 rq->cmd[0] = cmd;
668} 668}
@@ -871,7 +871,7 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
871 printk(KERN_ERR "ide-tape: The tape wants to " 871 printk(KERN_ERR "ide-tape: The tape wants to "
872 "send us more data than expected " 872 "send us more data than expected "
873 "- discarding data\n"); 873 "- discarding data\n");
874 ide_atapi_discard_data(drive, bcount); 874 ide_pad_transfer(drive, 0, bcount);
875 ide_set_handler(drive, &idetape_pc_intr, 875 ide_set_handler(drive, &idetape_pc_intr,
876 IDETAPE_WAIT_CMD, NULL); 876 IDETAPE_WAIT_CMD, NULL);
877 return ide_started; 877 return ide_started;
@@ -880,16 +880,16 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
880 "data than expected - allowing transfer\n"); 880 "data than expected - allowing transfer\n");
881 } 881 }
882 iobuf = &idetape_input_buffers; 882 iobuf = &idetape_input_buffers;
883 xferfunc = hwif->atapi_input_bytes; 883 xferfunc = hwif->input_data;
884 } else { 884 } else {
885 iobuf = &idetape_output_buffers; 885 iobuf = &idetape_output_buffers;
886 xferfunc = hwif->atapi_output_bytes; 886 xferfunc = hwif->output_data;
887 } 887 }
888 888
889 if (pc->bh) 889 if (pc->bh)
890 iobuf(drive, pc, bcount); 890 iobuf(drive, pc, bcount);
891 else 891 else
892 xferfunc(drive, pc->cur_pos, bcount); 892 xferfunc(drive, NULL, pc->cur_pos, bcount);
893 893
894 /* Update the current position */ 894 /* Update the current position */
895 pc->xferred += bcount; 895 pc->xferred += bcount;
@@ -979,7 +979,8 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
979 hwif->dma_ops->dma_start(drive); 979 hwif->dma_ops->dma_start(drive);
980#endif 980#endif
981 /* Send the actual packet */ 981 /* Send the actual packet */
982 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12); 982 hwif->output_data(drive, NULL, pc->c, 12);
983
983 return ide_started; 984 return ide_started;
984} 985}
985 986
@@ -1055,7 +1056,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1055 IDETAPE_WAIT_CMD, NULL); 1056 IDETAPE_WAIT_CMD, NULL);
1056 return ide_started; 1057 return ide_started;
1057 } else { 1058 } else {
1058 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr); 1059 ide_execute_pkt_cmd(drive);
1059 return idetape_transfer_pc(drive); 1060 return idetape_transfer_pc(drive);
1060 } 1061 }
1061} 1062}
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 9f9ad9fb6b89..0c908ca3ff79 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -33,60 +33,18 @@
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34#include <asm/io.h> 34#include <asm/io.h>
35 35
36void ide_tf_load(ide_drive_t *drive, ide_task_t *task) 36void ide_tf_dump(const char *s, struct ide_taskfile *tf)
37{ 37{
38 ide_hwif_t *hwif = drive->hwif;
39 struct ide_io_ports *io_ports = &hwif->io_ports;
40 struct ide_taskfile *tf = &task->tf;
41 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
42
43 if (task->tf_flags & IDE_TFLAG_FLAGGED)
44 HIHI = 0xFF;
45
46#ifdef DEBUG 38#ifdef DEBUG
47 printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x " 39 printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
48 "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", 40 "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
49 drive->name, tf->feature, tf->nsect, tf->lbal, 41 s, tf->feature, tf->nsect, tf->lbal,
50 tf->lbam, tf->lbah, tf->device, tf->command); 42 tf->lbam, tf->lbah, tf->device, tf->command);
51 printk("%s: hob: nsect 0x%02x lbal 0x%02x " 43 printk("%s: hob: nsect 0x%02x lbal 0x%02x "
52 "lbam 0x%02x lbah 0x%02x\n", 44 "lbam 0x%02x lbah 0x%02x\n",
53 drive->name, tf->hob_nsect, tf->hob_lbal, 45 s, tf->hob_nsect, tf->hob_lbal,
54 tf->hob_lbam, tf->hob_lbah); 46 tf->hob_lbam, tf->hob_lbah);
55#endif 47#endif
56
57 ide_set_irq(drive, 1);
58
59 if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
60 SELECT_MASK(drive, 0);
61
62 if (task->tf_flags & IDE_TFLAG_OUT_DATA)
63 hwif->OUTW((tf->hob_data << 8) | tf->data, io_ports->data_addr);
64
65 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
66 hwif->OUTB(tf->hob_feature, io_ports->feature_addr);
67 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
68 hwif->OUTB(tf->hob_nsect, io_ports->nsect_addr);
69 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
70 hwif->OUTB(tf->hob_lbal, io_ports->lbal_addr);
71 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
72 hwif->OUTB(tf->hob_lbam, io_ports->lbam_addr);
73 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
74 hwif->OUTB(tf->hob_lbah, io_ports->lbah_addr);
75
76 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
77 hwif->OUTB(tf->feature, io_ports->feature_addr);
78 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
79 hwif->OUTB(tf->nsect, io_ports->nsect_addr);
80 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
81 hwif->OUTB(tf->lbal, io_ports->lbal_addr);
82 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
83 hwif->OUTB(tf->lbam, io_ports->lbam_addr);
84 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
85 hwif->OUTB(tf->lbah, io_ports->lbah_addr);
86
87 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
88 hwif->OUTB((tf->device & HIHI) | drive->select.all,
89 io_ports->device_addr);
90} 48}
91 49
92int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) 50int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
@@ -149,8 +107,10 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
149 if (task->tf_flags & IDE_TFLAG_FLAGGED) 107 if (task->tf_flags & IDE_TFLAG_FLAGGED)
150 task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS; 108 task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;
151 109
152 if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) 110 if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
153 ide_tf_load(drive, task); 111 ide_tf_dump(drive->name, tf);
112 hwif->tf_load(drive, task);
113 }
154 114
155 switch (task->data_phase) { 115 switch (task->data_phase) {
156 case TASKFILE_MULTI_OUT: 116 case TASKFILE_MULTI_OUT:
@@ -283,7 +243,8 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
283 return stat; 243 return stat;
284} 244}
285 245
286static void ide_pio_sector(ide_drive_t *drive, unsigned int write) 246static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
247 unsigned int write)
287{ 248{
288 ide_hwif_t *hwif = drive->hwif; 249 ide_hwif_t *hwif = drive->hwif;
289 struct scatterlist *sg = hwif->sg_table; 250 struct scatterlist *sg = hwif->sg_table;
@@ -323,9 +284,9 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
323 284
324 /* do the actual data transfer */ 285 /* do the actual data transfer */
325 if (write) 286 if (write)
326 hwif->ata_output_data(drive, buf, SECTOR_WORDS); 287 hwif->output_data(drive, rq, buf, SECTOR_SIZE);
327 else 288 else
328 hwif->ata_input_data(drive, buf, SECTOR_WORDS); 289 hwif->input_data(drive, rq, buf, SECTOR_SIZE);
329 290
330 kunmap_atomic(buf, KM_BIO_SRC_IRQ); 291 kunmap_atomic(buf, KM_BIO_SRC_IRQ);
331#ifdef CONFIG_HIGHMEM 292#ifdef CONFIG_HIGHMEM
@@ -333,13 +294,14 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
333#endif 294#endif
334} 295}
335 296
336static void ide_pio_multi(ide_drive_t *drive, unsigned int write) 297static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
298 unsigned int write)
337{ 299{
338 unsigned int nsect; 300 unsigned int nsect;
339 301
340 nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count); 302 nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
341 while (nsect--) 303 while (nsect--)
342 ide_pio_sector(drive, write); 304 ide_pio_sector(drive, rq, write);
343} 305}
344 306
345static void ide_pio_datablock(ide_drive_t *drive, struct request *rq, 307static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
@@ -362,10 +324,10 @@ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
362 switch (drive->hwif->data_phase) { 324 switch (drive->hwif->data_phase) {
363 case TASKFILE_MULTI_IN: 325 case TASKFILE_MULTI_IN:
364 case TASKFILE_MULTI_OUT: 326 case TASKFILE_MULTI_OUT:
365 ide_pio_multi(drive, write); 327 ide_pio_multi(drive, rq, write);
366 break; 328 break;
367 default: 329 default:
368 ide_pio_sector(drive, write); 330 ide_pio_sector(drive, rq, write);
369 break; 331 break;
370 } 332 }
371 333
@@ -532,8 +494,7 @@ int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
532{ 494{
533 struct request rq; 495 struct request rq;
534 496
535 memset(&rq, 0, sizeof(rq)); 497 blk_rq_init(NULL, &rq);
536 rq.ref_count = 1;
537 rq.cmd_type = REQ_TYPE_ATA_TASKFILE; 498 rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
538 rq.buffer = buf; 499 rq.buffer = buf;
539 500
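
The old per-port INSW/OUTSW word-count methods give way to input_data()/output_data()
hooks that receive the request and a length in bytes (note SECTOR_WORDS becoming
SECTOR_SIZE above). A minimal port-I/O implementation of the read side, assuming only the
hook signature used by the drivers further down; the name is illustrative:

    static void example_pio_input_data(ide_drive_t *drive, struct request *rq,
                                       void *buf, unsigned int len)
    {
            unsigned long data_addr = drive->hwif->io_ports.data_addr;

            /* the ATA data register is 16 bits wide, so round the byte count
             * up to whole words -- the same (len + 1) / 2 the ports below use */
            insw(data_addr, buf, (len + 1) / 2);
    }
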
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 999584c03d97..c758dcb13b14 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -564,7 +564,7 @@ static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
564 if (!(drive->dn % 2)) 564 if (!(drive->dn % 2))
565 ide_acpi_get_timing(hwif); 565 ide_acpi_get_timing(hwif);
566 566
567 memset(&rq, 0, sizeof(rq)); 567 blk_rq_init(NULL, &rq);
568 memset(&rqpm, 0, sizeof(rqpm)); 568 memset(&rqpm, 0, sizeof(rqpm));
569 memset(&args, 0, sizeof(args)); 569 memset(&args, 0, sizeof(args));
570 rq.cmd_type = REQ_TYPE_PM_SUSPEND; 570 rq.cmd_type = REQ_TYPE_PM_SUSPEND;
@@ -602,7 +602,7 @@ static int generic_ide_resume(struct device *dev)
602 602
603 ide_acpi_exec_tfs(drive); 603 ide_acpi_exec_tfs(drive);
604 604
605 memset(&rq, 0, sizeof(rq)); 605 blk_rq_init(NULL, &rq);
606 memset(&rqpm, 0, sizeof(rqpm)); 606 memset(&rqpm, 0, sizeof(rqpm));
607 memset(&args, 0, sizeof(args)); 607 memset(&args, 0, sizeof(args));
608 rq.cmd_type = REQ_TYPE_PM_RESUME; 608 rq.cmd_type = REQ_TYPE_PM_RESUME;
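
As in ide_raw_taskfile() above, stack-allocated requests are now initialized with
blk_rq_init() instead of a bare memset(), letting the block layer's own initializer set
up the reference count, list heads and defaults (the exact field set is an assumption
about blk_rq_init(), not something the hunk spells out). A minimal sketch of the pattern:

    static void example_init_pm_request(struct request *rq)
    {
            blk_rq_init(NULL, rq);                  /* NULL: no owning request queue */
            rq->cmd_type = REQ_TYPE_PM_SUSPEND;     /* callers then fill in the rest */
    }
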
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 56cdaa0eeea5..9e449a0c623f 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -44,6 +44,28 @@
44int falconide_intr_lock; 44int falconide_intr_lock;
45EXPORT_SYMBOL(falconide_intr_lock); 45EXPORT_SYMBOL(falconide_intr_lock);
46 46
47static void falconide_input_data(ide_drive_t *drive, struct request *rq,
48 void *buf, unsigned int len)
49{
50 unsigned long data_addr = drive->hwif->io_ports.data_addr;
51
52 if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
53 return insw(data_addr, buf, (len + 1) / 2);
54
55 insw_swapw(data_addr, buf, (len + 1) / 2);
56}
57
58static void falconide_output_data(ide_drive_t *drive, struct request *rq,
59 void *buf, unsigned int len)
60{
61 unsigned long data_addr = drive->hwif->io_ports.data_addr;
62
63 if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
64 return outsw(data_addr, buf, (len + 1) / 2);
65
66 outsw_swapw(data_addr, buf, (len + 1) / 2);
67}
68
47static void __init falconide_setup_ports(hw_regs_t *hw) 69static void __init falconide_setup_ports(hw_regs_t *hw)
48{ 70{
49 int i; 71 int i;
@@ -90,6 +112,10 @@ static int __init falconide_init(void)
90 ide_init_port_data(hwif, index); 112 ide_init_port_data(hwif, index);
91 ide_init_port_hw(hwif, &hw); 113 ide_init_port_hw(hwif, &hw);
92 114
115 /* Atari has a byte-swapped IDE interface */
116 hwif->input_data = falconide_input_data;
117 hwif->output_data = falconide_output_data;
118
93 ide_get_lock(NULL, NULL); 119 ide_get_lock(NULL, NULL);
94 ide_device_add(idx, NULL); 120 ide_device_add(idx, NULL);
95 ide_release_lock(); 121 ide_release_lock();
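
Both here and in q40ide.c below, the interface is wired byte-swapped, so the driver keeps
REQ_TYPE_FS disk transfers in raw bus order (plain insw()/outsw()) and byte-swaps
everything else, such as identify and ATAPI packet data, via the *_swapw() variants.
Roughly what such a swapping transfer amounts to, written portably for illustration (the
helper name and loop are illustrative, not the m68k implementation):

    static void example_insw_swapw(unsigned long port, void *buf,
                                   unsigned int count)
    {
            u16 *p = buf;

            /* read 16-bit words and swap the two bytes in each one */
            while (count--)
                    *p++ = swab16(inw(port));
    }
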
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index 8279dc7ca4c0..d3bc3f24e05d 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -101,8 +101,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
101 101
102 ide_init_port_hw(hwif, &hw); 102 ide_init_port_hw(hwif, &hw);
103 103
104 if (mmio) 104 if (mmio) {
105 hwif->host_flags = IDE_HFLAG_MMIO;
105 default_hwif_mmiops(hwif); 106 default_hwif_mmiops(hwif);
107 }
106 108
107 idx[0] = hwif->index; 109 idx[0] = hwif->index;
108 110
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index f9210458aea0..6f535d00e638 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -72,7 +72,27 @@ static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base,
72 hw->ack_intr = ack_intr; 72 hw->ack_intr = ack_intr;
73} 73}
74 74
75static void q40ide_input_data(ide_drive_t *drive, struct request *rq,
76 void *buf, unsigned int len)
77{
78 unsigned long data_addr = drive->hwif->io_ports.data_addr;
79
80 if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
81 return insw(data_addr, buf, (len + 1) / 2);
75 82
83 insw_swapw(data_addr, buf, (len + 1) / 2);
84}
85
86static void q40ide_output_data(ide_drive_t *drive, struct request *rq,
87 void *buf, unsigned int len)
88{
89 unsigned long data_addr = drive->hwif->io_ports.data_addr;
90
91 if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
92 return outsw(data_addr, buf, (len + 1) / 2);
93
94 outsw_swapw(data_addr, buf, (len + 1) / 2);
95}
76 96
77/* 97/*
78 * the static array is needed to have the name reported in /proc/ioports, 98 * the static array is needed to have the name reported in /proc/ioports,
@@ -123,6 +143,10 @@ static int __init q40ide_init(void)
123 ide_init_port_data(hwif, hwif->index); 143 ide_init_port_data(hwif, hwif->index);
124 ide_init_port_hw(hwif, &hw); 144 ide_init_port_hw(hwif, &hw);
125 145
146 /* Q40 has a byte-swapped IDE interface */
147 hwif->input_data = q40ide_input_data;
148 hwif->output_data = q40ide_output_data;
149
126 idx[i] = hwif->index; 150 idx[i] = hwif->index;
127 } 151 }
128 } 152 }
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index e0cf5e2dbab7..1a6c27b32498 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -48,8 +48,6 @@
48 48
49static _auide_hwif auide_hwif; 49static _auide_hwif auide_hwif;
50 50
51static int auide_ddma_init(_auide_hwif *auide);
52
53#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) 51#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
54 52
55void auide_insw(unsigned long port, void *addr, u32 count) 53void auide_insw(unsigned long port, void *addr, u32 count)
@@ -88,6 +86,17 @@ void auide_outsw(unsigned long port, void *addr, u32 count)
88 ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp); 86 ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
89} 87}
90 88
89static void au1xxx_input_data(ide_drive_t *drive, struct request *rq,
90 void *buf, unsigned int len)
91{
92 auide_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
93}
94
95static void au1xxx_output_data(ide_drive_t *drive, struct request *rq,
96 void *buf, unsigned int len)
97{
98 auide_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
99}
91#endif 100#endif
92 101
93static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio) 102static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -598,8 +607,8 @@ static int au_ide_probe(struct device *dev)
598 */ 607 */
599 608
600#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA 609#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
601 hwif->INSW = auide_insw; 610 hwif->input_data = au1xxx_input_data;
602 hwif->OUTSW = auide_outsw; 611 hwif->output_data = au1xxx_output_data;
603#endif 612#endif
604 hwif->select_data = 0; /* no chipset-specific code */ 613 hwif->select_data = 0; /* no chipset-specific code */
605 hwif->config_data = 0; /* no chipset-specific code */ 614 hwif->config_data = 0; /* no chipset-specific code */
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 68947626e4aa..712d17bdd470 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -109,6 +109,7 @@ static int __devinit swarm_ide_probe(struct device *dev)
109 base = ioremap(offset, size); 109 base = ioremap(offset, size);
110 110
111 /* Setup MMIO ops. */ 111 /* Setup MMIO ops. */
112 hwif->host_flags = IDE_HFLAG_MMIO;
112 default_hwif_mmiops(hwif); 113 default_hwif_mmiops(hwif);
113 114
114 hwif->chipset = ide_generic; 115 hwif->chipset = ide_generic;
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index b36a22b8c213..c1922f9cfe80 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -412,14 +412,14 @@ static u8 __devinit ali_cable_detect(ide_hwif_t *hwif)
412 return cbl; 412 return cbl;
413} 413}
414 414
415#ifndef CONFIG_SPARC64 415#if !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC)
416/** 416/**
417 * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff 417 * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
418 * @hwif: interface to configure 418 * @hwif: interface to configure
419 * 419 *
420 * Obtain the IRQ tables for an ALi based IDE solution on the PC 420 * Obtain the IRQ tables for an ALi based IDE solution on the PC
421 * class platforms. This part of the code isn't applicable to the 421 * class platforms. This part of the code isn't applicable to the
422 * Sparc systems 422 * Sparc and PowerPC systems.
423 */ 423 */
424 424
425static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif) 425static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
@@ -463,7 +463,9 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
463 hwif->irq = irq; 463 hwif->irq = irq;
464 } 464 }
465} 465}
466#endif 466#else
467#define init_hwif_ali15x3 NULL
468#endif /* !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC) */
467 469
468/** 470/**
469 * init_dma_ali15x3 - set up DMA on ALi15x3 471 * init_dma_ali15x3 - set up DMA on ALi15x3
@@ -517,9 +519,7 @@ static const struct ide_dma_ops ali_dma_ops = {
517static const struct ide_port_info ali15x3_chipset __devinitdata = { 519static const struct ide_port_info ali15x3_chipset __devinitdata = {
518 .name = "ALI15X3", 520 .name = "ALI15X3",
519 .init_chipset = init_chipset_ali15x3, 521 .init_chipset = init_chipset_ali15x3,
520#ifndef CONFIG_SPARC64
521 .init_hwif = init_hwif_ali15x3, 522 .init_hwif = init_hwif_ali15x3,
522#endif
523 .init_dma = init_dma_ali15x3, 523 .init_dma = init_dma_ali15x3,
524 .port_ops = &ali_port_ops, 524 .port_ops = &ali_port_ops,
525 .pio_mask = ATA_PIO5, 525 .pio_mask = ATA_PIO5,
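
Defining init_hwif_ali15x3 to NULL on the excluded architectures lets the ide_port_info
initializer reference the hook unconditionally instead of carrying its own #ifdef. The
pattern in reduced form (names as in the hunk above, other fields and the body elided):

    #if !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC)
    static void __devinit init_hwif_ali15x3(ide_hwif_t *hwif)
    {
            /* PC-class IRQ routing fixups, as in the hunk above */
    }
    #else
    #define init_hwif_ali15x3       NULL    /* the IDE core skips NULL hooks */
    #endif

    /* ...so the port_info initializer no longer needs its own #ifdef: */
    static const struct ide_port_info ali15x3_chipset __devinitdata = {
            .name           = "ALI15X3",
            .init_hwif      = init_hwif_ali15x3,
    };
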
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index c13e299077ec..fec4955f449b 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -63,6 +63,48 @@ static u8 superio_ide_inb (unsigned long port)
63 return inb(port); 63 return inb(port);
64} 64}
65 65
66static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
67{
68 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
69 struct ide_taskfile *tf = &task->tf;
70
71 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
72 u16 data = inw(io_ports->data_addr);
73
74 tf->data = data & 0xff;
75 tf->hob_data = (data >> 8) & 0xff;
76 }
77
78 /* be sure we're looking at the low order bits */
79 outb(drive->ctl & ~0x80, io_ports->ctl_addr);
80
81 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
82 tf->nsect = inb(io_ports->nsect_addr);
83 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
84 tf->lbal = inb(io_ports->lbal_addr);
85 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
86 tf->lbam = inb(io_ports->lbam_addr);
87 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
88 tf->lbah = inb(io_ports->lbah_addr);
89 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
90 tf->device = superio_ide_inb(io_ports->device_addr);
91
92 if (task->tf_flags & IDE_TFLAG_LBA48) {
93 outb(drive->ctl | 0x80, io_ports->ctl_addr);
94
95 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
96 tf->hob_feature = inb(io_ports->feature_addr);
97 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
98 tf->hob_nsect = inb(io_ports->nsect_addr);
99 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
100 tf->hob_lbal = inb(io_ports->lbal_addr);
101 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
102 tf->hob_lbam = inb(io_ports->lbam_addr);
103 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
104 tf->hob_lbah = inb(io_ports->lbah_addr);
105 }
106}
107
66static void __devinit superio_ide_init_iops (struct hwif_s *hwif) 108static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
67{ 109{
68 struct pci_dev *pdev = to_pci_dev(hwif->dev); 110 struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -80,6 +122,8 @@ static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
80 tmp = superio_ide_inb(superio_ide_dma_status[port]); 122 tmp = superio_ide_inb(superio_ide_dma_status[port]);
81 outb(tmp | 0x66, superio_ide_dma_status[port]); 123 outb(tmp | 0x66, superio_ide_dma_status[port]);
82 124
125 hwif->tf_read = superio_tf_read;
126
83 /* We need to override inb to workaround a SuperIO errata */ 127 /* We need to override inb to workaround a SuperIO errata */
84 hwif->INB = superio_ide_inb; 128 hwif->INB = superio_ide_inb;
85} 129}
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index ec9bd7b352fc..070df8ab3b21 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -83,8 +83,8 @@ static u8 get_indexed_reg(ide_hwif_t *hwif, u8 index)
83{ 83{
84 u8 value; 84 u8 value;
85 85
86 outb(index, hwif->dma_vendor1); 86 outb(index, hwif->dma_base + 1);
87 value = inb(hwif->dma_vendor3); 87 value = inb(hwif->dma_base + 3);
88 88
89 DBG("index[%02X] value[%02X]\n", index, value); 89 DBG("index[%02X] value[%02X]\n", index, value);
90 return value; 90 return value;
@@ -97,8 +97,8 @@ static u8 get_indexed_reg(ide_hwif_t *hwif, u8 index)
97 */ 97 */
98static void set_indexed_reg(ide_hwif_t *hwif, u8 index, u8 value) 98static void set_indexed_reg(ide_hwif_t *hwif, u8 index, u8 value)
99{ 99{
100 outb(index, hwif->dma_vendor1); 100 outb(index, hwif->dma_base + 1);
101 outb(value, hwif->dma_vendor3); 101 outb(value, hwif->dma_base + 3);
102 DBG("index[%02X] value[%02X]\n", index, value); 102 DBG("index[%02X] value[%02X]\n", index, value);
103} 103}
104 104
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 21c5dd23f928..f04738d14a6f 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -250,6 +250,7 @@ static const struct ich_laptop ich_laptop[] = {
250 { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ 250 { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
251 { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ 251 { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
252 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */ 252 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */
253 { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
253 /* end marker */ 254 /* end marker */
254 { 0, } 255 { 0, }
255}; 256};
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index ad7cdf9060ca..910fb00deb71 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -126,12 +126,6 @@ static u8 scc_ide_inb(unsigned long port)
126 return (u8)data; 126 return (u8)data;
127} 127}
128 128
129static u16 scc_ide_inw(unsigned long port)
130{
131 u32 data = in_be32((void*)port);
132 return (u16)data;
133}
134
135static void scc_ide_insw(unsigned long port, void *addr, u32 count) 129static void scc_ide_insw(unsigned long port, void *addr, u32 count)
136{ 130{
137 u16 *ptr = (u16 *)addr; 131 u16 *ptr = (u16 *)addr;
@@ -154,11 +148,6 @@ static void scc_ide_outb(u8 addr, unsigned long port)
154 out_be32((void*)port, addr); 148 out_be32((void*)port, addr);
155} 149}
156 150
157static void scc_ide_outw(u16 addr, unsigned long port)
158{
159 out_be32((void*)port, addr);
160}
161
162static void 151static void
163scc_ide_outbsync(ide_drive_t * drive, u8 addr, unsigned long port) 152scc_ide_outbsync(ide_drive_t * drive, u8 addr, unsigned long port)
164{ 153{
@@ -271,6 +260,20 @@ static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
271 out_be32((void __iomem *)udenvt_port, reg); 260 out_be32((void __iomem *)udenvt_port, reg);
272} 261}
273 262
263static void scc_dma_host_set(ide_drive_t *drive, int on)
264{
265 ide_hwif_t *hwif = drive->hwif;
266 u8 unit = (drive->select.b.unit & 0x01);
267 u8 dma_stat = scc_ide_inb(hwif->dma_status);
268
269 if (on)
270 dma_stat |= (1 << (5 + unit));
271 else
272 dma_stat &= ~(1 << (5 + unit));
273
274 scc_ide_outb(dma_stat, hwif->dma_status);
275}
276
274/** 277/**
275 * scc_ide_dma_setup - begin a DMA phase 278 * scc_ide_dma_setup - begin a DMA phase
276 * @drive: target device 279 * @drive: target device
@@ -301,7 +304,7 @@ static int scc_dma_setup(ide_drive_t *drive)
301 } 304 }
302 305
303 /* PRD table */ 306 /* PRD table */
304 out_be32((void __iomem *)hwif->dma_prdtable, hwif->dmatable_dma); 307 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
305 308
306 /* specify r/w */ 309 /* specify r/w */
307 out_be32((void __iomem *)hwif->dma_command, reading); 310 out_be32((void __iomem *)hwif->dma_command, reading);
@@ -315,13 +318,45 @@ static int scc_dma_setup(ide_drive_t *drive)
315 return 0; 318 return 0;
316} 319}
317 320
321static void scc_dma_start(ide_drive_t *drive)
322{
323 ide_hwif_t *hwif = drive->hwif;
324 u8 dma_cmd = scc_ide_inb(hwif->dma_command);
325
326 /* start DMA */
327 scc_ide_outb(dma_cmd | 1, hwif->dma_command);
328 hwif->dma = 1;
329 wmb();
330}
331
332static int __scc_dma_end(ide_drive_t *drive)
333{
334 ide_hwif_t *hwif = drive->hwif;
335 u8 dma_stat, dma_cmd;
336
337 drive->waiting_for_dma = 0;
338 /* get DMA command mode */
339 dma_cmd = scc_ide_inb(hwif->dma_command);
340 /* stop DMA */
341 scc_ide_outb(dma_cmd & ~1, hwif->dma_command);
342 /* get DMA status */
343 dma_stat = scc_ide_inb(hwif->dma_status);
344 /* clear the INTR & ERROR bits */
345 scc_ide_outb(dma_stat | 6, hwif->dma_status);
346 /* purge DMA mappings */
347 ide_destroy_dmatable(drive);
348 /* verify good DMA status */
349 hwif->dma = 0;
350 wmb();
351 return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
352}
318 353
319/** 354/**
320 * scc_dma_end - Stop DMA 355 * scc_dma_end - Stop DMA
321 * @drive: IDE drive 356 * @drive: IDE drive
322 * 357 *
323 * Check and clear INT Status register. 358 * Check and clear INT Status register.
324 * Then call __ide_dma_end(). 359 * Then call __scc_dma_end().
325 */ 360 */
326 361
327static int scc_dma_end(ide_drive_t *drive) 362static int scc_dma_end(ide_drive_t *drive)
@@ -425,7 +460,7 @@ static int scc_dma_end(ide_drive_t *drive)
425 break; 460 break;
426 } 461 }
427 462
428 dma_stat = __ide_dma_end(drive); 463 dma_stat = __scc_dma_end(drive);
429 if (data_loss) 464 if (data_loss)
430 dma_stat |= 2; /* emulate DMA error (to retry command) */ 465 dma_stat |= 2; /* emulate DMA error (to retry command) */
431 return dma_stat; 466 return dma_stat;
@@ -618,6 +653,122 @@ static int __devinit init_setup_scc(struct pci_dev *dev,
618 return rc; 653 return rc;
619} 654}
620 655
656static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
657{
658 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
659 struct ide_taskfile *tf = &task->tf;
660 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
661
662 if (task->tf_flags & IDE_TFLAG_FLAGGED)
663 HIHI = 0xFF;
664
665 ide_set_irq(drive, 1);
666
667 if (task->tf_flags & IDE_TFLAG_OUT_DATA)
668 out_be32((void *)io_ports->data_addr,
669 (tf->hob_data << 8) | tf->data);
670
671 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
672 scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
673 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
674 scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
675 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
676 scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
677 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
678 scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
679 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
680 scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);
681
682 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
683 scc_ide_outb(tf->feature, io_ports->feature_addr);
684 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
685 scc_ide_outb(tf->nsect, io_ports->nsect_addr);
686 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
687 scc_ide_outb(tf->lbal, io_ports->lbal_addr);
688 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
689 scc_ide_outb(tf->lbam, io_ports->lbam_addr);
690 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
691 scc_ide_outb(tf->lbah, io_ports->lbah_addr);
692
693 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
694 scc_ide_outb((tf->device & HIHI) | drive->select.all,
695 io_ports->device_addr);
696}
697
698static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
699{
700 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
701 struct ide_taskfile *tf = &task->tf;
702
703 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
704 u16 data = (u16)in_be32((void *)io_ports->data_addr);
705
706 tf->data = data & 0xff;
707 tf->hob_data = (data >> 8) & 0xff;
708 }
709
710 /* be sure we're looking at the low order bits */
711 scc_ide_outb(drive->ctl & ~0x80, io_ports->ctl_addr);
712
713 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
714 tf->nsect = scc_ide_inb(io_ports->nsect_addr);
715 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
716 tf->lbal = scc_ide_inb(io_ports->lbal_addr);
717 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
718 tf->lbam = scc_ide_inb(io_ports->lbam_addr);
719 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
720 tf->lbah = scc_ide_inb(io_ports->lbah_addr);
721 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
722 tf->device = scc_ide_inb(io_ports->device_addr);
723
724 if (task->tf_flags & IDE_TFLAG_LBA48) {
725 scc_ide_outb(drive->ctl | 0x80, io_ports->ctl_addr);
726
727 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
728 tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
729 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
730 tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
731 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
732 tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
733 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
734 tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
735 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
736 tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
737 }
738}
739
740static void scc_input_data(ide_drive_t *drive, struct request *rq,
741 void *buf, unsigned int len)
742{
743 unsigned long data_addr = drive->hwif->io_ports.data_addr;
744
745 len++;
746
747 if (drive->io_32bit) {
748 scc_ide_insl(data_addr, buf, len / 4);
749
750 if ((len & 3) >= 2)
751 scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
752 } else
753 scc_ide_insw(data_addr, buf, len / 2);
754}
755
756static void scc_output_data(ide_drive_t *drive, struct request *rq,
757 void *buf, unsigned int len)
758{
759 unsigned long data_addr = drive->hwif->io_ports.data_addr;
760
761 len++;
762
763 if (drive->io_32bit) {
764 scc_ide_outsl(data_addr, buf, len / 4);
765
766 if ((len & 3) >= 2)
767 scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
768 } else
769 scc_ide_outsw(data_addr, buf, len / 2);
770}
771
621/** 772/**
622 * init_mmio_iops_scc - set up the iops for MMIO 773 * init_mmio_iops_scc - set up the iops for MMIO
623 * @hwif: interface to set up 774 * @hwif: interface to set up
@@ -632,15 +783,15 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
632 783
633 ide_set_hwifdata(hwif, ports); 784 ide_set_hwifdata(hwif, ports);
634 785
786 hwif->tf_load = scc_tf_load;
787 hwif->tf_read = scc_tf_read;
788
789 hwif->input_data = scc_input_data;
790 hwif->output_data = scc_output_data;
791
635 hwif->INB = scc_ide_inb; 792 hwif->INB = scc_ide_inb;
636 hwif->INW = scc_ide_inw;
637 hwif->INSW = scc_ide_insw;
638 hwif->INSL = scc_ide_insl;
639 hwif->OUTB = scc_ide_outb; 793 hwif->OUTB = scc_ide_outb;
640 hwif->OUTBSYNC = scc_ide_outbsync; 794 hwif->OUTBSYNC = scc_ide_outbsync;
641 hwif->OUTW = scc_ide_outw;
642 hwif->OUTSW = scc_ide_outsw;
643 hwif->OUTSL = scc_ide_outsl;
644 795
645 hwif->dma_base = dma_base; 796 hwif->dma_base = dma_base;
646 hwif->config_data = ports->ctl; 797 hwif->config_data = ports->ctl;
@@ -687,7 +838,6 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
687 838
688 hwif->dma_command = hwif->dma_base; 839 hwif->dma_command = hwif->dma_base;
689 hwif->dma_status = hwif->dma_base + 0x04; 840 hwif->dma_status = hwif->dma_base + 0x04;
690 hwif->dma_prdtable = hwif->dma_base + 0x08;
691 841
692 /* PTERADD */ 842 /* PTERADD */
693 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); 843 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
@@ -706,10 +856,10 @@ static const struct ide_port_ops scc_port_ops = {
706}; 856};
707 857
708static const struct ide_dma_ops scc_dma_ops = { 858static const struct ide_dma_ops scc_dma_ops = {
709 .dma_host_set = ide_dma_host_set, 859 .dma_host_set = scc_dma_host_set,
710 .dma_setup = scc_dma_setup, 860 .dma_setup = scc_dma_setup,
711 .dma_exec_cmd = ide_dma_exec_cmd, 861 .dma_exec_cmd = ide_dma_exec_cmd,
712 .dma_start = ide_dma_start, 862 .dma_start = scc_dma_start,
713 .dma_end = scc_dma_end, 863 .dma_end = scc_dma_end,
714 .dma_test_irq = scc_dma_test_irq, 864 .dma_test_irq = scc_dma_test_irq,
715 .dma_lost_irq = ide_dma_lost_irq, 865 .dma_lost_irq = ide_dma_lost_irq,
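
The new scc_dma_host_set() and __scc_dma_end() poke the standard SFF-8038i bus-master
status register through the chip's big-endian accessors. The bit layout they rely on is
the usual one; the macro names below are illustrative, not from the driver:

    /* layout of the register read via scc_ide_inb(hwif->dma_status) */
    #define EX_BM_STAT_ACTIVE       0x01    /* bit 0: DMA in progress */
    #define EX_BM_STAT_ERROR        0x02    /* bit 1: DMA error (write 1 to clear) */
    #define EX_BM_STAT_INTR         0x04    /* bit 2: IDE interrupt (write 1 to clear) */
    #define EX_BM_STAT_DRV0_DMA     0x20    /* bit 5: drive 0 DMA capable */
    #define EX_BM_STAT_DRV1_DMA     0x40    /* bit 6: drive 1 DMA capable */

    /*
     * Hence "1 << (5 + unit)" selects the per-drive capable bit,
     * "dma_stat | 6" clears the INTR and ERROR bits, and
     * "(dma_stat & 7) != 4" treats anything other than a clean
     * interrupt-with-no-error-and-not-active as a failed transfer.
     */
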
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 63e28f4e6d3b..16a0bce17d69 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -573,6 +573,7 @@ static const struct ide_port_info sgiioc4_port_info __devinitdata = {
573 .init_dma = ide_dma_sgiioc4, 573 .init_dma = ide_dma_sgiioc4,
574 .port_ops = &sgiioc4_port_ops, 574 .port_ops = &sgiioc4_port_ops,
575 .dma_ops = &sgiioc4_dma_ops, 575 .dma_ops = &sgiioc4_dma_ops,
576 .host_flags = IDE_HFLAG_MMIO,
576 .mwdma_mask = ATA_MWDMA2_ONLY, 577 .mwdma_mask = ATA_MWDMA2_ONLY,
577}; 578};
578 579
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index c2040a017f47..0006b9e58567 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 2003 Red Hat <alan@redhat.com> 3 * Copyright (C) 2003 Red Hat <alan@redhat.com>
4 * Copyright (C) 2007 MontaVista Software, Inc. 4 * Copyright (C) 2007-2008 MontaVista Software, Inc.
5 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 5 * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
6 * 6 *
7 * May be copied or modified under the terms of the GNU General Public License 7 * May be copied or modified under the terms of the GNU General Public License
8 * 8 *
@@ -17,10 +17,10 @@
17 * 17 *
18 * FAQ Items: 18 * FAQ Items:
19 * If you are using Marvell SATA-IDE adapters with Maxtor drives 19 * If you are using Marvell SATA-IDE adapters with Maxtor drives
20 * ensure the system is set up for ATA100/UDMA5 not UDMA6. 20 * ensure the system is set up for ATA100/UDMA5, not UDMA6.
21 * 21 *
22 * If you are using WD drives with SATA bridges you must set the 22 * If you are using WD drives with SATA bridges you must set the
23 * drive to "Single". "Master" will hang 23 * drive to "Single". "Master" will hang.
24 * 24 *
25 * If you have strange problems with nVidia chipset systems please 25 * If you have strange problems with nVidia chipset systems please
26 * see the SI support documentation and update your system BIOS 26 * see the SI support documentation and update your system BIOS
@@ -42,25 +42,24 @@
42#include <linux/hdreg.h> 42#include <linux/hdreg.h>
43#include <linux/ide.h> 43#include <linux/ide.h>
44#include <linux/init.h> 44#include <linux/init.h>
45 45#include <linux/io.h>
46#include <asm/io.h>
47 46
48/** 47/**
49 * pdev_is_sata - check if device is SATA 48 * pdev_is_sata - check if device is SATA
50 * @pdev: PCI device to check 49 * @pdev: PCI device to check
51 * 50 *
52 * Returns true if this is a SATA controller 51 * Returns true if this is a SATA controller
53 */ 52 */
54 53
55static int pdev_is_sata(struct pci_dev *pdev) 54static int pdev_is_sata(struct pci_dev *pdev)
56{ 55{
57#ifdef CONFIG_BLK_DEV_IDE_SATA 56#ifdef CONFIG_BLK_DEV_IDE_SATA
58 switch(pdev->device) { 57 switch (pdev->device) {
59 case PCI_DEVICE_ID_SII_3112: 58 case PCI_DEVICE_ID_SII_3112:
60 case PCI_DEVICE_ID_SII_1210SA: 59 case PCI_DEVICE_ID_SII_1210SA:
61 return 1; 60 return 1;
62 case PCI_DEVICE_ID_SII_680: 61 case PCI_DEVICE_ID_SII_680:
63 return 0; 62 return 0;
64 } 63 }
65 BUG(); 64 BUG();
66#endif 65#endif
@@ -70,10 +69,10 @@ static int pdev_is_sata(struct pci_dev *pdev)
70/** 69/**
71 * is_sata - check if hwif is SATA 70 * is_sata - check if hwif is SATA
72 * @hwif: interface to check 71 * @hwif: interface to check
73 * 72 *
74 * Returns true if this is a SATA controller 73 * Returns true if this is a SATA controller
75 */ 74 */
76 75
77static inline int is_sata(ide_hwif_t *hwif) 76static inline int is_sata(ide_hwif_t *hwif)
78{ 77{
79 return pdev_is_sata(to_pci_dev(hwif->dev)); 78 return pdev_is_sata(to_pci_dev(hwif->dev));
@@ -86,21 +85,22 @@ static inline int is_sata(ide_hwif_t *hwif)
86 * 85 *
87 * Turn a config register offset into the right address in either 86 * Turn a config register offset into the right address in either
88 * PCI space or MMIO space to access the control register in question 87 * PCI space or MMIO space to access the control register in question
89 * Thankfully this is a configuration operation so isnt performance 88 * Thankfully this is a configuration operation, so isn't performance
90 * criticial. 89 * critical.
91 */ 90 */
92 91
93static unsigned long siimage_selreg(ide_hwif_t *hwif, int r) 92static unsigned long siimage_selreg(ide_hwif_t *hwif, int r)
94{ 93{
95 unsigned long base = (unsigned long)hwif->hwif_data; 94 unsigned long base = (unsigned long)hwif->hwif_data;
95
96 base += 0xA0 + r; 96 base += 0xA0 + r;
97 if(hwif->mmio) 97 if (hwif->mmio)
98 base += (hwif->channel << 6); 98 base += hwif->channel << 6;
99 else 99 else
100 base += (hwif->channel << 4); 100 base += hwif->channel << 4;
101 return base; 101 return base;
102} 102}
103 103
104/** 104/**
105 * siimage_seldev - return register base 105 * siimage_seldev - return register base
106 * @hwif: interface 106 * @hwif: interface
@@ -110,20 +110,69 @@ static unsigned long siimage_selreg(ide_hwif_t *hwif, int r)
110 * PCI space or MMIO space to access the control register in question 110 * PCI space or MMIO space to access the control register in question
111 * including accounting for the unit shift. 111 * including accounting for the unit shift.
112 */ 112 */
113 113
114static inline unsigned long siimage_seldev(ide_drive_t *drive, int r) 114static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
115{ 115{
116 ide_hwif_t *hwif = HWIF(drive); 116 ide_hwif_t *hwif = HWIF(drive);
117 unsigned long base = (unsigned long)hwif->hwif_data; 117 unsigned long base = (unsigned long)hwif->hwif_data;
118
118 base += 0xA0 + r; 119 base += 0xA0 + r;
119 if(hwif->mmio) 120 if (hwif->mmio)
120 base += (hwif->channel << 6); 121 base += hwif->channel << 6;
121 else 122 else
122 base += (hwif->channel << 4); 123 base += hwif->channel << 4;
123 base |= drive->select.b.unit << drive->select.b.unit; 124 base |= drive->select.b.unit << drive->select.b.unit;
124 return base; 125 return base;
125} 126}
126 127
128static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr)
129{
130 u8 tmp = 0;
131
132 if (pci_get_drvdata(dev))
133 tmp = readb((void __iomem *)addr);
134 else
135 pci_read_config_byte(dev, addr, &tmp);
136
137 return tmp;
138}
139
140static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr)
141{
142 u16 tmp = 0;
143
144 if (pci_get_drvdata(dev))
145 tmp = readw((void __iomem *)addr);
146 else
147 pci_read_config_word(dev, addr, &tmp);
148
149 return tmp;
150}
151
152static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr)
153{
154 if (pci_get_drvdata(dev))
155 writeb(val, (void __iomem *)addr);
156 else
157 pci_write_config_byte(dev, addr, val);
158}
159
160static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr)
161{
162 if (pci_get_drvdata(dev))
163 writew(val, (void __iomem *)addr);
164 else
165 pci_write_config_word(dev, addr, val);
166}
167
168static void sil_iowrite32(struct pci_dev *dev, u32 val, unsigned long addr)
169{
170 if (pci_get_drvdata(dev))
171 writel(val, (void __iomem *)addr);
172 else
173 pci_write_config_dword(dev, addr, val);
174}
175
127/** 176/**
128 * sil_udma_filter - compute UDMA mask 177 * sil_udma_filter - compute UDMA mask
129 * @drive: IDE device 178 * @drive: IDE device
@@ -136,24 +185,26 @@ static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
136 185
137static u8 sil_pata_udma_filter(ide_drive_t *drive) 186static u8 sil_pata_udma_filter(ide_drive_t *drive)
138{ 187{
139 ide_hwif_t *hwif = drive->hwif; 188 ide_hwif_t *hwif = drive->hwif;
140 struct pci_dev *dev = to_pci_dev(hwif->dev); 189 struct pci_dev *dev = to_pci_dev(hwif->dev);
141 unsigned long base = (unsigned long) hwif->hwif_data; 190 unsigned long base = (unsigned long)hwif->hwif_data;
142 u8 mask = 0, scsc = 0; 191 u8 scsc, mask = 0;
143 192
144 if (hwif->mmio) 193 scsc = sil_ioread8(dev, base + (hwif->mmio ? 0x4A : 0x8A));
145 scsc = hwif->INB(base + 0x4A);
146 else
147 pci_read_config_byte(dev, 0x8A, &scsc);
148 194
149 if ((scsc & 0x30) == 0x10) /* 133 */ 195 switch (scsc & 0x30) {
196 case 0x10: /* 133 */
150 mask = ATA_UDMA6; 197 mask = ATA_UDMA6;
151 else if ((scsc & 0x30) == 0x20) /* 2xPCI */ 198 break;
199 case 0x20: /* 2xPCI */
152 mask = ATA_UDMA6; 200 mask = ATA_UDMA6;
153 else if ((scsc & 0x30) == 0x00) /* 100 */ 201 break;
202 case 0x00: /* 100 */
154 mask = ATA_UDMA5; 203 mask = ATA_UDMA5;
155 else /* Disabled ? */ 204 break;
205 default: /* Disabled ? */
156 BUG(); 206 BUG();
207 }
157 208
158 return mask; 209 return mask;
159} 210}
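
The sil_io*() accessors added earlier pick MMIO or PCI configuration space depending on
whether setup_mmio_siimage() stored an ioremapped base via pci_set_drvdata();
sil_pata_udma_filter() above shows the idiom, with 0x4A as the MMIO offset of the SCSC
(clocking) register and 0x8A its config-space twin. A minimal wrapper repeating that
read, with an illustrative name:

    static u8 example_read_scsc(ide_hwif_t *hwif)
    {
            struct pci_dev *dev = to_pci_dev(hwif->dev);
            unsigned long base = (unsigned long)hwif->hwif_data;

            /* base is expected to hold the ioremapped BAR5 in MMIO mode
             * (0 otherwise), so the sum lands on the right register */
            return sil_ioread8(dev, base + (hwif->mmio ? 0x4A : 0x8A));
    }
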
@@ -175,15 +226,16 @@ static u8 sil_sata_udma_filter(ide_drive_t *drive)
175 226
176static void sil_set_pio_mode(ide_drive_t *drive, u8 pio) 227static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
177{ 228{
178 const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 }; 229 static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
179 const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 230 static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
180 231
181 ide_hwif_t *hwif = HWIF(drive); 232 ide_hwif_t *hwif = HWIF(drive);
233 struct pci_dev *dev = to_pci_dev(hwif->dev);
182 ide_drive_t *pair = ide_get_paired_drive(drive); 234 ide_drive_t *pair = ide_get_paired_drive(drive);
183 u32 speedt = 0; 235 u32 speedt = 0;
184 u16 speedp = 0; 236 u16 speedp = 0;
185 unsigned long addr = siimage_seldev(drive, 0x04); 237 unsigned long addr = siimage_seldev(drive, 0x04);
186 unsigned long tfaddr = siimage_selreg(hwif, 0x02); 238 unsigned long tfaddr = siimage_selreg(hwif, 0x02);
187 unsigned long base = (unsigned long)hwif->hwif_data; 239 unsigned long base = (unsigned long)hwif->hwif_data;
188 u8 tf_pio = pio; 240 u8 tf_pio = pio;
189 u8 addr_mask = hwif->channel ? (hwif->mmio ? 0xF4 : 0x84) 241 u8 addr_mask = hwif->channel ? (hwif->mmio ? 0xF4 : 0x84)
@@ -203,36 +255,20 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
203 speedp = data_speed[pio]; 255 speedp = data_speed[pio];
204 speedt = tf_speed[tf_pio]; 256 speedt = tf_speed[tf_pio];
205 257
206 if (hwif->mmio) { 258 sil_iowrite16(dev, speedp, addr);
207 hwif->OUTW(speedp, addr); 259 sil_iowrite16(dev, speedt, tfaddr);
208 hwif->OUTW(speedt, tfaddr); 260
209 /* Now set up IORDY */ 261 /* now set up IORDY */
210 if (pio > 2) 262 speedp = sil_ioread16(dev, tfaddr - 2);
211 hwif->OUTW(hwif->INW(tfaddr-2)|0x200, tfaddr-2); 263 speedp &= ~0x200;
212 else 264 if (pio > 2)
213 hwif->OUTW(hwif->INW(tfaddr-2)&~0x200, tfaddr-2); 265 speedp |= 0x200;
214 266 sil_iowrite16(dev, speedp, tfaddr - 2);
215 mode = hwif->INB(base + addr_mask); 267
216 mode &= ~(unit ? 0x30 : 0x03); 268 mode = sil_ioread8(dev, base + addr_mask);
217 mode |= (unit ? 0x10 : 0x01); 269 mode &= ~(unit ? 0x30 : 0x03);
218 hwif->OUTB(mode, base + addr_mask); 270 mode |= unit ? 0x10 : 0x01;
219 } else { 271 sil_iowrite8(dev, mode, base + addr_mask);
220 struct pci_dev *dev = to_pci_dev(hwif->dev);
221
222 pci_write_config_word(dev, addr, speedp);
223 pci_write_config_word(dev, tfaddr, speedt);
224 pci_read_config_word(dev, tfaddr - 2, &speedp);
225 speedp &= ~0x200;
226 /* Set IORDY for mode 3 or 4 */
227 if (pio > 2)
228 speedp |= 0x200;
229 pci_write_config_word(dev, tfaddr - 2, speedp);
230
231 pci_read_config_byte(dev, addr_mask, &mode);
232 mode &= ~(unit ? 0x30 : 0x03);
233 mode |= (unit ? 0x10 : 0x01);
234 pci_write_config_byte(dev, addr_mask, mode);
235 }
236} 272}
237 273
238/** 274/**
@@ -245,59 +281,45 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
245 281
246static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed) 282static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
247{ 283{
248 u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }; 284 static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
249 u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 }; 285 static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
250 u16 dma[] = { 0x2208, 0x10C2, 0x10C1 }; 286 static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 };
251 287
252 ide_hwif_t *hwif = HWIF(drive); 288 ide_hwif_t *hwif = HWIF(drive);
253 struct pci_dev *dev = to_pci_dev(hwif->dev); 289 struct pci_dev *dev = to_pci_dev(hwif->dev);
254 u16 ultra = 0, multi = 0; 290 u16 ultra = 0, multi = 0;
255 u8 mode = 0, unit = drive->select.b.unit; 291 u8 mode = 0, unit = drive->select.b.unit;
256 unsigned long base = (unsigned long)hwif->hwif_data; 292 unsigned long base = (unsigned long)hwif->hwif_data;
257 u8 scsc = 0, addr_mask = ((hwif->channel) ? 293 u8 scsc = 0, addr_mask = hwif->channel ?
258 ((hwif->mmio) ? 0xF4 : 0x84) : 294 (hwif->mmio ? 0xF4 : 0x84) :
259 ((hwif->mmio) ? 0xB4 : 0x80)); 295 (hwif->mmio ? 0xB4 : 0x80);
260
261 unsigned long ma = siimage_seldev(drive, 0x08); 296 unsigned long ma = siimage_seldev(drive, 0x08);
262 unsigned long ua = siimage_seldev(drive, 0x0C); 297 unsigned long ua = siimage_seldev(drive, 0x0C);
263 298
264 if (hwif->mmio) { 299 scsc = sil_ioread8 (dev, base + (hwif->mmio ? 0x4A : 0x8A));
265 scsc = hwif->INB(base + 0x4A); 300 mode = sil_ioread8 (dev, base + addr_mask);
266 mode = hwif->INB(base + addr_mask); 301 multi = sil_ioread16(dev, ma);
267 multi = hwif->INW(ma); 302 ultra = sil_ioread16(dev, ua);
268 ultra = hwif->INW(ua);
269 } else {
270 pci_read_config_byte(dev, 0x8A, &scsc);
271 pci_read_config_byte(dev, addr_mask, &mode);
272 pci_read_config_word(dev, ma, &multi);
273 pci_read_config_word(dev, ua, &ultra);
274 }
275 303
276 mode &= ~((unit) ? 0x30 : 0x03); 304 mode &= ~(unit ? 0x30 : 0x03);
277 ultra &= ~0x3F; 305 ultra &= ~0x3F;
278 scsc = ((scsc & 0x30) == 0x00) ? 0 : 1; 306 scsc = ((scsc & 0x30) == 0x00) ? 0 : 1;
279 307
280 scsc = is_sata(hwif) ? 1 : scsc; 308 scsc = is_sata(hwif) ? 1 : scsc;
281 309
282 if (speed >= XFER_UDMA_0) { 310 if (speed >= XFER_UDMA_0) {
283 multi = dma[2]; 311 multi = dma[2];
284 ultra |= (scsc ? ultra6[speed - XFER_UDMA_0] : 312 ultra |= scsc ? ultra6[speed - XFER_UDMA_0] :
285 ultra5[speed - XFER_UDMA_0]); 313 ultra5[speed - XFER_UDMA_0];
286 mode |= (unit ? 0x30 : 0x03); 314 mode |= unit ? 0x30 : 0x03;
287 } else { 315 } else {
288 multi = dma[speed - XFER_MW_DMA_0]; 316 multi = dma[speed - XFER_MW_DMA_0];
289 mode |= (unit ? 0x20 : 0x02); 317 mode |= unit ? 0x20 : 0x02;
290 } 318 }
291 319
292 if (hwif->mmio) { 320 sil_iowrite8 (dev, mode, base + addr_mask);
293 hwif->OUTB(mode, base + addr_mask); 321 sil_iowrite16(dev, multi, ma);
294 hwif->OUTW(multi, ma); 322 sil_iowrite16(dev, ultra, ua);
295 hwif->OUTW(ultra, ua);
296 } else {
297 pci_write_config_byte(dev, addr_mask, mode);
298 pci_write_config_word(dev, ma, multi);
299 pci_write_config_word(dev, ua, ultra);
300 }
301} 323}
302 324
303/* returns 1 if dma irq issued, 0 otherwise */ 325/* returns 1 if dma irq issued, 0 otherwise */
@@ -309,13 +331,14 @@ static int siimage_io_dma_test_irq(ide_drive_t *drive)
309 unsigned long addr = siimage_selreg(hwif, 1); 331 unsigned long addr = siimage_selreg(hwif, 1);
310 332
311 /* return 1 if INTR asserted */ 333 /* return 1 if INTR asserted */
312 if ((hwif->INB(hwif->dma_status) & 4) == 4) 334 if (hwif->INB(hwif->dma_status) & 4)
313 return 1; 335 return 1;
314 336
315 /* return 1 if Device INTR asserted */ 337 /* return 1 if Device INTR asserted */
316 pci_read_config_byte(dev, addr, &dma_altstat); 338 pci_read_config_byte(dev, addr, &dma_altstat);
317 if (dma_altstat & 8) 339 if (dma_altstat & 8)
318 return 0; //return 1; 340 return 0; /* return 1; */
341
319 return 0; 342 return 0;
320} 343}
321 344
@@ -335,9 +358,9 @@ static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
335 = (void __iomem *)hwif->sata_scr[SATA_ERROR_OFFSET]; 358 = (void __iomem *)hwif->sata_scr[SATA_ERROR_OFFSET];
336 359
337 if (sata_error_addr) { 360 if (sata_error_addr) {
338 unsigned long base = (unsigned long)hwif->hwif_data; 361 unsigned long base = (unsigned long)hwif->hwif_data;
339 u32 ext_stat = readl((void __iomem *)(base + 0x10)); 362 u32 ext_stat = readl((void __iomem *)(base + 0x10));
340 u8 watchdog = 0; 363 u8 watchdog = 0;
341 364
342 if (ext_stat & ((hwif->channel) ? 0x40 : 0x10)) { 365 if (ext_stat & ((hwif->channel) ? 0x40 : 0x10)) {
343 u32 sata_error = readl(sata_error_addr); 366 u32 sata_error = readl(sata_error_addr);
@@ -346,25 +369,22 @@ static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
346 watchdog = (sata_error & 0x00680000) ? 1 : 0; 369 watchdog = (sata_error & 0x00680000) ? 1 : 0;
347 printk(KERN_WARNING "%s: sata_error = 0x%08x, " 370 printk(KERN_WARNING "%s: sata_error = 0x%08x, "
348 "watchdog = %d, %s\n", 371 "watchdog = %d, %s\n",
349 drive->name, sata_error, watchdog, 372 drive->name, sata_error, watchdog, __func__);
350 __func__); 373 } else
351
352 } else {
353 watchdog = (ext_stat & 0x8000) ? 1 : 0; 374 watchdog = (ext_stat & 0x8000) ? 1 : 0;
354 }
355 ext_stat >>= 16;
356 375
376 ext_stat >>= 16;
357 if (!(ext_stat & 0x0404) && !watchdog) 377 if (!(ext_stat & 0x0404) && !watchdog)
358 return 0; 378 return 0;
359 } 379 }
360 380
361 /* return 1 if INTR asserted */ 381 /* return 1 if INTR asserted */
362 if ((readb((void __iomem *)hwif->dma_status) & 0x04) == 0x04) 382 if (readb((void __iomem *)hwif->dma_status) & 0x04)
363 return 1; 383 return 1;
364 384
365 /* return 1 if Device INTR asserted */ 385 /* return 1 if Device INTR asserted */
366 if ((readb((void __iomem *)addr) & 8) == 8) 386 if (readb((void __iomem *)addr) & 8)
367 return 0; //return 1; 387 return 0; /* return 1; */
368 388
369 return 0; 389 return 0;
370} 390}
@@ -423,63 +443,33 @@ static void sil_sata_pre_reset(ide_drive_t *drive)
423} 443}
424 444
425/** 445/**
426 * proc_reports_siimage - add siimage controller to proc 446 * setup_mmio_siimage - switch controller into MMIO mode
427 * @dev: PCI device
428 * @clocking: SCSC value
429 * @name: controller name
430 *
431 * Report the clocking mode of the controller and add it to
432 * the /proc interface layer
433 */
434
435static void proc_reports_siimage (struct pci_dev *dev, u8 clocking, const char *name)
436{
437 if (!pdev_is_sata(dev)) {
438 printk(KERN_INFO "%s: BASE CLOCK ", name);
439 clocking &= 0x03;
440 switch (clocking) {
441 case 0x03: printk("DISABLED!\n"); break;
442 case 0x02: printk("== 2X PCI\n"); break;
443 case 0x01: printk("== 133\n"); break;
444 case 0x00: printk("== 100\n"); break;
445 }
446 }
447}
448
449/**
450 * setup_mmio_siimage - switch an SI controller into MMIO
451 * @dev: PCI device we are configuring 447 * @dev: PCI device we are configuring
452 * @name: device name 448 * @name: device name
453 * 449 *
454 * Attempt to put the device into mmio mode. There are some slight 450 * Attempt to put the device into MMIO mode. There are some slight
455 * complications here with certain systems where the mmio bar isnt 451 * complications here with certain systems where the MMIO BAR isn't
456 * mapped so we have to be sure we can fall back to I/O. 452 * mapped, so we have to be sure that we can fall back to I/O.
457 */ 453 */
458 454
459static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name) 455static unsigned int setup_mmio_siimage(struct pci_dev *dev, const char *name)
460{ 456{
461 resource_size_t bar5 = pci_resource_start(dev, 5); 457 resource_size_t bar5 = pci_resource_start(dev, 5);
462 unsigned long barsize = pci_resource_len(dev, 5); 458 unsigned long barsize = pci_resource_len(dev, 5);
463 u8 tmpbyte = 0;
464 void __iomem *ioaddr; 459 void __iomem *ioaddr;
465 u32 tmp, irq_mask;
466 460
467 /* 461 /*
468 * Drop back to PIO if we can't map the mmio. Some 462 * Drop back to PIO if we can't map the MMIO. Some systems
469 * systems seem to get terminally confused in the PCI 463 * seem to get terminally confused in the PCI spaces.
470 * spaces.
471 */ 464 */
472 465 if (!request_mem_region(bar5, barsize, name)) {
473 if(!request_mem_region(bar5, barsize, name)) 466 printk(KERN_WARNING "siimage: IDE controller MMIO ports not "
474 { 467 "available.\n");
475 printk(KERN_WARNING "siimage: IDE controller MMIO ports not available.\n");
476 return 0; 468 return 0;
477 } 469 }
478
479 ioaddr = ioremap(bar5, barsize);
480 470
481 if (ioaddr == NULL) 471 ioaddr = ioremap(bar5, barsize);
482 { 472 if (ioaddr == NULL) {
483 release_mem_region(bar5, barsize); 473 release_mem_region(bar5, barsize);
484 return 0; 474 return 0;
485 } 475 }
@@ -487,62 +477,6 @@ static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name)
487 pci_set_master(dev); 477 pci_set_master(dev);
488 pci_set_drvdata(dev, (void *) ioaddr); 478 pci_set_drvdata(dev, (void *) ioaddr);
489 479
490 if (pdev_is_sata(dev)) {
491 /* make sure IDE0/1 interrupts are not masked */
492 irq_mask = (1 << 22) | (1 << 23);
493 tmp = readl(ioaddr + 0x48);
494 if (tmp & irq_mask) {
495 tmp &= ~irq_mask;
496 writel(tmp, ioaddr + 0x48);
497 readl(ioaddr + 0x48); /* flush */
498 }
499 writel(0, ioaddr + 0x148);
500 writel(0, ioaddr + 0x1C8);
501 }
502
503 writeb(0, ioaddr + 0xB4);
504 writeb(0, ioaddr + 0xF4);
505 tmpbyte = readb(ioaddr + 0x4A);
506
507 switch(tmpbyte & 0x30) {
508 case 0x00:
509 /* In 100 MHz clocking, try and switch to 133 */
510 writeb(tmpbyte|0x10, ioaddr + 0x4A);
511 break;
512 case 0x10:
513 /* On 133Mhz clocking */
514 break;
515 case 0x20:
516 /* On PCIx2 clocking */
517 break;
518 case 0x30:
519 /* Clocking is disabled */
520 /* 133 clock attempt to force it on */
521 writeb(tmpbyte & ~0x20, ioaddr + 0x4A);
522 break;
523 }
524
525 writeb( 0x72, ioaddr + 0xA1);
526 writew( 0x328A, ioaddr + 0xA2);
527 writel(0x62DD62DD, ioaddr + 0xA4);
528 writel(0x43924392, ioaddr + 0xA8);
529 writel(0x40094009, ioaddr + 0xAC);
530 writeb( 0x72, ioaddr + 0xE1);
531 writew( 0x328A, ioaddr + 0xE2);
532 writel(0x62DD62DD, ioaddr + 0xE4);
533 writel(0x43924392, ioaddr + 0xE8);
534 writel(0x40094009, ioaddr + 0xEC);
535
536 if (pdev_is_sata(dev)) {
537 writel(0xFFFF0000, ioaddr + 0x108);
538 writel(0xFFFF0000, ioaddr + 0x188);
539 writel(0x00680000, ioaddr + 0x148);
540 writel(0x00680000, ioaddr + 0x1C8);
541 }
542
543 tmpbyte = readb(ioaddr + 0x4A);
544
545 proc_reports_siimage(dev, (tmpbyte>>4), name);
546 return 1; 480 return 1;
547} 481}
548 482
@@ -552,55 +486,92 @@ static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name)
552 * @name: device name 486 * @name: device name
553 * 487 *
554 * Perform the initial PCI set up for this device. Attempt to switch 488 * Perform the initial PCI set up for this device. Attempt to switch
555 * to 133MHz clocking if the system isn't already set up to do it. 489 * to 133 MHz clocking if the system isn't already set up to do it.
556 */ 490 */
557 491
558static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev, const char *name) 492static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev,
493 const char *name)
559{ 494{
560 u8 rev = dev->revision, tmpbyte = 0, BA5_EN = 0; 495 unsigned long base, scsc_addr;
496 void __iomem *ioaddr = NULL;
497 u8 rev = dev->revision, tmp, BA5_EN;
561 498
562 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255); 499 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255);
563 500
564 pci_read_config_byte(dev, 0x8A, &BA5_EN); 501 pci_read_config_byte(dev, 0x8A, &BA5_EN);
565 if ((BA5_EN & 0x01) || (pci_resource_start(dev, 5))) { 502
566 if (setup_mmio_siimage(dev, name)) { 503 if ((BA5_EN & 0x01) || pci_resource_start(dev, 5))
567 return 0; 504 if (setup_mmio_siimage(dev, name))
505 ioaddr = pci_get_drvdata(dev);
506
507 base = (unsigned long)ioaddr;
508
509 if (ioaddr && pdev_is_sata(dev)) {
510 u32 tmp32, irq_mask;
511
512 /* make sure IDE0/1 interrupts are not masked */
513 irq_mask = (1 << 22) | (1 << 23);
514 tmp32 = readl(ioaddr + 0x48);
515 if (tmp32 & irq_mask) {
516 tmp32 &= ~irq_mask;
517 writel(tmp32, ioaddr + 0x48);
518 readl(ioaddr + 0x48); /* flush */
568 } 519 }
520 writel(0, ioaddr + 0x148);
521 writel(0, ioaddr + 0x1C8);
522 }
523
524 sil_iowrite8(dev, 0, base ? (base + 0xB4) : 0x80);
525 sil_iowrite8(dev, 0, base ? (base + 0xF4) : 0x84);
526
527 scsc_addr = base ? (base + 0x4A) : 0x8A;
528 tmp = sil_ioread8(dev, scsc_addr);
529
530 switch (tmp & 0x30) {
531 case 0x00:
532 /* On 100 MHz clocking, try and switch to 133 MHz */
533 sil_iowrite8(dev, tmp | 0x10, scsc_addr);
534 break;
535 case 0x30:
536 /* Clocking is disabled, attempt to force 133MHz clocking. */
537 sil_iowrite8(dev, tmp & ~0x20, scsc_addr);
538 case 0x10:
539 /* On 133Mhz clocking. */
540 break;
541 case 0x20:
542 /* On PCIx2 clocking. */
543 break;
569 } 544 }
570 545
571 pci_write_config_byte(dev, 0x80, 0x00); 546 tmp = sil_ioread8(dev, scsc_addr);
572 pci_write_config_byte(dev, 0x84, 0x00); 547
573 pci_read_config_byte(dev, 0x8A, &tmpbyte); 548 sil_iowrite8 (dev, 0x72, base + 0xA1);
574 switch(tmpbyte & 0x30) { 549 sil_iowrite16(dev, 0x328A, base + 0xA2);
575 case 0x00: 550 sil_iowrite32(dev, 0x62DD62DD, base + 0xA4);
576 /* 133 clock attempt to force it on */ 551 sil_iowrite32(dev, 0x43924392, base + 0xA8);
577 pci_write_config_byte(dev, 0x8A, tmpbyte|0x10); 552 sil_iowrite32(dev, 0x40094009, base + 0xAC);
578 case 0x30: 553 sil_iowrite8 (dev, 0x72, base ? (base + 0xE1) : 0xB1);
579 /* if clocking is disabled */ 554 sil_iowrite16(dev, 0x328A, base ? (base + 0xE2) : 0xB2);
580 /* 133 clock attempt to force it on */ 555 sil_iowrite32(dev, 0x62DD62DD, base ? (base + 0xE4) : 0xB4);
581 pci_write_config_byte(dev, 0x8A, tmpbyte & ~0x20); 556 sil_iowrite32(dev, 0x43924392, base ? (base + 0xE8) : 0xB8);
582 case 0x10: 557 sil_iowrite32(dev, 0x40094009, base ? (base + 0xEC) : 0xBC);
583 /* 133 already */ 558
584 break; 559 if (base && pdev_is_sata(dev)) {
585 case 0x20: 560 writel(0xFFFF0000, ioaddr + 0x108);
586 /* BIOS set PCI x2 clocking */ 561 writel(0xFFFF0000, ioaddr + 0x188);
587 break; 562 writel(0x00680000, ioaddr + 0x148);
563 writel(0x00680000, ioaddr + 0x1C8);
588 } 564 }
589 565
590 pci_read_config_byte(dev, 0x8A, &tmpbyte); 566 /* report the clocking mode of the controller */
567 if (!pdev_is_sata(dev)) {
568 static const char *clk_str[] =
569 { "== 100", "== 133", "== 2X PCI", "DISABLED!" };
591 570
592 pci_write_config_byte(dev, 0xA1, 0x72); 571 tmp >>= 4;
593 pci_write_config_word(dev, 0xA2, 0x328A); 572 printk(KERN_INFO "%s: BASE CLOCK %s\n", name, clk_str[tmp & 3]);
594 pci_write_config_dword(dev, 0xA4, 0x62DD62DD); 573 }
595 pci_write_config_dword(dev, 0xA8, 0x43924392);
596 pci_write_config_dword(dev, 0xAC, 0x40094009);
597 pci_write_config_byte(dev, 0xB1, 0x72);
598 pci_write_config_word(dev, 0xB2, 0x328A);
599 pci_write_config_dword(dev, 0xB4, 0x62DD62DD);
600 pci_write_config_dword(dev, 0xB8, 0x43924392);
601 pci_write_config_dword(dev, 0xBC, 0x40094009);
602 574
603 proc_reports_siimage(dev, (tmpbyte>>4), name);
604 return 0; 575 return 0;
605} 576}
606 577
@@ -610,8 +581,7 @@ static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev, const ch
610 * 581 *
611 * The basic setup here is fairly simple, we can use standard MMIO 582 * The basic setup here is fairly simple, we can use standard MMIO
612 * operations. However we do have to set the taskfile register offsets 583 * operations. However we do have to set the taskfile register offsets
613 * by hand as there isnt a standard defined layout for them this 584 * by hand as there isn't a standard defined layout for them this time.
614 * time.
615 * 585 *
616 * The hardware supports buffered taskfiles and also some rather nice 586 * The hardware supports buffered taskfiles and also some rather nice
617 * extended PRD tables. For better SI3112 support use the libata driver 587 * extended PRD tables. For better SI3112 support use the libata driver
@@ -622,23 +592,20 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
622 struct pci_dev *dev = to_pci_dev(hwif->dev); 592 struct pci_dev *dev = to_pci_dev(hwif->dev);
623 void *addr = pci_get_drvdata(dev); 593 void *addr = pci_get_drvdata(dev);
624 u8 ch = hwif->channel; 594 u8 ch = hwif->channel;
625 unsigned long base;
626
627 struct ide_io_ports *io_ports = &hwif->io_ports; 595 struct ide_io_ports *io_ports = &hwif->io_ports;
596 unsigned long base;
628 597
629 /* 598 /*
630 * Fill in the basic HWIF bits 599 * Fill in the basic hwif bits
631 */ 600 */
632 601 hwif->host_flags |= IDE_HFLAG_MMIO;
633 default_hwif_mmiops(hwif); 602 default_hwif_mmiops(hwif);
634 hwif->hwif_data = addr; 603 hwif->hwif_data = addr;
635 604
636 /* 605 /*
637 * Now set up the hw. We have to do this ourselves as 606 * Now set up the hw. We have to do this ourselves as the
638 * the MMIO layout isnt the same as the standard port 607 * MMIO layout isn't the same as the standard port based I/O.
639 * based I/O
640 */ 608 */
641
642 memset(io_ports, 0, sizeof(*io_ports)); 609 memset(io_ports, 0, sizeof(*io_ports));
643 610
644 base = (unsigned long)addr; 611 base = (unsigned long)addr;
@@ -648,10 +615,9 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
648 base += 0x80; 615 base += 0x80;
649 616
650 /* 617 /*
651 * The buffered task file doesn't have status/control 618 * The buffered task file doesn't have status/control, so we
652 * so we can't currently use it sanely since we want to 619 * can't currently use it sanely since we want to use LBA48 mode.
653 * use LBA48 mode. 620 */
654 */
655 io_ports->data_addr = base; 621 io_ports->data_addr = base;
656 io_ports->error_addr = base + 1; 622 io_ports->error_addr = base + 1;
657 io_ports->nsect_addr = base + 2; 623 io_ports->nsect_addr = base + 2;
@@ -680,19 +646,17 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
680 646
681static int is_dev_seagate_sata(ide_drive_t *drive) 647static int is_dev_seagate_sata(ide_drive_t *drive)
682{ 648{
683 const char *s = &drive->id->model[0]; 649 const char *s = &drive->id->model[0];
684 unsigned len; 650 unsigned len = strnlen(s, sizeof(drive->id->model));
685
686 len = strnlen(s, sizeof(drive->id->model));
687 651
688 if ((len > 4) && (!memcmp(s, "ST", 2))) { 652 if ((len > 4) && (!memcmp(s, "ST", 2)))
689 if ((!memcmp(s + len - 2, "AS", 2)) || 653 if ((!memcmp(s + len - 2, "AS", 2)) ||
690 (!memcmp(s + len - 3, "ASL", 3))) { 654 (!memcmp(s + len - 3, "ASL", 3))) {
691 printk(KERN_INFO "%s: applying pessimistic Seagate " 655 printk(KERN_INFO "%s: applying pessimistic Seagate "
692 "errata fix\n", drive->name); 656 "errata fix\n", drive->name);
693 return 1; 657 return 1;
694 } 658 }
695 } 659
696 return 0; 660 return 0;
697} 661}
698 662
@@ -709,7 +673,7 @@ static void __devinit sil_quirkproc(ide_drive_t *drive)
709{ 673{
710 ide_hwif_t *hwif = drive->hwif; 674 ide_hwif_t *hwif = drive->hwif;
711 675
712 	/* Try and raise the rqsize */			 676	/* Try and raise the rqsize */
713 if (!is_sata(hwif) || !is_dev_seagate_sata(drive)) 677 if (!is_sata(hwif) || !is_dev_seagate_sata(drive))
714 hwif->rqsize = 128; 678 hwif->rqsize = 128;
715} 679}
@@ -743,20 +707,14 @@ static void __devinit init_iops_siimage(ide_hwif_t *hwif)
743 * sil_cable_detect - cable detection 707 * sil_cable_detect - cable detection
744 * @hwif: interface to check 708 * @hwif: interface to check
745 * 709 *
746 * Check for the presence of an ATA66 capable cable on the 710 * Check for the presence of an ATA66 capable cable on the interface.
747 * interface.
748 */ 711 */
749 712
750static u8 __devinit sil_cable_detect(ide_hwif_t *hwif) 713static u8 __devinit sil_cable_detect(ide_hwif_t *hwif)
751{ 714{
752 struct pci_dev *dev = to_pci_dev(hwif->dev); 715 struct pci_dev *dev = to_pci_dev(hwif->dev);
753 unsigned long addr = siimage_selreg(hwif, 0); 716 unsigned long addr = siimage_selreg(hwif, 0);
754 u8 ata66 = 0; 717 u8 ata66 = sil_ioread8(dev, addr);
755
756 if (pci_get_drvdata(dev) == NULL)
757 pci_read_config_byte(dev, addr, &ata66);
758 else
759 ata66 = hwif->INB(addr);
760 718
761 return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40; 719 return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
762} 720}
@@ -779,8 +737,15 @@ static const struct ide_port_ops sil_sata_port_ops = {
779 .cable_detect = sil_cable_detect, 737 .cable_detect = sil_cable_detect,
780}; 738};
781 739
782static struct ide_dma_ops sil_dma_ops = { 740static const struct ide_dma_ops sil_dma_ops = {
741 .dma_host_set = ide_dma_host_set,
742 .dma_setup = ide_dma_setup,
743 .dma_exec_cmd = ide_dma_exec_cmd,
744 .dma_start = ide_dma_start,
745 .dma_end = __ide_dma_end,
783 .dma_test_irq = siimage_dma_test_irq, 746 .dma_test_irq = siimage_dma_test_irq,
747 .dma_timeout = ide_dma_timeout,
748 .dma_lost_irq = ide_dma_lost_irq,
784}; 749};
785 750
786#define DECLARE_SII_DEV(name_str, p_ops) \ 751#define DECLARE_SII_DEV(name_str, p_ops) \
@@ -802,15 +767,16 @@ static const struct ide_port_info siimage_chipsets[] __devinitdata = {
802}; 767};
803 768
804/** 769/**
805 * siimage_init_one - pci layer discovery entry 770 * siimage_init_one - PCI layer discovery entry
806 * @dev: PCI device 771 * @dev: PCI device
807 * @id: ident table entry 772 * @id: ident table entry
808 * 773 *
809 * Called by the PCI code when it finds an SI680 or SI3112 controller. 774 * Called by the PCI code when it finds an SiI680 or SiI3112 controller.
810 * We then use the IDE PCI generic helper to do most of the work. 775 * We then use the IDE PCI generic helper to do most of the work.
811 */ 776 */
812 777
813static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id) 778static int __devinit siimage_init_one(struct pci_dev *dev,
779 const struct pci_device_id *id)
814{ 780{
815 struct ide_port_info d; 781 struct ide_port_info d;
816 u8 idx = id->driver_data; 782 u8 idx = id->driver_data;
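The reworked sil_cable_detect() above reduces cable detection to reading one status byte through sil_ioread8() -- which hides whether the register is reached over MMIO or PCI config space -- and testing bit 0. A minimal standalone sketch of that decision; the cable constants here are stand-ins for illustration, not the kernel's ATA_CBL_* values:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for ATA_CBL_PATA80 / ATA_CBL_PATA40. */
    enum cable_type { CABLE_PATA40, CABLE_PATA80 };

    /* Bit 0 of the status byte reports an 80-wire (UDMA66-capable) cable. */
    static enum cable_type cable_from_status(uint8_t ata66)
    {
            return (ata66 & 0x01) ? CABLE_PATA80 : CABLE_PATA40;
    }

    int main(void)
    {
            printf("0x00 -> %s\n",
                   cable_from_status(0x00) == CABLE_PATA80 ? "80-wire" : "40-wire");
            printf("0x01 -> %s\n",
                   cable_from_status(0x01) == CABLE_PATA80 ? "80-wire" : "40-wire");
            return 0;
    }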
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 3cac6b2790dd..48aa019127bc 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -941,6 +941,7 @@ static const struct ide_port_info pmac_port_info = {
941 .port_ops = &pmac_ide_port_ops, 941 .port_ops = &pmac_ide_port_ops,
942 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | 942 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
943 IDE_HFLAG_POST_SET_MODE | 943 IDE_HFLAG_POST_SET_MODE |
944 IDE_HFLAG_MMIO |
944 IDE_HFLAG_UNMASK_IRQS, 945 IDE_HFLAG_UNMASK_IRQS,
945 .pio_mask = ATA_PIO4, 946 .pio_mask = ATA_PIO4,
946 .mwdma_mask = ATA_MWDMA2, 947 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 29d833e71cbf..05710c7c1220 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -520,8 +520,11 @@ static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
520 char *scratch = buf; 520 char *scratch = buf;
521 521
522 driver = container_of(drv, struct hpsb_protocol_driver, driver); 522 driver = container_of(drv, struct hpsb_protocol_driver, driver);
523 id = driver->id_table;
524 if (!id)
525 return 0;
523 526
524 for (id = driver->id_table; id->match_flags != 0; id++) { 527 for (; id->match_flags != 0; id++) {
525 int need_coma = 0; 528 int need_coma = 0;
526 529
527 if (id->match_flags & IEEE1394_MATCH_VENDOR_ID) { 530 if (id->match_flags & IEEE1394_MATCH_VENDOR_ID) {
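The nodemgr hunk adds a NULL check before walking a driver's ID table to its zero terminator, so a protocol driver that registers no table no longer crashes the sysfs show routine. The same guard-then-iterate pattern as a standalone sketch; the struct and field names here are illustrative, not the ieee1394 ones:

    #include <stdio.h>
    #include <stddef.h>

    struct dev_id {
            unsigned match_flags;   /* 0 terminates the table */
            unsigned vendor_id;
    };

    /* Count entries in a zero-terminated ID table; tolerate a missing table. */
    static size_t count_ids(const struct dev_id *id)
    {
            size_t n = 0;

            if (!id)                /* driver registered no table at all */
                    return 0;
            for (; id->match_flags != 0; id++)
                    n++;
            return n;
    }

    int main(void)
    {
            static const struct dev_id table[] = {
                    { .match_flags = 1, .vendor_id = 0x1234 },
                    { .match_flags = 1, .vendor_id = 0x5678 },
                    { 0 }
            };

            printf("%zu entries\n", count_ids(table));   /* 2 */
            printf("%zu entries\n", count_ids(NULL));    /* 0 */
            return 0;
    }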
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 4e3128ff73c1..fe78f7d25099 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -38,6 +38,7 @@
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/hugetlb.h> 40#include <linux/hugetlb.h>
41#include <linux/dma-attrs.h>
41 42
42#include "uverbs.h" 43#include "uverbs.h"
43 44
@@ -72,9 +73,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
72 * @addr: userspace virtual address to start at 73 * @addr: userspace virtual address to start at
73 * @size: length of region to pin 74 * @size: length of region to pin
74 * @access: IB_ACCESS_xxx flags for memory being pinned 75 * @access: IB_ACCESS_xxx flags for memory being pinned
76 * @dmasync: flush in-flight DMA when the memory region is written
75 */ 77 */
76struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, 78struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
77 size_t size, int access) 79 size_t size, int access, int dmasync)
78{ 80{
79 struct ib_umem *umem; 81 struct ib_umem *umem;
80 struct page **page_list; 82 struct page **page_list;
@@ -87,6 +89,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
87 int ret; 89 int ret;
88 int off; 90 int off;
89 int i; 91 int i;
92 DEFINE_DMA_ATTRS(attrs);
93
94 if (dmasync)
95 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
90 96
91 if (!can_do_mlock()) 97 if (!can_do_mlock())
92 return ERR_PTR(-EPERM); 98 return ERR_PTR(-EPERM);
@@ -174,10 +180,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
174 sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); 180 sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
175 } 181 }
176 182
177 chunk->nmap = ib_dma_map_sg(context->device, 183 chunk->nmap = ib_dma_map_sg_attrs(context->device,
178 &chunk->page_list[0], 184 &chunk->page_list[0],
179 chunk->nents, 185 chunk->nents,
180 DMA_BIDIRECTIONAL); 186 DMA_BIDIRECTIONAL,
187 &attrs);
181 if (chunk->nmap <= 0) { 188 if (chunk->nmap <= 0) {
182 for (i = 0; i < chunk->nents; ++i) 189 for (i = 0; i < chunk->nents; ++i)
183 put_page(sg_page(&chunk->page_list[i])); 190 put_page(sg_page(&chunk->page_list[i]));
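ib_umem_get() gains a dmasync argument, which the core turns into the DMA_ATTR_WRITE_BARRIER attribute before mapping the page list with ib_dma_map_sg_attrs(). A userspace analog of that plumbing -- a boolean request from the caller becoming an attribute bit for the lower layer -- with invented names (map_region, ATTR_WRITE_BARRIER) rather than the kernel API:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative attribute bit, not the kernel's DMA attrs. */
    #define ATTR_WRITE_BARRIER (1u << 0)

    /* Lower layer: would normally program the IOMMU/DMA engine. */
    static int map_region_attrs(unsigned long long addr, size_t size, unsigned attrs)
    {
            printf("map 0x%llx len %zu attrs 0x%x\n", addr, size, attrs);
            return 0;
    }

    /* Upper layer: a boolean request becomes an attribute bit. */
    static int map_region(unsigned long long addr, size_t size, bool dmasync)
    {
            unsigned attrs = 0;

            if (dmasync)
                    attrs |= ATTR_WRITE_BARRIER;  /* flush in-flight DMA on writes */
            return map_region_attrs(addr, size, attrs);
    }

    int main(void)
    {
            map_region(0x1000, 4096, false);
            map_region(0x2000, 4096, true);
            return 0;
    }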
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 6af2c0f79a67..2acf9b62cf99 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -452,7 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
452 return ERR_PTR(-ENOMEM); 452 return ERR_PTR(-ENOMEM);
453 c2mr->pd = c2pd; 453 c2mr->pd = c2pd;
454 454
455 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); 455 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
456 if (IS_ERR(c2mr->umem)) { 456 if (IS_ERR(c2mr->umem)) {
457 err = PTR_ERR(c2mr->umem); 457 err = PTR_ERR(c2mr->umem);
458 kfree(c2mr); 458 kfree(c2mr);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 66eb7030aea8..5fd8506a8657 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -359,9 +359,10 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
359 cq->sw_wptr++; 359 cq->sw_wptr++;
360} 360}
361 361
362void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count) 362int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
363{ 363{
364 u32 ptr; 364 u32 ptr;
365 int flushed = 0;
365 366
366 PDBG("%s wq %p cq %p\n", __func__, wq, cq); 367 PDBG("%s wq %p cq %p\n", __func__, wq, cq);
367 368
@@ -369,8 +370,11 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
369 PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__, 370 PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
370 wq->rq_rptr, wq->rq_wptr, count); 371 wq->rq_rptr, wq->rq_wptr, count);
371 ptr = wq->rq_rptr + count; 372 ptr = wq->rq_rptr + count;
372 while (ptr++ != wq->rq_wptr) 373 while (ptr++ != wq->rq_wptr) {
373 insert_recv_cqe(wq, cq); 374 insert_recv_cqe(wq, cq);
375 flushed++;
376 }
377 return flushed;
374} 378}
375 379
376static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, 380static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
@@ -394,9 +398,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
394 cq->sw_wptr++; 398 cq->sw_wptr++;
395} 399}
396 400
397void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) 401int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
398{ 402{
399 __u32 ptr; 403 __u32 ptr;
404 int flushed = 0;
400 struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2); 405 struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
401 406
402 ptr = wq->sq_rptr + count; 407 ptr = wq->sq_rptr + count;
@@ -405,7 +410,9 @@ void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
405 insert_sq_cqe(wq, cq, sqp); 410 insert_sq_cqe(wq, cq, sqp);
406 sqp++; 411 sqp++;
407 ptr++; 412 ptr++;
413 flushed++;
408 } 414 }
415 return flushed;
409} 416}
410 417
411/* 418/*
@@ -456,7 +463,8 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
456 ptr = cq->sw_rptr; 463 ptr = cq->sw_rptr;
457 while (!Q_EMPTY(ptr, cq->sw_wptr)) { 464 while (!Q_EMPTY(ptr, cq->sw_wptr)) {
458 cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2)); 465 cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
459 if ((SQ_TYPE(*cqe) || (CQE_OPCODE(*cqe) == T3_READ_RESP)) && 466 if ((SQ_TYPE(*cqe) ||
467 ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
460 (CQE_QPID(*cqe) == wq->qpid)) 468 (CQE_QPID(*cqe) == wq->qpid))
461 (*count)++; 469 (*count)++;
462 ptr++; 470 ptr++;
@@ -829,7 +837,8 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
829 wqe->mpaattrs = attr->mpaattrs; 837 wqe->mpaattrs = attr->mpaattrs;
830 wqe->qpcaps = attr->qpcaps; 838 wqe->qpcaps = attr->qpcaps;
831 wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss); 839 wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
832 wqe->flags = cpu_to_be32(attr->flags); 840 wqe->rqe_count = cpu_to_be16(attr->rqe_count);
841 wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type));
833 wqe->ord = cpu_to_be32(attr->ord); 842 wqe->ord = cpu_to_be32(attr->ord);
834 wqe->ird = cpu_to_be32(attr->ird); 843 wqe->ird = cpu_to_be32(attr->ird);
835 wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr); 844 wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1135,6 +1144,18 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
1135 if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) { 1144 if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
1136 1145
1137 /* 1146 /*
1147 * If this is an unsolicited read response, then the read
1148 * was generated by the kernel driver as part of peer-2-peer
1149 * connection setup. So ignore the completion.
1150 */
1151 if (!wq->oldest_read) {
1152 if (CQE_STATUS(*hw_cqe))
1153 wq->error = 1;
1154 ret = -1;
1155 goto skip_cqe;
1156 }
1157
1158 /*
1138 * Don't write to the HWCQ, so create a new read req CQE 1159 * Don't write to the HWCQ, so create a new read req CQE
1139 * in local memory. 1160 * in local memory.
1140 */ 1161 */
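cxio_flush_rq() and cxio_flush_sq() now report how many entries they actually flushed, so __flush_qp() can skip the completion upcall when nothing was added to the CQ. The shape of that contract, sketched with generic names:

    #include <stdio.h>

    /* Flush everything past the first 'count' entries up to the write pointer;
     * return how many entries were actually flushed. */
    static int flush_queue(int rptr, int wptr, int count)
    {
            int flushed = 0;
            int ptr = rptr + count;

            while (ptr++ != wptr)
                    flushed++;
            return flushed;
    }

    static void notify(void)
    {
            printf("completion handler invoked\n");
    }

    int main(void)
    {
            int flushed = flush_queue(0, 5, 3);   /* 2 entries left to flush */

            if (flushed)    /* only wake consumers when something was flushed */
                    notify();
            printf("flushed %d\n", flushed);
            return 0;
    }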
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 99543d634704..69ab08ebc680 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,6 +53,7 @@
53#define T3_MAX_PBL_SIZE 256 53#define T3_MAX_PBL_SIZE 256
54#define T3_MAX_RQ_SIZE 1024 54#define T3_MAX_RQ_SIZE 1024
55#define T3_MAX_NUM_STAG (1<<15) 55#define T3_MAX_NUM_STAG (1<<15)
56#define T3_MAX_MR_SIZE 0x100000000ULL
56 57
57#define T3_STAG_UNSET 0xffffffff 58#define T3_STAG_UNSET 0xffffffff
58 59
@@ -172,8 +173,8 @@ u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
172void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid); 173void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
173int __init cxio_hal_init(void); 174int __init cxio_hal_init(void);
174void __exit cxio_hal_exit(void); 175void __exit cxio_hal_exit(void);
175void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count); 176int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
176void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count); 177int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
177void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 178void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
178void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 179void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
179void cxio_flush_hw_cq(struct t3_cq *cq); 180void cxio_flush_hw_cq(struct t3_cq *cq);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 969d4d928455..f1a25a821a45 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -278,6 +278,17 @@ enum t3_qp_caps {
278 uP_RI_QP_STAG0_ENABLE = 0x10 278 uP_RI_QP_STAG0_ENABLE = 0x10
279} __attribute__ ((packed)); 279} __attribute__ ((packed));
280 280
281enum rdma_init_rtr_types {
282 RTR_READ = 1,
283 RTR_WRITE = 2,
284 RTR_SEND = 3,
285};
286
287#define S_RTR_TYPE 2
288#define M_RTR_TYPE 0x3
289#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
290#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
291
281struct t3_rdma_init_attr { 292struct t3_rdma_init_attr {
282 u32 tid; 293 u32 tid;
283 u32 qpid; 294 u32 qpid;
@@ -293,7 +304,9 @@ struct t3_rdma_init_attr {
293 u32 ird; 304 u32 ird;
294 u64 qp_dma_addr; 305 u64 qp_dma_addr;
295 u32 qp_dma_size; 306 u32 qp_dma_size;
296 u32 flags; 307 enum rdma_init_rtr_types rtr_type;
308 u16 flags;
309 u16 rqe_count;
297 u32 irs; 310 u32 irs;
298}; 311};
299 312
@@ -309,8 +322,8 @@ struct t3_rdma_init_wr {
309 u8 mpaattrs; /* 5 */ 322 u8 mpaattrs; /* 5 */
310 u8 qpcaps; 323 u8 qpcaps;
311 __be16 ulpdu_size; 324 __be16 ulpdu_size;
312 __be32 flags; /* bits 31-1 - reservered */ 325 __be16 flags_rtr_type;
313 /* bit 0 - set if RECV posted */ 326 __be16 rqe_count;
314 __be32 ord; /* 6 */ 327 __be32 ord; /* 6 */
315 __be32 ird; 328 __be32 ird;
316 __be64 qp_dma_addr; /* 7 */ 329 __be64 qp_dma_addr; /* 7 */
@@ -324,7 +337,7 @@ struct t3_genbit {
324}; 337};
325 338
326enum rdma_init_wr_flags { 339enum rdma_init_wr_flags {
327 RECVS_POSTED = (1<<0), 340 MPA_INITIATOR = (1<<0),
328 PRIV_QP = (1<<1), 341 PRIV_QP = (1<<1),
329}; 342};
330 343
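The new flags_rtr_type halfword packs a 2-bit RTR type next to the flag bits using the usual S_/M_/V_/G_ shift-and-mask macros. A quick runnable check of how those macros compose, reusing the definitions and values from the hunk above (MPA_INITIATOR and the RTR_* values are taken from this patch):

    #include <stdio.h>
    #include <stdint.h>

    #define MPA_INITIATOR (1 << 0)

    #define S_RTR_TYPE 2
    #define M_RTR_TYPE 0x3
    #define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
    #define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)

    enum rtr_type { RTR_READ = 1, RTR_WRITE = 2, RTR_SEND = 3 };

    int main(void)
    {
            uint16_t flags_rtr_type = MPA_INITIATOR | V_RTR_TYPE(RTR_READ);

            printf("packed   = 0x%04x\n", (unsigned)flags_rtr_type);         /* 0x0005 */
            printf("rtr type = %d\n", (int)G_RTR_TYPE(flags_rtr_type));      /* 1 == RTR_READ */
            printf("flags    = 0x%x\n", (unsigned)(flags_rtr_type & 0x3));   /* 0x1 */
            return 0;
    }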
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 6ba4138c8ec3..71554eacb13c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -83,6 +83,7 @@ static void rnic_init(struct iwch_dev *rnicp)
83 rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE; 83 rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
84 rnicp->attr.max_pds = T3_MAX_NUM_PD - 1; 84 rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
85 rnicp->attr.mem_pgsizes_bitmask = 0x7FFF; /* 4KB-128MB */ 85 rnicp->attr.mem_pgsizes_bitmask = 0x7FFF; /* 4KB-128MB */
86 rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
86 rnicp->attr.can_resize_wq = 0; 87 rnicp->attr.can_resize_wq = 0;
87 rnicp->attr.max_rdma_reads_per_qp = 8; 88 rnicp->attr.max_rdma_reads_per_qp = 8;
88 rnicp->attr.max_rdma_read_resources = 89 rnicp->attr.max_rdma_read_resources =
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 9ad9b1e7c8c1..d2409a505e8d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -66,6 +66,7 @@ struct iwch_rnic_attributes {
66 * size (4k)^i. Phys block list mode unsupported. 66 * size (4k)^i. Phys block list mode unsupported.
67 */ 67 */
68 u32 mem_pgsizes_bitmask; 68 u32 mem_pgsizes_bitmask;
69 u64 max_mr_size;
69 u8 can_resize_wq; 70 u8 can_resize_wq;
70 71
71 /* 72 /*
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 72ca360c3dbc..c325c44807e8 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -63,10 +63,14 @@ static char *states[] = {
63 NULL, 63 NULL,
64}; 64};
65 65
66static int ep_timeout_secs = 10; 66int peer2peer = 0;
67module_param(peer2peer, int, 0644);
68MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
69
70static int ep_timeout_secs = 60;
67module_param(ep_timeout_secs, int, 0644); 71module_param(ep_timeout_secs, int, 0644);
68MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " 72MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
69 "in seconds (default=10)"); 73 "in seconds (default=60)");
70 74
71static int mpa_rev = 1; 75static int mpa_rev = 1;
72module_param(mpa_rev, int, 0644); 76module_param(mpa_rev, int, 0644);
@@ -125,6 +129,12 @@ static void start_ep_timer(struct iwch_ep *ep)
125static void stop_ep_timer(struct iwch_ep *ep) 129static void stop_ep_timer(struct iwch_ep *ep)
126{ 130{
127 PDBG("%s ep %p\n", __func__, ep); 131 PDBG("%s ep %p\n", __func__, ep);
132 if (!timer_pending(&ep->timer)) {
133 printk(KERN_ERR "%s timer stopped when its not running! ep %p state %u\n",
134 __func__, ep, ep->com.state);
135 WARN_ON(1);
136 return;
137 }
128 del_timer_sync(&ep->timer); 138 del_timer_sync(&ep->timer);
129 put_ep(&ep->com); 139 put_ep(&ep->com);
130} 140}
@@ -508,7 +518,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
508 skb_reset_transport_header(skb); 518 skb_reset_transport_header(skb);
509 len = skb->len; 519 len = skb->len;
510 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 520 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
511 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 521 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
512 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); 522 req->wr_lo = htonl(V_WR_TID(ep->hwtid));
513 req->len = htonl(len); 523 req->len = htonl(len);
514 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | 524 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
@@ -559,7 +569,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
559 set_arp_failure_handler(skb, arp_failure_discard); 569 set_arp_failure_handler(skb, arp_failure_discard);
560 skb_reset_transport_header(skb); 570 skb_reset_transport_header(skb);
561 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 571 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
562 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 572 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
563 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); 573 req->wr_lo = htonl(V_WR_TID(ep->hwtid));
564 req->len = htonl(mpalen); 574 req->len = htonl(mpalen);
565 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | 575 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
@@ -611,7 +621,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
611 skb_reset_transport_header(skb); 621 skb_reset_transport_header(skb);
612 len = skb->len; 622 len = skb->len;
613 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 623 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
614 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 624 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
615 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); 625 req->wr_lo = htonl(V_WR_TID(ep->hwtid));
616 req->len = htonl(len); 626 req->len = htonl(len);
617 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | 627 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
@@ -879,6 +889,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
879 * the MPA header is valid. 889 * the MPA header is valid.
880 */ 890 */
881 state_set(&ep->com, FPDU_MODE); 891 state_set(&ep->com, FPDU_MODE);
892 ep->mpa_attr.initiator = 1;
882 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 893 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
883 ep->mpa_attr.recv_marker_enabled = markers_enabled; 894 ep->mpa_attr.recv_marker_enabled = markers_enabled;
884 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 895 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
@@ -901,8 +912,14 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
901 /* bind QP and TID with INIT_WR */ 912 /* bind QP and TID with INIT_WR */
902 err = iwch_modify_qp(ep->com.qp->rhp, 913 err = iwch_modify_qp(ep->com.qp->rhp,
903 ep->com.qp, mask, &attrs, 1); 914 ep->com.qp, mask, &attrs, 1);
904 if (!err) 915 if (err)
905 goto out; 916 goto err;
917
918 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
919 iwch_post_zb_read(ep->com.qp);
920 }
921
922 goto out;
906err: 923err:
907 abort_connection(ep, skb, GFP_KERNEL); 924 abort_connection(ep, skb, GFP_KERNEL);
908out: 925out:
@@ -995,6 +1012,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
995 * If we get here we have accumulated the entire mpa 1012 * If we get here we have accumulated the entire mpa
996 * start reply message including private data. 1013 * start reply message including private data.
997 */ 1014 */
1015 ep->mpa_attr.initiator = 0;
998 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1016 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
999 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1017 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1000 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1018 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
@@ -1065,17 +1083,33 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1065 1083
1066 PDBG("%s ep %p credits %u\n", __func__, ep, credits); 1084 PDBG("%s ep %p credits %u\n", __func__, ep, credits);
1067 1085
1068 if (credits == 0) 1086 if (credits == 0) {
1087 PDBG(KERN_ERR "%s 0 credit ack ep %p state %u\n",
1088 __func__, ep, state_read(&ep->com));
1069 return CPL_RET_BUF_DONE; 1089 return CPL_RET_BUF_DONE;
1090 }
1091
1070 BUG_ON(credits != 1); 1092 BUG_ON(credits != 1);
1071 BUG_ON(ep->mpa_skb == NULL);
1072 kfree_skb(ep->mpa_skb);
1073 ep->mpa_skb = NULL;
1074 dst_confirm(ep->dst); 1093 dst_confirm(ep->dst);
1075 if (state_read(&ep->com) == MPA_REP_SENT) { 1094 if (!ep->mpa_skb) {
1076 ep->com.rpl_done = 1; 1095 PDBG("%s rdma_init wr_ack ep %p state %u\n",
1077 PDBG("waking up ep %p\n", ep); 1096 __func__, ep, state_read(&ep->com));
1078 wake_up(&ep->com.waitq); 1097 if (ep->mpa_attr.initiator) {
1098 PDBG("%s initiator ep %p state %u\n",
1099 __func__, ep, state_read(&ep->com));
1100 if (peer2peer)
1101 iwch_post_zb_read(ep->com.qp);
1102 } else {
1103 PDBG("%s responder ep %p state %u\n",
1104 __func__, ep, state_read(&ep->com));
1105 ep->com.rpl_done = 1;
1106 wake_up(&ep->com.waitq);
1107 }
1108 } else {
1109 PDBG("%s lsm ack ep %p state %u freeing skb\n",
1110 __func__, ep, state_read(&ep->com));
1111 kfree_skb(ep->mpa_skb);
1112 ep->mpa_skb = NULL;
1079 } 1113 }
1080 return CPL_RET_BUF_DONE; 1114 return CPL_RET_BUF_DONE;
1081} 1115}
@@ -1083,8 +1117,11 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1083static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1117static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1084{ 1118{
1085 struct iwch_ep *ep = ctx; 1119 struct iwch_ep *ep = ctx;
1120 unsigned long flags;
1121 int release = 0;
1086 1122
1087 PDBG("%s ep %p\n", __func__, ep); 1123 PDBG("%s ep %p\n", __func__, ep);
1124 BUG_ON(!ep);
1088 1125
1089 /* 1126 /*
1090 * We get 2 abort replies from the HW. The first one must 1127 * We get 2 abort replies from the HW. The first one must
@@ -1095,9 +1132,22 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1095 return CPL_RET_BUF_DONE; 1132 return CPL_RET_BUF_DONE;
1096 } 1133 }
1097 1134
1098 close_complete_upcall(ep); 1135 spin_lock_irqsave(&ep->com.lock, flags);
1099 state_set(&ep->com, DEAD); 1136 switch (ep->com.state) {
1100 release_ep_resources(ep); 1137 case ABORTING:
1138 close_complete_upcall(ep);
1139 __state_set(&ep->com, DEAD);
1140 release = 1;
1141 break;
1142 default:
1143 printk(KERN_ERR "%s ep %p state %d\n",
1144 __func__, ep, ep->com.state);
1145 break;
1146 }
1147 spin_unlock_irqrestore(&ep->com.lock, flags);
1148
1149 if (release)
1150 release_ep_resources(ep);
1101 return CPL_RET_BUF_DONE; 1151 return CPL_RET_BUF_DONE;
1102} 1152}
1103 1153
@@ -1470,7 +1520,8 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1470 struct sk_buff *rpl_skb; 1520 struct sk_buff *rpl_skb;
1471 struct iwch_qp_attributes attrs; 1521 struct iwch_qp_attributes attrs;
1472 int ret; 1522 int ret;
1473 int state; 1523 int release = 0;
1524 unsigned long flags;
1474 1525
1475 if (is_neg_adv_abort(req->status)) { 1526 if (is_neg_adv_abort(req->status)) {
1476 PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep, 1527 PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
@@ -1488,9 +1539,9 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1488 return CPL_RET_BUF_DONE; 1539 return CPL_RET_BUF_DONE;
1489 } 1540 }
1490 1541
1491 state = state_read(&ep->com); 1542 spin_lock_irqsave(&ep->com.lock, flags);
1492 PDBG("%s ep %p state %u\n", __func__, ep, state); 1543 PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
1493 switch (state) { 1544 switch (ep->com.state) {
1494 case CONNECTING: 1545 case CONNECTING:
1495 break; 1546 break;
1496 case MPA_REQ_WAIT: 1547 case MPA_REQ_WAIT:
@@ -1536,21 +1587,25 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1536 break; 1587 break;
1537 case DEAD: 1588 case DEAD:
1538 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 1589 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1590 spin_unlock_irqrestore(&ep->com.lock, flags);
1539 return CPL_RET_BUF_DONE; 1591 return CPL_RET_BUF_DONE;
1540 default: 1592 default:
1541 BUG_ON(1); 1593 BUG_ON(1);
1542 break; 1594 break;
1543 } 1595 }
1544 dst_confirm(ep->dst); 1596 dst_confirm(ep->dst);
1597 if (ep->com.state != ABORTING) {
1598 __state_set(&ep->com, DEAD);
1599 release = 1;
1600 }
1601 spin_unlock_irqrestore(&ep->com.lock, flags);
1545 1602
1546 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 1603 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1547 if (!rpl_skb) { 1604 if (!rpl_skb) {
1548 printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 1605 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1549 __func__); 1606 __func__);
1550 dst_release(ep->dst); 1607 release = 1;
1551 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 1608 goto out;
1552 put_ep(&ep->com);
1553 return CPL_RET_BUF_DONE;
1554 } 1609 }
1555 rpl_skb->priority = CPL_PRIORITY_DATA; 1610 rpl_skb->priority = CPL_PRIORITY_DATA;
1556 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 1611 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
@@ -1559,10 +1614,9 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1559 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 1614 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1560 rpl->cmd = CPL_ABORT_NO_RST; 1615 rpl->cmd = CPL_ABORT_NO_RST;
1561 cxgb3_ofld_send(ep->com.tdev, rpl_skb); 1616 cxgb3_ofld_send(ep->com.tdev, rpl_skb);
1562 if (state != ABORTING) { 1617out:
1563 state_set(&ep->com, DEAD); 1618 if (release)
1564 release_ep_resources(ep); 1619 release_ep_resources(ep);
1565 }
1566 return CPL_RET_BUF_DONE; 1620 return CPL_RET_BUF_DONE;
1567} 1621}
1568 1622
@@ -1596,8 +1650,8 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1596 release = 1; 1650 release = 1;
1597 break; 1651 break;
1598 case ABORTING: 1652 case ABORTING:
1599 break;
1600 case DEAD: 1653 case DEAD:
1654 break;
1601 default: 1655 default:
1602 BUG_ON(1); 1656 BUG_ON(1);
1603 break; 1657 break;
@@ -1661,15 +1715,18 @@ static void ep_timeout(unsigned long arg)
1661 struct iwch_ep *ep = (struct iwch_ep *)arg; 1715 struct iwch_ep *ep = (struct iwch_ep *)arg;
1662 struct iwch_qp_attributes attrs; 1716 struct iwch_qp_attributes attrs;
1663 unsigned long flags; 1717 unsigned long flags;
1718 int abort = 1;
1664 1719
1665 spin_lock_irqsave(&ep->com.lock, flags); 1720 spin_lock_irqsave(&ep->com.lock, flags);
1666 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, 1721 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
1667 ep->com.state); 1722 ep->com.state);
1668 switch (ep->com.state) { 1723 switch (ep->com.state) {
1669 case MPA_REQ_SENT: 1724 case MPA_REQ_SENT:
1725 __state_set(&ep->com, ABORTING);
1670 connect_reply_upcall(ep, -ETIMEDOUT); 1726 connect_reply_upcall(ep, -ETIMEDOUT);
1671 break; 1727 break;
1672 case MPA_REQ_WAIT: 1728 case MPA_REQ_WAIT:
1729 __state_set(&ep->com, ABORTING);
1673 break; 1730 break;
1674 case CLOSING: 1731 case CLOSING:
1675 case MORIBUND: 1732 case MORIBUND:
@@ -1679,13 +1736,17 @@ static void ep_timeout(unsigned long arg)
1679 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, 1736 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
1680 &attrs, 1); 1737 &attrs, 1);
1681 } 1738 }
1739 __state_set(&ep->com, ABORTING);
1682 break; 1740 break;
1683 default: 1741 default:
1684 BUG(); 1742 printk(KERN_ERR "%s unexpected state ep %p state %u\n",
1743 __func__, ep, ep->com.state);
1744 WARN_ON(1);
1745 abort = 0;
1685 } 1746 }
1686 __state_set(&ep->com, CLOSING);
1687 spin_unlock_irqrestore(&ep->com.lock, flags); 1747 spin_unlock_irqrestore(&ep->com.lock, flags);
1688 abort_connection(ep, NULL, GFP_ATOMIC); 1748 if (abort)
1749 abort_connection(ep, NULL, GFP_ATOMIC);
1689 put_ep(&ep->com); 1750 put_ep(&ep->com);
1690} 1751}
1691 1752
@@ -1762,16 +1823,19 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1762 if (err) 1823 if (err)
1763 goto err; 1824 goto err;
1764 1825
1826 /* if needed, wait for wr_ack */
1827 if (iwch_rqes_posted(qp)) {
1828 wait_event(ep->com.waitq, ep->com.rpl_done);
1829 err = ep->com.rpl_err;
1830 if (err)
1831 goto err;
1832 }
1833
1765 err = send_mpa_reply(ep, conn_param->private_data, 1834 err = send_mpa_reply(ep, conn_param->private_data,
1766 conn_param->private_data_len); 1835 conn_param->private_data_len);
1767 if (err) 1836 if (err)
1768 goto err; 1837 goto err;
1769 1838
1770 /* wait for wr_ack */
1771 wait_event(ep->com.waitq, ep->com.rpl_done);
1772 err = ep->com.rpl_err;
1773 if (err)
1774 goto err;
1775 1839
1776 state_set(&ep->com, FPDU_MODE); 1840 state_set(&ep->com, FPDU_MODE);
1777 established_upcall(ep); 1841 established_upcall(ep);
@@ -1968,40 +2032,39 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
1968 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 2032 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
1969 states[ep->com.state], abrupt); 2033 states[ep->com.state], abrupt);
1970 2034
1971 if (ep->com.state == DEAD) {
1972 PDBG("%s already dead ep %p\n", __func__, ep);
1973 goto out;
1974 }
1975
1976 if (abrupt) {
1977 if (ep->com.state != ABORTING) {
1978 ep->com.state = ABORTING;
1979 close = 1;
1980 }
1981 goto out;
1982 }
1983
1984 switch (ep->com.state) { 2035 switch (ep->com.state) {
1985 case MPA_REQ_WAIT: 2036 case MPA_REQ_WAIT:
1986 case MPA_REQ_SENT: 2037 case MPA_REQ_SENT:
1987 case MPA_REQ_RCVD: 2038 case MPA_REQ_RCVD:
1988 case MPA_REP_SENT: 2039 case MPA_REP_SENT:
1989 case FPDU_MODE: 2040 case FPDU_MODE:
1990 start_ep_timer(ep);
1991 ep->com.state = CLOSING;
1992 close = 1; 2041 close = 1;
2042 if (abrupt)
2043 ep->com.state = ABORTING;
2044 else {
2045 ep->com.state = CLOSING;
2046 start_ep_timer(ep);
2047 }
1993 break; 2048 break;
1994 case CLOSING: 2049 case CLOSING:
1995 ep->com.state = MORIBUND;
1996 close = 1; 2050 close = 1;
2051 if (abrupt) {
2052 stop_ep_timer(ep);
2053 ep->com.state = ABORTING;
2054 } else
2055 ep->com.state = MORIBUND;
1997 break; 2056 break;
1998 case MORIBUND: 2057 case MORIBUND:
2058 case ABORTING:
2059 case DEAD:
2060 PDBG("%s ignoring disconnect ep %p state %u\n",
2061 __func__, ep, ep->com.state);
1999 break; 2062 break;
2000 default: 2063 default:
2001 BUG(); 2064 BUG();
2002 break; 2065 break;
2003 } 2066 }
2004out: 2067
2005 spin_unlock_irqrestore(&ep->com.lock, flags); 2068 spin_unlock_irqrestore(&ep->com.lock, flags);
2006 if (close) { 2069 if (close) {
2007 if (abrupt) 2070 if (abrupt)
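Several of the CM handlers above (abort_rpl(), peer_abort(), ep_timeout()) are reworked to take ep->com.lock, perform the state transition, and only record a release/abort decision that is acted on after the lock is dropped. A compact pthread sketch of that pattern, built with -pthread; the endpoint structure and states are invented for illustration:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>

    enum ep_state { CONNECTING, ABORTING, DEAD };

    struct endpoint {
            pthread_mutex_t lock;
            enum ep_state state;
    };

    /* Decide under the lock, release resources only after dropping it. */
    static void handle_abort_reply(struct endpoint *ep)
    {
            bool release = false;

            pthread_mutex_lock(&ep->lock);
            switch (ep->state) {
            case ABORTING:
                    ep->state = DEAD;
                    release = true;
                    break;
            default:
                    fprintf(stderr, "unexpected state %d\n", ep->state);
                    break;
            }
            pthread_mutex_unlock(&ep->lock);

            if (release)
                    printf("releasing endpoint resources\n");
    }

    int main(void)
    {
            struct endpoint ep;

            pthread_mutex_init(&ep.lock, NULL);
            ep.state = ABORTING;
            handle_abort_reply(&ep);
            pthread_mutex_destroy(&ep.lock);
            return 0;
    }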
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 2bb7fbdb3ff4..d7c7e09f0996 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -56,6 +56,7 @@
56#define put_ep(ep) { \ 56#define put_ep(ep) { \
57 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \ 57 PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
58 ep, atomic_read(&((ep)->kref.refcount))); \ 58 ep, atomic_read(&((ep)->kref.refcount))); \
59 WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
59 kref_put(&((ep)->kref), __free_ep); \ 60 kref_put(&((ep)->kref), __free_ep); \
60} 61}
61 62
@@ -225,5 +226,6 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, st
225 226
226int __init iwch_cm_init(void); 227int __init iwch_cm_init(void);
227void __exit iwch_cm_term(void); 228void __exit iwch_cm_term(void);
229extern int peer2peer;
228 230
229#endif /* _IWCH_CM_H_ */ 231#endif /* _IWCH_CM_H_ */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ab4695c1dd56..d07d3a377b5f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -602,7 +602,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
602 if (!mhp) 602 if (!mhp)
603 return ERR_PTR(-ENOMEM); 603 return ERR_PTR(-ENOMEM);
604 604
605 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc); 605 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
606 if (IS_ERR(mhp->umem)) { 606 if (IS_ERR(mhp->umem)) {
607 err = PTR_ERR(mhp->umem); 607 err = PTR_ERR(mhp->umem);
608 kfree(mhp); 608 kfree(mhp);
@@ -998,7 +998,7 @@ static int iwch_query_device(struct ib_device *ibdev,
998 props->device_cap_flags = dev->device_cap_flags; 998 props->device_cap_flags = dev->device_cap_flags;
999 props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor; 999 props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
1000 props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device; 1000 props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
1001 props->max_mr_size = ~0ull; 1001 props->max_mr_size = dev->attr.max_mr_size;
1002 props->max_qp = dev->attr.max_qps; 1002 props->max_qp = dev->attr.max_qps;
1003 props->max_qp_wr = dev->attr.max_wrs; 1003 props->max_qp_wr = dev->attr.max_wrs;
1004 props->max_sge = dev->attr.max_sge_per_wr; 1004 props->max_sge = dev->attr.max_sge_per_wr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 61356f91109d..db5100d27ca2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -118,6 +118,7 @@ enum IWCH_QP_FLAGS {
118}; 118};
119 119
120struct iwch_mpa_attributes { 120struct iwch_mpa_attributes {
121 u8 initiator;
121 u8 recv_marker_enabled; 122 u8 recv_marker_enabled;
122 u8 xmit_marker_enabled; /* iWARP: enable inbound Read Resp. */ 123 u8 xmit_marker_enabled; /* iWARP: enable inbound Read Resp. */
123 u8 crc_enabled; 124 u8 crc_enabled;
@@ -322,6 +323,7 @@ enum iwch_qp_query_flags {
322 IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */ 323 IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */
323}; 324};
324 325
326u16 iwch_rqes_posted(struct iwch_qp *qhp);
325int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 327int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
326 struct ib_send_wr **bad_wr); 328 struct ib_send_wr **bad_wr);
327int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 329int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
@@ -331,6 +333,7 @@ int iwch_bind_mw(struct ib_qp *qp,
331 struct ib_mw_bind *mw_bind); 333 struct ib_mw_bind *mw_bind);
332int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); 334int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
333int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); 335int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
336int iwch_post_zb_read(struct iwch_qp *qhp);
334int iwch_register_device(struct iwch_dev *dev); 337int iwch_register_device(struct iwch_dev *dev);
335void iwch_unregister_device(struct iwch_dev *dev); 338void iwch_unregister_device(struct iwch_dev *dev);
336int iwch_quiesce_qps(struct iwch_cq *chp); 339int iwch_quiesce_qps(struct iwch_cq *chp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 8891c3b0a3d5..79dbe5beae52 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -586,6 +586,36 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
586 } 586 }
587} 587}
588 588
589int iwch_post_zb_read(struct iwch_qp *qhp)
590{
591 union t3_wr *wqe;
592 struct sk_buff *skb;
593 u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
594
595 PDBG("%s enter\n", __func__);
596 skb = alloc_skb(40, GFP_KERNEL);
597 if (!skb) {
598 printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
599 return -ENOMEM;
600 }
601 wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
602 memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
603 wqe->read.rdmaop = T3_READ_REQ;
604 wqe->read.reserved[0] = 0;
605 wqe->read.reserved[1] = 0;
606 wqe->read.reserved[2] = 0;
607 wqe->read.rem_stag = cpu_to_be32(1);
608 wqe->read.rem_to = cpu_to_be64(1);
609 wqe->read.local_stag = cpu_to_be32(1);
610 wqe->read.local_len = cpu_to_be32(0);
611 wqe->read.local_to = cpu_to_be64(1);
612 wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
613 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
614 V_FW_RIWR_LEN(flit_cnt));
615 skb->priority = CPL_PRIORITY_DATA;
616 return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
617}
618
589/* 619/*
590 * This posts a TERMINATE with layer=RDMA, type=catastrophic. 620 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
591 */ 621 */
@@ -625,6 +655,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
625{ 655{
626 struct iwch_cq *rchp, *schp; 656 struct iwch_cq *rchp, *schp;
627 int count; 657 int count;
658 int flushed;
628 659
629 rchp = get_chp(qhp->rhp, qhp->attr.rcq); 660 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
630 schp = get_chp(qhp->rhp, qhp->attr.scq); 661 schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -639,20 +670,22 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
639 spin_lock(&qhp->lock); 670 spin_lock(&qhp->lock);
640 cxio_flush_hw_cq(&rchp->cq); 671 cxio_flush_hw_cq(&rchp->cq);
641 cxio_count_rcqes(&rchp->cq, &qhp->wq, &count); 672 cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
642 cxio_flush_rq(&qhp->wq, &rchp->cq, count); 673 flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
643 spin_unlock(&qhp->lock); 674 spin_unlock(&qhp->lock);
644 spin_unlock_irqrestore(&rchp->lock, *flag); 675 spin_unlock_irqrestore(&rchp->lock, *flag);
645 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); 676 if (flushed)
677 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
646 678
647 	/* locking hierarchy: cq lock first, then qp lock. */	 679	/* locking hierarchy: cq lock first, then qp lock. */
648 spin_lock_irqsave(&schp->lock, *flag); 680 spin_lock_irqsave(&schp->lock, *flag);
649 spin_lock(&qhp->lock); 681 spin_lock(&qhp->lock);
650 cxio_flush_hw_cq(&schp->cq); 682 cxio_flush_hw_cq(&schp->cq);
651 cxio_count_scqes(&schp->cq, &qhp->wq, &count); 683 cxio_count_scqes(&schp->cq, &qhp->wq, &count);
652 cxio_flush_sq(&qhp->wq, &schp->cq, count); 684 flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
653 spin_unlock(&qhp->lock); 685 spin_unlock(&qhp->lock);
654 spin_unlock_irqrestore(&schp->lock, *flag); 686 spin_unlock_irqrestore(&schp->lock, *flag);
655 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); 687 if (flushed)
688 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
656 689
657 /* deref */ 690 /* deref */
658 if (atomic_dec_and_test(&qhp->refcnt)) 691 if (atomic_dec_and_test(&qhp->refcnt))
@@ -671,11 +704,18 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
671 704
672 705
673/* 706/*
674 * Return non zero if at least one RECV was pre-posted. 707 * Return count of RECV WRs posted
675 */ 708 */
676static int rqes_posted(struct iwch_qp *qhp) 709u16 iwch_rqes_posted(struct iwch_qp *qhp)
677{ 710{
678 return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV; 711 union t3_wr *wqe = qhp->wq.queue;
712 u16 count = 0;
713 while ((count+1) != 0 && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
714 count++;
715 wqe++;
716 }
717 PDBG("%s qhp %p count %u\n", __func__, qhp, count);
718 return count;
679} 719}
680 720
681static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp, 721static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
@@ -716,8 +756,17 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
716 init_attr.ird = qhp->attr.max_ird; 756 init_attr.ird = qhp->attr.max_ird;
717 init_attr.qp_dma_addr = qhp->wq.dma_addr; 757 init_attr.qp_dma_addr = qhp->wq.dma_addr;
718 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); 758 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
719 init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0; 759 init_attr.rqe_count = iwch_rqes_posted(qhp);
760 init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
720 init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0; 761 init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
762 if (peer2peer) {
763 init_attr.rtr_type = RTR_READ;
764 if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
765 init_attr.ord = 1;
766 if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
767 init_attr.ird = 1;
768 } else
769 init_attr.rtr_type = 0;
721 init_attr.irs = qhp->ep->rcv_seq; 770 init_attr.irs = qhp->ep->rcv_seq;
722 PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d " 771 PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
723 "flags 0x%x qpcaps 0x%x\n", __func__, 772 "flags 0x%x qpcaps 0x%x\n", __func__,
@@ -832,8 +881,8 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
832 abort=0; 881 abort=0;
833 disconnect = 1; 882 disconnect = 1;
834 ep = qhp->ep; 883 ep = qhp->ep;
884 get_ep(&ep->com);
835 } 885 }
836 flush_qp(qhp, &flag);
837 break; 886 break;
838 case IWCH_QP_STATE_TERMINATE: 887 case IWCH_QP_STATE_TERMINATE:
839 qhp->attr.state = IWCH_QP_STATE_TERMINATE; 888 qhp->attr.state = IWCH_QP_STATE_TERMINATE;
@@ -848,6 +897,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
848 abort=1; 897 abort=1;
849 disconnect = 1; 898 disconnect = 1;
850 ep = qhp->ep; 899 ep = qhp->ep;
900 get_ep(&ep->com);
851 } 901 }
852 goto err; 902 goto err;
853 break; 903 break;
@@ -863,6 +913,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
863 } 913 }
864 switch (attrs->next_state) { 914 switch (attrs->next_state) {
865 case IWCH_QP_STATE_IDLE: 915 case IWCH_QP_STATE_IDLE:
916 flush_qp(qhp, &flag);
866 qhp->attr.state = IWCH_QP_STATE_IDLE; 917 qhp->attr.state = IWCH_QP_STATE_IDLE;
867 qhp->attr.llp_stream_handle = NULL; 918 qhp->attr.llp_stream_handle = NULL;
868 put_ep(&qhp->ep->com); 919 put_ep(&qhp->ep->com);
@@ -929,8 +980,10 @@ out:
929 * on the EP. This can be a normal close (RTS->CLOSING) or 980 * on the EP. This can be a normal close (RTS->CLOSING) or
930 * an abnormal close (RTS/CLOSING->ERROR). 981 * an abnormal close (RTS/CLOSING->ERROR).
931 */ 982 */
932 if (disconnect) 983 if (disconnect) {
933 iwch_ep_disconnect(ep, abort, GFP_KERNEL); 984 iwch_ep_disconnect(ep, abort, GFP_KERNEL);
985 put_ep(&ep->com);
986 }
934 987
935 /* 988 /*
936 * If free is 1, then we've disassociated the EP from the QP 989 * If free is 1, then we've disassociated the EP from the QP
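iwch_rqes_posted() replaces the old boolean rqes_posted() by walking the work-request queue from the start and counting consecutive RECV entries, with a (count + 1) != 0 guard so the u16 counter cannot wrap. The same loop over a plain array, with illustrative opcode values rather than the firmware ones:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    enum wr_opcode { WR_RCV = 1, WR_SEND = 2, WR_INIT = 3 };

    /* Count consecutive RECV work requests at the head of the queue,
     * refusing to let the 16-bit counter wrap around. */
    static uint16_t rqes_posted(const enum wr_opcode *queue, size_t qlen)
    {
            uint16_t count = 0;

            while (count < qlen &&
                   (uint16_t)(count + 1) != 0 &&   /* wraparound guard */
                   queue[count] == WR_RCV)
                    count++;
            return count;
    }

    int main(void)
    {
            enum wr_opcode queue[] = { WR_RCV, WR_RCV, WR_RCV, WR_INIT, WR_SEND };

            printf("%u RECVs posted\n", rqes_posted(queue, 5));   /* 3 */
            return 0;
    }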
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 3d6d9461c31d..00bab60f6de4 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -66,6 +66,7 @@ struct ehca_av;
66#include "ehca_irq.h" 66#include "ehca_irq.h"
67 67
68#define EHCA_EQE_CACHE_SIZE 20 68#define EHCA_EQE_CACHE_SIZE 20
69#define EHCA_MAX_NUM_QUEUES 0xffff
69 70
70struct ehca_eqe_cache_entry { 71struct ehca_eqe_cache_entry {
71 struct ehca_eqe *eqe; 72 struct ehca_eqe *eqe;
@@ -127,6 +128,8 @@ struct ehca_shca {
127 /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */ 128 /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
128 u32 hca_cap_mr_pgsize; 129 u32 hca_cap_mr_pgsize;
129 int max_mtu; 130 int max_mtu;
131 atomic_t num_cqs;
132 atomic_t num_qps;
130}; 133};
131 134
132struct ehca_pd { 135struct ehca_pd {
@@ -344,6 +347,8 @@ extern int ehca_use_hp_mr;
344extern int ehca_scaling_code; 347extern int ehca_scaling_code;
345extern int ehca_lock_hcalls; 348extern int ehca_lock_hcalls;
346extern int ehca_nr_ports; 349extern int ehca_nr_ports;
350extern int ehca_max_cq;
351extern int ehca_max_qp;
347 352
348struct ipzu_queue_resp { 353struct ipzu_queue_resp {
349 u32 qe_size; /* queue entry size */ 354 u32 qe_size; /* queue entry size */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index ec0cfcf3073f..5540b276a33c 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -132,10 +132,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
132 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) 132 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
133 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
134 134
135 if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) {
136 ehca_err(device, "Unable to create CQ, max number of %i "
137 "CQs reached.", ehca_max_cq);
138 ehca_err(device, "To increase the maximum number of CQs "
139 "use the number_of_cqs module parameter.\n");
140 return ERR_PTR(-ENOSPC);
141 }
142
135 my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL); 143 my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
136 if (!my_cq) { 144 if (!my_cq) {
137 ehca_err(device, "Out of memory for ehca_cq struct device=%p", 145 ehca_err(device, "Out of memory for ehca_cq struct device=%p",
138 device); 146 device);
147 atomic_dec(&shca->num_cqs);
139 return ERR_PTR(-ENOMEM); 148 return ERR_PTR(-ENOMEM);
140 } 149 }
141 150
@@ -305,6 +314,7 @@ create_cq_exit2:
305create_cq_exit1: 314create_cq_exit1:
306 kmem_cache_free(cq_cache, my_cq); 315 kmem_cache_free(cq_cache, my_cq);
307 316
317 atomic_dec(&shca->num_cqs);
308 return cq; 318 return cq;
309} 319}
310 320
@@ -359,6 +369,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
359 ipz_queue_dtor(NULL, &my_cq->ipz_queue); 369 ipz_queue_dtor(NULL, &my_cq->ipz_queue);
360 kmem_cache_free(cq_cache, my_cq); 370 kmem_cache_free(cq_cache, my_cq);
361 371
372 atomic_dec(&shca->num_cqs);
362 return 0; 373 return 0;
363} 374}
364 375
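ehca_create_cq() now reserves a slot with atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq) and gives it back on every error path and in ehca_destroy_cq(). The same reserve/release idea expressed with C11 atomics; the limit and names here are arbitrary:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_QUEUES 2

    static atomic_int num_queues;

    /* Reserve one slot unless the limit is reached (cf. atomic_add_unless). */
    static bool reserve_queue(void)
    {
            int cur = atomic_load(&num_queues);

            while (cur < MAX_QUEUES) {
                    if (atomic_compare_exchange_weak(&num_queues, &cur, cur + 1))
                            return true;
                    /* cur was reloaded by the failed CAS; retry */
            }
            return false;
    }

    static void release_queue(void)
    {
            atomic_fetch_sub(&num_queues, 1);   /* cf. atomic_dec on error/destroy */
    }

    int main(void)
    {
            printf("%d\n", reserve_queue());   /* 1 */
            printf("%d\n", reserve_queue());   /* 1 */
            printf("%d\n", reserve_queue());   /* 0: limit reached */
            release_queue();
            printf("%d\n", reserve_queue());   /* 1 again */
            return 0;
    }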
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index b4ac617a70e6..49660dfa1867 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -54,7 +54,8 @@ int ehca_create_eq(struct ehca_shca *shca,
54 struct ehca_eq *eq, 54 struct ehca_eq *eq,
55 const enum ehca_eq_type type, const u32 length) 55 const enum ehca_eq_type type, const u32 length)
56{ 56{
57 u64 ret; 57 int ret;
58 u64 h_ret;
58 u32 nr_pages; 59 u32 nr_pages;
59 u32 i; 60 u32 i;
60 void *vpage; 61 void *vpage;
@@ -73,15 +74,15 @@ int ehca_create_eq(struct ehca_shca *shca,
73 return -EINVAL; 74 return -EINVAL;
74 } 75 }
75 76
76 ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle, 77 h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
77 &eq->pf, 78 &eq->pf,
78 type, 79 type,
79 length, 80 length,
80 &eq->ipz_eq_handle, 81 &eq->ipz_eq_handle,
81 &eq->length, 82 &eq->length,
82 &nr_pages, &eq->ist); 83 &nr_pages, &eq->ist);
83 84
84 if (ret != H_SUCCESS) { 85 if (h_ret != H_SUCCESS) {
85 ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq); 86 ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
86 return -EINVAL; 87 return -EINVAL;
87 } 88 }
@@ -97,24 +98,22 @@ int ehca_create_eq(struct ehca_shca *shca,
97 u64 rpage; 98 u64 rpage;
98 99
99 vpage = ipz_qpageit_get_inc(&eq->ipz_queue); 100 vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
100 if (!vpage) { 101 if (!vpage)
101 ret = H_RESOURCE;
102 goto create_eq_exit2; 102 goto create_eq_exit2;
103 }
104 103
105 rpage = virt_to_abs(vpage); 104 rpage = virt_to_abs(vpage);
106 ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle, 105 h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
107 eq->ipz_eq_handle, 106 eq->ipz_eq_handle,
108 &eq->pf, 107 &eq->pf,
109 0, 0, rpage, 1); 108 0, 0, rpage, 1);
110 109
111 if (i == (nr_pages - 1)) { 110 if (i == (nr_pages - 1)) {
112 /* last page */ 111 /* last page */
113 vpage = ipz_qpageit_get_inc(&eq->ipz_queue); 112 vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
114 if (ret != H_SUCCESS || vpage) 113 if (h_ret != H_SUCCESS || vpage)
115 goto create_eq_exit2; 114 goto create_eq_exit2;
116 } else { 115 } else {
117 if (ret != H_PAGE_REGISTERED || !vpage) 116 if (h_ret != H_PAGE_REGISTERED || !vpage)
118 goto create_eq_exit2; 117 goto create_eq_exit2;
119 } 118 }
120 } 119 }
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 2515cbde7e65..bc3b37d2070f 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -101,7 +101,6 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
101 props->max_ee = limit_uint(rblock->max_rd_ee_context); 101 props->max_ee = limit_uint(rblock->max_rd_ee_context);
102 props->max_rdd = limit_uint(rblock->max_rd_domain); 102 props->max_rdd = limit_uint(rblock->max_rd_domain);
103 props->max_fmr = limit_uint(rblock->max_mr); 103 props->max_fmr = limit_uint(rblock->max_mr);
104 props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay);
105 props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp); 104 props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
106 props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context); 105 props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
107 props->max_res_rd_atom = limit_uint(rblock->max_rr_hca); 106 props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
@@ -115,7 +114,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
115 } 114 }
116 115
117 props->max_pkeys = 16; 116 props->max_pkeys = 16;
118 props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay); 117 props->local_ca_ack_delay = min_t(u8, rblock->local_ca_ack_delay, 255);
119 props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp); 118 props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
120 props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp); 119 props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
121 props->max_mcast_grp = limit_uint(rblock->max_mcast_grp); 120 props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
@@ -136,7 +135,7 @@ query_device1:
136 return ret; 135 return ret;
137} 136}
138 137
139static int map_mtu(struct ehca_shca *shca, u32 fw_mtu) 138static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
140{ 139{
141 switch (fw_mtu) { 140 switch (fw_mtu) {
142 case 0x1: 141 case 0x1:
@@ -156,7 +155,7 @@ static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
156 } 155 }
157} 156}
158 157
159static int map_number_of_vls(struct ehca_shca *shca, u32 vl_cap) 158static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
160{ 159{
161 switch (vl_cap) { 160 switch (vl_cap) {
162 case 0x1: 161 case 0x1:
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 65048976198c..482103eb6eac 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -68,6 +68,8 @@ int ehca_port_act_time = 30;
68int ehca_static_rate = -1; 68int ehca_static_rate = -1;
69int ehca_scaling_code = 0; 69int ehca_scaling_code = 0;
70int ehca_lock_hcalls = -1; 70int ehca_lock_hcalls = -1;
71int ehca_max_cq = -1;
72int ehca_max_qp = -1;
71 73
72module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO); 74module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
73module_param_named(debug_level, ehca_debug_level, int, S_IRUGO); 75module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
@@ -79,6 +81,8 @@ module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
79module_param_named(static_rate, ehca_static_rate, int, S_IRUGO); 81module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
80module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO); 82module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
81module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO); 83module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO);
84module_param_named(number_of_cqs, ehca_max_cq, int, S_IRUGO);
85module_param_named(number_of_qps, ehca_max_qp, int, S_IRUGO);
82 86
83MODULE_PARM_DESC(open_aqp1, 87MODULE_PARM_DESC(open_aqp1,
84 "Open AQP1 on startup (default: no)"); 88 "Open AQP1 on startup (default: no)");
@@ -104,6 +108,12 @@ MODULE_PARM_DESC(scaling_code,
104MODULE_PARM_DESC(lock_hcalls, 108MODULE_PARM_DESC(lock_hcalls,
105 "Serialize all hCalls made by the driver " 109 "Serialize all hCalls made by the driver "
106 "(default: autodetect)"); 110 "(default: autodetect)");
111MODULE_PARM_DESC(number_of_cqs,
112 "Max number of CQs which can be allocated "
113 "(default: autodetect)");
114MODULE_PARM_DESC(number_of_qps,
115 "Max number of QPs which can be allocated "
116 "(default: autodetect)");
107 117
108DEFINE_RWLOCK(ehca_qp_idr_lock); 118DEFINE_RWLOCK(ehca_qp_idr_lock);
109DEFINE_RWLOCK(ehca_cq_idr_lock); 119DEFINE_RWLOCK(ehca_cq_idr_lock);
@@ -355,6 +365,25 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
355 if (rblock->memory_page_size_supported & pgsize_map[i]) 365 if (rblock->memory_page_size_supported & pgsize_map[i])
356 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1]; 366 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
357 367
368 /* Set maximum number of CQs and QPs to calculate EQ size */
369 if (ehca_max_qp == -1)
370 ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES);
371 else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) {
372 ehca_gen_err("Requested number of QPs is out of range (1 - %i) "
373 "specified by HW", rblock->max_qp);
374 ret = -EINVAL;
375 goto sense_attributes1;
376 }
377
378 if (ehca_max_cq == -1)
379 ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES);
380 else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) {
381 ehca_gen_err("Requested number of CQs is out of range (1 - %i) "
382 "specified by HW", rblock->max_cq);
383 ret = -EINVAL;
384 goto sense_attributes1;
385 }
386
358 /* query max MTU from first port -- it's the same for all ports */ 387 /* query max MTU from first port -- it's the same for all ports */
359 port = (struct hipz_query_port *)rblock; 388 port = (struct hipz_query_port *)rblock;
360 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port); 389 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
@@ -684,7 +713,7 @@ static int __devinit ehca_probe(struct of_device *dev,
684 struct ehca_shca *shca; 713 struct ehca_shca *shca;
685 const u64 *handle; 714 const u64 *handle;
686 struct ib_pd *ibpd; 715 struct ib_pd *ibpd;
687 int ret, i; 716 int ret, i, eq_size;
688 717
689 handle = of_get_property(dev->node, "ibm,hca-handle", NULL); 718 handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
690 if (!handle) { 719 if (!handle) {
@@ -705,6 +734,8 @@ static int __devinit ehca_probe(struct of_device *dev,
705 return -ENOMEM; 734 return -ENOMEM;
706 } 735 }
707 mutex_init(&shca->modify_mutex); 736 mutex_init(&shca->modify_mutex);
737 atomic_set(&shca->num_cqs, 0);
738 atomic_set(&shca->num_qps, 0);
708 for (i = 0; i < ARRAY_SIZE(shca->sport); i++) 739 for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
709 spin_lock_init(&shca->sport[i].mod_sqp_lock); 740 spin_lock_init(&shca->sport[i].mod_sqp_lock);
710 741
@@ -724,8 +755,9 @@ static int __devinit ehca_probe(struct of_device *dev,
724 goto probe1; 755 goto probe1;
725 } 756 }
726 757
758 eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp;
727 /* create event queues */ 759 /* create event queues */
728 ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048); 760 ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
729 if (ret) { 761 if (ret) {
730 ehca_err(&shca->ib_device, "Cannot create EQ."); 762 ehca_err(&shca->ib_device, "Cannot create EQ.");
731 goto probe1; 763 goto probe1;
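
The number_of_cqs/number_of_qps parameters introduced above feed straight into the event-queue sizing (eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp). A minimal userspace sketch of the same autodetect-or-validate logic, with the firmware-reported limits faked as plain variables:

#include <stdio.h>

#define SKETCH_MAX_NUM_QUEUES 0xffff	/* stands in for EHCA_MAX_NUM_QUEUES */

/* -1 means "autodetect": take the hardware limit, capped by the driver
 * limit.  Anything else must lie in 1..hw_max or is rejected. */
static int resolve_queue_limit(int requested, int hw_max)
{
	if (requested == -1)
		return hw_max < SKETCH_MAX_NUM_QUEUES ? hw_max : SKETCH_MAX_NUM_QUEUES;
	if (requested < 1 || requested > hw_max)
		return -1;			/* -EINVAL in the driver */
	return requested;
}

int main(void)
{
	int hw_max_qp = 65536, hw_max_cq = 65536;	/* pretend rblock->max_qp/max_cq */
	int max_qp = resolve_queue_limit(-1, hw_max_qp);	/* module param left at default */
	int max_cq = resolve_queue_limit(512, hw_max_cq);	/* user asked for 512 CQs */

	if (max_qp < 0 || max_cq < 0) {
		fprintf(stderr, "requested queue count out of range\n");
		return 1;
	}
	/* Same formula ehca_probe() now uses instead of the hard-coded 2048. */
	printf("eq_size = %d\n", 2 * max_cq + 4 * max_qp);
	return 0;
}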
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 46ae4eb2c4e1..f974367cad40 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -323,7 +323,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
323 } 323 }
324 324
325 e_mr->umem = ib_umem_get(pd->uobject->context, start, length, 325 e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
326 mr_access_flags); 326 mr_access_flags, 0);
327 if (IS_ERR(e_mr->umem)) { 327 if (IS_ERR(e_mr->umem)) {
328 ib_mr = (void *)e_mr->umem; 328 ib_mr = (void *)e_mr->umem;
329 goto reg_user_mr_exit1; 329 goto reg_user_mr_exit1;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 57bef1152cc2..18fba92fa7ae 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -421,8 +421,18 @@ static struct ehca_qp *internal_create_qp(
421 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num; 421 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
422 unsigned long flags; 422 unsigned long flags;
423 423
424 if (init_attr->create_flags) 424 if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) {
425 ehca_err(pd->device, "Unable to create QP, max number of %i "
426 "QPs reached.", ehca_max_qp);
427 ehca_err(pd->device, "To increase the maximum number of QPs "
428 "use the number_of_qps module parameter.\n");
429 return ERR_PTR(-ENOSPC);
430 }
431
432 if (init_attr->create_flags) {
433 atomic_dec(&shca->num_qps);
425 return ERR_PTR(-EINVAL); 434 return ERR_PTR(-EINVAL);
435 }
426 436
427 memset(&parms, 0, sizeof(parms)); 437 memset(&parms, 0, sizeof(parms));
428 qp_type = init_attr->qp_type; 438 qp_type = init_attr->qp_type;
@@ -431,6 +441,7 @@ static struct ehca_qp *internal_create_qp(
431 init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) { 441 init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
432 ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed", 442 ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
433 init_attr->sq_sig_type); 443 init_attr->sq_sig_type);
444 atomic_dec(&shca->num_qps);
434 return ERR_PTR(-EINVAL); 445 return ERR_PTR(-EINVAL);
435 } 446 }
436 447
@@ -455,6 +466,7 @@ static struct ehca_qp *internal_create_qp(
455 466
456 if (is_llqp && has_srq) { 467 if (is_llqp && has_srq) {
457 ehca_err(pd->device, "LLQPs can't have an SRQ"); 468 ehca_err(pd->device, "LLQPs can't have an SRQ");
469 atomic_dec(&shca->num_qps);
458 return ERR_PTR(-EINVAL); 470 return ERR_PTR(-EINVAL);
459 } 471 }
460 472
@@ -466,6 +478,7 @@ static struct ehca_qp *internal_create_qp(
466 ehca_err(pd->device, "no more than three SGEs " 478 ehca_err(pd->device, "no more than three SGEs "
467 "supported for SRQ pd=%p max_sge=%x", 479 "supported for SRQ pd=%p max_sge=%x",
468 pd, init_attr->cap.max_recv_sge); 480 pd, init_attr->cap.max_recv_sge);
481 atomic_dec(&shca->num_qps);
469 return ERR_PTR(-EINVAL); 482 return ERR_PTR(-EINVAL);
470 } 483 }
471 } 484 }
@@ -477,6 +490,7 @@ static struct ehca_qp *internal_create_qp(
477 qp_type != IB_QPT_SMI && 490 qp_type != IB_QPT_SMI &&
478 qp_type != IB_QPT_GSI) { 491 qp_type != IB_QPT_GSI) {
479 ehca_err(pd->device, "wrong QP Type=%x", qp_type); 492 ehca_err(pd->device, "wrong QP Type=%x", qp_type);
493 atomic_dec(&shca->num_qps);
480 return ERR_PTR(-EINVAL); 494 return ERR_PTR(-EINVAL);
481 } 495 }
482 496
@@ -490,6 +504,7 @@ static struct ehca_qp *internal_create_qp(
490 "or max_rq_wr=%x for RC LLQP", 504 "or max_rq_wr=%x for RC LLQP",
491 init_attr->cap.max_send_wr, 505 init_attr->cap.max_send_wr,
492 init_attr->cap.max_recv_wr); 506 init_attr->cap.max_recv_wr);
507 atomic_dec(&shca->num_qps);
493 return ERR_PTR(-EINVAL); 508 return ERR_PTR(-EINVAL);
494 } 509 }
495 break; 510 break;
@@ -497,6 +512,7 @@ static struct ehca_qp *internal_create_qp(
497 if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) { 512 if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
498 ehca_err(pd->device, "UD LLQP not supported " 513 ehca_err(pd->device, "UD LLQP not supported "
499 "by this adapter"); 514 "by this adapter");
515 atomic_dec(&shca->num_qps);
500 return ERR_PTR(-ENOSYS); 516 return ERR_PTR(-ENOSYS);
501 } 517 }
502 if (!(init_attr->cap.max_send_sge <= 5 518 if (!(init_attr->cap.max_send_sge <= 5
@@ -508,20 +524,22 @@ static struct ehca_qp *internal_create_qp(
508 "or max_recv_sge=%x for UD LLQP", 524 "or max_recv_sge=%x for UD LLQP",
509 init_attr->cap.max_send_sge, 525 init_attr->cap.max_send_sge,
510 init_attr->cap.max_recv_sge); 526 init_attr->cap.max_recv_sge);
527 atomic_dec(&shca->num_qps);
511 return ERR_PTR(-EINVAL); 528 return ERR_PTR(-EINVAL);
512 } else if (init_attr->cap.max_send_wr > 255) { 529 } else if (init_attr->cap.max_send_wr > 255) {
513 ehca_err(pd->device, 530 ehca_err(pd->device,
514 "Invalid Number of " 531 "Invalid Number of "
515 "max_send_wr=%x for UD QP_TYPE=%x", 532 "max_send_wr=%x for UD QP_TYPE=%x",
516 init_attr->cap.max_send_wr, qp_type); 533 init_attr->cap.max_send_wr, qp_type);
534 atomic_dec(&shca->num_qps);
517 return ERR_PTR(-EINVAL); 535 return ERR_PTR(-EINVAL);
518 } 536 }
519 break; 537 break;
520 default: 538 default:
521 ehca_err(pd->device, "unsupported LL QP Type=%x", 539 ehca_err(pd->device, "unsupported LL QP Type=%x",
522 qp_type); 540 qp_type);
541 atomic_dec(&shca->num_qps);
523 return ERR_PTR(-EINVAL); 542 return ERR_PTR(-EINVAL);
524 break;
525 } 543 }
526 } else { 544 } else {
527 int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI 545 int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
@@ -533,6 +551,7 @@ static struct ehca_qp *internal_create_qp(
533 "send_sge=%x recv_sge=%x max_sge=%x", 551 "send_sge=%x recv_sge=%x max_sge=%x",
534 init_attr->cap.max_send_sge, 552 init_attr->cap.max_send_sge,
535 init_attr->cap.max_recv_sge, max_sge); 553 init_attr->cap.max_recv_sge, max_sge);
554 atomic_dec(&shca->num_qps);
536 return ERR_PTR(-EINVAL); 555 return ERR_PTR(-EINVAL);
537 } 556 }
538 } 557 }
@@ -543,6 +562,7 @@ static struct ehca_qp *internal_create_qp(
543 my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL); 562 my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
544 if (!my_qp) { 563 if (!my_qp) {
545 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); 564 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
565 atomic_dec(&shca->num_qps);
546 return ERR_PTR(-ENOMEM); 566 return ERR_PTR(-ENOMEM);
547 } 567 }
548 568
@@ -823,6 +843,7 @@ create_qp_exit1:
823 843
824create_qp_exit0: 844create_qp_exit0:
825 kmem_cache_free(qp_cache, my_qp); 845 kmem_cache_free(qp_cache, my_qp);
846 atomic_dec(&shca->num_qps);
826 return ERR_PTR(ret); 847 return ERR_PTR(ret);
827} 848}
828 849
@@ -1948,6 +1969,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1948 if (HAS_SQ(my_qp)) 1969 if (HAS_SQ(my_qp))
1949 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); 1970 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
1950 kmem_cache_free(qp_cache, my_qp); 1971 kmem_cache_free(qp_cache, my_qp);
1972 atomic_dec(&shca->num_qps);
1951 return 0; 1973 return 0;
1952} 1974}
1953 1975
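
The QP accounting threaded through internal_create_qp() above follows a simple discipline: reserve a slot with atomic_add_unless() before any other work, and drop it with atomic_dec() on every early return as well as in internal_destroy_qp(). A small userspace sketch of the same pattern using C11 atomics (the kernel helpers differ in spelling, not in intent):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_qps;
static const int max_qps = 4;		/* stands in for ehca_max_qp */

/* Reserve a slot unless the limit is already reached; this mirrors
 * atomic_add_unless(&shca->num_qps, 1, ehca_max_qp). */
static bool reserve_qp(void)
{
	int cur = atomic_load(&num_qps);

	while (cur < max_qps)
		if (atomic_compare_exchange_weak(&num_qps, &cur, cur + 1))
			return true;
	return false;
}

static void release_qp(void)
{
	atomic_fetch_sub(&num_qps, 1);	/* atomic_dec() in the driver */
}

static int create_qp(bool fail_validation)
{
	if (!reserve_qp())
		return -1;		/* -ENOSPC path */
	if (fail_validation) {
		release_qp();		/* every early return must undo the reservation */
		return -2;		/* -EINVAL path */
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("create_qp -> %d (in use: %d)\n",
		       create_qp(false), atomic_load(&num_qps));
	return 0;
}

Missing the release on any one error path would leak a slot for the lifetime of the adapter, which is why the patch touches every return statement between the reservation and the success path.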
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index db4ba92f79fc..9d343b7c2f3b 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -195,7 +195,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
195 goto bail; 195 goto bail;
196 } 196 }
197 197
198 umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags); 198 umem = ib_umem_get(pd->uobject->context, start, length,
199 mr_access_flags, 0);
199 if (IS_ERR(umem)) 200 if (IS_ERR(umem))
200 return (void *) umem; 201 return (void *) umem;
201 202
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5e570bb0bb6f..4521319b1406 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -137,7 +137,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
137 int err; 137 int err;
138 138
139 *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe), 139 *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
140 IB_ACCESS_LOCAL_WRITE); 140 IB_ACCESS_LOCAL_WRITE, 1);
141 if (IS_ERR(*umem)) 141 if (IS_ERR(*umem))
142 return PTR_ERR(*umem); 142 return PTR_ERR(*umem);
143 143
@@ -221,7 +221,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
221 } 221 }
222 222
223 err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, 223 err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
224 cq->db.dma, &cq->mcq); 224 cq->db.dma, &cq->mcq, 0);
225 if (err) 225 if (err)
226 goto err_dbmap; 226 goto err_dbmap;
227 227
@@ -246,7 +246,7 @@ err_mtt:
246 if (context) 246 if (context)
247 ib_umem_release(cq->umem); 247 ib_umem_release(cq->umem);
248 else 248 else
249 mlx4_ib_free_cq_buf(dev, &cq->buf, entries); 249 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
250 250
251err_db: 251err_db:
252 if (!context) 252 if (!context)
@@ -434,7 +434,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
434 mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db); 434 mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
435 ib_umem_release(mcq->umem); 435 ib_umem_release(mcq->umem);
436 } else { 436 } else {
437 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1); 437 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
438 mlx4_db_free(dev->dev, &mcq->db); 438 mlx4_db_free(dev->dev, &mcq->db);
439 } 439 }
440 440
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 8e342cc9baec..8aee4233b388 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -63,7 +63,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
63 page->user_virt = (virt & PAGE_MASK); 63 page->user_virt = (virt & PAGE_MASK);
64 page->refcnt = 0; 64 page->refcnt = 0;
65 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, 65 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
66 PAGE_SIZE, 0); 66 PAGE_SIZE, 0, 0);
67 if (IS_ERR(page->umem)) { 67 if (IS_ERR(page->umem)) {
68 err = PTR_ERR(page->umem); 68 err = PTR_ERR(page->umem);
69 kfree(page); 69 kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index fe2c2e94a5f8..68e92485fc76 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -132,7 +132,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
132 if (!mr) 132 if (!mr)
133 return ERR_PTR(-ENOMEM); 133 return ERR_PTR(-ENOMEM);
134 134
135 mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags); 135 mr->umem = ib_umem_get(pd->uobject->context, start, length,
136 access_flags, 0);
136 if (IS_ERR(mr->umem)) { 137 if (IS_ERR(mr->umem)) {
137 err = PTR_ERR(mr->umem); 138 err = PTR_ERR(mr->umem);
138 goto err_free; 139 goto err_free;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 80ea8b9e7761..8e02ecfec188 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -482,7 +482,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
482 goto err; 482 goto err;
483 483
484 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, 484 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
485 qp->buf_size, 0); 485 qp->buf_size, 0, 0);
486 if (IS_ERR(qp->umem)) { 486 if (IS_ERR(qp->umem)) {
487 err = PTR_ERR(qp->umem); 487 err = PTR_ERR(qp->umem);
488 goto err; 488 goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 204619702f9d..12d6bc6f8007 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -109,7 +109,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
109 } 109 }
110 110
111 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, 111 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
112 buf_size, 0); 112 buf_size, 0, 0);
113 if (IS_ERR(srq->umem)) { 113 if (IS_ERR(srq->umem)) {
114 err = PTR_ERR(srq->umem); 114 err = PTR_ERR(srq->umem);
115 goto err_srq; 115 goto err_srq;
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 3538da16e3fe..820205dec560 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -818,15 +818,9 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
818 818
819void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) 819void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
820{ 820{
821 u32 key;
822
823 if (!fmr->maps) 821 if (!fmr->maps)
824 return; 822 return;
825 823
826 key = tavor_key_to_hw_index(fmr->ibmr.lkey);
827 key &= dev->limits.num_mpts - 1;
828 fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
829
830 fmr->maps = 0; 824 fmr->maps = 0;
831 825
832 writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt); 826 writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
@@ -834,16 +828,9 @@ void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
834 828
835void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) 829void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
836{ 830{
837 u32 key;
838
839 if (!fmr->maps) 831 if (!fmr->maps)
840 return; 832 return;
841 833
842 key = arbel_key_to_hw_index(fmr->ibmr.lkey);
843 key &= dev->limits.num_mpts - 1;
844 key = adjust_key(dev, key);
845 fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
846
847 fmr->maps = 0; 834 fmr->maps = 0;
848 835
849 *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; 836 *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 696e1f302332..be34f99ca625 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -39,6 +39,8 @@
39#include <rdma/ib_smi.h> 39#include <rdma/ib_smi.h>
40#include <rdma/ib_umem.h> 40#include <rdma/ib_umem.h>
41#include <rdma/ib_user_verbs.h> 41#include <rdma/ib_user_verbs.h>
42
43#include <linux/sched.h>
42#include <linux/mm.h> 44#include <linux/mm.h>
43 45
44#include "mthca_dev.h" 46#include "mthca_dev.h"
@@ -367,6 +369,8 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
367 return ERR_PTR(-EFAULT); 369 return ERR_PTR(-EFAULT);
368 } 370 }
369 371
372 context->reg_mr_warned = 0;
373
370 return &context->ibucontext; 374 return &context->ibucontext;
371} 375}
372 376
@@ -1006,17 +1010,31 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1006 struct mthca_dev *dev = to_mdev(pd->device); 1010 struct mthca_dev *dev = to_mdev(pd->device);
1007 struct ib_umem_chunk *chunk; 1011 struct ib_umem_chunk *chunk;
1008 struct mthca_mr *mr; 1012 struct mthca_mr *mr;
1013 struct mthca_reg_mr ucmd;
1009 u64 *pages; 1014 u64 *pages;
1010 int shift, n, len; 1015 int shift, n, len;
1011 int i, j, k; 1016 int i, j, k;
1012 int err = 0; 1017 int err = 0;
1013 int write_mtt_size; 1018 int write_mtt_size;
1014 1019
1020 if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
1021 if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
1022 mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
1023 current->comm);
1024 mthca_warn(dev, " Update libmthca to fix this.\n");
1025 }
1026 ++to_mucontext(pd->uobject->context)->reg_mr_warned;
1027 ucmd.mr_attrs = 0;
1028 } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
1029 return ERR_PTR(-EFAULT);
1030
1015 mr = kmalloc(sizeof *mr, GFP_KERNEL); 1031 mr = kmalloc(sizeof *mr, GFP_KERNEL);
1016 if (!mr) 1032 if (!mr)
1017 return ERR_PTR(-ENOMEM); 1033 return ERR_PTR(-ENOMEM);
1018 1034
1019 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); 1035 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
1036 ucmd.mr_attrs & MTHCA_MR_DMASYNC);
1037
1020 if (IS_ERR(mr->umem)) { 1038 if (IS_ERR(mr->umem)) {
1021 err = PTR_ERR(mr->umem); 1039 err = PTR_ERR(mr->umem);
1022 goto err; 1040 goto err;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 262616c8ebb6..934bf9544037 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -67,6 +67,7 @@ struct mthca_ucontext {
67 struct ib_ucontext ibucontext; 67 struct ib_ucontext ibucontext;
68 struct mthca_uar uar; 68 struct mthca_uar uar;
69 struct mthca_user_db_table *db_tab; 69 struct mthca_user_db_table *db_tab;
70 int reg_mr_warned;
70}; 71};
71 72
72struct mthca_mtt; 73struct mthca_mtt;
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 02cc0a766f3a..e1262c942db8 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -61,6 +61,16 @@ struct mthca_alloc_pd_resp {
61 __u32 reserved; 61 __u32 reserved;
62}; 62};
63 63
64struct mthca_reg_mr {
65/*
66 * Mark the memory region with a DMA attribute that causes
67 * in-flight DMA to be flushed when the region is written to:
68 */
69#define MTHCA_MR_DMASYNC 0x1
70 __u32 mr_attrs;
71 __u32 reserved;
72};
73
64struct mthca_create_cq { 74struct mthca_create_cq {
65 __u32 lkey; 75 __u32 lkey;
66 __u32 pdn; 76 __u32 pdn;
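
The struct above extends the mthca user/kernel ABI, and mthca_reg_user_mr() guards against older libmthca builds by checking udata->inlen before copying the new command: too-short input is treated as mr_attrs == 0 (no DMASYNC) with a one-time warning per context, longer input is copied with ib_copy_from_udata() and its MTHCA_MR_DMASYNC bit becomes the new trailing argument to ib_umem_get() (every other driver in this diff simply passes 0, and the mlx4 CQ path passes 1). A hedged userspace sketch of that versioned-command idea, with all names invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_MR_DMASYNC 0x1

struct sketch_reg_mr_cmd {		/* the "new" command appended to the request */
	uint32_t mr_attrs;
	uint32_t reserved;
};

/* Parse an optional trailing command: if the client sent fewer bytes than
 * the current definition, fall back to defaults instead of failing. */
static void parse_reg_mr(const void *buf, size_t inlen,
			 struct sketch_reg_mr_cmd *out, int *warned)
{
	if (inlen < sizeof(*out)) {
		if (!(*warned)++)
			fprintf(stderr, "old client: no MR attrs passed, assuming 0\n");
		memset(out, 0, sizeof(*out));
		return;
	}
	memcpy(out, buf, sizeof(*out));	/* stands in for ib_copy_from_udata() */
}

int main(void)
{
	struct sketch_reg_mr_cmd cmd, newer = { SKETCH_MR_DMASYNC, 0 };
	int warned = 0;

	parse_reg_mr(NULL, 0, &cmd, &warned);			/* old library */
	parse_reg_mr(&newer, sizeof(newer), &cmd, &warned);	/* updated library */
	printf("dmasync requested: %d\n", !!(cmd.mr_attrs & SKETCH_MR_DMASYNC));
	return 0;
}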
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
index 2aeb7ac972a9..d449eb6ec78e 100644
--- a/drivers/infiniband/hw/nes/Kconfig
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -2,6 +2,7 @@ config INFINIBAND_NES
2 tristate "NetEffect RNIC Driver" 2 tristate "NetEffect RNIC Driver"
3 depends on PCI && INET && INFINIBAND 3 depends on PCI && INET && INFINIBAND
4 select LIBCRC32C 4 select LIBCRC32C
5 select INET_LRO
5 ---help--- 6 ---help---
6 This is a low-level driver for NetEffect RDMA enabled 7 This is a low-level driver for NetEffect RDMA enabled
7 Network Interface Cards (RNIC). 8 Network Interface Cards (RNIC).
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index a4e9269a29bd..9f7364a9096d 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -91,6 +91,10 @@ unsigned int nes_debug_level = 0;
91module_param_named(debug_level, nes_debug_level, uint, 0644); 91module_param_named(debug_level, nes_debug_level, uint, 0644);
92MODULE_PARM_DESC(debug_level, "Enable debug output level"); 92MODULE_PARM_DESC(debug_level, "Enable debug output level");
93 93
94unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
 95module_param(nes_lro_max_aggr, uint, 0444);
 96MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");

97
94LIST_HEAD(nes_adapter_list); 98LIST_HEAD(nes_adapter_list);
95static LIST_HEAD(nes_dev_list); 99static LIST_HEAD(nes_dev_list);
96 100
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index cdf2e9ad62f7..1f9f7bf73862 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -173,6 +173,7 @@ extern int disable_mpa_crc;
173extern unsigned int send_first; 173extern unsigned int send_first;
174extern unsigned int nes_drv_opt; 174extern unsigned int nes_drv_opt;
175extern unsigned int nes_debug_level; 175extern unsigned int nes_debug_level;
176extern unsigned int nes_lro_max_aggr;
176 177
177extern struct list_head nes_adapter_list; 178extern struct list_head nes_adapter_list;
178 179
@@ -535,8 +536,8 @@ int nes_register_ofa_device(struct nes_ib_device *);
535int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *); 536int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
536void nes_write_1G_phy_reg(struct nes_device *, u8, u8, u16); 537void nes_write_1G_phy_reg(struct nes_device *, u8, u8, u16);
537void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *); 538void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *);
538void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16); 539void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16, u16);
539void nes_read_10G_phy_reg(struct nes_device *, u16, u8); 540void nes_read_10G_phy_reg(struct nes_device *, u8, u8, u16);
540struct nes_cqp_request *nes_get_cqp_request(struct nes_device *); 541struct nes_cqp_request *nes_get_cqp_request(struct nes_device *);
541void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *, int); 542void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *, int);
542int nes_arp_table(struct nes_device *, u32, u8 *, u32); 543int nes_arp_table(struct nes_device *, u32, u8 *, u32);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index d940fc27129a..9a4b40fae40d 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -594,7 +594,7 @@ static void nes_cm_timer_tick(unsigned long pass)
594 continue; 594 continue;
595 } 595 }
596 /* this seems like the correct place, but leave send entry unprotected */ 596 /* this seems like the correct place, but leave send entry unprotected */
597 // spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); 597 /* spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); */
598 atomic_inc(&send_entry->skb->users); 598 atomic_inc(&send_entry->skb->users);
599 cm_packets_retrans++; 599 cm_packets_retrans++;
600 nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p," 600 nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p,"
@@ -1335,7 +1335,7 @@ static int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
1335 cm_node->loc_addr, cm_node->loc_port, 1335 cm_node->loc_addr, cm_node->loc_port,
1336 cm_node->rem_addr, cm_node->rem_port, 1336 cm_node->rem_addr, cm_node->rem_port,
1337 cm_node->state, atomic_read(&cm_node->ref_count)); 1337 cm_node->state, atomic_read(&cm_node->ref_count));
1338 // create event 1338 /* create event */
1339 cm_node->state = NES_CM_STATE_CLOSED; 1339 cm_node->state = NES_CM_STATE_CLOSED;
1340 1340
1341 create_event(cm_node, NES_CM_EVENT_ABORTED); 1341 create_event(cm_node, NES_CM_EVENT_ABORTED);
@@ -1669,7 +1669,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1669 if (!cm_node) 1669 if (!cm_node)
1670 return NULL; 1670 return NULL;
1671 1671
1672 // set our node side to client (active) side 1672 /* set our node side to client (active) side */
1673 cm_node->tcp_cntxt.client = 1; 1673 cm_node->tcp_cntxt.client = 1;
1674 cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; 1674 cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
1675 1675
@@ -1694,7 +1694,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1694 loopbackremotenode->mpa_frame_size = mpa_frame_size - 1694 loopbackremotenode->mpa_frame_size = mpa_frame_size -
1695 sizeof(struct ietf_mpa_frame); 1695 sizeof(struct ietf_mpa_frame);
1696 1696
1697 // we are done handling this state, set node to a TSA state 1697 /* we are done handling this state, set node to a TSA state */
1698 cm_node->state = NES_CM_STATE_TSA; 1698 cm_node->state = NES_CM_STATE_TSA;
1699 cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num; 1699 cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num;
1700 loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num; 1700 loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 08964cc7e98a..8dc70f9bad2f 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -38,6 +38,7 @@
38#include <linux/ip.h> 38#include <linux/ip.h>
39#include <linux/tcp.h> 39#include <linux/tcp.h>
40#include <linux/if_vlan.h> 40#include <linux/if_vlan.h>
41#include <linux/inet_lro.h>
41 42
42#include "nes.h" 43#include "nes.h"
43 44
@@ -832,7 +833,7 @@ static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_cou
832 nes_write_indexed(nesdev, 0x00000900, 0x20000001); 833 nes_write_indexed(nesdev, 0x00000900, 0x20000001);
833 nes_write_indexed(nesdev, 0x000060C0, 0x0000028e); 834 nes_write_indexed(nesdev, 0x000060C0, 0x0000028e);
834 nes_write_indexed(nesdev, 0x000060C8, 0x00000020); 835 nes_write_indexed(nesdev, 0x000060C8, 0x00000020);
835 // 836
836 nes_write_indexed(nesdev, 0x000001EC, 0x7b2625a0); 837 nes_write_indexed(nesdev, 0x000001EC, 0x7b2625a0);
837 /* nes_write_indexed(nesdev, 0x000001EC, 0x5f2625a0); */ 838 /* nes_write_indexed(nesdev, 0x000001EC, 0x5f2625a0); */
838 839
@@ -1207,11 +1208,16 @@ int nes_init_phy(struct nes_device *nesdev)
1207{ 1208{
1208 struct nes_adapter *nesadapter = nesdev->nesadapter; 1209 struct nes_adapter *nesadapter = nesdev->nesadapter;
1209 u32 counter = 0; 1210 u32 counter = 0;
1211 u32 sds_common_control0;
1210 u32 mac_index = nesdev->mac_index; 1212 u32 mac_index = nesdev->mac_index;
1211 u32 tx_config; 1213 u32 tx_config = 0;
1212 u16 phy_data; 1214 u16 phy_data;
1215 u32 temp_phy_data = 0;
1216 u32 temp_phy_data2 = 0;
1217 u32 i = 0;
1213 1218
1214 if (nesadapter->OneG_Mode) { 1219 if ((nesadapter->OneG_Mode) &&
1220 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
1215 nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index); 1221 nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
1216 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) { 1222 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
1217 printk(PFX "%s: Programming mdc config for 1G\n", __func__); 1223 printk(PFX "%s: Programming mdc config for 1G\n", __func__);
@@ -1223,7 +1229,7 @@ int nes_init_phy(struct nes_device *nesdev)
1223 nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data); 1229 nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data);
1224 nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n", 1230 nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n",
1225 nesadapter->phy_index[mac_index], phy_data); 1231 nesadapter->phy_index[mac_index], phy_data);
1226 nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000); 1232 nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000);
1227 1233
1228 /* Reset the PHY */ 1234 /* Reset the PHY */
1229 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000); 1235 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000);
@@ -1277,12 +1283,126 @@ int nes_init_phy(struct nes_device *nesdev)
1277 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); 1283 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
1278 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300); 1284 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300);
1279 } else { 1285 } else {
1280 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) { 1286 if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) ||
1287 (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
1281 /* setup 10G MDIO operation */ 1288 /* setup 10G MDIO operation */
1282 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); 1289 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1283 tx_config |= 0x14; 1290 tx_config |= 0x14;
1284 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); 1291 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1285 } 1292 }
1293 if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
1294 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
1295
1296 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1297 mdelay(10);
1298 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
1299 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1300
1301 /*
1302 * if firmware is already running (like from a
1303 * driver un-load/load, don't do anything.
1304 */
1305 if (temp_phy_data == temp_phy_data2) {
1306 /* configure QT2505 AMCC PHY */
1307 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0x0000, 0x8000);
1308 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0000);
1309 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc302, 0x0044);
1310 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc318, 0x0052);
1311 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008);
1312 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098);
1313 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00);
1314 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0000);
1315 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528);
1316
1317 /*
1318 * remove micro from reset; chip boots from ROM,
1319 * uploads EEPROM f/w image, uC executes f/w
1320 */
1321 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0002);
1322
1323 /*
1324 * wait for heart beat to start to
1325 * know loading is done
1326 */
1327 counter = 0;
1328 do {
1329 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
1330 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1331 if (counter++ > 1000) {
1332 nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from heartbeat check <this is bad!!!> \n");
1333 break;
1334 }
1335 mdelay(100);
1336 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
1337 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1338 } while ((temp_phy_data2 == temp_phy_data));
1339
1340 /*
1341 * wait for tracking to start to know
1342 * f/w is good to go
1343 */
1344 counter = 0;
1345 do {
1346 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7fd);
1347 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1348 if (counter++ > 1000) {
1349 nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from status check <this is bad!!!> \n");
1350 break;
1351 }
1352 mdelay(1000);
1353 /*
1354 * nes_debug(NES_DBG_PHY, "AMCC PHY- phy_status not ready yet = 0x%02X\n",
1355 * temp_phy_data);
1356 */
1357 } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70));
1358
1359 /* set LOS Control invert RXLOSB_I_PADINV */
1360 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd003, 0x0000);
1361 /* set LOS Control to mask of RXLOSB_I */
1362 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc314, 0x0042);
1363 /* set LED1 to input mode (LED1 and LED2 share same LED) */
1364 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd006, 0x0007);
1365 /* set LED2 to RX link_status and activity */
1366 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd007, 0x000A);
1367 /* set LED3 to RX link_status */
1368 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd008, 0x0009);
1369
1370 /*
1371 * reset the res-calibration on t2
1372 * serdes; ensures it is stable after
1373 * the amcc phy is stable
1374 */
1375
1376 sds_common_control0 = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
1377 sds_common_control0 |= 0x1;
1378 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
1379
1380 /* release the res-calibration reset */
1381 sds_common_control0 &= 0xfffffffe;
1382 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
1383
1384 i = 0;
1385 while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
1386 && (i++ < 5000)) {
1387 /* mdelay(1); */
1388 }
1389
1390 /*
1391 * wait for link train done before moving on,
1392 * or will get an interupt storm
1393 */
1394 counter = 0;
1395 do {
1396 temp_phy_data = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1397 (0x200 * (nesdev->mac_index & 1)));
1398 if (counter++ > 1000) {
 1399 nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from link train wait <this is bad, link didn't train!!!>\n");
1400 break;
1401 }
1402 mdelay(1);
1403 } while (((temp_phy_data & 0x0f1f0000) != 0x0f0f0000));
1404 }
1405 }
1286 } 1406 }
1287 return 0; 1407 return 0;
1288} 1408}
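
The ARGUS/AMCC bring-up above leans on one idiom repeatedly: poll a PHY status register until it reports the expected condition — the heartbeat register starting to change, the tracking status reaching 0x50/0x70, or the PCS status matching 0x0f0f0000 under the 0x0f1f0000 mask — and give up after a bounded number of iterations so a dead PHY cannot hang device probe. A compact userspace sketch of the bounded-poll pattern, with the register read stubbed out:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for nes_read_indexed()/nes_read_10G_phy_reg(); here the link
 * simply "trains" after a handful of polls. */
static unsigned int read_status(void)
{
	static unsigned int ticks;
	return ++ticks >= 8 ? 0x0f0f0000 : 0;
}

/* Poll until the masked status matches the expected value, or give up
 * after max_tries iterations. */
static bool poll_for(unsigned int mask, unsigned int want,
		     int max_tries, useconds_t delay_us)
{
	while (max_tries-- > 0) {
		if ((read_status() & mask) == want)
			return true;
		usleep(delay_us);		/* mdelay() in the driver */
	}
	return false;				/* the "<this is bad>" branch */
}

int main(void)
{
	/* Same mask/value pair the driver uses for "link train done". */
	bool up = poll_for(0x0f1f0000, 0x0f0f0000, 1000, 1000);

	printf("link %s\n", up ? "trained" : "did not train");
	return 0;
}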
@@ -1375,6 +1495,25 @@ static void nes_rq_wqes_timeout(unsigned long parm)
1375} 1495}
1376 1496
1377 1497
1498static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr,
1499 void **tcph, u64 *hdr_flags, void *priv)
1500{
1501 unsigned int ip_len;
1502 struct iphdr *iph;
1503 skb_reset_network_header(skb);
1504 iph = ip_hdr(skb);
1505 if (iph->protocol != IPPROTO_TCP)
1506 return -1;
1507 ip_len = ip_hdrlen(skb);
1508 skb_set_transport_header(skb, ip_len);
1509 *tcph = tcp_hdr(skb);
1510
1511 *hdr_flags = LRO_IPV4 | LRO_TCP;
1512 *iphdr = iph;
1513 return 0;
1514}
1515
1516
1378/** 1517/**
1379 * nes_init_nic_qp 1518 * nes_init_nic_qp
1380 */ 1519 */
@@ -1520,10 +1659,10 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
1520 } 1659 }
1521 1660
1522 u64temp = (u64)nesvnic->nic.sq_pbase; 1661 u64temp = (u64)nesvnic->nic.sq_pbase;
1523 nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp); 1662 nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
1524 nic_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); 1663 nic_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
1525 u64temp = (u64)nesvnic->nic.rq_pbase; 1664 u64temp = (u64)nesvnic->nic.rq_pbase;
1526 nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp); 1665 nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
1527 nic_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); 1666 nic_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
1528 1667
1529 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP | 1668 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
@@ -1575,7 +1714,7 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
1575 nic_rqe = &nesvnic->nic.rq_vbase[counter]; 1714 nic_rqe = &nesvnic->nic.rq_vbase[counter];
1576 nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size); 1715 nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size);
1577 nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0; 1716 nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
1578 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem); 1717 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
1579 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32)); 1718 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
1580 nesvnic->nic.rx_skb[counter] = skb; 1719 nesvnic->nic.rx_skb[counter] = skb;
1581 } 1720 }
@@ -1592,15 +1731,21 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
1592 nesvnic->rq_wqes_timer.function = nes_rq_wqes_timeout; 1731 nesvnic->rq_wqes_timer.function = nes_rq_wqes_timeout;
1593 nesvnic->rq_wqes_timer.data = (unsigned long)nesvnic; 1732 nesvnic->rq_wqes_timer.data = (unsigned long)nesvnic;
1594 nes_debug(NES_DBG_INIT, "NAPI support Enabled\n"); 1733 nes_debug(NES_DBG_INIT, "NAPI support Enabled\n");
1595
1596 if (nesdev->nesadapter->et_use_adaptive_rx_coalesce) 1734 if (nesdev->nesadapter->et_use_adaptive_rx_coalesce)
1597 { 1735 {
1598 nes_nic_init_timer(nesdev); 1736 nes_nic_init_timer(nesdev);
1599 if (netdev->mtu > 1500) 1737 if (netdev->mtu > 1500)
1600 jumbomode = 1; 1738 jumbomode = 1;
1601 nes_nic_init_timer_defaults(nesdev, jumbomode); 1739 nes_nic_init_timer_defaults(nesdev, jumbomode);
1602 } 1740 }
 1603 1741 nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr;
1742 nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS;
1743 nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc;
1744 nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;
1745 nesvnic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1746 nesvnic->lro_mgr.dev = netdev;
1747 nesvnic->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1748 nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1604 return 0; 1749 return 0;
1605} 1750}
1606 1751
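
The LRO glue added in this hunk has two halves: the lro_mgr bookkeeping filled in above, and the nes_lro_get_skb_hdr() callback earlier in the file, whose only job is to tell the inet_lro engine where the IPv4 and TCP headers sit (and to refuse anything that is not TCP). A userspace sketch of that header-location step — not the kernel callback itself, just the same parsing logic applied to a raw packet buffer:

#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Locate the TCP header inside a raw IPv4 packet: reject non-TCP,
 * honour the variable IP header length, hand back both header pointers. */
static int locate_tcp_header(const unsigned char *pkt, size_t len,
			     const struct iphdr **iph_out,
			     const struct tcphdr **tcph_out)
{
	const struct iphdr *iph = (const struct iphdr *)pkt;
	size_t ip_len;

	if (len < sizeof(*iph) || iph->protocol != IPPROTO_TCP)
		return -1;			/* LRO only aggregates TCP/IPv4 */
	ip_len = iph->ihl * 4;			/* ip_hdrlen() in the kernel */
	if (len < ip_len + sizeof(struct tcphdr))
		return -1;
	*iph_out = iph;
	*tcph_out = (const struct tcphdr *)(pkt + ip_len);
	return 0;
}

int main(void)
{
	unsigned char pkt[64];
	struct iphdr ip = { .version = 4, .ihl = 5, .protocol = IPPROTO_TCP };
	const struct iphdr *iph;
	const struct tcphdr *tcph;

	memset(pkt, 0, sizeof(pkt));
	memcpy(pkt, &ip, sizeof(ip));
	if (!locate_tcp_header(pkt, sizeof(pkt), &iph, &tcph))
		printf("TCP header starts at offset %zu\n",
		       (size_t)((const unsigned char *)tcph - pkt));
	return 0;
}

The lro_mgr fields filled in above (max_aggr, max_desc, lro_arr, get_skb_header, and the NAPI/VLAN-extract feature flags) are what the generic inet_lro code consults when deciding whether a given receive can be aggregated.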
@@ -1620,8 +1765,8 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
1620 1765
1621 /* Free remaining NIC receive buffers */ 1766 /* Free remaining NIC receive buffers */
1622 while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) { 1767 while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
1623 nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail]; 1768 nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
1624 wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]); 1769 wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
1625 wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32; 1770 wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
1626 pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag, 1771 pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag,
1627 nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); 1772 nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
@@ -1704,17 +1849,17 @@ int nes_napi_isr(struct nes_device *nesdev)
1704 /* iff NIC, process here, else wait for DPC */ 1849 /* iff NIC, process here, else wait for DPC */
1705 if ((int_stat) && ((int_stat & 0x0000ff00) == int_stat)) { 1850 if ((int_stat) && ((int_stat & 0x0000ff00) == int_stat)) {
1706 nesdev->napi_isr_ran = 0; 1851 nesdev->napi_isr_ran = 0;
1707 nes_write32(nesdev->regs+NES_INT_STAT, 1852 nes_write32(nesdev->regs + NES_INT_STAT,
1708 (int_stat & 1853 (int_stat &
1709 ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3))); 1854 ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 | NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3)));
1710 1855
1711 /* Process the CEQs */ 1856 /* Process the CEQs */
1712 nes_process_ceq(nesdev, &nesdev->nesadapter->ceq[nesdev->nic_ceq_index]); 1857 nes_process_ceq(nesdev, &nesdev->nesadapter->ceq[nesdev->nic_ceq_index]);
1713 1858
1714 if (unlikely((((nesadapter->et_rx_coalesce_usecs_irq) && 1859 if (unlikely((((nesadapter->et_rx_coalesce_usecs_irq) &&
1715 (!nesadapter->et_use_adaptive_rx_coalesce)) || 1860 (!nesadapter->et_use_adaptive_rx_coalesce)) ||
1716 ((nesadapter->et_use_adaptive_rx_coalesce) && 1861 ((nesadapter->et_use_adaptive_rx_coalesce) &&
1717 (nesdev->deepcq_count > nesadapter->et_pkt_rate_low)))) ) { 1862 (nesdev->deepcq_count > nesadapter->et_pkt_rate_low))))) {
1718 if ((nesdev->int_req & NES_INT_TIMER) == 0) { 1863 if ((nesdev->int_req & NES_INT_TIMER) == 0) {
1719 /* Enable Periodic timer interrupts */ 1864 /* Enable Periodic timer interrupts */
1720 nesdev->int_req |= NES_INT_TIMER; 1865 nesdev->int_req |= NES_INT_TIMER;
@@ -1792,12 +1937,12 @@ void nes_dpc(unsigned long param)
1792 } 1937 }
1793 1938
1794 if (int_stat) { 1939 if (int_stat) {
1795 if (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0| 1940 if (int_stat & ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0|
1796 NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)) { 1941 NES_INT_MAC1|NES_INT_MAC2 | NES_INT_MAC3)) {
1797 /* Ack the interrupts */ 1942 /* Ack the interrupts */
1798 nes_write32(nesdev->regs+NES_INT_STAT, 1943 nes_write32(nesdev->regs+NES_INT_STAT,
1799 (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0| 1944 (int_stat & ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0|
1800 NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3))); 1945 NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3)));
1801 } 1946 }
1802 1947
1803 temp_int_stat = int_stat; 1948 temp_int_stat = int_stat;
@@ -1862,8 +2007,8 @@ void nes_dpc(unsigned long param)
1862 } 2007 }
1863 } 2008 }
1864 /* Don't use the interface interrupt bit stay in loop */ 2009 /* Don't use the interface interrupt bit stay in loop */
1865 int_stat &= ~NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0| 2010 int_stat &= ~NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 |
1866 NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3; 2011 NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3;
1867 } while ((int_stat != 0) && (loop_counter++ < MAX_DPC_ITERATIONS)); 2012 } while ((int_stat != 0) && (loop_counter++ < MAX_DPC_ITERATIONS));
1868 2013
1869 if (timer_ints == 1) { 2014 if (timer_ints == 1) {
@@ -1874,9 +2019,9 @@ void nes_dpc(unsigned long param)
1874 nesdev->timer_only_int_count = 0; 2019 nesdev->timer_only_int_count = 0;
1875 nesdev->int_req &= ~NES_INT_TIMER; 2020 nesdev->int_req &= ~NES_INT_TIMER;
1876 nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); 2021 nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
1877 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); 2022 nes_write32(nesdev->regs + NES_INT_MASK, ~nesdev->int_req);
1878 } else { 2023 } else {
1879 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); 2024 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
1880 } 2025 }
1881 } else { 2026 } else {
1882 if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) 2027 if (unlikely(nesadapter->et_use_adaptive_rx_coalesce))
@@ -1884,7 +2029,7 @@ void nes_dpc(unsigned long param)
1884 nes_nic_init_timer(nesdev); 2029 nes_nic_init_timer(nesdev);
1885 } 2030 }
1886 nesdev->timer_only_int_count = 0; 2031 nesdev->timer_only_int_count = 0;
1887 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); 2032 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
1888 } 2033 }
1889 } else { 2034 } else {
1890 nesdev->timer_only_int_count = 0; 2035 nesdev->timer_only_int_count = 0;
@@ -1933,7 +2078,7 @@ static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
1933 do { 2078 do {
1934 if (le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]) & 2079 if (le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]) &
1935 NES_CEQE_VALID) { 2080 NES_CEQE_VALID) {
1936 u64temp = (((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX])))<<32) | 2081 u64temp = (((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]))) << 32) |
1937 ((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_LOW_IDX]))); 2082 ((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_LOW_IDX])));
1938 u64temp <<= 1; 2083 u64temp <<= 1;
1939 cq = *((struct nes_hw_cq **)&u64temp); 2084 cq = *((struct nes_hw_cq **)&u64temp);
@@ -1961,7 +2106,7 @@ static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
1961 */ 2106 */
1962static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq) 2107static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
1963{ 2108{
1964// u64 u64temp; 2109 /* u64 u64temp; */
1965 u32 head; 2110 u32 head;
1966 u32 aeq_size; 2111 u32 aeq_size;
1967 u32 aeqe_misc; 2112 u32 aeqe_misc;
@@ -1980,8 +2125,10 @@ static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
1980 if (aeqe_misc & (NES_AEQE_QP|NES_AEQE_CQ)) { 2125 if (aeqe_misc & (NES_AEQE_QP|NES_AEQE_CQ)) {
1981 if (aeqe_cq_id >= NES_FIRST_QPN) { 2126 if (aeqe_cq_id >= NES_FIRST_QPN) {
1982 /* dealing with an accelerated QP related AE */ 2127 /* dealing with an accelerated QP related AE */
1983// u64temp = (((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])))<<32) | 2128 /*
1984// ((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]))); 2129 * u64temp = (((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX]))) << 32) |
2130 * ((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX])));
2131 */
1985 nes_process_iwarp_aeqe(nesdev, (struct nes_hw_aeqe *)aeqe); 2132 nes_process_iwarp_aeqe(nesdev, (struct nes_hw_aeqe *)aeqe);
1986 } else { 2133 } else {
1987 /* TODO: dealing with a CQP related AE */ 2134 /* TODO: dealing with a CQP related AE */
@@ -2081,6 +2228,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2081 u32 u32temp; 2228 u32 u32temp;
2082 u16 phy_data; 2229 u16 phy_data;
2083 u16 temp_phy_data; 2230 u16 temp_phy_data;
2231 u32 pcs_val = 0x0f0f0000;
2232 u32 pcs_mask = 0x0f1f0000;
2084 2233
2085 spin_lock_irqsave(&nesadapter->phy_lock, flags); 2234 spin_lock_irqsave(&nesadapter->phy_lock, flags);
2086 if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) { 2235 if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) {
@@ -2144,13 +2293,30 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2144 nes_debug(NES_DBG_PHY, "Eth SERDES Common Status: 0=0x%08X, 1=0x%08X\n", 2293 nes_debug(NES_DBG_PHY, "Eth SERDES Common Status: 0=0x%08X, 1=0x%08X\n",
2145 nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0), 2294 nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0),
2146 nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0+0x200)); 2295 nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0+0x200));
2147 pcs_control_status = nes_read_indexed(nesdev, 2296
2148 NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200)); 2297 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_PUMA_1G) {
2149 pcs_control_status = nes_read_indexed(nesdev, 2298 switch (mac_index) {
2150 NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200)); 2299 case 1:
2300 case 3:
2301 pcs_control_status = nes_read_indexed(nesdev,
2302 NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
2303 break;
2304 default:
2305 pcs_control_status = nes_read_indexed(nesdev,
2306 NES_IDX_PHY_PCS_CONTROL_STATUS0);
2307 break;
2308 }
2309 } else {
2310 pcs_control_status = nes_read_indexed(nesdev,
2311 NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index & 1) * 0x200));
2312 pcs_control_status = nes_read_indexed(nesdev,
2313 NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index & 1) * 0x200));
2314 }
2315
2151 nes_debug(NES_DBG_PHY, "PCS PHY Control/Status%u: 0x%08X\n", 2316 nes_debug(NES_DBG_PHY, "PCS PHY Control/Status%u: 0x%08X\n",
2152 mac_index, pcs_control_status); 2317 mac_index, pcs_control_status);
2153 if (nesadapter->OneG_Mode) { 2318 if ((nesadapter->OneG_Mode) &&
2319 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
2154 u32temp = 0x01010000; 2320 u32temp = 0x01010000;
2155 if (nesadapter->port_count > 2) { 2321 if (nesadapter->port_count > 2) {
2156 u32temp |= 0x02020000; 2322 u32temp |= 0x02020000;
@@ -2159,24 +2325,59 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2159 phy_data = 0; 2325 phy_data = 0;
2160 nes_debug(NES_DBG_PHY, "PCS says the link is down\n"); 2326 nes_debug(NES_DBG_PHY, "PCS says the link is down\n");
2161 } 2327 }
2162 } else if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) {
2163 nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
2164 temp_phy_data = (u16)nes_read_indexed(nesdev,
2165 NES_IDX_MAC_MDIO_CONTROL);
2166 u32temp = 20;
2167 do {
2168 nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
2169 phy_data = (u16)nes_read_indexed(nesdev,
2170 NES_IDX_MAC_MDIO_CONTROL);
2171 if ((phy_data == temp_phy_data) || (!(--u32temp)))
2172 break;
2173 temp_phy_data = phy_data;
2174 } while (1);
2175 nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
2176 __func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
2177
2178 } else { 2328 } else {
2179 phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0; 2329 switch (nesadapter->phy_type[mac_index]) {
2330 case NES_PHY_TYPE_IRIS:
2331 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
2332 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2333 u32temp = 20;
2334 do {
2335 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
2336 phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2337 if ((phy_data == temp_phy_data) || (!(--u32temp)))
2338 break;
2339 temp_phy_data = phy_data;
2340 } while (1);
2341 nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
2342 __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
2343 break;
2344
2345 case NES_PHY_TYPE_ARGUS:
2346 /* clear the alarms */
2347 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
2348 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
2349 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc002);
2350 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc005);
2351 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc006);
2352 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
2353 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004);
2354 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005);
2355 /* check link status */
2356 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
2357 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2358 u32temp = 100;
2359 do {
2360 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
2361
2362 phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2363 if ((phy_data == temp_phy_data) || (!(--u32temp)))
2364 break;
2365 temp_phy_data = phy_data;
2366 } while (1);
2367 nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
 2368 __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
2369 break;
2370
2371 case NES_PHY_TYPE_PUMA_1G:
2372 if (mac_index < 2)
2373 pcs_val = pcs_mask = 0x01010000;
2374 else
2375 pcs_val = pcs_mask = 0x02020000;
2376 /* fall through */
2377 default:
2378 phy_data = (pcs_val == (pcs_control_status & pcs_mask)) ? 0x4 : 0x0;
2379 break;
2380 }
2180 } 2381 }
2181 2382
2182 if (phy_data & 0x0004) { 2383 if (phy_data & 0x0004) {
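
The rewritten status check above reduces link detection to choosing a (mask, value) pair for the PCS control/status register: PUMA 1G ports look at a single per-lane bit (0x01010000 or 0x02020000 depending on the port pair), the 10G parts keep the 0x0f0f0000-under-0x0f1f0000 pattern, and bit 0x0004 of the derived phy_data then drives the link-up/link-down handling that follows. A tiny sketch of that selection, with the PHY-kind constants invented for illustration:

#include <stdbool.h>
#include <stdio.h>

enum sketch_phy { SKETCH_PHY_10G, SKETCH_PHY_PUMA_1G };	/* illustrative only */

/* Pick the PCS bits that mean "link up" for this PHY/port and test them;
 * mirrors the pcs_val/pcs_mask selection added to nes_process_mac_intr(). */
static bool pcs_link_up(enum sketch_phy kind, unsigned int mac_index,
			unsigned int pcs_status)
{
	unsigned int mask = 0x0f1f0000, val = 0x0f0f0000;	/* 10G default */

	if (kind == SKETCH_PHY_PUMA_1G)
		mask = val = (mac_index < 2) ? 0x01010000 : 0x02020000;

	return (pcs_status & mask) == val;
}

int main(void)
{
	printf("10G,  status 0x0f0f0000: %d\n",
	       pcs_link_up(SKETCH_PHY_10G, 0, 0x0f0f0000));
	printf("PUMA, port 3, status 0x02020000: %d\n",
	       pcs_link_up(SKETCH_PHY_PUMA_1G, 3, 0x02020000));
	return 0;
}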
@@ -2185,8 +2386,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2185 nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n", 2386 nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n",
2186 nesvnic->linkup); 2387 nesvnic->linkup);
2187 if (nesvnic->linkup == 0) { 2388 if (nesvnic->linkup == 0) {
2188 printk(PFX "The Link is now up for port %u, netdev %p.\n", 2389 printk(PFX "The Link is now up for port %s, netdev %p.\n",
2189 mac_index, nesvnic->netdev); 2390 nesvnic->netdev->name, nesvnic->netdev);
2190 if (netif_queue_stopped(nesvnic->netdev)) 2391 if (netif_queue_stopped(nesvnic->netdev))
2191 netif_start_queue(nesvnic->netdev); 2392 netif_start_queue(nesvnic->netdev);
2192 nesvnic->linkup = 1; 2393 nesvnic->linkup = 1;
@@ -2199,8 +2400,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2199 nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n", 2400 nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n",
2200 nesvnic->linkup); 2401 nesvnic->linkup);
2201 if (nesvnic->linkup == 1) { 2402 if (nesvnic->linkup == 1) {
2202 printk(PFX "The Link is now down for port %u, netdev %p.\n", 2403 printk(PFX "The Link is now down for port %s, netdev %p.\n",
2203 mac_index, nesvnic->netdev); 2404 nesvnic->netdev->name, nesvnic->netdev);
2204 if (!(netif_queue_stopped(nesvnic->netdev))) 2405 if (!(netif_queue_stopped(nesvnic->netdev)))
2205 netif_stop_queue(nesvnic->netdev); 2406 netif_stop_queue(nesvnic->netdev);
2206 nesvnic->linkup = 0; 2407 nesvnic->linkup = 0;
@@ -2254,10 +2455,13 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2254 u16 pkt_type; 2455 u16 pkt_type;
2255 u16 rqes_processed = 0; 2456 u16 rqes_processed = 0;
2256 u8 sq_cqes = 0; 2457 u8 sq_cqes = 0;
2458 u8 nes_use_lro = 0;
2257 2459
2258 head = cq->cq_head; 2460 head = cq->cq_head;
2259 cq_size = cq->cq_size; 2461 cq_size = cq->cq_size;
2260 cq->cqes_pending = 1; 2462 cq->cqes_pending = 1;
2463 if (nesvnic->netdev->features & NETIF_F_LRO)
2464 nes_use_lro = 1;
2261 do { 2465 do {
2262 if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) & 2466 if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
2263 NES_NIC_CQE_VALID) { 2467 NES_NIC_CQE_VALID) {
@@ -2272,8 +2476,10 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2272 /* bump past the vlan tag */ 2476 /* bump past the vlan tag */
2273 wqe_fragment_length++; 2477 wqe_fragment_length++;
2274 if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) { 2478 if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
2275 u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]); 2479 u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX +
2276 u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32; 2480 wqe_fragment_index * 2]);
2481 u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX +
2482 wqe_fragment_index * 2])) << 32;
2277 bus_address = (dma_addr_t)u64temp; 2483 bus_address = (dma_addr_t)u64temp;
2278 if (test_and_clear_bit(nesnic->sq_tail, nesnic->first_frag_overflow)) { 2484 if (test_and_clear_bit(nesnic->sq_tail, nesnic->first_frag_overflow)) {
2279 pci_unmap_single(nesdev->pcidev, 2485 pci_unmap_single(nesdev->pcidev,
@@ -2283,8 +2489,10 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2283 } 2489 }
2284 for (; wqe_fragment_index < 5; wqe_fragment_index++) { 2490 for (; wqe_fragment_index < 5; wqe_fragment_index++) {
2285 if (wqe_fragment_length[wqe_fragment_index]) { 2491 if (wqe_fragment_length[wqe_fragment_index]) {
2286 u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]); 2492 u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX +
2287 u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32; 2493 wqe_fragment_index * 2]);
2494 u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX
2495 + wqe_fragment_index * 2])) <<32;
2288 bus_address = (dma_addr_t)u64temp; 2496 bus_address = (dma_addr_t)u64temp;
2289 pci_unmap_page(nesdev->pcidev, 2497 pci_unmap_page(nesdev->pcidev,
2290 bus_address, 2498 bus_address,
@@ -2331,7 +2539,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2331 if (atomic_read(&nesvnic->rx_skbs_needed) > (nesvnic->nic.rq_size>>1)) { 2539 if (atomic_read(&nesvnic->rx_skbs_needed) > (nesvnic->nic.rq_size>>1)) {
2332 nes_write32(nesdev->regs+NES_CQE_ALLOC, 2540 nes_write32(nesdev->regs+NES_CQE_ALLOC,
2333 cq->cq_number | (cqe_count << 16)); 2541 cq->cq_number | (cqe_count << 16));
2334// nesadapter->tune_timer.cq_count += cqe_count; 2542 /* nesadapter->tune_timer.cq_count += cqe_count; */
2335 nesdev->currcq_count += cqe_count; 2543 nesdev->currcq_count += cqe_count;
2336 cqe_count = 0; 2544 cqe_count = 0;
2337 nes_replenish_nic_rq(nesvnic); 2545 nes_replenish_nic_rq(nesvnic);
@@ -2379,9 +2587,16 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2379 >> 16); 2587 >> 16);
2380 nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n", 2588 nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
2381 nesvnic->netdev->name, vlan_tag); 2589 nesvnic->netdev->name, vlan_tag);
2382 nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag); 2590 if (nes_use_lro)
2591 lro_vlan_hwaccel_receive_skb(&nesvnic->lro_mgr, rx_skb,
2592 nesvnic->vlan_grp, vlan_tag, NULL);
2593 else
2594 nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
2383 } else { 2595 } else {
2384 nes_netif_rx(rx_skb); 2596 if (nes_use_lro)
2597 lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
2598 else
2599 nes_netif_rx(rx_skb);
2385 } 2600 }
2386 } 2601 }
2387 2602
@@ -2399,7 +2614,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2399 /* Replenish Nic CQ */ 2614 /* Replenish Nic CQ */
2400 nes_write32(nesdev->regs+NES_CQE_ALLOC, 2615 nes_write32(nesdev->regs+NES_CQE_ALLOC,
2401 cq->cq_number | (cqe_count << 16)); 2616 cq->cq_number | (cqe_count << 16));
2402// nesdev->nesadapter->tune_timer.cq_count += cqe_count; 2617 /* nesdev->nesadapter->tune_timer.cq_count += cqe_count; */
2403 nesdev->currcq_count += cqe_count; 2618 nesdev->currcq_count += cqe_count;
2404 cqe_count = 0; 2619 cqe_count = 0;
2405 } 2620 }
@@ -2413,26 +2628,27 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2413 2628
2414 } while (1); 2629 } while (1);
2415 2630
2631 if (nes_use_lro)
2632 lro_flush_all(&nesvnic->lro_mgr);
2416 if (sq_cqes) { 2633 if (sq_cqes) {
2417 barrier(); 2634 barrier();
2418 /* restart the queue if it had been stopped */ 2635 /* restart the queue if it had been stopped */
2419 if (netif_queue_stopped(nesvnic->netdev)) 2636 if (netif_queue_stopped(nesvnic->netdev))
2420 netif_wake_queue(nesvnic->netdev); 2637 netif_wake_queue(nesvnic->netdev);
2421 } 2638 }
2422
2423 cq->cq_head = head; 2639 cq->cq_head = head;
2424 /* nes_debug(NES_DBG_CQ, "CQ%u Processed = %u cqes, new head = %u.\n", 2640 /* nes_debug(NES_DBG_CQ, "CQ%u Processed = %u cqes, new head = %u.\n",
2425 cq->cq_number, cqe_count, cq->cq_head); */ 2641 cq->cq_number, cqe_count, cq->cq_head); */
2426 cq->cqe_allocs_pending = cqe_count; 2642 cq->cqe_allocs_pending = cqe_count;
2427 if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) 2643 if (unlikely(nesadapter->et_use_adaptive_rx_coalesce))
2428 { 2644 {
2429// nesdev->nesadapter->tune_timer.cq_count += cqe_count; 2645 /* nesdev->nesadapter->tune_timer.cq_count += cqe_count; */
2430 nesdev->currcq_count += cqe_count; 2646 nesdev->currcq_count += cqe_count;
2431 nes_nic_tune_timer(nesdev); 2647 nes_nic_tune_timer(nesdev);
2432 } 2648 }
2433 if (atomic_read(&nesvnic->rx_skbs_needed)) 2649 if (atomic_read(&nesvnic->rx_skbs_needed))
2434 nes_replenish_nic_rq(nesvnic); 2650 nes_replenish_nic_rq(nesvnic);
2435 } 2651}
2436 2652
2437 2653
2438/** 2654/**
@@ -2461,7 +2677,7 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
2461 2677
2462 if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) { 2678 if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
2463 u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head]. 2679 u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head].
2464 cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | 2680 cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]))) << 32) |
2465 ((u64)(le32_to_cpu(cq->cq_vbase[head]. 2681 ((u64)(le32_to_cpu(cq->cq_vbase[head].
2466 cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]))); 2682 cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX])));
2467 cqp = *((struct nes_hw_cqp **)&u64temp); 2683 cqp = *((struct nes_hw_cqp **)&u64temp);
@@ -2478,7 +2694,7 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
2478 } 2694 }
2479 2695
2480 u64temp = (((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail]. 2696 u64temp = (((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
2481 wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX])))<<32) | 2697 wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
2482 ((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail]. 2698 ((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
2483 wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX]))); 2699 wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX])));
2484 cqp_request = *((struct nes_cqp_request **)&u64temp); 2700 cqp_request = *((struct nes_cqp_request **)&u64temp);
@@ -2515,7 +2731,7 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
2515 } else { 2731 } else {
2516 nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n", 2732 nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
2517 cqp_request, 2733 cqp_request,
2518 le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f); 2734 le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX]) & 0x3f);
2519 if (cqp_request->dynamic) { 2735 if (cqp_request->dynamic) {
2520 kfree(cqp_request); 2736 kfree(cqp_request);
2521 } else { 2737 } else {
@@ -2529,7 +2745,7 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
2529 } 2745 }
2530 2746
2531 cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; 2747 cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
2532 nes_write32(nesdev->regs+NES_CQE_ALLOC, cq->cq_number | (1 << 16)); 2748 nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (1 << 16));
2533 if (++cqp->sq_tail >= cqp->sq_size) 2749 if (++cqp->sq_tail >= cqp->sq_size)
2534 cqp->sq_tail = 0; 2750 cqp->sq_tail = 0;
2535 2751
@@ -2598,13 +2814,13 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2598 nes_debug(NES_DBG_AEQ, "\n"); 2814 nes_debug(NES_DBG_AEQ, "\n");
2599 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); 2815 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
2600 if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) { 2816 if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) {
2601 context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); 2817 context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2602 context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; 2818 context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2603 } else { 2819 } else {
2604 aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); 2820 aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2605 aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; 2821 aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2606 context = (unsigned long)nesadapter->qp_table[le32_to_cpu( 2822 context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
2607 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; 2823 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
2608 BUG_ON(!context); 2824 BUG_ON(!context);
2609 } 2825 }
2610 2826
@@ -2617,7 +2833,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2617 le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, 2833 le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
2618 nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]); 2834 nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
2619 2835
2620
2621 switch (async_event_id) { 2836 switch (async_event_id) {
2622 case NES_AEQE_AEID_LLP_FIN_RECEIVED: 2837 case NES_AEQE_AEID_LLP_FIN_RECEIVED:
2623 nesqp = *((struct nes_qp **)&context); 2838 nesqp = *((struct nes_qp **)&context);
@@ -3021,7 +3236,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
3021 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_ARP_VALID); 3236 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_ARP_VALID);
3022 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = cpu_to_le32( 3237 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = cpu_to_le32(
3023 (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | 3238 (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
3024 (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]); 3239 (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
3025 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32( 3240 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
3026 (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]); 3241 (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
3027 } else { 3242 } else {
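
The receive-path hunks above thread LRO into nes_nic_ce_handler() in three places: a per-pass feature test, a per-packet dispatch, and an end-of-loop flush. The sketch below pulls those pieces together for readability. It reuses the driver's private types and helpers exactly as they appear in the hunks, and vlan_tagged stands in for the CQE's VLAN-tag test, so this is a consolidated illustration rather than standalone, compilable code.

    #include <linux/inet_lro.h>

    /* once per CQ pass: honour the runtime LRO toggle */
    nes_use_lro = !!(nesvnic->netdev->features & NETIF_F_LRO);

    /* per received packet: hand off to inet_lro or the legacy receive path */
    if (vlan_tagged) {                      /* illustrative: real code checks the CQE misc word */
            if (nes_use_lro)
                    lro_vlan_hwaccel_receive_skb(&nesvnic->lro_mgr, rx_skb,
                                                 nesvnic->vlan_grp, vlan_tag, NULL);
            else
                    nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
    } else {
            if (nes_use_lro)
                    lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
            else
                    nes_netif_rx(rx_skb);
    }

    /* after the CQE loop: push any partially aggregated sessions up the stack */
    if (nes_use_lro)
            lro_flush_all(&nesvnic->lro_mgr);
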
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 8f36e231bdf5..745bf94f3f07 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -33,8 +33,12 @@
33#ifndef __NES_HW_H 33#ifndef __NES_HW_H
34#define __NES_HW_H 34#define __NES_HW_H
35 35
36#define NES_PHY_TYPE_1G 2 36#include <linux/inet_lro.h>
37#define NES_PHY_TYPE_IRIS 3 37
38#define NES_PHY_TYPE_1G 2
39#define NES_PHY_TYPE_IRIS 3
40#define NES_PHY_TYPE_ARGUS 4
41#define NES_PHY_TYPE_PUMA_1G 5
38#define NES_PHY_TYPE_PUMA_10G 6 42#define NES_PHY_TYPE_PUMA_10G 6
39 43
40#define NES_MULTICAST_PF_MAX 8 44#define NES_MULTICAST_PF_MAX 8
@@ -965,7 +969,7 @@ struct nes_arp_entry {
965#define NES_NIC_CQ_DOWNWARD_TREND 16 969#define NES_NIC_CQ_DOWNWARD_TREND 16
966 970
967struct nes_hw_tune_timer { 971struct nes_hw_tune_timer {
968 //u16 cq_count; 972 /* u16 cq_count; */
969 u16 threshold_low; 973 u16 threshold_low;
970 u16 threshold_target; 974 u16 threshold_target;
971 u16 threshold_high; 975 u16 threshold_high;
@@ -982,8 +986,10 @@ struct nes_hw_tune_timer {
982#define NES_TIMER_INT_LIMIT 2 986#define NES_TIMER_INT_LIMIT 2
983#define NES_TIMER_INT_LIMIT_DYNAMIC 10 987#define NES_TIMER_INT_LIMIT_DYNAMIC 10
984#define NES_TIMER_ENABLE_LIMIT 4 988#define NES_TIMER_ENABLE_LIMIT 4
985#define NES_MAX_LINK_INTERRUPTS 128 989#define NES_MAX_LINK_INTERRUPTS 128
986#define NES_MAX_LINK_CHECK 200 990#define NES_MAX_LINK_CHECK 200
991#define NES_MAX_LRO_DESCRIPTORS 32
992#define NES_LRO_MAX_AGGR 64
987 993
988struct nes_adapter { 994struct nes_adapter {
989 u64 fw_ver; 995 u64 fw_ver;
@@ -1183,6 +1189,9 @@ struct nes_vnic {
1183 u8 of_device_registered; 1189 u8 of_device_registered;
1184 u8 rdma_enabled; 1190 u8 rdma_enabled;
1185 u8 rx_checksum_disabled; 1191 u8 rx_checksum_disabled;
1192 u32 lro_max_aggr;
1193 struct net_lro_mgr lro_mgr;
1194 struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
1186}; 1195};
1187 1196
1188struct nes_ib_device { 1197struct nes_ib_device {
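
The new nes_vnic members only reserve storage for the inet_lro state; the manager still has to be wired up when the netdev is set up, and that code lives in nes_nic.c outside the quoted hunks. Based on the standard struct net_lro_mgr fields, a typical initialization would look roughly like the following; nes_lro_get_skb_hdr stands in for the driver's header-parsing callback and is an assumed name here.

    nesvnic->lro_mgr.max_aggr       = NES_LRO_MAX_AGGR;
    nesvnic->lro_mgr.max_desc       = NES_MAX_LRO_DESCRIPTORS;
    nesvnic->lro_mgr.lro_arr        = nesvnic->lro_desc;
    nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;  /* assumed callback name */
    nesvnic->lro_mgr.features       = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
    nesvnic->lro_mgr.dev            = netdev;
    nesvnic->lro_mgr.ip_summed      = CHECKSUM_UNNECESSARY;
    nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
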
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index e5366b013c1a..1b0938c87774 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -185,12 +185,13 @@ static int nes_netdev_open(struct net_device *netdev)
185 nic_active |= nic_active_bit; 185 nic_active |= nic_active_bit;
186 nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active); 186 nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
187 187
188 macaddr_high = ((u16)netdev->dev_addr[0]) << 8; 188 macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
189 macaddr_high += (u16)netdev->dev_addr[1]; 189 macaddr_high += (u16)netdev->dev_addr[1];
190 macaddr_low = ((u32)netdev->dev_addr[2]) << 24; 190
191 macaddr_low += ((u32)netdev->dev_addr[3]) << 16; 191 macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
192 macaddr_low += ((u32)netdev->dev_addr[4]) << 8; 192 macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
193 macaddr_low += (u32)netdev->dev_addr[5]; 193 macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
194 macaddr_low += (u32)netdev->dev_addr[5];
194 195
195 /* Program the various MAC regs */ 196 /* Program the various MAC regs */
196 for (i = 0; i < NES_MAX_PORT_COUNT; i++) { 197 for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
@@ -451,7 +452,7 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
451 __le16 *wqe_fragment_length; 452 __le16 *wqe_fragment_length;
452 u32 nr_frags; 453 u32 nr_frags;
453 u32 original_first_length; 454 u32 original_first_length;
454// u64 *wqe_fragment_address; 455 /* u64 *wqe_fragment_address; */
455 /* first fragment (0) is used by copy buffer */ 456 /* first fragment (0) is used by copy buffer */
456 u16 wqe_fragment_index=1; 457 u16 wqe_fragment_index=1;
457 u16 hoffset; 458 u16 hoffset;
@@ -461,11 +462,12 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
461 u32 old_head; 462 u32 old_head;
462 u32 wqe_misc; 463 u32 wqe_misc;
463 464
464 /* nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u," 465 /*
465 " (%u frags), tso_size=%u\n", 466 * nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
466 netdev->name, skb->len, skb_headlen(skb), 467 * " (%u frags), tso_size=%u\n",
467 skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); 468 * netdev->name, skb->len, skb_headlen(skb),
468 */ 469 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
470 */
469 471
470 if (!netif_carrier_ok(netdev)) 472 if (!netif_carrier_ok(netdev))
471 return NETDEV_TX_OK; 473 return NETDEV_TX_OK;
@@ -795,12 +797,12 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
795 memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len); 797 memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
796 printk(PFX "%s: Address length = %d, Address = %s\n", 798 printk(PFX "%s: Address length = %d, Address = %s\n",
797 __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data)); 799 __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data));
798 macaddr_high = ((u16)netdev->dev_addr[0]) << 8; 800 macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
799 macaddr_high += (u16)netdev->dev_addr[1]; 801 macaddr_high += (u16)netdev->dev_addr[1];
800 macaddr_low = ((u32)netdev->dev_addr[2]) << 24; 802 macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
801 macaddr_low += ((u32)netdev->dev_addr[3]) << 16; 803 macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
802 macaddr_low += ((u32)netdev->dev_addr[4]) << 8; 804 macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
803 macaddr_low += (u32)netdev->dev_addr[5]; 805 macaddr_low += (u32)netdev->dev_addr[5];
804 806
805 for (i = 0; i < NES_MAX_PORT_COUNT; i++) { 807 for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
806 if (nesvnic->qp_nic_index[i] == 0xf) { 808 if (nesvnic->qp_nic_index[i] == 0xf) {
@@ -881,12 +883,12 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
881 print_mac(mac, multicast_addr->dmi_addr), 883 print_mac(mac, multicast_addr->dmi_addr),
882 perfect_filter_register_address+(mc_index * 8), 884 perfect_filter_register_address+(mc_index * 8),
883 mc_nic_index); 885 mc_nic_index);
884 macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8; 886 macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
885 macaddr_high += (u16)multicast_addr->dmi_addr[1]; 887 macaddr_high += (u16)multicast_addr->dmi_addr[1];
886 macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24; 888 macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
887 macaddr_low += ((u32)multicast_addr->dmi_addr[3]) << 16; 889 macaddr_low += ((u32)multicast_addr->dmi_addr[3]) << 16;
888 macaddr_low += ((u32)multicast_addr->dmi_addr[4]) << 8; 890 macaddr_low += ((u32)multicast_addr->dmi_addr[4]) << 8;
889 macaddr_low += (u32)multicast_addr->dmi_addr[5]; 891 macaddr_low += (u32)multicast_addr->dmi_addr[5];
890 nes_write_indexed(nesdev, 892 nes_write_indexed(nesdev,
891 perfect_filter_register_address+(mc_index * 8), 893 perfect_filter_register_address+(mc_index * 8),
892 macaddr_low); 894 macaddr_low);
@@ -910,23 +912,23 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
910/** 912/**
911 * nes_netdev_change_mtu 913 * nes_netdev_change_mtu
912 */ 914 */
913static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu) 915static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
914{ 916{
915 struct nes_vnic *nesvnic = netdev_priv(netdev); 917 struct nes_vnic *nesvnic = netdev_priv(netdev);
916 struct nes_device *nesdev = nesvnic->nesdev; 918 struct nes_device *nesdev = nesvnic->nesdev;
917 int ret = 0; 919 int ret = 0;
918 u8 jumbomode=0; 920 u8 jumbomode = 0;
919 921
920 if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu)) 922 if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
921 return -EINVAL; 923 return -EINVAL;
922 924
923 netdev->mtu = new_mtu; 925 netdev->mtu = new_mtu;
924 nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN; 926 nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN;
925 927
926 if (netdev->mtu > 1500) { 928 if (netdev->mtu > 1500) {
927 jumbomode=1; 929 jumbomode=1;
928 } 930 }
929 nes_nic_init_timer_defaults(nesdev, jumbomode); 931 nes_nic_init_timer_defaults(nesdev, jumbomode);
930 932
931 if (netif_running(netdev)) { 933 if (netif_running(netdev)) {
932 nes_netdev_stop(netdev); 934 nes_netdev_stop(netdev);
@@ -936,8 +938,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
936 return ret; 938 return ret;
937} 939}
938 940
939#define NES_ETHTOOL_STAT_COUNT 55 941static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
940static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = {
941 "Link Change Interrupts", 942 "Link Change Interrupts",
942 "Linearized SKBs", 943 "Linearized SKBs",
943 "T/GSO Requests", 944 "T/GSO Requests",
@@ -993,8 +994,12 @@ static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN]
993 "CQ Depth 32", 994 "CQ Depth 32",
994 "CQ Depth 128", 995 "CQ Depth 128",
995 "CQ Depth 256", 996 "CQ Depth 256",
997 "LRO aggregated",
998 "LRO flushed",
999 "LRO no_desc",
996}; 1000};
997 1001
1002#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
998 1003
999/** 1004/**
1000 * nes_netdev_get_rx_csum 1005 * nes_netdev_get_rx_csum
@@ -1189,6 +1194,9 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
1189 target_stat_values[52] = int_mod_cq_depth_32; 1194 target_stat_values[52] = int_mod_cq_depth_32;
1190 target_stat_values[53] = int_mod_cq_depth_128; 1195 target_stat_values[53] = int_mod_cq_depth_128;
1191 target_stat_values[54] = int_mod_cq_depth_256; 1196 target_stat_values[54] = int_mod_cq_depth_256;
1197 target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
1198 target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
1199 target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
1192 1200
1193} 1201}
1194 1202
@@ -1219,14 +1227,14 @@ static int nes_netdev_set_coalesce(struct net_device *netdev,
1219 struct ethtool_coalesce *et_coalesce) 1227 struct ethtool_coalesce *et_coalesce)
1220{ 1228{
1221 struct nes_vnic *nesvnic = netdev_priv(netdev); 1229 struct nes_vnic *nesvnic = netdev_priv(netdev);
1222 struct nes_device *nesdev = nesvnic->nesdev; 1230 struct nes_device *nesdev = nesvnic->nesdev;
1223 struct nes_adapter *nesadapter = nesdev->nesadapter; 1231 struct nes_adapter *nesadapter = nesdev->nesadapter;
1224 struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; 1232 struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
1225 unsigned long flags; 1233 unsigned long flags;
1226 1234
1227 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); 1235 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
1228 if (et_coalesce->rx_max_coalesced_frames_low) { 1236 if (et_coalesce->rx_max_coalesced_frames_low) {
1229 shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low; 1237 shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low;
1230 } 1238 }
1231 if (et_coalesce->rx_max_coalesced_frames_irq) { 1239 if (et_coalesce->rx_max_coalesced_frames_irq) {
1232 shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq; 1240 shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq;
@@ -1246,14 +1254,14 @@ static int nes_netdev_set_coalesce(struct net_device *netdev,
1246 nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq; 1254 nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
1247 if (et_coalesce->use_adaptive_rx_coalesce) { 1255 if (et_coalesce->use_adaptive_rx_coalesce) {
1248 nesadapter->et_use_adaptive_rx_coalesce = 1; 1256 nesadapter->et_use_adaptive_rx_coalesce = 1;
1249 nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC; 1257 nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
1250 nesadapter->et_rx_coalesce_usecs_irq = 0; 1258 nesadapter->et_rx_coalesce_usecs_irq = 0;
1251 if (et_coalesce->pkt_rate_low) { 1259 if (et_coalesce->pkt_rate_low) {
1252 nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low; 1260 nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low;
1253 } 1261 }
1254 } else { 1262 } else {
1255 nesadapter->et_use_adaptive_rx_coalesce = 0; 1263 nesadapter->et_use_adaptive_rx_coalesce = 0;
1256 nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT; 1264 nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
1257 if (nesadapter->et_rx_coalesce_usecs_irq) { 1265 if (nesadapter->et_rx_coalesce_usecs_irq) {
1258 nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 1266 nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
1259 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8))); 1267 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8)));
@@ -1270,28 +1278,28 @@ static int nes_netdev_get_coalesce(struct net_device *netdev,
1270 struct ethtool_coalesce *et_coalesce) 1278 struct ethtool_coalesce *et_coalesce)
1271{ 1279{
1272 struct nes_vnic *nesvnic = netdev_priv(netdev); 1280 struct nes_vnic *nesvnic = netdev_priv(netdev);
1273 struct nes_device *nesdev = nesvnic->nesdev; 1281 struct nes_device *nesdev = nesvnic->nesdev;
1274 struct nes_adapter *nesadapter = nesdev->nesadapter; 1282 struct nes_adapter *nesadapter = nesdev->nesadapter;
1275 struct ethtool_coalesce temp_et_coalesce; 1283 struct ethtool_coalesce temp_et_coalesce;
1276 struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer; 1284 struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
1277 unsigned long flags; 1285 unsigned long flags;
1278 1286
1279 memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce)); 1287 memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
1280 temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq; 1288 temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
1281 temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce; 1289 temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
1282 temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval; 1290 temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
1283 temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low; 1291 temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
1284 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); 1292 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
1285 temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low; 1293 temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
1286 temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target; 1294 temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
1287 temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high; 1295 temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
1288 temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min; 1296 temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
1289 temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max; 1297 temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
1290 if (nesadapter->et_use_adaptive_rx_coalesce) { 1298 if (nesadapter->et_use_adaptive_rx_coalesce) {
1291 temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use; 1299 temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use;
1292 } 1300 }
1293 spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags); 1301 spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
1294 memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce)); 1302 memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
1295 return 0; 1303 return 0;
1296} 1304}
1297 1305
@@ -1370,30 +1378,38 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
1370 u16 phy_data; 1378 u16 phy_data;
1371 1379
1372 et_cmd->duplex = DUPLEX_FULL; 1380 et_cmd->duplex = DUPLEX_FULL;
1373 et_cmd->port = PORT_MII; 1381 et_cmd->port = PORT_MII;
1382
1374 if (nesadapter->OneG_Mode) { 1383 if (nesadapter->OneG_Mode) {
1375 et_cmd->supported = SUPPORTED_1000baseT_Full|SUPPORTED_Autoneg;
1376 et_cmd->advertising = ADVERTISED_1000baseT_Full|ADVERTISED_Autoneg;
1377 et_cmd->speed = SPEED_1000; 1384 et_cmd->speed = SPEED_1000;
1378 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], 1385 if (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
1379 &phy_data); 1386 et_cmd->supported = SUPPORTED_1000baseT_Full;
1380 if (phy_data&0x1000) { 1387 et_cmd->advertising = ADVERTISED_1000baseT_Full;
1381 et_cmd->autoneg = AUTONEG_ENABLE; 1388 et_cmd->autoneg = AUTONEG_DISABLE;
1389 et_cmd->transceiver = XCVR_INTERNAL;
1390 et_cmd->phy_address = nesdev->mac_index;
1382 } else { 1391 } else {
1383 et_cmd->autoneg = AUTONEG_DISABLE; 1392 et_cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
1393 et_cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;
1394 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], &phy_data);
1395 if (phy_data & 0x1000)
1396 et_cmd->autoneg = AUTONEG_ENABLE;
1397 else
1398 et_cmd->autoneg = AUTONEG_DISABLE;
1399 et_cmd->transceiver = XCVR_EXTERNAL;
1400 et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
1384 } 1401 }
1385 et_cmd->transceiver = XCVR_EXTERNAL;
1386 et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
1387 } else { 1402 } else {
1388 if (nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) { 1403 if ((nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) ||
1404 (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_ARGUS)) {
1389 et_cmd->transceiver = XCVR_EXTERNAL; 1405 et_cmd->transceiver = XCVR_EXTERNAL;
1390 et_cmd->port = PORT_FIBRE; 1406 et_cmd->port = PORT_FIBRE;
1391 et_cmd->supported = SUPPORTED_FIBRE; 1407 et_cmd->supported = SUPPORTED_FIBRE;
1392 et_cmd->advertising = ADVERTISED_FIBRE; 1408 et_cmd->advertising = ADVERTISED_FIBRE;
1393 et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index]; 1409 et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
1394 } else { 1410 } else {
1395 et_cmd->transceiver = XCVR_INTERNAL; 1411 et_cmd->transceiver = XCVR_INTERNAL;
1396 et_cmd->supported = SUPPORTED_10000baseT_Full; 1412 et_cmd->supported = SUPPORTED_10000baseT_Full;
1397 et_cmd->advertising = ADVERTISED_10000baseT_Full; 1413 et_cmd->advertising = ADVERTISED_10000baseT_Full;
1398 et_cmd->phy_address = nesdev->mac_index; 1414 et_cmd->phy_address = nesdev->mac_index;
1399 } 1415 }
@@ -1416,14 +1432,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
1416 struct nes_adapter *nesadapter = nesdev->nesadapter; 1432 struct nes_adapter *nesadapter = nesdev->nesadapter;
1417 u16 phy_data; 1433 u16 phy_data;
1418 1434
1419 if (nesadapter->OneG_Mode) { 1435 if ((nesadapter->OneG_Mode) &&
1436 (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
1420 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], 1437 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
1421 &phy_data); 1438 &phy_data);
1422 if (et_cmd->autoneg) { 1439 if (et_cmd->autoneg) {
1423 /* Turn on Full duplex, Autoneg, and restart autonegotiation */ 1440 /* Turn on Full duplex, Autoneg, and restart autonegotiation */
1424 phy_data |= 0x1300; 1441 phy_data |= 0x1300;
1425 } else { 1442 } else {
1426 // Turn off autoneg 1443 /* Turn off autoneg */
1427 phy_data &= ~0x1000; 1444 phy_data &= ~0x1000;
1428 } 1445 }
1429 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], 1446 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
@@ -1454,6 +1471,8 @@ static struct ethtool_ops nes_ethtool_ops = {
1454 .set_sg = ethtool_op_set_sg, 1471 .set_sg = ethtool_op_set_sg,
1455 .get_tso = ethtool_op_get_tso, 1472 .get_tso = ethtool_op_get_tso,
1456 .set_tso = ethtool_op_set_tso, 1473 .set_tso = ethtool_op_set_tso,
1474 .get_flags = ethtool_op_get_flags,
1475 .set_flags = ethtool_op_set_flags,
1457}; 1476};
1458 1477
1459 1478
@@ -1607,27 +1626,34 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1607 list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]); 1626 list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
1608 1627
1609 if ((nesdev->netdev_count == 0) && 1628 if ((nesdev->netdev_count == 0) &&
1610 (PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index)) { 1629 ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
1611 nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n", 1630 ((nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) &&
1612 NES_IDX_PHY_PCS_CONTROL_STATUS0+(0x200*(nesvnic->logical_port&1))); 1631 (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
1632 ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
1633 /*
1634 * nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
1635 * NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesvnic->logical_port & 1)));
1636 */
1613 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1637 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1614 (0x200*(nesvnic->logical_port&1))); 1638 (0x200 * (nesdev->mac_index & 1)));
1615 u32temp |= 0x00200000; 1639 if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G) {
1616 nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1640 u32temp |= 0x00200000;
1617 (0x200*(nesvnic->logical_port&1)), u32temp); 1641 nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1642 (0x200 * (nesdev->mac_index & 1)), u32temp);
1643 }
1644
1618 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1645 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1619 (0x200*(nesvnic->logical_port&1)) ); 1646 (0x200 * (nesdev->mac_index & 1)));
1647
1620 if ((u32temp&0x0f1f0000) == 0x0f0f0000) { 1648 if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
1621 if (nesdev->nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) { 1649 if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) {
1622 nes_init_phy(nesdev); 1650 nes_init_phy(nesdev);
1623 nes_read_10G_phy_reg(nesdev, 1, 1651 nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
1624 nesdev->nesadapter->phy_index[nesvnic->logical_port]);
1625 temp_phy_data = (u16)nes_read_indexed(nesdev, 1652 temp_phy_data = (u16)nes_read_indexed(nesdev,
1626 NES_IDX_MAC_MDIO_CONTROL); 1653 NES_IDX_MAC_MDIO_CONTROL);
1627 u32temp = 20; 1654 u32temp = 20;
1628 do { 1655 do {
1629 nes_read_10G_phy_reg(nesdev, 1, 1656 nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
1630 nesdev->nesadapter->phy_index[nesvnic->logical_port]);
1631 phy_data = (u16)nes_read_indexed(nesdev, 1657 phy_data = (u16)nes_read_indexed(nesdev,
1632 NES_IDX_MAC_MDIO_CONTROL); 1658 NES_IDX_MAC_MDIO_CONTROL);
1633 if ((phy_data == temp_phy_data) || (!(--u32temp))) 1659 if ((phy_data == temp_phy_data) || (!(--u32temp)))
@@ -1644,6 +1670,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1644 nes_debug(NES_DBG_INIT, "The Link is UP!!.\n"); 1670 nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
1645 nesvnic->linkup = 1; 1671 nesvnic->linkup = 1;
1646 } 1672 }
1673 } else if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
1674 nes_debug(NES_DBG_INIT, "mac_index=%d, logical_port=%d, u32temp=0x%04X, PCI_FUNC=%d\n",
1675 nesdev->mac_index, nesvnic->logical_port, u32temp, PCI_FUNC(nesdev->pcidev->devfn));
1676 if (((nesdev->mac_index < 2) && ((u32temp&0x01010000) == 0x01010000)) ||
1677 ((nesdev->mac_index > 1) && ((u32temp&0x02020000) == 0x02020000))) {
1678 nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
1679 nesvnic->linkup = 1;
1680 }
1647 } 1681 }
1648 /* clear the MAC interrupt status, assumes direct logical to physical mapping */ 1682 /* clear the MAC interrupt status, assumes direct logical to physical mapping */
1649 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index)); 1683 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
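
Two small ethtool changes in the nes_nic.c hunks are easy to miss: NES_ETHTOOL_STAT_COUNT is now derived with ARRAY_SIZE() instead of a hand-maintained 55, so appending the three LRO statistics cannot leave the count stale, and get_flags/set_flags are hooked up so the LRO flag can be flipped at runtime through the ethtool flags interface. The counting pattern generalizes; a minimal standalone illustration follows, with demo_ names invented for the example.

    #include <linux/kernel.h>       /* ARRAY_SIZE() */
    #include <linux/ethtool.h>      /* ETH_GSTRING_LEN */

    static const char demo_stat_names[][ETH_GSTRING_LEN] = {
            "Link Change Interrupts",
            "LRO aggregated",
            "LRO flushed",
            "LRO no_desc",
    };
    #define DEMO_STAT_COUNT ARRAY_SIZE(demo_stat_names)

    /* Appending a string automatically grows the count reported to ethtool,
     * so the name table and the values array cannot drift apart by one. */
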
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index c6d5631a6995..fe83d1b2b177 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -444,15 +444,13 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16
444/** 444/**
445 * nes_write_10G_phy_reg 445 * nes_write_10G_phy_reg
446 */ 446 */
447void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg, 447void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_addr, u8 dev_addr, u16 phy_reg,
448 u8 phy_addr, u16 data) 448 u16 data)
449{ 449{
450 u32 dev_addr;
451 u32 port_addr; 450 u32 port_addr;
452 u32 u32temp; 451 u32 u32temp;
453 u32 counter; 452 u32 counter;
454 453
455 dev_addr = 1;
456 port_addr = phy_addr; 454 port_addr = phy_addr;
457 455
458 /* set address */ 456 /* set address */
@@ -492,14 +490,12 @@ void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg,
492 * This routine only issues the read, the data must be read 490 * This routine only issues the read, the data must be read
493 * separately. 491 * separately.
494 */ 492 */
495void nes_read_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg, u8 phy_addr) 493void nes_read_10G_phy_reg(struct nes_device *nesdev, u8 phy_addr, u8 dev_addr, u16 phy_reg)
496{ 494{
497 u32 dev_addr;
498 u32 port_addr; 495 u32 port_addr;
499 u32 u32temp; 496 u32 u32temp;
500 u32 counter; 497 u32 counter;
501 498
502 dev_addr = 1;
503 port_addr = phy_addr; 499 port_addr = phy_addr;
504 500
505 /* set address */ 501 /* set address */
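
The two MDIO helpers above change signature rather than behaviour: the clause-45 device address used to be hardcoded to 1 inside the functions and is now supplied by the caller (the nes_nic.c hunks pass 1, the PMA/PMD device, explicitly). Restated for reference, the resulting prototypes and a caller, copied from the hunks rather than newly designed:

    void nes_read_10G_phy_reg(struct nes_device *nesdev, u8 phy_addr,
                              u8 dev_addr, u16 phy_reg);
    void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_addr,
                               u8 dev_addr, u16 phy_reg, u16 data);

    /* caller side, as in nes_netdev_init(): read register 1 of MDIO device 1 */
    nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
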
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index ee74f7c7a6da..99b3c4ae86eb 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1266,7 +1266,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1266 sq_size = init_attr->cap.max_send_wr; 1266 sq_size = init_attr->cap.max_send_wr;
1267 rq_size = init_attr->cap.max_recv_wr; 1267 rq_size = init_attr->cap.max_recv_wr;
1268 1268
1269 // check if the encoded sizes are OK or not... 1269 /* check if the encoded sizes are OK or not... */
1270 sq_encoded_size = nes_get_encoded_size(&sq_size); 1270 sq_encoded_size = nes_get_encoded_size(&sq_size);
1271 rq_encoded_size = nes_get_encoded_size(&rq_size); 1271 rq_encoded_size = nes_get_encoded_size(&rq_size);
1272 1272
@@ -2377,7 +2377,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2377 u8 single_page = 1; 2377 u8 single_page = 1;
2378 u8 stag_key; 2378 u8 stag_key;
2379 2379
2380 region = ib_umem_get(pd->uobject->context, start, length, acc); 2380 region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
2381 if (IS_ERR(region)) { 2381 if (IS_ERR(region)) {
2382 return (struct ib_mr *)region; 2382 return (struct ib_mr *)region;
2383 } 2383 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index f1f142dc64b1..ca126fc2b853 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -95,6 +95,8 @@ enum {
95 IPOIB_MCAST_FLAG_SENDONLY = 1, 95 IPOIB_MCAST_FLAG_SENDONLY = 1,
96 IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ 96 IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
97 IPOIB_MCAST_FLAG_ATTACHED = 3, 97 IPOIB_MCAST_FLAG_ATTACHED = 3,
98
99 MAX_SEND_CQE = 16,
98}; 100};
99 101
100#define IPOIB_OP_RECV (1ul << 31) 102#define IPOIB_OP_RECV (1ul << 31)
@@ -285,7 +287,8 @@ struct ipoib_dev_priv {
285 u16 pkey_index; 287 u16 pkey_index;
286 struct ib_pd *pd; 288 struct ib_pd *pd;
287 struct ib_mr *mr; 289 struct ib_mr *mr;
288 struct ib_cq *cq; 290 struct ib_cq *recv_cq;
291 struct ib_cq *send_cq;
289 struct ib_qp *qp; 292 struct ib_qp *qp;
290 u32 qkey; 293 u32 qkey;
291 294
@@ -305,6 +308,7 @@ struct ipoib_dev_priv {
305 struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; 308 struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
306 struct ib_send_wr tx_wr; 309 struct ib_send_wr tx_wr;
307 unsigned tx_outstanding; 310 unsigned tx_outstanding;
311 struct ib_wc send_wc[MAX_SEND_CQE];
308 312
309 struct ib_recv_wr rx_wr; 313 struct ib_recv_wr rx_wr;
310 struct ib_sge rx_sge[IPOIB_UD_RX_SG]; 314 struct ib_sge rx_sge[IPOIB_UD_RX_SG];
@@ -330,6 +334,7 @@ struct ipoib_dev_priv {
330#endif 334#endif
331 int hca_caps; 335 int hca_caps;
332 struct ipoib_ethtool_st ethtool; 336 struct ipoib_ethtool_st ethtool;
337 struct timer_list poll_timer;
333}; 338};
334 339
335struct ipoib_ah { 340struct ipoib_ah {
@@ -400,6 +405,7 @@ extern struct workqueue_struct *ipoib_workqueue;
400 405
401int ipoib_poll(struct napi_struct *napi, int budget); 406int ipoib_poll(struct napi_struct *napi, int budget);
402void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr); 407void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
408void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
403 409
404struct ipoib_ah *ipoib_create_ah(struct net_device *dev, 410struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
405 struct ib_pd *pd, struct ib_ah_attr *attr); 411 struct ib_pd *pd, struct ib_ah_attr *attr);
@@ -662,7 +668,6 @@ static inline int ipoib_register_debugfs(void) { return 0; }
662static inline void ipoib_unregister_debugfs(void) { } 668static inline void ipoib_unregister_debugfs(void) { }
663#endif 669#endif
664 670
665
666#define ipoib_printk(level, priv, format, arg...) \ 671#define ipoib_printk(level, priv, format, arg...) \
667 printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg) 672 printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
668#define ipoib_warn(priv, format, arg...) \ 673#define ipoib_warn(priv, format, arg...) \
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 9db7b0bd9134..97e67d36378f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -249,8 +249,8 @@ static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
249 struct ipoib_dev_priv *priv = netdev_priv(dev); 249 struct ipoib_dev_priv *priv = netdev_priv(dev);
250 struct ib_qp_init_attr attr = { 250 struct ib_qp_init_attr attr = {
251 .event_handler = ipoib_cm_rx_event_handler, 251 .event_handler = ipoib_cm_rx_event_handler,
252 .send_cq = priv->cq, /* For drain WR */ 252 .send_cq = priv->recv_cq, /* For drain WR */
253 .recv_cq = priv->cq, 253 .recv_cq = priv->recv_cq,
254 .srq = priv->cm.srq, 254 .srq = priv->cm.srq,
255 .cap.max_send_wr = 1, /* For drain WR */ 255 .cap.max_send_wr = 1, /* For drain WR */
256 .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */ 256 .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
@@ -951,8 +951,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
951{ 951{
952 struct ipoib_dev_priv *priv = netdev_priv(dev); 952 struct ipoib_dev_priv *priv = netdev_priv(dev);
953 struct ib_qp_init_attr attr = { 953 struct ib_qp_init_attr attr = {
954 .send_cq = priv->cq, 954 .send_cq = priv->recv_cq,
955 .recv_cq = priv->cq, 955 .recv_cq = priv->recv_cq,
956 .srq = priv->cm.srq, 956 .srq = priv->cm.srq,
957 .cap.max_send_wr = ipoib_sendq_size, 957 .cap.max_send_wr = ipoib_sendq_size,
958 .cap.max_send_sge = 1, 958 .cap.max_send_sge = 1,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 9a47428366c9..10279b79c44d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -71,7 +71,7 @@ static int ipoib_set_coalesce(struct net_device *dev,
71 coal->rx_max_coalesced_frames > 0xffff) 71 coal->rx_max_coalesced_frames > 0xffff)
72 return -EINVAL; 72 return -EINVAL;
73 73
74 ret = ib_modify_cq(priv->cq, coal->rx_max_coalesced_frames, 74 ret = ib_modify_cq(priv->recv_cq, coal->rx_max_coalesced_frames,
75 coal->rx_coalesce_usecs); 75 coal->rx_coalesce_usecs);
76 if (ret && ret != -ENOSYS) { 76 if (ret && ret != -ENOSYS) {
77 ipoib_warn(priv, "failed modifying CQ (%d)\n", ret); 77 ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 7cf1fa7074ab..f429bce24c20 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -364,7 +364,6 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
364 struct ipoib_dev_priv *priv = netdev_priv(dev); 364 struct ipoib_dev_priv *priv = netdev_priv(dev);
365 unsigned int wr_id = wc->wr_id; 365 unsigned int wr_id = wc->wr_id;
366 struct ipoib_tx_buf *tx_req; 366 struct ipoib_tx_buf *tx_req;
367 unsigned long flags;
368 367
369 ipoib_dbg_data(priv, "send completion: id %d, status: %d\n", 368 ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
370 wr_id, wc->status); 369 wr_id, wc->status);
@@ -384,13 +383,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
384 383
385 dev_kfree_skb_any(tx_req->skb); 384 dev_kfree_skb_any(tx_req->skb);
386 385
387 spin_lock_irqsave(&priv->tx_lock, flags);
388 ++priv->tx_tail; 386 ++priv->tx_tail;
389 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && 387 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
390 netif_queue_stopped(dev) && 388 netif_queue_stopped(dev) &&
391 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 389 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
392 netif_wake_queue(dev); 390 netif_wake_queue(dev);
393 spin_unlock_irqrestore(&priv->tx_lock, flags);
394 391
395 if (wc->status != IB_WC_SUCCESS && 392 if (wc->status != IB_WC_SUCCESS &&
396 wc->status != IB_WC_WR_FLUSH_ERR) 393 wc->status != IB_WC_WR_FLUSH_ERR)
@@ -399,6 +396,17 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
399 wc->status, wr_id, wc->vendor_err); 396 wc->status, wr_id, wc->vendor_err);
400} 397}
401 398
399static int poll_tx(struct ipoib_dev_priv *priv)
400{
401 int n, i;
402
403 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
404 for (i = 0; i < n; ++i)
405 ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
406
407 return n == MAX_SEND_CQE;
408}
409
402int ipoib_poll(struct napi_struct *napi, int budget) 410int ipoib_poll(struct napi_struct *napi, int budget)
403{ 411{
404 struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi); 412 struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
@@ -414,7 +422,7 @@ poll_more:
414 int max = (budget - done); 422 int max = (budget - done);
415 423
416 t = min(IPOIB_NUM_WC, max); 424 t = min(IPOIB_NUM_WC, max);
417 n = ib_poll_cq(priv->cq, t, priv->ibwc); 425 n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
418 426
419 for (i = 0; i < n; i++) { 427 for (i = 0; i < n; i++) {
420 struct ib_wc *wc = priv->ibwc + i; 428 struct ib_wc *wc = priv->ibwc + i;
@@ -425,12 +433,8 @@ poll_more:
425 ipoib_cm_handle_rx_wc(dev, wc); 433 ipoib_cm_handle_rx_wc(dev, wc);
426 else 434 else
427 ipoib_ib_handle_rx_wc(dev, wc); 435 ipoib_ib_handle_rx_wc(dev, wc);
428 } else { 436 } else
429 if (wc->wr_id & IPOIB_OP_CM) 437 ipoib_cm_handle_tx_wc(priv->dev, wc);
430 ipoib_cm_handle_tx_wc(dev, wc);
431 else
432 ipoib_ib_handle_tx_wc(dev, wc);
433 }
434 } 438 }
435 439
436 if (n != t) 440 if (n != t)
@@ -439,7 +443,7 @@ poll_more:
439 443
440 if (done < budget) { 444 if (done < budget) {
441 netif_rx_complete(dev, napi); 445 netif_rx_complete(dev, napi);
442 if (unlikely(ib_req_notify_cq(priv->cq, 446 if (unlikely(ib_req_notify_cq(priv->recv_cq,
443 IB_CQ_NEXT_COMP | 447 IB_CQ_NEXT_COMP |
444 IB_CQ_REPORT_MISSED_EVENTS)) && 448 IB_CQ_REPORT_MISSED_EVENTS)) &&
445 netif_rx_reschedule(dev, napi)) 449 netif_rx_reschedule(dev, napi))
@@ -457,6 +461,26 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
457 netif_rx_schedule(dev, &priv->napi); 461 netif_rx_schedule(dev, &priv->napi);
458} 462}
459 463
464static void drain_tx_cq(struct net_device *dev)
465{
466 struct ipoib_dev_priv *priv = netdev_priv(dev);
467 unsigned long flags;
468
469 spin_lock_irqsave(&priv->tx_lock, flags);
470 while (poll_tx(priv))
471 ; /* nothing */
472
473 if (netif_queue_stopped(dev))
474 mod_timer(&priv->poll_timer, jiffies + 1);
475
476 spin_unlock_irqrestore(&priv->tx_lock, flags);
477}
478
479void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
480{
481 drain_tx_cq((struct net_device *)dev_ptr);
482}
483
460static inline int post_send(struct ipoib_dev_priv *priv, 484static inline int post_send(struct ipoib_dev_priv *priv,
461 unsigned int wr_id, 485 unsigned int wr_id,
462 struct ib_ah *address, u32 qpn, 486 struct ib_ah *address, u32 qpn,
@@ -551,23 +575,34 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
551 else 575 else
552 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 576 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
553 577
578 if (++priv->tx_outstanding == ipoib_sendq_size) {
579 ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
580 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
581 ipoib_warn(priv, "request notify on send CQ failed\n");
582 netif_stop_queue(dev);
583 }
584
554 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), 585 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
555 address->ah, qpn, tx_req, phead, hlen))) { 586 address->ah, qpn, tx_req, phead, hlen))) {
556 ipoib_warn(priv, "post_send failed\n"); 587 ipoib_warn(priv, "post_send failed\n");
557 ++dev->stats.tx_errors; 588 ++dev->stats.tx_errors;
589 --priv->tx_outstanding;
558 ipoib_dma_unmap_tx(priv->ca, tx_req); 590 ipoib_dma_unmap_tx(priv->ca, tx_req);
559 dev_kfree_skb_any(skb); 591 dev_kfree_skb_any(skb);
592 if (netif_queue_stopped(dev))
593 netif_wake_queue(dev);
560 } else { 594 } else {
561 dev->trans_start = jiffies; 595 dev->trans_start = jiffies;
562 596
563 address->last_send = priv->tx_head; 597 address->last_send = priv->tx_head;
564 ++priv->tx_head; 598 ++priv->tx_head;
599 skb_orphan(skb);
565 600
566 if (++priv->tx_outstanding == ipoib_sendq_size) {
567 ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
568 netif_stop_queue(dev);
569 }
570 } 601 }
602
603 if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
604 while (poll_tx(priv))
605 ; /* nothing */
571} 606}
572 607
573static void __ipoib_reap_ah(struct net_device *dev) 608static void __ipoib_reap_ah(struct net_device *dev)
@@ -601,6 +636,11 @@ void ipoib_reap_ah(struct work_struct *work)
601 round_jiffies_relative(HZ)); 636 round_jiffies_relative(HZ));
602} 637}
603 638
639static void ipoib_ib_tx_timer_func(unsigned long ctx)
640{
641 drain_tx_cq((struct net_device *)ctx);
642}
643
604int ipoib_ib_dev_open(struct net_device *dev) 644int ipoib_ib_dev_open(struct net_device *dev)
605{ 645{
606 struct ipoib_dev_priv *priv = netdev_priv(dev); 646 struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -637,6 +677,10 @@ int ipoib_ib_dev_open(struct net_device *dev)
637 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, 677 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
638 round_jiffies_relative(HZ)); 678 round_jiffies_relative(HZ));
639 679
680 init_timer(&priv->poll_timer);
681 priv->poll_timer.function = ipoib_ib_tx_timer_func;
682 priv->poll_timer.data = (unsigned long)dev;
683
640 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); 684 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
641 685
642 return 0; 686 return 0;
@@ -714,7 +758,7 @@ void ipoib_drain_cq(struct net_device *dev)
714 struct ipoib_dev_priv *priv = netdev_priv(dev); 758 struct ipoib_dev_priv *priv = netdev_priv(dev);
715 int i, n; 759 int i, n;
716 do { 760 do {
717 n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc); 761 n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
718 for (i = 0; i < n; ++i) { 762 for (i = 0; i < n; ++i) {
719 /* 763 /*
720 * Convert any successful completions to flush 764 * Convert any successful completions to flush
@@ -729,14 +773,13 @@ void ipoib_drain_cq(struct net_device *dev)
729 ipoib_cm_handle_rx_wc(dev, priv->ibwc + i); 773 ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
730 else 774 else
731 ipoib_ib_handle_rx_wc(dev, priv->ibwc + i); 775 ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
732 } else { 776 } else
733 if (priv->ibwc[i].wr_id & IPOIB_OP_CM) 777 ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
734 ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
735 else
736 ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
737 }
738 } 778 }
739 } while (n == IPOIB_NUM_WC); 779 } while (n == IPOIB_NUM_WC);
780
781 while (poll_tx(priv))
782 ; /* nothing */
740} 783}
741 784
742int ipoib_ib_dev_stop(struct net_device *dev, int flush) 785int ipoib_ib_dev_stop(struct net_device *dev, int flush)
@@ -803,6 +846,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
803 ipoib_dbg(priv, "All sends and receives done.\n"); 846 ipoib_dbg(priv, "All sends and receives done.\n");
804 847
805timeout: 848timeout:
849 del_timer_sync(&priv->poll_timer);
806 qp_attr.qp_state = IB_QPS_RESET; 850 qp_attr.qp_state = IB_QPS_RESET;
807 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) 851 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
808 ipoib_warn(priv, "Failed to modify QP to RESET state\n"); 852 ipoib_warn(priv, "Failed to modify QP to RESET state\n");
@@ -826,7 +870,7 @@ timeout:
826 msleep(1); 870 msleep(1);
827 } 871 }
828 872
829 ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP); 873 ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
830 874
831 return 0; 875 return 0;
832} 876}
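
The ipoib_ib.c hunks above move TX completion handling off the NAPI receive path onto a dedicated, normally unarmed send CQ. Because the pieces are spread across several hunks, the flow is summarized here in one place, using only names that appear in the diff; this is a condensed sketch, not additional code.

    /* ipoib_send(): stop the queue and arm the send CQ only when the ring fills */
    if (++priv->tx_outstanding == ipoib_sendq_size) {
            ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP);
            netif_stop_queue(dev);
    }
    /* ...post_send()... then reap opportunistically on the send path itself */
    if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
            while (poll_tx(priv))
                    ; /* nothing */

    /* send CQ event or poll_timer expiry -> drain_tx_cq(): poll until empty and,
     * if the queue is still stopped, retry one jiffy later */
    if (netif_queue_stopped(dev))
            mod_timer(&priv->poll_timer, jiffies + 1);
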
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7a4ed9d3d844..2442090ac8d1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1298,7 +1298,8 @@ static int __init ipoib_init_module(void)
1298 1298
1299 ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size); 1299 ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
1300 ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE); 1300 ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
1301 ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE); 1301 ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
1302 IPOIB_MIN_QUEUE_SIZE));
1302#ifdef CONFIG_INFINIBAND_IPOIB_CM 1303#ifdef CONFIG_INFINIBAND_IPOIB_CM
1303 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); 1304 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
1304#endif 1305#endif
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 07c03f178a49..8766d29ce3b7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -171,26 +171,34 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
171 goto out_free_pd; 171 goto out_free_pd;
172 } 172 }
173 173
174 size = ipoib_sendq_size + ipoib_recvq_size + 1; 174 size = ipoib_recvq_size + 1;
175 ret = ipoib_cm_dev_init(dev); 175 ret = ipoib_cm_dev_init(dev);
176 if (!ret) { 176 if (!ret) {
177 size += ipoib_sendq_size;
177 if (ipoib_cm_has_srq(dev)) 178 if (ipoib_cm_has_srq(dev))
178 size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */ 179 size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
179 else 180 else
180 size += ipoib_recvq_size * ipoib_max_conn_qp; 181 size += ipoib_recvq_size * ipoib_max_conn_qp;
181 } 182 }
182 183
183 priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0); 184 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
184 if (IS_ERR(priv->cq)) { 185 if (IS_ERR(priv->recv_cq)) {
185 printk(KERN_WARNING "%s: failed to create CQ\n", ca->name); 186 printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
186 goto out_free_mr; 187 goto out_free_mr;
187 } 188 }
188 189
189 if (ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP)) 190 priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
190 goto out_free_cq; 191 dev, ipoib_sendq_size, 0);
192 if (IS_ERR(priv->send_cq)) {
193 printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
194 goto out_free_recv_cq;
195 }
196
197 if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))
198 goto out_free_send_cq;
191 199
192 init_attr.send_cq = priv->cq; 200 init_attr.send_cq = priv->send_cq;
193 init_attr.recv_cq = priv->cq; 201 init_attr.recv_cq = priv->recv_cq;
194 202
195 if (priv->hca_caps & IB_DEVICE_UD_TSO) 203 if (priv->hca_caps & IB_DEVICE_UD_TSO)
196 init_attr.create_flags = IB_QP_CREATE_IPOIB_UD_LSO; 204 init_attr.create_flags = IB_QP_CREATE_IPOIB_UD_LSO;
@@ -201,7 +209,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
201 priv->qp = ib_create_qp(priv->pd, &init_attr); 209 priv->qp = ib_create_qp(priv->pd, &init_attr);
202 if (IS_ERR(priv->qp)) { 210 if (IS_ERR(priv->qp)) {
203 printk(KERN_WARNING "%s: failed to create QP\n", ca->name); 211 printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
204 goto out_free_cq; 212 goto out_free_send_cq;
205 } 213 }
206 214
207 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff; 215 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
@@ -230,8 +238,11 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
230 238
231 return 0; 239 return 0;
232 240
233out_free_cq: 241out_free_send_cq:
234 ib_destroy_cq(priv->cq); 242 ib_destroy_cq(priv->send_cq);
243
244out_free_recv_cq:
245 ib_destroy_cq(priv->recv_cq);
235 246
236out_free_mr: 247out_free_mr:
237 ib_dereg_mr(priv->mr); 248 ib_dereg_mr(priv->mr);
@@ -254,8 +265,11 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
254 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); 265 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
255 } 266 }
256 267
257 if (ib_destroy_cq(priv->cq)) 268 if (ib_destroy_cq(priv->send_cq))
258 ipoib_warn(priv, "ib_cq_destroy failed\n"); 269 ipoib_warn(priv, "ib_cq_destroy (send) failed\n");
270
271 if (ib_destroy_cq(priv->recv_cq))
272 ipoib_warn(priv, "ib_cq_destroy (recv) failed\n");
259 273
260 ipoib_cm_dev_cleanup(dev); 274 ipoib_cm_dev_cleanup(dev);
261 275
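
With the single priv->cq split in two, the receive CQ keeps the NAPI completion handler while the send CQ gets ipoib_send_comp_handler and stays unarmed in the common case: it is reaped from poll_tx() and armed from ipoib_send() only when the ring fills. Condensed from the hunk above, using this kernel's six-argument ib_create_cq():

    priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev,
                                 size, 0);
    priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL, dev,
                                 ipoib_sendq_size, 0);

    /* only the receive CQ is armed up front */
    if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))
            goto out_free_send_cq;

    init_attr.send_cq = priv->send_cq;
    init_attr.recv_cq = priv->recv_cq;
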
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 431fdeaa2dc4..1cdb5cfb0ff1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -90,6 +90,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
90 } 90 }
91 91
92 priv->max_ib_mtu = ppriv->max_ib_mtu; 92 priv->max_ib_mtu = ppriv->max_ib_mtu;
93 /* MTU will be reset when mcast join happens */
94 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
95 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
93 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); 96 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
94 97
95 priv->pkey = pkey; 98 priv->pkey = pkey;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index be1b9fbd416d..aeb58cae9a3f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -473,13 +473,15 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
473 stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */ 473 stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */
474 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; 474 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
475 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; 475 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
476 stats->custom_length = 3; 476 stats->custom_length = 4;
477 strcpy(stats->custom[0].desc, "qp_tx_queue_full"); 477 strcpy(stats->custom[0].desc, "qp_tx_queue_full");
478 stats->custom[0].value = 0; /* TB iser_conn->qp_tx_queue_full; */ 478 stats->custom[0].value = 0; /* TB iser_conn->qp_tx_queue_full; */
479 strcpy(stats->custom[1].desc, "fmr_map_not_avail"); 479 strcpy(stats->custom[1].desc, "fmr_map_not_avail");
480 stats->custom[1].value = 0; /* TB iser_conn->fmr_map_not_avail */; 480 stats->custom[1].value = 0; /* TB iser_conn->fmr_map_not_avail */;
481 strcpy(stats->custom[2].desc, "eh_abort_cnt"); 481 strcpy(stats->custom[2].desc, "eh_abort_cnt");
482 stats->custom[2].value = conn->eh_abort_cnt; 482 stats->custom[2].value = conn->eh_abort_cnt;
483 strcpy(stats->custom[3].desc, "fmr_unalign_cnt");
484 stats->custom[3].value = conn->fmr_unalign_cnt;
483} 485}
484 486
485static int 487static int
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 1ee867b1b341..a8c1b300e34d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -71,6 +71,13 @@
71 71
72#define iser_dbg(fmt, arg...) \ 72#define iser_dbg(fmt, arg...) \
73 do { \ 73 do { \
74 if (iser_debug_level > 1) \
75 printk(KERN_DEBUG PFX "%s:" fmt,\
76 __func__ , ## arg); \
77 } while (0)
78
79#define iser_warn(fmt, arg...) \
80 do { \
74 if (iser_debug_level > 0) \ 81 if (iser_debug_level > 0) \
75 printk(KERN_DEBUG PFX "%s:" fmt,\ 82 printk(KERN_DEBUG PFX "%s:" fmt,\
76 __func__ , ## arg); \ 83 __func__ , ## arg); \
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 4a17743a639f..cac50c4dc159 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -334,8 +334,11 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
334 struct scatterlist *sg; 334 struct scatterlist *sg;
335 int i; 335 int i;
336 336
337 if (iser_debug_level == 0)
338 return;
339
337 for_each_sg(sgl, sg, data->dma_nents, i) 340 for_each_sg(sgl, sg, data->dma_nents, i)
338 iser_err("sg[%d] dma_addr:0x%lX page:0x%p " 341 iser_warn("sg[%d] dma_addr:0x%lX page:0x%p "
339 "off:0x%x sz:0x%x dma_len:0x%x\n", 342 "off:0x%x sz:0x%x dma_len:0x%x\n",
340 i, (unsigned long)ib_sg_dma_address(ibdev, sg), 343 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
341 sg_page(sg), sg->offset, 344 sg_page(sg), sg->offset,
@@ -420,6 +423,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
420int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, 423int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
421 enum iser_data_dir cmd_dir) 424 enum iser_data_dir cmd_dir)
422{ 425{
426 struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
423 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; 427 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
424 struct iser_device *device = ib_conn->device; 428 struct iser_device *device = ib_conn->device;
425 struct ib_device *ibdev = device->ib_device; 429 struct ib_device *ibdev = device->ib_device;
@@ -434,7 +438,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
434 438
435 aligned_len = iser_data_buf_aligned_len(mem, ibdev); 439 aligned_len = iser_data_buf_aligned_len(mem, ibdev);
436 if (aligned_len != mem->dma_nents) { 440 if (aligned_len != mem->dma_nents) {
437 iser_err("rdma alignment violation %d/%d aligned\n", 441 iscsi_conn->fmr_unalign_cnt++;
442 iser_warn("rdma alignment violation %d/%d aligned\n",
438 aligned_len, mem->size); 443 aligned_len, mem->size);
439 iser_data_buf_dump(mem, ibdev); 444 iser_data_buf_dump(mem, ibdev);
440 445
diff --git a/drivers/input/input.c b/drivers/input/input.c
index f02c242c3114..27006fc18305 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -898,30 +898,26 @@ static int __init input_proc_init(void)
898{ 898{
899 struct proc_dir_entry *entry; 899 struct proc_dir_entry *entry;
900 900
901 proc_bus_input_dir = proc_mkdir("input", proc_bus); 901 proc_bus_input_dir = proc_mkdir("bus/input", NULL);
902 if (!proc_bus_input_dir) 902 if (!proc_bus_input_dir)
903 return -ENOMEM; 903 return -ENOMEM;
904 904
905 proc_bus_input_dir->owner = THIS_MODULE; 905 proc_bus_input_dir->owner = THIS_MODULE;
906 906
907 entry = create_proc_entry("devices", 0, proc_bus_input_dir); 907 entry = proc_create("devices", 0, proc_bus_input_dir,
908 &input_devices_fileops);
908 if (!entry) 909 if (!entry)
909 goto fail1; 910 goto fail1;
910 911
911 entry->owner = THIS_MODULE; 912 entry = proc_create("handlers", 0, proc_bus_input_dir,
912 entry->proc_fops = &input_devices_fileops; 913 &input_handlers_fileops);
913
914 entry = create_proc_entry("handlers", 0, proc_bus_input_dir);
915 if (!entry) 914 if (!entry)
916 goto fail2; 915 goto fail2;
917 916
918 entry->owner = THIS_MODULE;
919 entry->proc_fops = &input_handlers_fileops;
920
921 return 0; 917 return 0;
922 918
923 fail2: remove_proc_entry("devices", proc_bus_input_dir); 919 fail2: remove_proc_entry("devices", proc_bus_input_dir);
924 fail1: remove_proc_entry("input", proc_bus); 920 fail1: remove_proc_entry("bus/input", NULL);
925 return -ENOMEM; 921 return -ENOMEM;
926} 922}
927 923
@@ -929,7 +925,7 @@ static void input_proc_exit(void)
929{ 925{
930 remove_proc_entry("devices", proc_bus_input_dir); 926 remove_proc_entry("devices", proc_bus_input_dir);
931 remove_proc_entry("handlers", proc_bus_input_dir); 927 remove_proc_entry("handlers", proc_bus_input_dir);
932 remove_proc_entry("input", proc_bus); 928 remove_proc_entry("bus/input", NULL);
933} 929}
934 930
935#else /* !CONFIG_PROC_FS */ 931#else /* !CONFIG_PROC_FS */
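
The input.c hunk (like the isdn ones further down) collapses create_proc_entry() plus a manual proc_fops assignment into a single proc_create() call, so the proc entry never exists without its file operations attached. Below is a minimal module-style sketch of that pattern, assuming a seq_file show routine; the demo_* names are illustrative and not part of the patch.

    /* Sketch of the proc_create() idiom: create the entry and bind its
     * file_operations in one call, remove it again on module exit. */
    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int demo_proc_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "hello from proc_create\n");
            return 0;
    }

    static int demo_proc_open(struct inode *inode, struct file *file)
    {
            return single_open(file, demo_proc_show, NULL);
    }

    static const struct file_operations demo_proc_fops = {
            .owner   = THIS_MODULE,
            .open    = demo_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init demo_init(void)
    {
            /* One call creates the entry with its fops already in place. */
            if (!proc_create("demo", 0, NULL, &demo_proc_fops))
                    return -ENOMEM;
            return 0;
    }

    static void __exit demo_exit(void)
    {
            remove_proc_entry("demo", NULL);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");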
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 02b3ad8c0826..edfedd9a166c 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -69,6 +69,7 @@
69#include <linux/time.h> 69#include <linux/time.h>
70#include <linux/slab.h> 70#include <linux/slab.h>
71#include <linux/hil.h> 71#include <linux/hil.h>
72#include <linux/semaphore.h>
72#include <asm/io.h> 73#include <asm/io.h>
73#include <asm/system.h> 74#include <asm/system.h>
74 75
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index e1a3a79ab3f9..7ff71ba7b7c9 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -46,7 +46,7 @@ struct serport {
46static int serport_serio_write(struct serio *serio, unsigned char data) 46static int serport_serio_write(struct serio *serio, unsigned char data)
47{ 47{
48 struct serport *serport = serio->port_data; 48 struct serport *serport = serio->port_data;
49 return -(serport->tty->driver->write(serport->tty, &data, 1) != 1); 49 return -(serport->tty->ops->write(serport->tty, &data, 1) != 1);
50} 50}
51 51
52static int serport_serio_open(struct serio *serio) 52static int serport_serio_open(struct serio *serio)
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 1d759f6f8076..55c1134d6137 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -528,9 +528,9 @@ static void aiptek_irq(struct urb *urb)
528 (aiptek->curSetting.pointerMode)) { 528 (aiptek->curSetting.pointerMode)) {
529 aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; 529 aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED;
530 } else { 530 } else {
531 x = le16_to_cpu(get_unaligned((__le16 *) (data + 1))); 531 x = get_unaligned_le16(data + 1);
532 y = le16_to_cpu(get_unaligned((__le16 *) (data + 3))); 532 y = get_unaligned_le16(data + 3);
533 z = le16_to_cpu(get_unaligned((__le16 *) (data + 6))); 533 z = get_unaligned_le16(data + 6);
534 534
535 dv = (data[5] & 0x01) != 0 ? 1 : 0; 535 dv = (data[5] & 0x01) != 0 ? 1 : 0;
536 p = (data[5] & 0x02) != 0 ? 1 : 0; 536 p = (data[5] & 0x02) != 0 ? 1 : 0;
@@ -613,8 +613,8 @@ static void aiptek_irq(struct urb *urb)
613 (aiptek->curSetting.pointerMode)) { 613 (aiptek->curSetting.pointerMode)) {
614 aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; 614 aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED;
615 } else { 615 } else {
616 x = le16_to_cpu(get_unaligned((__le16 *) (data + 1))); 616 x = get_unaligned_le16(data + 1);
617 y = le16_to_cpu(get_unaligned((__le16 *) (data + 3))); 617 y = get_unaligned_le16(data + 3);
618 618
619 jitterable = data[5] & 0x1c; 619 jitterable = data[5] & 0x1c;
620 620
@@ -679,7 +679,7 @@ static void aiptek_irq(struct urb *urb)
679 pck = (data[1] & aiptek->curSetting.stylusButtonUpper) != 0 ? 1 : 0; 679 pck = (data[1] & aiptek->curSetting.stylusButtonUpper) != 0 ? 1 : 0;
680 680
681 macro = dv && p && tip && !(data[3] & 1) ? (data[3] >> 1) : -1; 681 macro = dv && p && tip && !(data[3] & 1) ? (data[3] >> 1) : -1;
682 z = le16_to_cpu(get_unaligned((__le16 *) (data + 4))); 682 z = get_unaligned_le16(data + 4);
683 683
684 if (dv) { 684 if (dv) {
685 /* If the selected tool changed, reset the old 685 /* If the selected tool changed, reset the old
@@ -757,7 +757,7 @@ static void aiptek_irq(struct urb *urb)
757 * hat switches (which just so happen to be the macroKeys.) 757 * hat switches (which just so happen to be the macroKeys.)
758 */ 758 */
759 else if (data[0] == 6) { 759 else if (data[0] == 6) {
760 macro = le16_to_cpu(get_unaligned((__le16 *) (data + 1))); 760 macro = get_unaligned_le16(data + 1);
761 if (macro > 0) { 761 if (macro > 0) {
762 input_report_key(inputdev, macroKeyEvents[macro - 1], 762 input_report_key(inputdev, macroKeyEvents[macro - 1],
763 0); 763 0);
@@ -952,7 +952,7 @@ aiptek_query(struct aiptek *aiptek, unsigned char command, unsigned char data)
952 buf[0], buf[1], buf[2]); 952 buf[0], buf[1], buf[2]);
953 ret = -EIO; 953 ret = -EIO;
954 } else { 954 } else {
955 ret = le16_to_cpu(get_unaligned((__le16 *) (buf + 1))); 955 ret = get_unaligned_le16(buf + 1);
956 } 956 }
957 kfree(buf); 957 kfree(buf);
958 return ret; 958 return ret;
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index f66ca215cdec..c5a8661a1baa 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -245,11 +245,11 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
245 data = report[i]; 245 data = report[i];
246 break; 246 break;
247 case 2: 247 case 2:
248 data16 = le16_to_cpu(get_unaligned((__le16 *)&report[i])); 248 data16 = get_unaligned_le16(&report[i]);
249 break; 249 break;
250 case 3: 250 case 3:
251 size = 4; 251 size = 4;
252 data32 = le32_to_cpu(get_unaligned((__le32 *)&report[i])); 252 data32 = get_unaligned_le32(&report[i]);
253 break; 253 break;
254 } 254 }
255 255
@@ -695,10 +695,10 @@ static void gtco_urb_callback(struct urb *urbinfo)
695 /* Fall thru */ 695 /* Fall thru */
696 case 1: 696 case 1:
697 /* All reports have X and Y coords in the same place */ 697 /* All reports have X and Y coords in the same place */
698 val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1])); 698 val = get_unaligned_le16(&device->buffer[1]);
699 input_report_abs(inputdev, ABS_X, val); 699 input_report_abs(inputdev, ABS_X, val);
700 700
701 val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3])); 701 val = get_unaligned_le16(&device->buffer[3]);
702 input_report_abs(inputdev, ABS_Y, val); 702 input_report_abs(inputdev, ABS_Y, val);
703 703
704 /* Ditto for proximity bit */ 704 /* Ditto for proximity bit */
@@ -762,7 +762,7 @@ static void gtco_urb_callback(struct urb *urbinfo)
762 le_buffer[1] = (u8)(device->buffer[4] >> 1); 762 le_buffer[1] = (u8)(device->buffer[4] >> 1);
763 le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7); 763 le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7);
764 764
765 val = le16_to_cpu(get_unaligned((__le16 *)le_buffer)); 765 val = get_unaligned_le16(le_buffer);
766 input_report_abs(inputdev, ABS_Y, val); 766 input_report_abs(inputdev, ABS_Y, val);
767 767
768 /* 768 /*
@@ -772,10 +772,10 @@ static void gtco_urb_callback(struct urb *urbinfo)
772 buttonbyte = device->buffer[5] >> 1; 772 buttonbyte = device->buffer[5] >> 1;
773 } else { 773 } else {
774 774
775 val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1])); 775 val = get_unaligned_le16(&device->buffer[1]);
776 input_report_abs(inputdev, ABS_X, val); 776 input_report_abs(inputdev, ABS_X, val);
777 777
778 val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3])); 778 val = get_unaligned_le16(&device->buffer[3]);
779 input_report_abs(inputdev, ABS_Y, val); 779 input_report_abs(inputdev, ABS_Y, val);
780 780
781 buttonbyte = device->buffer[5]; 781 buttonbyte = device->buffer[5];
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 1182fc133167..f23f5a97fb38 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -63,8 +63,8 @@ static void kbtab_irq(struct urb *urb)
63 goto exit; 63 goto exit;
64 } 64 }
65 65
66 kbtab->x = le16_to_cpu(get_unaligned((__le16 *) &data[1])); 66 kbtab->x = get_unaligned_le16(&data[1]);
67 kbtab->y = le16_to_cpu(get_unaligned((__le16 *) &data[3])); 67 kbtab->y = get_unaligned_le16(&data[3]);
68 68
69 kbtab->pressure = (data[5]); 69 kbtab->pressure = (data[5]);
70 70
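
The aiptek, gtco and kbtab hunks replace the open-coded le16_to_cpu(get_unaligned((__le16 *)p)) with the get_unaligned_le16(p) helper; both forms read a little-endian 16-bit value from a buffer offset that may not be 2-byte aligned. A small userspace sketch of the equivalent byte-wise read follows; read_le16() is a local stand-in, not the kernel helper.

    /* Assemble a little-endian u16 byte by byte, so alignment and host
     * endianness never matter. */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t read_le16(const void *p)
    {
            const uint8_t *b = p;

            return (uint16_t)b[0] | ((uint16_t)b[1] << 8);
    }

    int main(void)
    {
            /* Odd offsets into the buffer, as in the tablet report parsing above. */
            uint8_t report[5] = { 0x05, 0x34, 0x12, 0x78, 0x56 };

            printf("x=0x%04x y=0x%04x\n", read_le16(report + 1), read_le16(report + 3));
            return 0;       /* prints x=0x1234 y=0x5678 */
    }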
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 24c6b7ca62be..6ca0bb949ad3 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1111,11 +1111,12 @@ static int capinc_tty_write(struct tty_struct * tty,
1111 return count; 1111 return count;
1112} 1112}
1113 1113
1114static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch) 1114static int capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
1115{ 1115{
1116 struct capiminor *mp = (struct capiminor *)tty->driver_data; 1116 struct capiminor *mp = (struct capiminor *)tty->driver_data;
1117 struct sk_buff *skb; 1117 struct sk_buff *skb;
1118 unsigned long flags; 1118 unsigned long flags;
1119 int ret = 1;
1119 1120
1120#ifdef _DEBUG_TTYFUNCS 1121#ifdef _DEBUG_TTYFUNCS
1121 printk(KERN_DEBUG "capinc_put_char(%u)\n", ch); 1122 printk(KERN_DEBUG "capinc_put_char(%u)\n", ch);
@@ -1125,7 +1126,7 @@ static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
1125#ifdef _DEBUG_TTYFUNCS 1126#ifdef _DEBUG_TTYFUNCS
1126 printk(KERN_DEBUG "capinc_tty_put_char: mp or mp->ncci NULL\n"); 1127 printk(KERN_DEBUG "capinc_tty_put_char: mp or mp->ncci NULL\n");
1127#endif 1128#endif
1128 return; 1129 return 0;
1129 } 1130 }
1130 1131
1131 spin_lock_irqsave(&workaround_lock, flags); 1132 spin_lock_irqsave(&workaround_lock, flags);
@@ -1134,7 +1135,7 @@ static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
1134 if (skb_tailroom(skb) > 0) { 1135 if (skb_tailroom(skb) > 0) {
1135 *(skb_put(skb, 1)) = ch; 1136 *(skb_put(skb, 1)) = ch;
1136 spin_unlock_irqrestore(&workaround_lock, flags); 1137 spin_unlock_irqrestore(&workaround_lock, flags);
1137 return; 1138 return 1;
1138 } 1139 }
1139 mp->ttyskb = NULL; 1140 mp->ttyskb = NULL;
1140 skb_queue_tail(&mp->outqueue, skb); 1141 skb_queue_tail(&mp->outqueue, skb);
@@ -1148,8 +1149,10 @@ static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
1148 mp->ttyskb = skb; 1149 mp->ttyskb = skb;
1149 } else { 1150 } else {
1150 printk(KERN_ERR "capinc_put_char: char %u lost\n", ch); 1151 printk(KERN_ERR "capinc_put_char: char %u lost\n", ch);
1152 ret = 0;
1151 } 1153 }
1152 spin_unlock_irqrestore(&workaround_lock, flags); 1154 spin_unlock_irqrestore(&workaround_lock, flags);
1155 return ret;
1153} 1156}
1154 1157
1155static void capinc_tty_flush_chars(struct tty_struct *tty) 1158static void capinc_tty_flush_chars(struct tty_struct *tty)
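
The capi.c hunk above is part of the tty put_char prototype change from void to int: the driver now returns 1 when the character was queued and 0 when it had to be dropped. A toy userspace model of that contract against a fixed-size buffer; buf_put_char() is hypothetical and not the tty API.

    /* Model of the new put_char contract: 1 = queued, 0 = dropped. */
    #include <stdio.h>

    #define BUF_SIZE 4

    static unsigned char buf[BUF_SIZE];
    static unsigned int used;

    static int buf_put_char(unsigned char ch)
    {
            if (used == BUF_SIZE)
                    return 0;       /* no room: the caller may retry after a flush */
            buf[used++] = ch;
            return 1;               /* queued successfully */
    }

    int main(void)
    {
            const char *msg = "hello";
            int i;

            for (i = 0; msg[i]; i++)
                    printf("put '%c' -> %d\n", msg[i], buf_put_char((unsigned char)msg[i]));
            return 0;
    }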
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 845a797b0030..c29208bd7521 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -114,6 +114,7 @@ static int seq_contrstats_open(struct inode *inode, struct file *file)
114} 114}
115 115
116static const struct file_operations proc_controller_ops = { 116static const struct file_operations proc_controller_ops = {
117 .owner = THIS_MODULE,
117 .open = seq_controller_open, 118 .open = seq_controller_open,
118 .read = seq_read, 119 .read = seq_read,
119 .llseek = seq_lseek, 120 .llseek = seq_lseek,
@@ -121,6 +122,7 @@ static const struct file_operations proc_controller_ops = {
121}; 122};
122 123
123static const struct file_operations proc_contrstats_ops = { 124static const struct file_operations proc_contrstats_ops = {
125 .owner = THIS_MODULE,
124 .open = seq_contrstats_open, 126 .open = seq_contrstats_open,
125 .read = seq_read, 127 .read = seq_read,
126 .llseek = seq_lseek, 128 .llseek = seq_lseek,
@@ -219,6 +221,7 @@ seq_applstats_open(struct inode *inode, struct file *file)
219} 221}
220 222
221static const struct file_operations proc_applications_ops = { 223static const struct file_operations proc_applications_ops = {
224 .owner = THIS_MODULE,
222 .open = seq_applications_open, 225 .open = seq_applications_open,
223 .read = seq_read, 226 .read = seq_read,
224 .llseek = seq_lseek, 227 .llseek = seq_lseek,
@@ -226,21 +229,13 @@ static const struct file_operations proc_applications_ops = {
226}; 229};
227 230
228static const struct file_operations proc_applstats_ops = { 231static const struct file_operations proc_applstats_ops = {
232 .owner = THIS_MODULE,
229 .open = seq_applstats_open, 233 .open = seq_applstats_open,
230 .read = seq_read, 234 .read = seq_read,
231 .llseek = seq_lseek, 235 .llseek = seq_lseek,
232 .release = seq_release, 236 .release = seq_release,
233}; 237};
234 238
235static void
236create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
237{
238 struct proc_dir_entry *entry;
239 entry = create_proc_entry(name, mode, NULL);
240 if (entry)
241 entry->proc_fops = f;
242}
243
244// --------------------------------------------------------------------------- 239// ---------------------------------------------------------------------------
245 240
246static void *capi_driver_start(struct seq_file *seq, loff_t *pos) 241static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
@@ -283,6 +278,7 @@ seq_capi_driver_open(struct inode *inode, struct file *file)
283} 278}
284 279
285static const struct file_operations proc_driver_ops = { 280static const struct file_operations proc_driver_ops = {
281 .owner = THIS_MODULE,
286 .open = seq_capi_driver_open, 282 .open = seq_capi_driver_open,
287 .read = seq_read, 283 .read = seq_read,
288 .llseek = seq_lseek, 284 .llseek = seq_lseek,
@@ -296,11 +292,11 @@ kcapi_proc_init(void)
296{ 292{
297 proc_mkdir("capi", NULL); 293 proc_mkdir("capi", NULL);
298 proc_mkdir("capi/controllers", NULL); 294 proc_mkdir("capi/controllers", NULL);
299 create_seq_entry("capi/controller", 0, &proc_controller_ops); 295 proc_create("capi/controller", 0, NULL, &proc_controller_ops);
300 create_seq_entry("capi/contrstats", 0, &proc_contrstats_ops); 296 proc_create("capi/contrstats", 0, NULL, &proc_contrstats_ops);
301 create_seq_entry("capi/applications", 0, &proc_applications_ops); 297 proc_create("capi/applications", 0, NULL, &proc_applications_ops);
302 create_seq_entry("capi/applstats", 0, &proc_applstats_ops); 298 proc_create("capi/applstats", 0, NULL, &proc_applstats_ops);
303 create_seq_entry("capi/driver", 0, &proc_driver_ops); 299 proc_create("capi/driver", 0, NULL, &proc_driver_ops);
304} 300}
305 301
306void __exit 302void __exit
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 4fd4c46892e3..8b256a617c8a 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -288,13 +288,12 @@ divert_dev_init(void)
288 isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net); 288 isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net);
289 if (!isdn_proc_entry) 289 if (!isdn_proc_entry)
290 return (-1); 290 return (-1);
291 isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry); 291 isdn_divert_entry = proc_create("divert", S_IFREG | S_IRUGO,
292 isdn_proc_entry, &isdn_fops);
292 if (!isdn_divert_entry) { 293 if (!isdn_divert_entry) {
293 remove_proc_entry("isdn", init_net.proc_net); 294 remove_proc_entry("isdn", init_net.proc_net);
294 return (-1); 295 return (-1);
295 } 296 }
296 isdn_divert_entry->proc_fops = &isdn_fops;
297 isdn_divert_entry->owner = THIS_MODULE;
298#endif /* CONFIG_PROC_FS */ 297#endif /* CONFIG_PROC_FS */
299 298
300 return (0); 299 return (0);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index fceeb1d57682..45d1ee93cd39 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -68,10 +68,10 @@ static int write_modem(struct cardstate *cs)
68 struct tty_struct *tty = cs->hw.ser->tty; 68 struct tty_struct *tty = cs->hw.ser->tty;
69 struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ 69 struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
70 struct sk_buff *skb = bcs->tx_skb; 70 struct sk_buff *skb = bcs->tx_skb;
71 int sent; 71 int sent = -EOPNOTSUPP;
72 72
73 if (!tty || !tty->driver || !skb) 73 if (!tty || !tty->driver || !skb)
74 return -EFAULT; 74 return -EINVAL;
75 75
76 if (!skb->len) { 76 if (!skb->len) {
77 dev_kfree_skb_any(skb); 77 dev_kfree_skb_any(skb);
@@ -80,7 +80,8 @@ static int write_modem(struct cardstate *cs)
80 } 80 }
81 81
82 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 82 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
83 sent = tty->driver->write(tty, skb->data, skb->len); 83 if (tty->ops->write)
84 sent = tty->ops->write(tty, skb->data, skb->len);
84 gig_dbg(DEBUG_OUTPUT, "write_modem: sent %d", sent); 85 gig_dbg(DEBUG_OUTPUT, "write_modem: sent %d", sent);
85 if (sent < 0) { 86 if (sent < 0) {
86 /* error */ 87 /* error */
@@ -120,7 +121,7 @@ static int send_cb(struct cardstate *cs)
120 121
121 if (cb->len) { 122 if (cb->len) {
122 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 123 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
123 sent = tty->driver->write(tty, cb->buf + cb->offset, cb->len); 124 sent = tty->ops->write(tty, cb->buf + cb->offset, cb->len);
124 if (sent < 0) { 125 if (sent < 0) {
125 /* error */ 126 /* error */
126 gig_dbg(DEBUG_OUTPUT, "send_cb: write error %d", sent); 127 gig_dbg(DEBUG_OUTPUT, "send_cb: write error %d", sent);
@@ -440,14 +441,14 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsi
440 struct tty_struct *tty = cs->hw.ser->tty; 441 struct tty_struct *tty = cs->hw.ser->tty;
441 unsigned int set, clear; 442 unsigned int set, clear;
442 443
443 if (!tty || !tty->driver || !tty->driver->tiocmset) 444 if (!tty || !tty->driver || !tty->ops->tiocmset)
444 return -EFAULT; 445 return -EINVAL;
445 set = new_state & ~old_state; 446 set = new_state & ~old_state;
446 clear = old_state & ~new_state; 447 clear = old_state & ~new_state;
447 if (!set && !clear) 448 if (!set && !clear)
448 return 0; 449 return 0;
449 gig_dbg(DEBUG_IF, "tiocmset set %x clear %x", set, clear); 450 gig_dbg(DEBUG_IF, "tiocmset set %x clear %x", set, clear);
450 return tty->driver->tiocmset(tty, NULL, set, clear); 451 return tty->ops->tiocmset(tty, NULL, set, clear);
451} 452}
452 453
453static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) 454static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index 0632a2606998..fae895828a17 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -125,15 +125,11 @@ static const struct file_operations divas_fops = {
125 125
126int create_divas_proc(void) 126int create_divas_proc(void)
127{ 127{
128 divas_proc_entry = create_proc_entry(divas_proc_name, 128 proc_create(divas_proc_name, S_IFREG | S_IRUGO, proc_net_eicon,
129 S_IFREG | S_IRUGO, 129 &divas_fops);
130 proc_net_eicon);
131 if (!divas_proc_entry) 130 if (!divas_proc_entry)
132 return (0); 131 return (0);
133 132
134 divas_proc_entry->proc_fops = &divas_fops;
135 divas_proc_entry->owner = THIS_MODULE;
136
137 return (1); 133 return (1);
138} 134}
139 135
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 27d890b48f88..15906d005b05 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -370,6 +370,7 @@ hysdn_conf_close(struct inode *ino, struct file *filep)
370/******************************************************/ 370/******************************************************/
371static const struct file_operations conf_fops = 371static const struct file_operations conf_fops =
372{ 372{
373 .owner = THIS_MODULE,
373 .llseek = no_llseek, 374 .llseek = no_llseek,
374 .read = hysdn_conf_read, 375 .read = hysdn_conf_read,
375 .write = hysdn_conf_write, 376 .write = hysdn_conf_write,
@@ -402,11 +403,10 @@ hysdn_procconf_init(void)
402 while (card) { 403 while (card) {
403 404
404 sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid); 405 sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
405 if ((card->procconf = (void *) create_proc_entry(conf_name, 406 if ((card->procconf = (void *) proc_create(conf_name,
406 S_IFREG | S_IRUGO | S_IWUSR, 407 S_IFREG | S_IRUGO | S_IWUSR,
407 hysdn_proc_entry)) != NULL) { 408 hysdn_proc_entry,
408 ((struct proc_dir_entry *) card->procconf)->proc_fops = &conf_fops; 409 &conf_fops)) != NULL) {
409 ((struct proc_dir_entry *) card->procconf)->owner = THIS_MODULE;
410 hysdn_proclog_init(card); /* init the log file entry */ 410 hysdn_proclog_init(card); /* init the log file entry */
411 } 411 }
412 card = card->next; /* next entry */ 412 card = card->next; /* next entry */
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 27b3991fb0ec..8991d2c8ee4a 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -380,6 +380,7 @@ hysdn_log_poll(struct file *file, poll_table * wait)
380/**************************************************/ 380/**************************************************/
381static const struct file_operations log_fops = 381static const struct file_operations log_fops =
382{ 382{
383 .owner = THIS_MODULE,
383 .llseek = no_llseek, 384 .llseek = no_llseek,
384 .read = hysdn_log_read, 385 .read = hysdn_log_read,
385 .write = hysdn_log_write, 386 .write = hysdn_log_write,
@@ -402,10 +403,9 @@ hysdn_proclog_init(hysdn_card * card)
402 403
403 if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) { 404 if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
404 sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid); 405 sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
405 if ((pd->log = create_proc_entry(pd->log_name, S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry)) != NULL) { 406 pd->log = proc_create(pd->log_name,
406 pd->log->proc_fops = &log_fops; 407 S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry,
407 pd->log->owner = THIS_MODULE; 408 &log_fops);
408 }
409 409
410 init_waitqueue_head(&(pd->rd_queue)); 410 init_waitqueue_head(&(pd->rd_queue));
411 411
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 8af0df1d5b8c..1a2222cbb805 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1352,12 +1352,14 @@ isdn_tty_tiocmget(struct tty_struct *tty, struct file *file)
1352 if (tty->flags & (1 << TTY_IO_ERROR)) 1352 if (tty->flags & (1 << TTY_IO_ERROR))
1353 return -EIO; 1353 return -EIO;
1354 1354
1355 lock_kernel();
1355#ifdef ISDN_DEBUG_MODEM_IOCTL 1356#ifdef ISDN_DEBUG_MODEM_IOCTL
1356 printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line); 1357 printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line);
1357#endif 1358#endif
1358 1359
1359 control = info->mcr; 1360 control = info->mcr;
1360 status = info->msr; 1361 status = info->msr;
1362 unlock_kernel();
1361 return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) 1363 return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
1362 | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) 1364 | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
1363 | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) 1365 | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
@@ -1381,6 +1383,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
1381 printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear); 1383 printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear);
1382#endif 1384#endif
1383 1385
1386 lock_kernel();
1384 if (set & TIOCM_RTS) 1387 if (set & TIOCM_RTS)
1385 info->mcr |= UART_MCR_RTS; 1388 info->mcr |= UART_MCR_RTS;
1386 if (set & TIOCM_DTR) { 1389 if (set & TIOCM_DTR) {
@@ -1402,6 +1405,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
1402 isdn_tty_modem_hup(info, 1); 1405 isdn_tty_modem_hup(info, 1);
1403 } 1406 }
1404 } 1407 }
1408 unlock_kernel();
1405 return 0; 1409 return 0;
1406} 1410}
1407 1411
@@ -1435,21 +1439,6 @@ isdn_tty_ioctl(struct tty_struct *tty, struct file *file,
1435 return retval; 1439 return retval;
1436 tty_wait_until_sent(tty, 0); 1440 tty_wait_until_sent(tty, 0);
1437 return 0; 1441 return 0;
1438 case TIOCGSOFTCAR:
1439#ifdef ISDN_DEBUG_MODEM_IOCTL
1440 printk(KERN_DEBUG "ttyI%d ioctl TIOCGSOFTCAR\n", info->line);
1441#endif
1442 return put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg);
1443 case TIOCSSOFTCAR:
1444#ifdef ISDN_DEBUG_MODEM_IOCTL
1445 printk(KERN_DEBUG "ttyI%d ioctl TIOCSSOFTCAR\n", info->line);
1446#endif
1447 if (get_user(arg, (ulong __user *) arg))
1448 return -EFAULT;
1449 tty->termios->c_cflag =
1450 ((tty->termios->c_cflag & ~CLOCAL) |
1451 (arg ? CLOCAL : 0));
1452 return 0;
1453 case TIOCSERGETLSR: /* Get line status register */ 1442 case TIOCSERGETLSR: /* Get line status register */
1454#ifdef ISDN_DEBUG_MODEM_IOCTL 1443#ifdef ISDN_DEBUG_MODEM_IOCTL
1455 printk(KERN_DEBUG "ttyI%d ioctl TIOCSERGETLSR\n", info->line); 1444 printk(KERN_DEBUG "ttyI%d ioctl TIOCSERGETLSR\n", info->line);
@@ -1472,13 +1461,14 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
1472 if (!old_termios) 1461 if (!old_termios)
1473 isdn_tty_change_speed(info); 1462 isdn_tty_change_speed(info);
1474 else { 1463 else {
1475 if (tty->termios->c_cflag == old_termios->c_cflag) 1464 if (tty->termios->c_cflag == old_termios->c_cflag &&
1465 tty->termios->c_ispeed == old_termios->c_ispeed &&
1466 tty->termios->c_ospeed == old_termios->c_ospeed)
1476 return; 1467 return;
1477 isdn_tty_change_speed(info); 1468 isdn_tty_change_speed(info);
1478 if ((old_termios->c_cflag & CRTSCTS) && 1469 if ((old_termios->c_cflag & CRTSCTS) &&
1479 !(tty->termios->c_cflag & CRTSCTS)) { 1470 !(tty->termios->c_cflag & CRTSCTS))
1480 tty->hw_stopped = 0; 1471 tty->hw_stopped = 0;
1481 }
1482 } 1472 }
1483} 1473}
1484 1474
@@ -1718,9 +1708,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
1718 } 1708 }
1719 dev->modempoll--; 1709 dev->modempoll--;
1720 isdn_tty_shutdown(info); 1710 isdn_tty_shutdown(info);
1721 1711 isdn_tty_flush_buffer(tty);
1722 if (tty->driver->flush_buffer)
1723 tty->driver->flush_buffer(tty);
1724 tty_ldisc_flush(tty); 1712 tty_ldisc_flush(tty);
1725 info->tty = NULL; 1713 info->tty = NULL;
1726 info->ncarrier = 0; 1714 info->ncarrier = 0;
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index ac05a928f764..b3c54be74556 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -105,7 +105,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
105 105
106 led_cdev->dev = device_create(leds_class, parent, 0, "%s", 106 led_cdev->dev = device_create(leds_class, parent, 0, "%s",
107 led_cdev->name); 107 led_cdev->name);
108 if (unlikely(IS_ERR(led_cdev->dev))) 108 if (IS_ERR(led_cdev->dev))
109 return PTR_ERR(led_cdev->dev); 109 return PTR_ERR(led_cdev->dev);
110 110
111 dev_set_drvdata(led_cdev->dev, led_cdev); 111 dev_set_drvdata(led_cdev->dev, led_cdev);
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 2bc9bf7e88e5..8080249957af 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -85,27 +85,34 @@ static unsigned desc_size(const struct lguest_device_desc *desc)
85 + desc->config_len; 85 + desc->config_len;
86} 86}
87 87
88/* This tests (and acknowleges) a feature bit. */ 88/* This gets the device's feature bits. */
89static bool lg_feature(struct virtio_device *vdev, unsigned fbit) 89static u32 lg_get_features(struct virtio_device *vdev)
90{ 90{
91 unsigned int i;
92 u32 features = 0;
91 struct lguest_device_desc *desc = to_lgdev(vdev)->desc; 93 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
92 u8 *features; 94 u8 *in_features = lg_features(desc);
93 95
94 /* Obviously if they ask for a feature off the end of our feature 96 /* We do this the slow but generic way. */
95 * bitmap, it's not set. */ 97 for (i = 0; i < min(desc->feature_len * 8, 32); i++)
96 if (fbit / 8 > desc->feature_len) 98 if (in_features[i / 8] & (1 << (i % 8)))
97 return false; 99 features |= (1 << i);
98 100
99 /* The feature bitmap comes after the virtqueues. */ 101 return features;
100 features = lg_features(desc); 102}
101 if (!(features[fbit / 8] & (1 << (fbit % 8)))) 103
102 return false; 104static void lg_set_features(struct virtio_device *vdev, u32 features)
103 105{
104 /* We set the matching bit in the other half of the bitmap to tell the 106 unsigned int i;
105 * Host we want to use this feature. We don't use this yet, but we 107 struct lguest_device_desc *desc = to_lgdev(vdev)->desc;
106 * could in future. */ 108 /* Second half of bitmap is features we accept. */
107 features[desc->feature_len + fbit / 8] |= (1 << (fbit % 8)); 109 u8 *out_features = lg_features(desc) + desc->feature_len;
108 return true; 110
111 memset(out_features, 0, desc->feature_len);
112 for (i = 0; i < min(desc->feature_len * 8, 32); i++) {
113 if (features & (1 << i))
114 out_features[i / 8] |= (1 << (i % 8));
115 }
109} 116}
110 117
111/* Once they've found a field, getting a copy of it is easy. */ 118/* Once they've found a field, getting a copy of it is easy. */
@@ -137,20 +144,26 @@ static u8 lg_get_status(struct virtio_device *vdev)
137 return to_lgdev(vdev)->desc->status; 144 return to_lgdev(vdev)->desc->status;
138} 145}
139 146
147/* To notify on status updates, we (ab)use the NOTIFY hypercall, with the
148 * descriptor address of the device. A zero status means "reset". */
149static void set_status(struct virtio_device *vdev, u8 status)
150{
151 unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
152
153 /* We set the status. */
154 to_lgdev(vdev)->desc->status = status;
155 hcall(LHCALL_NOTIFY, (max_pfn<<PAGE_SHIFT) + offset, 0, 0);
156}
157
140static void lg_set_status(struct virtio_device *vdev, u8 status) 158static void lg_set_status(struct virtio_device *vdev, u8 status)
141{ 159{
142 BUG_ON(!status); 160 BUG_ON(!status);
143 to_lgdev(vdev)->desc->status = status; 161 set_status(vdev, status);
144} 162}
145 163
146/* To reset the device, we (ab)use the NOTIFY hypercall, with the descriptor
147 * address of the device. The Host will zero the status and all the
148 * features. */
149static void lg_reset(struct virtio_device *vdev) 164static void lg_reset(struct virtio_device *vdev)
150{ 165{
151 unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; 166 set_status(vdev, 0);
152
153 hcall(LHCALL_NOTIFY, (max_pfn<<PAGE_SHIFT) + offset, 0, 0);
154} 167}
155 168
156/* 169/*
@@ -286,7 +299,8 @@ static void lg_del_vq(struct virtqueue *vq)
286 299
287/* The ops structure which hooks everything together. */ 300/* The ops structure which hooks everything together. */
288static struct virtio_config_ops lguest_config_ops = { 301static struct virtio_config_ops lguest_config_ops = {
289 .feature = lg_feature, 302 .get_features = lg_get_features,
303 .set_features = lg_set_features,
290 .get = lg_get, 304 .get = lg_get,
291 .set = lg_set, 305 .set = lg_set,
292 .get_status = lg_get_status, 306 .get_status = lg_get_status,
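
The lguest_device.c hunk above drops the per-bit lg_feature() test in favour of whole-bitmap conversions: lg_get_features() packs the device's byte array into a u32, and lg_set_features() writes the accepted mask back into the second half of the bitmap. A userspace sketch of that round trip follows; the helpers are local illustrations, not lguest code.

    /* Convert between a little-endian byte bitmap and a 32-bit feature mask. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t bitmap_to_u32(const uint8_t *in, unsigned int len_bytes)
    {
            uint32_t features = 0;
            unsigned int i, nbits = len_bytes * 8 < 32 ? len_bytes * 8 : 32;

            for (i = 0; i < nbits; i++)
                    if (in[i / 8] & (1u << (i % 8)))
                            features |= 1u << i;
            return features;
    }

    static void u32_to_bitmap(uint32_t features, uint8_t *out, unsigned int len_bytes)
    {
            unsigned int i, nbits = len_bytes * 8 < 32 ? len_bytes * 8 : 32;

            memset(out, 0, len_bytes);
            for (i = 0; i < nbits; i++)
                    if (features & (1u << i))
                            out[i / 8] |= 1u << (i % 8);
    }

    int main(void)
    {
            uint8_t offered[2] = { 0x05, 0x01 };    /* bits 0, 2 and 8 offered */
            uint8_t accepted[2];
            uint32_t f = bitmap_to_u32(offered, sizeof(offered));

            u32_to_bitmap(f & ~(1u << 2), accepted, sizeof(accepted));      /* accept all but bit 2 */
            printf("offered 0x%08x, accepted bytes %02x %02x\n", f, accepted[0], accepted[1]);
            return 0;       /* offered 0x00000105, accepted bytes 01 01 */
    }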
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 645e6e040bfb..e73a000473cc 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -102,7 +102,7 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
102static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) 102static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
103{ 103{
104 /* We have a limited number of CPUs in the lguest struct. */ 104 /* We have a limited number of CPUs in the lguest struct. */
105 if (id >= NR_CPUS) 105 if (id >= ARRAY_SIZE(cpu->lg->cpus))
106 return -EINVAL; 106 return -EINVAL;
107 107
108 /* Set up this CPU's id, and pointer back to the lguest struct. */ 108 /* Set up this CPU's id, and pointer back to the lguest struct. */
@@ -251,8 +251,6 @@ static ssize_t write(struct file *file, const char __user *in,
251 if (!lg || (cpu_id >= lg->nr_cpus)) 251 if (!lg || (cpu_id >= lg->nr_cpus))
252 return -EINVAL; 252 return -EINVAL;
253 cpu = &lg->cpus[cpu_id]; 253 cpu = &lg->cpus[cpu_id];
254 if (!cpu)
255 return -EINVAL;
256 254
257 /* Once the Guest is dead, you can only read() why it died. */ 255 /* Once the Guest is dead, you can only read() why it died. */
258 if (lg->dead) 256 if (lg->dead)
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 77f50b63a970..b52659620d50 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -234,6 +234,14 @@ config WINDFARM_PM112
234 which are the recent dual and quad G5 machines using the 234 which are the recent dual and quad G5 machines using the
235 970MP dual-core processor. 235 970MP dual-core processor.
236 236
237config WINDFARM_PM121
238 tristate "Support for thermal management on PowerMac12,1"
239 depends on WINDFARM && I2C && PMAC_SMU
240 select I2C_POWERMAC
241 help
242 This driver provides thermal control for the PowerMac12,1
243 which is the iMac G5 (iSight).
244
237config ANSLCD 245config ANSLCD
238 tristate "Support for ANS LCD display" 246 tristate "Support for ANS LCD display"
239 depends on ADB_CUDA && PPC_PMAC 247 depends on ADB_CUDA && PPC_PMAC
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 2dfc3f4eaf42..e3132efa17c0 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -42,4 +42,9 @@ obj-$(CONFIG_WINDFARM_PM112) += windfarm_pm112.o windfarm_smu_sat.o \
42 windfarm_smu_sensors.o \ 42 windfarm_smu_sensors.o \
43 windfarm_max6690_sensor.o \ 43 windfarm_max6690_sensor.o \
44 windfarm_lm75_sensor.o windfarm_pid.o 44 windfarm_lm75_sensor.o windfarm_pid.o
45obj-$(CONFIG_WINDFARM_PM121) += windfarm_pm121.o windfarm_smu_sat.o \
46 windfarm_smu_controls.o \
47 windfarm_smu_sensors.o \
48 windfarm_max6690_sensor.o \
49 windfarm_lm75_sensor.o windfarm_pid.o
45obj-$(CONFIG_PMAC_RACKMETER) += rack-meter.o 50obj-$(CONFIG_PMAC_RACKMETER) += rack-meter.o
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 20978205cd02..b8b9e44f7f4e 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -37,7 +37,7 @@
37#include <linux/device.h> 37#include <linux/device.h>
38#include <linux/kthread.h> 38#include <linux/kthread.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/semaphore.h> 40#include <linux/mutex.h>
41 41
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#ifdef CONFIG_PPC 43#ifdef CONFIG_PPC
@@ -102,7 +102,7 @@ static struct adb_handler {
102} adb_handler[16]; 102} adb_handler[16];
103 103
104/* 104/*
105 * The adb_handler_sem mutex protects all accesses to the original_address 105 * The adb_handler_mutex mutex protects all accesses to the original_address
106 * and handler_id fields of adb_handler[i] for all i, and changes to the 106 * and handler_id fields of adb_handler[i] for all i, and changes to the
107 * handler field. 107 * handler field.
108 * Accesses to the handler field are protected by the adb_handler_lock 108 * Accesses to the handler field are protected by the adb_handler_lock
@@ -110,7 +110,7 @@ static struct adb_handler {
110 * time adb_unregister returns, we know that the old handler isn't being 110 * time adb_unregister returns, we know that the old handler isn't being
111 * called. 111 * called.
112 */ 112 */
113static DECLARE_MUTEX(adb_handler_sem); 113static DEFINE_MUTEX(adb_handler_mutex);
114static DEFINE_RWLOCK(adb_handler_lock); 114static DEFINE_RWLOCK(adb_handler_lock);
115 115
116#if 0 116#if 0
@@ -355,7 +355,7 @@ do_adb_reset_bus(void)
355 msleep(500); 355 msleep(500);
356 } 356 }
357 357
358 down(&adb_handler_sem); 358 mutex_lock(&adb_handler_mutex);
359 write_lock_irq(&adb_handler_lock); 359 write_lock_irq(&adb_handler_lock);
360 memset(adb_handler, 0, sizeof(adb_handler)); 360 memset(adb_handler, 0, sizeof(adb_handler));
361 write_unlock_irq(&adb_handler_lock); 361 write_unlock_irq(&adb_handler_lock);
@@ -376,7 +376,7 @@ do_adb_reset_bus(void)
376 if (adb_controller->autopoll) 376 if (adb_controller->autopoll)
377 adb_controller->autopoll(autopoll_devs); 377 adb_controller->autopoll(autopoll_devs);
378 } 378 }
379 up(&adb_handler_sem); 379 mutex_unlock(&adb_handler_mutex);
380 380
381 blocking_notifier_call_chain(&adb_client_list, 381 blocking_notifier_call_chain(&adb_client_list,
382 ADB_MSG_POST_RESET, NULL); 382 ADB_MSG_POST_RESET, NULL);
@@ -454,7 +454,7 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids,
454{ 454{
455 int i; 455 int i;
456 456
457 down(&adb_handler_sem); 457 mutex_lock(&adb_handler_mutex);
458 ids->nids = 0; 458 ids->nids = 0;
459 for (i = 1; i < 16; i++) { 459 for (i = 1; i < 16; i++) {
460 if ((adb_handler[i].original_address == default_id) && 460 if ((adb_handler[i].original_address == default_id) &&
@@ -472,7 +472,7 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids,
472 ids->id[ids->nids++] = i; 472 ids->id[ids->nids++] = i;
473 } 473 }
474 } 474 }
475 up(&adb_handler_sem); 475 mutex_unlock(&adb_handler_mutex);
476 return ids->nids; 476 return ids->nids;
477} 477}
478 478
@@ -481,7 +481,7 @@ adb_unregister(int index)
481{ 481{
482 int ret = -ENODEV; 482 int ret = -ENODEV;
483 483
484 down(&adb_handler_sem); 484 mutex_lock(&adb_handler_mutex);
485 write_lock_irq(&adb_handler_lock); 485 write_lock_irq(&adb_handler_lock);
486 if (adb_handler[index].handler) { 486 if (adb_handler[index].handler) {
487 while(adb_handler[index].busy) { 487 while(adb_handler[index].busy) {
@@ -493,7 +493,7 @@ adb_unregister(int index)
493 adb_handler[index].handler = NULL; 493 adb_handler[index].handler = NULL;
494 } 494 }
495 write_unlock_irq(&adb_handler_lock); 495 write_unlock_irq(&adb_handler_lock);
496 up(&adb_handler_sem); 496 mutex_unlock(&adb_handler_mutex);
497 return ret; 497 return ret;
498} 498}
499 499
@@ -557,19 +557,19 @@ adb_try_handler_change(int address, int new_id)
557{ 557{
558 int ret; 558 int ret;
559 559
560 down(&adb_handler_sem); 560 mutex_lock(&adb_handler_mutex);
561 ret = try_handler_change(address, new_id); 561 ret = try_handler_change(address, new_id);
562 up(&adb_handler_sem); 562 mutex_unlock(&adb_handler_mutex);
563 return ret; 563 return ret;
564} 564}
565 565
566int 566int
567adb_get_infos(int address, int *original_address, int *handler_id) 567adb_get_infos(int address, int *original_address, int *handler_id)
568{ 568{
569 down(&adb_handler_sem); 569 mutex_lock(&adb_handler_mutex);
570 *original_address = adb_handler[address].original_address; 570 *original_address = adb_handler[address].original_address;
571 *handler_id = adb_handler[address].handler_id; 571 *handler_id = adb_handler[address].handler_id;
572 up(&adb_handler_sem); 572 mutex_unlock(&adb_handler_mutex);
573 573
574 return (*original_address != 0); 574 return (*original_address != 0);
575} 575}
@@ -628,10 +628,10 @@ do_adb_query(struct adb_request *req)
628 case ADB_QUERY_GETDEVINFO: 628 case ADB_QUERY_GETDEVINFO:
629 if (req->nbytes < 3) 629 if (req->nbytes < 3)
630 break; 630 break;
631 down(&adb_handler_sem); 631 mutex_lock(&adb_handler_mutex);
632 req->reply[0] = adb_handler[req->data[2]].original_address; 632 req->reply[0] = adb_handler[req->data[2]].original_address;
633 req->reply[1] = adb_handler[req->data[2]].handler_id; 633 req->reply[1] = adb_handler[req->data[2]].handler_id;
634 up(&adb_handler_sem); 634 mutex_unlock(&adb_handler_mutex);
635 req->complete = 1; 635 req->complete = 1;
636 req->reply_len = 2; 636 req->reply_len = 2;
637 adb_write_done(req); 637 adb_write_done(req);
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 1e0a69a5e815..ddfb426a9abd 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -122,6 +122,7 @@
122#include <linux/kmod.h> 122#include <linux/kmod.h>
123#include <linux/i2c.h> 123#include <linux/i2c.h>
124#include <linux/kthread.h> 124#include <linux/kthread.h>
125#include <linux/mutex.h>
125#include <asm/prom.h> 126#include <asm/prom.h>
126#include <asm/machdep.h> 127#include <asm/machdep.h>
127#include <asm/io.h> 128#include <asm/io.h>
@@ -169,7 +170,7 @@ static int rackmac;
169static s32 dimm_output_clamp; 170static s32 dimm_output_clamp;
170static int fcu_rpm_shift; 171static int fcu_rpm_shift;
171static int fcu_tickle_ticks; 172static int fcu_tickle_ticks;
172static DECLARE_MUTEX(driver_lock); 173static DEFINE_MUTEX(driver_lock);
173 174
174/* 175/*
175 * We have 3 types of CPU PID control. One is "split" old style control 176 * We have 3 types of CPU PID control. One is "split" old style control
@@ -729,9 +730,9 @@ static void fetch_cpu_pumps_minmax(void)
729static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \ 730static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
730{ \ 731{ \
731 ssize_t r; \ 732 ssize_t r; \
732 down(&driver_lock); \ 733 mutex_lock(&driver_lock); \
733 r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data)); \ 734 r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data)); \
734 up(&driver_lock); \ 735 mutex_unlock(&driver_lock); \
735 return r; \ 736 return r; \
736} 737}
737#define BUILD_SHOW_FUNC_INT(name, data) \ 738#define BUILD_SHOW_FUNC_INT(name, data) \
@@ -1803,11 +1804,11 @@ static int main_control_loop(void *x)
1803{ 1804{
1804 DBG("main_control_loop started\n"); 1805 DBG("main_control_loop started\n");
1805 1806
1806 down(&driver_lock); 1807 mutex_lock(&driver_lock);
1807 1808
1808 if (start_fcu() < 0) { 1809 if (start_fcu() < 0) {
1809 printk(KERN_ERR "kfand: failed to start FCU\n"); 1810 printk(KERN_ERR "kfand: failed to start FCU\n");
1810 up(&driver_lock); 1811 mutex_unlock(&driver_lock);
1811 goto out; 1812 goto out;
1812 } 1813 }
1813 1814
@@ -1822,14 +1823,14 @@ static int main_control_loop(void *x)
1822 1823
1823 fcu_tickle_ticks = FCU_TICKLE_TICKS; 1824 fcu_tickle_ticks = FCU_TICKLE_TICKS;
1824 1825
1825 up(&driver_lock); 1826 mutex_unlock(&driver_lock);
1826 1827
1827 while (state == state_attached) { 1828 while (state == state_attached) {
1828 unsigned long elapsed, start; 1829 unsigned long elapsed, start;
1829 1830
1830 start = jiffies; 1831 start = jiffies;
1831 1832
1832 down(&driver_lock); 1833 mutex_lock(&driver_lock);
1833 1834
1834 /* Tickle the FCU just in case */ 1835 /* Tickle the FCU just in case */
1835 if (--fcu_tickle_ticks < 0) { 1836 if (--fcu_tickle_ticks < 0) {
@@ -1861,7 +1862,7 @@ static int main_control_loop(void *x)
1861 do_monitor_slots(&slots_state); 1862 do_monitor_slots(&slots_state);
1862 else 1863 else
1863 do_monitor_drives(&drives_state); 1864 do_monitor_drives(&drives_state);
1864 up(&driver_lock); 1865 mutex_unlock(&driver_lock);
1865 1866
1866 if (critical_state == 1) { 1867 if (critical_state == 1) {
1867 printk(KERN_WARNING "Temperature control detected a critical condition\n"); 1868 printk(KERN_WARNING "Temperature control detected a critical condition\n");
@@ -2019,13 +2020,13 @@ static void detach_fcu(void)
2019 */ 2020 */
2020static int therm_pm72_attach(struct i2c_adapter *adapter) 2021static int therm_pm72_attach(struct i2c_adapter *adapter)
2021{ 2022{
2022 down(&driver_lock); 2023 mutex_lock(&driver_lock);
2023 2024
2024 /* Check state */ 2025 /* Check state */
2025 if (state == state_detached) 2026 if (state == state_detached)
2026 state = state_attaching; 2027 state = state_attaching;
2027 if (state != state_attaching) { 2028 if (state != state_attaching) {
2028 up(&driver_lock); 2029 mutex_unlock(&driver_lock);
2029 return 0; 2030 return 0;
2030 } 2031 }
2031 2032
@@ -2054,7 +2055,7 @@ static int therm_pm72_attach(struct i2c_adapter *adapter)
2054 state = state_attached; 2055 state = state_attached;
2055 start_control_loops(); 2056 start_control_loops();
2056 } 2057 }
2057 up(&driver_lock); 2058 mutex_unlock(&driver_lock);
2058 2059
2059 return 0; 2060 return 0;
2060} 2061}
@@ -2065,16 +2066,16 @@ static int therm_pm72_attach(struct i2c_adapter *adapter)
2065 */ 2066 */
2066static int therm_pm72_detach(struct i2c_adapter *adapter) 2067static int therm_pm72_detach(struct i2c_adapter *adapter)
2067{ 2068{
2068 down(&driver_lock); 2069 mutex_lock(&driver_lock);
2069 2070
2070 if (state != state_detached) 2071 if (state != state_detached)
2071 state = state_detaching; 2072 state = state_detaching;
2072 2073
2073 /* Stop control loops if any */ 2074 /* Stop control loops if any */
2074 DBG("stopping control loops\n"); 2075 DBG("stopping control loops\n");
2075 up(&driver_lock); 2076 mutex_unlock(&driver_lock);
2076 stop_control_loops(); 2077 stop_control_loops();
2077 down(&driver_lock); 2078 mutex_lock(&driver_lock);
2078 2079
2079 if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) { 2080 if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) {
2080 DBG("lost U3-0, disposing control loops\n"); 2081 DBG("lost U3-0, disposing control loops\n");
@@ -2090,7 +2091,7 @@ static int therm_pm72_detach(struct i2c_adapter *adapter)
2090 if (u3_0 == NULL && u3_1 == NULL) 2091 if (u3_0 == NULL && u3_1 == NULL)
2091 state = state_detached; 2092 state = state_detached;
2092 2093
2093 up(&driver_lock); 2094 mutex_unlock(&driver_lock);
2094 2095
2095 return 0; 2096 return 0;
2096} 2097}
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 7e10c3ab4d50..b92b959fe16e 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -127,6 +127,12 @@ static struct wf_lm75_sensor *wf_lm75_create(struct i2c_adapter *adapter,
127 */ 127 */
128 if (!strcmp(loc, "Hard drive") || !strcmp(loc, "DRIVE BAY")) 128 if (!strcmp(loc, "Hard drive") || !strcmp(loc, "DRIVE BAY"))
129 lm->sens.name = "hd-temp"; 129 lm->sens.name = "hd-temp";
130 else if (!strcmp(loc, "Incoming Air Temp"))
131 lm->sens.name = "incoming-air-temp";
132 else if (!strcmp(loc, "ODD Temp"))
133 lm->sens.name = "optical-drive-temp";
134 else if (!strcmp(loc, "HD Temp"))
135 lm->sens.name = "hard-drive-temp";
130 else 136 else
131 goto fail; 137 goto fail;
132 138
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 5f03aab9fb5d..e207a90d6b27 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -77,18 +77,28 @@ static struct wf_sensor_ops wf_max6690_ops = {
77 .owner = THIS_MODULE, 77 .owner = THIS_MODULE,
78}; 78};
79 79
80static void wf_max6690_create(struct i2c_adapter *adapter, u8 addr) 80static void wf_max6690_create(struct i2c_adapter *adapter, u8 addr,
81 const char *loc)
81{ 82{
82 struct wf_6690_sensor *max; 83 struct wf_6690_sensor *max;
83 char *name = "backside-temp"; 84 char *name;
84 85
85 max = kzalloc(sizeof(struct wf_6690_sensor), GFP_KERNEL); 86 max = kzalloc(sizeof(struct wf_6690_sensor), GFP_KERNEL);
86 if (max == NULL) { 87 if (max == NULL) {
87 printk(KERN_ERR "windfarm: Couldn't create MAX6690 sensor %s: " 88 printk(KERN_ERR "windfarm: Couldn't create MAX6690 sensor %s: "
88 "no memory\n", name); 89 "no memory\n", loc);
89 return; 90 return;
90 } 91 }
91 92
93 if (!strcmp(loc, "BACKSIDE"))
94 name = "backside-temp";
95 else if (!strcmp(loc, "NB Ambient"))
96 name = "north-bridge-temp";
97 else if (!strcmp(loc, "GPU Ambient"))
98 name = "gpu-temp";
99 else
100 goto fail;
101
92 max->sens.ops = &wf_max6690_ops; 102 max->sens.ops = &wf_max6690_ops;
93 max->sens.name = name; 103 max->sens.name = name;
94 max->i2c.addr = addr >> 1; 104 max->i2c.addr = addr >> 1;
@@ -138,9 +148,7 @@ static int wf_max6690_attach(struct i2c_adapter *adapter)
138 if (loc == NULL || addr == 0) 148 if (loc == NULL || addr == 0)
139 continue; 149 continue;
140 printk("found max6690, loc=%s addr=0x%02x\n", loc, addr); 150 printk("found max6690, loc=%s addr=0x%02x\n", loc, addr);
141 if (strcmp(loc, "BACKSIDE")) 151 wf_max6690_create(adapter, addr, loc);
142 continue;
143 wf_max6690_create(adapter, addr);
144 } 152 }
145 153
146 return 0; 154 return 0;
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
new file mode 100644
index 000000000000..66ec4fb115bb
--- /dev/null
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -0,0 +1,1040 @@
1/*
2 * Windfarm PowerMac thermal control. iMac G5 iSight
3 *
4 * (c) Copyright 2007 Étienne Bersac <bersace@gmail.com>
5 *
6 * Bits & pieces from windfarm_pm81.c by (c) Copyright 2005 Benjamin
7 * Herrenschmidt, IBM Corp. <benh@kernel.crashing.org>
8 *
9 * Released under the term of the GNU GPL v2.
10 *
11 *
12 *
13 * PowerMac12,1
14 * ============
15 *
16 *
 17 * The algorithm used is the PID control algorithm, applied the same
 18 * way the published Darwin code applies it, using the same values that
 19 * are present in the Darwin 8.10 snapshot property lists (note however
 20 * that none of the code has been re-used; it is a complete
 21 * re-implementation).
22 *
 23 * There are two models using PowerMac12,1. Model 2 is the iMac G5 iSight
 24 * 17" while Model 3 is the iMac G5 20". They both have the same
 25 * controls, with one tiny difference: the control-ids of hard-drive-fan
 26 * and cpu-fan are swapped.
27 *
28 *
29 * Target Correction :
30 *
 31 * Controls have a target correction calculated as:
32 *
33 * new_min = ((((average_power * slope) >> 16) + offset) >> 16) + min_value
34 * new_value = max(new_value, max(new_min, 0))
35 *
36 * OD Fan control correction.
37 *
38 * # model_id: 2
39 * offset : -19563152
40 * slope : 1956315
41 *
42 * # model_id: 3
43 * offset : -15650652
44 * slope : 1565065
45 *
46 * HD Fan control correction.
47 *
48 * # model_id: 2
49 * offset : -15650652
50 * slope : 1565065
51 *
52 * # model_id: 3
53 * offset : -19563152
54 * slope : 1956315
55 *
56 * CPU Fan control correction.
57 *
58 * # model_id: 2
59 * offset : -25431900
60 * slope : 2543190
61 *
62 * # model_id: 3
63 * offset : -15650652
64 * slope : 1565065
65 *
66 *
67 * Target rubber-banding :
68 *
69 * Some controls have a target correction which depends on another
 70 * control value. The correction is computed in the following way:
71 *
72 * new_min = ref_value * slope + offset
73 *
74 * ref_value is the value of the reference control. If new_min is
 75 * greater than 0, then we correct the target value using:
76 *
77 * new_target = max (new_target, new_min >> 16)
78 *
79 *
80 * # model_id : 2
81 * control : cpu-fan
82 * ref : optical-drive-fan
83 * offset : -15650652
84 * slope : 1565065
85 *
86 * # model_id : 3
87 * control : optical-drive-fan
88 * ref : hard-drive-fan
89 * offset : -32768000
90 * slope : 65536
91 *
92 *
 93 * In order to have the most efficient correction with those
 94 * dependencies, we must run the HD loop before the OD loop, and the
 95 * OD loop before the CPU loop.
96 *
97 *
 98 * The various control loops found in the Darwin config file are:
99 *
100 * HD Fan control loop.
101 *
102 * # model_id: 2
103 * control : hard-drive-fan
104 * sensor : hard-drive-temp
105 * PID params : G_d = 0x00000000
106 * G_p = 0x002D70A3
107 * G_r = 0x00019999
108 * History = 2 entries
109 * Input target = 0x370000
110 * Interval = 5s
111 *
112 * # model_id: 3
113 * control : hard-drive-fan
114 * sensor : hard-drive-temp
115 * PID params : G_d = 0x00000000
116 * G_p = 0x002170A3
117 * G_r = 0x00019999
118 * History = 2 entries
119 * Input target = 0x370000
120 * Interval = 5s
121 *
122 * OD Fan control loop.
123 *
124 * # model_id: 2
125 * control : optical-drive-fan
126 * sensor : optical-drive-temp
127 * PID params : G_d = 0x00000000
128 * G_p = 0x001FAE14
129 * G_r = 0x00019999
130 * History = 2 entries
131 * Input target = 0x320000
132 * Interval = 5s
133 *
134 * # model_id: 3
135 * control : optical-drive-fan
136 * sensor : optical-drive-temp
137 * PID params : G_d = 0x00000000
138 * G_p = 0x001FAE14
139 * G_r = 0x00019999
140 * History = 2 entries
141 * Input target = 0x320000
142 * Interval = 5s
143 *
144 * GPU Fan control loop.
145 *
146 * # model_id: 2
147 * control : hard-drive-fan
148 * sensor : gpu-temp
149 * PID params : G_d = 0x00000000
150 * G_p = 0x002A6666
151 * G_r = 0x00019999
152 * History = 2 entries
153 * Input target = 0x5A0000
154 * Interval = 5s
155 *
156 * # model_id: 3
157 * control : cpu-fan
158 * sensor : gpu-temp
159 * PID params : G_d = 0x00000000
160 * G_p = 0x0010CCCC
161 * G_r = 0x00019999
162 * History = 2 entries
163 * Input target = 0x500000
164 * Interval = 5s
165 *
166 * KODIAK (aka northbridge) Fan control loop.
167 *
168 * # model_id: 2
169 * control : optical-drive-fan
170 * sensor : north-bridge-temp
171 * PID params : G_d = 0x00000000
172 * G_p = 0x003BD70A
173 * G_r = 0x00019999
174 * History = 2 entries
175 * Input target = 0x550000
176 * Interval = 5s
177 *
178 * # model_id: 3
179 * control : hard-drive-fan
180 * sensor : north-bridge-temp
181 * PID params : G_d = 0x00000000
182 * G_p = 0x0030F5C2
183 * G_r = 0x00019999
184 * History = 2 entries
185 * Input target = 0x550000
186 * Interval = 5s
187 *
188 * CPU Fan control loop.
189 *
190 * control : cpu-fan
191 * sensors : cpu-temp, cpu-power
192 * PID params : from SDB partition
193 *
194 *
195 * CPU Slew control loop.
196 *
197 * control : cpufreq-clamp
198 * sensor : cpu-temp
199 *
200 */
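
A minimal standalone sketch of the 16.16 fixed-point correction and rubber-banding formulas described in the comment above — plain C with made-up inputs, not part of the driver. The 64-bit intermediates are only there to keep this demo overflow-free; the driver itself works on windfarm s32 values.

#include <stdio.h>

/* Output-low-bound correction, as documented above:
 *   new_min   = ((((average_power * slope) >> 16) + offset) >> 16) + min_value
 *   new_value = max(pid_output, max(new_min, 0))
 */
static int corrected_setpoint(int pid_output, long long average_power,
			      long long slope, long long offset, int min_value)
{
	long long new_min = ((average_power * slope) >> 16) + offset;

	new_min = (new_min >> 16) + min_value;
	if (new_min < 0)
		new_min = 0;
	return pid_output > new_min ? pid_output : (int)new_min;
}

/* Rubber-banding: raise the target when the reference control runs fast.
 *   new_min    = ref_value * slope + offset
 *   new_target = max(new_target, new_min >> 16)   (only if new_min > 0)
 */
static int rubber_band(int target, long long ref_value,
		       long long slope, long long offset)
{
	long long new_min = ref_value * slope + offset;

	if (new_min > 0 && (new_min >> 16) > target)
		return (int)(new_min >> 16);
	return target;
}

int main(void)
{
	/* made-up numbers: 30 W average CPU power (16.16), the model-2
	 * cpu-fan correction constants, a 1000 RPM hardware minimum */
	int floor = corrected_setpoint(1200, 30LL << 16, 2543190, -25431900, 1000);

	/* made-up numbers: an OD fan at 2200 RPM drags the CPU fan target up */
	int target = rubber_band(1500, 2200, 65536, -32768000);

	printf("corrected setpoint: %d RPM, rubber-banded target: %d RPM\n",
	       floor, target);
	return 0;
}
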
201
202#undef DEBUG
203
204#include <linux/types.h>
205#include <linux/errno.h>
206#include <linux/kernel.h>
207#include <linux/delay.h>
208#include <linux/slab.h>
209#include <linux/init.h>
210#include <linux/spinlock.h>
211#include <linux/wait.h>
212#include <linux/kmod.h>
213#include <linux/device.h>
214#include <linux/platform_device.h>
215#include <asm/prom.h>
216#include <asm/machdep.h>
217#include <asm/io.h>
218#include <asm/system.h>
219#include <asm/sections.h>
220#include <asm/smu.h>
221
222#include "windfarm.h"
223#include "windfarm_pid.h"
224
225#define VERSION "0.3"
226
227static int pm121_mach_model; /* machine model id */
228
229/* Controls & sensors */
230static struct wf_sensor *sensor_cpu_power;
231static struct wf_sensor *sensor_cpu_temp;
232static struct wf_sensor *sensor_cpu_voltage;
233static struct wf_sensor *sensor_cpu_current;
234static struct wf_sensor *sensor_gpu_temp;
235static struct wf_sensor *sensor_north_bridge_temp;
236static struct wf_sensor *sensor_hard_drive_temp;
237static struct wf_sensor *sensor_optical_drive_temp;
238static struct wf_sensor *sensor_incoming_air_temp; /* unused ! */
239
240enum {
241 FAN_CPU,
242 FAN_HD,
243 FAN_OD,
244 CPUFREQ,
245 N_CONTROLS
246};
247static struct wf_control *controls[N_CONTROLS] = {};
248
249/* Set to kick the control loop into life */
250static int pm121_all_controls_ok, pm121_all_sensors_ok, pm121_started;
251
252enum {
253 FAILURE_FAN = 1 << 0,
254 FAILURE_SENSOR = 1 << 1,
255 FAILURE_OVERTEMP = 1 << 2
256};
257
258/* All sys loops. Note that the HD loop comes before the OD loop so
259 that it runs first. */
260enum {
261 LOOP_GPU, /* control = hd or cpu, but luckily,
262 it doesn't matter */
263 LOOP_HD, /* control = hd */
264 LOOP_KODIAK, /* control = hd or od */
265 LOOP_OD, /* control = od */
266 N_LOOPS
267};
268
269static const char *loop_names[N_LOOPS] = {
270 "GPU",
271 "HD",
272 "KODIAK",
273 "OD",
274};
275
276#define PM121_NUM_CONFIGS 2
277
278static unsigned int pm121_failure_state;
279static int pm121_readjust, pm121_skipping;
280static s32 average_power;
281
282struct pm121_correction {
283 int offset;
284 int slope;
285};
286
287static struct pm121_correction corrections[N_CONTROLS][PM121_NUM_CONFIGS] = {
288 /* FAN_OD */
289 {
290 /* MODEL 2 */
291 { .offset = -19563152,
292 .slope = 1956315
293 },
294 /* MODEL 3 */
295 { .offset = -15650652,
296 .slope = 1565065
297 },
298 },
299 /* FAN_HD */
300 {
301 /* MODEL 2 */
302 { .offset = -15650652,
303 .slope = 1565065
304 },
305 /* MODEL 3 */
306 { .offset = -19563152,
307 .slope = 1956315
308 },
309 },
310 /* FAN_CPU */
311 {
312 /* MODEL 2 */
313 { .offset = -25431900,
314 .slope = 2543190
315 },
316 /* MODEL 3 */
317 { .offset = -15650652,
318 .slope = 1565065
319 },
320 },
321 /* CPUFREQ has no correction (and is not implemented at all) */
322};
323
324struct pm121_connection {
325 unsigned int control_id;
326 unsigned int ref_id;
327 struct pm121_correction correction;
328};
329
330static struct pm121_connection pm121_connections[] = {
331 /* MODEL 2 */
332 { .control_id = FAN_CPU,
333 .ref_id = FAN_OD,
334 { .offset = -32768000,
335 .slope = 65536
336 }
337 },
338 /* MODEL 3 */
339 { .control_id = FAN_OD,
340 .ref_id = FAN_HD,
341 { .offset = -32768000,
342 .slope = 65536
343 }
344 },
345};
346
347/* pointer to the current model connection */
348static struct pm121_connection *pm121_connection;
349
350/*
351 * ****** System Fans Control Loop ******
352 *
353 */
354
355/* Since each loop handles only one control and we want to avoid
356 * writing a virtual control, we store the control correction with the
357 * loop params. Some fields are not set here; they are common to all
358 * loops and thus hardcoded.
359 */
360struct pm121_sys_param {
361 /* purely informative since we use mach_model-2 as index */
362 int model_id;
363 struct wf_sensor **sensor; /* use sensor_id instead ? */
364 s32 gp, itarget;
365 unsigned int control_id;
366};
367
368static struct pm121_sys_param
369pm121_sys_all_params[N_LOOPS][PM121_NUM_CONFIGS] = {
370 /* GPU Fan control loop */
371 {
372 { .model_id = 2,
373 .sensor = &sensor_gpu_temp,
374 .gp = 0x002A6666,
375 .itarget = 0x5A0000,
376 .control_id = FAN_HD,
377 },
378 { .model_id = 3,
379 .sensor = &sensor_gpu_temp,
380 .gp = 0x0010CCCC,
381 .itarget = 0x500000,
382 .control_id = FAN_CPU,
383 },
384 },
385 /* HD Fan control loop */
386 {
387 { .model_id = 2,
388 .sensor = &sensor_hard_drive_temp,
389 .gp = 0x002D70A3,
390 .itarget = 0x370000,
391 .control_id = FAN_HD,
392 },
393 { .model_id = 3,
394 .sensor = &sensor_hard_drive_temp,
395 .gp = 0x002170A3,
396 .itarget = 0x370000,
397 .control_id = FAN_HD,
398 },
399 },
400 /* KODIAK Fan control loop */
401 {
402 { .model_id = 2,
403 .sensor = &sensor_north_bridge_temp,
404 .gp = 0x003BD70A,
405 .itarget = 0x550000,
406 .control_id = FAN_OD,
407 },
408 { .model_id = 3,
409 .sensor = &sensor_north_bridge_temp,
410 .gp = 0x0030F5C2,
411 .itarget = 0x550000,
412 .control_id = FAN_HD,
413 },
414 },
415 /* OD Fan control loop */
416 {
417 { .model_id = 2,
418 .sensor = &sensor_optical_drive_temp,
419 .gp = 0x001FAE14,
420 .itarget = 0x320000,
421 .control_id = FAN_OD,
422 },
423 { .model_id = 3,
424 .sensor = &sensor_optical_drive_temp,
425 .gp = 0x001FAE14,
426 .itarget = 0x320000,
427 .control_id = FAN_OD,
428 },
429 },
430};
431
432/* the hardcoded values */
433#define PM121_SYS_GD 0x00000000
434#define PM121_SYS_GR 0x00019999
435#define PM121_SYS_HISTORY_SIZE 2
436#define PM121_SYS_INTERVAL 5
437
438/* State data used by the system fans control loop
439 */
440struct pm121_sys_state {
441 int ticks;
442 s32 setpoint;
443 struct wf_pid_state pid;
444};
445
446struct pm121_sys_state *pm121_sys_state[N_LOOPS] = {};
447
448/*
449 * ****** CPU Fans Control Loop ******
450 *
451 */
452
453#define PM121_CPU_INTERVAL 1
454
455/* State data used by the cpu fans control loop
456 */
457struct pm121_cpu_state {
458 int ticks;
459 s32 setpoint;
460 struct wf_cpu_pid_state pid;
461};
462
463static struct pm121_cpu_state *pm121_cpu_state;
464
465
466
467/*
468 * ***** Implementation *****
469 *
470 */
471
472/* correct the value using the output-low-bound correction algorithm */
473static s32 pm121_correct(s32 new_setpoint,
474 unsigned int control_id,
475 s32 min)
476{
477 s32 new_min;
478 struct pm121_correction *correction;
479 correction = &corrections[control_id][pm121_mach_model - 2];
480
481 new_min = (average_power * correction->slope) >> 16;
482 new_min += correction->offset;
483 new_min = (new_min >> 16) + min;
484
485 return max(new_setpoint, max(new_min, 0));
486}
487
488static s32 pm121_connect(unsigned int control_id, s32 setpoint)
489{
490 s32 new_min, value, new_setpoint;
491
492 if (pm121_connection->control_id == control_id) {
493 controls[control_id]->ops->get_value(controls[control_id],
494 &value);
495 new_min = value * pm121_connection->correction.slope;
496 new_min += pm121_connection->correction.offset;
497 if (new_min > 0) {
498 new_setpoint = max(setpoint, (new_min >> 16));
499 if (new_setpoint != setpoint) {
500 pr_debug("pm121: %s depending on %s, "
501 "corrected from %d to %d RPM\n",
502 controls[control_id]->name,
503 controls[pm121_connection->ref_id]->name,
504 (int) setpoint, (int) new_setpoint);
505 }
506 } else
507 new_setpoint = setpoint;
508 }
509 /* no connection */
510 else
511 new_setpoint = setpoint;
512
513 return new_setpoint;
514}
515
516/* FAN LOOPS */
517static void pm121_create_sys_fans(int loop_id)
518{
519 struct pm121_sys_param *param = NULL;
520 struct wf_pid_param pid_param;
521 struct wf_control *control = NULL;
522 int i;
523
524 /* First, locate the params for this model */
525 for (i = 0; i < PM121_NUM_CONFIGS; i++) {
526 if (pm121_sys_all_params[loop_id][i].model_id == pm121_mach_model) {
527 param = &(pm121_sys_all_params[loop_id][i]);
528 break;
529 }
530 }
531
532 /* No params found, put fans to max */
533 if (param == NULL) {
534 printk(KERN_WARNING "pm121: %s fan config not found "
535 " for this machine model\n",
536 loop_names[loop_id]);
537 goto fail;
538 }
539
540 control = controls[param->control_id];
541
542 /* Alloc & initialize state */
543 pm121_sys_state[loop_id] = kmalloc(sizeof(struct pm121_sys_state),
544 GFP_KERNEL);
545 if (pm121_sys_state[loop_id] == NULL) {
546 printk(KERN_WARNING "pm121: Memory allocation error\n");
547 goto fail;
548 }
549 pm121_sys_state[loop_id]->ticks = 1;
550
551 /* Fill PID params */
552 pid_param.gd = PM121_SYS_GD;
553 pid_param.gp = param->gp;
554 pid_param.gr = PM121_SYS_GR;
555 pid_param.interval = PM121_SYS_INTERVAL;
556 pid_param.history_len = PM121_SYS_HISTORY_SIZE;
557 pid_param.itarget = param->itarget;
558 pid_param.min = control->ops->get_min(control);
559 pid_param.max = control->ops->get_max(control);
560
561 wf_pid_init(&pm121_sys_state[loop_id]->pid, &pid_param);
562
563 pr_debug("pm121: %s Fan control loop initialized.\n"
564 " itarged=%d.%03d, min=%d RPM, max=%d RPM\n",
565 loop_names[loop_id], FIX32TOPRINT(pid_param.itarget),
566 pid_param.min, pid_param.max);
567 return;
568
569 fail:
570 /* note that this is not optimal since another loop may still
571 control the same control */
572 printk(KERN_WARNING "pm121: failed to set up %s loop "
573 "setting \"%s\" to max speed.\n",
574 loop_names[loop_id], control->name);
575
576 if (control)
577 wf_control_set_max(control);
578}
579
580static void pm121_sys_fans_tick(int loop_id)
581{
582 struct pm121_sys_param *param;
583 struct pm121_sys_state *st;
584 struct wf_sensor *sensor;
585 struct wf_control *control;
586 s32 temp, new_setpoint;
587 int rc;
588
589 param = &(pm121_sys_all_params[loop_id][pm121_mach_model-2]);
590 st = pm121_sys_state[loop_id];
591 sensor = *(param->sensor);
592 control = controls[param->control_id];
593
594 if (--st->ticks != 0) {
595 if (pm121_readjust)
596 goto readjust;
597 return;
598 }
599 st->ticks = PM121_SYS_INTERVAL;
600
601 rc = sensor->ops->get_value(sensor, &temp);
602 if (rc) {
603 printk(KERN_WARNING "windfarm: %s sensor error %d\n",
604 sensor->name, rc);
605 pm121_failure_state |= FAILURE_SENSOR;
606 return;
607 }
608
609 pr_debug("pm121: %s Fan tick ! %s: %d.%03d\n",
610 loop_names[loop_id], sensor->name,
611 FIX32TOPRINT(temp));
612
613 new_setpoint = wf_pid_run(&st->pid, temp);
614
615 /* correction */
616 new_setpoint = pm121_correct(new_setpoint,
617 param->control_id,
618 st->pid.param.min);
619 /* linked correction */
620 new_setpoint = pm121_connect(param->control_id, new_setpoint);
621
622 if (new_setpoint == st->setpoint)
623 return;
624 st->setpoint = new_setpoint;
625 pr_debug("pm121: %s corrected setpoint: %d RPM\n",
626 control->name, (int)new_setpoint);
627 readjust:
628 if (control && pm121_failure_state == 0) {
629 rc = control->ops->set_value(control, st->setpoint);
630 if (rc) {
631 printk(KERN_WARNING "windfarm: %s fan error %d\n",
632 control->name, rc);
633 pm121_failure_state |= FAILURE_FAN;
634 }
635 }
636}
637
638
639/* CPU LOOP */
640static void pm121_create_cpu_fans(void)
641{
642 struct wf_cpu_pid_param pid_param;
643 const struct smu_sdbp_header *hdr;
644 struct smu_sdbp_cpupiddata *piddata;
645 struct smu_sdbp_fvt *fvt;
646 struct wf_control *fan_cpu;
647 s32 tmax, tdelta, maxpow, powadj;
648
649 fan_cpu = controls[FAN_CPU];
650
651 /* First, locate the PID params in SMU SBD */
652 hdr = smu_get_sdb_partition(SMU_SDB_CPUPIDDATA_ID, NULL);
653 if (hdr == 0) {
654 printk(KERN_WARNING "pm121: CPU PID fan config not found.\n");
655 goto fail;
656 }
657 piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
658
659 /* Get the FVT params for operating point 0 (the only supported one
660 * for now) in order to get tmax
661 */
662 hdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
663 if (hdr) {
664 fvt = (struct smu_sdbp_fvt *)&hdr[1];
665 tmax = ((s32)fvt->maxtemp) << 16;
666 } else
667 tmax = 0x5e0000; /* 94 degree default */
668
669 /* Alloc & initialize state */
670 pm121_cpu_state = kmalloc(sizeof(struct pm121_cpu_state),
671 GFP_KERNEL);
672 if (pm121_cpu_state == NULL)
673 goto fail;
674 pm121_cpu_state->ticks = 1;
675
676 /* Fill PID params */
677 pid_param.interval = PM121_CPU_INTERVAL;
678 pid_param.history_len = piddata->history_len;
679 if (pid_param.history_len > WF_CPU_PID_MAX_HISTORY) {
680 printk(KERN_WARNING "pm121: History size overflow on "
681 "CPU control loop (%d)\n", piddata->history_len);
682 pid_param.history_len = WF_CPU_PID_MAX_HISTORY;
683 }
684 pid_param.gd = piddata->gd;
685 pid_param.gp = piddata->gp;
686 pid_param.gr = piddata->gr / pid_param.history_len;
687
688 tdelta = ((s32)piddata->target_temp_delta) << 16;
689 maxpow = ((s32)piddata->max_power) << 16;
690 powadj = ((s32)piddata->power_adj) << 16;
691
692 pid_param.tmax = tmax;
693 pid_param.ttarget = tmax - tdelta;
694 pid_param.pmaxadj = maxpow - powadj;
695
696 pid_param.min = fan_cpu->ops->get_min(fan_cpu);
697 pid_param.max = fan_cpu->ops->get_max(fan_cpu);
698
699 wf_cpu_pid_init(&pm121_cpu_state->pid, &pid_param);
700
701 pr_debug("pm121: CPU Fan control initialized.\n");
702 pr_debug(" ttarged=%d.%03d, tmax=%d.%03d, min=%d RPM, max=%d RPM,\n",
703 FIX32TOPRINT(pid_param.ttarget), FIX32TOPRINT(pid_param.tmax),
704 pid_param.min, pid_param.max);
705
706 return;
707
708 fail:
709 printk(KERN_WARNING "pm121: CPU fan config not found, max fan speed\n");
710
711 if (controls[CPUFREQ])
712 wf_control_set_max(controls[CPUFREQ]);
713 if (fan_cpu)
714 wf_control_set_max(fan_cpu);
715}
716
717
718static void pm121_cpu_fans_tick(struct pm121_cpu_state *st)
719{
720 s32 new_setpoint, temp, power;
721 struct wf_control *fan_cpu = NULL;
722 int rc;
723
724 if (--st->ticks != 0) {
725 if (pm121_readjust)
726 goto readjust;
727 return;
728 }
729 st->ticks = PM121_CPU_INTERVAL;
730
731 fan_cpu = controls[FAN_CPU];
732
733 rc = sensor_cpu_temp->ops->get_value(sensor_cpu_temp, &temp);
734 if (rc) {
735 printk(KERN_WARNING "pm121: CPU temp sensor error %d\n",
736 rc);
737 pm121_failure_state |= FAILURE_SENSOR;
738 return;
739 }
740
741 rc = sensor_cpu_power->ops->get_value(sensor_cpu_power, &power);
742 if (rc) {
743 printk(KERN_WARNING "pm121: CPU power sensor error %d\n",
744 rc);
745 pm121_failure_state |= FAILURE_SENSOR;
746 return;
747 }
748
749 pr_debug("pm121: CPU Fans tick ! CPU temp: %d.%03d°C, power: %d.%03d\n",
750 FIX32TOPRINT(temp), FIX32TOPRINT(power));
751
752 if (temp > st->pid.param.tmax)
753 pm121_failure_state |= FAILURE_OVERTEMP;
754
755 new_setpoint = wf_cpu_pid_run(&st->pid, power, temp);
756
757 /* correction */
758 new_setpoint = pm121_correct(new_setpoint,
759 FAN_CPU,
760 st->pid.param.min);
761
762 /* connected correction */
763 new_setpoint = pm121_connect(FAN_CPU, new_setpoint);
764
765 if (st->setpoint == new_setpoint)
766 return;
767 st->setpoint = new_setpoint;
768 pr_debug("pm121: CPU corrected setpoint: %d RPM\n", (int)new_setpoint);
769
770 readjust:
771 if (fan_cpu && pm121_failure_state == 0) {
772 rc = fan_cpu->ops->set_value(fan_cpu, st->setpoint);
773 if (rc) {
774 printk(KERN_WARNING "pm121: %s fan error %d\n",
775 fan_cpu->name, rc);
776 pm121_failure_state |= FAILURE_FAN;
777 }
778 }
779}
780
781/*
782 * ****** Common ******
783 *
784 */
785
786static void pm121_tick(void)
787{
788 unsigned int last_failure = pm121_failure_state;
789 unsigned int new_failure;
790 s32 total_power;
791 int i;
792
793 if (!pm121_started) {
794 pr_debug("pm121: creating control loops !\n");
795 for (i = 0; i < N_LOOPS; i++)
796 pm121_create_sys_fans(i);
797
798 pm121_create_cpu_fans();
799 pm121_started = 1;
800 }
801
802 /* skipping ticks */
803 if (pm121_skipping && --pm121_skipping)
804 return;
805
806 /* compute average power */
807 total_power = 0;
808 for (i = 0; i < pm121_cpu_state->pid.param.history_len; i++)
809 total_power += pm121_cpu_state->pid.powers[i];
810
811 average_power = total_power / pm121_cpu_state->pid.param.history_len;
812
813
814 pm121_failure_state = 0;
815 for (i = 0 ; i < N_LOOPS; i++) {
816 if (pm121_sys_state[i])
817 pm121_sys_fans_tick(i);
818 }
819
820 if (pm121_cpu_state)
821 pm121_cpu_fans_tick(pm121_cpu_state);
822
823 pm121_readjust = 0;
824 new_failure = pm121_failure_state & ~last_failure;
825
826 /* If entering failure mode, clamp cpufreq and ramp all
827 * fans to full speed.
828 */
829 if (pm121_failure_state && !last_failure) {
830 for (i = 0; i < N_CONTROLS; i++) {
831 if (controls[i])
832 wf_control_set_max(controls[i]);
833 }
834 }
835
836 /* If leaving failure mode, unclamp cpufreq and readjust
837 * all fans on next iteration
838 */
839 if (!pm121_failure_state && last_failure) {
840 if (controls[CPUFREQ])
841 wf_control_set_min(controls[CPUFREQ]);
842 pm121_readjust = 1;
843 }
844
845 /* Overtemp condition detected, notify and start skipping a couple
846 * ticks to let the temperature go down
847 */
848 if (new_failure & FAILURE_OVERTEMP) {
849 wf_set_overtemp();
850 pm121_skipping = 2;
851 }
852
853 /* We only clear the overtemp condition if overtemp is cleared
854 * _and_ no other failure is present. A sensor error will clear
855 * the overtemp condition (we can't measure the temperature) at
856 * the control loop level, but in that case we don't want to
857 * clear it here.
858 */
859 if (new_failure == 0 && last_failure & FAILURE_OVERTEMP)
860 wf_clear_overtemp();
861}
862
863
864static struct wf_control* pm121_register_control(struct wf_control *ct,
865 const char *match,
866 unsigned int id)
867{
868 if (controls[id] == NULL && !strcmp(ct->name, match)) {
869 if (wf_get_control(ct) == 0)
870 controls[id] = ct;
871 }
872 return controls[id];
873}
874
875static void pm121_new_control(struct wf_control *ct)
876{
877 int all = 1;
878
879 if (pm121_all_controls_ok)
880 return;
881
882 all = pm121_register_control(ct, "optical-drive-fan", FAN_OD) && all;
883 all = pm121_register_control(ct, "hard-drive-fan", FAN_HD) && all;
884 all = pm121_register_control(ct, "cpu-fan", FAN_CPU) && all;
885 all = pm121_register_control(ct, "cpufreq-clamp", CPUFREQ) && all;
886
887 if (all)
888 pm121_all_controls_ok = 1;
889}
890
891
892
893
894static struct wf_sensor* pm121_register_sensor(struct wf_sensor *sensor,
895 const char *match,
896 struct wf_sensor **var)
897{
898 if (*var == NULL && !strcmp(sensor->name, match)) {
899 if (wf_get_sensor(sensor) == 0)
900 *var = sensor;
901 }
902 return *var;
903}
904
905static void pm121_new_sensor(struct wf_sensor *sr)
906{
907 int all = 1;
908
909 if (pm121_all_sensors_ok)
910 return;
911
912 all = pm121_register_sensor(sr, "cpu-temp",
913 &sensor_cpu_temp) && all;
914 all = pm121_register_sensor(sr, "cpu-current",
915 &sensor_cpu_current) && all;
916 all = pm121_register_sensor(sr, "cpu-voltage",
917 &sensor_cpu_voltage) && all;
918 all = pm121_register_sensor(sr, "cpu-power",
919 &sensor_cpu_power) && all;
920 all = pm121_register_sensor(sr, "hard-drive-temp",
921 &sensor_hard_drive_temp) && all;
922 all = pm121_register_sensor(sr, "optical-drive-temp",
923 &sensor_optical_drive_temp) && all;
924 all = pm121_register_sensor(sr, "incoming-air-temp",
925 &sensor_incoming_air_temp) && all;
926 all = pm121_register_sensor(sr, "north-bridge-temp",
927 &sensor_north_bridge_temp) && all;
928 all = pm121_register_sensor(sr, "gpu-temp",
929 &sensor_gpu_temp) && all;
930
931 if (all)
932 pm121_all_sensors_ok = 1;
933}
934
935
936
937static int pm121_notify(struct notifier_block *self,
938 unsigned long event, void *data)
939{
940 switch (event) {
941 case WF_EVENT_NEW_CONTROL:
942 pr_debug("pm121: new control %s detected\n",
943 ((struct wf_control *)data)->name);
944 pm121_new_control(data);
945 break;
946 case WF_EVENT_NEW_SENSOR:
947 pr_debug("pm121: new sensor %s detected\n",
948 ((struct wf_sensor *)data)->name);
949 pm121_new_sensor(data);
950 break;
951 case WF_EVENT_TICK:
952 if (pm121_all_controls_ok && pm121_all_sensors_ok)
953 pm121_tick();
954 break;
955 }
956
957 return 0;
958}
959
960static struct notifier_block pm121_events = {
961 .notifier_call = pm121_notify,
962};
963
964static int pm121_init_pm(void)
965{
966 const struct smu_sdbp_header *hdr;
967
968 hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL);
969 if (hdr != 0) {
970 struct smu_sdbp_sensortree *st =
971 (struct smu_sdbp_sensortree *)&hdr[1];
972 pm121_mach_model = st->model_id;
973 }
974
975 pm121_connection = &pm121_connections[pm121_mach_model - 2];
976
977 printk(KERN_INFO "pm121: Initializing for iMac G5 iSight model ID %d\n",
978 pm121_mach_model);
979
980 return 0;
981}
982
983
984static int pm121_probe(struct platform_device *ddev)
985{
986 wf_register_client(&pm121_events);
987
988 return 0;
989}
990
991static int __devexit pm121_remove(struct platform_device *ddev)
992{
993 wf_unregister_client(&pm121_events);
994 return 0;
995}
996
997static struct platform_driver pm121_driver = {
998 .probe = pm121_probe,
999 .remove = __devexit_p(pm121_remove),
1000 .driver = {
1001 .name = "windfarm",
1002 .bus = &platform_bus_type,
1003 },
1004};
1005
1006
1007static int __init pm121_init(void)
1008{
1009 int rc = -ENODEV;
1010
1011 if (machine_is_compatible("PowerMac12,1"))
1012 rc = pm121_init_pm();
1013
1014 if (rc == 0) {
1015 request_module("windfarm_smu_controls");
1016 request_module("windfarm_smu_sensors");
1017 request_module("windfarm_smu_sat");
1018 request_module("windfarm_lm75_sensor");
1019 request_module("windfarm_max6690_sensor");
1020 request_module("windfarm_cpufreq_clamp");
1021 platform_driver_register(&pm121_driver);
1022 }
1023
1024 return rc;
1025}
1026
1027static void __exit pm121_exit(void)
1028{
1029
1030 platform_driver_unregister(&pm121_driver);
1031}
1032
1033
1034module_init(pm121_init);
1035module_exit(pm121_exit);
1036
1037MODULE_AUTHOR("Étienne Bersac <bersace@gmail.com>");
1038MODULE_DESCRIPTION("Thermal control logic for iMac G5 (iSight)");
1039MODULE_LICENSE("GPL");
1040
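
The header comment of windfarm_pm121.c above lists per-loop PID gains (G_p, G_r, G_d), input targets and history sizes, all in 16.16 fixed point. As a rough illustration only — this is a generic textbook PID step, not the actual windfarm_pid.c algorithm — here is how such gains can be applied; the gain values are copied from the model-2 HD loop above purely as sample numbers, and the output units are arbitrary.

#include <stdio.h>

struct pid16 {
	long long gp, gr, gd;	/* proportional, reset (integral), derivative gains */
	long long itarget;	/* input target, 16.16 fixed point */
	long long integral;	/* accumulated error */
	long long last_error;
};

/* One generic PID step on a 16.16 sample; returns a 16.16 correction. */
static long long pid16_run(struct pid16 *p, long long sample)
{
	long long error = sample - p->itarget;
	long long deriv = error - p->last_error;

	p->integral += error;
	p->last_error = error;

	return (p->gp * error + p->gr * p->integral + p->gd * deriv) >> 16;
}

int main(void)
{
	/* gains from the model-2 HD loop above, used only as sample numbers */
	struct pid16 pid = {
		.gp = 0x002D70A3, .gr = 0x00019999, .gd = 0,
		.itarget = 0x370000,	/* 55 degrees C in 16.16 */
	};
	long long out = pid16_run(&pid, 0x3A0000);	/* 58 degrees C sample */

	printf("PID correction: %lld (16.16), i.e. about %lld units\n",
	       out, out >> 16);
	return 0;
}
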
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 58c2590f05ec..961fa0e7c2cf 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -218,6 +218,10 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
218 fct->ctrl.name = "cpu-fan"; 218 fct->ctrl.name = "cpu-fan";
219 else if (!strcmp(l, "Hard Drive") || !strcmp(l, "Hard drive")) 219 else if (!strcmp(l, "Hard Drive") || !strcmp(l, "Hard drive"))
220 fct->ctrl.name = "drive-bay-fan"; 220 fct->ctrl.name = "drive-bay-fan";
221 else if (!strcmp(l, "HDD Fan")) /* seen on iMac G5 iSight */
222 fct->ctrl.name = "hard-drive-fan";
223 else if (!strcmp(l, "ODD Fan")) /* same */
224 fct->ctrl.name = "optical-drive-fan";
221 225
222 /* Unrecognized fan, bail out */ 226 /* Unrecognized fan, bail out */
223 if (fct->ctrl.name == NULL) 227 if (fct->ctrl.name == NULL)
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 797918d0e59c..7f2be4baaeda 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -13,7 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/wait.h> 14#include <linux/wait.h>
15#include <linux/i2c.h> 15#include <linux/i2c.h>
16#include <linux/semaphore.h> 16#include <linux/mutex.h>
17#include <asm/prom.h> 17#include <asm/prom.h>
18#include <asm/smu.h> 18#include <asm/smu.h>
19#include <asm/pmac_low_i2c.h> 19#include <asm/pmac_low_i2c.h>
@@ -36,7 +36,7 @@
36struct wf_sat { 36struct wf_sat {
37 int nr; 37 int nr;
38 atomic_t refcnt; 38 atomic_t refcnt;
39 struct semaphore mutex; 39 struct mutex mutex;
40 unsigned long last_read; /* jiffies when cache last updated */ 40 unsigned long last_read; /* jiffies when cache last updated */
41 u8 cache[16]; 41 u8 cache[16];
42 struct i2c_client i2c; 42 struct i2c_client i2c;
@@ -163,7 +163,7 @@ static int wf_sat_get(struct wf_sensor *sr, s32 *value)
163 if (sat->i2c.adapter == NULL) 163 if (sat->i2c.adapter == NULL)
164 return -ENODEV; 164 return -ENODEV;
165 165
166 down(&sat->mutex); 166 mutex_lock(&sat->mutex);
167 if (time_after(jiffies, (sat->last_read + MAX_AGE))) { 167 if (time_after(jiffies, (sat->last_read + MAX_AGE))) {
168 err = wf_sat_read_cache(sat); 168 err = wf_sat_read_cache(sat);
169 if (err) 169 if (err)
@@ -182,7 +182,7 @@ static int wf_sat_get(struct wf_sensor *sr, s32 *value)
182 err = 0; 182 err = 0;
183 183
184 fail: 184 fail:
185 up(&sat->mutex); 185 mutex_unlock(&sat->mutex);
186 return err; 186 return err;
187} 187}
188 188
@@ -233,7 +233,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
233 sat->nr = -1; 233 sat->nr = -1;
234 sat->node = of_node_get(dev); 234 sat->node = of_node_get(dev);
235 atomic_set(&sat->refcnt, 0); 235 atomic_set(&sat->refcnt, 0);
236 init_MUTEX(&sat->mutex); 236 mutex_init(&sat->mutex);
237 sat->i2c.addr = (addr >> 1) & 0x7f; 237 sat->i2c.addr = (addr >> 1) & 0x7f;
238 sat->i2c.adapter = adapter; 238 sat->i2c.adapter = adapter;
239 sat->i2c.driver = &wf_sat_driver; 239 sat->i2c.driver = &wf_sat_driver;
diff --git a/drivers/mca/mca-legacy.c b/drivers/mca/mca-legacy.c
index 0c7bfa74c8ef..494f0c2001f5 100644
--- a/drivers/mca/mca-legacy.c
+++ b/drivers/mca/mca-legacy.c
@@ -282,24 +282,6 @@ void mca_set_adapter_name(int slot, char* name)
282EXPORT_SYMBOL(mca_set_adapter_name); 282EXPORT_SYMBOL(mca_set_adapter_name);
283 283
284/** 284/**
285 * mca_is_adapter_used - check if claimed by driver
286 * @slot: slot to check
287 *
288 * Returns 1 if the slot has been claimed by a driver
289 */
290
291int mca_is_adapter_used(int slot)
292{
293 struct mca_device *mca_dev = mca_find_device_by_slot(slot);
294
295 if(!mca_dev)
296 return 0;
297
298 return mca_device_claimed(mca_dev);
299}
300EXPORT_SYMBOL(mca_is_adapter_used);
301
302/**
303 * mca_mark_as_used - claim an MCA device 285 * mca_mark_as_used - claim an MCA device
304 * @slot: slot to claim 286 * @slot: slot to claim
305 * FIXME: should we make this threadsafe 287 * FIXME: should we make this threadsafe
diff --git a/drivers/mca/mca-proc.c b/drivers/mca/mca-proc.c
index 33d5e0820cc5..81ea0d377bf4 100644
--- a/drivers/mca/mca-proc.c
+++ b/drivers/mca/mca-proc.c
@@ -183,7 +183,7 @@ void __init mca_do_proc_init(void)
183 struct proc_dir_entry* node = NULL; 183 struct proc_dir_entry* node = NULL;
184 struct mca_device *mca_dev; 184 struct mca_device *mca_dev;
185 185
186 proc_mca = proc_mkdir("mca", &proc_root); 186 proc_mca = proc_mkdir("mca", NULL);
187 create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL); 187 create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL);
188 create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL); 188 create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL);
189 189
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 6b91b9ab1d41..3ea5ad4b7805 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -110,8 +110,6 @@ static struct request *get_failover_req(struct emc_handler *h,
110 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 110 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
111 rq->sense_len = 0; 111 rq->sense_len = 0;
112 112
113 memset(&rq->cmd, 0, BLK_MAX_CDB);
114
115 rq->timeout = EMC_FAILOVER_TIMEOUT; 113 rq->timeout = EMC_FAILOVER_TIMEOUT;
116 rq->cmd_type = REQ_TYPE_BLOCK_PC; 114 rq->cmd_type = REQ_TYPE_BLOCK_PC;
117 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; 115 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
diff --git a/drivers/md/dm-mpath-hp-sw.c b/drivers/md/dm-mpath-hp-sw.c
index 204bf42c9449..b63a0ab37c53 100644
--- a/drivers/md/dm-mpath-hp-sw.c
+++ b/drivers/md/dm-mpath-hp-sw.c
@@ -137,7 +137,6 @@ static struct request *hp_sw_get_request(struct dm_path *path)
137 req->sense = h->sense; 137 req->sense = h->sense;
138 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); 138 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
139 139
140 memset(&req->cmd, 0, BLK_MAX_CDB);
141 req->cmd[0] = START_STOP; 140 req->cmd[0] = START_STOP;
142 req->cmd[4] = 1; 141 req->cmd[4] = 1;
143 req->cmd_len = COMMAND_SIZE(req->cmd[0]); 142 req->cmd_len = COMMAND_SIZE(req->cmd[0]);
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
index e04eb5c697fb..95e77734880a 100644
--- a/drivers/md/dm-mpath-rdac.c
+++ b/drivers/md/dm-mpath-rdac.c
@@ -284,7 +284,6 @@ static struct request *get_rdac_req(struct rdac_handler *h,
284 return NULL; 284 return NULL;
285 } 285 }
286 286
287 memset(&rq->cmd, 0, BLK_MAX_CDB);
288 rq->sense = h->sense; 287 rq->sense = h->sense;
289 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 288 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
290 rq->sense_len = 0; 289 rq->sense_len = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 51be53344214..94116eaf4709 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -873,10 +873,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
873 q->max_hw_sectors = t->limits.max_hw_sectors; 873 q->max_hw_sectors = t->limits.max_hw_sectors;
874 q->seg_boundary_mask = t->limits.seg_boundary_mask; 874 q->seg_boundary_mask = t->limits.seg_boundary_mask;
875 q->bounce_pfn = t->limits.bounce_pfn; 875 q->bounce_pfn = t->limits.bounce_pfn;
876
876 if (t->limits.no_cluster) 877 if (t->limits.no_cluster)
877 q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER); 878 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
878 else 879 else
879 q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER); 880 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
880 881
881} 882}
882 883
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 87620b705bee..83eb78b00137 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -276,13 +276,15 @@ static mddev_t * mddev_find(dev_t unit)
276 init_waitqueue_head(&new->sb_wait); 276 init_waitqueue_head(&new->sb_wait);
277 new->reshape_position = MaxSector; 277 new->reshape_position = MaxSector;
278 new->resync_max = MaxSector; 278 new->resync_max = MaxSector;
279 new->level = LEVEL_NONE;
279 280
280 new->queue = blk_alloc_queue(GFP_KERNEL); 281 new->queue = blk_alloc_queue(GFP_KERNEL);
281 if (!new->queue) { 282 if (!new->queue) {
282 kfree(new); 283 kfree(new);
283 return NULL; 284 return NULL;
284 } 285 }
285 set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags); 286 /* Can be unlocked because the queue is new: no concurrency */
287 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
286 288
287 blk_queue_make_request(new->queue, md_fail_request); 289 blk_queue_make_request(new->queue, md_fail_request);
288 290
@@ -1368,6 +1370,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1368 MD_BUG(); 1370 MD_BUG();
1369 return -EINVAL; 1371 return -EINVAL;
1370 } 1372 }
1373
1374 /* prevent duplicates */
1375 if (find_rdev(mddev, rdev->bdev->bd_dev))
1376 return -EEXIST;
1377
1371 /* make sure rdev->size exceeds mddev->size */ 1378 /* make sure rdev->size exceeds mddev->size */
1372 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) { 1379 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1373 if (mddev->pers) { 1380 if (mddev->pers) {
@@ -1651,6 +1658,8 @@ static void md_update_sb(mddev_t * mddev, int force_change)
1651 int sync_req; 1658 int sync_req;
1652 int nospares = 0; 1659 int nospares = 0;
1653 1660
1661 if (mddev->external)
1662 return;
1654repeat: 1663repeat:
1655 spin_lock_irq(&mddev->write_lock); 1664 spin_lock_irq(&mddev->write_lock);
1656 1665
@@ -1819,6 +1828,10 @@ state_show(mdk_rdev_t *rdev, char *page)
1819 len += sprintf(page+len, "%swrite_mostly",sep); 1828 len += sprintf(page+len, "%swrite_mostly",sep);
1820 sep = ","; 1829 sep = ",";
1821 } 1830 }
1831 if (test_bit(Blocked, &rdev->flags)) {
1832 len += sprintf(page+len, "%sblocked", sep);
1833 sep = ",";
1834 }
1822 if (!test_bit(Faulty, &rdev->flags) && 1835 if (!test_bit(Faulty, &rdev->flags) &&
1823 !test_bit(In_sync, &rdev->flags)) { 1836 !test_bit(In_sync, &rdev->flags)) {
1824 len += sprintf(page+len, "%sspare", sep); 1837 len += sprintf(page+len, "%sspare", sep);
@@ -1835,6 +1848,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1835 * remove - disconnects the device 1848 * remove - disconnects the device
1836 * writemostly - sets write_mostly 1849 * writemostly - sets write_mostly
1837 * -writemostly - clears write_mostly 1850 * -writemostly - clears write_mostly
1851 * blocked - sets the Blocked flag
1852 * -blocked - clears the Blocked flag
1838 */ 1853 */
1839 int err = -EINVAL; 1854 int err = -EINVAL;
1840 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 1855 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -1857,6 +1872,16 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1857 } else if (cmd_match(buf, "-writemostly")) { 1872 } else if (cmd_match(buf, "-writemostly")) {
1858 clear_bit(WriteMostly, &rdev->flags); 1873 clear_bit(WriteMostly, &rdev->flags);
1859 err = 0; 1874 err = 0;
1875 } else if (cmd_match(buf, "blocked")) {
1876 set_bit(Blocked, &rdev->flags);
1877 err = 0;
1878 } else if (cmd_match(buf, "-blocked")) {
1879 clear_bit(Blocked, &rdev->flags);
1880 wake_up(&rdev->blocked_wait);
1881 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
1882 md_wakeup_thread(rdev->mddev->thread);
1883
1884 err = 0;
1860 } 1885 }
1861 return err ? err : len; 1886 return err ? err : len;
1862} 1887}
@@ -2096,7 +2121,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2096 rv = -EBUSY; 2121 rv = -EBUSY;
2097 else 2122 else
2098 rv = entry->store(rdev, page, length); 2123 rv = entry->store(rdev, page, length);
2099 mddev_unlock(rdev->mddev); 2124 mddev_unlock(mddev);
2100 } 2125 }
2101 return rv; 2126 return rv;
2102} 2127}
@@ -2185,7 +2210,9 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
2185 goto abort_free; 2210 goto abort_free;
2186 } 2211 }
2187 } 2212 }
2213
2188 INIT_LIST_HEAD(&rdev->same_set); 2214 INIT_LIST_HEAD(&rdev->same_set);
2215 init_waitqueue_head(&rdev->blocked_wait);
2189 2216
2190 return rdev; 2217 return rdev;
2191 2218
@@ -2456,7 +2483,6 @@ resync_start_show(mddev_t *mddev, char *page)
2456static ssize_t 2483static ssize_t
2457resync_start_store(mddev_t *mddev, const char *buf, size_t len) 2484resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2458{ 2485{
2459 /* can only set chunk_size if array is not yet active */
2460 char *e; 2486 char *e;
2461 unsigned long long n = simple_strtoull(buf, &e, 10); 2487 unsigned long long n = simple_strtoull(buf, &e, 10);
2462 2488
@@ -2590,15 +2616,20 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
2590 err = do_md_stop(mddev, 1); 2616 err = do_md_stop(mddev, 1);
2591 else { 2617 else {
2592 mddev->ro = 1; 2618 mddev->ro = 1;
2619 set_disk_ro(mddev->gendisk, 1);
2593 err = do_md_run(mddev); 2620 err = do_md_run(mddev);
2594 } 2621 }
2595 break; 2622 break;
2596 case read_auto: 2623 case read_auto:
2597 /* stopping an active array */
2598 if (mddev->pers) { 2624 if (mddev->pers) {
2599 err = do_md_stop(mddev, 1); 2625 if (mddev->ro != 1)
2600 if (err == 0) 2626 err = do_md_stop(mddev, 1);
2601 mddev->ro = 2; /* FIXME mark devices writable */ 2627 else
2628 err = restart_array(mddev);
2629 if (err == 0) {
2630 mddev->ro = 2;
2631 set_disk_ro(mddev->gendisk, 0);
2632 }
2602 } else { 2633 } else {
2603 mddev->ro = 2; 2634 mddev->ro = 2;
2604 err = do_md_run(mddev); 2635 err = do_md_run(mddev);
@@ -2611,6 +2642,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
2611 if (atomic_read(&mddev->writes_pending) == 0) { 2642 if (atomic_read(&mddev->writes_pending) == 0) {
2612 if (mddev->in_sync == 0) { 2643 if (mddev->in_sync == 0) {
2613 mddev->in_sync = 1; 2644 mddev->in_sync = 1;
2645 if (mddev->safemode == 1)
2646 mddev->safemode = 0;
2614 if (mddev->persistent) 2647 if (mddev->persistent)
2615 set_bit(MD_CHANGE_CLEAN, 2648 set_bit(MD_CHANGE_CLEAN,
2616 &mddev->flags); 2649 &mddev->flags);
@@ -2634,6 +2667,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
2634 err = 0; 2667 err = 0;
2635 } else { 2668 } else {
2636 mddev->ro = 0; 2669 mddev->ro = 0;
2670 set_disk_ro(mddev->gendisk, 0);
2637 err = do_md_run(mddev); 2671 err = do_md_run(mddev);
2638 } 2672 }
2639 break; 2673 break;
@@ -3711,6 +3745,30 @@ static int do_md_stop(mddev_t * mddev, int mode)
3711 mddev->reshape_position = MaxSector; 3745 mddev->reshape_position = MaxSector;
3712 mddev->external = 0; 3746 mddev->external = 0;
3713 mddev->persistent = 0; 3747 mddev->persistent = 0;
3748 mddev->level = LEVEL_NONE;
3749 mddev->clevel[0] = 0;
3750 mddev->flags = 0;
3751 mddev->ro = 0;
3752 mddev->metadata_type[0] = 0;
3753 mddev->chunk_size = 0;
3754 mddev->ctime = mddev->utime = 0;
3755 mddev->layout = 0;
3756 mddev->max_disks = 0;
3757 mddev->events = 0;
3758 mddev->delta_disks = 0;
3759 mddev->new_level = LEVEL_NONE;
3760 mddev->new_layout = 0;
3761 mddev->new_chunk = 0;
3762 mddev->curr_resync = 0;
3763 mddev->resync_mismatches = 0;
3764 mddev->suspend_lo = mddev->suspend_hi = 0;
3765 mddev->sync_speed_min = mddev->sync_speed_max = 0;
3766 mddev->recovery = 0;
3767 mddev->in_sync = 0;
3768 mddev->changed = 0;
3769 mddev->degraded = 0;
3770 mddev->barriers_work = 0;
3771 mddev->safemode = 0;
3714 3772
3715 } else if (mddev->pers) 3773 } else if (mddev->pers)
3716 printk(KERN_INFO "md: %s switched to read-only mode.\n", 3774 printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -4918,6 +4976,9 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4918 4976
4919 if (!rdev || test_bit(Faulty, &rdev->flags)) 4977 if (!rdev || test_bit(Faulty, &rdev->flags))
4920 return; 4978 return;
4979
4980 if (mddev->external)
4981 set_bit(Blocked, &rdev->flags);
4921/* 4982/*
4922 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 4983 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4923 mdname(mddev), 4984 mdname(mddev),
@@ -5364,6 +5425,8 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
5364 md_wakeup_thread(mddev->sync_thread); 5425 md_wakeup_thread(mddev->sync_thread);
5365 } 5426 }
5366 atomic_inc(&mddev->writes_pending); 5427 atomic_inc(&mddev->writes_pending);
5428 if (mddev->safemode == 1)
5429 mddev->safemode = 0;
5367 if (mddev->in_sync) { 5430 if (mddev->in_sync) {
5368 spin_lock_irq(&mddev->write_lock); 5431 spin_lock_irq(&mddev->write_lock);
5369 if (mddev->in_sync) { 5432 if (mddev->in_sync) {
@@ -5718,7 +5781,7 @@ static int remove_and_add_spares(mddev_t *mddev)
5718 5781
5719 rdev_for_each(rdev, rtmp, mddev) 5782 rdev_for_each(rdev, rtmp, mddev)
5720 if (rdev->raid_disk >= 0 && 5783 if (rdev->raid_disk >= 0 &&
5721 !mddev->external && 5784 !test_bit(Blocked, &rdev->flags) &&
5722 (test_bit(Faulty, &rdev->flags) || 5785 (test_bit(Faulty, &rdev->flags) ||
5723 ! test_bit(In_sync, &rdev->flags)) && 5786 ! test_bit(In_sync, &rdev->flags)) &&
5724 atomic_read(&rdev->nr_pending)==0) { 5787 atomic_read(&rdev->nr_pending)==0) {
@@ -5788,7 +5851,7 @@ void md_check_recovery(mddev_t *mddev)
5788 return; 5851 return;
5789 5852
5790 if (signal_pending(current)) { 5853 if (signal_pending(current)) {
5791 if (mddev->pers->sync_request) { 5854 if (mddev->pers->sync_request && !mddev->external) {
5792 printk(KERN_INFO "md: %s in immediate safe mode\n", 5855 printk(KERN_INFO "md: %s in immediate safe mode\n",
5793 mdname(mddev)); 5856 mdname(mddev));
5794 mddev->safemode = 2; 5857 mddev->safemode = 2;
@@ -5800,7 +5863,7 @@ void md_check_recovery(mddev_t *mddev)
5800 (mddev->flags && !mddev->external) || 5863 (mddev->flags && !mddev->external) ||
5801 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 5864 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
5802 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 5865 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
5803 (mddev->safemode == 1) || 5866 (mddev->external == 0 && mddev->safemode == 1) ||
5804 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) 5867 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
5805 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 5868 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
5806 )) 5869 ))
@@ -5809,16 +5872,20 @@ void md_check_recovery(mddev_t *mddev)
5809 if (mddev_trylock(mddev)) { 5872 if (mddev_trylock(mddev)) {
5810 int spares = 0; 5873 int spares = 0;
5811 5874
5812 spin_lock_irq(&mddev->write_lock); 5875 if (!mddev->external) {
5813 if (mddev->safemode && !atomic_read(&mddev->writes_pending) && 5876 spin_lock_irq(&mddev->write_lock);
5814 !mddev->in_sync && mddev->recovery_cp == MaxSector) { 5877 if (mddev->safemode &&
5815 mddev->in_sync = 1; 5878 !atomic_read(&mddev->writes_pending) &&
5816 if (mddev->persistent) 5879 !mddev->in_sync &&
5817 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 5880 mddev->recovery_cp == MaxSector) {
5881 mddev->in_sync = 1;
5882 if (mddev->persistent)
5883 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5884 }
5885 if (mddev->safemode == 1)
5886 mddev->safemode = 0;
5887 spin_unlock_irq(&mddev->write_lock);
5818 } 5888 }
5819 if (mddev->safemode == 1)
5820 mddev->safemode = 0;
5821 spin_unlock_irq(&mddev->write_lock);
5822 5889
5823 if (mddev->flags) 5890 if (mddev->flags)
5824 md_update_sb(mddev, 0); 5891 md_update_sb(mddev, 0);
@@ -5913,6 +5980,16 @@ void md_check_recovery(mddev_t *mddev)
5913 } 5980 }
5914} 5981}
5915 5982
5983void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
5984{
5985 sysfs_notify(&rdev->kobj, NULL, "state");
5986 wait_event_timeout(rdev->blocked_wait,
5987 !test_bit(Blocked, &rdev->flags),
5988 msecs_to_jiffies(5000));
5989 rdev_dec_pending(rdev, mddev);
5990}
5991EXPORT_SYMBOL(md_wait_for_blocked_rdev);
5992
5916static int md_notify_reboot(struct notifier_block *this, 5993static int md_notify_reboot(struct notifier_block *this,
5917 unsigned long code, void *x) 5994 unsigned long code, void *x)
5918{ 5995{
@@ -5947,13 +6024,9 @@ static struct notifier_block md_notifier = {
5947 6024
5948static void md_geninit(void) 6025static void md_geninit(void)
5949{ 6026{
5950 struct proc_dir_entry *p;
5951
5952 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 6027 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
5953 6028
5954 p = create_proc_entry("mdstat", S_IRUGO, NULL); 6029 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
5955 if (p)
5956 p->proc_fops = &md_seq_fops;
5957} 6030}
5958 6031
5959static int __init md_init(void) 6032static int __init md_init(void)
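
The raid1, raid10 and raid5 hunks that follow all repeat the same retry pattern around the new Blocked flag and md_wait_for_blocked_rdev() added above. As a reading aid, here is a condensed userspace mock of that control flow — the struct, the "wait" and the locking are stand-ins, and only the pin/back-out/wait/retry shape mirrors the kernel change.

#include <stdbool.h>
#include <stdio.h>

struct mock_rdev {
	const char *name;
	bool blocked;		/* stands in for test_bit(Blocked, &rdev->flags) */
	int nr_pending;		/* stands in for the atomic nr_pending refcount */
};

/* Stand-in for md_wait_for_blocked_rdev(): block until the flag clears,
 * then drop the reference taken when the blocked device was spotted. */
static void wait_for_blocked(struct mock_rdev *rdev)
{
	printf("waiting for %s to become unblocked\n", rdev->name);
	rdev->blocked = false;	/* a real caller sleeps on blocked_wait here */
	rdev->nr_pending--;
}

static void make_request(struct mock_rdev *devs, int ndevs)
{
	struct mock_rdev *blocked_rdev;
	int i, j;

retry_write:
	blocked_rdev = NULL;
	for (i = 0; i < ndevs; i++) {
		if (devs[i].blocked) {
			devs[i].nr_pending++;	/* pin it across the wait */
			blocked_rdev = &devs[i];
			break;
		}
		devs[i].nr_pending++;		/* normal path: pin and use */
	}

	if (blocked_rdev) {
		/* back out the references taken so far, wait, then retry */
		for (j = 0; j < i; j++)
			devs[j].nr_pending--;
		wait_for_blocked(blocked_rdev);
		goto retry_write;
	}

	printf("write submitted to %d devices\n", ndevs);
}

int main(void)
{
	struct mock_rdev devs[2] = {
		{ "rdev0", false, 0 },
		{ "rdev1", true,  0 },	/* starts out blocked */
	};

	make_request(devs, 2);
	return 0;
}
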
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 9fd473a6dbf5..6778b7cb39bd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -773,7 +773,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
773 r1bio_t *r1_bio; 773 r1bio_t *r1_bio;
774 struct bio *read_bio; 774 struct bio *read_bio;
775 int i, targets = 0, disks; 775 int i, targets = 0, disks;
776 mdk_rdev_t *rdev;
777 struct bitmap *bitmap = mddev->bitmap; 776 struct bitmap *bitmap = mddev->bitmap;
778 unsigned long flags; 777 unsigned long flags;
779 struct bio_list bl; 778 struct bio_list bl;
@@ -781,6 +780,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
781 const int rw = bio_data_dir(bio); 780 const int rw = bio_data_dir(bio);
782 const int do_sync = bio_sync(bio); 781 const int do_sync = bio_sync(bio);
783 int do_barriers; 782 int do_barriers;
783 mdk_rdev_t *blocked_rdev;
784 784
785 /* 785 /*
786 * Register the new request and wait if the reconstruction 786 * Register the new request and wait if the reconstruction
@@ -862,10 +862,17 @@ static int make_request(struct request_queue *q, struct bio * bio)
862 first = 0; 862 first = 0;
863 } 863 }
864#endif 864#endif
865 retry_write:
866 blocked_rdev = NULL;
865 rcu_read_lock(); 867 rcu_read_lock();
866 for (i = 0; i < disks; i++) { 868 for (i = 0; i < disks; i++) {
867 if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL && 869 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
868 !test_bit(Faulty, &rdev->flags)) { 870 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
871 atomic_inc(&rdev->nr_pending);
872 blocked_rdev = rdev;
873 break;
874 }
875 if (rdev && !test_bit(Faulty, &rdev->flags)) {
869 atomic_inc(&rdev->nr_pending); 876 atomic_inc(&rdev->nr_pending);
870 if (test_bit(Faulty, &rdev->flags)) { 877 if (test_bit(Faulty, &rdev->flags)) {
871 rdev_dec_pending(rdev, mddev); 878 rdev_dec_pending(rdev, mddev);
@@ -878,6 +885,20 @@ static int make_request(struct request_queue *q, struct bio * bio)
878 } 885 }
879 rcu_read_unlock(); 886 rcu_read_unlock();
880 887
888 if (unlikely(blocked_rdev)) {
889 /* Wait for this device to become unblocked */
890 int j;
891
892 for (j = 0; j < i; j++)
893 if (r1_bio->bios[j])
894 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
895
896 allow_barrier(conf);
897 md_wait_for_blocked_rdev(blocked_rdev, mddev);
898 wait_barrier(conf);
899 goto retry_write;
900 }
901
881 BUG_ON(targets == 0); /* we never fail the last device */ 902 BUG_ON(targets == 0); /* we never fail the last device */
882 903
883 if (targets < conf->raid_disks) { 904 if (targets < conf->raid_disks) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1e96aa3ff513..5938fa962922 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -790,6 +790,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
790 const int do_sync = bio_sync(bio); 790 const int do_sync = bio_sync(bio);
791 struct bio_list bl; 791 struct bio_list bl;
792 unsigned long flags; 792 unsigned long flags;
793 mdk_rdev_t *blocked_rdev;
793 794
794 if (unlikely(bio_barrier(bio))) { 795 if (unlikely(bio_barrier(bio))) {
795 bio_endio(bio, -EOPNOTSUPP); 796 bio_endio(bio, -EOPNOTSUPP);
@@ -879,17 +880,23 @@ static int make_request(struct request_queue *q, struct bio * bio)
879 /* 880 /*
880 * WRITE: 881 * WRITE:
881 */ 882 */
882 /* first select target devices under spinlock and 883 /* first select target devices under rcu_lock and
883 * inc refcount on their rdev. Record them by setting 884 * inc refcount on their rdev. Record them by setting
884 * bios[x] to bio 885 * bios[x] to bio
885 */ 886 */
886 raid10_find_phys(conf, r10_bio); 887 raid10_find_phys(conf, r10_bio);
888 retry_write:
889 blocked_rdev = 0;
887 rcu_read_lock(); 890 rcu_read_lock();
888 for (i = 0; i < conf->copies; i++) { 891 for (i = 0; i < conf->copies; i++) {
889 int d = r10_bio->devs[i].devnum; 892 int d = r10_bio->devs[i].devnum;
890 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); 893 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
891 if (rdev && 894 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
892 !test_bit(Faulty, &rdev->flags)) { 895 atomic_inc(&rdev->nr_pending);
896 blocked_rdev = rdev;
897 break;
898 }
899 if (rdev && !test_bit(Faulty, &rdev->flags)) {
893 atomic_inc(&rdev->nr_pending); 900 atomic_inc(&rdev->nr_pending);
894 r10_bio->devs[i].bio = bio; 901 r10_bio->devs[i].bio = bio;
895 } else { 902 } else {
@@ -899,6 +906,22 @@ static int make_request(struct request_queue *q, struct bio * bio)
899 } 906 }
900 rcu_read_unlock(); 907 rcu_read_unlock();
901 908
909 if (unlikely(blocked_rdev)) {
910 /* Have to wait for this device to get unblocked, then retry */
911 int j;
912 int d;
913
914 for (j = 0; j < i; j++)
915 if (r10_bio->devs[j].bio) {
916 d = r10_bio->devs[j].devnum;
917 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
918 }
919 allow_barrier(conf);
920 md_wait_for_blocked_rdev(blocked_rdev, mddev);
921 wait_barrier(conf);
922 goto retry_write;
923 }
924
902 atomic_set(&r10_bio->remaining, 0); 925 atomic_set(&r10_bio->remaining, 0);
903 926
904 bio_list_init(&bl); 927 bio_list_init(&bl);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 968dacaced6d..087eee0cb809 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2607,6 +2607,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2607 } 2607 }
2608} 2608}
2609 2609
2610
2610/* 2611/*
2611 * handle_stripe - do things to a stripe. 2612 * handle_stripe - do things to a stripe.
2612 * 2613 *
@@ -2632,6 +2633,7 @@ static void handle_stripe5(struct stripe_head *sh)
2632 struct stripe_head_state s; 2633 struct stripe_head_state s;
2633 struct r5dev *dev; 2634 struct r5dev *dev;
2634 unsigned long pending = 0; 2635 unsigned long pending = 0;
2636 mdk_rdev_t *blocked_rdev = NULL;
2635 2637
2636 memset(&s, 0, sizeof(s)); 2638 memset(&s, 0, sizeof(s));
2637 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d " 2639 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d "
@@ -2691,6 +2693,11 @@ static void handle_stripe5(struct stripe_head *sh)
2691 if (dev->written) 2693 if (dev->written)
2692 s.written++; 2694 s.written++;
2693 rdev = rcu_dereference(conf->disks[i].rdev); 2695 rdev = rcu_dereference(conf->disks[i].rdev);
2696 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
2697 blocked_rdev = rdev;
2698 atomic_inc(&rdev->nr_pending);
2699 break;
2700 }
2694 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2701 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
2695 /* The ReadError flag will just be confusing now */ 2702 /* The ReadError flag will just be confusing now */
2696 clear_bit(R5_ReadError, &dev->flags); 2703 clear_bit(R5_ReadError, &dev->flags);
@@ -2705,6 +2712,11 @@ static void handle_stripe5(struct stripe_head *sh)
2705 } 2712 }
2706 rcu_read_unlock(); 2713 rcu_read_unlock();
2707 2714
2715 if (unlikely(blocked_rdev)) {
2716 set_bit(STRIPE_HANDLE, &sh->state);
2717 goto unlock;
2718 }
2719
2708 if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2720 if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
2709 sh->ops.count++; 2721 sh->ops.count++;
2710 2722
@@ -2894,8 +2906,13 @@ static void handle_stripe5(struct stripe_head *sh)
2894 if (sh->ops.count) 2906 if (sh->ops.count)
2895 pending = get_stripe_work(sh); 2907 pending = get_stripe_work(sh);
2896 2908
2909 unlock:
2897 spin_unlock(&sh->lock); 2910 spin_unlock(&sh->lock);
2898 2911
2912 /* wait for this device to become unblocked */
2913 if (unlikely(blocked_rdev))
2914 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
2915
2899 if (pending) 2916 if (pending)
2900 raid5_run_ops(sh, pending); 2917 raid5_run_ops(sh, pending);
2901 2918
@@ -2912,6 +2929,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2912 struct stripe_head_state s; 2929 struct stripe_head_state s;
2913 struct r6_state r6s; 2930 struct r6_state r6s;
2914 struct r5dev *dev, *pdev, *qdev; 2931 struct r5dev *dev, *pdev, *qdev;
2932 mdk_rdev_t *blocked_rdev = NULL;
2915 2933
2916 r6s.qd_idx = raid6_next_disk(pd_idx, disks); 2934 r6s.qd_idx = raid6_next_disk(pd_idx, disks);
2917 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 2935 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
@@ -2975,6 +2993,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2975 if (dev->written) 2993 if (dev->written)
2976 s.written++; 2994 s.written++;
2977 rdev = rcu_dereference(conf->disks[i].rdev); 2995 rdev = rcu_dereference(conf->disks[i].rdev);
2996 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
2997 blocked_rdev = rdev;
2998 atomic_inc(&rdev->nr_pending);
2999 break;
3000 }
2978 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 3001 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
2979 /* The ReadError flag will just be confusing now */ 3002 /* The ReadError flag will just be confusing now */
2980 clear_bit(R5_ReadError, &dev->flags); 3003 clear_bit(R5_ReadError, &dev->flags);
@@ -2989,6 +3012,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2989 set_bit(R5_Insync, &dev->flags); 3012 set_bit(R5_Insync, &dev->flags);
2990 } 3013 }
2991 rcu_read_unlock(); 3014 rcu_read_unlock();
3015
3016 if (unlikely(blocked_rdev)) {
3017 set_bit(STRIPE_HANDLE, &sh->state);
3018 goto unlock;
3019 }
2992 pr_debug("locked=%d uptodate=%d to_read=%d" 3020 pr_debug("locked=%d uptodate=%d to_read=%d"
2993 " to_write=%d failed=%d failed_num=%d,%d\n", 3021 " to_write=%d failed=%d failed_num=%d,%d\n",
2994 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3022 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
@@ -3094,8 +3122,13 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
3094 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 3122 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
3095 handle_stripe_expansion(conf, sh, &r6s); 3123 handle_stripe_expansion(conf, sh, &r6s);
3096 3124
3125 unlock:
3097 spin_unlock(&sh->lock); 3126 spin_unlock(&sh->lock);
3098 3127
3128 /* wait for this device to become unblocked */
3129 if (unlikely(blocked_rdev))
3130 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3131
3099 return_io(return_bi); 3132 return_io(return_bi);
3100 3133
3101 for (i=disks; i-- ;) { 3134 for (i=disks; i-- ;) {
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 128bb9cd5755..ddf57e135c6c 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -5,16 +5,20 @@
5menu "Multimedia devices" 5menu "Multimedia devices"
6 depends on HAS_IOMEM 6 depends on HAS_IOMEM
7 7
8comment "Multimedia core support"
9
10#
11# V4L core and enabled API's
12#
13
8config VIDEO_DEV 14config VIDEO_DEV
9 tristate "Video For Linux" 15 tristate "Video For Linux"
10 ---help--- 16 ---help---
11 Support for audio/video capture and overlay devices and FM radio 17 V4L core support for video capture and overlay devices, webcams and
12 cards. The exact capabilities of each device vary. 18 AM/FM radio cards.
13 19
14 This kernel includes support for the new Video for Linux Two API, 20 This kernel includes support for the new Video for Linux Two API,
15 (V4L2) as well as the original system. Drivers and applications 21 (V4L2).
16 need to be rewritten to use V4L2, but drivers for popular cards
17 and applications for most video capture functions already exist.
18 22
19 Additional info and docs are available on the web at 23 Additional info and docs are available on the web at
20 <http://linuxtv.org> 24 <http://linuxtv.org>
@@ -36,8 +40,11 @@ config VIDEO_ALLOW_V4L1
36 default VIDEO_DEV && VIDEO_V4L2_COMMON 40 default VIDEO_DEV && VIDEO_V4L2_COMMON
37 select VIDEO_V4L1_COMPAT 41 select VIDEO_V4L1_COMPAT
38 ---help--- 42 ---help---
39 Enables a compatibility API used by most V4L2 devices to allow 43 Enables drivers based on the legacy V4L1 API.
40 its usage with legacy applications that supports only V4L1 api. 44
45 This api were developed to be used at Kernel 2.2 and 2.4, but
46 lacks support for several video standards. There are several
47 drivers at kernel that still depends on it.
41 48
42 If you are unsure as to whether this is required, answer Y. 49 If you are unsure as to whether this is required, answer Y.
43 50
@@ -46,9 +53,8 @@ config VIDEO_V4L1_COMPAT
46 depends on VIDEO_DEV 53 depends on VIDEO_DEV
47 default VIDEO_DEV 54 default VIDEO_DEV
48 ---help--- 55 ---help---
49 This api were developed to be used at Kernel 2.2 and 2.4, but 56 Enables a compatibility API used by most V4L2 devices to allow
50 lacks support for several video standards. There are several 57 its usage with legacy applications that supports only V4L1 api.
51 drivers at kernel that still depends on it.
52 58
53 Documentation for the original API is included in the file 59 Documentation for the original API is included in the file
54 <Documentation/video4linux/API.html>. 60 <Documentation/video4linux/API.html>.
@@ -58,135 +64,57 @@ config VIDEO_V4L1_COMPAT
58 64
59 If you are unsure as to whether this is required, answer Y. 65 If you are unsure as to whether this is required, answer Y.
60 66
61config VIDEO_V4L2 67#
62 tristate 68# DVB Core
63 depends on VIDEO_DEV && VIDEO_V4L2_COMMON 69#
64 default VIDEO_DEV && VIDEO_V4L2_COMMON
65
66config VIDEO_V4L1
67 tristate
68 depends on VIDEO_DEV && VIDEO_V4L2_COMMON && VIDEO_ALLOW_V4L1
69 default VIDEO_DEV && VIDEO_V4L2_COMMON && VIDEO_ALLOW_V4L1
70
71source "drivers/media/video/Kconfig"
72
73source "drivers/media/radio/Kconfig"
74
75source "drivers/media/dvb/Kconfig"
76
77source "drivers/media/common/Kconfig"
78 70
79config VIDEO_TUNER 71config DVB_CORE
80 tristate 72 tristate "DVB for Linux"
81 depends on I2C 73 depends on NET && INET
82 select TUNER_XC2028 if !VIDEO_TUNER_CUSTOMIZE 74 select CRC32
83 select TUNER_MT20XX if !VIDEO_TUNER_CUSTOMIZE
84 select TUNER_TDA8290 if !VIDEO_TUNER_CUSTOMIZE
85 select TUNER_TEA5761 if !VIDEO_TUNER_CUSTOMIZE
86 select TUNER_TEA5767 if !VIDEO_TUNER_CUSTOMIZE
87 select TUNER_SIMPLE if !VIDEO_TUNER_CUSTOMIZE
88 select TUNER_TDA9887 if !VIDEO_TUNER_CUSTOMIZE
89
90menuconfig VIDEO_TUNER_CUSTOMIZE
91 bool "Customize analog tuner modules to build"
92 depends on VIDEO_TUNER
93 help 75 help
94 This allows the user to deselect tuner drivers unnecessary 76 DVB core utility functions for device handling, software fallbacks etc.
95 for their hardware from the build. Use this option with care
96 as deselecting tuner drivers which are in fact necessary will
97 result in V4L devices which cannot be tuned due to lack of
98 driver support
99 77
100 If unsure say N. 78 Enable this if you own a DVB/ATSC adapter and want to use it or if
101 79 you compile Linux for a digital SetTopBox.
102if VIDEO_TUNER_CUSTOMIZE
103
104config TUNER_XC2028
105 tristate "XCeive xc2028/xc3028 tuners"
106 depends on I2C && FW_LOADER
107 default m if VIDEO_TUNER_CUSTOMIZE
108 help
109 Say Y here to include support for the xc2028/xc3028 tuners.
110 80
111config TUNER_MT20XX 81 Say Y when you have a DVB or an ATSC card and want to use it.
112 tristate "Microtune 2032 / 2050 tuners"
113 depends on I2C
114 default m if VIDEO_TUNER_CUSTOMIZE
115 help
116 Say Y here to include support for the MT2032 / MT2050 tuner.
117
118config TUNER_TDA8290
119 tristate "TDA 8290/8295 + 8275(a)/18271 tuner combo"
120 depends on I2C
121 select DVB_TDA827X
122 select DVB_TDA18271
123 default m if VIDEO_TUNER_CUSTOMIZE
124 help
125 Say Y here to include support for Philips TDA8290+8275(a) tuner.
126 82
127config TUNER_TEA5761 83 API specs and user tools are available from <http://www.linuxtv.org/>.
128 tristate "TEA 5761 radio tuner (EXPERIMENTAL)"
129 depends on I2C && EXPERIMENTAL
130 default m if VIDEO_TUNER_CUSTOMIZE
131 help
132 Say Y here to include support for the Philips TEA5761 radio tuner.
133 84
134config TUNER_TEA5767 85 Please report problems regarding this support to the LinuxDVB
135 tristate "TEA 5767 radio tuner" 86 mailing list.
136 depends on I2C
137 default m if VIDEO_TUNER_CUSTOMIZE
138 help
139 Say Y here to include support for the Philips TEA5767 radio tuner.
140 87
141config TUNER_SIMPLE 88 If unsure say N.
142 tristate "Simple tuner support"
143 depends on I2C
144 select TUNER_TDA9887
145 default m if VIDEO_TUNER_CUSTOMIZE
146 help
147 Say Y here to include support for various simple tuners.
148 89
149config TUNER_TDA9887 90config VIDEO_MEDIA
150 tristate "TDA 9885/6/7 analog IF demodulator" 91 tristate
151 depends on I2C 92 default DVB_CORE || VIDEO_DEV
152 default m if VIDEO_TUNER_CUSTOMIZE 93 depends on DVB_CORE || VIDEO_DEV
153 help
154 Say Y here to include support for Philips TDA9885/6/7
155 analog IF demodulator.
156 94
157endif # VIDEO_TUNER_CUSTOMIZE 95comment "Multimedia drivers"
158 96
159config VIDEOBUF_GEN 97source "drivers/media/common/Kconfig"
160 tristate
161 98
162config VIDEOBUF_DMA_SG 99#
163 depends on HAS_DMA 100# Tuner drivers for DVB and V4L
164 select VIDEOBUF_GEN 101#
165 tristate
166 102
167config VIDEOBUF_VMALLOC 103source "drivers/media/common/tuners/Kconfig"
168 select VIDEOBUF_GEN
169 tristate
170 104
171config VIDEOBUF_DVB 105#
172 tristate 106# Video/Radio/Hybrid adapters
173 select VIDEOBUF_GEN 107#
174 select VIDEOBUF_DMA_SG
175 108
176config VIDEO_BTCX 109source "drivers/media/video/Kconfig"
177 tristate
178 110
179config VIDEO_IR_I2C 111source "drivers/media/radio/Kconfig"
180 tristate
181 112
182config VIDEO_IR 113#
183 tristate 114# DVB adapters
184 depends on INPUT 115#
185 select VIDEO_IR_I2C if I2C
186 116
187config VIDEO_TVEEPROM 117source "drivers/media/dvb/Kconfig"
188 tristate
189 depends on I2C
190 118
191config DAB 119config DAB
192 boolean "DAB adapters" 120 boolean "DAB adapters"
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 7b8bb6949f5e..73f742c7e818 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -2,10 +2,10 @@
2# Makefile for the kernel multimedia device drivers. 2# Makefile for the kernel multimedia device drivers.
3# 3#
4 4
5obj-y := common/ 5obj-$(CONFIG_VIDEO_MEDIA) += common/
6obj-y += video/ 6
7# Since hybrid devices are here, should be compiled if DVB and/or V4L
8obj-$(CONFIG_VIDEO_MEDIA) += video/
9
7obj-$(CONFIG_VIDEO_DEV) += radio/ 10obj-$(CONFIG_VIDEO_DEV) += radio/
8obj-$(CONFIG_DVB_CORE) += dvb/ 11obj-$(CONFIG_DVB_CORE) += dvb/
9ifeq ($(CONFIG_DVB_CORE),)
10 obj-$(CONFIG_VIDEO_TUNER) += dvb/frontends/
11endif
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index 8e7448230643..351b98b9b302 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -2,6 +2,7 @@ saa7146-objs := saa7146_i2c.o saa7146_core.o
2saa7146_vv-objs := saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o 2saa7146_vv-objs := saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o
3ir-common-objs := ir-functions.o ir-keymaps.o 3ir-common-objs := ir-functions.o ir-keymaps.o
4 4
5obj-y += tuners/
5obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o 6obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o
6obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o 7obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o
7obj-$(CONFIG_VIDEO_IR) += ir-common.o 8obj-$(CONFIG_VIDEO_IR) += ir-common.o
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
new file mode 100644
index 000000000000..5be85ff53e12
--- /dev/null
+++ b/drivers/media/common/tuners/Kconfig
@@ -0,0 +1,151 @@
1config MEDIA_ATTACH
2 bool "Load and attach frontend and tuner driver modules as needed"
3 depends on DVB_CORE
4 depends on MODULES
5 help
6 Remove the static dependency of DVB card drivers on all
7 frontend modules for all possible card variants. Instead,
8 allow the card drivers to only load the frontend modules
9 they require.
10
11 Also, tuner module will automatically load a tuner driver
12 when needed, for analog mode.
13
14 This saves several KBytes of memory.
15
16 Note: You will need module-init-tools v3.2 or later for this feature.
17
18 If unsure say Y.
19
20config MEDIA_TUNER
21 tristate
22 default DVB_CORE || VIDEO_DEV
23 depends on DVB_CORE || VIDEO_DEV
24 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMIZE
25 select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMIZE
26 select MEDIA_TUNER_MT20XX if !MEDIA_TUNER_CUSTOMIZE
27 select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMIZE
28 select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMIZE
29 select MEDIA_TUNER_TEA5767 if !MEDIA_TUNER_CUSTOMIZE
30 select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE
31 select MEDIA_TUNER_TDA9887 if !MEDIA_TUNER_CUSTOMIZE
32
33menuconfig MEDIA_TUNER_CUSTOMIZE
34 bool "Customize analog and hybrid tuner modules to build"
35 depends on MEDIA_TUNER
36 help
37 This allows the user to deselect tuner drivers unnecessary
38 for their hardware from the build. Use this option with care
39 as deselecting tuner drivers which are in fact necessary will
40 result in V4L/DVB devices which cannot be tuned due to lack of
41 driver support
42
43 If unsure say N.
44
45if MEDIA_TUNER_CUSTOMIZE
46
47config MEDIA_TUNER_SIMPLE
48 tristate "Simple tuner support"
49 depends on I2C
50 select MEDIA_TUNER_TDA9887
51 default m if MEDIA_TUNER_CUSTOMIZE
52 help
53 Say Y here to include support for various simple tuners.
54
55config MEDIA_TUNER_TDA8290
56 tristate "TDA 8290/8295 + 8275(a)/18271 tuner combo"
57 depends on I2C
58 select MEDIA_TUNER_TDA827X
59 select MEDIA_TUNER_TDA18271
60 default m if MEDIA_TUNER_CUSTOMIZE
61 help
62 Say Y here to include support for Philips TDA8290+8275(a) tuner.
63
64config MEDIA_TUNER_TDA827X
65 tristate "Philips TDA827X silicon tuner"
66 depends on DVB_CORE && I2C
67 default m if DVB_FE_CUSTOMISE
68 help
69 A DVB-T silicon tuner module. Say Y when you want to support this tuner.
70
71config MEDIA_TUNER_TDA18271
72 tristate "NXP TDA18271 silicon tuner"
73 depends on I2C
74 default m if DVB_FE_CUSTOMISE
75 help
76 A silicon tuner module. Say Y when you want to support this tuner.
77
78config MEDIA_TUNER_TDA9887
79 tristate "TDA 9885/6/7 analog IF demodulator"
80 depends on I2C
81 default m if MEDIA_TUNER_CUSTOMIZE
82 help
83 Say Y here to include support for Philips TDA9885/6/7
84 analog IF demodulator.
85
86config MEDIA_TUNER_TEA5761
87 tristate "TEA 5761 radio tuner (EXPERIMENTAL)"
88 depends on I2C && EXPERIMENTAL
89 default m if MEDIA_TUNER_CUSTOMIZE
90 help
91 Say Y here to include support for the Philips TEA5761 radio tuner.
92
93config MEDIA_TUNER_TEA5767
94 tristate "TEA 5767 radio tuner"
95 depends on I2C
96 default m if MEDIA_TUNER_CUSTOMIZE
97 help
98 Say Y here to include support for the Philips TEA5767 radio tuner.
99
100config MEDIA_TUNER_MT20XX
101 tristate "Microtune 2032 / 2050 tuners"
102 depends on I2C
103 default m if MEDIA_TUNER_CUSTOMIZE
104 help
105 Say Y here to include support for the MT2032 / MT2050 tuner.
106
107config MEDIA_TUNER_MT2060
108 tristate "Microtune MT2060 silicon IF tuner"
109 depends on I2C
110 default m if DVB_FE_CUSTOMISE
111 help
112 A driver for the silicon IF tuner MT2060 from Microtune.
113
114config MEDIA_TUNER_MT2266
115 tristate "Microtune MT2266 silicon tuner"
116 depends on I2C
117 default m if DVB_FE_CUSTOMISE
118 help
119 A driver for the silicon baseband tuner MT2266 from Microtune.
120
121config MEDIA_TUNER_MT2131
122 tristate "Microtune MT2131 silicon tuner"
123 depends on I2C
124 default m if DVB_FE_CUSTOMISE
125 help
126 A driver for the silicon baseband tuner MT2131 from Microtune.
127
128config MEDIA_TUNER_QT1010
129 tristate "Quantek QT1010 silicon tuner"
130 depends on DVB_CORE && I2C
131 default m if DVB_FE_CUSTOMISE
132 help
133 A driver for the silicon tuner QT1010 from Quantek.
134
135config MEDIA_TUNER_XC2028
136 tristate "XCeive xc2028/xc3028 tuners"
137 depends on I2C && FW_LOADER
138 default m if MEDIA_TUNER_CUSTOMIZE
139 help
140 Say Y here to include support for the xc2028/xc3028 tuners.
141
142config MEDIA_TUNER_XC5000
143 tristate "Xceive XC5000 silicon tuner"
144 depends on I2C
145 default m if DVB_FE_CUSTOMISE
146 help
147 A driver for the silicon tuner XC5000 from Xceive.
148 This device is only used inside a SiP called together with a
149 demodulator for now.
150
151endif # MEDIA_TUNER_CUSTOMIZE
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
new file mode 100644
index 000000000000..236d9932fd92
--- /dev/null
+++ b/drivers/media/common/tuners/Makefile
@@ -0,0 +1,25 @@
1#
2# Makefile for common V4L/DVB tuners
3#
4
5tda18271-objs := tda18271-maps.o tda18271-common.o tda18271-fe.o
6
7obj-$(CONFIG_MEDIA_TUNER_XC2028) += tuner-xc2028.o
8obj-$(CONFIG_MEDIA_TUNER_SIMPLE) += tuner-simple.o
9# tuner-types will be merged into tuner-simple, in the future
10obj-$(CONFIG_MEDIA_TUNER_SIMPLE) += tuner-types.o
11obj-$(CONFIG_MEDIA_TUNER_MT20XX) += mt20xx.o
12obj-$(CONFIG_MEDIA_TUNER_TDA8290) += tda8290.o
13obj-$(CONFIG_MEDIA_TUNER_TEA5767) += tea5767.o
14obj-$(CONFIG_MEDIA_TUNER_TEA5761) += tea5761.o
15obj-$(CONFIG_MEDIA_TUNER_TDA9887) += tda9887.o
16obj-$(CONFIG_MEDIA_TUNER_TDA827X) += tda827x.o
17obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o
18obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o
19obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
20obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o
21obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o
22obj-$(CONFIG_MEDIA_TUNER_MT2131) += mt2131.o
23
24EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
25EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/dvb/frontends/mt2060.c b/drivers/media/common/tuners/mt2060.c
index 1305b0e63ce5..1305b0e63ce5 100644
--- a/drivers/media/dvb/frontends/mt2060.c
+++ b/drivers/media/common/tuners/mt2060.c
diff --git a/drivers/media/dvb/frontends/mt2060.h b/drivers/media/common/tuners/mt2060.h
index acba0058f519..cb60caffb6b6 100644
--- a/drivers/media/dvb/frontends/mt2060.h
+++ b/drivers/media/common/tuners/mt2060.h
@@ -30,7 +30,7 @@ struct mt2060_config {
30 u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */ 30 u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
31}; 31};
32 32
33#if defined(CONFIG_DVB_TUNER_MT2060) || (defined(CONFIG_DVB_TUNER_MT2060_MODULE) && defined(MODULE)) 33#if defined(CONFIG_MEDIA_TUNER_MT2060) || (defined(CONFIG_MEDIA_TUNER_MT2060_MODULE) && defined(MODULE))
34extern struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1); 34extern struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1);
35#else 35#else
36static inline struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1) 36static inline struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1)
@@ -38,6 +38,6 @@ static inline struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struc
38 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 38 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
39 return NULL; 39 return NULL;
40} 40}
41#endif // CONFIG_DVB_TUNER_MT2060 41#endif // CONFIG_MEDIA_TUNER_MT2060
42 42
43#endif 43#endif
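Every tuner header moved in this series keeps the guard visible above: the real attach prototype is declared only when the tuner is built in, or when it is modular and the including driver is itself being built as a module; otherwise an inline stub warns and returns NULL so bridge drivers keep compiling. A generic sketch of the pattern for a hypothetical MEDIA_TUNER_FOO option:

/* foo.h -- hypothetical tuner header following the guard shown above */
#include <linux/i2c.h>
#include "dvb_frontend.h"

#if defined(CONFIG_MEDIA_TUNER_FOO) || \
	(defined(CONFIG_MEDIA_TUNER_FOO_MODULE) && defined(MODULE))
extern struct dvb_frontend *foo_attach(struct dvb_frontend *fe,
				       struct i2c_adapter *i2c, u8 addr);
#else
static inline struct dvb_frontend *foo_attach(struct dvb_frontend *fe,
					      struct i2c_adapter *i2c, u8 addr)
{
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif /* CONFIG_MEDIA_TUNER_FOO */

The rename in this patch only swaps the old CONFIG_DVB_TUNER_*/CONFIG_TUNER_* symbols for the new CONFIG_MEDIA_TUNER_* names inside that guard; the stub mechanism itself is unchanged.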
diff --git a/drivers/media/dvb/frontends/mt2060_priv.h b/drivers/media/common/tuners/mt2060_priv.h
index 5eaccdefd0b0..5eaccdefd0b0 100644
--- a/drivers/media/dvb/frontends/mt2060_priv.h
+++ b/drivers/media/common/tuners/mt2060_priv.h
diff --git a/drivers/media/video/mt20xx.c b/drivers/media/common/tuners/mt20xx.c
index fbcb28233737..fbcb28233737 100644
--- a/drivers/media/video/mt20xx.c
+++ b/drivers/media/common/tuners/mt20xx.c
diff --git a/drivers/media/video/mt20xx.h b/drivers/media/common/tuners/mt20xx.h
index aa848e14ce5e..259553a24903 100644
--- a/drivers/media/video/mt20xx.h
+++ b/drivers/media/common/tuners/mt20xx.h
@@ -20,7 +20,7 @@
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include "dvb_frontend.h" 21#include "dvb_frontend.h"
22 22
23#if defined(CONFIG_TUNER_MT20XX) || (defined(CONFIG_TUNER_MT20XX_MODULE) && defined(MODULE)) 23#if defined(CONFIG_MEDIA_TUNER_MT20XX) || (defined(CONFIG_MEDIA_TUNER_MT20XX_MODULE) && defined(MODULE))
24extern struct dvb_frontend *microtune_attach(struct dvb_frontend *fe, 24extern struct dvb_frontend *microtune_attach(struct dvb_frontend *fe,
25 struct i2c_adapter* i2c_adap, 25 struct i2c_adapter* i2c_adap,
26 u8 i2c_addr); 26 u8 i2c_addr);
diff --git a/drivers/media/dvb/frontends/mt2131.c b/drivers/media/common/tuners/mt2131.c
index e254bcfc2efb..e254bcfc2efb 100644
--- a/drivers/media/dvb/frontends/mt2131.c
+++ b/drivers/media/common/tuners/mt2131.c
diff --git a/drivers/media/dvb/frontends/mt2131.h b/drivers/media/common/tuners/mt2131.h
index 606d8576bc98..cd8376f6f7b4 100644
--- a/drivers/media/dvb/frontends/mt2131.h
+++ b/drivers/media/common/tuners/mt2131.h
@@ -30,7 +30,7 @@ struct mt2131_config {
30 u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */ 30 u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
31}; 31};
32 32
33#if defined(CONFIG_DVB_TUNER_MT2131) || (defined(CONFIG_DVB_TUNER_MT2131_MODULE) && defined(MODULE)) 33#if defined(CONFIG_MEDIA_TUNER_MT2131) || (defined(CONFIG_MEDIA_TUNER_MT2131_MODULE) && defined(MODULE))
34extern struct dvb_frontend* mt2131_attach(struct dvb_frontend *fe, 34extern struct dvb_frontend* mt2131_attach(struct dvb_frontend *fe,
35 struct i2c_adapter *i2c, 35 struct i2c_adapter *i2c,
36 struct mt2131_config *cfg, 36 struct mt2131_config *cfg,
@@ -44,7 +44,7 @@ static inline struct dvb_frontend* mt2131_attach(struct dvb_frontend *fe,
44 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 44 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
45 return NULL; 45 return NULL;
46} 46}
47#endif /* CONFIG_DVB_TUNER_MT2131 */ 47#endif /* CONFIG_MEDIA_TUNER_MT2131 */
48 48
49#endif /* __MT2131_H__ */ 49#endif /* __MT2131_H__ */
50 50
diff --git a/drivers/media/dvb/frontends/mt2131_priv.h b/drivers/media/common/tuners/mt2131_priv.h
index e930759c2c00..e930759c2c00 100644
--- a/drivers/media/dvb/frontends/mt2131_priv.h
+++ b/drivers/media/common/tuners/mt2131_priv.h
diff --git a/drivers/media/dvb/frontends/mt2266.c b/drivers/media/common/tuners/mt2266.c
index 54b18f94b14b..54b18f94b14b 100644
--- a/drivers/media/dvb/frontends/mt2266.c
+++ b/drivers/media/common/tuners/mt2266.c
diff --git a/drivers/media/dvb/frontends/mt2266.h b/drivers/media/common/tuners/mt2266.h
index c5113efe333c..4d083882d044 100644
--- a/drivers/media/dvb/frontends/mt2266.h
+++ b/drivers/media/common/tuners/mt2266.h
@@ -24,7 +24,7 @@ struct mt2266_config {
24 u8 i2c_address; 24 u8 i2c_address;
25}; 25};
26 26
27#if defined(CONFIG_DVB_TUNER_MT2266) || (defined(CONFIG_DVB_TUNER_MT2266_MODULE) && defined(MODULE)) 27#if defined(CONFIG_MEDIA_TUNER_MT2266) || (defined(CONFIG_MEDIA_TUNER_MT2266_MODULE) && defined(MODULE))
28extern struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg); 28extern struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg);
29#else 29#else
30static inline struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg) 30static inline struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg)
@@ -32,6 +32,6 @@ static inline struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struc
32 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 32 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
33 return NULL; 33 return NULL;
34} 34}
35#endif // CONFIG_DVB_TUNER_MT2266 35#endif // CONFIG_MEDIA_TUNER_MT2266
36 36
37#endif 37#endif
diff --git a/drivers/media/dvb/frontends/qt1010.c b/drivers/media/common/tuners/qt1010.c
index 825aa1412e6f..825aa1412e6f 100644
--- a/drivers/media/dvb/frontends/qt1010.c
+++ b/drivers/media/common/tuners/qt1010.c
diff --git a/drivers/media/dvb/frontends/qt1010.h b/drivers/media/common/tuners/qt1010.h
index cff6a7ca5380..807fb7b6146b 100644
--- a/drivers/media/dvb/frontends/qt1010.h
+++ b/drivers/media/common/tuners/qt1010.h
@@ -36,7 +36,7 @@ struct qt1010_config {
36 * @param cfg tuner hw based configuration 36 * @param cfg tuner hw based configuration
37 * @return fe pointer on success, NULL on failure 37 * @return fe pointer on success, NULL on failure
38 */ 38 */
39#if defined(CONFIG_DVB_TUNER_QT1010) || (defined(CONFIG_DVB_TUNER_QT1010_MODULE) && defined(MODULE)) 39#if defined(CONFIG_MEDIA_TUNER_QT1010) || (defined(CONFIG_MEDIA_TUNER_QT1010_MODULE) && defined(MODULE))
40extern struct dvb_frontend *qt1010_attach(struct dvb_frontend *fe, 40extern struct dvb_frontend *qt1010_attach(struct dvb_frontend *fe,
41 struct i2c_adapter *i2c, 41 struct i2c_adapter *i2c,
42 struct qt1010_config *cfg); 42 struct qt1010_config *cfg);
@@ -48,6 +48,6 @@ static inline struct dvb_frontend *qt1010_attach(struct dvb_frontend *fe,
48 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 48 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
49 return NULL; 49 return NULL;
50} 50}
51#endif // CONFIG_DVB_TUNER_QT1010 51#endif // CONFIG_MEDIA_TUNER_QT1010
52 52
53#endif 53#endif
diff --git a/drivers/media/dvb/frontends/qt1010_priv.h b/drivers/media/common/tuners/qt1010_priv.h
index 090cf475f099..090cf475f099 100644
--- a/drivers/media/dvb/frontends/qt1010_priv.h
+++ b/drivers/media/common/tuners/qt1010_priv.h
diff --git a/drivers/media/dvb/frontends/tda18271-common.c b/drivers/media/common/tuners/tda18271-common.c
index e27a7620a32f..e27a7620a32f 100644
--- a/drivers/media/dvb/frontends/tda18271-common.c
+++ b/drivers/media/common/tuners/tda18271-common.c
diff --git a/drivers/media/dvb/frontends/tda18271-fe.c b/drivers/media/common/tuners/tda18271-fe.c
index b262100ae897..b262100ae897 100644
--- a/drivers/media/dvb/frontends/tda18271-fe.c
+++ b/drivers/media/common/tuners/tda18271-fe.c
diff --git a/drivers/media/dvb/frontends/tda18271-tables.c b/drivers/media/common/tuners/tda18271-maps.c
index 83e7561960c1..83e7561960c1 100644
--- a/drivers/media/dvb/frontends/tda18271-tables.c
+++ b/drivers/media/common/tuners/tda18271-maps.c
diff --git a/drivers/media/dvb/frontends/tda18271-priv.h b/drivers/media/common/tuners/tda18271-priv.h
index 2bc5eb368ea2..2bc5eb368ea2 100644
--- a/drivers/media/dvb/frontends/tda18271-priv.h
+++ b/drivers/media/common/tuners/tda18271-priv.h
diff --git a/drivers/media/dvb/frontends/tda18271.h b/drivers/media/common/tuners/tda18271.h
index 0e7af8d05a38..7db9831c0cb0 100644
--- a/drivers/media/dvb/frontends/tda18271.h
+++ b/drivers/media/common/tuners/tda18271.h
@@ -81,7 +81,7 @@ struct tda18271_config {
81 unsigned int small_i2c:1; 81 unsigned int small_i2c:1;
82}; 82};
83 83
84#if defined(CONFIG_DVB_TDA18271) || (defined(CONFIG_DVB_TDA18271_MODULE) && defined(MODULE)) 84#if defined(CONFIG_MEDIA_TUNER_TDA18271) || (defined(CONFIG_MEDIA_TUNER_TDA18271_MODULE) && defined(MODULE))
85extern struct dvb_frontend *tda18271_attach(struct dvb_frontend *fe, u8 addr, 85extern struct dvb_frontend *tda18271_attach(struct dvb_frontend *fe, u8 addr,
86 struct i2c_adapter *i2c, 86 struct i2c_adapter *i2c,
87 struct tda18271_config *cfg); 87 struct tda18271_config *cfg);
diff --git a/drivers/media/dvb/frontends/tda827x.c b/drivers/media/common/tuners/tda827x.c
index d30d2c9094d9..d30d2c9094d9 100644
--- a/drivers/media/dvb/frontends/tda827x.c
+++ b/drivers/media/common/tuners/tda827x.c
diff --git a/drivers/media/dvb/frontends/tda827x.h b/drivers/media/common/tuners/tda827x.h
index b73c23570dab..7850a9a1dc8f 100644
--- a/drivers/media/dvb/frontends/tda827x.h
+++ b/drivers/media/common/tuners/tda827x.h
@@ -51,7 +51,7 @@ struct tda827x_config
51 * @param cfg optional callback function pointers. 51 * @param cfg optional callback function pointers.
52 * @return FE pointer on success, NULL on failure. 52 * @return FE pointer on success, NULL on failure.
53 */ 53 */
54#if defined(CONFIG_DVB_TDA827X) || (defined(CONFIG_DVB_TDA827X_MODULE) && defined(MODULE)) 54#if defined(CONFIG_MEDIA_TUNER_TDA827X) || (defined(CONFIG_MEDIA_TUNER_TDA827X_MODULE) && defined(MODULE))
55extern struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe, int addr, 55extern struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe, int addr,
56 struct i2c_adapter *i2c, 56 struct i2c_adapter *i2c,
57 struct tda827x_config *cfg); 57 struct tda827x_config *cfg);
@@ -64,6 +64,6 @@ static inline struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe,
64 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 64 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
65 return NULL; 65 return NULL;
66} 66}
67#endif // CONFIG_DVB_TDA827X 67#endif // CONFIG_MEDIA_TUNER_TDA827X
68 68
69#endif // __DVB_TDA827X_H__ 69#endif // __DVB_TDA827X_H__
diff --git a/drivers/media/video/tda8290.c b/drivers/media/common/tuners/tda8290.c
index 0ebb5b525e57..91204d3f282d 100644
--- a/drivers/media/video/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -578,16 +578,16 @@ static int tda829x_find_tuner(struct dvb_frontend *fe)
578 578
579 if ((data == 0x83) || (data == 0x84)) { 579 if ((data == 0x83) || (data == 0x84)) {
580 priv->ver |= TDA18271; 580 priv->ver |= TDA18271;
581 tda18271_attach(fe, priv->tda827x_addr, 581 dvb_attach(tda18271_attach, fe, priv->tda827x_addr,
582 priv->i2c_props.adap, 582 priv->i2c_props.adap, &tda829x_tda18271_config);
583 &tda829x_tda18271_config);
584 } else { 583 } else {
585 if ((data & 0x3c) == 0) 584 if ((data & 0x3c) == 0)
586 priv->ver |= TDA8275; 585 priv->ver |= TDA8275;
587 else 586 else
588 priv->ver |= TDA8275A; 587 priv->ver |= TDA8275A;
589 588
590 tda827x_attach(fe, priv->tda827x_addr, priv->i2c_props.adap, &priv->cfg); 589 dvb_attach(tda827x_attach, fe, priv->tda827x_addr,
590 priv->i2c_props.adap, &priv->cfg);
591 priv->cfg.switch_addr = priv->i2c_props.addr; 591 priv->cfg.switch_addr = priv->i2c_props.addr;
592 } 592 }
593 if (fe->ops.tuner_ops.init) 593 if (fe->ops.tuner_ops.init)
diff --git a/drivers/media/video/tda8290.h b/drivers/media/common/tuners/tda8290.h
index d3bbf276a469..aa074f3f0c07 100644
--- a/drivers/media/video/tda8290.h
+++ b/drivers/media/common/tuners/tda8290.h
@@ -29,7 +29,7 @@ struct tda829x_config {
29#define TDA829X_DONT_PROBE 1 29#define TDA829X_DONT_PROBE 1
30}; 30};
31 31
32#if defined(CONFIG_TUNER_TDA8290) || (defined(CONFIG_TUNER_TDA8290_MODULE) && defined(MODULE)) 32#if defined(CONFIG_MEDIA_TUNER_TDA8290) || (defined(CONFIG_MEDIA_TUNER_TDA8290_MODULE) && defined(MODULE))
33extern int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr); 33extern int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr);
34 34
35extern struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe, 35extern struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/video/tda9887.c b/drivers/media/common/tuners/tda9887.c
index a0545ba957b0..a0545ba957b0 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/common/tuners/tda9887.c
diff --git a/drivers/media/video/tda9887.h b/drivers/media/common/tuners/tda9887.h
index be49dcbfc70e..acc419e8c4fc 100644
--- a/drivers/media/video/tda9887.h
+++ b/drivers/media/common/tuners/tda9887.h
@@ -21,7 +21,7 @@
21#include "dvb_frontend.h" 21#include "dvb_frontend.h"
22 22
23/* ------------------------------------------------------------------------ */ 23/* ------------------------------------------------------------------------ */
24#if defined(CONFIG_TUNER_TDA9887) || (defined(CONFIG_TUNER_TDA9887_MODULE) && defined(MODULE)) 24#if defined(CONFIG_MEDIA_TUNER_TDA9887) || (defined(CONFIG_MEDIA_TUNER_TDA9887_MODULE) && defined(MODULE))
25extern struct dvb_frontend *tda9887_attach(struct dvb_frontend *fe, 25extern struct dvb_frontend *tda9887_attach(struct dvb_frontend *fe,
26 struct i2c_adapter *i2c_adap, 26 struct i2c_adapter *i2c_adap,
27 u8 i2c_addr); 27 u8 i2c_addr);
diff --git a/drivers/media/video/tea5761.c b/drivers/media/common/tuners/tea5761.c
index b93cdef9ac73..b93cdef9ac73 100644
--- a/drivers/media/video/tea5761.c
+++ b/drivers/media/common/tuners/tea5761.c
diff --git a/drivers/media/video/tea5761.h b/drivers/media/common/tuners/tea5761.h
index 8eb62722b988..2e2ff82c95a4 100644
--- a/drivers/media/video/tea5761.h
+++ b/drivers/media/common/tuners/tea5761.h
@@ -20,7 +20,7 @@
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include "dvb_frontend.h" 21#include "dvb_frontend.h"
22 22
23#if defined(CONFIG_TUNER_TEA5761) || (defined(CONFIG_TUNER_TEA5761_MODULE) && defined(MODULE)) 23#if defined(CONFIG_MEDIA_TUNER_TEA5761) || (defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE) && defined(MODULE))
24extern int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr); 24extern int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
25 25
26extern struct dvb_frontend *tea5761_attach(struct dvb_frontend *fe, 26extern struct dvb_frontend *tea5761_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/video/tea5767.c b/drivers/media/common/tuners/tea5767.c
index f6e7d7ad8424..f6e7d7ad8424 100644
--- a/drivers/media/video/tea5767.c
+++ b/drivers/media/common/tuners/tea5767.c
diff --git a/drivers/media/video/tea5767.h b/drivers/media/common/tuners/tea5767.h
index 7b547c092e25..d30ab1b483de 100644
--- a/drivers/media/video/tea5767.h
+++ b/drivers/media/common/tuners/tea5767.h
@@ -39,7 +39,7 @@ struct tea5767_ctrl {
39 enum tea5767_xtal xtal_freq; 39 enum tea5767_xtal xtal_freq;
40}; 40};
41 41
42#if defined(CONFIG_TUNER_TEA5767) || (defined(CONFIG_TUNER_TEA5767_MODULE) && defined(MODULE)) 42#if defined(CONFIG_MEDIA_TUNER_TEA5767) || (defined(CONFIG_MEDIA_TUNER_TEA5767_MODULE) && defined(MODULE))
43extern int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr); 43extern int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
44 44
45extern struct dvb_frontend *tea5767_attach(struct dvb_frontend *fe, 45extern struct dvb_frontend *tea5767_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/video/tuner-i2c.h b/drivers/media/common/tuners/tuner-i2c.h
index 3ad6c8e0b04c..3ad6c8e0b04c 100644
--- a/drivers/media/video/tuner-i2c.h
+++ b/drivers/media/common/tuners/tuner-i2c.h
diff --git a/drivers/media/video/tuner-simple.c b/drivers/media/common/tuners/tuner-simple.c
index be8d903171b7..be8d903171b7 100644
--- a/drivers/media/video/tuner-simple.c
+++ b/drivers/media/common/tuners/tuner-simple.c
diff --git a/drivers/media/video/tuner-simple.h b/drivers/media/common/tuners/tuner-simple.h
index e46cf0121e03..381fa5d35a9b 100644
--- a/drivers/media/video/tuner-simple.h
+++ b/drivers/media/common/tuners/tuner-simple.h
@@ -20,7 +20,7 @@
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include "dvb_frontend.h" 21#include "dvb_frontend.h"
22 22
23#if defined(CONFIG_TUNER_SIMPLE) || (defined(CONFIG_TUNER_SIMPLE_MODULE) && defined(MODULE)) 23#if defined(CONFIG_MEDIA_TUNER_SIMPLE) || (defined(CONFIG_MEDIA_TUNER_SIMPLE_MODULE) && defined(MODULE))
24extern struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe, 24extern struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
25 struct i2c_adapter *i2c_adap, 25 struct i2c_adapter *i2c_adap,
26 u8 i2c_addr, 26 u8 i2c_addr,
diff --git a/drivers/media/video/tuner-types.c b/drivers/media/common/tuners/tuner-types.c
index 10dddca8b5d1..10dddca8b5d1 100644
--- a/drivers/media/video/tuner-types.c
+++ b/drivers/media/common/tuners/tuner-types.c
diff --git a/drivers/media/video/tuner-xc2028-types.h b/drivers/media/common/tuners/tuner-xc2028-types.h
index 74dc46a71f64..74dc46a71f64 100644
--- a/drivers/media/video/tuner-xc2028-types.h
+++ b/drivers/media/common/tuners/tuner-xc2028-types.h
diff --git a/drivers/media/video/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 9e9003cffc7f..9e9003cffc7f 100644
--- a/drivers/media/video/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
diff --git a/drivers/media/video/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h
index fc2f132a5541..216025cf5d4b 100644
--- a/drivers/media/video/tuner-xc2028.h
+++ b/drivers/media/common/tuners/tuner-xc2028.h
@@ -47,7 +47,7 @@ struct xc2028_config {
47#define XC2028_TUNER_RESET 0 47#define XC2028_TUNER_RESET 0
48#define XC2028_RESET_CLK 1 48#define XC2028_RESET_CLK 1
49 49
50#if defined(CONFIG_TUNER_XC2028) || (defined(CONFIG_TUNER_XC2028_MODULE) && defined(MODULE)) 50#if defined(CONFIG_MEDIA_TUNER_XC2028) || (defined(CONFIG_MEDIA_TUNER_XC2028_MODULE) && defined(MODULE))
51extern struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe, 51extern struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe,
52 struct xc2028_config *cfg); 52 struct xc2028_config *cfg);
53#else 53#else
diff --git a/drivers/media/dvb/frontends/xc5000.c b/drivers/media/common/tuners/xc5000.c
index 43d35bdb221f..43d35bdb221f 100644
--- a/drivers/media/dvb/frontends/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
diff --git a/drivers/media/dvb/frontends/xc5000.h b/drivers/media/common/tuners/xc5000.h
index b890883a0cdc..0ee80f9d19b8 100644
--- a/drivers/media/dvb/frontends/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -45,8 +45,8 @@ struct xc5000_config {
45/* xc5000 callback command */ 45/* xc5000 callback command */
46#define XC5000_TUNER_RESET 0 46#define XC5000_TUNER_RESET 0
47 47
48#if defined(CONFIG_DVB_TUNER_XC5000) || \ 48#if defined(CONFIG_MEDIA_TUNER_XC5000) || \
49 (defined(CONFIG_DVB_TUNER_XC5000_MODULE) && defined(MODULE)) 49 (defined(CONFIG_MEDIA_TUNER_XC5000_MODULE) && defined(MODULE))
50extern struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe, 50extern struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe,
51 struct i2c_adapter *i2c, 51 struct i2c_adapter *i2c,
52 struct xc5000_config *cfg); 52 struct xc5000_config *cfg);
@@ -58,6 +58,6 @@ static inline struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe,
58 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 58 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
59 return NULL; 59 return NULL;
60} 60}
61#endif // CONFIG_DVB_TUNER_XC5000 61#endif // CONFIG_MEDIA_TUNER_XC5000
62 62
63#endif // __XC5000_H__ 63#endif // __XC5000_H__
diff --git a/drivers/media/dvb/frontends/xc5000_priv.h b/drivers/media/common/tuners/xc5000_priv.h
index 13b2d19341da..13b2d19341da 100644
--- a/drivers/media/dvb/frontends/xc5000_priv.h
+++ b/drivers/media/common/tuners/xc5000_priv.h
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index 03ef88acd9b8..7b21b49f1945 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -1,9 +1,7 @@
1# 1#
2# Multimedia device configuration 2# DVB device configuration
3# 3#
4 4
5source "drivers/media/dvb/dvb-core/Kconfig"
6
7menuconfig DVB_CAPTURE_DRIVERS 5menuconfig DVB_CAPTURE_DRIVERS
8 bool "DVB/ATSC adapters" 6 bool "DVB/ATSC adapters"
9 depends on DVB_CORE 7 depends on DVB_CORE
diff --git a/drivers/media/dvb/b2c2/Kconfig b/drivers/media/dvb/b2c2/Kconfig
index 6ec5afba1ca7..73dc2ee9b014 100644
--- a/drivers/media/dvb/b2c2/Kconfig
+++ b/drivers/media/dvb/b2c2/Kconfig
@@ -9,7 +9,7 @@ config DVB_B2C2_FLEXCOP
9 select DVB_STV0297 if !DVB_FE_CUSTOMISE 9 select DVB_STV0297 if !DVB_FE_CUSTOMISE
10 select DVB_BCM3510 if !DVB_FE_CUSTOMISE 10 select DVB_BCM3510 if !DVB_FE_CUSTOMISE
11 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 11 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
12 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 12 select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
13 select DVB_S5H1420 if !DVB_FE_CUSTOMISE 13 select DVB_S5H1420 if !DVB_FE_CUSTOMISE
14 select DVB_TUNER_ITD1000 if !DVB_FE_CUSTOMISE 14 select DVB_TUNER_ITD1000 if !DVB_FE_CUSTOMISE
15 select DVB_ISL6421 if !DVB_FE_CUSTOMISE 15 select DVB_ISL6421 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/b2c2/Makefile b/drivers/media/dvb/b2c2/Makefile
index 870e2848c296..d9db066f9854 100644
--- a/drivers/media/dvb/b2c2/Makefile
+++ b/drivers/media/dvb/b2c2/Makefile
@@ -14,4 +14,4 @@ b2c2-flexcop-usb-objs = flexcop-usb.o
14obj-$(CONFIG_DVB_B2C2_FLEXCOP_USB) += b2c2-flexcop-usb.o 14obj-$(CONFIG_DVB_B2C2_FLEXCOP_USB) += b2c2-flexcop-usb.o
15 15
16EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/ 16EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
17EXTRA_CFLAGS += -Idrivers/media/video/ 17EXTRA_CFLAGS += -Idrivers/media/common/tuners/
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index 902c762e0b7f..d1239b8342f8 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -8,7 +8,7 @@ config DVB_BT8XX
8 select DVB_OR51211 if !DVB_FE_CUSTOMISE 8 select DVB_OR51211 if !DVB_FE_CUSTOMISE
9 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 9 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
10 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 10 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
11 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 11 select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
12 select FW_LOADER 12 select FW_LOADER
13 help 13 help
14 Support for PCI cards based on the Bt8xx PCI bridge. Examples are 14 Support for PCI cards based on the Bt8xx PCI bridge. Examples are
diff --git a/drivers/media/dvb/bt8xx/Makefile b/drivers/media/dvb/bt8xx/Makefile
index 9d3e68b5d6eb..d98f1d49ffa8 100644
--- a/drivers/media/dvb/bt8xx/Makefile
+++ b/drivers/media/dvb/bt8xx/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
3EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 3EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
4EXTRA_CFLAGS += -Idrivers/media/dvb/frontends 4EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
5EXTRA_CFLAGS += -Idrivers/media/video/bt8xx 5EXTRA_CFLAGS += -Idrivers/media/video/bt8xx
6EXTRA_CFLAGS += -Idrivers/media/video 6EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index 75711bde23ad..a7637562e742 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -1714,7 +1714,7 @@ static void dst_release(struct dvb_frontend *fe)
1714 struct dst_state *state = fe->demodulator_priv; 1714 struct dst_state *state = fe->demodulator_priv;
1715 if (state->dst_ca) { 1715 if (state->dst_ca) {
1716 dvb_unregister_device(state->dst_ca); 1716 dvb_unregister_device(state->dst_ca);
1717#ifdef CONFIG_DVB_CORE_ATTACH 1717#ifdef CONFIG_MEDIA_ATTACH
1718 symbol_put(dst_ca_attach); 1718 symbol_put(dst_ca_attach);
1719#endif 1719#endif
1720 } 1720 }
diff --git a/drivers/media/dvb/dvb-core/Kconfig b/drivers/media/dvb/dvb-core/Kconfig
deleted file mode 100644
index e3e6839f8073..000000000000
--- a/drivers/media/dvb/dvb-core/Kconfig
+++ /dev/null
@@ -1,34 +0,0 @@
1config DVB_CORE
2 tristate "DVB for Linux"
3 depends on NET && INET
4 select CRC32
5 help
6 Support Digital Video Broadcasting hardware. Enable this if you
7 own a DVB adapter and want to use it or if you compile Linux for
8 a digital SetTopBox.
9
10 DVB core utility functions for device handling, software fallbacks etc.
11 Say Y when you have a DVB card and want to use it. Say Y if you want
12 to build your drivers outside the kernel, but need the DVB core. All
13 in-kernel drivers will select this automatically if needed.
14
15 API specs and user tools are available from <http://www.linuxtv.org/>.
16
17 Please report problems regarding this driver to the LinuxDVB
18 mailing list.
19
20 If unsure say N.
21
22config DVB_CORE_ATTACH
23 bool "Load and attach frontend modules as needed"
24 depends on DVB_CORE
25 depends on MODULES
26 help
27 Remove the static dependency of DVB card drivers on all
28 frontend modules for all possible card variants. Instead,
29 allow the card drivers to only load the frontend modules
30 they require. This saves several KBytes of memory.
31
32 Note: You will need module-init-tools v3.2 or later for this feature.
33
34 If unsure say Y.
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 2dddd08c5445..8cbdb218952f 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -1189,7 +1189,7 @@ int dvb_unregister_frontend(struct dvb_frontend* fe)
1189} 1189}
1190EXPORT_SYMBOL(dvb_unregister_frontend); 1190EXPORT_SYMBOL(dvb_unregister_frontend);
1191 1191
1192#ifdef CONFIG_DVB_CORE_ATTACH 1192#ifdef CONFIG_MEDIA_ATTACH
1193void dvb_frontend_detach(struct dvb_frontend* fe) 1193void dvb_frontend_detach(struct dvb_frontend* fe)
1194{ 1194{
1195 void *ptr; 1195 void *ptr;
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index 5f9a737c6de1..89d12dc477a7 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -115,7 +115,7 @@ extern int dvb_usercopy(struct inode *inode, struct file *file,
115 unsigned int cmd, void *arg)); 115 unsigned int cmd, void *arg));
116 116
117/** generic DVB attach function. */ 117/** generic DVB attach function. */
118#ifdef CONFIG_DVB_CORE_ATTACH 118#ifdef CONFIG_MEDIA_ATTACH
119#define dvb_attach(FUNCTION, ARGS...) ({ \ 119#define dvb_attach(FUNCTION, ARGS...) ({ \
120 void *__r = NULL; \ 120 void *__r = NULL; \
121 typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ 121 typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
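dvb_frontend_detach() and the dvb_attach() macro now key off CONFIG_MEDIA_ATTACH instead of CONFIG_DVB_CORE_ATTACH, but the behaviour is the same: with the option enabled, dvb_attach() resolves the attach function through symbol_request(), loading the tuner or demodulator module on demand and pinning it; without it, the macro collapses to a direct call. A hedged usage sketch for a bridge driver attaching the relocated mt2060 (bridge_attach_tuner() and the 1220 IF1 value are made up for illustration; the mt2060_attach() signature is the one shown earlier in this diff):

#include <linux/i2c.h>
#include "dvbdev.h"
#include "mt2060.h"

static int bridge_attach_tuner(struct dvb_frontend *fe,
			       struct i2c_adapter *i2c,
			       struct mt2060_config *cfg)
{
	/* Loads mt2060.ko on demand when CONFIG_MEDIA_ATTACH=y,
	 * otherwise this is a plain mt2060_attach() call. */
	if (dvb_attach(mt2060_attach, fe, i2c, cfg, 1220) == NULL)
		return -ENODEV;	/* tuner module missing or probe failed */
	return 0;
}

On teardown the reference has to be dropped again, which is what the symbol_put() and dvb_frontend_detach() hunks above continue to do under the new Kconfig name.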
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 3c8493d2026d..4c1cff9feb2e 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -25,7 +25,7 @@ config DVB_USB_A800
25 tristate "AVerMedia AverTV DVB-T USB 2.0 (A800)" 25 tristate "AVerMedia AverTV DVB-T USB 2.0 (A800)"
26 depends on DVB_USB 26 depends on DVB_USB
27 select DVB_DIB3000MC 27 select DVB_DIB3000MC
28 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 28 select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
29 select DVB_PLL if !DVB_FE_CUSTOMISE 29 select DVB_PLL if !DVB_FE_CUSTOMISE
30 help 30 help
31 Say Y here to support the AVerMedia AverTV DVB-T USB 2.0 (A800) receiver. 31 Say Y here to support the AVerMedia AverTV DVB-T USB 2.0 (A800) receiver.
@@ -35,7 +35,7 @@ config DVB_USB_DIBUSB_MB
35 depends on DVB_USB 35 depends on DVB_USB
36 select DVB_PLL if !DVB_FE_CUSTOMISE 36 select DVB_PLL if !DVB_FE_CUSTOMISE
37 select DVB_DIB3000MB 37 select DVB_DIB3000MB
38 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 38 select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
39 help 39 help
40 Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by 40 Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by
41 DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-B demodulator. 41 DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-B demodulator.
@@ -56,7 +56,7 @@ config DVB_USB_DIBUSB_MC
56 tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)" 56 tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)"
57 depends on DVB_USB 57 depends on DVB_USB
58 select DVB_DIB3000MC 58 select DVB_DIB3000MC
59 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 59 select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
60 help 60 help
61 Support for USB2.0 DVB-T receivers based on reference designs made by 61 Support for USB2.0 DVB-T receivers based on reference designs made by
62 DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-C/P demodulator. 62 DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-C/P demodulator.
@@ -73,8 +73,8 @@ config DVB_USB_DIB0700
73 select DVB_DIB7000P 73 select DVB_DIB7000P
74 select DVB_DIB7000M 74 select DVB_DIB7000M
75 select DVB_DIB3000MC 75 select DVB_DIB3000MC
76 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 76 select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
77 select DVB_TUNER_MT2266 if !DVB_FE_CUSTOMISE 77 select MEDIA_TUNER_MT2266 if !DVB_FE_CUSTOMISE
78 select DVB_TUNER_DIB0070 78 select DVB_TUNER_DIB0070
79 help 79 help
80 Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The 80 Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The
@@ -93,7 +93,7 @@ config DVB_USB_UMT_010
93 depends on DVB_USB 93 depends on DVB_USB
94 select DVB_PLL if !DVB_FE_CUSTOMISE 94 select DVB_PLL if !DVB_FE_CUSTOMISE
95 select DVB_DIB3000MC 95 select DVB_DIB3000MC
96 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 96 select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
97 help 97 help
98 Say Y here to support the HanfTek UMT-010 USB2.0 stick-sized DVB-T receiver. 98 Say Y here to support the HanfTek UMT-010 USB2.0 stick-sized DVB-T receiver.
99 99
@@ -105,7 +105,7 @@ config DVB_USB_CXUSB
105 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 105 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
106 select DVB_MT352 if !DVB_FE_CUSTOMISE 106 select DVB_MT352 if !DVB_FE_CUSTOMISE
107 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 107 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
108 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 108 select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
109 help 109 help
110 Say Y here to support the Conexant USB2.0 hybrid reference design. 110 Say Y here to support the Conexant USB2.0 hybrid reference design.
111 Currently, only DVB and ATSC modes are supported, analog mode 111 Currently, only DVB and ATSC modes are supported, analog mode
@@ -118,7 +118,7 @@ config DVB_USB_M920X
118 tristate "Uli m920x DVB-T USB2.0 support" 118 tristate "Uli m920x DVB-T USB2.0 support"
119 depends on DVB_USB 119 depends on DVB_USB
120 select DVB_MT352 if !DVB_FE_CUSTOMISE 120 select DVB_MT352 if !DVB_FE_CUSTOMISE
121 select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE 121 select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE
122 help 122 help
123 Say Y here to support the MSI Mega Sky 580 USB2.0 DVB-T receiver. 123 Say Y here to support the MSI Mega Sky 580 USB2.0 DVB-T receiver.
124 Currently, only devices with a product id of 124 Currently, only devices with a product id of
@@ -129,7 +129,7 @@ config DVB_USB_GL861
129 tristate "Genesys Logic GL861 USB2.0 support" 129 tristate "Genesys Logic GL861 USB2.0 support"
130 depends on DVB_USB 130 depends on DVB_USB
131 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 131 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
132 select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE 132 select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE
133 help 133 help
134 Say Y here to support the MSI Megasky 580 (55801) DVB-T USB2.0 134 Say Y here to support the MSI Megasky 580 (55801) DVB-T USB2.0
135 receiver with USB ID 0db0:5581. 135 receiver with USB ID 0db0:5581.
@@ -138,7 +138,7 @@ config DVB_USB_AU6610
138 tristate "Alcor Micro AU6610 USB2.0 support" 138 tristate "Alcor Micro AU6610 USB2.0 support"
139 depends on DVB_USB 139 depends on DVB_USB
140 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 140 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
141 select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE 141 select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE
142 help 142 help
143 Say Y here to support the Sigmatek DVB-110 DVB-T USB2.0 receiver. 143 Say Y here to support the Sigmatek DVB-110 DVB-T USB2.0 receiver.
144 144
@@ -190,7 +190,7 @@ config DVB_USB_NOVA_T_USB2
190 tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support" 190 tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support"
191 depends on DVB_USB 191 depends on DVB_USB
192 select DVB_DIB3000MC 192 select DVB_DIB3000MC
193 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 193 select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
194 select DVB_PLL if !DVB_FE_CUSTOMISE 194 select DVB_PLL if !DVB_FE_CUSTOMISE
195 help 195 help
196 Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver. 196 Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver.
@@ -227,8 +227,8 @@ config DVB_USB_OPERA1
227config DVB_USB_AF9005 227config DVB_USB_AF9005
228 tristate "Afatech AF9005 DVB-T USB1.1 support" 228 tristate "Afatech AF9005 DVB-T USB1.1 support"
229 depends on DVB_USB && EXPERIMENTAL 229 depends on DVB_USB && EXPERIMENTAL
230 select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE 230 select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
231 select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE 231 select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE
232 help 232 help
233 Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver 233 Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver
234 and the TerraTec Cinergy T USB XE (Rev.1) 234 and the TerraTec Cinergy T USB XE (Rev.1)
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index 60a910052c16..c6511a6c0ab8 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -63,5 +63,5 @@ obj-$(CONFIG_DVB_USB_AF9005_REMOTE) += dvb-usb-af9005-remote.o
63 63
64EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/ 64EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
65# due to tuner-xc3028 65# due to tuner-xc3028
66EXTRA_CFLAGS += -Idrivers/media/video 66EXTRA_CFLAGS += -Idrivers/media/common/tuners
67 67
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index f5fceb3cdb3c..6d2384605927 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -15,22 +15,36 @@ config DVB_FE_CUSTOMISE
15comment "DVB-S (satellite) frontends" 15comment "DVB-S (satellite) frontends"
16 depends on DVB_CORE 16 depends on DVB_CORE
17 17
18config DVB_STV0299 18config DVB_CX24110
19 tristate "ST STV0299 based" 19 tristate "Conexant CX24110 based"
20 depends on DVB_CORE && I2C 20 depends on DVB_CORE && I2C
21 default m if DVB_FE_CUSTOMISE 21 default m if DVB_FE_CUSTOMISE
22 help 22 help
23 A DVB-S tuner module. Say Y when you want to support this frontend. 23 A DVB-S tuner module. Say Y when you want to support this frontend.
24 24
25config DVB_CX24110 25config DVB_CX24123
26 tristate "Conexant CX24110 based" 26 tristate "Conexant CX24123 based"
27 depends on DVB_CORE && I2C 27 depends on DVB_CORE && I2C
28 default m if DVB_FE_CUSTOMISE 28 default m if DVB_FE_CUSTOMISE
29 help 29 help
30 A DVB-S tuner module. Say Y when you want to support this frontend. 30 A DVB-S tuner module. Say Y when you want to support this frontend.
31 31
32config DVB_CX24123 32config DVB_MT312
33 tristate "Conexant CX24123 based" 33 tristate "Zarlink VP310/MT312 based"
34 depends on DVB_CORE && I2C
35 default m if DVB_FE_CUSTOMISE
36 help
37 A DVB-S tuner module. Say Y when you want to support this frontend.
38
39config DVB_S5H1420
40 tristate "Samsung S5H1420 based"
41 depends on DVB_CORE && I2C
42 default m if DVB_FE_CUSTOMISE
43 help
44 A DVB-S tuner module. Say Y when you want to support this frontend.
45
46config DVB_STV0299
47 tristate "ST STV0299 based"
34 depends on DVB_CORE && I2C 48 depends on DVB_CORE && I2C
35 default m if DVB_FE_CUSTOMISE 49 default m if DVB_FE_CUSTOMISE
36 help 50 help
@@ -43,8 +57,8 @@ config DVB_TDA8083
43 help 57 help
44 A DVB-S tuner module. Say Y when you want to support this frontend. 58 A DVB-S tuner module. Say Y when you want to support this frontend.
45 59
46config DVB_MT312 60config DVB_TDA10086
47 tristate "Zarlink VP310/MT312 based" 61 tristate "Philips TDA10086 based"
48 depends on DVB_CORE && I2C 62 depends on DVB_CORE && I2C
49 default m if DVB_FE_CUSTOMISE 63 default m if DVB_FE_CUSTOMISE
50 help 64 help
@@ -57,19 +71,26 @@ config DVB_VES1X93
57 help 71 help
58 A DVB-S tuner module. Say Y when you want to support this frontend. 72 A DVB-S tuner module. Say Y when you want to support this frontend.
59 73
60config DVB_S5H1420 74config DVB_TUNER_ITD1000
61 tristate "Samsung S5H1420 based" 75 tristate "Integrant ITD1000 Zero IF tuner for DVB-S/DSS"
62 depends on DVB_CORE && I2C 76 depends on DVB_CORE && I2C
63 default m if DVB_FE_CUSTOMISE 77 default m if DVB_FE_CUSTOMISE
64 help 78 help
65 A DVB-S tuner module. Say Y when you want to support this frontend. 79 A DVB-S tuner module. Say Y when you want to support this frontend.
66 80
67config DVB_TDA10086 81config DVB_TDA826X
68 tristate "Philips TDA10086 based" 82 tristate "Philips TDA826X silicon tuner"
69 depends on DVB_CORE && I2C 83 depends on DVB_CORE && I2C
70 default m if DVB_FE_CUSTOMISE 84 default m if DVB_FE_CUSTOMISE
71 help 85 help
72 A DVB-S tuner module. Say Y when you want to support this frontend. 86 A DVB-S silicon tuner module. Say Y when you want to support this tuner.
87
88config DVB_TUA6100
89 tristate "Infineon TUA6100 PLL"
90 depends on DVB_CORE && I2C
91 default m if DVB_FE_CUSTOMISE
92 help
93 A DVB-S PLL chip.
73 94
74comment "DVB-T (terrestrial) frontends" 95comment "DVB-T (terrestrial) frontends"
75 depends on DVB_CORE 96 depends on DVB_CORE
@@ -315,7 +336,7 @@ config DVB_S5H1411
315 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want 336 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
316 to support this frontend. 337 to support this frontend.
317 338
318comment "Tuners/PLL support" 339comment "Digital terrestrial only tuners/PLL"
319 depends on DVB_CORE 340 depends on DVB_CORE
320 341
321config DVB_PLL 342config DVB_PLL
@@ -326,55 +347,6 @@ config DVB_PLL
326 This module drives a number of tuners based on PLL chips with a 347 This module drives a number of tuners based on PLL chips with a
327 common I2C interface. Say Y when you want to support these tuners. 348 common I2C interface. Say Y when you want to support these tuners.
328 349
329config DVB_TDA826X
330 tristate "Philips TDA826X silicon tuner"
331 depends on DVB_CORE && I2C
332 default m if DVB_FE_CUSTOMISE
333 help
334 A DVB-S silicon tuner module. Say Y when you want to support this tuner.
335
336config DVB_TDA827X
337 tristate "Philips TDA827X silicon tuner"
338 depends on DVB_CORE && I2C
339 default m if DVB_FE_CUSTOMISE
340 help
341 A DVB-T silicon tuner module. Say Y when you want to support this tuner.
342
343config DVB_TDA18271
344 tristate "NXP TDA18271 silicon tuner"
345 depends on I2C
346 default m if DVB_FE_CUSTOMISE
347 help
348 A silicon tuner module. Say Y when you want to support this tuner.
349
350config DVB_TUNER_QT1010
351 tristate "Quantek QT1010 silicon tuner"
352 depends on DVB_CORE && I2C
353 default m if DVB_FE_CUSTOMISE
354 help
355 A driver for the silicon tuner QT1010 from Quantek.
356
357config DVB_TUNER_MT2060
358 tristate "Microtune MT2060 silicon IF tuner"
359 depends on I2C
360 default m if DVB_FE_CUSTOMISE
361 help
362 A driver for the silicon IF tuner MT2060 from Microtune.
363
364config DVB_TUNER_MT2266
365 tristate "Microtune MT2266 silicon tuner"
366 depends on I2C
367 default m if DVB_FE_CUSTOMISE
368 help
369 A driver for the silicon baseband tuner MT2266 from Microtune.
370
371config DVB_TUNER_MT2131
372 tristate "Microtune MT2131 silicon tuner"
373 depends on I2C
374 default m if DVB_FE_CUSTOMISE
375 help
376 A driver for the silicon baseband tuner MT2131 from Microtune.
377
378config DVB_TUNER_DIB0070 350config DVB_TUNER_DIB0070
379 tristate "DiBcom DiB0070 silicon base-band tuner" 351 tristate "DiBcom DiB0070 silicon base-band tuner"
380 depends on I2C 352 depends on I2C
@@ -384,21 +356,7 @@ config DVB_TUNER_DIB0070
384 This device is only used inside a SiP called together with a 356 This device is only used inside a SiP called together with a
385 demodulator for now. 357 demodulator for now.
386 358
387config DVB_TUNER_XC5000 359comment "SEC control devices for DVB-S"
388 tristate "Xceive XC5000 silicon tuner"
389 depends on I2C
390 default m if DVB_FE_CUSTOMISE
391 help
392 A driver for the silicon tuner XC5000 from Xceive.
393 This device is only used inside a SiP called togther with a
394 demodulator for now.
395
396config DVB_TUNER_ITD1000
397 tristate "Integrant ITD1000 Zero IF tuner for DVB-S/DSS"
398 depends on DVB_CORE && I2C
399 default m if DVB_FE_CUSTOMISE
400
401comment "Miscellaneous devices"
402 depends on DVB_CORE 360 depends on DVB_CORE
403 361
404config DVB_LNBP21 362config DVB_LNBP21
@@ -422,11 +380,4 @@ config DVB_ISL6421
422 help 380 help
423 An SEC control chip. 381 An SEC control chip.
424 382
425config DVB_TUA6100
426 tristate "TUA6100 PLL"
427 depends on DVB_CORE && I2C
428 default m if DVB_FE_CUSTOMISE
429 help
430 A DVBS PLL chip.
431
432endmenu 383endmenu
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 9747c73dc826..a89dc0fc4c6f 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -3,9 +3,7 @@
3# 3#
4 4
5EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ 5EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
6EXTRA_CFLAGS += -Idrivers/media/video/ 6EXTRA_CFLAGS += -Idrivers/media/common/tuners/
7
8tda18271-objs := tda18271-tables.o tda18271-common.o tda18271-fe.o
9 7
10obj-$(CONFIG_DVB_PLL) += dvb-pll.o 8obj-$(CONFIG_DVB_PLL) += dvb-pll.o
11obj-$(CONFIG_DVB_STV0299) += stv0299.o 9obj-$(CONFIG_DVB_STV0299) += stv0299.o
@@ -42,16 +40,9 @@ obj-$(CONFIG_DVB_ISL6405) += isl6405.o
42obj-$(CONFIG_DVB_ISL6421) += isl6421.o 40obj-$(CONFIG_DVB_ISL6421) += isl6421.o
43obj-$(CONFIG_DVB_TDA10086) += tda10086.o 41obj-$(CONFIG_DVB_TDA10086) += tda10086.o
44obj-$(CONFIG_DVB_TDA826X) += tda826x.o 42obj-$(CONFIG_DVB_TDA826X) += tda826x.o
45obj-$(CONFIG_DVB_TDA827X) += tda827x.o
46obj-$(CONFIG_DVB_TDA18271) += tda18271.o
47obj-$(CONFIG_DVB_TUNER_MT2060) += mt2060.o
48obj-$(CONFIG_DVB_TUNER_MT2266) += mt2266.o
49obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o 43obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o
50obj-$(CONFIG_DVB_TUNER_QT1010) += qt1010.o
51obj-$(CONFIG_DVB_TUA6100) += tua6100.o 44obj-$(CONFIG_DVB_TUA6100) += tua6100.o
52obj-$(CONFIG_DVB_TUNER_MT2131) += mt2131.o
53obj-$(CONFIG_DVB_S5H1409) += s5h1409.o 45obj-$(CONFIG_DVB_S5H1409) += s5h1409.o
54obj-$(CONFIG_DVB_TUNER_XC5000) += xc5000.o
55obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o 46obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
56obj-$(CONFIG_DVB_AU8522) += au8522.o 47obj-$(CONFIG_DVB_AU8522) += au8522.o
57obj-$(CONFIG_DVB_TDA10048) += tda10048.o 48obj-$(CONFIG_DVB_TDA10048) += tda10048.o
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 281e1cb2edc6..720ed9ff7c5f 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -481,7 +481,7 @@ static void s5h1420_setsymbolrate(struct s5h1420_state* state,
 	val *= 2;
 	do_div(val, (state->fclk / 1000));
 
-	dprintk("symbol rate register: %06llx\n", val);
+	dprintk("symbol rate register: %06llx\n", (unsigned long long)val);
 
 	v = s5h1420_readreg(state, Loop01);
 	s5h1420_writereg(state, Loop01, v & 0x7f);
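
The one-line change above adds an explicit cast before printing the 64-bit symbol-rate value: `u64` is `unsigned long long` on 32-bit kernels, but on 64-bit architectures of that era it is typically `unsigned long`, so handing it to `%llx` without a cast can trigger a printk format warning. A stand-alone user-space sketch of the same pattern, not part of the patch; the clock and rate numbers are made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 27500000ULL;	/* made-up symbol rate, Sym/s */
	uint32_t fclk_khz = 88000;	/* made-up sample clock, kHz */

	val *= 2;
	val /= fclk_khz;		/* the driver uses do_div() for this */

	/* The cast keeps %llx correct whether uint64_t is long or long long. */
	printf("symbol rate register: %06llx\n", (unsigned long long)val);
	return 0;
}
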
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index fe9a4cc14141..fe743aa7f645 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -1,4 +1,50 @@
 #
+# Generic video config states
+#
+
+config VIDEO_V4L2
+	tristate
+	depends on VIDEO_DEV && VIDEO_V4L2_COMMON
+	default VIDEO_DEV && VIDEO_V4L2_COMMON
+
+config VIDEO_V4L1
+	tristate
+	depends on VIDEO_DEV && VIDEO_V4L2_COMMON && VIDEO_ALLOW_V4L1
+	default VIDEO_DEV && VIDEO_V4L2_COMMON && VIDEO_ALLOW_V4L1
+
+config VIDEOBUF_GEN
+	tristate
+
+config VIDEOBUF_DMA_SG
+	depends on HAS_DMA
+	select VIDEOBUF_GEN
+	tristate
+
+config VIDEOBUF_VMALLOC
+	select VIDEOBUF_GEN
+	tristate
+
+config VIDEOBUF_DVB
+	tristate
+	select VIDEOBUF_GEN
+	select VIDEOBUF_DMA_SG
+
+config VIDEO_BTCX
+	tristate
+
+config VIDEO_IR_I2C
+	tristate
+
+config VIDEO_IR
+	tristate
+	depends on INPUT
+	select VIDEO_IR_I2C if I2C
+
+config VIDEO_TVEEPROM
+	tristate
+	depends on I2C
+
+#
 # Multimedia Video device configuration
 #
 
@@ -644,7 +690,7 @@ config VIDEO_MXB
 	tristate "Siemens-Nixdorf 'Multimedia eXtension Board'"
 	depends on PCI && VIDEO_V4L1 && I2C
 	select VIDEO_SAA7146_VV
-	select VIDEO_TUNER
+	select MEDIA_TUNER
 	select VIDEO_SAA7111 if VIDEO_HELPER_CHIPS_AUTO
 	select VIDEO_TDA9840 if VIDEO_HELPER_CHIPS_AUTO
 	select VIDEO_TEA6415C if VIDEO_HELPER_CHIPS_AUTO
@@ -702,6 +748,8 @@ source "drivers/media/video/au0828/Kconfig"
 
 source "drivers/media/video/ivtv/Kconfig"
 
+source "drivers/media/video/cx18/Kconfig"
+
 config VIDEO_M32R_AR
 	tristate "AR devices"
 	depends on M32R && VIDEO_V4L1
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index be14227f3726..a352c6e31f0c 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -84,17 +84,7 @@ obj-$(CONFIG_VIDEO_HEXIUM_GEMINI) += hexium_gemini.o
 obj-$(CONFIG_VIDEO_DPC) += dpc7146.o
 obj-$(CONFIG_TUNER_3036) += tuner-3036.o
 
-obj-$(CONFIG_VIDEO_TUNER) += tuner.o
-
-obj-$(CONFIG_TUNER_XC2028) += tuner-xc2028.o
-obj-$(CONFIG_TUNER_SIMPLE) += tuner-simple.o
-# tuner-types will be merged into tuner-simple, in the future
-obj-$(CONFIG_TUNER_SIMPLE) += tuner-types.o
-obj-$(CONFIG_TUNER_MT20XX) += mt20xx.o
-obj-$(CONFIG_TUNER_TDA8290) += tda8290.o
-obj-$(CONFIG_TUNER_TEA5767) += tea5767.o
-obj-$(CONFIG_TUNER_TEA5761) += tea5761.o
-obj-$(CONFIG_TUNER_TDA9887) += tda9887.o
+obj-$(CONFIG_MEDIA_TUNER) += tuner.o
 
 obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
 obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
@@ -134,6 +124,7 @@ obj-$(CONFIG_USB_VICAM) += usbvideo/
 obj-$(CONFIG_USB_QUICKCAM_MESSENGER) += usbvideo/
 
 obj-$(CONFIG_VIDEO_IVTV) += ivtv/
+obj-$(CONFIG_VIDEO_CX18) += cx18/
 
 obj-$(CONFIG_VIDEO_VIVI) += vivi.o
 obj-$(CONFIG_VIDEO_CX23885) += cx23885/
@@ -147,3 +138,4 @@ obj-$(CONFIG_VIDEO_AU0828) += au0828/
 
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/au0828/Kconfig b/drivers/media/video/au0828/Kconfig
index 41708267e7a4..cab277fafa63 100644
--- a/drivers/media/video/au0828/Kconfig
+++ b/drivers/media/video/au0828/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_AU0828
 	depends on VIDEO_DEV && I2C && INPUT && DVB_CORE
 	select I2C_ALGOBIT
 	select DVB_AU8522 if !DVB_FE_CUSTOMIZE
-	select DVB_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
+	select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
 	---help---
 	  This is a video4linux driver for Auvitek's USB device.
 
diff --git a/drivers/media/video/au0828/Makefile b/drivers/media/video/au0828/Makefile
index 9f4f572c89c5..cd2c58281b4e 100644
--- a/drivers/media/video/au0828/Makefile
+++ b/drivers/media/video/au0828/Makefile
@@ -2,7 +2,7 @@ au0828-objs := au0828-core.o au0828-i2c.o au0828-cards.o au0828-dvb.o
 
 obj-$(CONFIG_VIDEO_AU0828) += au0828.o
 
-EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
 
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index 5040d7fc4af5..1371b4e4b5f1 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -119,7 +119,7 @@ static int start_urb_transfer(struct au0828_dev *dev)
 		purb->transfer_buffer = kzalloc(URB_BUFSIZE, GFP_KERNEL);
 		if (!purb->transfer_buffer) {
 			usb_free_urb(purb);
-			dev->urbs[i] = 0;
+			dev->urbs[i] = NULL;
 			goto err;
 		}
 
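
The fix above assigns `NULL` rather than `0` to a pointer slot; the generated code is identical, but the intent is explicit and sparse's "Using plain integer as NULL pointer" warning goes away. A small hedged sketch of the allocate-or-unwind idea, using user-space stand-ins rather than the driver's structures:

#include <stdlib.h>

struct stream { void *transfer_buffer; };

/* Allocate a buffer for one slot; on failure reset the slot to NULL so the
 * caller's cleanup loop can tell used entries from unused ones. */
static int alloc_slot(struct stream **slot, size_t size)
{
	*slot = calloc(1, sizeof(**slot));
	if (!*slot)
		return -1;
	(*slot)->transfer_buffer = calloc(1, size);
	if (!(*slot)->transfer_buffer) {
		free(*slot);
		*slot = NULL;	/* NULL, not 0: this is a pointer, not a number */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct stream *slots[4] = { NULL };
	int ret = 0;

	for (int i = 0; i < 4; i++)
		if (alloc_slot(&slots[i], 4096)) {
			ret = 1;
			break;
		}

	for (int i = 0; i < 4; i++)
		if (slots[i]) {
			free(slots[i]->transfer_buffer);
			free(slots[i]);
		}
	return ret;
}
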
diff --git a/drivers/media/video/bt8xx/Kconfig b/drivers/media/video/bt8xx/Kconfig
index cfc822bb502a..7431ef6de9f1 100644
--- a/drivers/media/video/bt8xx/Kconfig
+++ b/drivers/media/video/bt8xx/Kconfig
@@ -6,7 +6,7 @@ config VIDEO_BT848
 	select VIDEO_BTCX
 	select VIDEOBUF_DMA_SG
 	select VIDEO_IR
-	select VIDEO_TUNER
+	select MEDIA_TUNER
 	select VIDEO_TVEEPROM
 	select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO
 	select VIDEO_TVAUDIO if VIDEO_HELPER_CHIPS_AUTO
diff --git a/drivers/media/video/bt8xx/Makefile b/drivers/media/video/bt8xx/Makefile
index 924d216d9570..e415f6fc447c 100644
--- a/drivers/media/video/bt8xx/Makefile
+++ b/drivers/media/video/bt8xx/Makefile
@@ -9,4 +9,5 @@ bttv-objs := bttv-driver.o bttv-cards.o bttv-if.o \
 obj-$(CONFIG_VIDEO_BT848) += bttv.o
 
 EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 03816b73f847..27da7b423275 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -81,8 +81,6 @@
 /* Limits scaled width, which must be a multiple of 4. */
 #define MAX_HACTIVE (0x3FF & -4)
 
-#define clamp(x, low, high) min (max (low, x), high)
-
 #define BTTV_NORMS (\
 	V4L2_STD_PAL | V4L2_STD_PAL_N | \
 	V4L2_STD_PAL_Nc | V4L2_STD_SECAM | \
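
The private `clamp()` macro can go because `linux/kernel.h` gained a generic, type-checked `clamp()` around this time, and a local redefinition would clash with it. A user-space sketch of the same min/max composition the removed bttv macro used, illustrative only:

#include <stdio.h>

/* Same shape as the removed macro: clamp(x, low, high) == min(max(low, x), high). */
#define clamp_example(x, low, high) \
	((x) < (low) ? (low) : ((x) > (high) ? (high) : (x)))

int main(void)
{
	int max_hactive = 0x3FF & -4;	/* 1020: largest multiple of 4 not above 0x3FF */

	printf("%d\n", clamp_example(1200, 0, max_hactive));	/* 1020 */
	printf("%d\n", clamp_example(-16, 0, max_hactive));	/* 0 */
	printf("%d\n", clamp_example(640, 0, max_hactive));	/* 640 */
	return 0;
}
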
diff --git a/drivers/media/video/cs5345.c b/drivers/media/video/cs5345.c
index fae469ce16f5..2a429f9e32cd 100644
--- a/drivers/media/video/cs5345.c
+++ b/drivers/media/video/cs5345.c
@@ -142,7 +142,8 @@ static int cs5345_command(struct i2c_client *client, unsigned cmd, void *arg)
 
 /* ----------------------------------------------------------------------- */
 
-static int cs5345_probe(struct i2c_client *client)
+static int cs5345_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
 	/* Check if the adapter supports the needed features */
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
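
This hunk, like the identical one for cs53l32a below, follows the i2c core change that passes the matched `struct i2c_device_id` into `probe()`. A hedged sketch of how such a probe pairs with an id table in a new-style i2c driver of that era; the "demo" names are invented for illustration and do not exist in the tree:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>

/* The id argument points at the table entry that matched, so per-variant
 * data can be carried in id->driver_data. */
static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	dev_info(&client->dev, "bound as %s, variant %lu\n",
		 id->name, id->driver_data);
	return 0;
}

static int demo_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id demo_id[] = {
	{ "demo",  0 },
	{ "demo2", 1 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_id);

static struct i2c_driver demo_driver = {
	.driver		= { .name = "demo" },
	.probe		= demo_probe,
	.remove		= demo_remove,
	.id_table	= demo_id,
};

static int __init demo_init(void)
{
	return i2c_add_driver(&demo_driver);
}

static void __exit demo_exit(void)
{
	i2c_del_driver(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
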
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index f41bfde045fe..2dfd0afc62db 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -135,7 +135,8 @@ static int cs53l32a_command(struct i2c_client *client, unsigned cmd, void *arg)
135 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' 135 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
136 */ 136 */
137 137
138static int cs53l32a_probe(struct i2c_client *client) 138static int cs53l32a_probe(struct i2c_client *client,
139 const struct i2c_device_id *id)
139{ 140{
140 int i; 141 int i;
141 142
diff --git a/drivers/media/video/cx18/Kconfig b/drivers/media/video/cx18/Kconfig
new file mode 100644
index 000000000000..acc4b47f1d1d
--- /dev/null
+++ b/drivers/media/video/cx18/Kconfig
@@ -0,0 +1,20 @@
1config VIDEO_CX18
2 tristate "Conexant cx23418 MPEG encoder support"
3 depends on VIDEO_V4L2 && DVB_CORE && PCI && I2C && EXPERIMENTAL
4 select I2C_ALGOBIT
5 select FW_LOADER
6 select VIDEO_IR
7 select MEDIA_TUNER
8 select VIDEO_TVEEPROM
9 select VIDEO_CX2341X
10 select VIDEO_CS5345
11 select DVB_S5H1409
12 ---help---
13 This is a video4linux driver for Conexant cx23418 based
14 PCI combo video recorder devices.
15
16 This is used in devices such as the Hauppauge HVR-1600
17 cards.
18
19 To compile this driver as a module, choose M here: the
20 module will be called cx18.
diff --git a/drivers/media/video/cx18/Makefile b/drivers/media/video/cx18/Makefile
new file mode 100644
index 000000000000..b23d2e26120f
--- /dev/null
+++ b/drivers/media/video/cx18/Makefile
@@ -0,0 +1,11 @@
1cx18-objs := cx18-driver.o cx18-cards.o cx18-i2c.o cx18-firmware.o cx18-gpio.o \
2 cx18-queue.o cx18-streams.o cx18-fileops.o cx18-ioctl.o cx18-controls.o \
3 cx18-mailbox.o cx18-vbi.o cx18-audio.o cx18-video.o cx18-irq.o \
4 cx18-av-core.o cx18-av-audio.o cx18-av-firmware.o cx18-av-vbi.o cx18-scb.o \
5 cx18-dvb.o
6
7obj-$(CONFIG_VIDEO_CX18) += cx18.o
8
9EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
10EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
11EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/cx18/cx18-audio.c b/drivers/media/video/cx18/cx18-audio.c
new file mode 100644
index 000000000000..1adc404d955e
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-audio.c
@@ -0,0 +1,73 @@
1/*
2 * cx18 audio-related functions
3 *
4 * Derived from ivtv-audio.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-i2c.h"
26#include "cx18-cards.h"
27#include "cx18-audio.h"
28
29/* Selects the audio input and output according to the current
30 settings. */
31int cx18_audio_set_io(struct cx18 *cx)
32{
33 struct v4l2_routing route;
34 u32 audio_input;
35 int mux_input;
36
37 /* Determine which input to use */
38 if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
39 audio_input = cx->card->radio_input.audio_input;
40 mux_input = cx->card->radio_input.muxer_input;
41 } else {
42 audio_input =
43 cx->card->audio_inputs[cx->audio_input].audio_input;
44 mux_input =
45 cx->card->audio_inputs[cx->audio_input].muxer_input;
46 }
47
48 /* handle muxer chips */
49 route.input = mux_input;
50 route.output = 0;
51 cx18_i2c_hw(cx, cx->card->hw_muxer, VIDIOC_INT_S_AUDIO_ROUTING, &route);
52
53 route.input = audio_input;
54 return cx18_i2c_hw(cx, cx->card->hw_audio_ctrl,
55 VIDIOC_INT_S_AUDIO_ROUTING, &route);
56}
57
58void cx18_audio_set_route(struct cx18 *cx, struct v4l2_routing *route)
59{
60 cx18_i2c_hw(cx, cx->card->hw_audio_ctrl,
61 VIDIOC_INT_S_AUDIO_ROUTING, route);
62}
63
64void cx18_audio_set_audio_clock_freq(struct cx18 *cx, u8 freq)
65{
66 static u32 freqs[3] = { 44100, 48000, 32000 };
67
68 /* The audio clock of the digitizer must match the codec sample
69 rate otherwise you get some very strange effects. */
70 if (freq > 2)
71 return;
72 cx18_call_i2c_clients(cx, VIDIOC_INT_AUDIO_CLOCK_FREQ, &freqs[freq]);
73}
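
cx18_audio_set_audio_clock_freq() above takes a small sample-rate index, guards against out-of-range values and broadcasts the matching rate in Hz to the i2c clients; the 44.1/48/32 kHz order is the same as the MPEG audio sampling-frequency index. A stand-alone sketch of just that lookup, not driver code:

#include <stdio.h>
#include <stdint.h>

/* Index 0/1/2 -> 44.1, 48, 32 kHz, the same table the driver uses. */
static const uint32_t sample_rate_hz[3] = { 44100, 48000, 32000 };

/* Return the rate in Hz, or 0 for an out-of-range index (the driver
 * simply returns without touching the clock in that case). */
static uint32_t decode_sample_rate(uint8_t idx)
{
	if (idx > 2)
		return 0;
	return sample_rate_hz[idx];
}

int main(void)
{
	for (unsigned i = 0; i < 4; i++)
		printf("index %u -> %u Hz\n", i, (unsigned)decode_sample_rate((uint8_t)i));
	return 0;
}
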
diff --git a/drivers/media/video/cx18/cx18-audio.h b/drivers/media/video/cx18/cx18-audio.h
new file mode 100644
index 000000000000..cb569a69379c
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-audio.h
@@ -0,0 +1,26 @@
1/*
2 * cx18 audio-related functions
3 *
4 * Derived from ivtv-audio.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24int cx18_audio_set_io(struct cx18 *cx);
25void cx18_audio_set_route(struct cx18 *cx, struct v4l2_routing *route);
26void cx18_audio_set_audio_clock_freq(struct cx18 *cx, u8 freq);
diff --git a/drivers/media/video/cx18/cx18-av-audio.c b/drivers/media/video/cx18/cx18-av-audio.c
new file mode 100644
index 000000000000..2dc3a5dd170e
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-av-audio.c
@@ -0,0 +1,361 @@
1/*
2 * cx18 ADEC audio functions
3 *
4 * Derived from cx25840-audio.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 */
23
24#include "cx18-driver.h"
25
26static int set_audclk_freq(struct cx18 *cx, u32 freq)
27{
28 struct cx18_av_state *state = &cx->av_state;
29
30 if (freq != 32000 && freq != 44100 && freq != 48000)
31 return -EINVAL;
32
33 /* common for all inputs and rates */
34 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x10 */
35 cx18_av_write(cx, 0x127, 0x50);
36
37 if (state->aud_input != CX18_AV_AUDIO_SERIAL) {
38 switch (freq) {
39 case 32000:
40 /* VID_PLL and AUX_PLL */
41 cx18_av_write4(cx, 0x108, 0x1006040f);
42
43 /* AUX_PLL_FRAC */
44 cx18_av_write4(cx, 0x110, 0x01bb39ee);
45
46 /* src3/4/6_ctl = 0x0801f77f */
47 cx18_av_write4(cx, 0x900, 0x0801f77f);
48 cx18_av_write4(cx, 0x904, 0x0801f77f);
49 cx18_av_write4(cx, 0x90c, 0x0801f77f);
50 break;
51
52 case 44100:
53 /* VID_PLL and AUX_PLL */
54 cx18_av_write4(cx, 0x108, 0x1009040f);
55
56 /* AUX_PLL_FRAC */
57 cx18_av_write4(cx, 0x110, 0x00ec6bd6);
58
59 /* src3/4/6_ctl = 0x08016d59 */
60 cx18_av_write4(cx, 0x900, 0x08016d59);
61 cx18_av_write4(cx, 0x904, 0x08016d59);
62 cx18_av_write4(cx, 0x90c, 0x08016d59);
63 break;
64
65 case 48000:
66 /* VID_PLL and AUX_PLL */
67 cx18_av_write4(cx, 0x108, 0x100a040f);
68
69 /* AUX_PLL_FRAC */
70 cx18_av_write4(cx, 0x110, 0x0098d6e5);
71
72 /* src3/4/6_ctl = 0x08014faa */
73 cx18_av_write4(cx, 0x900, 0x08014faa);
74 cx18_av_write4(cx, 0x904, 0x08014faa);
75 cx18_av_write4(cx, 0x90c, 0x08014faa);
76 break;
77 }
78 } else {
79 switch (freq) {
80 case 32000:
81 /* VID_PLL and AUX_PLL */
82 cx18_av_write4(cx, 0x108, 0x1e08040f);
83
84 /* AUX_PLL_FRAC */
85 cx18_av_write4(cx, 0x110, 0x012a0869);
86
87 /* src1_ctl = 0x08010000 */
88 cx18_av_write4(cx, 0x8f8, 0x08010000);
89
90 /* src3/4/6_ctl = 0x08020000 */
91 cx18_av_write4(cx, 0x900, 0x08020000);
92 cx18_av_write4(cx, 0x904, 0x08020000);
93 cx18_av_write4(cx, 0x90c, 0x08020000);
94
95 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x14 */
96 cx18_av_write(cx, 0x127, 0x54);
97 break;
98
99 case 44100:
100 /* VID_PLL and AUX_PLL */
101 cx18_av_write4(cx, 0x108, 0x1809040f);
102
103 /* AUX_PLL_FRAC */
104 cx18_av_write4(cx, 0x110, 0x00ec6bd6);
105
106 /* src1_ctl = 0x08010000 */
107 cx18_av_write4(cx, 0x8f8, 0x080160cd);
108
109 /* src3/4/6_ctl = 0x08020000 */
110 cx18_av_write4(cx, 0x900, 0x08017385);
111 cx18_av_write4(cx, 0x904, 0x08017385);
112 cx18_av_write4(cx, 0x90c, 0x08017385);
113 break;
114
115 case 48000:
116 /* VID_PLL and AUX_PLL */
117 cx18_av_write4(cx, 0x108, 0x180a040f);
118
119 /* AUX_PLL_FRAC */
120 cx18_av_write4(cx, 0x110, 0x0098d6e5);
121
122 /* src1_ctl = 0x08010000 */
123 cx18_av_write4(cx, 0x8f8, 0x08018000);
124
125 /* src3/4/6_ctl = 0x08020000 */
126 cx18_av_write4(cx, 0x900, 0x08015555);
127 cx18_av_write4(cx, 0x904, 0x08015555);
128 cx18_av_write4(cx, 0x90c, 0x08015555);
129 break;
130 }
131 }
132
133 state->audclk_freq = freq;
134
135 return 0;
136}
137
138void cx18_av_audio_set_path(struct cx18 *cx)
139{
140 struct cx18_av_state *state = &cx->av_state;
141
142 /* stop microcontroller */
143 cx18_av_and_or(cx, 0x803, ~0x10, 0);
144
145 /* assert soft reset */
146 cx18_av_and_or(cx, 0x810, ~0x1, 0x01);
147
148 /* Mute everything to prevent the PFFT! */
149 cx18_av_write(cx, 0x8d3, 0x1f);
150
151 if (state->aud_input == CX18_AV_AUDIO_SERIAL) {
152 /* Set Path1 to Serial Audio Input */
153 cx18_av_write4(cx, 0x8d0, 0x01011012);
154
155 /* The microcontroller should not be started for the
156 * non-tuner inputs: autodetection is specific for
157 * TV audio. */
158 } else {
159 /* Set Path1 to Analog Demod Main Channel */
160 cx18_av_write4(cx, 0x8d0, 0x1f063870);
161 }
162
163 set_audclk_freq(cx, state->audclk_freq);
164
165 /* deassert soft reset */
166 cx18_av_and_or(cx, 0x810, ~0x1, 0x00);
167
168 if (state->aud_input != CX18_AV_AUDIO_SERIAL) {
169 /* When the microcontroller detects the
170 * audio format, it will unmute the lines */
171 cx18_av_and_or(cx, 0x803, ~0x10, 0x10);
172 }
173}
174
175static int get_volume(struct cx18 *cx)
176{
177 /* Volume runs +18dB to -96dB in 1/2dB steps
178 * change to fit the msp3400 -114dB to +12dB range */
179
180 /* check PATH1_VOLUME */
181 int vol = 228 - cx18_av_read(cx, 0x8d4);
182 vol = (vol / 2) + 23;
183 return vol << 9;
184}
185
186static void set_volume(struct cx18 *cx, int volume)
187{
188 /* First convert the volume to msp3400 values (0-127) */
189 int vol = volume >> 9;
190 /* now scale it up to cx18_av values
191 * -114dB to -96dB maps to 0
192 * this should be 19, but in my testing that was 4dB too loud */
193 if (vol <= 23)
194 vol = 0;
195 else
196 vol -= 23;
197
198 /* PATH1_VOLUME */
199 cx18_av_write(cx, 0x8d4, 228 - (vol * 2));
200}
201
202static int get_bass(struct cx18 *cx)
203{
204 /* bass is 49 steps +12dB to -12dB */
205
206 /* check PATH1_EQ_BASS_VOL */
207 int bass = cx18_av_read(cx, 0x8d9) & 0x3f;
208 bass = (((48 - bass) * 0xffff) + 47) / 48;
209 return bass;
210}
211
212static void set_bass(struct cx18 *cx, int bass)
213{
214 /* PATH1_EQ_BASS_VOL */
215 cx18_av_and_or(cx, 0x8d9, ~0x3f, 48 - (bass * 48 / 0xffff));
216}
217
218static int get_treble(struct cx18 *cx)
219{
220 /* treble is 49 steps +12dB to -12dB */
221
222 /* check PATH1_EQ_TREBLE_VOL */
223 int treble = cx18_av_read(cx, 0x8db) & 0x3f;
224 treble = (((48 - treble) * 0xffff) + 47) / 48;
225 return treble;
226}
227
228static void set_treble(struct cx18 *cx, int treble)
229{
230 /* PATH1_EQ_TREBLE_VOL */
231 cx18_av_and_or(cx, 0x8db, ~0x3f, 48 - (treble * 48 / 0xffff));
232}
233
234static int get_balance(struct cx18 *cx)
235{
236 /* balance is 7 bit, 0 to -96dB */
237
238 /* check PATH1_BAL_LEVEL */
239 int balance = cx18_av_read(cx, 0x8d5) & 0x7f;
240 /* check PATH1_BAL_LEFT */
241 if ((cx18_av_read(cx, 0x8d5) & 0x80) == 0)
242 balance = 0x80 - balance;
243 else
244 balance = 0x80 + balance;
245 return balance << 8;
246}
247
248static void set_balance(struct cx18 *cx, int balance)
249{
250 int bal = balance >> 8;
251 if (bal > 0x80) {
252 /* PATH1_BAL_LEFT */
253 cx18_av_and_or(cx, 0x8d5, 0x7f, 0x80);
254 /* PATH1_BAL_LEVEL */
255 cx18_av_and_or(cx, 0x8d5, ~0x7f, bal & 0x7f);
256 } else {
257 /* PATH1_BAL_LEFT */
258 cx18_av_and_or(cx, 0x8d5, 0x7f, 0x00);
259 /* PATH1_BAL_LEVEL */
260 cx18_av_and_or(cx, 0x8d5, ~0x7f, 0x80 - bal);
261 }
262}
263
264static int get_mute(struct cx18 *cx)
265{
266 /* check SRC1_MUTE_EN */
267 return cx18_av_read(cx, 0x8d3) & 0x2 ? 1 : 0;
268}
269
270static void set_mute(struct cx18 *cx, int mute)
271{
272 struct cx18_av_state *state = &cx->av_state;
273
274 if (state->aud_input != CX18_AV_AUDIO_SERIAL) {
275 /* Must turn off microcontroller in order to mute sound.
276 * Not sure if this is the best method, but it does work.
277 * If the microcontroller is running, then it will undo any
278 * changes to the mute register. */
279 if (mute) {
280 /* disable microcontroller */
281 cx18_av_and_or(cx, 0x803, ~0x10, 0x00);
282 cx18_av_write(cx, 0x8d3, 0x1f);
283 } else {
284 /* enable microcontroller */
285 cx18_av_and_or(cx, 0x803, ~0x10, 0x10);
286 }
287 } else {
288 /* SRC1_MUTE_EN */
289 cx18_av_and_or(cx, 0x8d3, ~0x2, mute ? 0x02 : 0x00);
290 }
291}
292
293int cx18_av_audio(struct cx18 *cx, unsigned int cmd, void *arg)
294{
295 struct cx18_av_state *state = &cx->av_state;
296 struct v4l2_control *ctrl = arg;
297 int retval;
298
299 switch (cmd) {
300 case VIDIOC_INT_AUDIO_CLOCK_FREQ:
301 if (state->aud_input != CX18_AV_AUDIO_SERIAL) {
302 cx18_av_and_or(cx, 0x803, ~0x10, 0);
303 cx18_av_write(cx, 0x8d3, 0x1f);
304 }
305 cx18_av_and_or(cx, 0x810, ~0x1, 1);
306 retval = set_audclk_freq(cx, *(u32 *)arg);
307 cx18_av_and_or(cx, 0x810, ~0x1, 0);
308 if (state->aud_input != CX18_AV_AUDIO_SERIAL)
309 cx18_av_and_or(cx, 0x803, ~0x10, 0x10);
310 return retval;
311
312 case VIDIOC_G_CTRL:
313 switch (ctrl->id) {
314 case V4L2_CID_AUDIO_VOLUME:
315 ctrl->value = get_volume(cx);
316 break;
317 case V4L2_CID_AUDIO_BASS:
318 ctrl->value = get_bass(cx);
319 break;
320 case V4L2_CID_AUDIO_TREBLE:
321 ctrl->value = get_treble(cx);
322 break;
323 case V4L2_CID_AUDIO_BALANCE:
324 ctrl->value = get_balance(cx);
325 break;
326 case V4L2_CID_AUDIO_MUTE:
327 ctrl->value = get_mute(cx);
328 break;
329 default:
330 return -EINVAL;
331 }
332 break;
333
334 case VIDIOC_S_CTRL:
335 switch (ctrl->id) {
336 case V4L2_CID_AUDIO_VOLUME:
337 set_volume(cx, ctrl->value);
338 break;
339 case V4L2_CID_AUDIO_BASS:
340 set_bass(cx, ctrl->value);
341 break;
342 case V4L2_CID_AUDIO_TREBLE:
343 set_treble(cx, ctrl->value);
344 break;
345 case V4L2_CID_AUDIO_BALANCE:
346 set_balance(cx, ctrl->value);
347 break;
348 case V4L2_CID_AUDIO_MUTE:
349 set_mute(cx, ctrl->value);
350 break;
351 default:
352 return -EINVAL;
353 }
354 break;
355
356 default:
357 return -EINVAL;
358 }
359
360 return 0;
361}
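
get_volume() and set_volume() above translate between the msp3400-style 16-bit V4L2 volume control and the PATH1_VOLUME register, which counts half-dB steps over a narrower range; the offset of 23 folds the part of the control range below the register's floor to zero, and the factor of 2 converts whole steps to half-dB units. A stand-alone user-space sketch of the round trip, with the sample values chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

/* Register -> control value, mirroring get_volume() in the patch. */
static unsigned reg_to_ctrl(uint8_t reg)
{
	int vol = 228 - reg;		/* half-dB steps above the bottom of the range */
	vol = vol / 2 + 23;		/* fold into the msp3400-style 0..127 scale */
	return (unsigned)vol << 9;	/* widen to the 16-bit control range */
}

/* Control value -> register, mirroring set_volume(). */
static uint8_t ctrl_to_reg(unsigned ctrl)
{
	int vol = (int)(ctrl >> 9);	/* 0..127 */
	vol = (vol <= 23) ? 0 : vol - 23;
	return (uint8_t)(228 - vol * 2);
}

int main(void)
{
	/* Round-tripping loses a little precision: the register only has
	 * half-dB resolution and clamps the low end of the control range. */
	unsigned samples[] = { 0x0000, 0x8000, 0xc000, 0xffff };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint8_t reg = ctrl_to_reg(samples[i]);
		printf("ctrl 0x%04x -> reg %3u -> ctrl 0x%04x\n",
		       samples[i], (unsigned)reg, reg_to_ctrl(reg));
	}
	return 0;
}
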
diff --git a/drivers/media/video/cx18/cx18-av-core.c b/drivers/media/video/cx18/cx18-av-core.c
new file mode 100644
index 000000000000..66864904c99b
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-av-core.c
@@ -0,0 +1,879 @@
1/*
2 * cx18 ADEC audio functions
3 *
4 * Derived from cx25840-core.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 */
23
24#include "cx18-driver.h"
25
26int cx18_av_write(struct cx18 *cx, u16 addr, u8 value)
27{
28 u32 x = readl(cx->reg_mem + 0xc40000 + (addr & ~3));
29 u32 mask = 0xff;
30 int shift = (addr & 3) * 8;
31
32 x = (x & ~(mask << shift)) | ((u32)value << shift);
33 writel(x, cx->reg_mem + 0xc40000 + (addr & ~3));
34 return 0;
35}
36
37int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value)
38{
39 writel(value, cx->reg_mem + 0xc40000 + addr);
40 return 0;
41}
42
43u8 cx18_av_read(struct cx18 *cx, u16 addr)
44{
45 u32 x = readl(cx->reg_mem + 0xc40000 + (addr & ~3));
46 int shift = (addr & 3) * 8;
47
48 return (x >> shift) & 0xff;
49}
50
51u32 cx18_av_read4(struct cx18 *cx, u16 addr)
52{
53 return readl(cx->reg_mem + 0xc40000 + addr);
54}
55
56int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned and_mask,
57 u8 or_value)
58{
59 return cx18_av_write(cx, addr,
60 (cx18_av_read(cx, addr) & and_mask) |
61 or_value);
62}
63
64int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 and_mask,
65 u32 or_value)
66{
67 return cx18_av_write4(cx, addr,
68 (cx18_av_read4(cx, addr) & and_mask) |
69 or_value);
70}
71
72/* ----------------------------------------------------------------------- */
73
74static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
75 enum cx18_av_audio_input aud_input);
76static void log_audio_status(struct cx18 *cx);
77static void log_video_status(struct cx18 *cx);
78
79/* ----------------------------------------------------------------------- */
80
81static void cx18_av_initialize(struct cx18 *cx)
82{
83 u32 v;
84
85 cx18_av_loadfw(cx);
86 /* Stop 8051 code execution */
87 cx18_av_write4(cx, CXADEC_DL_CTL, 0x03000000);
88
89 /* initallize the PLL by toggling sleep bit */
90 v = cx18_av_read4(cx, CXADEC_HOST_REG1);
91 /* enable sleep mode */
92 cx18_av_write4(cx, CXADEC_HOST_REG1, v | 1);
93 /* disable sleep mode */
94 cx18_av_write4(cx, CXADEC_HOST_REG1, v & 0xfffe);
95
96 /* initialize DLLs */
97 v = cx18_av_read4(cx, CXADEC_DLL1_DIAG_CTRL) & 0xE1FFFEFF;
98 /* disable FLD */
99 cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v);
100 /* enable FLD */
101 cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v | 0x10000100);
102
103 v = cx18_av_read4(cx, CXADEC_DLL2_DIAG_CTRL) & 0xE1FFFEFF;
104 /* disable FLD */
105 cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v);
106 /* enable FLD */
107 cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v | 0x06000100);
108
109 /* set analog bias currents. Set Vreg to 1.20V. */
110 cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL1, 0x000A1802);
111
112 v = cx18_av_read4(cx, CXADEC_AFE_DIAG_CTRL3) | 1;
113 /* enable TUNE_FIL_RST */
114 cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL3, v);
115 /* disable TUNE_FIL_RST */
116 cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL3, v & 0xFFFFFFFE);
117
118 /* enable 656 output */
119 cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x040C00);
120
121 /* video output drive strength */
122 cx18_av_and_or4(cx, CXADEC_PIN_CTRL2, ~0, 0x2);
123
124 /* reset video */
125 cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0x8000);
126 cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0);
127
128 /* set video to auto-detect */
129 /* Clear bits 11-12 to enable slow locking mode. Set autodetect mode */
130 /* set the comb notch = 1 */
131 cx18_av_and_or4(cx, CXADEC_MODE_CTRL, 0xFFF7E7F0, 0x02040800);
132
133 /* Enable wtw_en in CRUSH_CTRL (Set bit 22) */
134 /* Enable maj_sel in CRUSH_CTRL (Set bit 20) */
135 cx18_av_and_or4(cx, CXADEC_CRUSH_CTRL, ~0, 0x00500000);
136
137 /* Set VGA_TRACK_RANGE to 0x20 */
138 cx18_av_and_or4(cx, CXADEC_DFE_CTRL2, 0xFFFF00FF, 0x00002000);
139
140 /* Enable VBI capture */
141 cx18_av_write4(cx, CXADEC_OUT_CTRL1, 0x4010253F);
142 /* cx18_av_write4(cx, CXADEC_OUT_CTRL1, 0x4010253E); */
143
144 /* Set the video input.
145 The setting in MODE_CTRL gets lost when we do the above setup */
146 /* EncSetSignalStd(dwDevNum, pEnc->dwSigStd); */
147 /* EncSetVideoInput(dwDevNum, pEnc->VidIndSelection); */
148
149 v = cx18_av_read4(cx, CXADEC_AFE_CTRL);
150 v &= 0xFFFBFFFF; /* turn OFF bit 18 for droop_comp_ch1 */
151 v &= 0xFFFF7FFF; /* turn OFF bit 9 for clamp_sel_ch1 */
152 v &= 0xFFFFFFFE; /* turn OFF bit 0 for 12db_ch1 */
153 /* v |= 0x00000001;*/ /* turn ON bit 0 for 12db_ch1 */
154 cx18_av_write4(cx, CXADEC_AFE_CTRL, v);
155
156/* if(dwEnable && dw3DCombAvailable) { */
157/* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x7728021F); */
158/* } else { */
159/* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x6628021F); */
160/* } */
161 cx18_av_write4(cx, CXADEC_SRC_COMB_CFG, 0x6628021F);
162}
163
164/* ----------------------------------------------------------------------- */
165
166static void input_change(struct cx18 *cx)
167{
168 struct cx18_av_state *state = &cx->av_state;
169 v4l2_std_id std = state->std;
170
171 /* Follow step 8c and 8d of section 3.16 in the cx18_av datasheet */
172 if (std & V4L2_STD_SECAM)
173 cx18_av_write(cx, 0x402, 0);
174 else {
175 cx18_av_write(cx, 0x402, 0x04);
176 cx18_av_write(cx, 0x49f, (std & V4L2_STD_NTSC) ? 0x14 : 0x11);
177 }
178 cx18_av_and_or(cx, 0x401, ~0x60, 0);
179 cx18_av_and_or(cx, 0x401, ~0x60, 0x60);
180
181 if (std & V4L2_STD_525_60) {
182 if (std == V4L2_STD_NTSC_M_JP) {
183 /* Japan uses EIAJ audio standard */
184 cx18_av_write(cx, 0x808, 0xf7);
185 } else if (std == V4L2_STD_NTSC_M_KR) {
186 /* South Korea uses A2 audio standard */
187 cx18_av_write(cx, 0x808, 0xf8);
188 } else {
189 /* Others use the BTSC audio standard */
190 cx18_av_write(cx, 0x808, 0xf6);
191 }
192 cx18_av_write(cx, 0x80b, 0x00);
193 } else if (std & V4L2_STD_PAL) {
194 /* Follow tuner change procedure for PAL */
195 cx18_av_write(cx, 0x808, 0xff);
196 cx18_av_write(cx, 0x80b, 0x03);
197 } else if (std & V4L2_STD_SECAM) {
198 /* Select autodetect for SECAM */
199 cx18_av_write(cx, 0x808, 0xff);
200 cx18_av_write(cx, 0x80b, 0x03);
201 }
202
203 if (cx18_av_read(cx, 0x803) & 0x10) {
204 /* restart audio decoder microcontroller */
205 cx18_av_and_or(cx, 0x803, ~0x10, 0x00);
206 cx18_av_and_or(cx, 0x803, ~0x10, 0x10);
207 }
208}
209
210static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
211 enum cx18_av_audio_input aud_input)
212{
213 struct cx18_av_state *state = &cx->av_state;
214 u8 is_composite = (vid_input >= CX18_AV_COMPOSITE1 &&
215 vid_input <= CX18_AV_COMPOSITE8);
216 u8 reg;
217
218 CX18_DEBUG_INFO("decoder set video input %d, audio input %d\n",
219 vid_input, aud_input);
220
221 if (is_composite) {
222 reg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1);
223 } else {
224 int luma = vid_input & 0xf0;
225 int chroma = vid_input & 0xf00;
226
227 if ((vid_input & ~0xff0) ||
228 luma < CX18_AV_SVIDEO_LUMA1 ||
229 luma > CX18_AV_SVIDEO_LUMA4 ||
230 chroma < CX18_AV_SVIDEO_CHROMA4 ||
231 chroma > CX18_AV_SVIDEO_CHROMA8) {
232 CX18_ERR("0x%04x is not a valid video input!\n",
233 vid_input);
234 return -EINVAL;
235 }
236 reg = 0xf0 + ((luma - CX18_AV_SVIDEO_LUMA1) >> 4);
237 if (chroma >= CX18_AV_SVIDEO_CHROMA7) {
238 reg &= 0x3f;
239 reg |= (chroma - CX18_AV_SVIDEO_CHROMA7) >> 2;
240 } else {
241 reg &= 0xcf;
242 reg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4;
243 }
244 }
245
246 switch (aud_input) {
247 case CX18_AV_AUDIO_SERIAL:
248 /* do nothing, use serial audio input */
249 break;
250 case CX18_AV_AUDIO4: reg &= ~0x30; break;
251 case CX18_AV_AUDIO5: reg &= ~0x30; reg |= 0x10; break;
252 case CX18_AV_AUDIO6: reg &= ~0x30; reg |= 0x20; break;
253 case CX18_AV_AUDIO7: reg &= ~0xc0; break;
254 case CX18_AV_AUDIO8: reg &= ~0xc0; reg |= 0x40; break;
255
256 default:
257 CX18_ERR("0x%04x is not a valid audio input!\n", aud_input);
258 return -EINVAL;
259 }
260
261 cx18_av_write(cx, 0x103, reg);
262 /* Set INPUT_MODE to Composite (0) or S-Video (1) */
263 cx18_av_and_or(cx, 0x401, ~0x6, is_composite ? 0 : 0x02);
264 /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
265 cx18_av_and_or(cx, 0x102, ~0x2, (reg & 0x80) == 0 ? 2 : 0);
266 /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */
267 if ((reg & 0xc0) != 0xc0 && (reg & 0x30) != 0x30)
268 cx18_av_and_or(cx, 0x102, ~0x4, 4);
269 else
270 cx18_av_and_or(cx, 0x102, ~0x4, 0);
271 /*cx18_av_and_or4(cx, 0x104, ~0x001b4180, 0x00004180);*/
272
273 state->vid_input = vid_input;
274 state->aud_input = aud_input;
275 cx18_av_audio_set_path(cx);
276 input_change(cx);
277 return 0;
278}
279
280/* ----------------------------------------------------------------------- */
281
282static int set_v4lstd(struct cx18 *cx)
283{
284 struct cx18_av_state *state = &cx->av_state;
285 u8 fmt = 0; /* zero is autodetect */
286 u8 pal_m = 0;
287
288 /* First tests should be against specific std */
289 if (state->std == V4L2_STD_NTSC_M_JP) {
290 fmt = 0x2;
291 } else if (state->std == V4L2_STD_NTSC_443) {
292 fmt = 0x3;
293 } else if (state->std == V4L2_STD_PAL_M) {
294 pal_m = 1;
295 fmt = 0x5;
296 } else if (state->std == V4L2_STD_PAL_N) {
297 fmt = 0x6;
298 } else if (state->std == V4L2_STD_PAL_Nc) {
299 fmt = 0x7;
300 } else if (state->std == V4L2_STD_PAL_60) {
301 fmt = 0x8;
302 } else {
303 /* Then, test against generic ones */
304 if (state->std & V4L2_STD_NTSC)
305 fmt = 0x1;
306 else if (state->std & V4L2_STD_PAL)
307 fmt = 0x4;
308 else if (state->std & V4L2_STD_SECAM)
309 fmt = 0xc;
310 }
311
312 CX18_DEBUG_INFO("changing video std to fmt %i\n", fmt);
313
314 /* Follow step 9 of section 3.16 in the cx18_av datasheet.
315 Without this PAL may display a vertical ghosting effect.
316 This happens for example with the Yuan MPC622. */
317 if (fmt >= 4 && fmt < 8) {
318 /* Set format to NTSC-M */
319 cx18_av_and_or(cx, 0x400, ~0xf, 1);
320 /* Turn off LCOMB */
321 cx18_av_and_or(cx, 0x47b, ~6, 0);
322 }
323 cx18_av_and_or(cx, 0x400, ~0xf, fmt);
324 cx18_av_and_or(cx, 0x403, ~0x3, pal_m);
325 cx18_av_vbi_setup(cx);
326 input_change(cx);
327 return 0;
328}
329
330/* ----------------------------------------------------------------------- */
331
332static int set_v4lctrl(struct cx18 *cx, struct v4l2_control *ctrl)
333{
334 switch (ctrl->id) {
335 case V4L2_CID_BRIGHTNESS:
336 if (ctrl->value < 0 || ctrl->value > 255) {
337 CX18_ERR("invalid brightness setting %d\n",
338 ctrl->value);
339 return -ERANGE;
340 }
341
342 cx18_av_write(cx, 0x414, ctrl->value - 128);
343 break;
344
345 case V4L2_CID_CONTRAST:
346 if (ctrl->value < 0 || ctrl->value > 127) {
347 CX18_ERR("invalid contrast setting %d\n",
348 ctrl->value);
349 return -ERANGE;
350 }
351
352 cx18_av_write(cx, 0x415, ctrl->value << 1);
353 break;
354
355 case V4L2_CID_SATURATION:
356 if (ctrl->value < 0 || ctrl->value > 127) {
357 CX18_ERR("invalid saturation setting %d\n",
358 ctrl->value);
359 return -ERANGE;
360 }
361
362 cx18_av_write(cx, 0x420, ctrl->value << 1);
363 cx18_av_write(cx, 0x421, ctrl->value << 1);
364 break;
365
366 case V4L2_CID_HUE:
367 if (ctrl->value < -127 || ctrl->value > 127) {
368 CX18_ERR("invalid hue setting %d\n", ctrl->value);
369 return -ERANGE;
370 }
371
372 cx18_av_write(cx, 0x422, ctrl->value);
373 break;
374
375 case V4L2_CID_AUDIO_VOLUME:
376 case V4L2_CID_AUDIO_BASS:
377 case V4L2_CID_AUDIO_TREBLE:
378 case V4L2_CID_AUDIO_BALANCE:
379 case V4L2_CID_AUDIO_MUTE:
380 return cx18_av_audio(cx, VIDIOC_S_CTRL, ctrl);
381
382 default:
383 return -EINVAL;
384 }
385
386 return 0;
387}
388
389static int get_v4lctrl(struct cx18 *cx, struct v4l2_control *ctrl)
390{
391 switch (ctrl->id) {
392 case V4L2_CID_BRIGHTNESS:
393 ctrl->value = (s8)cx18_av_read(cx, 0x414) + 128;
394 break;
395 case V4L2_CID_CONTRAST:
396 ctrl->value = cx18_av_read(cx, 0x415) >> 1;
397 break;
398 case V4L2_CID_SATURATION:
399 ctrl->value = cx18_av_read(cx, 0x420) >> 1;
400 break;
401 case V4L2_CID_HUE:
402 ctrl->value = (s8)cx18_av_read(cx, 0x422);
403 break;
404 case V4L2_CID_AUDIO_VOLUME:
405 case V4L2_CID_AUDIO_BASS:
406 case V4L2_CID_AUDIO_TREBLE:
407 case V4L2_CID_AUDIO_BALANCE:
408 case V4L2_CID_AUDIO_MUTE:
409 return cx18_av_audio(cx, VIDIOC_G_CTRL, ctrl);
410 default:
411 return -EINVAL;
412 }
413
414 return 0;
415}
416
417/* ----------------------------------------------------------------------- */
418
419static int get_v4lfmt(struct cx18 *cx, struct v4l2_format *fmt)
420{
421 switch (fmt->type) {
422 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
423 return cx18_av_vbi(cx, VIDIOC_G_FMT, fmt);
424 default:
425 return -EINVAL;
426 }
427
428 return 0;
429}
430
431static int set_v4lfmt(struct cx18 *cx, struct v4l2_format *fmt)
432{
433 struct cx18_av_state *state = &cx->av_state;
434 struct v4l2_pix_format *pix;
435 int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
436 int is_50Hz = !(state->std & V4L2_STD_525_60);
437
438 switch (fmt->type) {
439 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
440 pix = &(fmt->fmt.pix);
441
442 Vsrc = (cx18_av_read(cx, 0x476) & 0x3f) << 4;
443 Vsrc |= (cx18_av_read(cx, 0x475) & 0xf0) >> 4;
444
445 Hsrc = (cx18_av_read(cx, 0x472) & 0x3f) << 4;
446 Hsrc |= (cx18_av_read(cx, 0x471) & 0xf0) >> 4;
447
448 Vlines = pix->height + (is_50Hz ? 4 : 7);
449
450 if ((pix->width * 16 < Hsrc) || (Hsrc < pix->width) ||
451 (Vlines * 8 < Vsrc) || (Vsrc < Vlines)) {
452 CX18_ERR("%dx%d is not a valid size!\n",
453 pix->width, pix->height);
454 return -ERANGE;
455 }
456
457 HSC = (Hsrc * (1 << 20)) / pix->width - (1 << 20);
458 VSC = (1 << 16) - (Vsrc * (1 << 9) / Vlines - (1 << 9));
459 VSC &= 0x1fff;
460
461 if (pix->width >= 385)
462 filter = 0;
463 else if (pix->width > 192)
464 filter = 1;
465 else if (pix->width > 96)
466 filter = 2;
467 else
468 filter = 3;
469
470 CX18_DEBUG_INFO("decoder set size %dx%d -> scale %ux%u\n",
471 pix->width, pix->height, HSC, VSC);
472
473 /* HSCALE=HSC */
474 cx18_av_write(cx, 0x418, HSC & 0xff);
475 cx18_av_write(cx, 0x419, (HSC >> 8) & 0xff);
476 cx18_av_write(cx, 0x41a, HSC >> 16);
477 /* VSCALE=VSC */
478 cx18_av_write(cx, 0x41c, VSC & 0xff);
479 cx18_av_write(cx, 0x41d, VSC >> 8);
480 /* VS_INTRLACE=1 VFILT=filter */
481 cx18_av_write(cx, 0x41e, 0x8 | filter);
482 break;
483
484 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
485 return cx18_av_vbi(cx, VIDIOC_S_FMT, fmt);
486
487 case V4L2_BUF_TYPE_VBI_CAPTURE:
488 return cx18_av_vbi(cx, VIDIOC_S_FMT, fmt);
489
490 default:
491 return -EINVAL;
492 }
493
494 return 0;
495}
496
497/* ----------------------------------------------------------------------- */
498
499int cx18_av_cmd(struct cx18 *cx, unsigned int cmd, void *arg)
500{
501 struct cx18_av_state *state = &cx->av_state;
502 struct v4l2_tuner *vt = arg;
503 struct v4l2_routing *route = arg;
504
505 /* ignore these commands */
506 switch (cmd) {
507 case TUNER_SET_TYPE_ADDR:
508 return 0;
509 }
510
511 if (!state->is_initialized) {
512 CX18_DEBUG_INFO("cmd %08x triggered fw load\n", cmd);
513 /* initialize on first use */
514 state->is_initialized = 1;
515 cx18_av_initialize(cx);
516 }
517
518 switch (cmd) {
519 case VIDIOC_INT_DECODE_VBI_LINE:
520 return cx18_av_vbi(cx, cmd, arg);
521
522 case VIDIOC_INT_AUDIO_CLOCK_FREQ:
523 return cx18_av_audio(cx, cmd, arg);
524
525 case VIDIOC_STREAMON:
526 CX18_DEBUG_INFO("enable output\n");
527 cx18_av_write(cx, 0x115, 0x8c);
528 cx18_av_write(cx, 0x116, 0x07);
529 break;
530
531 case VIDIOC_STREAMOFF:
532 CX18_DEBUG_INFO("disable output\n");
533 cx18_av_write(cx, 0x115, 0x00);
534 cx18_av_write(cx, 0x116, 0x00);
535 break;
536
537 case VIDIOC_LOG_STATUS:
538 log_video_status(cx);
539 log_audio_status(cx);
540 break;
541
542 case VIDIOC_G_CTRL:
543 return get_v4lctrl(cx, (struct v4l2_control *)arg);
544
545 case VIDIOC_S_CTRL:
546 return set_v4lctrl(cx, (struct v4l2_control *)arg);
547
548 case VIDIOC_QUERYCTRL:
549 {
550 struct v4l2_queryctrl *qc = arg;
551
552 switch (qc->id) {
553 case V4L2_CID_BRIGHTNESS:
554 case V4L2_CID_CONTRAST:
555 case V4L2_CID_SATURATION:
556 case V4L2_CID_HUE:
557 return v4l2_ctrl_query_fill_std(qc);
558 default:
559 break;
560 }
561
562 switch (qc->id) {
563 case V4L2_CID_AUDIO_VOLUME:
564 case V4L2_CID_AUDIO_MUTE:
565 case V4L2_CID_AUDIO_BALANCE:
566 case V4L2_CID_AUDIO_BASS:
567 case V4L2_CID_AUDIO_TREBLE:
568 return v4l2_ctrl_query_fill_std(qc);
569 default:
570 return -EINVAL;
571 }
572 return -EINVAL;
573 }
574
575 case VIDIOC_G_STD:
576 *(v4l2_std_id *)arg = state->std;
577 break;
578
579 case VIDIOC_S_STD:
580 if (state->radio == 0 && state->std == *(v4l2_std_id *)arg)
581 return 0;
582 state->radio = 0;
583 state->std = *(v4l2_std_id *)arg;
584 return set_v4lstd(cx);
585
586 case AUDC_SET_RADIO:
587 state->radio = 1;
588 break;
589
590 case VIDIOC_INT_G_VIDEO_ROUTING:
591 route->input = state->vid_input;
592 route->output = 0;
593 break;
594
595 case VIDIOC_INT_S_VIDEO_ROUTING:
596 return set_input(cx, route->input, state->aud_input);
597
598 case VIDIOC_INT_G_AUDIO_ROUTING:
599 route->input = state->aud_input;
600 route->output = 0;
601 break;
602
603 case VIDIOC_INT_S_AUDIO_ROUTING:
604 return set_input(cx, state->vid_input, route->input);
605
606 case VIDIOC_S_FREQUENCY:
607 input_change(cx);
608 break;
609
610 case VIDIOC_G_TUNER:
611 {
612 u8 vpres = cx18_av_read(cx, 0x40e) & 0x20;
613 u8 mode;
614 int val = 0;
615
616 if (state->radio)
617 break;
618
619 vt->signal = vpres ? 0xffff : 0x0;
620
621 vt->capability |=
622 V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
623 V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
624
625 mode = cx18_av_read(cx, 0x804);
626
627 /* get rxsubchans and audmode */
628 if ((mode & 0xf) == 1)
629 val |= V4L2_TUNER_SUB_STEREO;
630 else
631 val |= V4L2_TUNER_SUB_MONO;
632
633 if (mode == 2 || mode == 4)
634 val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
635
636 if (mode & 0x10)
637 val |= V4L2_TUNER_SUB_SAP;
638
639 vt->rxsubchans = val;
640 vt->audmode = state->audmode;
641 break;
642 }
643
644 case VIDIOC_S_TUNER:
645 if (state->radio)
646 break;
647
648 switch (vt->audmode) {
649 case V4L2_TUNER_MODE_MONO:
650 /* mono -> mono
651 stereo -> mono
652 bilingual -> lang1 */
653 cx18_av_and_or(cx, 0x809, ~0xf, 0x00);
654 break;
655 case V4L2_TUNER_MODE_STEREO:
656 case V4L2_TUNER_MODE_LANG1:
657 /* mono -> mono
658 stereo -> stereo
659 bilingual -> lang1 */
660 cx18_av_and_or(cx, 0x809, ~0xf, 0x04);
661 break;
662 case V4L2_TUNER_MODE_LANG1_LANG2:
663 /* mono -> mono
664 stereo -> stereo
665 bilingual -> lang1/lang2 */
666 cx18_av_and_or(cx, 0x809, ~0xf, 0x07);
667 break;
668 case V4L2_TUNER_MODE_LANG2:
669 /* mono -> mono
670 stereo -> stereo
671 bilingual -> lang2 */
672 cx18_av_and_or(cx, 0x809, ~0xf, 0x01);
673 break;
674 default:
675 return -EINVAL;
676 }
677 state->audmode = vt->audmode;
678 break;
679
680 case VIDIOC_G_FMT:
681 return get_v4lfmt(cx, (struct v4l2_format *)arg);
682
683 case VIDIOC_S_FMT:
684 return set_v4lfmt(cx, (struct v4l2_format *)arg);
685
686 case VIDIOC_INT_RESET:
687 cx18_av_initialize(cx);
688 break;
689
690 default:
691 return -EINVAL;
692 }
693
694 return 0;
695}
696
697/* ----------------------------------------------------------------------- */
698
699/* ----------------------------------------------------------------------- */
700
701static void log_video_status(struct cx18 *cx)
702{
703 static const char *const fmt_strs[] = {
704 "0x0",
705 "NTSC-M", "NTSC-J", "NTSC-4.43",
706 "PAL-BDGHI", "PAL-M", "PAL-N", "PAL-Nc", "PAL-60",
707 "0x9", "0xA", "0xB",
708 "SECAM",
709 "0xD", "0xE", "0xF"
710 };
711
712 struct cx18_av_state *state = &cx->av_state;
713 u8 vidfmt_sel = cx18_av_read(cx, 0x400) & 0xf;
714 u8 gen_stat1 = cx18_av_read(cx, 0x40d);
715 u8 gen_stat2 = cx18_av_read(cx, 0x40e);
716 int vid_input = state->vid_input;
717
718 CX18_INFO("Video signal: %spresent\n",
719 (gen_stat2 & 0x20) ? "" : "not ");
720 CX18_INFO("Detected format: %s\n",
721 fmt_strs[gen_stat1 & 0xf]);
722
723 CX18_INFO("Specified standard: %s\n",
724 vidfmt_sel ? fmt_strs[vidfmt_sel] : "automatic detection");
725
726 if (vid_input >= CX18_AV_COMPOSITE1 &&
727 vid_input <= CX18_AV_COMPOSITE8) {
728 CX18_INFO("Specified video input: Composite %d\n",
729 vid_input - CX18_AV_COMPOSITE1 + 1);
730 } else {
731 CX18_INFO("Specified video input: S-Video (Luma In%d, Chroma In%d)\n",
732 (vid_input & 0xf0) >> 4, (vid_input & 0xf00) >> 8);
733 }
734
735 CX18_INFO("Specified audioclock freq: %d Hz\n", state->audclk_freq);
736}
737
738/* ----------------------------------------------------------------------- */
739
740static void log_audio_status(struct cx18 *cx)
741{
742 struct cx18_av_state *state = &cx->av_state;
743 u8 download_ctl = cx18_av_read(cx, 0x803);
744 u8 mod_det_stat0 = cx18_av_read(cx, 0x805);
745 u8 mod_det_stat1 = cx18_av_read(cx, 0x804);
746 u8 audio_config = cx18_av_read(cx, 0x808);
747 u8 pref_mode = cx18_av_read(cx, 0x809);
748 u8 afc0 = cx18_av_read(cx, 0x80b);
749 u8 mute_ctl = cx18_av_read(cx, 0x8d3);
750 int aud_input = state->aud_input;
751 char *p;
752
753 switch (mod_det_stat0) {
754 case 0x00: p = "mono"; break;
755 case 0x01: p = "stereo"; break;
756 case 0x02: p = "dual"; break;
757 case 0x04: p = "tri"; break;
758 case 0x10: p = "mono with SAP"; break;
759 case 0x11: p = "stereo with SAP"; break;
760 case 0x12: p = "dual with SAP"; break;
761 case 0x14: p = "tri with SAP"; break;
762 case 0xfe: p = "forced mode"; break;
763 default: p = "not defined";
764 }
765 CX18_INFO("Detected audio mode: %s\n", p);
766
767 switch (mod_det_stat1) {
768 case 0x00: p = "BTSC"; break;
769 case 0x01: p = "EIAJ"; break;
770 case 0x02: p = "A2-M"; break;
771 case 0x03: p = "A2-BG"; break;
772 case 0x04: p = "A2-DK1"; break;
773 case 0x05: p = "A2-DK2"; break;
774 case 0x06: p = "A2-DK3"; break;
775 case 0x07: p = "A1 (6.0 MHz FM Mono)"; break;
776 case 0x08: p = "AM-L"; break;
777 case 0x09: p = "NICAM-BG"; break;
778 case 0x0a: p = "NICAM-DK"; break;
779 case 0x0b: p = "NICAM-I"; break;
780 case 0x0c: p = "NICAM-L"; break;
781 case 0x0d: p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)"; break;
782 case 0xff: p = "no detected audio standard"; break;
783 default: p = "not defined";
784 }
785 CX18_INFO("Detected audio standard: %s\n", p);
786 CX18_INFO("Audio muted: %s\n",
787 (mute_ctl & 0x2) ? "yes" : "no");
788 CX18_INFO("Audio microcontroller: %s\n",
789 (download_ctl & 0x10) ? "running" : "stopped");
790
791 switch (audio_config >> 4) {
792 case 0x00: p = "BTSC"; break;
793 case 0x01: p = "EIAJ"; break;
794 case 0x02: p = "A2-M"; break;
795 case 0x03: p = "A2-BG"; break;
796 case 0x04: p = "A2-DK1"; break;
797 case 0x05: p = "A2-DK2"; break;
798 case 0x06: p = "A2-DK3"; break;
799 case 0x07: p = "A1 (6.0 MHz FM Mono)"; break;
800 case 0x08: p = "AM-L"; break;
801 case 0x09: p = "NICAM-BG"; break;
802 case 0x0a: p = "NICAM-DK"; break;
803 case 0x0b: p = "NICAM-I"; break;
804 case 0x0c: p = "NICAM-L"; break;
805 case 0x0d: p = "FM radio"; break;
806 case 0x0f: p = "automatic detection"; break;
807 default: p = "undefined";
808 }
809 CX18_INFO("Configured audio standard: %s\n", p);
810
811 if ((audio_config >> 4) < 0xF) {
812 switch (audio_config & 0xF) {
813 case 0x00: p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)"; break;
814 case 0x01: p = "MONO2 (LANGUAGE B)"; break;
815 case 0x02: p = "MONO3 (STEREO forced MONO)"; break;
816 case 0x03: p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)"; break;
817 case 0x04: p = "STEREO"; break;
818 case 0x05: p = "DUAL1 (AB)"; break;
819 case 0x06: p = "DUAL2 (AC) (FM)"; break;
820 case 0x07: p = "DUAL3 (BC) (FM)"; break;
821 case 0x08: p = "DUAL4 (AC) (AM)"; break;
822 case 0x09: p = "DUAL5 (BC) (AM)"; break;
823 case 0x0a: p = "SAP"; break;
824 default: p = "undefined";
825 }
826 CX18_INFO("Configured audio mode: %s\n", p);
827 } else {
828 switch (audio_config & 0xF) {
829 case 0x00: p = "BG"; break;
830 case 0x01: p = "DK1"; break;
831 case 0x02: p = "DK2"; break;
832 case 0x03: p = "DK3"; break;
833 case 0x04: p = "I"; break;
834 case 0x05: p = "L"; break;
835 case 0x06: p = "BTSC"; break;
836 case 0x07: p = "EIAJ"; break;
837 case 0x08: p = "A2-M"; break;
838 case 0x09: p = "FM Radio"; break;
839 case 0x0f: p = "automatic standard and mode detection"; break;
840 default: p = "undefined";
841 }
842 CX18_INFO("Configured audio system: %s\n", p);
843 }
844
845 if (aud_input)
846 CX18_INFO("Specified audio input: Tuner (In%d)\n",
847 aud_input);
848 else
849 CX18_INFO("Specified audio input: External\n");
850
851 switch (pref_mode & 0xf) {
852 case 0: p = "mono/language A"; break;
853 case 1: p = "language B"; break;
854 case 2: p = "language C"; break;
855 case 3: p = "analog fallback"; break;
856 case 4: p = "stereo"; break;
857 case 5: p = "language AC"; break;
858 case 6: p = "language BC"; break;
859 case 7: p = "language AB"; break;
860 default: p = "undefined";
861 }
862 CX18_INFO("Preferred audio mode: %s\n", p);
863
864 if ((audio_config & 0xf) == 0xf) {
865 switch ((afc0 >> 2) & 0x1) {
866 case 0: p = "system DK"; break;
867 case 1: p = "system L"; break;
868 }
869 CX18_INFO("Selected 65 MHz format: %s\n", p);
870
871 switch (afc0 & 0x3) {
872 case 0: p = "BTSC"; break;
873 case 1: p = "EIAJ"; break;
874 case 2: p = "A2-M"; break;
875 default: p = "undefined";
876 }
877 CX18_INFO("Selected 45 MHz format: %s\n", p);
878 }
879}
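
cx18_av_write() above emulates byte-wide register access on top of a 32-bit MMIO window: it reads the aligned word with readl(), clears the target byte lane, merges in the new value and writes the word back. A user-space sketch of that read-modify-write, with an ordinary array standing in for the ioremapped window; the addresses below are arbitrary:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t window[4];	/* stand-in for the ioremapped register window */

/* Byte write through 32-bit accesses, as cx18_av_write() does: mask the
 * byte lane inside the aligned word, merge the new value, write it back. */
static void write_byte(uint16_t addr, uint8_t value)
{
	uint32_t word = window[(addr & ~3u) >> 2];
	int shift = (addr & 3) * 8;

	word = (word & ~(0xffu << shift)) | ((uint32_t)value << shift);
	window[(addr & ~3u) >> 2] = word;
}

static uint8_t read_byte(uint16_t addr)
{
	return (window[(addr & ~3u) >> 2] >> ((addr & 3) * 8)) & 0xff;
}

int main(void)
{
	memset(window, 0, sizeof(window));
	write_byte(0x5, 0xab);	/* byte lane 1 of word 1 */
	write_byte(0x6, 0xcd);	/* byte lane 2 of the same word */
	printf("word 1 = 0x%08x, byte 0x5 = 0x%02x\n",
	       (unsigned)window[1], (unsigned)read_byte(0x5));
	return 0;
}
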
diff --git a/drivers/media/video/cx18/cx18-av-core.h b/drivers/media/video/cx18/cx18-av-core.h
new file mode 100644
index 000000000000..786901d72e9a
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-av-core.h
@@ -0,0 +1,318 @@
1/*
2 * cx18 ADEC header
3 *
4 * Derived from cx25840-core.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 */
23
24#ifndef _CX18_AV_CORE_H_
25#define _CX18_AV_CORE_H_
26
27struct cx18;
28
29enum cx18_av_video_input {
30 /* Composite video inputs In1-In8 */
31 CX18_AV_COMPOSITE1 = 1,
32 CX18_AV_COMPOSITE2,
33 CX18_AV_COMPOSITE3,
34 CX18_AV_COMPOSITE4,
35 CX18_AV_COMPOSITE5,
36 CX18_AV_COMPOSITE6,
37 CX18_AV_COMPOSITE7,
38 CX18_AV_COMPOSITE8,
39
40 /* S-Video inputs consist of one luma input (In1-In4) ORed with one
41 chroma input (In5-In8) */
42 CX18_AV_SVIDEO_LUMA1 = 0x10,
43 CX18_AV_SVIDEO_LUMA2 = 0x20,
44 CX18_AV_SVIDEO_LUMA3 = 0x30,
45 CX18_AV_SVIDEO_LUMA4 = 0x40,
46 CX18_AV_SVIDEO_CHROMA4 = 0x400,
47 CX18_AV_SVIDEO_CHROMA5 = 0x500,
48 CX18_AV_SVIDEO_CHROMA6 = 0x600,
49 CX18_AV_SVIDEO_CHROMA7 = 0x700,
50 CX18_AV_SVIDEO_CHROMA8 = 0x800,
51
52 /* S-Video aliases for common luma/chroma combinations */
53 CX18_AV_SVIDEO1 = 0x510,
54 CX18_AV_SVIDEO2 = 0x620,
55 CX18_AV_SVIDEO3 = 0x730,
56 CX18_AV_SVIDEO4 = 0x840,
57};
58
59enum cx18_av_audio_input {
60 /* Audio inputs: serial or In4-In8 */
61 CX18_AV_AUDIO_SERIAL,
62 CX18_AV_AUDIO4 = 4,
63 CX18_AV_AUDIO5,
64 CX18_AV_AUDIO6,
65 CX18_AV_AUDIO7,
66 CX18_AV_AUDIO8,
67};
68
69struct cx18_av_state {
70 int radio;
71 v4l2_std_id std;
72 enum cx18_av_video_input vid_input;
73 enum cx18_av_audio_input aud_input;
74 u32 audclk_freq;
75 int audmode;
76 int vbi_line_offset;
77 u32 id;
78 u32 rev;
79 int is_initialized;
80};
81
82
83/* Registers */
84#define CXADEC_CHIP_TYPE_TIGER 0x837
85#define CXADEC_CHIP_TYPE_MAKO 0x843
86
87#define CXADEC_HOST_REG1 0x000
88#define CXADEC_HOST_REG2 0x001
89
90#define CXADEC_CHIP_CTRL 0x100
91#define CXADEC_AFE_CTRL 0x104
92#define CXADEC_PLL_CTRL1 0x108
93#define CXADEC_VID_PLL_FRAC 0x10C
94#define CXADEC_AUX_PLL_FRAC 0x110
95#define CXADEC_PIN_CTRL1 0x114
96#define CXADEC_PIN_CTRL2 0x118
97#define CXADEC_PIN_CFG1 0x11C
98#define CXADEC_PIN_CFG2 0x120
99
100#define CXADEC_PIN_CFG3 0x124
101#define CXADEC_I2S_MCLK 0x127
102
103#define CXADEC_AUD_LOCK1 0x128
104#define CXADEC_AUD_LOCK2 0x12C
105#define CXADEC_POWER_CTRL 0x130
106#define CXADEC_AFE_DIAG_CTRL1 0x134
107#define CXADEC_AFE_DIAG_CTRL2 0x138
108#define CXADEC_AFE_DIAG_CTRL3 0x13C
109#define CXADEC_PLL_DIAG_CTRL 0x140
110#define CXADEC_TEST_CTRL1 0x144
111#define CXADEC_TEST_CTRL2 0x148
112#define CXADEC_BIST_STAT 0x14C
113#define CXADEC_DLL1_DIAG_CTRL 0x158
114#define CXADEC_DLL2_DIAG_CTRL 0x15C
115
116/* IR registers */
117#define CXADEC_IR_CTRL_REG 0x200
118#define CXADEC_IR_TXCLK_REG 0x204
119#define CXADEC_IR_RXCLK_REG 0x208
120#define CXADEC_IR_CDUTY_REG 0x20C
121#define CXADEC_IR_STAT_REG 0x210
122#define CXADEC_IR_IRQEN_REG 0x214
123#define CXADEC_IR_FILTER_REG 0x218
124#define CXADEC_IR_FIFO_REG 0x21C
125
126/* Video Registers */
127#define CXADEC_MODE_CTRL 0x400
128#define CXADEC_OUT_CTRL1 0x404
129#define CXADEC_OUT_CTRL2 0x408
130#define CXADEC_GEN_STAT 0x40C
131#define CXADEC_INT_STAT_MASK 0x410
132#define CXADEC_LUMA_CTRL 0x414
133
134#define CXADEC_BRIGHTNESS_CTRL_BYTE 0x414
135#define CXADEC_CONTRAST_CTRL_BYTE 0x415
136#define CXADEC_LUMA_CTRL_BYTE_3 0x416
137
138#define CXADEC_HSCALE_CTRL 0x418
139#define CXADEC_VSCALE_CTRL 0x41C
140
141#define CXADEC_CHROMA_CTRL 0x420
142
143#define CXADEC_USAT_CTRL_BYTE 0x420
144#define CXADEC_VSAT_CTRL_BYTE 0x421
145#define CXADEC_HUE_CTRL_BYTE 0x422
146
147#define CXADEC_VBI_LINE_CTRL1 0x424
148#define CXADEC_VBI_LINE_CTRL2 0x428
149#define CXADEC_VBI_LINE_CTRL3 0x42C
150#define CXADEC_VBI_LINE_CTRL4 0x430
151#define CXADEC_VBI_LINE_CTRL5 0x434
152#define CXADEC_VBI_FC_CFG 0x438
153#define CXADEC_VBI_MISC_CFG1 0x43C
154#define CXADEC_VBI_MISC_CFG2 0x440
155#define CXADEC_VBI_PAY1 0x444
156#define CXADEC_VBI_PAY2 0x448
157#define CXADEC_VBI_CUST1_CFG1 0x44C
158#define CXADEC_VBI_CUST1_CFG2 0x450
159#define CXADEC_VBI_CUST1_CFG3 0x454
160#define CXADEC_VBI_CUST2_CFG1 0x458
161#define CXADEC_VBI_CUST2_CFG2 0x45C
162#define CXADEC_VBI_CUST2_CFG3 0x460
163#define CXADEC_VBI_CUST3_CFG1 0x464
164#define CXADEC_VBI_CUST3_CFG2 0x468
165#define CXADEC_VBI_CUST3_CFG3 0x46C
166#define CXADEC_HORIZ_TIM_CTRL 0x470
167#define CXADEC_VERT_TIM_CTRL 0x474
168#define CXADEC_SRC_COMB_CFG 0x478
169#define CXADEC_CHROMA_VBIOFF_CFG 0x47C
170#define CXADEC_FIELD_COUNT 0x480
171#define CXADEC_MISC_TIM_CTRL 0x484
172#define CXADEC_DFE_CTRL1 0x488
173#define CXADEC_DFE_CTRL2 0x48C
174#define CXADEC_DFE_CTRL3 0x490
175#define CXADEC_PLL_CTRL2 0x494
176#define CXADEC_HTL_CTRL 0x498
177#define CXADEC_COMB_CTRL 0x49C
178#define CXADEC_CRUSH_CTRL 0x4A0
179#define CXADEC_SOFT_RST_CTRL 0x4A4
180#define CXADEC_MV_DT_CTRL2 0x4A8
181#define CXADEC_MV_DT_CTRL3 0x4AC
182#define CXADEC_MISC_DIAG_CTRL 0x4B8
183
184#define CXADEC_DL_CTL 0x800
185#define CXADEC_DL_CTL_ADDRESS_LOW 0x800 /* Byte 1 in DL_CTL */
186#define CXADEC_DL_CTL_ADDRESS_HIGH 0x801 /* Byte 2 in DL_CTL */
187#define CXADEC_DL_CTL_DATA 0x802 /* Byte 3 in DL_CTL */
188#define CXADEC_DL_CTL_CONTROL 0x803 /* Byte 4 in DL_CTL */
189
190#define CXADEC_STD_DET_STATUS 0x804
191
192#define CXADEC_STD_DET_CTL 0x808
193#define CXADEC_STD_DET_CTL_AUD_CTL 0x808 /* Byte 1 in STD_DET_CTL */
194#define CXADEC_STD_DET_CTL_PREF_MODE 0x809 /* Byte 2 in STD_DET_CTL */
195
196#define CXADEC_DW8051_INT 0x80C
197#define CXADEC_GENERAL_CTL 0x810
198#define CXADEC_AAGC_CTL 0x814
199#define CXADEC_IF_SRC_CTL 0x818
200#define CXADEC_ANLOG_DEMOD_CTL 0x81C
201#define CXADEC_ROT_FREQ_CTL 0x820
202#define CXADEC_FM1_CTL 0x824
203#define CXADEC_PDF_CTL 0x828
204#define CXADEC_DFT1_CTL1 0x82C
205#define CXADEC_DFT1_CTL2 0x830
206#define CXADEC_DFT_STATUS 0x834
207#define CXADEC_DFT2_CTL1 0x838
208#define CXADEC_DFT2_CTL2 0x83C
209#define CXADEC_DFT2_STATUS 0x840
210#define CXADEC_DFT3_CTL1 0x844
211#define CXADEC_DFT3_CTL2 0x848
212#define CXADEC_DFT3_STATUS 0x84C
213#define CXADEC_DFT4_CTL1 0x850
214#define CXADEC_DFT4_CTL2 0x854
215#define CXADEC_DFT4_STATUS 0x858
216#define CXADEC_AM_MTS_DET 0x85C
217#define CXADEC_ANALOG_MUX_CTL 0x860
218#define CXADEC_DIG_PLL_CTL1 0x864
219#define CXADEC_DIG_PLL_CTL2 0x868
220#define CXADEC_DIG_PLL_CTL3 0x86C
221#define CXADEC_DIG_PLL_CTL4 0x870
222#define CXADEC_DIG_PLL_CTL5 0x874
223#define CXADEC_DEEMPH_GAIN_CTL 0x878
224#define CXADEC_DEEMPH_COEF1 0x87C
225#define CXADEC_DEEMPH_COEF2 0x880
226#define CXADEC_DBX1_CTL1 0x884
227#define CXADEC_DBX1_CTL2 0x888
228#define CXADEC_DBX1_STATUS 0x88C
229#define CXADEC_DBX2_CTL1 0x890
230#define CXADEC_DBX2_CTL2 0x894
231#define CXADEC_DBX2_STATUS 0x898
232#define CXADEC_AM_FM_DIFF 0x89C
233
234/* NICAM registers go here */
235#define CXADEC_NICAM_STATUS 0x8C8
236#define CXADEC_DEMATRIX_CTL 0x8CC
237
238#define CXADEC_PATH1_CTL1 0x8D0
239#define CXADEC_PATH1_VOL_CTL 0x8D4
240#define CXADEC_PATH1_EQ_CTL 0x8D8
241#define CXADEC_PATH1_SC_CTL 0x8DC
242
243#define CXADEC_PATH2_CTL1 0x8E0
244#define CXADEC_PATH2_VOL_CTL 0x8E4
245#define CXADEC_PATH2_EQ_CTL 0x8E8
246#define CXADEC_PATH2_SC_CTL 0x8EC
247
248#define CXADEC_SRC_CTL 0x8F0
249#define CXADEC_SRC_LF_COEF 0x8F4
250#define CXADEC_SRC1_CTL 0x8F8
251#define CXADEC_SRC2_CTL 0x8FC
252#define CXADEC_SRC3_CTL 0x900
253#define CXADEC_SRC4_CTL 0x904
254#define CXADEC_SRC5_CTL 0x908
255#define CXADEC_SRC6_CTL 0x90C
256
257#define CXADEC_BASEBAND_OUT_SEL 0x910
258#define CXADEC_I2S_IN_CTL 0x914
259#define CXADEC_I2S_OUT_CTL 0x918
260#define CXADEC_AC97_CTL 0x91C
261#define CXADEC_QAM_PDF 0x920
262#define CXADEC_QAM_CONST_DEC 0x924
263#define CXADEC_QAM_ROTATOR_FREQ 0x948
264
265/* Bit definitions / settings used in Mako Audio */
266#define CXADEC_PREF_MODE_MONO_LANGA 0
267#define CXADEC_PREF_MODE_MONO_LANGB 1
268#define CXADEC_PREF_MODE_MONO_LANGC 2
269#define CXADEC_PREF_MODE_FALLBACK 3
270#define CXADEC_PREF_MODE_STEREO 4
271#define CXADEC_PREF_MODE_DUAL_LANG_AC 5
272#define CXADEC_PREF_MODE_DUAL_LANG_BC 6
273#define CXADEC_PREF_MODE_DUAL_LANG_AB 7
274
275
276#define CXADEC_DETECT_STEREO 1
277#define CXADEC_DETECT_DUAL 2
278#define CXADEC_DETECT_TRI 4
279#define CXADEC_DETECT_SAP 0x10
280#define CXADEC_DETECT_NO_SIGNAL 0xFF
281
282#define CXADEC_SELECT_AUDIO_STANDARD_BG 0xF0 /* NICAM BG and A2 BG */
283#define CXADEC_SELECT_AUDIO_STANDARD_DK1 0xF1 /* NICAM DK and A2 DK */
284#define CXADEC_SELECT_AUDIO_STANDARD_DK2 0xF2
285#define CXADEC_SELECT_AUDIO_STANDARD_DK3 0xF3
286#define CXADEC_SELECT_AUDIO_STANDARD_I 0xF4 /* NICAM I and A1 */
287#define CXADEC_SELECT_AUDIO_STANDARD_L 0xF5 /* NICAM L and System L AM */
288#define CXADEC_SELECT_AUDIO_STANDARD_BTSC 0xF6
289#define CXADEC_SELECT_AUDIO_STANDARD_EIAJ 0xF7
290#define CXADEC_SELECT_AUDIO_STANDARD_A2_M 0xF8 /* A2 M */
291#define CXADEC_SELECT_AUDIO_STANDARD_FM 0xF9 /* FM radio */
292#define CXADEC_SELECT_AUDIO_STANDARD_AUTO 0xFF /* Auto detect */
293
294/* ----------------------------------------------------------------------- */
295/* cx18_av-core.c */
296int cx18_av_write(struct cx18 *cx, u16 addr, u8 value);
297int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value);
298u8 cx18_av_read(struct cx18 *cx, u16 addr);
299u32 cx18_av_read4(struct cx18 *cx, u16 addr);
300int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned mask, u8 value);
301int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 mask, u32 value);
302int cx18_av_cmd(struct cx18 *cx, unsigned int cmd, void *arg);
303
304/* ----------------------------------------------------------------------- */
305/* cx18_av-firmware.c */
306int cx18_av_loadfw(struct cx18 *cx);
307
308/* ----------------------------------------------------------------------- */
309/* cx18_av-audio.c */
310int cx18_av_audio(struct cx18 *cx, unsigned int cmd, void *arg);
311void cx18_av_audio_set_path(struct cx18 *cx);
312
313/* ----------------------------------------------------------------------- */
314/* cx18_av-vbi.c */
315void cx18_av_vbi_setup(struct cx18 *cx);
316int cx18_av_vbi(struct cx18 *cx, unsigned int cmd, void *arg);
317
318#endif
diff --git a/drivers/media/video/cx18/cx18-av-firmware.c b/drivers/media/video/cx18/cx18-av-firmware.c
new file mode 100644
index 000000000000..526e142156cd
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-av-firmware.c
@@ -0,0 +1,120 @@
1/*
2 * cx18 ADEC firmware functions
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 */
21
22#include "cx18-driver.h"
23#include <linux/firmware.h>
24
25#define FWFILE "v4l-cx23418-dig.fw"
26
27int cx18_av_loadfw(struct cx18 *cx)
28{
29 const struct firmware *fw = NULL;
30 u32 size;
31 u32 v;
32 u8 *ptr;
33 int i;
34
35 if (request_firmware(&fw, FWFILE, &cx->dev->dev) != 0) {
36 CX18_ERR("unable to open firmware %s\n", FWFILE);
37 return -EINVAL;
38 }
39
40 cx18_av_write4(cx, CXADEC_CHIP_CTRL, 0x00010000);
41 cx18_av_write(cx, CXADEC_STD_DET_CTL, 0xf6); /* Byte 0 */
42
43 /* Reset the Mako core (Register is undocumented.) */
44 cx18_av_write4(cx, 0x8100, 0x00010000);
45
46 /* Put the 8051 in reset and enable firmware upload */
47 cx18_av_write4(cx, CXADEC_DL_CTL, 0x0F000000);
48
49 ptr = fw->data;
50 size = fw->size;
51
52 for (i = 0; i < size; i++) {
53 u32 dl_control = 0x0F000000 | ((u32)ptr[i] << 16);
54 u32 value = 0;
55 int retries;
56
57 for (retries = 0; retries < 5; retries++) {
58 cx18_av_write4(cx, CXADEC_DL_CTL, dl_control);
59 value = cx18_av_read4(cx, CXADEC_DL_CTL);
60 if ((value & 0x3F00) == (dl_control & 0x3F00))
61 break;
62 }
63 if (retries >= 5) {
64 CX18_ERR("unable to load firmware %s\n", FWFILE);
65 release_firmware(fw);
66 return -EIO;
67 }
68 }
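	/*
	 * Each firmware byte above is pushed through DL_CTL bits 23:16 with
	 * the upload-enable value 0x0F000000 held in the top byte, and the
	 * write is retried up to five times until bits 13:8 of the readback
	 * match the written value (those bits are zero in what was written).
	 * The total byte count is then latched below together with control
	 * value 0x13 in the top byte.
	 */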
69
70 cx18_av_write4(cx, CXADEC_DL_CTL, 0x13000000 | fw->size);
71
72 /* Output to the 416 */
73 cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x78000);
74
75 /* Audio input control 1 set to Sony mode */
76 /* Audio input control 2 is 0 for slave operation */
77 /* 0xC4000914[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */
78 /* 0xC4000914[7]: 0 = Philips mode, 1 = Sony mode (1st SCK rising edge
79 after WS transition for first bit of audio word.) */
80 cx18_av_write4(cx, CXADEC_I2S_IN_CTL, 0x000000A0);
81
82 /* Audio output control 1 is set to Sony mode */
83 /* Audio output control 2 is set to 1 for master mode */
84 /* 0xC4000918[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */
85 /* 0xC4000918[7]: 0 = Philips mode, 1 = Sony mode (1st SCK rising edge
86 after WS transition for first bit of audio word.) */
87 /* 0xC4000918[8]: 0 = slave operation, 1 = master (SCK_OUT and WS_OUT
88 are generated) */
89 cx18_av_write4(cx, CXADEC_I2S_OUT_CTL, 0x000001A0);
90
91 /* set alt I2S master clock to /16 and enable alt divider I2S
92 passthrough */
93 cx18_av_write4(cx, CXADEC_PIN_CFG3, 0x5000B687);
94
95 cx18_av_write4(cx, CXADEC_STD_DET_CTL, 0x000000F6);
96 /* CxDevWrReg(CXADEC_STD_DET_CTL, 0x000000FF); */
97
98 /* Set bit 0 in register 0x9CC to signify that this is MiniMe. */
99 /* Register 0x09CC is defined by the Merlin firmware, and doesn't
100 have a name in the spec. */
101 cx18_av_write4(cx, 0x09CC, 1);
102
103#define CX18_AUDIO_ENABLE 0xc72014
104 v = read_reg(CX18_AUDIO_ENABLE);
105 /* If bit 11 is 1 */
106 if (v & 0x800)
107 write_reg(v & 0xFFFFFBFF, CX18_AUDIO_ENABLE); /* Clear bit 10 */
108
109 /* Enable WW auto audio standard detection */
110 v = cx18_av_read4(cx, CXADEC_STD_DET_CTL);
111 v |= 0xFF; /* Auto by default */
112 v |= 0x400; /* Stereo by default */
113 v |= 0x14000000;
114 cx18_av_write4(cx, CXADEC_STD_DET_CTL, v);
115
116 release_firmware(fw);
117
118 CX18_INFO("loaded %s firmware (%d bytes)\n", FWFILE, size);
119 return 0;
120}
diff --git a/drivers/media/video/cx18/cx18-av-vbi.c b/drivers/media/video/cx18/cx18-av-vbi.c
new file mode 100644
index 000000000000..d09f1daf4ebf
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-av-vbi.c
@@ -0,0 +1,413 @@
1/*
2 * cx18 ADEC VBI functions
3 *
4 * Derived from cx25840-vbi.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 */
23
24
25#include "cx18-driver.h"
26
27static int odd_parity(u8 c)
28{
29 c ^= (c >> 4);
30 c ^= (c >> 2);
31 c ^= (c >> 1);
32
33 return c & 1;
34}
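/*
 * odd_parity() folds the byte onto itself with three XOR shifts, so bit 0
 * ends up holding the XOR of all eight bits: the result is 1 exactly when
 * the byte has an odd number of set bits. Worked example (illustration
 * only, not part of the driver):
 *
 *	c = 0x07 (three bits set)
 *	c ^= c >> 4  ->  0x07
 *	c ^= c >> 2  ->  0x06
 *	c ^= c >> 1  ->  0x05
 *	c & 1 == 1   ->  odd parity
 */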
35
36static int decode_vps(u8 *dst, u8 *p)
37{
38 static const u8 biphase_tbl[] = {
39 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
40 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
41 0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
42 0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
43 0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
44 0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
45 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
46 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
47 0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
48 0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
49 0xc3, 0x4b, 0x43, 0xc3, 0x87, 0x0f, 0x07, 0x87,
50 0x83, 0x0b, 0x03, 0x83, 0xc3, 0x4b, 0x43, 0xc3,
51 0xc1, 0x49, 0x41, 0xc1, 0x85, 0x0d, 0x05, 0x85,
52 0x81, 0x09, 0x01, 0x81, 0xc1, 0x49, 0x41, 0xc1,
53 0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
54 0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
55 0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
56 0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
57 0xc2, 0x4a, 0x42, 0xc2, 0x86, 0x0e, 0x06, 0x86,
58 0x82, 0x0a, 0x02, 0x82, 0xc2, 0x4a, 0x42, 0xc2,
59 0xc0, 0x48, 0x40, 0xc0, 0x84, 0x0c, 0x04, 0x84,
60 0x80, 0x08, 0x00, 0x80, 0xc0, 0x48, 0x40, 0xc0,
61 0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
62 0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
63 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
64 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
65 0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
66 0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
67 0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
68 0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
69 0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
70 0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
71 };
72
73 u8 c, err = 0;
74 int i;
75
76 for (i = 0; i < 2 * 13; i += 2) {
77 err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]];
78 c = (biphase_tbl[p[i + 1]] & 0xf) |
79 ((biphase_tbl[p[i]] & 0xf) << 4);
80 dst[i / 2] = c;
81 }
82
83 return err & 0xf0;
84}
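/*
 * decode_vps() turns 26 raw biphase-coded bytes into 13 decoded VPS bytes.
 * Each biphase_tbl[] entry carries the decoded data nibble in its low four
 * bits and error flags in the upper bits, so one output byte is assembled
 * from two raw bytes (first raw byte -> high nibble, second -> low nibble)
 * and any nonzero high nibble accumulated in 'err' reports a biphase
 * violation, which the caller treats as a decode failure.
 */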
85
86void cx18_av_vbi_setup(struct cx18 *cx)
87{
88 struct cx18_av_state *state = &cx->av_state;
89 v4l2_std_id std = state->std;
90 int hblank, hactive, burst, vblank, vactive, sc;
91 int vblank656, src_decimation;
92 int luma_lpf, uv_lpf, comb;
93 u32 pll_int, pll_frac, pll_post;
94
95 /* datasheet startup, step 8d */
96 if (std & ~V4L2_STD_NTSC)
97 cx18_av_write(cx, 0x49f, 0x11);
98 else
99 cx18_av_write(cx, 0x49f, 0x14);
100
101 if (std & V4L2_STD_625_50) {
102 hblank = 0x084;
103 hactive = 0x2d0;
104 burst = 0x5d;
105 vblank = 0x024;
106 vactive = 0x244;
107 vblank656 = 0x28;
108 src_decimation = 0x21f;
109
110 luma_lpf = 2;
111 if (std & V4L2_STD_SECAM) {
112 uv_lpf = 0;
113 comb = 0;
114 sc = 0x0a425f;
115 } else if (std == V4L2_STD_PAL_Nc) {
116 uv_lpf = 1;
117 comb = 0x20;
118 sc = 556453;
119 } else {
120 uv_lpf = 1;
121 comb = 0x20;
122 sc = 0x0a8263;
123 }
124 } else {
125 hactive = 720;
126 hblank = 122;
127 vactive = 487;
128 luma_lpf = 1;
129 uv_lpf = 1;
130
131 src_decimation = 0x21f;
132 if (std == V4L2_STD_PAL_60) {
133 vblank = 26;
134 vblank656 = 26;
135 burst = 0x5b;
136 luma_lpf = 2;
137 comb = 0x20;
138 sc = 0x0a8263;
139 } else if (std == V4L2_STD_PAL_M) {
140 vblank = 20;
141 vblank656 = 24;
142 burst = 0x61;
143 comb = 0x20;
144
145 sc = 555452;
146 } else {
147 vblank = 26;
148 vblank656 = 26;
149 burst = 0x5b;
150 comb = 0x66;
151 sc = 556063;
152 }
153 }
154
155 /* DEBUG: Displays configured PLL frequency */
156 pll_int = cx18_av_read(cx, 0x108);
157 pll_frac = cx18_av_read4(cx, 0x10c) & 0x1ffffff;
158 pll_post = cx18_av_read(cx, 0x109);
159 CX18_DEBUG_INFO("PLL regs = int: %u, frac: %u, post: %u\n",
160 pll_int, pll_frac, pll_post);
161
162 if (pll_post) {
163 int fin, fsc;
164 int pll = 28636363L * ((((u64)pll_int) << 25) + pll_frac);
165
166 pll >>= 25;
167 pll /= pll_post;
168 CX18_DEBUG_INFO("PLL = %d.%06d MHz\n",
169 pll / 1000000, pll % 1000000);
170 CX18_DEBUG_INFO("PLL/8 = %d.%06d MHz\n",
171 pll / 8000000, (pll / 8) % 1000000);
172
173 fin = ((u64)src_decimation * pll) >> 12;
174 CX18_DEBUG_INFO("ADC Sampling freq = %d.%06d MHz\n",
175 fin / 1000000, fin % 1000000);
176
177 fsc = (((u64)sc) * pll) >> 24L;
178 CX18_DEBUG_INFO("Chroma sub-carrier freq = %d.%06d MHz\n",
179 fsc / 1000000, fsc % 1000000);
180
181 CX18_DEBUG_INFO("hblank %i, hactive %i, "
182 "vblank %i, vactive %i, vblank656 %i, src_dec %i, "
183 "burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, "
184 "sc 0x%06x\n",
185 hblank, hactive, vblank, vactive, vblank656,
186 src_decimation, burst, luma_lpf, uv_lpf, comb, sc);
187 }
188
189 /* Sets horizontal blanking delay and active lines */
190 cx18_av_write(cx, 0x470, hblank);
191 cx18_av_write(cx, 0x471, 0xff & (((hblank >> 8) & 0x3) |
192 (hactive << 4)));
193 cx18_av_write(cx, 0x472, hactive >> 4);
194
195 /* Sets burst gate delay */
196 cx18_av_write(cx, 0x473, burst);
197
198 /* Sets vertical blanking delay and active duration */
199 cx18_av_write(cx, 0x474, vblank);
200 cx18_av_write(cx, 0x475, 0xff & (((vblank >> 8) & 0x3) |
201 (vactive << 4)));
202 cx18_av_write(cx, 0x476, vactive >> 4);
203 cx18_av_write(cx, 0x477, vblank656);
204
205 /* Sets src decimation rate */
206 cx18_av_write(cx, 0x478, 0xff & src_decimation);
207 cx18_av_write(cx, 0x479, 0xff & (src_decimation >> 8));
208
209 /* Sets Luma and UV Low pass filters */
210 cx18_av_write(cx, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30));
211
212 /* Enables comb filters */
213 cx18_av_write(cx, 0x47b, comb);
214
215 /* Sets SC Step*/
216 cx18_av_write(cx, 0x47c, sc);
217 cx18_av_write(cx, 0x47d, 0xff & sc >> 8);
218 cx18_av_write(cx, 0x47e, 0xff & sc >> 16);
219
220 /* Sets VBI parameters */
221 if (std & V4L2_STD_625_50) {
222 cx18_av_write(cx, 0x47f, 0x01);
223 state->vbi_line_offset = 5;
224 } else {
225 cx18_av_write(cx, 0x47f, 0x00);
226 state->vbi_line_offset = 8;
227 }
228}
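The three writes to 0x470-0x472 above pack a 10-bit hblank and a 12-bit
hactive value into byte-wide registers: hblank[7:0] first, then hblank[9:8]
in bits 1:0 of the next register alongside hactive[3:0] in bits 7:4, then
hactive[11:4]. A minimal standalone round-trip check of that packing, using
the 625-line values from the function above (illustration only, not driver
code):

#include <assert.h>

int main(void)
{
	int hblank = 0x084, hactive = 0x2d0;	/* 625-line case above */
	unsigned char r470 = hblank & 0xff;
	unsigned char r471 = 0xff & (((hblank >> 8) & 0x3) | (hactive << 4));
	unsigned char r472 = (hactive >> 4) & 0xff;

	/* reassemble from the three register bytes */
	int hb = r470 | ((r471 & 0x3) << 8);
	int ha = ((r471 >> 4) & 0xf) | (r472 << 4);

	assert(hb == hblank && ha == hactive);	/* packing is lossless */
	return 0;
}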
229
230int cx18_av_vbi(struct cx18 *cx, unsigned int cmd, void *arg)
231{
232 struct cx18_av_state *state = &cx->av_state;
233 struct v4l2_format *fmt;
234 struct v4l2_sliced_vbi_format *svbi;
235
236 switch (cmd) {
237 case VIDIOC_G_FMT:
238 {
239 static u16 lcr2vbi[] = {
240 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
241 0, V4L2_SLICED_WSS_625, 0, /* 4 */
242 V4L2_SLICED_CAPTION_525, /* 6 */
243 0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */
244 0, 0, 0, 0
245 };
246 int is_pal = !(state->std & V4L2_STD_525_60);
247 int i;
248
249 fmt = arg;
250 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
251 return -EINVAL;
252 svbi = &fmt->fmt.sliced;
253 memset(svbi, 0, sizeof(*svbi));
254 /* we're done if raw VBI is active */
255 if ((cx18_av_read(cx, 0x404) & 0x10) == 0)
256 break;
257
258 if (is_pal) {
259 for (i = 7; i <= 23; i++) {
260 u8 v = cx18_av_read(cx, 0x424 + i - 7);
261
262 svbi->service_lines[0][i] = lcr2vbi[v >> 4];
263 svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
264 svbi->service_set |= svbi->service_lines[0][i] |
265 svbi->service_lines[1][i];
266 }
267 } else {
268 for (i = 10; i <= 21; i++) {
269 u8 v = cx18_av_read(cx, 0x424 + i - 10);
270
271 svbi->service_lines[0][i] = lcr2vbi[v >> 4];
272 svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
273 svbi->service_set |= svbi->service_lines[0][i] |
274 svbi->service_lines[1][i];
275 }
276 }
277 break;
278 }
279
280 case VIDIOC_S_FMT:
281 {
282 int is_pal = !(state->std & V4L2_STD_525_60);
283 int vbi_offset = is_pal ? 1 : 0;
284 int i, x;
285 u8 lcr[24];
286
287 fmt = arg;
288 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
289 return -EINVAL;
290 svbi = &fmt->fmt.sliced;
291 if (svbi->service_set == 0) {
292 /* raw VBI */
293 memset(svbi, 0, sizeof(*svbi));
294
295 /* Setup VBI */
296 cx18_av_vbi_setup(cx);
297
298 /* VBI Offset */
299 cx18_av_write(cx, 0x47f, vbi_offset);
300 cx18_av_write(cx, 0x404, 0x2e);
301 break;
302 }
303
304 for (x = 0; x <= 23; x++)
305 lcr[x] = 0x00;
306
307 /* Setup VBI */
308 cx18_av_vbi_setup(cx);
309
310 /* Sliced VBI */
311 cx18_av_write(cx, 0x404, 0x32); /* Ancillary data */
312 cx18_av_write(cx, 0x406, 0x13);
313 cx18_av_write(cx, 0x47f, vbi_offset);
314
315 if (is_pal) {
316 for (i = 0; i <= 6; i++)
317 svbi->service_lines[0][i] =
318 svbi->service_lines[1][i] = 0;
319 } else {
320 for (i = 0; i <= 9; i++)
321 svbi->service_lines[0][i] =
322 svbi->service_lines[1][i] = 0;
323
324 for (i = 22; i <= 23; i++)
325 svbi->service_lines[0][i] =
326 svbi->service_lines[1][i] = 0;
327 }
328
329 for (i = 7; i <= 23; i++) {
330 for (x = 0; x <= 1; x++) {
331 switch (svbi->service_lines[1-x][i]) {
332 case V4L2_SLICED_TELETEXT_B:
333 lcr[i] |= 1 << (4 * x);
334 break;
335 case V4L2_SLICED_WSS_625:
336 lcr[i] |= 4 << (4 * x);
337 break;
338 case V4L2_SLICED_CAPTION_525:
339 lcr[i] |= 6 << (4 * x);
340 break;
341 case V4L2_SLICED_VPS:
342 lcr[i] |= 9 << (4 * x);
343 break;
344 }
345 }
346 }
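		/*
		 * Each lcr[] byte now packs two 4-bit service codes: the code
		 * for field 0 in the high nibble (x == 1) and for field 1 in
		 * the low nibble (x == 0), mirroring how VIDIOC_G_FMT above
		 * splits v >> 4 and v & 0xf. For example, VPS on field 0 plus
		 * Teletext B on field 1 gives lcr[i] = (9 << 4) | 1 = 0x91.
		 */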
347
348 if (is_pal) {
349 for (x = 1, i = 0x424; i <= 0x434; i++, x++)
350 cx18_av_write(cx, i, lcr[6 + x]);
351 } else {
352 for (x = 1, i = 0x424; i <= 0x430; i++, x++)
353 cx18_av_write(cx, i, lcr[9 + x]);
354 for (i = 0x431; i <= 0x434; i++)
355 cx18_av_write(cx, i, 0);
356 }
357
358 cx18_av_write(cx, 0x43c, 0x16);
359 cx18_av_write(cx, 0x474, is_pal ? 0x2a : 0x22);
360 break;
361 }
362
363 case VIDIOC_INT_DECODE_VBI_LINE:
364 {
365 struct v4l2_decode_vbi_line *vbi = arg;
366 u8 *p = vbi->p;
367 int id1, id2, l, err = 0;
368
369 if (p[0] || p[1] != 0xff || p[2] != 0xff ||
370 (p[3] != 0x55 && p[3] != 0x91)) {
371 vbi->line = vbi->type = 0;
372 break;
373 }
374
375 p += 4;
376 id1 = p[-1];
377 id2 = p[0] & 0xf;
378 l = p[2] & 0x3f;
379 l += state->vbi_line_offset;
380 p += 4;
381
382 switch (id2) {
383 case 1:
384 id2 = V4L2_SLICED_TELETEXT_B;
385 break;
386 case 4:
387 id2 = V4L2_SLICED_WSS_625;
388 break;
389 case 6:
390 id2 = V4L2_SLICED_CAPTION_525;
391 err = !odd_parity(p[0]) || !odd_parity(p[1]);
392 break;
393 case 9:
394 id2 = V4L2_SLICED_VPS;
395 if (decode_vps(p, p) != 0)
396 err = 1;
397 break;
398 default:
399 id2 = 0;
400 err = 1;
401 break;
402 }
403
404 vbi->type = err ? 0 : id2;
405 vbi->line = err ? 0 : l;
406 vbi->is_second_field = err ? 0 : (id1 == 0x55);
407 vbi->p = p;
408 break;
409 }
410 }
411
412 return 0;
413}
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
new file mode 100644
index 000000000000..f5e3ba1f5354
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -0,0 +1,277 @@
1/*
2 * cx18 functions to query card hardware
3 *
4 * Derived from ivtv-cards.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-cards.h"
26#include "cx18-i2c.h"
27#include <media/cs5345.h>
28
29/********************** card configuration *******************************/
30
31/* usual i2c tuner addresses to probe */
32static struct cx18_card_tuner_i2c cx18_i2c_std = {
33 .radio = { I2C_CLIENT_END },
34 .demod = { 0x43, I2C_CLIENT_END },
35 .tv = { 0x61, 0x60, I2C_CLIENT_END },
36};
37
38/* Please add new PCI IDs to: http://pci-ids.ucw.cz/iii
39 This keeps the PCI ID database up to date. Note that the entries
40 must be added under vendor 0x4444 (Conexant) as subsystem IDs.
41 New vendor IDs should still be added to the vendor ID list. */
42
43/* Hauppauge HVR-1600 cards */
44
45/* Note: for Hauppauge cards the tveeprom information is used instead
46 of PCI IDs */
47static const struct cx18_card cx18_card_hvr1600_esmt = {
48 .type = CX18_CARD_HVR_1600_ESMT,
49 .name = "Hauppauge HVR-1600",
50 .comment = "DVB & VBI are not yet supported\n",
51 .v4l2_capabilities = CX18_CAP_ENCODER,
52 .hw_audio_ctrl = CX18_HW_CX23418,
53 .hw_muxer = CX18_HW_CS5345,
54 .hw_all = CX18_HW_TVEEPROM | CX18_HW_TUNER | CX18_HW_CS5345,
55 .video_inputs = {
56 { CX18_CARD_INPUT_VID_TUNER, 0, CX23418_COMPOSITE7 },
57 { CX18_CARD_INPUT_SVIDEO1, 1, CX23418_SVIDEO1 },
58 { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE3 },
59 { CX18_CARD_INPUT_SVIDEO2, 2, CX23418_SVIDEO2 },
60 { CX18_CARD_INPUT_COMPOSITE2, 2, CX23418_COMPOSITE4 },
61 },
62 .audio_inputs = {
63 { CX18_CARD_INPUT_AUD_TUNER,
64 CX23418_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
65 { CX18_CARD_INPUT_LINE_IN1,
66 CX23418_AUDIO_SERIAL, CS5345_IN_2 },
67 { CX18_CARD_INPUT_LINE_IN2,
68 CX23418_AUDIO_SERIAL, CS5345_IN_2 },
69 },
70 .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
71 CX23418_AUDIO_SERIAL, 0 },
72 .ddr = {
73 /* ESMT M13S128324A-5B memory */
74 .chip_config = 0x003,
75 .refresh = 0x30c,
76 .timing1 = 0x44220e82,
77 .timing2 = 0x08,
78 .tune_lane = 0,
79 .initial_emrs = 0,
80 },
81 .gpio_init.initial_value = 0x3001,
82 .gpio_init.direction = 0x3001,
83 .i2c = &cx18_i2c_std,
84};
85
86static const struct cx18_card cx18_card_hvr1600_samsung = {
87 .type = CX18_CARD_HVR_1600_SAMSUNG,
88 .name = "Hauppauge HVR-1600 (Preproduction)",
89 .comment = "DVB & VBI are not yet supported\n",
90 .v4l2_capabilities = CX18_CAP_ENCODER,
91 .hw_audio_ctrl = CX18_HW_CX23418,
92 .hw_muxer = CX18_HW_CS5345,
93 .hw_all = CX18_HW_TVEEPROM | CX18_HW_TUNER | CX18_HW_CS5345,
94 .video_inputs = {
95 { CX18_CARD_INPUT_VID_TUNER, 0, CX23418_COMPOSITE7 },
96 { CX18_CARD_INPUT_SVIDEO1, 1, CX23418_SVIDEO1 },
97 { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE3 },
98 { CX18_CARD_INPUT_SVIDEO2, 2, CX23418_SVIDEO2 },
99 { CX18_CARD_INPUT_COMPOSITE2, 2, CX23418_COMPOSITE4 },
100 },
101 .audio_inputs = {
102 { CX18_CARD_INPUT_AUD_TUNER,
103 CX23418_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
104 { CX18_CARD_INPUT_LINE_IN1,
105 CX23418_AUDIO_SERIAL, CS5345_IN_2 },
106 { CX18_CARD_INPUT_LINE_IN2,
107 CX23418_AUDIO_SERIAL, CS5345_IN_2 },
108 },
109 .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
110 CX23418_AUDIO_SERIAL, 0 },
111 .ddr = {
112 /* Samsung K4D263238G-VC33 memory */
113 .chip_config = 0x003,
114 .refresh = 0x30c,
115 .timing1 = 0x23230b73,
116 .timing2 = 0x08,
117 .tune_lane = 0,
118 .initial_emrs = 2,
119 },
120 .gpio_init.initial_value = 0x3001,
121 .gpio_init.direction = 0x3001,
122 .i2c = &cx18_i2c_std,
123};
124
125/* ------------------------------------------------------------------------- */
126
127/* Compro VideoMate H900: not working at the moment! */
128
129static const struct cx18_card_pci_info cx18_pci_h900[] = {
130 { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_COMPRO, 0xe100 },
131 { 0, 0, 0 }
132};
133
134static const struct cx18_card cx18_card_h900 = {
135 .type = CX18_CARD_COMPRO_H900,
136 .name = "Compro VideoMate H900",
137 .comment = "Not yet supported!\n",
138 .v4l2_capabilities = 0,
139 .hw_audio_ctrl = CX18_HW_CX23418,
140 .hw_all = CX18_HW_TUNER,
141 .video_inputs = {
142 { CX18_CARD_INPUT_VID_TUNER, 0, CX23418_COMPOSITE7 },
143 { CX18_CARD_INPUT_SVIDEO1, 1, CX23418_SVIDEO1 },
144 { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE3 },
145 },
146 .audio_inputs = {
147 { CX18_CARD_INPUT_AUD_TUNER,
148 CX23418_AUDIO8, 0 },
149 { CX18_CARD_INPUT_LINE_IN1,
150 CX23418_AUDIO_SERIAL, 0 },
151 },
152 .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
153 CX23418_AUDIO_SERIAL, 0 },
154 .tuners = {
155 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
156 },
157 .ddr = {
158 /* EtronTech EM6A9160TS-5G memory */
159 .chip_config = 0x50003,
160 .refresh = 0x753,
161 .timing1 = 0x24330e84,
162 .timing2 = 0x1f,
163 .tune_lane = 0,
164 .initial_emrs = 0,
165 },
166 .pci_list = cx18_pci_h900,
167 .i2c = &cx18_i2c_std,
168};
169
170/* ------------------------------------------------------------------------- */
171
172/* Yuan MPC718: not working at the moment! */
173
174static const struct cx18_card_pci_info cx18_pci_mpc718[] = {
175 { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_YUAN, 0x0718 },
176 { 0, 0, 0 }
177};
178
179static const struct cx18_card cx18_card_mpc718 = {
180 .type = CX18_CARD_YUAN_MPC718,
181 .name = "Yuan MPC718",
182 .comment = "Not yet supported!\n",
183 .v4l2_capabilities = 0,
184 .hw_audio_ctrl = CX18_HW_CX23418,
185 .hw_all = CX18_HW_TUNER,
186 .video_inputs = {
187 { CX18_CARD_INPUT_VID_TUNER, 0, CX23418_COMPOSITE7 },
188 { CX18_CARD_INPUT_SVIDEO1, 1, CX23418_SVIDEO1 },
189 { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE3 },
190 },
191 .audio_inputs = {
192 { CX18_CARD_INPUT_AUD_TUNER,
193 CX23418_AUDIO8, 0 },
194 { CX18_CARD_INPUT_LINE_IN1,
195 CX23418_AUDIO_SERIAL, 0 },
196 },
197 .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
198 CX23418_AUDIO_SERIAL, 0 },
199 .tuners = {
200 /* XC3028 tuner */
201 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
202 },
203 /* tuner reset */
204 .gpio_init = { .direction = 0x1000, .initial_value = 0x1000 },
205 .ddr = {
206 /* Probably Samsung K4D263238G-VC33 memory */
207 .chip_config = 0x003,
208 .refresh = 0x30c,
209 .timing1 = 0x23230b73,
210 .timing2 = 0x08,
211 .tune_lane = 0,
212 .initial_emrs = 2,
213 },
214 .pci_list = cx18_pci_mpc718,
215 .i2c = &cx18_i2c_std,
216};
217
218static const struct cx18_card *cx18_card_list[] = {
219 &cx18_card_hvr1600_esmt,
220 &cx18_card_hvr1600_samsung,
221 &cx18_card_h900,
222 &cx18_card_mpc718,
223};
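/*
 * The order of this list is significant: cx18_get_card() below indexes it
 * directly, so entries must stay in step with the CX18_CARD_* type values
 * (and with the 1-based cardtype= module parameter in cx18-driver.c).
 */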
224
225const struct cx18_card *cx18_get_card(u16 index)
226{
227 if (index >= ARRAY_SIZE(cx18_card_list))
228 return NULL;
229 return cx18_card_list[index];
230}
231
232int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input)
233{
234 const struct cx18_card_video_input *card_input =
235 cx->card->video_inputs + index;
236 static const char * const input_strs[] = {
237 "Tuner 1",
238 "S-Video 1",
239 "S-Video 2",
240 "Composite 1",
241 "Composite 2",
242 "Composite 3"
243 };
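	/* The CX18_CARD_INPUT_* video_type values are 1-based (tuner == 1),
	   so the string lookup below uses video_type - 1. */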
244
245 memset(input, 0, sizeof(*input));
246 if (index >= cx->nof_inputs)
247 return -EINVAL;
248 input->index = index;
249 strlcpy(input->name, input_strs[card_input->video_type - 1],
250 sizeof(input->name));
251 input->type = (card_input->video_type == CX18_CARD_INPUT_VID_TUNER ?
252 V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA);
253 input->audioset = (1 << cx->nof_audio_inputs) - 1;
254 input->std = (input->type == V4L2_INPUT_TYPE_TUNER) ?
255 cx->tuner_std : V4L2_STD_ALL;
256 return 0;
257}
258
259int cx18_get_audio_input(struct cx18 *cx, u16 index, struct v4l2_audio *audio)
260{
261 const struct cx18_card_audio_input *aud_input =
262 cx->card->audio_inputs + index;
263 static const char * const input_strs[] = {
264 "Tuner 1",
265 "Line In 1",
266 "Line In 2"
267 };
268
269 memset(audio, 0, sizeof(*audio));
270 if (index >= cx->nof_audio_inputs)
271 return -EINVAL;
272 strlcpy(audio->name, input_strs[aud_input->audio_type - 1],
273 sizeof(audio->name));
274 audio->index = index;
275 audio->capability = V4L2_AUDCAP_STEREO;
276 return 0;
277}
diff --git a/drivers/media/video/cx18/cx18-cards.h b/drivers/media/video/cx18/cx18-cards.h
new file mode 100644
index 000000000000..bca249bdd337
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-cards.h
@@ -0,0 +1,170 @@
1/*
2 * cx18 functions to query card hardware
3 *
4 * Derived from ivtv-cards.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23/* hardware flags */
24#define CX18_HW_TUNER (1 << 0)
25#define CX18_HW_TVEEPROM (1 << 1)
26#define CX18_HW_CS5345 (1 << 2)
27#define CX18_HW_GPIO (1 << 3)
28#define CX18_HW_CX23418 (1 << 4)
29#define CX18_HW_DVB (1 << 5)
30
31/* video inputs */
32#define CX18_CARD_INPUT_VID_TUNER 1
33#define CX18_CARD_INPUT_SVIDEO1 2
34#define CX18_CARD_INPUT_SVIDEO2 3
35#define CX18_CARD_INPUT_COMPOSITE1 4
36#define CX18_CARD_INPUT_COMPOSITE2 5
37#define CX18_CARD_INPUT_COMPOSITE3 6
38
39enum cx23418_video_input {
40 /* Composite video inputs In1-In8 */
41 CX23418_COMPOSITE1 = 1,
42 CX23418_COMPOSITE2,
43 CX23418_COMPOSITE3,
44 CX23418_COMPOSITE4,
45 CX23418_COMPOSITE5,
46 CX23418_COMPOSITE6,
47 CX23418_COMPOSITE7,
48 CX23418_COMPOSITE8,
49
50 /* S-Video inputs consist of one luma input (In1-In4) ORed with one
51 chroma input (In5-In8) */
52 CX23418_SVIDEO_LUMA1 = 0x10,
53 CX23418_SVIDEO_LUMA2 = 0x20,
54 CX23418_SVIDEO_LUMA3 = 0x30,
55 CX23418_SVIDEO_LUMA4 = 0x40,
56 CX23418_SVIDEO_CHROMA4 = 0x400,
57 CX23418_SVIDEO_CHROMA5 = 0x500,
58 CX23418_SVIDEO_CHROMA6 = 0x600,
59 CX23418_SVIDEO_CHROMA7 = 0x700,
60 CX23418_SVIDEO_CHROMA8 = 0x800,
61
62 /* S-Video aliases for common luma/chroma combinations */
63 CX23418_SVIDEO1 = 0x510,
64 CX23418_SVIDEO2 = 0x620,
65 CX23418_SVIDEO3 = 0x730,
66 CX23418_SVIDEO4 = 0x840,
67};
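/* e.g. CX23418_SVIDEO1 (0x510) == CX23418_SVIDEO_CHROMA5 | CX23418_SVIDEO_LUMA1 */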
68
69/* audio inputs */
70#define CX18_CARD_INPUT_AUD_TUNER 1
71#define CX18_CARD_INPUT_LINE_IN1 2
72#define CX18_CARD_INPUT_LINE_IN2 3
73
74#define CX18_CARD_MAX_VIDEO_INPUTS 6
75#define CX18_CARD_MAX_AUDIO_INPUTS 3
76#define CX18_CARD_MAX_TUNERS 2
77
78enum cx23418_audio_input {
79 /* Audio inputs: serial or In4-In8 */
80 CX23418_AUDIO_SERIAL,
81 CX23418_AUDIO4 = 4,
82 CX23418_AUDIO5,
83 CX23418_AUDIO6,
84 CX23418_AUDIO7,
85 CX23418_AUDIO8,
86};
87
88/* V4L2 capability aliases */
89#define CX18_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \
90 V4L2_CAP_AUDIO | V4L2_CAP_READWRITE)
91/* | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE) not yet */
92
93struct cx18_card_video_input {
94 u8 video_type; /* video input type */
95 u8 audio_index; /* index in cx18_card_audio_input array */
96 u16 video_input; /* hardware video input */
97};
98
99struct cx18_card_audio_input {
100 u8 audio_type; /* audio input type */
101 u32 audio_input; /* hardware audio input */
102 u16 muxer_input; /* hardware muxer input for boards with a
103 multiplexer chip */
104};
105
106struct cx18_card_pci_info {
107 u16 device;
108 u16 subsystem_vendor;
109 u16 subsystem_device;
110};
111
112/* GPIO definitions */
113
114/* The mask is the set of bits used by the operation */
115
116struct cx18_gpio_init { /* set initial GPIO DIR and OUT values */
117 u16 direction; /* DIR setting. Leave to 0 if no init is needed */
118 u16 initial_value;
119};
120
121struct cx18_card_tuner {
122 v4l2_std_id std; /* standard for which the tuner is suitable */
123 int tuner; /* tuner ID (from tuner.h) */
124};
125
126struct cx18_card_tuner_i2c {
127 unsigned short radio[2];/* radio tuner i2c address to probe */
128 unsigned short demod[2];/* demodulator i2c address to probe */
129 unsigned short tv[4]; /* tv tuner i2c addresses to probe */
130};
131
132struct cx18_ddr { /* DDR config data */
133 u32 chip_config;
134 u32 refresh;
135 u32 timing1;
136 u32 timing2;
137 u32 tune_lane;
138 u32 initial_emrs;
139};
140
141/* for card information/parameters */
142struct cx18_card {
143 int type;
144 char *name;
145 char *comment;
146 u32 v4l2_capabilities;
147 u32 hw_audio_ctrl; /* hardware used for the V4L2 controls (only
148 1 dev allowed) */
149 u32 hw_muxer; /* hardware used to multiplex audio input */
150 u32 hw_all; /* all hardware used by the board */
151 struct cx18_card_video_input video_inputs[CX18_CARD_MAX_VIDEO_INPUTS];
152 struct cx18_card_audio_input audio_inputs[CX18_CARD_MAX_AUDIO_INPUTS];
153 struct cx18_card_audio_input radio_input;
154
155 /* GPIO card-specific settings */
156 struct cx18_gpio_init gpio_init;
157
158 struct cx18_card_tuner tuners[CX18_CARD_MAX_TUNERS];
159 struct cx18_card_tuner_i2c *i2c;
160
161 struct cx18_ddr ddr;
162
163 /* list of device and subsystem vendor/devices that
164 correspond to this card type. */
165 const struct cx18_card_pci_info *pci_list;
166};
167
168int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input);
169int cx18_get_audio_input(struct cx18 *cx, u16 index, struct v4l2_audio *input);
170const struct cx18_card *cx18_get_card(u16 index);
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
new file mode 100644
index 000000000000..2bdac5ebbb0d
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -0,0 +1,306 @@
1/*
2 * cx18 ioctl control functions
3 *
4 * Derived from ivtv-controls.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-av-core.h"
26#include "cx18-cards.h"
27#include "cx18-ioctl.h"
28#include "cx18-audio.h"
29#include "cx18-i2c.h"
30#include "cx18-mailbox.h"
31#include "cx18-controls.h"
32
33static const u32 user_ctrls[] = {
34 V4L2_CID_USER_CLASS,
35 V4L2_CID_BRIGHTNESS,
36 V4L2_CID_CONTRAST,
37 V4L2_CID_SATURATION,
38 V4L2_CID_HUE,
39 V4L2_CID_AUDIO_VOLUME,
40 V4L2_CID_AUDIO_BALANCE,
41 V4L2_CID_AUDIO_BASS,
42 V4L2_CID_AUDIO_TREBLE,
43 V4L2_CID_AUDIO_MUTE,
44 V4L2_CID_AUDIO_LOUDNESS,
45 0
46};
47
48static const u32 *ctrl_classes[] = {
49 user_ctrls,
50 cx2341x_mpeg_ctrls,
51 NULL
52};
53
54static int cx18_queryctrl(struct cx18 *cx, struct v4l2_queryctrl *qctrl)
55{
56 const char *name;
57
58 CX18_DEBUG_IOCTL("VIDIOC_QUERYCTRL(%08x)\n", qctrl->id);
59
60 qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
61 if (qctrl->id == 0)
62 return -EINVAL;
63
64 switch (qctrl->id) {
65 /* Standard V4L2 controls */
66 case V4L2_CID_BRIGHTNESS:
67 case V4L2_CID_HUE:
68 case V4L2_CID_SATURATION:
69 case V4L2_CID_CONTRAST:
70 if (cx18_av_cmd(cx, VIDIOC_QUERYCTRL, qctrl))
71 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
72 return 0;
73
74 case V4L2_CID_AUDIO_VOLUME:
75 case V4L2_CID_AUDIO_MUTE:
76 case V4L2_CID_AUDIO_BALANCE:
77 case V4L2_CID_AUDIO_BASS:
78 case V4L2_CID_AUDIO_TREBLE:
79 case V4L2_CID_AUDIO_LOUDNESS:
80 if (cx18_i2c_hw(cx, cx->card->hw_audio_ctrl, VIDIOC_QUERYCTRL, qctrl))
81 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
82 return 0;
83
84 default:
85 if (cx2341x_ctrl_query(&cx->params, qctrl))
86 qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
87 return 0;
88 }
89 strncpy(qctrl->name, name, sizeof(qctrl->name) - 1);
90 qctrl->name[sizeof(qctrl->name) - 1] = 0;
91 return 0;
92}
93
94static int cx18_querymenu(struct cx18 *cx, struct v4l2_querymenu *qmenu)
95{
96 struct v4l2_queryctrl qctrl;
97
98 qctrl.id = qmenu->id;
99 cx18_queryctrl(cx, &qctrl);
100 return v4l2_ctrl_query_menu(qmenu, &qctrl, cx2341x_ctrl_get_menu(qmenu->id));
101}
102
103static int cx18_s_ctrl(struct cx18 *cx, struct v4l2_control *vctrl)
104{
105 s32 v = vctrl->value;
106
107 CX18_DEBUG_IOCTL("VIDIOC_S_CTRL(%08x, %x)\n", vctrl->id, v);
108
109 switch (vctrl->id) {
110 /* Standard V4L2 controls */
111 case V4L2_CID_BRIGHTNESS:
112 case V4L2_CID_HUE:
113 case V4L2_CID_SATURATION:
114 case V4L2_CID_CONTRAST:
115 return cx18_av_cmd(cx, VIDIOC_S_CTRL, vctrl);
116
117 case V4L2_CID_AUDIO_VOLUME:
118 case V4L2_CID_AUDIO_MUTE:
119 case V4L2_CID_AUDIO_BALANCE:
120 case V4L2_CID_AUDIO_BASS:
121 case V4L2_CID_AUDIO_TREBLE:
122 case V4L2_CID_AUDIO_LOUDNESS:
123 return cx18_i2c_hw(cx, cx->card->hw_audio_ctrl, VIDIOC_S_CTRL, vctrl);
124
125 default:
126 CX18_DEBUG_IOCTL("invalid control %x\n", vctrl->id);
127 return -EINVAL;
128 }
129 return 0;
130}
131
132static int cx18_g_ctrl(struct cx18 *cx, struct v4l2_control *vctrl)
133{
134 CX18_DEBUG_IOCTL("VIDIOC_G_CTRL(%08x)\n", vctrl->id);
135
136 switch (vctrl->id) {
137 /* Standard V4L2 controls */
138 case V4L2_CID_BRIGHTNESS:
139 case V4L2_CID_HUE:
140 case V4L2_CID_SATURATION:
141 case V4L2_CID_CONTRAST:
142 return cx18_av_cmd(cx, VIDIOC_G_CTRL, vctrl);
143
144 case V4L2_CID_AUDIO_VOLUME:
145 case V4L2_CID_AUDIO_MUTE:
146 case V4L2_CID_AUDIO_BALANCE:
147 case V4L2_CID_AUDIO_BASS:
148 case V4L2_CID_AUDIO_TREBLE:
149 case V4L2_CID_AUDIO_LOUDNESS:
150 return cx18_i2c_hw(cx, cx->card->hw_audio_ctrl, VIDIOC_G_CTRL, vctrl);
151 default:
152 CX18_DEBUG_IOCTL("invalid control %x\n", vctrl->id);
153 return -EINVAL;
154 }
155 return 0;
156}
157
158static int cx18_setup_vbi_fmt(struct cx18 *cx, enum v4l2_mpeg_stream_vbi_fmt fmt)
159{
160 if (!(cx->v4l2_cap & V4L2_CAP_SLICED_VBI_CAPTURE))
161 return -EINVAL;
162 if (atomic_read(&cx->capturing) > 0)
163 return -EBUSY;
164
165 /* First try to allocate sliced VBI buffers if needed. */
166 if (fmt && cx->vbi.sliced_mpeg_data[0] == NULL) {
167 int i;
168
169 for (i = 0; i < CX18_VBI_FRAMES; i++) {
170 /* Yuck, hardcoded. Needs to be a define */
171 cx->vbi.sliced_mpeg_data[i] = kmalloc(2049, GFP_KERNEL);
172 if (cx->vbi.sliced_mpeg_data[i] == NULL) {
173 while (--i >= 0) {
174 kfree(cx->vbi.sliced_mpeg_data[i]);
175 cx->vbi.sliced_mpeg_data[i] = NULL;
176 }
177 return -ENOMEM;
178 }
179 }
180 }
181
182 cx->vbi.insert_mpeg = fmt;
183
184 if (cx->vbi.insert_mpeg == 0)
185 return 0;
186 /* Need sliced data for mpeg insertion */
187 if (cx18_get_service_set(cx->vbi.sliced_in) == 0) {
188 if (cx->is_60hz)
189 cx->vbi.sliced_in->service_set = V4L2_SLICED_CAPTION_525;
190 else
191 cx->vbi.sliced_in->service_set = V4L2_SLICED_WSS_625;
192 cx18_expand_service_set(cx->vbi.sliced_in, cx->is_50hz);
193 }
194 return 0;
195}
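/*
 * Note on the allocation loop above: if any of the CX18_VBI_FRAMES buffers
 * fails to allocate, the while (--i >= 0) rollback frees the ones already
 * obtained and NULLs their pointers, so a -ENOMEM return leaves
 * sliced_mpeg_data[] in its fully unallocated state.
 */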
196
197int cx18_control_ioctls(struct cx18 *cx, unsigned int cmd, void *arg)
198{
199 struct v4l2_control ctrl;
200
201 switch (cmd) {
202 case VIDIOC_QUERYMENU:
203 CX18_DEBUG_IOCTL("VIDIOC_QUERYMENU\n");
204 return cx18_querymenu(cx, arg);
205
206 case VIDIOC_QUERYCTRL:
207 return cx18_queryctrl(cx, arg);
208
209 case VIDIOC_S_CTRL:
210 return cx18_s_ctrl(cx, arg);
211
212 case VIDIOC_G_CTRL:
213 return cx18_g_ctrl(cx, arg);
214
215 case VIDIOC_S_EXT_CTRLS:
216 {
217 struct v4l2_ext_controls *c = arg;
218
219 if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
220 int i;
221 int err = 0;
222
223 for (i = 0; i < c->count; i++) {
224 ctrl.id = c->controls[i].id;
225 ctrl.value = c->controls[i].value;
226 err = cx18_s_ctrl(cx, &ctrl);
227 c->controls[i].value = ctrl.value;
228 if (err) {
229 c->error_idx = i;
230 break;
231 }
232 }
233 return err;
234 }
235 CX18_DEBUG_IOCTL("VIDIOC_S_EXT_CTRLS\n");
236 if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
237 struct cx2341x_mpeg_params p = cx->params;
238 int err = cx2341x_ext_ctrls(&p, atomic_read(&cx->capturing), arg, cmd);
239
240 if (err)
241 return err;
242
243 if (p.video_encoding != cx->params.video_encoding) {
244 int is_mpeg1 = p.video_encoding ==
245 V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
246 struct v4l2_format fmt;
247
248 /* fix videodecoder resolution */
249 fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
250 fmt.fmt.pix.width = cx->params.width / (is_mpeg1 ? 2 : 1);
251 fmt.fmt.pix.height = cx->params.height;
252 cx18_av_cmd(cx, VIDIOC_S_FMT, &fmt);
253 }
254 err = cx2341x_update(cx, cx18_api_func, &cx->params, &p);
255 if (!err && cx->params.stream_vbi_fmt != p.stream_vbi_fmt)
256 err = cx18_setup_vbi_fmt(cx, p.stream_vbi_fmt);
257 cx->params = p;
258 cx->dualwatch_stereo_mode = p.audio_properties & 0x0300;
259 cx18_audio_set_audio_clock_freq(cx, p.audio_properties & 0x03);
260 return err;
261 }
262 return -EINVAL;
263 }
264
265 case VIDIOC_G_EXT_CTRLS:
266 {
267 struct v4l2_ext_controls *c = arg;
268
269 if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
270 int i;
271 int err = 0;
272
273 for (i = 0; i < c->count; i++) {
274 ctrl.id = c->controls[i].id;
275 ctrl.value = c->controls[i].value;
276 err = cx18_g_ctrl(cx, &ctrl);
277 c->controls[i].value = ctrl.value;
278 if (err) {
279 c->error_idx = i;
280 break;
281 }
282 }
283 return err;
284 }
285 CX18_DEBUG_IOCTL("VIDIOC_G_EXT_CTRLS\n");
286 if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
287 return cx2341x_ext_ctrls(&cx->params, 0, arg, cmd);
288 return -EINVAL;
289 }
290
291 case VIDIOC_TRY_EXT_CTRLS:
292 {
293 struct v4l2_ext_controls *c = arg;
294
295 CX18_DEBUG_IOCTL("VIDIOC_TRY_EXT_CTRLS\n");
296 if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
297 return cx2341x_ext_ctrls(&cx->params,
298 atomic_read(&cx->capturing), arg, cmd);
299 return -EINVAL;
300 }
301
302 default:
303 return -EINVAL;
304 }
305 return 0;
306}
diff --git a/drivers/media/video/cx18/cx18-controls.h b/drivers/media/video/cx18/cx18-controls.h
new file mode 100644
index 000000000000..6e985cf422a0
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-controls.h
@@ -0,0 +1,24 @@
1/*
2 * cx18 ioctl control functions
3 *
4 * Derived from ivtv-controls.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24int cx18_control_ioctls(struct cx18 *cx, unsigned int cmd, void *arg);
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
new file mode 100644
index 000000000000..8f5ed9b4bf83
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -0,0 +1,971 @@
1/*
2 * cx18 driver initialization and card probing
3 *
4 * Derived from ivtv-driver.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-version.h"
26#include "cx18-cards.h"
27#include "cx18-i2c.h"
28#include "cx18-irq.h"
29#include "cx18-gpio.h"
30#include "cx18-firmware.h"
31#include "cx18-streams.h"
32#include "cx18-av-core.h"
33#include "cx18-scb.h"
34#include "cx18-mailbox.h"
35#include "cx18-ioctl.h"
36#include "tuner-xc2028.h"
37
38#include <media/tveeprom.h>
39
40
41/* var to keep track of the number of array elements in use */
42int cx18_cards_active;
43
44/* If you have already X v4l cards, then set this to X. This way
45 the device numbers stay matched. Example: you have a WinTV card
46 without radio and a Compro H900 with. Normally this would give a
47 video1 device together with a radio0 device for the Compro. By
48 setting this to 1 you ensure that radio0 is now also radio1. */
49int cx18_first_minor;
50
51/* Master variable for all cx18 info */
52struct cx18 *cx18_cards[CX18_MAX_CARDS];
53
54/* Protects cx18_cards_active */
55DEFINE_SPINLOCK(cx18_cards_lock);
56
57/* add your revision and whatnot here */
58static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
59 {PCI_VENDOR_ID_CX, PCI_DEVICE_ID_CX23418,
60 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
61 {0,}
62};
63
64MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
65
66/* Parameter declarations */
67static int cardtype[CX18_MAX_CARDS];
68static int tuner[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
69 -1, -1, -1, -1, -1, -1, -1, -1,
70 -1, -1, -1, -1, -1, -1, -1, -1,
71 -1, -1, -1, -1, -1, -1, -1, -1 };
72static int radio[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
73 -1, -1, -1, -1, -1, -1, -1, -1,
74 -1, -1, -1, -1, -1, -1, -1, -1,
75 -1, -1, -1, -1, -1, -1, -1, -1 };
76
77static int cardtype_c = 1;
78static int tuner_c = 1;
79static int radio_c = 1;
80static char pal[] = "--";
81static char secam[] = "--";
82static char ntsc[] = "-";
83
84/* Buffers */
85static int enc_mpg_buffers = CX18_DEFAULT_ENC_MPG_BUFFERS;
86static int enc_ts_buffers = CX18_DEFAULT_ENC_TS_BUFFERS;
87static int enc_yuv_buffers = CX18_DEFAULT_ENC_YUV_BUFFERS;
88static int enc_vbi_buffers = CX18_DEFAULT_ENC_VBI_BUFFERS;
89static int enc_pcm_buffers = CX18_DEFAULT_ENC_PCM_BUFFERS;
90
91static int cx18_pci_latency = 1;
92
93int cx18_debug;
94
95module_param_array(tuner, int, &tuner_c, 0644);
96module_param_array(radio, bool, &radio_c, 0644);
97module_param_array(cardtype, int, &cardtype_c, 0644);
98module_param_string(pal, pal, sizeof(pal), 0644);
99module_param_string(secam, secam, sizeof(secam), 0644);
100module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
101module_param_named(debug, cx18_debug, int, 0644);
102module_param(cx18_pci_latency, int, 0644);
103module_param(cx18_first_minor, int, 0644);
104
105module_param(enc_mpg_buffers, int, 0644);
106module_param(enc_ts_buffers, int, 0644);
107module_param(enc_yuv_buffers, int, 0644);
108module_param(enc_vbi_buffers, int, 0644);
109module_param(enc_pcm_buffers, int, 0644);
110
111MODULE_PARM_DESC(tuner, "Tuner type selection,\n"
112 "\t\t\tsee tuner.h for values");
113MODULE_PARM_DESC(radio,
114 "Enable or disable the radio. Use only if autodetection\n"
115 "\t\t\tfails. 0 = disable, 1 = enable");
116MODULE_PARM_DESC(cardtype,
117 "Only use this option if your card is not detected properly.\n"
118 "\t\tSpecify card type:\n"
119 "\t\t\t 1 = Hauppauge HVR 1600 (ESMT memory)\n"
120 "\t\t\t 2 = Hauppauge HVR 1600 (Samsung memory)\n"
121 "\t\t\t 3 = Compro VideoMate H900\n"
122 "\t\t\t 4 = Yuan MPC718\n"
123 "\t\t\t 0 = Autodetect (default)\n"
124 "\t\t\t-1 = Ignore this card\n\t\t");
125MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
126MODULE_PARM_DESC(secam, "Set SECAM standard: B, G, H, D, K, L, LC");
127MODULE_PARM_DESC(ntsc, "Set NTSC standard: M, J, K");
128MODULE_PARM_DESC(debug,
129 "Debug level (bitmask). Default: 0\n"
130 "\t\t\t 1/0x0001: warning\n"
131 "\t\t\t 2/0x0002: info\n"
132 "\t\t\t 4/0x0004: mailbox\n"
133 "\t\t\t 8/0x0008: dma\n"
134 "\t\t\t 16/0x0010: ioctl\n"
135 "\t\t\t 32/0x0020: file\n"
136 "\t\t\t 64/0x0040: i2c\n"
137 "\t\t\t128/0x0080: irq\n"
138 "\t\t\t256/0x0100: high volume\n");
139MODULE_PARM_DESC(cx18_pci_latency,
140 "Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n"
141 "\t\t\tDefault: Yes");
142MODULE_PARM_DESC(enc_mpg_buffers,
143 "Encoder MPG Buffers (in MB)\n"
144 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_MPG_BUFFERS));
145MODULE_PARM_DESC(enc_ts_buffers,
146 "Encoder TS Buffers (in MB)\n"
147 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_TS_BUFFERS));
148MODULE_PARM_DESC(enc_yuv_buffers,
149 "Encoder YUV Buffers (in MB)\n"
150 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_YUV_BUFFERS));
151MODULE_PARM_DESC(enc_vbi_buffers,
152 "Encoder VBI Buffers (in MB)\n"
153 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_VBI_BUFFERS));
154MODULE_PARM_DESC(enc_pcm_buffers,
155 "Encoder PCM buffers (in MB)\n"
156 "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_PCM_BUFFERS));
157
158MODULE_PARM_DESC(cx18_first_minor, "Set minor assigned to first card");
159
160MODULE_AUTHOR("Hans Verkuil");
161MODULE_DESCRIPTION("CX23418 driver");
162MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder");
163MODULE_LICENSE("GPL");
164
165MODULE_VERSION(CX18_VERSION);
166
167int cx18_waitq(wait_queue_head_t *waitq)
168{
169 DEFINE_WAIT(wait);
170
171 prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE);
172 schedule();
173 finish_wait(waitq, &wait);
174 return signal_pending(current) ? -EINTR : 0;
175}
176
177/* Generic utility functions */
178int cx18_msleep_timeout(unsigned int msecs, int intr)
179{
180 int timeout = msecs_to_jiffies(msecs);
181 int sig;
182
183 do {
184 set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
185 timeout = schedule_timeout(timeout);
186 sig = intr ? signal_pending(current) : 0;
187 } while (!sig && timeout);
188 return sig;
189}
190
191/* Release ioremapped memory */
192static void cx18_iounmap(struct cx18 *cx)
193{
194 if (cx == NULL)
195 return;
196
197 /* Release io memory */
198 if (cx->enc_mem != NULL) {
199 CX18_DEBUG_INFO("releasing enc_mem\n");
200 iounmap(cx->enc_mem);
201 cx->enc_mem = NULL;
202 }
203}
204
205/* Hauppauge card? get values from tveeprom */
206void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
207{
208 u8 eedata[256];
209
210 cx->i2c_client[0].addr = 0xA0 >> 1;
211 tveeprom_read(&cx->i2c_client[0], eedata, sizeof(eedata));
212 tveeprom_hauppauge_analog(&cx->i2c_client[0], tv, eedata);
213}
214
215static void cx18_process_eeprom(struct cx18 *cx)
216{
217 struct tveeprom tv;
218
219 cx18_read_eeprom(cx, &tv);
220
221 /* Many thanks to Steven Toth from Hauppauge for providing the
222 model numbers */
223 switch (tv.model) {
224 case 74000 ... 74099:
225 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
226 break;
227 case 74700 ... 74799:
228 cx->card = cx18_get_card(CX18_CARD_HVR_1600_SAMSUNG);
229 break;
230 case 0:
231 CX18_ERR("Invalid EEPROM\n");
232 return;
233 default:
234 CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model);
235 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
236 break;
237 }
238
239 cx->v4l2_cap = cx->card->v4l2_capabilities;
240 cx->card_name = cx->card->name;
241 cx->card_i2c = cx->card->i2c;
242
243 CX18_INFO("Autodetected %s\n", cx->card_name);
244
245 if (tv.tuner_type == TUNER_ABSENT)
246 CX18_ERR("tveeprom cannot autodetect tuner!");
247
248 if (cx->options.tuner == -1)
249 cx->options.tuner = tv.tuner_type;
250 if (cx->options.radio == -1)
251 cx->options.radio = (tv.has_radio != 0);
252
253 if (cx->std != 0)
254 /* user specified tuner standard */
255 return;
256
257 /* autodetect tuner standard */
258 if (tv.tuner_formats & V4L2_STD_PAL) {
259 CX18_DEBUG_INFO("PAL tuner detected\n");
260 cx->std |= V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
261 } else if (tv.tuner_formats & V4L2_STD_NTSC) {
262 CX18_DEBUG_INFO("NTSC tuner detected\n");
263 cx->std |= V4L2_STD_NTSC_M;
264 } else if (tv.tuner_formats & V4L2_STD_SECAM) {
265 CX18_DEBUG_INFO("SECAM tuner detected\n");
266 cx->std |= V4L2_STD_SECAM_L;
267 } else {
268 CX18_INFO("No tuner detected, default to NTSC-M\n");
269 cx->std |= V4L2_STD_NTSC_M;
270 }
271}
272
273static v4l2_std_id cx18_parse_std(struct cx18 *cx)
274{
275 switch (pal[0]) {
276 case '6':
277 return V4L2_STD_PAL_60;
278 case 'b':
279 case 'B':
280 case 'g':
281 case 'G':
282 return V4L2_STD_PAL_BG;
283 case 'h':
284 case 'H':
285 return V4L2_STD_PAL_H;
286 case 'n':
287 case 'N':
288 if (pal[1] == 'c' || pal[1] == 'C')
289 return V4L2_STD_PAL_Nc;
290 return V4L2_STD_PAL_N;
291 case 'i':
292 case 'I':
293 return V4L2_STD_PAL_I;
294 case 'd':
295 case 'D':
296 case 'k':
297 case 'K':
298 return V4L2_STD_PAL_DK;
299 case 'M':
300 case 'm':
301 return V4L2_STD_PAL_M;
302 case '-':
303 break;
304 default:
305 CX18_WARN("pal= argument not recognised\n");
306 return 0;
307 }
308
309 switch (secam[0]) {
310 case 'b':
311 case 'B':
312 case 'g':
313 case 'G':
314 case 'h':
315 case 'H':
316 return V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H;
317 case 'd':
318 case 'D':
319 case 'k':
320 case 'K':
321 return V4L2_STD_SECAM_DK;
322 case 'l':
323 case 'L':
324 if (secam[1] == 'C' || secam[1] == 'c')
325 return V4L2_STD_SECAM_LC;
326 return V4L2_STD_SECAM_L;
327 case '-':
328 break;
329 default:
330 CX18_WARN("secam= argument not recognised\n");
331 return 0;
332 }
333
334 switch (ntsc[0]) {
335 case 'm':
336 case 'M':
337 return V4L2_STD_NTSC_M;
338 case 'j':
339 case 'J':
340 return V4L2_STD_NTSC_M_JP;
341 case 'k':
342 case 'K':
343 return V4L2_STD_NTSC_M_KR;
344 case '-':
345 break;
346 default:
347 CX18_WARN("ntsc= argument not recognised\n");
348 return 0;
349 }
350
351 /* no match found */
352 return 0;
353}
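/*
 * cx18_parse_std() gives pal= priority over secam=, and secam= over ntsc=:
 * the default "-" placeholders fall through to the next switch, while an
 * unrecognised letter warns and returns 0, leaving cx->std unset so that a
 * standard can be autodetected later (see cx18_process_eeprom()). For
 * example, "modprobe cx18 secam=L" with pal left at its default selects
 * V4L2_STD_SECAM_L.
 */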
354
355static void cx18_process_options(struct cx18 *cx)
356{
357 int i, j;
358
359 cx->options.megabytes[CX18_ENC_STREAM_TYPE_MPG] = enc_mpg_buffers;
360 cx->options.megabytes[CX18_ENC_STREAM_TYPE_TS] = enc_ts_buffers;
361 cx->options.megabytes[CX18_ENC_STREAM_TYPE_YUV] = enc_yuv_buffers;
362 cx->options.megabytes[CX18_ENC_STREAM_TYPE_VBI] = enc_vbi_buffers;
363 cx->options.megabytes[CX18_ENC_STREAM_TYPE_PCM] = enc_pcm_buffers;
364 cx->options.cardtype = cardtype[cx->num];
365 cx->options.tuner = tuner[cx->num];
366 cx->options.radio = radio[cx->num];
367
368 cx->std = cx18_parse_std(cx);
369 if (cx->options.cardtype == -1) {
370 CX18_INFO("Ignore card\n");
371 return;
372 }
373 cx->card = cx18_get_card(cx->options.cardtype - 1);
374 if (cx->card)
375 CX18_INFO("User specified %s card\n", cx->card->name);
376 else if (cx->options.cardtype != 0)
377 CX18_ERR("Unknown user specified type, trying to autodetect card\n");
378 if (cx->card == NULL) {
379 if (cx->dev->subsystem_vendor == CX18_PCI_ID_HAUPPAUGE) {
380 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
381 CX18_INFO("Autodetected Hauppauge card\n");
382 }
383 }
384 if (cx->card == NULL) {
385 for (i = 0; (cx->card = cx18_get_card(i)); i++) {
386 if (cx->card->pci_list == NULL)
387 continue;
388 for (j = 0; cx->card->pci_list[j].device; j++) {
389 if (cx->dev->device !=
390 cx->card->pci_list[j].device)
391 continue;
392 if (cx->dev->subsystem_vendor !=
393 cx->card->pci_list[j].subsystem_vendor)
394 continue;
395 if (cx->dev->subsystem_device !=
396 cx->card->pci_list[j].subsystem_device)
397 continue;
398 CX18_INFO("Autodetected %s card\n", cx->card->name);
399 goto done;
400 }
401 }
402 }
403done:
404
405 if (cx->card == NULL) {
406 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
407 CX18_ERR("Unknown card: vendor/device: %04x/%04x\n",
408 cx->dev->vendor, cx->dev->device);
409 CX18_ERR(" subsystem vendor/device: %04x/%04x\n",
410 cx->dev->subsystem_vendor, cx->dev->subsystem_device);
411 CX18_ERR("Defaulting to %s card\n", cx->card->name);
412 CX18_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n");
413		CX18_ERR("card you have to the ivtv-devel mailing list (www.ivtvdriver.org)\n");
414 CX18_ERR("Prefix your subject line with [UNKNOWN CX18 CARD].\n");
415 }
416 cx->v4l2_cap = cx->card->v4l2_capabilities;
417 cx->card_name = cx->card->name;
418 cx->card_i2c = cx->card->i2c;
419}
420
421/* Precondition: the cx18 structure has been memset to 0. Only
422 the dev and num fields have been filled in.
423 No assumptions on the card type may be made here (see cx18_init_struct2
424 for that).
425 */
426static int __devinit cx18_init_struct1(struct cx18 *cx)
427{
428 cx->base_addr = pci_resource_start(cx->dev, 0);
429
430 mutex_init(&cx->serialize_lock);
431 mutex_init(&cx->i2c_bus_lock[0]);
432 mutex_init(&cx->i2c_bus_lock[1]);
433
434 spin_lock_init(&cx->lock);
435 spin_lock_init(&cx->dma_reg_lock);
436
437 /* start counting open_id at 1 */
438 cx->open_id = 1;
439
440 /* Initial settings */
441 cx2341x_fill_defaults(&cx->params);
442 cx->temporal_strength = cx->params.video_temporal_filter;
443 cx->spatial_strength = cx->params.video_spatial_filter;
444 cx->filter_mode = cx->params.video_spatial_filter_mode |
445 (cx->params.video_temporal_filter_mode << 1) |
446 (cx->params.video_median_filter_type << 2);
447 cx->params.port = CX2341X_PORT_MEMORY;
448 cx->params.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
449 init_waitqueue_head(&cx->cap_w);
450 init_waitqueue_head(&cx->mb_apu_waitq);
451 init_waitqueue_head(&cx->mb_cpu_waitq);
452 init_waitqueue_head(&cx->mb_epu_waitq);
453 init_waitqueue_head(&cx->mb_hpu_waitq);
454 init_waitqueue_head(&cx->dma_waitq);
455
456 /* VBI */
457 cx->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
458 cx->vbi.sliced_in = &cx->vbi.in.fmt.sliced;
459 cx->vbi.raw_size = 1456;
460 cx->vbi.raw_decoder_line_size = 1456;
461 cx->vbi.raw_decoder_sav_odd_field = 0x20;
462 cx->vbi.raw_decoder_sav_even_field = 0x60;
463 cx->vbi.sliced_decoder_line_size = 272;
464 cx->vbi.sliced_decoder_sav_odd_field = 0xB0;
465 cx->vbi.sliced_decoder_sav_even_field = 0xF0;
466 return 0;
467}
468
469/* Second initialization part. Here the card type has been
470 autodetected. */
471static void __devinit cx18_init_struct2(struct cx18 *cx)
472{
473 int i;
474
475 for (i = 0; i < CX18_CARD_MAX_VIDEO_INPUTS; i++)
476 if (cx->card->video_inputs[i].video_type == 0)
477 break;
478 cx->nof_inputs = i;
479 for (i = 0; i < CX18_CARD_MAX_AUDIO_INPUTS; i++)
480 if (cx->card->audio_inputs[i].audio_type == 0)
481 break;
482 cx->nof_audio_inputs = i;
483
484 /* Find tuner input */
485 for (i = 0; i < cx->nof_inputs; i++) {
486 if (cx->card->video_inputs[i].video_type ==
487 CX18_CARD_INPUT_VID_TUNER)
488 break;
489 }
490 if (i == cx->nof_inputs)
491 i = 0;
492 cx->active_input = i;
493 cx->audio_input = cx->card->video_inputs[i].audio_index;
494 cx->av_state.vid_input = CX18_AV_COMPOSITE7;
495 cx->av_state.aud_input = CX18_AV_AUDIO8;
496 cx->av_state.audclk_freq = 48000;
497 cx->av_state.audmode = V4L2_TUNER_MODE_LANG1;
498 cx->av_state.vbi_line_offset = 8;
499}
500
501static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *dev,
502 const struct pci_device_id *pci_id)
503{
504 u16 cmd;
505 unsigned char pci_latency;
506
507 CX18_DEBUG_INFO("Enabling pci device\n");
508
509 if (pci_enable_device(dev)) {
510 CX18_ERR("Can't enable device %d!\n", cx->num);
511 return -EIO;
512 }
513 if (pci_set_dma_mask(dev, 0xffffffff)) {
514 CX18_ERR("No suitable DMA available on card %d.\n", cx->num);
515 return -EIO;
516 }
517 if (!request_mem_region(cx->base_addr, CX18_MEM_SIZE, "cx18 encoder")) {
518 CX18_ERR("Cannot request encoder memory region on card %d.\n", cx->num);
519 return -EIO;
520 }
521
522 /* Check for bus mastering */
523 pci_read_config_word(dev, PCI_COMMAND, &cmd);
524 cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
525 pci_write_config_word(dev, PCI_COMMAND, cmd);
526
527 pci_read_config_byte(dev, PCI_CLASS_REVISION, &cx->card_rev);
528 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency);
529
530 if (pci_latency < 64 && cx18_pci_latency) {
531 CX18_INFO("Unreasonably low latency timer, "
532 "setting to 64 (was %d)\n", pci_latency);
533 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
534 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency);
535 }
536 /* This config space value relates to DMA latencies. The
537 default value 0x8080 is too low however and will lead
538 to DMA errors. 0xffff is the max value which solves
539 these problems. */
540 pci_write_config_dword(dev, 0x40, 0xffff);
541
542 CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
543 "irq: %d, latency: %d, memory: 0x%lx\n",
544 cx->dev->device, cx->card_rev, dev->bus->number,
545 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
546 cx->dev->irq, pci_latency, (unsigned long)cx->base_addr);
547
548 return 0;
549}
550
551static u32 cx18_request_module(struct cx18 *cx, u32 hw,
552 const char *name, u32 id)
553{
554 if ((hw & id) == 0)
555 return hw;
556 if (request_module(name) != 0) {
557 CX18_ERR("Failed to load module %s\n", name);
558 return hw & ~id;
559 }
560 CX18_DEBUG_INFO("Loaded module %s\n", name);
561 return hw;
562}
563
564static void cx18_load_and_init_modules(struct cx18 *cx)
565{
566 u32 hw = cx->card->hw_all;
567 int i;
568
569 /* load modules */
570#ifndef CONFIG_MEDIA_TUNER
571 hw = cx18_request_module(cx, hw, "tuner", CX18_HW_TUNER);
572#endif
573#ifndef CONFIG_VIDEO_CS5345
574 hw = cx18_request_module(cx, hw, "cs5345", CX18_HW_CS5345);
575#endif
576
577 /* check which i2c devices are actually found */
578 for (i = 0; i < 32; i++) {
579 u32 device = 1 << i;
580
581 if (!(device & hw))
582 continue;
583 if (device == CX18_HW_GPIO || device == CX18_HW_TVEEPROM ||
584 device == CX18_HW_CX23418 || device == CX18_HW_DVB) {
585 /* These 'devices' do not use i2c probing */
586 cx->hw_flags |= device;
587 continue;
588 }
589 cx18_i2c_register(cx, i);
590 if (cx18_i2c_hw_addr(cx, device) > 0)
591 cx->hw_flags |= device;
592 }
593
594 hw = cx->hw_flags;
595}
596
597static int __devinit cx18_probe(struct pci_dev *dev,
598 const struct pci_device_id *pci_id)
599{
600 int retval = 0;
601 int vbi_buf_size;
602 u32 devtype;
603 struct cx18 *cx;
604
605 spin_lock(&cx18_cards_lock);
606
607 /* Make sure we've got a place for this card */
608 if (cx18_cards_active == CX18_MAX_CARDS) {
609 printk(KERN_ERR "cx18: Maximum number of cards detected (%d).\n",
610 cx18_cards_active);
611 spin_unlock(&cx18_cards_lock);
612 return -ENOMEM;
613 }
614
615 cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
616	if (cx == NULL) {
617 spin_unlock(&cx18_cards_lock);
618 return -ENOMEM;
619 }
620 cx18_cards[cx18_cards_active] = cx;
621 cx->dev = dev;
622 cx->num = cx18_cards_active++;
623 snprintf(cx->name, sizeof(cx->name) - 1, "cx18-%d", cx->num);
624 CX18_INFO("Initializing card #%d\n", cx->num);
625
626 spin_unlock(&cx18_cards_lock);
627
628 cx18_process_options(cx);
629 if (cx->options.cardtype == -1) {
630 retval = -ENODEV;
631 goto err;
632 }
633 if (cx18_init_struct1(cx)) {
634 retval = -ENOMEM;
635 goto err;
636 }
637
638 CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
639
640 /* PCI Device Setup */
641 retval = cx18_setup_pci(cx, dev, pci_id);
642 if (retval != 0) {
643 if (retval == -EIO)
644 goto free_workqueue;
645 else if (retval == -ENXIO)
646 goto free_mem;
647 }
648 /* save cx in the pci struct for later use */
649 pci_set_drvdata(dev, cx);
650
651 /* map io memory */
652 CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
653 cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
654 cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
655 CX18_MEM_SIZE);
656 if (!cx->enc_mem) {
657		CX18_ERR("ioremap failed, increasing __VMALLOC_RESERVE in page.h\n");
658		CX18_ERR("or disabling CONFIG_HIGHMEM4G in the kernel config may help\n");
659 retval = -ENOMEM;
660 goto free_mem;
661 }
662 cx->reg_mem = cx->enc_mem + CX18_REG_OFFSET;
663 devtype = read_reg(0xC72028);
664 switch (devtype & 0xff000000) {
665 case 0xff000000:
666 CX18_INFO("cx23418 revision %08x (A)\n", devtype);
667 break;
668 case 0x01000000:
669 CX18_INFO("cx23418 revision %08x (B)\n", devtype);
670 break;
671 default:
672 CX18_INFO("cx23418 revision %08x (Unknown)\n", devtype);
673 break;
674 }
675
676 cx18_init_power(cx, 1);
677 cx18_init_memory(cx);
678
679 cx->scb = (struct cx18_scb *)(cx->enc_mem + SCB_OFFSET);
680 cx18_init_scb(cx);
681
682 cx18_gpio_init(cx);
683
684 /* active i2c */
685 CX18_DEBUG_INFO("activating i2c...\n");
686 if (init_cx18_i2c(cx)) {
687 CX18_ERR("Could not initialize i2c\n");
688 goto free_map;
689 }
690
691 CX18_DEBUG_INFO("Active card count: %d.\n", cx18_cards_active);
692
693 if (cx->card->hw_all & CX18_HW_TVEEPROM) {
694 /* Based on the model number the cardtype may be changed.
695 The PCI IDs are not always reliable. */
696 cx18_process_eeprom(cx);
697 }
698 if (cx->card->comment)
699 CX18_INFO("%s", cx->card->comment);
700 if (cx->card->v4l2_capabilities == 0) {
701 retval = -ENODEV;
702 goto free_i2c;
703 }
704 cx18_init_memory(cx);
705
706 /* Register IRQ */
707 retval = request_irq(cx->dev->irq, cx18_irq_handler,
708 IRQF_SHARED | IRQF_DISABLED, cx->name, (void *)cx);
709 if (retval) {
710 CX18_ERR("Failed to register irq %d\n", retval);
711 goto free_i2c;
712 }
713
714 if (cx->std == 0)
715 cx->std = V4L2_STD_NTSC_M;
716
717 if (cx->options.tuner == -1) {
718 int i;
719
720 for (i = 0; i < CX18_CARD_MAX_TUNERS; i++) {
721 if ((cx->std & cx->card->tuners[i].std) == 0)
722 continue;
723 cx->options.tuner = cx->card->tuners[i].tuner;
724 break;
725 }
726 }
727 /* if no tuner was found, then pick the first tuner in the card list */
728 if (cx->options.tuner == -1 && cx->card->tuners[0].std) {
729 cx->std = cx->card->tuners[0].std;
730 cx->options.tuner = cx->card->tuners[0].tuner;
731 }
732 if (cx->options.radio == -1)
733 cx->options.radio = (cx->card->radio_input.audio_type != 0);
734
735 /* The card is now fully identified, continue with card-specific
736 initialization. */
737 cx18_init_struct2(cx);
738
739 cx18_load_and_init_modules(cx);
740
741 if (cx->std & V4L2_STD_525_60) {
742 cx->is_60hz = 1;
743 cx->is_out_60hz = 1;
744 } else {
745 cx->is_50hz = 1;
746 cx->is_out_50hz = 1;
747 }
748 cx->params.video_gop_size = cx->is_60hz ? 15 : 12;
749
750 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_MPG] = 0x08000;
751 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_TS] = 0x08000;
752 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_PCM] = 0x01200;
753 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_YUV] = 0x20000;
754 vbi_buf_size = cx->vbi.raw_size * (cx->is_60hz ? 24 : 36) / 2;
755 cx->stream_buf_size[CX18_ENC_STREAM_TYPE_VBI] = vbi_buf_size;
756
757 if (cx->options.radio > 0)
758 cx->v4l2_cap |= V4L2_CAP_RADIO;
759
760 retval = cx18_streams_setup(cx);
761 if (retval) {
762 CX18_ERR("Error %d setting up streams\n", retval);
763 goto free_irq;
764 }
765 retval = cx18_streams_register(cx);
766 if (retval) {
767 CX18_ERR("Error %d registering devices\n", retval);
768 goto free_streams;
769 }
770
771 if (cx->options.tuner > -1) {
772 struct tuner_setup setup;
773
774 setup.addr = ADDR_UNSET;
775 setup.type = cx->options.tuner;
776 setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */
777 setup.tuner_callback = (setup.type == TUNER_XC2028) ?
778 cx18_reset_tuner_gpio : NULL;
779 cx18_call_i2c_clients(cx, TUNER_SET_TYPE_ADDR, &setup);
780 if (setup.type == TUNER_XC2028) {
781 static struct xc2028_ctrl ctrl = {
782 .fname = XC2028_DEFAULT_FIRMWARE,
783 .max_len = 64,
784 };
785 struct v4l2_priv_tun_config cfg = {
786 .tuner = cx->options.tuner,
787 .priv = &ctrl,
788 };
789 cx18_call_i2c_clients(cx, TUNER_SET_CONFIG, &cfg);
790 }
791 }
792
793 /* The tuner is fixed to the standard. The other inputs (e.g. S-Video)
794 are not. */
795 cx->tuner_std = cx->std;
796
797 cx18_init_on_first_open(cx);
798
799 CX18_INFO("Initialized card #%d: %s\n", cx->num, cx->card_name);
800
801 return 0;
802
803free_streams:
804 cx18_streams_cleanup(cx);
805free_irq:
806 free_irq(cx->dev->irq, (void *)cx);
807free_i2c:
808 exit_cx18_i2c(cx);
809free_map:
810 cx18_iounmap(cx);
811free_mem:
812 release_mem_region(cx->base_addr, CX18_MEM_SIZE);
813free_workqueue:
814err:
815 if (retval == 0)
816 retval = -ENODEV;
817 CX18_ERR("Error %d on initialization\n", retval);
818
819 kfree(cx18_cards[cx18_cards_active]);
820 cx18_cards[cx18_cards_active] = NULL;
821 return retval;
822}
823
824int cx18_init_on_first_open(struct cx18 *cx)
825{
826 int video_input;
827 int fw_retry_count = 3;
828 struct v4l2_frequency vf;
829
830 if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
831 return -ENXIO;
832
833 if (test_and_set_bit(CX18_F_I_INITED, &cx->i_flags))
834 return 0;
835
836 while (--fw_retry_count > 0) {
837 /* load firmware */
838 if (cx18_firmware_init(cx) == 0)
839 break;
840 if (fw_retry_count > 1)
841 CX18_WARN("Retry loading firmware\n");
842 }
843
844 if (fw_retry_count == 0) {
845 set_bit(CX18_F_I_FAILED, &cx->i_flags);
846 return -ENXIO;
847 }
848 set_bit(CX18_F_I_LOADED_FW, &cx->i_flags);
849
850	/* Init the firmware twice to work around a transport-related
851	 * silicon bug. */
852
853 fw_retry_count = 3;
854 while (--fw_retry_count > 0) {
855 /* load firmware */
856 if (cx18_firmware_init(cx) == 0)
857 break;
858 if (fw_retry_count > 1)
859 CX18_WARN("Retry loading firmware\n");
860 }
861
862 if (fw_retry_count == 0) {
863 set_bit(CX18_F_I_FAILED, &cx->i_flags);
864 return -ENXIO;
865 }
866
867 vf.tuner = 0;
868 vf.type = V4L2_TUNER_ANALOG_TV;
869 vf.frequency = 6400; /* the tuner 'baseline' frequency */
870
871 /* Set initial frequency. For PAL/SECAM broadcasts no
872 'default' channel exists AFAIK. */
873 if (cx->std == V4L2_STD_NTSC_M_JP)
874 vf.frequency = 1460; /* ch. 1 91250*16/1000 */
875 else if (cx->std & V4L2_STD_NTSC_M)
876 vf.frequency = 1076; /* ch. 4 67250*16/1000 */
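	/*
	 * Added note: VIDIOC_S_FREQUENCY for analog TV uses 62.5 kHz units,
	 * so 1460 * 62.5 kHz = 91.25 MHz and 1076 * 62.5 kHz = 67.25 MHz,
	 * matching the channel comments above.
	 */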
877
878 video_input = cx->active_input;
879 cx->active_input++; /* Force update of input */
880 cx18_v4l2_ioctls(cx, NULL, VIDIOC_S_INPUT, &video_input);
881
882 /* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
883 in one place. */
884 cx->std++; /* Force full standard initialization */
885 cx18_v4l2_ioctls(cx, NULL, VIDIOC_S_STD, &cx->tuner_std);
886 cx18_v4l2_ioctls(cx, NULL, VIDIOC_S_FREQUENCY, &vf);
887 return 0;
888}
889
890static void cx18_remove(struct pci_dev *pci_dev)
891{
892 struct cx18 *cx = pci_get_drvdata(pci_dev);
893
894 CX18_DEBUG_INFO("Removing Card #%d\n", cx->num);
895
896 /* Stop all captures */
897 CX18_DEBUG_INFO("Stopping all streams\n");
898 if (atomic_read(&cx->capturing) > 0)
899 cx18_stop_all_captures(cx);
900
901 /* Interrupts */
902 sw1_irq_disable(IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU);
903 sw2_irq_disable(IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);
904
905 cx18_halt_firmware(cx);
906
907 cx18_streams_cleanup(cx);
908
909 exit_cx18_i2c(cx);
910
911 free_irq(cx->dev->irq, (void *)cx);
912
913 if (cx->dev)
914 cx18_iounmap(cx);
915
916 release_mem_region(cx->base_addr, CX18_MEM_SIZE);
917
918 pci_disable_device(cx->dev);
919
920 CX18_INFO("Removed %s, card #%d\n", cx->card_name, cx->num);
921}
922
923/* define a pci_driver for card detection */
924static struct pci_driver cx18_pci_driver = {
925 .name = "cx18",
926 .id_table = cx18_pci_tbl,
927 .probe = cx18_probe,
928 .remove = cx18_remove,
929};
930
931static int module_start(void)
932{
933 printk(KERN_INFO "cx18: Start initialization, version %s\n", CX18_VERSION);
934
935 memset(cx18_cards, 0, sizeof(cx18_cards));
936
937 /* Validate parameters */
938 if (cx18_first_minor < 0 || cx18_first_minor >= CX18_MAX_CARDS) {
939		printk(KERN_ERR "cx18: Exiting, cx18_first_minor must be between 0 and %d\n",
940 CX18_MAX_CARDS - 1);
941 return -1;
942 }
943
944 if (cx18_debug < 0 || cx18_debug > 511) {
945 cx18_debug = 0;
946 printk(KERN_INFO "cx18: Debug value must be >= 0 and <= 511!\n");
947 }
948
949 if (pci_register_driver(&cx18_pci_driver)) {
950 printk(KERN_ERR "cx18: Error detecting PCI card\n");
951 return -ENODEV;
952 }
953 printk(KERN_INFO "cx18: End initialization\n");
954 return 0;
955}
956
957static void module_cleanup(void)
958{
959 int i;
960
961 pci_unregister_driver(&cx18_pci_driver);
962
963 for (i = 0; i < cx18_cards_active; i++) {
964 if (cx18_cards[i] == NULL)
965 continue;
966 kfree(cx18_cards[i]);
967 }
968}
969
970module_init(module_start);
971module_exit(module_cleanup);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
new file mode 100644
index 000000000000..2ee939193bb7
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -0,0 +1,500 @@
1/*
2 * cx18 driver internal defines and structures
3 *
4 * Derived from ivtv-driver.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#ifndef CX18_DRIVER_H
25#define CX18_DRIVER_H
26
27#include <linux/version.h>
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include <linux/init.h>
31#include <linux/delay.h>
32#include <linux/sched.h>
33#include <linux/fs.h>
34#include <linux/pci.h>
35#include <linux/interrupt.h>
36#include <linux/spinlock.h>
37#include <linux/i2c.h>
38#include <linux/i2c-algo-bit.h>
39#include <linux/list.h>
40#include <linux/unistd.h>
41#include <linux/byteorder/swab.h>
42#include <linux/pagemap.h>
43#include <linux/workqueue.h>
44#include <linux/mutex.h>
45
46#include <linux/dvb/video.h>
47#include <linux/dvb/audio.h>
48#include <media/v4l2-common.h>
49#include <media/tuner.h>
50#include "cx18-mailbox.h"
51#include "cx18-av-core.h"
52#include "cx23418.h"
53
54/* DVB */
55#include "demux.h"
56#include "dmxdev.h"
57#include "dvb_demux.h"
58#include "dvb_frontend.h"
59#include "dvb_net.h"
60#include "dvbdev.h"
61
62#ifndef CONFIG_PCI
63# error "This driver requires kernel PCI support."
64#endif
65
66#define CX18_MEM_OFFSET 0x00000000
67#define CX18_MEM_SIZE 0x04000000
68#define CX18_REG_OFFSET 0x02000000
69
70/* Maximum cx18 driver instances. */
71#define CX18_MAX_CARDS 32
72
73/* Supported cards */
74#define CX18_CARD_HVR_1600_ESMT 0 /* Hauppauge HVR 1600 (ESMT memory) */
75#define CX18_CARD_HVR_1600_SAMSUNG 1 /* Hauppauge HVR 1600 (Samsung memory) */
76#define CX18_CARD_COMPRO_H900 2 /* Compro VideoMate H900 */
77#define CX18_CARD_YUAN_MPC718 3 /* Yuan MPC718 */
78#define CX18_CARD_LAST 3
79
80#define CX18_ENC_STREAM_TYPE_MPG 0
81#define CX18_ENC_STREAM_TYPE_TS 1
82#define CX18_ENC_STREAM_TYPE_YUV 2
83#define CX18_ENC_STREAM_TYPE_VBI 3
84#define CX18_ENC_STREAM_TYPE_PCM 4
85#define CX18_ENC_STREAM_TYPE_IDX 5
86#define CX18_ENC_STREAM_TYPE_RAD 6
87#define CX18_MAX_STREAMS 7
88
89/* system vendor and device IDs */
90#define PCI_VENDOR_ID_CX 0x14f1
91#define PCI_DEVICE_ID_CX23418 0x5b7a
92
93/* subsystem vendor ID */
94#define CX18_PCI_ID_HAUPPAUGE 0x0070
95#define CX18_PCI_ID_COMPRO 0x185b
96#define CX18_PCI_ID_YUAN 0x12ab
97
98/* ======================================================================== */
99/* ========================== START USER SETTABLE DMA VARIABLES =========== */
100/* ======================================================================== */
101
102/* DMA Buffers, Default size in MB allocated */
103#define CX18_DEFAULT_ENC_TS_BUFFERS 1
104#define CX18_DEFAULT_ENC_MPG_BUFFERS 2
105#define CX18_DEFAULT_ENC_IDX_BUFFERS 1
106#define CX18_DEFAULT_ENC_YUV_BUFFERS 2
107#define CX18_DEFAULT_ENC_VBI_BUFFERS 1
108#define CX18_DEFAULT_ENC_PCM_BUFFERS 1
109
110/* i2c stuff */
111#define I2C_CLIENTS_MAX 16
112
113/* debugging */
114
115/* Flag to turn on high volume debugging */
116#define CX18_DBGFLG_WARN (1 << 0)
117#define CX18_DBGFLG_INFO (1 << 1)
118#define CX18_DBGFLG_API (1 << 2)
119#define CX18_DBGFLG_DMA (1 << 3)
120#define CX18_DBGFLG_IOCTL (1 << 4)
121#define CX18_DBGFLG_FILE (1 << 5)
122#define CX18_DBGFLG_I2C (1 << 6)
123#define CX18_DBGFLG_IRQ (1 << 7)
124/* Flag to turn on high volume debugging */
125#define CX18_DBGFLG_HIGHVOL (1 << 8)
126
127/* NOTE: extra space before comma in 'cx->num , ## args' is required for
128 gcc-2.95, otherwise it won't compile. */
129#define CX18_DEBUG(x, type, fmt, args...) \
130 do { \
131 if ((x) & cx18_debug) \
132 printk(KERN_INFO "cx18-%d " type ": " fmt, cx->num , ## args); \
133 } while (0)
134#define CX18_DEBUG_WARN(fmt, args...) CX18_DEBUG(CX18_DBGFLG_WARN, "warning", fmt , ## args)
135#define CX18_DEBUG_INFO(fmt, args...) CX18_DEBUG(CX18_DBGFLG_INFO, "info", fmt , ## args)
136#define CX18_DEBUG_API(fmt, args...) CX18_DEBUG(CX18_DBGFLG_API, "api", fmt , ## args)
137#define CX18_DEBUG_DMA(fmt, args...) CX18_DEBUG(CX18_DBGFLG_DMA, "dma", fmt , ## args)
138#define CX18_DEBUG_IOCTL(fmt, args...) CX18_DEBUG(CX18_DBGFLG_IOCTL, "ioctl", fmt , ## args)
139#define CX18_DEBUG_FILE(fmt, args...) CX18_DEBUG(CX18_DBGFLG_FILE, "file", fmt , ## args)
140#define CX18_DEBUG_I2C(fmt, args...) CX18_DEBUG(CX18_DBGFLG_I2C, "i2c", fmt , ## args)
141#define CX18_DEBUG_IRQ(fmt, args...) CX18_DEBUG(CX18_DBGFLG_IRQ, "irq", fmt , ## args)
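/*
 * Added usage note (illustrative; the name of the module option that feeds
 * cx18_debug is assumed): CX18_DEBUG_INFO("...") only prints when
 * CX18_DBGFLG_INFO is set in cx18_debug, so loading the driver with
 * debug=3 (CX18_DBGFLG_WARN | CX18_DBGFLG_INFO) enables both warning and
 * info messages for every cx18 instance.
 */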
142
143#define CX18_DEBUG_HIGH_VOL(x, type, fmt, args...) \
144 do { \
145 if (((x) & cx18_debug) && (cx18_debug & CX18_DBGFLG_HIGHVOL)) \
146			printk(KERN_INFO "cx18-%d " type ": " fmt, cx->num , ## args); \
147 } while (0)
148#define CX18_DEBUG_HI_WARN(fmt, args...) CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_WARN, "warning", fmt , ## args)
149#define CX18_DEBUG_HI_INFO(fmt, args...) CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_INFO, "info", fmt , ## args)
150#define CX18_DEBUG_HI_API(fmt, args...) CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_API, "api", fmt , ## args)
151#define CX18_DEBUG_HI_DMA(fmt, args...) CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_DMA, "dma", fmt , ## args)
152#define CX18_DEBUG_HI_IOCTL(fmt, args...) CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_IOCTL, "ioctl", fmt , ## args)
153#define CX18_DEBUG_HI_FILE(fmt, args...) CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_FILE, "file", fmt , ## args)
154#define CX18_DEBUG_HI_I2C(fmt, args...) CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_I2C, "i2c", fmt , ## args)
155#define CX18_DEBUG_HI_IRQ(fmt, args...) CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_IRQ, "irq", fmt , ## args)
156
157/* Standard kernel messages */
158#define CX18_ERR(fmt, args...) printk(KERN_ERR "cx18-%d: " fmt, cx->num , ## args)
159#define CX18_WARN(fmt, args...) printk(KERN_WARNING "cx18-%d: " fmt, cx->num , ## args)
160#define CX18_INFO(fmt, args...) printk(KERN_INFO "cx18-%d: " fmt, cx->num , ## args)
161
162/* Values for CX18_API_DEC_PLAYBACK_SPEED mpeg_frame_type_mask parameter: */
163#define MPEG_FRAME_TYPE_IFRAME 1
164#define MPEG_FRAME_TYPE_IFRAME_PFRAME 3
165#define MPEG_FRAME_TYPE_ALL 7
166
167#define CX18_MAX_PGM_INDEX (400)
168
169extern int cx18_debug;
170
171
172struct cx18_options {
173 int megabytes[CX18_MAX_STREAMS]; /* Size in megabytes of each stream */
174 int cardtype; /* force card type on load */
175 int tuner; /* set tuner on load */
176 int radio; /* enable/disable radio */
177};
178
179/* per-buffer bit flags */
180#define CX18_F_B_NEED_BUF_SWAP 0 /* this buffer should be byte swapped */
181
182/* per-stream, s_flags */
183#define CX18_F_S_CLAIMED 3 /* this stream is claimed */
184#define CX18_F_S_STREAMING 4 /* the fw is decoding/encoding this stream */
185#define CX18_F_S_INTERNAL_USE 5 /* this stream is used internally (sliced VBI processing) */
186#define CX18_F_S_STREAMOFF 7 /* signal end of stream EOS */
187#define CX18_F_S_APPL_IO 8 /* this stream is used read/written by an application */
188
189/* per-cx18, i_flags */
190#define CX18_F_I_LOADED_FW 0 /* Loaded the firmware the first time */
191#define CX18_F_I_EOS 4 /* End of encoder stream reached */
192#define CX18_F_I_RADIO_USER 5 /* The radio tuner is selected */
193#define CX18_F_I_ENC_PAUSED 13 /* the encoder is paused */
194#define CX18_F_I_INITED 21 /* set after first open */
195#define CX18_F_I_FAILED 22 /* set if first open failed */
196
197/* These are the VBI types as they appear in the embedded VBI private packets. */
198#define CX18_SLICED_TYPE_TELETEXT_B (1)
199#define CX18_SLICED_TYPE_CAPTION_525 (4)
200#define CX18_SLICED_TYPE_WSS_625 (5)
201#define CX18_SLICED_TYPE_VPS (7)
202
203struct cx18_buffer {
204 struct list_head list;
205 dma_addr_t dma_handle;
206 u32 id;
207 unsigned long b_flags;
208 char *buf;
209
210 u32 bytesused;
211 u32 readpos;
212};
213
214struct cx18_queue {
215 struct list_head list;
216 u32 buffers;
217 u32 length;
218 u32 bytesused;
219};
220
221struct cx18_dvb {
222 struct dmx_frontend hw_frontend;
223 struct dmx_frontend mem_frontend;
224 struct dmxdev dmxdev;
225 struct dvb_adapter dvb_adapter;
226 struct dvb_demux demux;
227 struct dvb_frontend *fe;
228 struct dvb_net dvbnet;
229 int enabled;
230 int feeding;
231
232 struct mutex feedlock;
233
234};
235
236struct cx18; /* forward reference */
237struct cx18_scb; /* forward reference */
238
239struct cx18_stream {
240 /* These first four fields are always set, even if the stream
241 is not actually created. */
242 struct video_device *v4l2dev; /* NULL when stream not created */
243 struct cx18 *cx; /* for ease of use */
244 const char *name; /* name of the stream */
245 int type; /* stream type */
246 u32 handle; /* task handle */
247 unsigned mdl_offset;
248
249 u32 id;
250 spinlock_t qlock; /* locks access to the queues */
251 unsigned long s_flags; /* status flags, see above */
252 int dma; /* can be PCI_DMA_TODEVICE,
253 PCI_DMA_FROMDEVICE or
254 PCI_DMA_NONE */
255 u64 dma_pts;
256 wait_queue_head_t waitq;
257
258 /* Buffer Stats */
259 u32 buffers;
260 u32 buf_size;
261 u32 buffers_stolen;
262
263 /* Buffer Queues */
264 struct cx18_queue q_free; /* free buffers */
265 struct cx18_queue q_full; /* full buffers */
266 struct cx18_queue q_io; /* waiting for I/O */
267
268 /* DVB / Digital Transport */
269 struct cx18_dvb dvb;
270};
271
272struct cx18_open_id {
273 u32 open_id;
274 int type;
275 enum v4l2_priority prio;
276 struct cx18 *cx;
277};
278
279/* forward declaration of struct defined in cx18-cards.h */
280struct cx18_card;
281
282
283#define CX18_VBI_FRAMES 32
284
285/* VBI data */
286struct vbi_info {
287 u32 enc_size;
288 u32 frame;
289 u8 cc_data_odd[256];
290 u8 cc_data_even[256];
291 int cc_pos;
292 u8 cc_no_update;
293 u8 vps[5];
294 u8 vps_found;
295 int wss;
296 u8 wss_found;
297 u8 wss_no_update;
298 u32 raw_decoder_line_size;
299 u8 raw_decoder_sav_odd_field;
300 u8 raw_decoder_sav_even_field;
301 u32 sliced_decoder_line_size;
302 u8 sliced_decoder_sav_odd_field;
303 u8 sliced_decoder_sav_even_field;
304 struct v4l2_format in;
305 /* convenience pointer to sliced struct in vbi_in union */
306 struct v4l2_sliced_vbi_format *sliced_in;
307 u32 service_set_in;
308 int insert_mpeg;
309
310 /* Buffer for the maximum of 2 * 18 * packet_size sliced VBI lines.
311 One for /dev/vbi0 and one for /dev/vbi8 */
312 struct v4l2_sliced_vbi_data sliced_data[36];
313
314 /* Buffer for VBI data inserted into MPEG stream.
315 The first byte is a dummy byte that's never used.
316 The next 16 bytes contain the MPEG header for the VBI data,
317 the remainder is the actual VBI data.
318 The max size accepted by the MPEG VBI reinsertion turns out
319 to be 1552 bytes, which happens to be 4 + (1 + 42) * (2 * 18) bytes,
320 where 4 is a four byte header, 42 is the max sliced VBI payload, 1 is
321 a single line header byte and 2 * 18 is the number of VBI lines per frame.
322
323 However, it seems that the data must be 1K aligned, so we have to
324 pad the data until the 1 or 2 K boundary.
325
326 This pointer array will allocate 2049 bytes to store each VBI frame. */
327 u8 *sliced_mpeg_data[CX18_VBI_FRAMES];
328 u32 sliced_mpeg_size[CX18_VBI_FRAMES];
329 struct cx18_buffer sliced_mpeg_buf;
330 u32 inserted_frame;
331
332 u32 start[2], count;
333 u32 raw_size;
334 u32 sliced_size;
335};
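/*
 * Sizing sketch (added, illustrative only; the two macros below are
 * hypothetical and not defined anywhere in this driver): the comment above
 * works out to 4 + (1 + 42) * (2 * 18) = 1552 bytes of sliced VBI per frame,
 * which is padded up to the next 1 KB boundary and prefixed with the unused
 * dummy byte, giving the 2049 bytes allocated per sliced_mpeg_data[] entry:
 *
 *	#define SLICED_VBI_MAX_BYTES	(4 + (1 + 42) * (2 * 18))		// 1552
 *	#define SLICED_VBI_PER_FRAME	(ALIGN(SLICED_VBI_MAX_BYTES, 1024) + 1)	// 2049
 */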
336
337/* Per cx23418, per I2C bus private algo callback data */
338struct cx18_i2c_algo_callback_data {
339 struct cx18 *cx;
340 int bus_index; /* 0 or 1 for the cx23418's 1st or 2nd I2C bus */
341};
342
343/* Struct to hold info about cx18 cards */
344struct cx18 {
345 int num; /* board number, -1 during init! */
346 char name[8]; /* board name for printk and interrupts (e.g. 'cx180') */
347 struct pci_dev *dev; /* PCI device */
348 const struct cx18_card *card; /* card information */
349 const char *card_name; /* full name of the card */
350 const struct cx18_card_tuner_i2c *card_i2c; /* i2c addresses to probe for tuner */
351 u8 is_50hz;
352 u8 is_60hz;
353 u8 is_out_50hz;
354 u8 is_out_60hz;
355 u8 nof_inputs; /* number of video inputs */
356 u8 nof_audio_inputs; /* number of audio inputs */
357 u16 buffer_id; /* buffer ID counter */
358 u32 v4l2_cap; /* V4L2 capabilities of card */
359 u32 hw_flags; /* Hardware description of the board */
360 unsigned mdl_offset;
361 struct cx18_scb *scb; /* pointer to SCB */
362
363 struct cx18_av_state av_state;
364
365 /* codec settings */
366 struct cx2341x_mpeg_params params;
367 u32 filter_mode;
368 u32 temporal_strength;
369 u32 spatial_strength;
370
371 /* dualwatch */
372 unsigned long dualwatch_jiffies;
373 u16 dualwatch_stereo_mode;
374
375 /* Digitizer type */
376	int digitizer;		/* 0x00EF = saa7114 0x00F0 = saa7115 0x0106 = mic */
377
378 struct mutex serialize_lock; /* mutex used to serialize open/close/start/stop/ioctl operations */
379 struct cx18_options options; /* User options */
380 int stream_buf_size[CX18_MAX_STREAMS]; /* Stream buffer size */
381 struct cx18_stream streams[CX18_MAX_STREAMS]; /* Stream data */
382 unsigned long i_flags; /* global cx18 flags */
383 atomic_t capturing; /* count number of active capture streams */
384 spinlock_t lock; /* lock access to this struct */
385 int search_pack_header;
386
387 spinlock_t dma_reg_lock; /* lock access to DMA engine registers */
388
389 int open_id; /* incremented each time an open occurs, used as
390 unique ID. Starts at 1, so 0 can be used as
391 uninitialized value in the stream->id. */
392
393 u32 base_addr;
394 struct v4l2_prio_state prio;
395
396 u8 card_rev;
397 void __iomem *enc_mem, *reg_mem;
398
399 struct vbi_info vbi;
400
401 u32 pgm_info_offset;
402 u32 pgm_info_num;
403 u32 pgm_info_write_idx;
404 u32 pgm_info_read_idx;
405 struct v4l2_enc_idx_entry pgm_info[CX18_MAX_PGM_INDEX];
406
407 u64 mpg_data_received;
408 u64 vbi_data_inserted;
409
410 wait_queue_head_t mb_apu_waitq;
411 wait_queue_head_t mb_cpu_waitq;
412 wait_queue_head_t mb_epu_waitq;
413 wait_queue_head_t mb_hpu_waitq;
414 wait_queue_head_t cap_w;
415 /* when the current DMA is finished this queue is woken up */
416 wait_queue_head_t dma_waitq;
417
418 /* i2c */
419 struct i2c_adapter i2c_adap[2];
420 struct i2c_algo_bit_data i2c_algo[2];
421 struct cx18_i2c_algo_callback_data i2c_algo_cb_data[2];
422 struct i2c_client i2c_client[2];
423 struct mutex i2c_bus_lock[2];
424 struct i2c_client *i2c_clients[I2C_CLIENTS_MAX];
425
426 /* v4l2 and User settings */
427
428 /* codec settings */
429 u32 audio_input;
430 u32 active_input;
431 u32 active_output;
432 v4l2_std_id std;
433 v4l2_std_id tuner_std; /* The norm of the tuner (fixed) */
434};
435
436/* Globals */
437extern struct cx18 *cx18_cards[];
438extern int cx18_cards_active;
439extern int cx18_first_minor;
440extern spinlock_t cx18_cards_lock;
441
442/*==============Prototypes==================*/
443
444/* Return non-zero if a signal is pending */
445int cx18_msleep_timeout(unsigned int msecs, int intr);
446
447/* Wait on queue, returns -EINTR if interrupted */
448int cx18_waitq(wait_queue_head_t *waitq);
449
450/* Read Hauppauge eeprom */
451struct tveeprom; /* forward reference */
452void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv);
453
454/* First-open initialization: load firmware, etc. */
455int cx18_init_on_first_open(struct cx18 *cx);
456
457/* This is a PCI post thing, where if the pci register is not read, then
458 the write doesn't always take effect right away. By reading back the
459 register any pending PCI writes will be performed (in order), and so
460 you can be sure that the writes are guaranteed to be done.
461
462 Rarely needed, only in some timing sensitive cases.
463 Apparently if this is not done some motherboards seem
464 to kill the firmware and get into the broken state until computer is
465 rebooted. */
466#define write_sync(val, reg) \
467 do { writel(val, reg); readl(reg); } while (0)
468
469#define read_reg(reg) readl(cx->reg_mem + (reg))
470#define write_reg(val, reg) writel(val, cx->reg_mem + (reg))
471#define write_reg_sync(val, reg) \
472 do { write_reg(val, reg); read_reg(reg); } while (0)
473
474#define read_enc(addr) readl(cx->enc_mem + (u32)(addr))
475#define write_enc(val, addr) writel(val, cx->enc_mem + (u32)(addr))
476#define write_enc_sync(val, addr) \
477 do { write_enc(val, addr); read_enc(addr); } while (0)
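/*
 * Added usage sketch (illustrative, not taken from the driver sources): the
 * *_sync variants are meant for the rare timing-sensitive writes described
 * above, e.g.
 *
 *	write_reg_sync(IRQ_CPU_TO_EPU, SW1_INT_STATUS);
 *
 * where the read-back hidden in the macro flushes any posted PCI write out
 * to the CX23418 before the CPU continues.
 */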
478
479#define sw1_irq_enable(val) do { \
480 write_reg(val, SW1_INT_STATUS); \
481 write_reg(read_reg(SW1_INT_ENABLE_PCI) | (val), SW1_INT_ENABLE_PCI); \
482} while (0)
483
484#define sw1_irq_disable(val) \
485	write_reg(read_reg(SW1_INT_ENABLE_PCI) & ~(val), SW1_INT_ENABLE_PCI)
486
487#define sw2_irq_enable(val) do { \
488 write_reg(val, SW2_INT_STATUS); \
489 write_reg(read_reg(SW2_INT_ENABLE_PCI) | (val), SW2_INT_ENABLE_PCI); \
490} while (0)
491
492#define sw2_irq_disable(val) \
493	write_reg(read_reg(SW2_INT_ENABLE_PCI) & ~(val), SW2_INT_ENABLE_PCI)
494
495#define setup_page(addr) do { \
496 u32 val = read_reg(0xD000F8) & ~0x1f00; \
497 write_reg(val | (((addr) >> 17) & 0x1f00), 0xD000F8); \
498} while (0)
499
500#endif /* CX18_DRIVER_H */
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
new file mode 100644
index 000000000000..65efe69d939a
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -0,0 +1,288 @@
1/*
2 * cx18 functions for DVB support
3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "cx18-version.h"
23#include "cx18-dvb.h"
24#include "cx18-streams.h"
25#include "cx18-cards.h"
26#include "s5h1409.h"
27
28/* Wait until the MXL500X driver is merged */
29#ifdef HAVE_MXL500X
30#include "mxl500x.h"
31#endif
32
33DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
34
35#define CX18_REG_DMUX_NUM_PORT_0_CONTROL 0xd5a000
36
37#ifdef HAVE_MXL500X
38static struct mxl500x_config hauppauge_hvr1600_tuner = {
39 .delsys = MXL500x_MODE_ATSC,
40 .octf = MXL500x_OCTF_CH,
41 .xtal_freq = 16000000,
42 .iflo_freq = 5380000,
43 .ref_freq = 322800000,
44 .rssi_ena = MXL_RSSI_ENABLE,
45 .addr = 0xC6 >> 1,
46};
47
48static struct s5h1409_config hauppauge_hvr1600_config = {
49 .demod_address = 0x32 >> 1,
50 .output_mode = S5H1409_SERIAL_OUTPUT,
51 .gpio = S5H1409_GPIO_ON,
52 .qam_if = 44000,
53 .inversion = S5H1409_INVERSION_OFF,
54 .status_mode = S5H1409_DEMODLOCKING,
55 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
56
57};
58#endif
59
60static int dvb_register(struct cx18_stream *stream);
61
62/* Kernel DVB framework calls this when the feed needs to start.
63 * The CX18 framework should enable the transport DMA handling
64 * and queue processing.
65 */
66static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
67{
68 struct dvb_demux *demux = feed->demux;
69 struct cx18_stream *stream = (struct cx18_stream *) demux->priv;
70 struct cx18 *cx = stream->cx;
71 int ret = -EINVAL;
72 u32 v;
73
74 CX18_DEBUG_INFO("Start feed: pid = 0x%x index = %d\n",
75 feed->pid, feed->index);
76 switch (cx->card->type) {
77 case CX18_CARD_HVR_1600_ESMT:
78 case CX18_CARD_HVR_1600_SAMSUNG:
79 v = read_reg(CX18_REG_DMUX_NUM_PORT_0_CONTROL);
80 v |= 0x00400000; /* Serial Mode */
81 v |= 0x00002000; /* Data Length - Byte */
82 v |= 0x00010000; /* Error - Polarity */
83 v |= 0x00020000; /* Error - Passthru */
84 v |= 0x000c0000; /* Error - Ignore */
85 write_reg(v, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
86 break;
87
88 default:
89 /* Assumption - Parallel transport - Signalling
90 * undefined or default.
91 */
92 break;
93 }
94
95 if (!demux->dmx.frontend)
96 return -EINVAL;
97
98 if (stream) {
99 mutex_lock(&stream->dvb.feedlock);
100 if (stream->dvb.feeding++ == 0) {
101 CX18_DEBUG_INFO("Starting Transport DMA\n");
102 ret = cx18_start_v4l2_encode_stream(stream);
103 } else
104 ret = 0;
105 mutex_unlock(&stream->dvb.feedlock);
106 }
107
108 return ret;
109}
110
111/* Kernel DVB framework calls this when the feed needs to stop. */
112static int cx18_dvb_stop_feed(struct dvb_demux_feed *feed)
113{
114 struct dvb_demux *demux = feed->demux;
115 struct cx18_stream *stream = (struct cx18_stream *)demux->priv;
116 struct cx18 *cx = stream->cx;
117 int ret = -EINVAL;
118
119 CX18_DEBUG_INFO("Stop feed: pid = 0x%x index = %d\n",
120 feed->pid, feed->index);
121
122 if (stream) {
123 mutex_lock(&stream->dvb.feedlock);
124 if (--stream->dvb.feeding == 0) {
125 CX18_DEBUG_INFO("Stopping Transport DMA\n");
126 ret = cx18_stop_v4l2_encode_stream(stream, 0);
127 } else
128 ret = 0;
129 mutex_unlock(&stream->dvb.feedlock);
130 }
131
132 return ret;
133}
134
135int cx18_dvb_register(struct cx18_stream *stream)
136{
137 struct cx18 *cx = stream->cx;
138 struct cx18_dvb *dvb = &stream->dvb;
139 struct dvb_adapter *dvb_adapter;
140 struct dvb_demux *dvbdemux;
141 struct dmx_demux *dmx;
142 int ret;
143
144 if (!dvb)
145 return -EINVAL;
146
147 ret = dvb_register_adapter(&dvb->dvb_adapter,
148 CX18_DRIVER_NAME,
149 THIS_MODULE, &cx->dev->dev, adapter_nr);
150 if (ret < 0)
151 goto err_out;
152
153 dvb_adapter = &dvb->dvb_adapter;
154
155 dvbdemux = &dvb->demux;
156
157 dvbdemux->priv = (void *)stream;
158
159 dvbdemux->filternum = 256;
160 dvbdemux->feednum = 256;
161 dvbdemux->start_feed = cx18_dvb_start_feed;
162 dvbdemux->stop_feed = cx18_dvb_stop_feed;
163 dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
164 DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING);
165 ret = dvb_dmx_init(dvbdemux);
166 if (ret < 0)
167 goto err_dvb_unregister_adapter;
168
169 dmx = &dvbdemux->dmx;
170
171 dvb->hw_frontend.source = DMX_FRONTEND_0;
172 dvb->mem_frontend.source = DMX_MEMORY_FE;
173 dvb->dmxdev.filternum = 256;
174 dvb->dmxdev.demux = dmx;
175
176 ret = dvb_dmxdev_init(&dvb->dmxdev, dvb_adapter);
177 if (ret < 0)
178 goto err_dvb_dmx_release;
179
180 ret = dmx->add_frontend(dmx, &dvb->hw_frontend);
181 if (ret < 0)
182 goto err_dvb_dmxdev_release;
183
184 ret = dmx->add_frontend(dmx, &dvb->mem_frontend);
185 if (ret < 0)
186 goto err_remove_hw_frontend;
187
188 ret = dmx->connect_frontend(dmx, &dvb->hw_frontend);
189 if (ret < 0)
190 goto err_remove_mem_frontend;
191
192 ret = dvb_register(stream);
193 if (ret < 0)
194 goto err_disconnect_frontend;
195
196 dvb_net_init(dvb_adapter, &dvb->dvbnet, dmx);
197
198 CX18_INFO("DVB Frontend registered\n");
199 mutex_init(&dvb->feedlock);
200 dvb->enabled = 1;
201 return ret;
202
203err_disconnect_frontend:
204 dmx->disconnect_frontend(dmx);
205err_remove_mem_frontend:
206 dmx->remove_frontend(dmx, &dvb->mem_frontend);
207err_remove_hw_frontend:
208 dmx->remove_frontend(dmx, &dvb->hw_frontend);
209err_dvb_dmxdev_release:
210 dvb_dmxdev_release(&dvb->dmxdev);
211err_dvb_dmx_release:
212 dvb_dmx_release(dvbdemux);
213err_dvb_unregister_adapter:
214 dvb_unregister_adapter(dvb_adapter);
215err_out:
216 return ret;
217}
218
219void cx18_dvb_unregister(struct cx18_stream *stream)
220{
221 struct cx18 *cx = stream->cx;
222 struct cx18_dvb *dvb = &stream->dvb;
223 struct dvb_adapter *dvb_adapter;
224 struct dvb_demux *dvbdemux;
225 struct dmx_demux *dmx;
226
227 CX18_INFO("unregister DVB\n");
228
229 dvb_adapter = &dvb->dvb_adapter;
230 dvbdemux = &dvb->demux;
231 dmx = &dvbdemux->dmx;
232
233 dmx->close(dmx);
234 dvb_net_release(&dvb->dvbnet);
235 dmx->remove_frontend(dmx, &dvb->mem_frontend);
236 dmx->remove_frontend(dmx, &dvb->hw_frontend);
237 dvb_dmxdev_release(&dvb->dmxdev);
238 dvb_dmx_release(dvbdemux);
239 dvb_unregister_frontend(dvb->fe);
240 dvb_frontend_detach(dvb->fe);
241 dvb_unregister_adapter(dvb_adapter);
242}
243
244/* All the DVB attach calls go here; this function gets modified
245 * for each new card. No other function in this file needs
246 * to change.
247 */
248static int dvb_register(struct cx18_stream *stream)
249{
250 struct cx18_dvb *dvb = &stream->dvb;
251 struct cx18 *cx = stream->cx;
252 int ret = 0;
253
254 switch (cx->card->type) {
255/* Wait until the MXL500X driver is merged */
256#ifdef HAVE_MXL500X
257 case CX18_CARD_HVR_1600_ESMT:
258 case CX18_CARD_HVR_1600_SAMSUNG:
259 dvb->fe = dvb_attach(s5h1409_attach,
260 &hauppauge_hvr1600_config,
261 &cx->i2c_adap[0]);
262 if (dvb->fe != NULL) {
263 dvb_attach(mxl500x_attach, dvb->fe,
264 &hauppauge_hvr1600_tuner,
265 &cx->i2c_adap[0]);
266 ret = 0;
267 }
268 break;
269#endif
270 default:
271 /* No Digital Tv Support */
272 break;
273 }
274
275 if (dvb->fe == NULL) {
276 CX18_ERR("frontend initialization failed\n");
277 return -1;
278 }
279
280 ret = dvb_register_frontend(&dvb->dvb_adapter, dvb->fe);
281 if (ret < 0) {
282 if (dvb->fe->ops.release)
283 dvb->fe->ops.release(dvb->fe);
284 return ret;
285 }
286
287 return ret;
288}
diff --git a/drivers/media/video/cx18/cx18-dvb.h b/drivers/media/video/cx18/cx18-dvb.h
new file mode 100644
index 000000000000..d6a6ccda79a9
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-dvb.h
@@ -0,0 +1,25 @@
1/*
2 * cx18 functions for DVB support
3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 *
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include "cx18-driver.h"
23
24int cx18_dvb_register(struct cx18_stream *stream);
25void cx18_dvb_unregister(struct cx18_stream *stream);
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
new file mode 100644
index 000000000000..69303065a294
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -0,0 +1,711 @@
1/*
2 * cx18 file operation functions
3 *
4 * Derived from ivtv-fileops.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-fileops.h"
26#include "cx18-i2c.h"
27#include "cx18-queue.h"
28#include "cx18-vbi.h"
29#include "cx18-audio.h"
30#include "cx18-mailbox.h"
31#include "cx18-scb.h"
32#include "cx18-streams.h"
33#include "cx18-controls.h"
34#include "cx18-ioctl.h"
35#include "cx18-cards.h"
36
37/* This function tries to claim the stream for a specific file descriptor.
38 If no one else is using this stream then the stream is claimed and
39 associated VBI streams are also automatically claimed.
40 Possible error returns: -EBUSY if someone else has claimed
41 the stream or 0 on success. */
42int cx18_claim_stream(struct cx18_open_id *id, int type)
43{
44 struct cx18 *cx = id->cx;
45 struct cx18_stream *s = &cx->streams[type];
46 struct cx18_stream *s_vbi;
47 int vbi_type;
48
49 if (test_and_set_bit(CX18_F_S_CLAIMED, &s->s_flags)) {
50 /* someone already claimed this stream */
51 if (s->id == id->open_id) {
52 /* yes, this file descriptor did. So that's OK. */
53 return 0;
54 }
55 if (s->id == -1 && type == CX18_ENC_STREAM_TYPE_VBI) {
56 /* VBI is handled already internally, now also assign
57 the file descriptor to this stream for external
58 reading of the stream. */
59 s->id = id->open_id;
60 CX18_DEBUG_INFO("Start Read VBI\n");
61 return 0;
62 }
63 /* someone else is using this stream already */
64 CX18_DEBUG_INFO("Stream %d is busy\n", type);
65 return -EBUSY;
66 }
67 s->id = id->open_id;
68
69 /* CX18_DEC_STREAM_TYPE_MPG needs to claim CX18_DEC_STREAM_TYPE_VBI,
70 CX18_ENC_STREAM_TYPE_MPG needs to claim CX18_ENC_STREAM_TYPE_VBI
71 (provided VBI insertion is on and sliced VBI is selected), for all
72 other streams we're done */
73 if (type == CX18_ENC_STREAM_TYPE_MPG &&
74 cx->vbi.insert_mpeg && cx->vbi.sliced_in->service_set) {
75 vbi_type = CX18_ENC_STREAM_TYPE_VBI;
76 } else {
77 return 0;
78 }
79 s_vbi = &cx->streams[vbi_type];
80
81 set_bit(CX18_F_S_CLAIMED, &s_vbi->s_flags);
82
83 /* mark that it is used internally */
84 set_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags);
85 return 0;
86}
87
88/* This function releases a previously claimed stream. It will take into
89 account associated VBI streams. */
90void cx18_release_stream(struct cx18_stream *s)
91{
92 struct cx18 *cx = s->cx;
93 struct cx18_stream *s_vbi;
94
95 s->id = -1;
96 if (s->type == CX18_ENC_STREAM_TYPE_VBI &&
97 test_bit(CX18_F_S_INTERNAL_USE, &s->s_flags)) {
98 /* this stream is still in use internally */
99 return;
100 }
101 if (!test_and_clear_bit(CX18_F_S_CLAIMED, &s->s_flags)) {
102 CX18_DEBUG_WARN("Release stream %s not in use!\n", s->name);
103 return;
104 }
105
106 cx18_flush_queues(s);
107
108 /* CX18_ENC_STREAM_TYPE_MPG needs to release CX18_ENC_STREAM_TYPE_VBI,
109 for all other streams we're done */
110 if (s->type == CX18_ENC_STREAM_TYPE_MPG)
111 s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
112 else
113 return;
114
115 /* clear internal use flag */
116 if (!test_and_clear_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags)) {
117 /* was already cleared */
118 return;
119 }
120 if (s_vbi->id != -1) {
121 /* VBI stream still claimed by a file descriptor */
122 return;
123 }
124 clear_bit(CX18_F_S_CLAIMED, &s_vbi->s_flags);
125 cx18_flush_queues(s_vbi);
126}
127
128static void cx18_dualwatch(struct cx18 *cx)
129{
130 struct v4l2_tuner vt;
131 u16 new_bitmap;
132 u16 new_stereo_mode;
133 const u16 stereo_mask = 0x0300;
134 const u16 dual = 0x0200;
135
136 new_stereo_mode = cx->params.audio_properties & stereo_mask;
137 memset(&vt, 0, sizeof(vt));
138 cx18_call_i2c_clients(cx, VIDIOC_G_TUNER, &vt);
139 if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 &&
140 (vt.rxsubchans & V4L2_TUNER_SUB_LANG2))
141 new_stereo_mode = dual;
142
143 if (new_stereo_mode == cx->dualwatch_stereo_mode)
144 return;
145
146 new_bitmap = new_stereo_mode | (cx->params.audio_properties & ~stereo_mask);
147
148	CX18_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x. new audio_bitmask=0x%x\n",
149 cx->dualwatch_stereo_mode, new_stereo_mode, new_bitmap);
150
151 if (cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
152 cx18_find_handle(cx), new_bitmap) == 0) {
153 cx->dualwatch_stereo_mode = new_stereo_mode;
154 return;
155 }
156 CX18_DEBUG_INFO("dualwatch: changing stereo flag failed\n");
157}
158
159
160static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block, int *err)
161{
162 struct cx18 *cx = s->cx;
163 struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
164 struct cx18_buffer *buf;
165 DEFINE_WAIT(wait);
166
167 *err = 0;
168 while (1) {
169 if (s->type == CX18_ENC_STREAM_TYPE_MPG) {
170
171 if (time_after(jiffies, cx->dualwatch_jiffies + msecs_to_jiffies(1000))) {
172 cx->dualwatch_jiffies = jiffies;
173 cx18_dualwatch(cx);
174 }
175 if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
176 !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
177 while ((buf = cx18_dequeue(s_vbi, &s_vbi->q_full))) {
178 /* byteswap and process VBI data */
179/* cx18_process_vbi_data(cx, buf, s_vbi->dma_pts, s_vbi->type); */
180 cx18_enqueue(s_vbi, buf, &s_vbi->q_free);
181 }
182 }
183 buf = &cx->vbi.sliced_mpeg_buf;
184 if (buf->readpos != buf->bytesused)
185 return buf;
186 }
187
188 /* do we have leftover data? */
189 buf = cx18_dequeue(s, &s->q_io);
190 if (buf)
191 return buf;
192
193 /* do we have new data? */
194 buf = cx18_dequeue(s, &s->q_full);
195 if (buf) {
196 if (!test_and_clear_bit(CX18_F_B_NEED_BUF_SWAP,
197 &buf->b_flags))
198 return buf;
199 if (s->type == CX18_ENC_STREAM_TYPE_MPG)
200 /* byteswap MPG data */
201 cx18_buf_swap(buf);
202 else {
203 /* byteswap and process VBI data */
204 cx18_process_vbi_data(cx, buf,
205 s->dma_pts, s->type);
206 }
207 return buf;
208 }
209
210 /* return if end of stream */
211 if (!test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
212 CX18_DEBUG_INFO("EOS %s\n", s->name);
213 return NULL;
214 }
215
216 /* return if file was opened with O_NONBLOCK */
217 if (non_block) {
218 *err = -EAGAIN;
219 return NULL;
220 }
221
222 /* wait for more data to arrive */
223 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
224 /* New buffers might have become available before we were added
225 to the waitqueue */
226 if (!s->q_full.buffers)
227 schedule();
228 finish_wait(&s->waitq, &wait);
229 if (signal_pending(current)) {
230 /* return if a signal was received */
231 CX18_DEBUG_INFO("User stopped %s\n", s->name);
232 *err = -EINTR;
233 return NULL;
234 }
235 }
236}
237
238static void cx18_setup_sliced_vbi_buf(struct cx18 *cx)
239{
240 int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES;
241
242 cx->vbi.sliced_mpeg_buf.buf = cx->vbi.sliced_mpeg_data[idx];
243 cx->vbi.sliced_mpeg_buf.bytesused = cx->vbi.sliced_mpeg_size[idx];
244 cx->vbi.sliced_mpeg_buf.readpos = 0;
245}
246
247static size_t cx18_copy_buf_to_user(struct cx18_stream *s,
248 struct cx18_buffer *buf, char __user *ubuf, size_t ucount)
249{
250 struct cx18 *cx = s->cx;
251 size_t len = buf->bytesused - buf->readpos;
252
253 if (len > ucount)
254 len = ucount;
255 if (cx->vbi.insert_mpeg && s->type == CX18_ENC_STREAM_TYPE_MPG &&
256 cx->vbi.sliced_in->service_set && buf != &cx->vbi.sliced_mpeg_buf) {
257 const char *start = buf->buf + buf->readpos;
258 const char *p = start + 1;
259 const u8 *q;
260 u8 ch = cx->search_pack_header ? 0xba : 0xe0;
261 int stuffing, i;
262
263 while (start + len > p) {
264 q = memchr(p, 0, start + len - p);
265 if (q == NULL)
266 break;
267 p = q + 1;
268 if ((char *)q + 15 >= buf->buf + buf->bytesused ||
269 q[1] != 0 || q[2] != 1 || q[3] != ch)
270 continue;
271 if (!cx->search_pack_header) {
272 if ((q[6] & 0xc0) != 0x80)
273 continue;
274 if (((q[7] & 0xc0) == 0x80 &&
275 (q[9] & 0xf0) == 0x20) ||
276 ((q[7] & 0xc0) == 0xc0 &&
277 (q[9] & 0xf0) == 0x30)) {
278 ch = 0xba;
279 cx->search_pack_header = 1;
280 p = q + 9;
281 }
282 continue;
283 }
284 stuffing = q[13] & 7;
285 /* all stuffing bytes must be 0xff */
286 for (i = 0; i < stuffing; i++)
287 if (q[14 + i] != 0xff)
288 break;
289 if (i == stuffing &&
290 (q[4] & 0xc4) == 0x44 &&
291 (q[12] & 3) == 3 &&
292 q[14 + stuffing] == 0 &&
293 q[15 + stuffing] == 0 &&
294 q[16 + stuffing] == 1) {
295 cx->search_pack_header = 0;
296 len = (char *)q - start;
297 cx18_setup_sliced_vbi_buf(cx);
298 break;
299 }
300 }
301 }
302 if (copy_to_user(ubuf, (u8 *)buf->buf + buf->readpos, len)) {
303 CX18_DEBUG_WARN("copy %zd bytes to user failed for %s\n",
304 len, s->name);
305 return -EFAULT;
306 }
307 buf->readpos += len;
308 if (s->type == CX18_ENC_STREAM_TYPE_MPG &&
309 buf != &cx->vbi.sliced_mpeg_buf)
310 cx->mpg_data_received += len;
311 return len;
312}
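/*
 * Added summary of the scan above (a best-effort reading, not taken from the
 * original comments): when sliced VBI insertion is enabled for an MPEG read,
 * the copy is deliberately cut short at the next MPEG-PS pack header
 * (00 00 01 BA) so the queued sliced VBI packet can be spliced in at a pack
 * boundary via cx18_setup_sliced_vbi_buf(). Until the first video PES header
 * (00 00 01 E0) carrying a PTS is seen, the code scans for that start code
 * instead and only then switches to searching for pack headers.
 */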
313
314static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf,
315 size_t tot_count, int non_block)
316{
317 struct cx18 *cx = s->cx;
318 size_t tot_written = 0;
319 int single_frame = 0;
320
321 if (atomic_read(&cx->capturing) == 0 && s->id == -1) {
322 /* shouldn't happen */
323 CX18_DEBUG_WARN("Stream %s not initialized before read\n",
324 s->name);
325 return -EIO;
326 }
327
328 /* Each VBI buffer is one frame, the v4l2 API says that for VBI the
329 frames should arrive one-by-one, so make sure we never output more
330 than one VBI frame at a time */
331 if (s->type == CX18_ENC_STREAM_TYPE_VBI &&
332 cx->vbi.sliced_in->service_set)
333 single_frame = 1;
334
335 for (;;) {
336 struct cx18_buffer *buf;
337 int rc;
338
339 buf = cx18_get_buffer(s, non_block, &rc);
340 /* if there is no data available... */
341 if (buf == NULL) {
342 /* if we got data, then return that regardless */
343 if (tot_written)
344 break;
345 /* EOS condition */
346 if (rc == 0) {
347 clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);
348 clear_bit(CX18_F_S_APPL_IO, &s->s_flags);
349 cx18_release_stream(s);
350 }
351 /* set errno */
352 return rc;
353 }
354
355 rc = cx18_copy_buf_to_user(s, buf, ubuf + tot_written,
356 tot_count - tot_written);
357
358 if (buf != &cx->vbi.sliced_mpeg_buf) {
359 if (buf->readpos == buf->bytesused) {
360 cx18_buf_sync_for_device(s, buf);
361 cx18_enqueue(s, buf, &s->q_free);
362 cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5,
363 s->handle,
364 (void *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
365 1, buf->id, s->buf_size);
366 } else
367 cx18_enqueue(s, buf, &s->q_io);
368 } else if (buf->readpos == buf->bytesused) {
369 int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES;
370
371 cx->vbi.sliced_mpeg_size[idx] = 0;
372 cx->vbi.inserted_frame++;
373 cx->vbi_data_inserted += buf->bytesused;
374 }
375 if (rc < 0)
376 return rc;
377 tot_written += rc;
378
379 if (tot_written == tot_count || single_frame)
380 break;
381 }
382 return tot_written;
383}
384
385static ssize_t cx18_read_pos(struct cx18_stream *s, char __user *ubuf,
386 size_t count, loff_t *pos, int non_block)
387{
388 ssize_t rc = count ? cx18_read(s, ubuf, count, non_block) : 0;
389 struct cx18 *cx = s->cx;
390
391 CX18_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
392 if (rc > 0)
393		*pos += rc;
394 return rc;
395}
396
397int cx18_start_capture(struct cx18_open_id *id)
398{
399 struct cx18 *cx = id->cx;
400 struct cx18_stream *s = &cx->streams[id->type];
401 struct cx18_stream *s_vbi;
402
403 if (s->type == CX18_ENC_STREAM_TYPE_RAD) {
404 /* you cannot read from these stream types. */
405 return -EPERM;
406 }
407
408 /* Try to claim this stream. */
409 if (cx18_claim_stream(id, s->type))
410 return -EBUSY;
411
412	/* If capture is already in progress, then there is nothing
413	   extra to do. */
414 if (test_bit(CX18_F_S_STREAMOFF, &s->s_flags) ||
415 test_and_set_bit(CX18_F_S_STREAMING, &s->s_flags)) {
416 set_bit(CX18_F_S_APPL_IO, &s->s_flags);
417 return 0;
418 }
419
420 /* Start VBI capture if required */
421 s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
422 if (s->type == CX18_ENC_STREAM_TYPE_MPG &&
423 test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
424 !test_and_set_bit(CX18_F_S_STREAMING, &s_vbi->s_flags)) {
425 /* Note: the CX18_ENC_STREAM_TYPE_VBI is claimed
426 automatically when the MPG stream is claimed.
427 We only need to start the VBI capturing. */
428 if (cx18_start_v4l2_encode_stream(s_vbi)) {
429 CX18_DEBUG_WARN("VBI capture start failed\n");
430
431 /* Failure, clean up and return an error */
432 clear_bit(CX18_F_S_STREAMING, &s_vbi->s_flags);
433 clear_bit(CX18_F_S_STREAMING, &s->s_flags);
434 /* also releases the associated VBI stream */
435 cx18_release_stream(s);
436 return -EIO;
437 }
438 CX18_DEBUG_INFO("VBI insertion started\n");
439 }
440
441 /* Tell the card to start capturing */
442 if (!cx18_start_v4l2_encode_stream(s)) {
443 /* We're done */
444 set_bit(CX18_F_S_APPL_IO, &s->s_flags);
445 /* Resume a possibly paused encoder */
446 if (test_and_clear_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags))
447 cx18_vapi(cx, CX18_CPU_CAPTURE_PAUSE, 1, s->handle);
448 return 0;
449 }
450
451 /* failure, clean up */
452 CX18_DEBUG_WARN("Failed to start capturing for stream %s\n", s->name);
453
454 /* Note: the CX18_ENC_STREAM_TYPE_VBI is released
455 automatically when the MPG stream is released.
456 We only need to stop the VBI capturing. */
457 if (s->type == CX18_ENC_STREAM_TYPE_MPG &&
458 test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags)) {
459 cx18_stop_v4l2_encode_stream(s_vbi, 0);
460 clear_bit(CX18_F_S_STREAMING, &s_vbi->s_flags);
461 }
462 clear_bit(CX18_F_S_STREAMING, &s->s_flags);
463 cx18_release_stream(s);
464 return -EIO;
465}
466
467ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
468 loff_t *pos)
469{
470 struct cx18_open_id *id = filp->private_data;
471 struct cx18 *cx = id->cx;
472 struct cx18_stream *s = &cx->streams[id->type];
473 int rc;
474
475 CX18_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
476
477 mutex_lock(&cx->serialize_lock);
478 rc = cx18_start_capture(id);
479 mutex_unlock(&cx->serialize_lock);
480 if (rc)
481 return rc;
482 return cx18_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
483}
484
485unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
486{
487 struct cx18_open_id *id = filp->private_data;
488 struct cx18 *cx = id->cx;
489 struct cx18_stream *s = &cx->streams[id->type];
490 int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags);
491
492 /* Start a capture if there is none */
493 if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
494 int rc;
495
496 mutex_lock(&cx->serialize_lock);
497 rc = cx18_start_capture(id);
498 mutex_unlock(&cx->serialize_lock);
499 if (rc) {
500 CX18_DEBUG_INFO("Could not start capture for %s (%d)\n",
501 s->name, rc);
502 return POLLERR;
503 }
504 CX18_DEBUG_FILE("Encoder poll started capture\n");
505 }
506
507 /* add stream's waitq to the poll list */
508 CX18_DEBUG_HI_FILE("Encoder poll\n");
509 poll_wait(filp, &s->waitq, wait);
510
511 if (s->q_full.length || s->q_io.length)
512 return POLLIN | POLLRDNORM;
513 if (eof)
514 return POLLHUP;
515 return 0;
516}
517
518void cx18_stop_capture(struct cx18_open_id *id, int gop_end)
519{
520 struct cx18 *cx = id->cx;
521 struct cx18_stream *s = &cx->streams[id->type];
522
523 CX18_DEBUG_IOCTL("close() of %s\n", s->name);
524
525 /* 'Unclaim' this stream */
526
527 /* Stop capturing */
528 if (test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
529 struct cx18_stream *s_vbi =
530 &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
531
532 CX18_DEBUG_INFO("close stopping capture\n");
533 /* Special case: a running VBI capture for VBI insertion
534 in the mpeg stream. Need to stop that too. */
535 if (id->type == CX18_ENC_STREAM_TYPE_MPG &&
536 test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags) &&
537 !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
538 CX18_DEBUG_INFO("close stopping embedded VBI capture\n");
539 cx18_stop_v4l2_encode_stream(s_vbi, 0);
540 }
541 if (id->type == CX18_ENC_STREAM_TYPE_VBI &&
542 test_bit(CX18_F_S_INTERNAL_USE, &s->s_flags))
543 /* Also used internally, don't stop capturing */
544 s->id = -1;
545 else
546 cx18_stop_v4l2_encode_stream(s, gop_end);
547 }
548 if (!gop_end) {
549 clear_bit(CX18_F_S_APPL_IO, &s->s_flags);
550 clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);
551 cx18_release_stream(s);
552 }
553}
554
555int cx18_v4l2_close(struct inode *inode, struct file *filp)
556{
557 struct cx18_open_id *id = filp->private_data;
558 struct cx18 *cx = id->cx;
559 struct cx18_stream *s = &cx->streams[id->type];
560
561 CX18_DEBUG_IOCTL("close() of %s\n", s->name);
562
563 v4l2_prio_close(&cx->prio, &id->prio);
564
565 /* Easy case first: this stream was never claimed by us */
566 if (s->id != id->open_id) {
567 kfree(id);
568 return 0;
569 }
570
571 /* 'Unclaim' this stream */
572
573 /* Stop radio */
574 mutex_lock(&cx->serialize_lock);
575 if (id->type == CX18_ENC_STREAM_TYPE_RAD) {
576 /* Closing radio device, return to TV mode */
577 cx18_mute(cx);
578 /* Mark that the radio is no longer in use */
579 clear_bit(CX18_F_I_RADIO_USER, &cx->i_flags);
580 /* Switch tuner to TV */
581 cx18_call_i2c_clients(cx, VIDIOC_S_STD, &cx->std);
582 /* Select correct audio input (i.e. TV tuner or Line in) */
583 cx18_audio_set_io(cx);
584 if (atomic_read(&cx->capturing) > 0) {
585 /* Undo video mute */
586 cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle,
587 cx->params.video_mute |
588 (cx->params.video_mute_yuv << 8));
589 }
590 /* Done! Unmute and continue. */
591 cx18_unmute(cx);
592 cx18_release_stream(s);
593 } else {
594 cx18_stop_capture(id, 0);
595 }
596 kfree(id);
597 mutex_unlock(&cx->serialize_lock);
598 return 0;
599}
600
601static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
602{
603 struct cx18 *cx = s->cx;
604 struct cx18_open_id *item;
605
606 CX18_DEBUG_FILE("open %s\n", s->name);
607
608 /* Allocate memory */
609 item = kmalloc(sizeof(struct cx18_open_id), GFP_KERNEL);
610 if (NULL == item) {
611 CX18_DEBUG_WARN("nomem on v4l2 open\n");
612 return -ENOMEM;
613 }
614 item->cx = cx;
615 item->type = s->type;
616 v4l2_prio_open(&cx->prio, &item->prio);
617
618 item->open_id = cx->open_id++;
619 filp->private_data = item;
620
621 if (item->type == CX18_ENC_STREAM_TYPE_RAD) {
622 /* Try to claim this stream */
623 if (cx18_claim_stream(item, item->type)) {
624 /* No, it's already in use */
625 kfree(item);
626 return -EBUSY;
627 }
628
629 if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
630 if (atomic_read(&cx->capturing) > 0) {
631 /* switching to radio while capture is
632 in progress is not polite */
633 cx18_release_stream(s);
634 kfree(item);
635 return -EBUSY;
636 }
637 }
638
639 /* Mark that the radio is being used. */
640 set_bit(CX18_F_I_RADIO_USER, &cx->i_flags);
641 /* We have the radio */
642 cx18_mute(cx);
643 /* Switch tuner to radio */
644 cx18_call_i2c_clients(cx, AUDC_SET_RADIO, NULL);
645 /* Select the correct audio input (i.e. radio tuner) */
646 cx18_audio_set_io(cx);
647 /* Done! Unmute and continue. */
648 cx18_unmute(cx);
649 }
650 return 0;
651}
652
653int cx18_v4l2_open(struct inode *inode, struct file *filp)
654{
655 int res, x, y = 0;
656 struct cx18 *cx = NULL;
657 struct cx18_stream *s = NULL;
658 int minor = iminor(inode);
659
660 /* Find which card this open was on */
661 spin_lock(&cx18_cards_lock);
662 for (x = 0; cx == NULL && x < cx18_cards_active; x++) {
663 /* find out which stream this open was on */
664 for (y = 0; y < CX18_MAX_STREAMS; y++) {
665 s = &cx18_cards[x]->streams[y];
666 if (s->v4l2dev && s->v4l2dev->minor == minor) {
667 cx = cx18_cards[x];
668 break;
669 }
670 }
671 }
672 spin_unlock(&cx18_cards_lock);
673
674 if (cx == NULL) {
675 /* Couldn't find a device registered
676 on that minor, shouldn't happen! */
677 printk(KERN_WARNING "No cx18 device found on minor %d\n",
678 minor);
679 return -ENXIO;
680 }
681
682 mutex_lock(&cx->serialize_lock);
683 if (cx18_init_on_first_open(cx)) {
684 CX18_ERR("Failed to initialize on minor %d\n", minor);
685 mutex_unlock(&cx->serialize_lock);
686 return -ENXIO;
687 }
688 res = cx18_serialized_open(s, filp);
689 mutex_unlock(&cx->serialize_lock);
690 return res;
691}
692
693void cx18_mute(struct cx18 *cx)
694{
695 if (atomic_read(&cx->capturing))
696 cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
697 cx18_find_handle(cx), 1);
698 CX18_DEBUG_INFO("Mute\n");
699}
700
701void cx18_unmute(struct cx18 *cx)
702{
703 if (atomic_read(&cx->capturing)) {
704 cx18_msleep_timeout(100, 0);
705 cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2,
706 cx18_find_handle(cx), 12);
707 cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
708 cx18_find_handle(cx), 0);
709 }
710 CX18_DEBUG_INFO("Unmute\n");
711}
diff --git a/drivers/media/video/cx18/cx18-fileops.h b/drivers/media/video/cx18/cx18-fileops.h
new file mode 100644
index 000000000000..16cdafbd24c5
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-fileops.h
@@ -0,0 +1,45 @@
1/*
2 * cx18 file operation functions
3 *
4 * Derived from ivtv-fileops.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24/* Testing/Debugging */
25int cx18_v4l2_open(struct inode *inode, struct file *filp);
26ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
27 loff_t *pos);
28ssize_t cx18_v4l2_write(struct file *filp, const char __user *buf, size_t count,
29 loff_t *pos);
30int cx18_v4l2_close(struct inode *inode, struct file *filp);
31unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
32int cx18_start_capture(struct cx18_open_id *id);
33void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
34void cx18_mute(struct cx18 *cx);
35void cx18_unmute(struct cx18 *cx);
36
37/* Utilities */
38
39/* Try to claim a stream for the filehandle. Return 0 on success,
40 -EBUSY if stream already claimed. Once a stream is claimed, it
41 remains claimed until the associated filehandle is closed. */
42int cx18_claim_stream(struct cx18_open_id *id, int type);
43
44/* Release a previously claimed stream. */
45void cx18_release_stream(struct cx18_stream *s);
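The claim/release helpers declared above bracket all file-handle I/O: cx18_start_capture() claims the stream before reading and cx18_v4l2_close() releases it again. A minimal sketch of that calling pattern, assuming a caller that already holds a struct cx18_open_id (the wrapper name is hypothetical):

static int example_claimed_io(struct cx18_open_id *id)
{
	struct cx18_stream *s = &id->cx->streams[id->type];

	if (cx18_claim_stream(id, id->type))
		return -EBUSY;	/* someone else owns the stream */

	/* ... read from or configure the claimed stream ... */

	cx18_release_stream(s);	/* normally deferred to close() */
	return 0;
}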
diff --git a/drivers/media/video/cx18/cx18-firmware.c b/drivers/media/video/cx18/cx18-firmware.c
new file mode 100644
index 000000000000..2694ce350631
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-firmware.c
@@ -0,0 +1,373 @@
1/*
2 * cx18 firmware functions
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#include "cx18-driver.h"
23#include "cx18-scb.h"
24#include "cx18-irq.h"
25#include "cx18-firmware.h"
26#include "cx18-cards.h"
27#include <linux/firmware.h>
28
29#define CX18_PROC_SOFT_RESET 0xc70010
30#define CX18_DDR_SOFT_RESET 0xc70014
31#define CX18_CLOCK_SELECT1 0xc71000
32#define CX18_CLOCK_SELECT2 0xc71004
33#define CX18_HALF_CLOCK_SELECT1 0xc71008
34#define CX18_HALF_CLOCK_SELECT2 0xc7100C
35#define CX18_CLOCK_POLARITY1 0xc71010
36#define CX18_CLOCK_POLARITY2 0xc71014
37#define CX18_ADD_DELAY_ENABLE1 0xc71018
38#define CX18_ADD_DELAY_ENABLE2 0xc7101C
39#define CX18_CLOCK_ENABLE1 0xc71020
40#define CX18_CLOCK_ENABLE2 0xc71024
41
42#define CX18_REG_BUS_TIMEOUT_EN 0xc72024
43
44#define CX18_AUDIO_ENABLE 0xc72014
45#define CX18_REG_BUS_TIMEOUT_EN 0xc72024
46
47#define CX18_FAST_CLOCK_PLL_INT 0xc78000
48#define CX18_FAST_CLOCK_PLL_FRAC 0xc78004
49#define CX18_FAST_CLOCK_PLL_POST 0xc78008
50#define CX18_FAST_CLOCK_PLL_PRESCALE 0xc7800C
51#define CX18_FAST_CLOCK_PLL_ADJUST_BANDWIDTH 0xc78010
52
53#define CX18_SLOW_CLOCK_PLL_INT 0xc78014
54#define CX18_SLOW_CLOCK_PLL_FRAC 0xc78018
55#define CX18_SLOW_CLOCK_PLL_POST 0xc7801C
56#define CX18_MPEG_CLOCK_PLL_INT 0xc78040
57#define CX18_MPEG_CLOCK_PLL_FRAC 0xc78044
58#define CX18_MPEG_CLOCK_PLL_POST 0xc78048
59#define CX18_PLL_POWER_DOWN 0xc78088
60#define CX18_SW1_INT_STATUS 0xc73104
61#define CX18_SW1_INT_ENABLE_PCI 0xc7311C
62#define CX18_SW2_INT_SET 0xc73140
63#define CX18_SW2_INT_STATUS 0xc73144
64#define CX18_ADEC_CONTROL 0xc78120
65
66#define CX18_DDR_REQUEST_ENABLE 0xc80000
67#define CX18_DDR_CHIP_CONFIG 0xc80004
68#define CX18_DDR_REFRESH 0xc80008
69#define CX18_DDR_TIMING1 0xc8000C
70#define CX18_DDR_TIMING2 0xc80010
71#define CX18_DDR_POWER_REG 0xc8001C
72
73#define CX18_DDR_TUNE_LANE 0xc80048
74#define CX18_DDR_INITIAL_EMRS 0xc80054
75#define CX18_DDR_MB_PER_ROW_7 0xc8009C
76#define CX18_DDR_BASE_63_ADDR 0xc804FC
77
78#define CX18_WMB_CLIENT02 0xc90108
79#define CX18_WMB_CLIENT05 0xc90114
80#define CX18_WMB_CLIENT06 0xc90118
81#define CX18_WMB_CLIENT07 0xc9011C
82#define CX18_WMB_CLIENT08 0xc90120
83#define CX18_WMB_CLIENT09 0xc90124
84#define CX18_WMB_CLIENT10 0xc90128
85#define CX18_WMB_CLIENT11 0xc9012C
86#define CX18_WMB_CLIENT12 0xc90130
87#define CX18_WMB_CLIENT13 0xc90134
88#define CX18_WMB_CLIENT14 0xc90138
89
90#define CX18_DSP0_INTERRUPT_MASK 0xd0004C
91
92/* Encoder/decoder firmware sizes */
93#define CX18_FW_CPU_SIZE (174716)
94#define CX18_FW_APU_SIZE (141200)
95
96#define APU_ROM_SYNC1 0x6D676553 /* "mgeS" */
97#define APU_ROM_SYNC2 0x72646548 /* "rdeH" */
98
99struct cx18_apu_rom_seghdr {
100 u32 sync1;
101 u32 sync2;
102 u32 addr;
103 u32 size;
104};
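Read as little-endian ASCII, the two sync constants spell "Segm" and "Hedr" ("Segment Header"); the "mgeS"/"rdeH" comments show the same bytes from the most significant end. A minimal sketch of the header check performed by load_apu_fw_direct() below (the helper name is hypothetical):

static inline int example_is_apu_seghdr(const struct cx18_apu_rom_seghdr *hdr)
{
	/* 0x6D676553 -> 'S' 'e' 'g' 'm' and 0x72646548 -> 'H' 'e' 'd' 'r'
	   when the u32s are stored little-endian */
	return hdr->sync1 == APU_ROM_SYNC1 && hdr->sync2 == APU_ROM_SYNC2;
}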
105
106static int load_cpu_fw_direct(const char *fn, u8 __iomem *mem, struct cx18 *cx, long size)
107{
108 const struct firmware *fw = NULL;
109 int retries = 3;
110 int i, j;
111 u32 __iomem *dst = (u32 __iomem *)mem;
112 const u32 *src;
113
114retry:
115 if (!retries || request_firmware(&fw, fn, &cx->dev->dev)) {
116 CX18_ERR("Unable to open firmware %s (must be %ld bytes)\n",
117 fn, size);
118 CX18_ERR("Did you put the firmware in the hotplug firmware directory?\n");
119 return -ENOMEM;
120 }
121
122 src = (const u32 *)fw->data;
123
124 if (fw->size != size) {
125 /* Due to race conditions in firmware loading (esp. with
126 udev <0.95) the wrong file was sometimes loaded. So we check
127		   file sizes to see if at least the right-sized file was
128 loaded. If not, then we retry. */
129 CX18_INFO("retry: file loaded was not %s (expected size %ld, got %zd)\n",
130 fn, size, fw->size);
131 release_firmware(fw);
132 retries--;
133 goto retry;
134 }
135 for (i = 0; i < fw->size; i += 4096) {
136 setup_page(i);
137 for (j = i; j < fw->size && j < i + 4096; j += 4) {
138 /* no need for endianness conversion on the ppc */
139 __raw_writel(*src, dst);
140 if (__raw_readl(dst) != *src) {
141 CX18_ERR("Mismatch at offset %x\n", i);
142 release_firmware(fw);
143 return -EIO;
144 }
145 dst++;
146 src++;
147 }
148 }
149 if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags))
150 CX18_INFO("loaded %s firmware (%zd bytes)\n", fn, fw->size);
151 release_firmware(fw);
152 return size;
153}
154
155static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx, long size)
156{
157 const struct firmware *fw = NULL;
158 int retries = 3;
159 int i, j;
160 const u32 *src;
161 struct cx18_apu_rom_seghdr seghdr;
162 const u8 *vers;
163 u32 offset = 0;
164 u32 apu_version = 0;
165 int sz;
166
167retry:
168 if (!retries || request_firmware(&fw, fn, &cx->dev->dev)) {
169 CX18_ERR("unable to open firmware %s (must be %ld bytes)\n",
170 fn, size);
171 CX18_ERR("did you put the firmware in the hotplug firmware directory?\n");
172 return -ENOMEM;
173 }
174
175 src = (const u32 *)fw->data;
176 vers = fw->data + sizeof(seghdr);
177 sz = fw->size;
178
179 if (fw->size != size) {
180 /* Due to race conditions in firmware loading (esp. with
181 udev <0.95) the wrong file was sometimes loaded. So we check
182		   file sizes to see if at least the right-sized file was
183 loaded. If not, then we retry. */
184 CX18_INFO("retry: file loaded was not %s (expected size %ld, got %zd)\n",
185 fn, size, fw->size);
186 release_firmware(fw);
187 retries--;
188 goto retry;
189 }
190 apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
191 while (offset + sizeof(seghdr) < size) {
192 /* TODO: byteswapping */
193 memcpy(&seghdr, src + offset / 4, sizeof(seghdr));
194 offset += sizeof(seghdr);
195 if (seghdr.sync1 != APU_ROM_SYNC1 ||
196 seghdr.sync2 != APU_ROM_SYNC2) {
197 offset += seghdr.size;
198 continue;
199 }
200 CX18_DEBUG_INFO("load segment %x-%x\n", seghdr.addr,
201 seghdr.addr + seghdr.size - 1);
202 if (offset + seghdr.size > sz)
203 break;
204 for (i = 0; i < seghdr.size; i += 4096) {
205 setup_page(offset + i);
206 for (j = i; j < seghdr.size && j < i + 4096; j += 4) {
207 /* no need for endianness conversion on the ppc */
208 __raw_writel(src[(offset + j) / 4], dst + seghdr.addr + j);
209 if (__raw_readl(dst + seghdr.addr + j) != src[(offset + j) / 4]) {
210 CX18_ERR("Mismatch at offset %x\n", offset + j);
211 release_firmware(fw);
212 return -EIO;
213 }
214 }
215 }
216 offset += seghdr.size;
217 }
218 if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags))
219 CX18_INFO("loaded %s firmware V%08x (%zd bytes)\n",
220 fn, apu_version, fw->size);
221 release_firmware(fw);
222 /* Clear bit0 for APU to start from 0 */
223 write_reg(read_reg(0xc72030) & ~1, 0xc72030);
224 return size;
225}
226
227void cx18_halt_firmware(struct cx18 *cx)
228{
229 CX18_DEBUG_INFO("Preparing for firmware halt.\n");
230 write_reg(0x000F000F, CX18_PROC_SOFT_RESET); /* stop the fw */
231 write_reg(0x00020002, CX18_ADEC_CONTROL);
232}
233
234void cx18_init_power(struct cx18 *cx, int lowpwr)
235{
236 /* power-down Spare and AOM PLLs */
237 /* power-up fast, slow and mpeg PLLs */
238 write_reg(0x00000008, CX18_PLL_POWER_DOWN);
239
240 /* ADEC out of sleep */
241 write_reg(0x00020000, CX18_ADEC_CONTROL);
242
243 /* The fast clock is at 200/245 MHz */
244 write_reg(lowpwr ? 0xD : 0x11, CX18_FAST_CLOCK_PLL_INT);
245 write_reg(lowpwr ? 0x1EFBF37 : 0x038E3D7, CX18_FAST_CLOCK_PLL_FRAC);
246
247 write_reg(2, CX18_FAST_CLOCK_PLL_POST);
248 write_reg(1, CX18_FAST_CLOCK_PLL_PRESCALE);
249 write_reg(4, CX18_FAST_CLOCK_PLL_ADJUST_BANDWIDTH);
250
251 /* set slow clock to 125/120 MHz */
252 write_reg(lowpwr ? 0x11 : 0x10, CX18_SLOW_CLOCK_PLL_INT);
253 write_reg(lowpwr ? 0xEBAF05 : 0x18618A8, CX18_SLOW_CLOCK_PLL_FRAC);
254 write_reg(4, CX18_SLOW_CLOCK_PLL_POST);
255
256 /* mpeg clock pll 54MHz */
257 write_reg(0xF, CX18_MPEG_CLOCK_PLL_INT);
258 write_reg(0x2BCFEF, CX18_MPEG_CLOCK_PLL_FRAC);
259 write_reg(8, CX18_MPEG_CLOCK_PLL_POST);
260
261 /* Defaults */
262 /* APU = SC or SC/2 = 125/62.5 */
263 /* EPU = SC = 125 */
264 /* DDR = FC = 180 */
265 /* ENC = SC = 125 */
266 /* AI1 = SC = 125 */
267 /* VIM2 = disabled */
268 /* PCI = FC/2 = 90 */
269 /* AI2 = disabled */
270 /* DEMUX = disabled */
271 /* AO = SC/2 = 62.5 */
272 /* SER = 54MHz */
273 /* VFC = disabled */
274 /* USB = disabled */
275
276 write_reg(lowpwr ? 0xFFFF0020 : 0x00060004, CX18_CLOCK_SELECT1);
277 write_reg(lowpwr ? 0xFFFF0004 : 0x00060006, CX18_CLOCK_SELECT2);
278
279 write_reg(0xFFFF0002, CX18_HALF_CLOCK_SELECT1);
280 write_reg(0xFFFF0104, CX18_HALF_CLOCK_SELECT2);
281
282 write_reg(0xFFFF9026, CX18_CLOCK_ENABLE1);
283 write_reg(0xFFFF3105, CX18_CLOCK_ENABLE2);
284}
285
286void cx18_init_memory(struct cx18 *cx)
287{
288 cx18_msleep_timeout(10, 0);
289 write_reg(0x10000, CX18_DDR_SOFT_RESET);
290 cx18_msleep_timeout(10, 0);
291
292 write_reg(cx->card->ddr.chip_config, CX18_DDR_CHIP_CONFIG);
293
294 cx18_msleep_timeout(10, 0);
295
296 write_reg(cx->card->ddr.refresh, CX18_DDR_REFRESH);
297 write_reg(cx->card->ddr.timing1, CX18_DDR_TIMING1);
298 write_reg(cx->card->ddr.timing2, CX18_DDR_TIMING2);
299
300 cx18_msleep_timeout(10, 0);
301
302 /* Initialize DQS pad time */
303 write_reg(cx->card->ddr.tune_lane, CX18_DDR_TUNE_LANE);
304 write_reg(cx->card->ddr.initial_emrs, CX18_DDR_INITIAL_EMRS);
305
306 cx18_msleep_timeout(10, 0);
307
308 write_reg(0x20000, CX18_DDR_SOFT_RESET);
309 cx18_msleep_timeout(10, 0);
310
311 /* use power-down mode when idle */
312 write_reg(0x00000010, CX18_DDR_POWER_REG);
313
314 write_reg(0x10001, CX18_REG_BUS_TIMEOUT_EN);
315
316 write_reg(0x48, CX18_DDR_MB_PER_ROW_7);
317 write_reg(0xE0000, CX18_DDR_BASE_63_ADDR);
318
319 write_reg(0x00000101, CX18_WMB_CLIENT02); /* AO */
320 write_reg(0x00000101, CX18_WMB_CLIENT09); /* AI2 */
321 write_reg(0x00000101, CX18_WMB_CLIENT05); /* VIM1 */
322 write_reg(0x00000101, CX18_WMB_CLIENT06); /* AI1 */
323 write_reg(0x00000101, CX18_WMB_CLIENT07); /* 3D comb */
324 write_reg(0x00000101, CX18_WMB_CLIENT10); /* ME */
325 write_reg(0x00000101, CX18_WMB_CLIENT12); /* ENC */
326 write_reg(0x00000101, CX18_WMB_CLIENT13); /* PK */
327 write_reg(0x00000101, CX18_WMB_CLIENT11); /* RC */
328 write_reg(0x00000101, CX18_WMB_CLIENT14); /* AVO */
329}
330
331int cx18_firmware_init(struct cx18 *cx)
332{
333 /* Allow chip to control CLKRUN */
334 write_reg(0x5, CX18_DSP0_INTERRUPT_MASK);
335
336 write_reg(0x000F000F, CX18_PROC_SOFT_RESET); /* stop the fw */
337
338 cx18_msleep_timeout(1, 0);
339
340 sw1_irq_enable(IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU);
341 sw2_irq_enable(IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);
342
343 /* Only if the processor is not running */
344 if (read_reg(CX18_PROC_SOFT_RESET) & 8) {
345 int sz = load_apu_fw_direct("v4l-cx23418-apu.fw",
346 cx->enc_mem, cx, CX18_FW_APU_SIZE);
347
348 sz = sz <= 0 ? sz : load_cpu_fw_direct("v4l-cx23418-cpu.fw",
349 cx->enc_mem, cx, CX18_FW_CPU_SIZE);
350
351 if (sz > 0) {
352 int retries = 0;
353
354 /* start the CPU */
355 write_reg(0x00080000, CX18_PROC_SOFT_RESET);
356				while (retries++ < 50) { /* Loop for max 500 ms */
357 if ((read_reg(CX18_PROC_SOFT_RESET) & 1) == 0)
358 break;
359 cx18_msleep_timeout(10, 0);
360 }
361 cx18_msleep_timeout(200, 0);
362 if (retries == 51) {
363 CX18_ERR("Could not start the CPU\n");
364 return -EIO;
365 }
366 }
367 if (sz <= 0)
368 return -EIO;
369 }
370 /* initialize GPIO */
371 write_reg(0x14001400, 0xC78110);
372 return 0;
373}
diff --git a/drivers/media/video/cx18/cx18-firmware.h b/drivers/media/video/cx18/cx18-firmware.h
new file mode 100644
index 000000000000..38d4c05e8499
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-firmware.h
@@ -0,0 +1,25 @@
1/*
2 * cx18 firmware functions
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22int cx18_firmware_init(struct cx18 *cx);
23void cx18_halt_firmware(struct cx18 *cx);
24void cx18_init_memory(struct cx18 *cx);
25void cx18_init_power(struct cx18 *cx, int lowpwr);
diff --git a/drivers/media/video/cx18/cx18-gpio.c b/drivers/media/video/cx18/cx18-gpio.c
new file mode 100644
index 000000000000..19253e6b8673
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-gpio.c
@@ -0,0 +1,74 @@
1/*
2 * cx18 gpio functions
3 *
4 * Derived from ivtv-gpio.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-cards.h"
26#include "cx18-gpio.h"
27#include "tuner-xc2028.h"
28
29/********************* GPIO stuff *********************/
30
31/* GPIO registers */
32#define CX18_REG_GPIO_IN 0xc72010
33#define CX18_REG_GPIO_OUT1 0xc78100
34#define CX18_REG_GPIO_DIR1 0xc78108
35#define CX18_REG_GPIO_OUT2 0xc78104
36#define CX18_REG_GPIO_DIR2 0xc7810c
37
38/*
39 * HVR-1600 GPIO pins, courtesy of Hauppauge:
40 *
41 * gpio0: zilog ir process reset pin
42 * gpio1: zilog programming pin (you should never use this)
43 * gpio12: cx24227 reset pin
44 * gpio13: cs5345 reset pin
45*/
46
47void cx18_gpio_init(struct cx18 *cx)
48{
49 if (cx->card->gpio_init.direction == 0)
50 return;
51
52 CX18_DEBUG_INFO("GPIO initial dir: %08x out: %08x\n",
53 read_reg(CX18_REG_GPIO_DIR1), read_reg(CX18_REG_GPIO_OUT1));
54
55 /* init output data then direction */
56 write_reg(cx->card->gpio_init.direction << 16, CX18_REG_GPIO_DIR1);
57 write_reg(0, CX18_REG_GPIO_DIR2);
58 write_reg((cx->card->gpio_init.direction << 16) |
59 cx->card->gpio_init.initial_value, CX18_REG_GPIO_OUT1);
60 write_reg(0, CX18_REG_GPIO_OUT2);
61}
62
63/* Xceive tuner reset function */
64int cx18_reset_tuner_gpio(void *dev, int cmd, int value)
65{
66 struct i2c_algo_bit_data *algo = dev;
67 struct cx18 *cx = algo->data;
68/* int curdir, curout;*/
69
70 if (cmd != XC2028_TUNER_RESET)
71 return 0;
72 CX18_DEBUG_INFO("Resetting tuner\n");
73 return 0;
74}
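cx18_reset_tuner_gpio() above is still a stub (note the commented-out curdir/curout locals). A hypothetical sketch of what the eventual reset pulse could look like, assuming, based on cx18_gpio_init(), that the upper 16 bits of the OUT/DIR registers select the pins being updated and the lower 16 bits carry the value/direction; the pin number, delay and helper name are illustrative only:

static void example_gpio_pulse_low(struct cx18 *cx, unsigned int pin)
{
	u32 curdir = read_reg(CX18_REG_GPIO_DIR1);
	u32 curout = read_reg(CX18_REG_GPIO_OUT1);

	write_reg(curdir | (1 << (pin + 16)), CX18_REG_GPIO_DIR1);	/* output */
	write_reg((curout | (1 << (pin + 16))) & ~(1 << pin),
		  CX18_REG_GPIO_OUT1);					/* drive low */
	mdelay(1);
	write_reg(curout | (1 << (pin + 16)) | (1 << pin),
		  CX18_REG_GPIO_OUT1);					/* release high */
}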
diff --git a/drivers/media/video/cx18/cx18-gpio.h b/drivers/media/video/cx18/cx18-gpio.h
new file mode 100644
index 000000000000..41bac8856b50
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-gpio.h
@@ -0,0 +1,24 @@
1/*
2 * cx18 gpio functions
3 *
4 * Derived from ivtv-gpio.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23void cx18_gpio_init(struct cx18 *cx);
24int cx18_reset_tuner_gpio(void *dev, int cmd, int value);
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
new file mode 100644
index 000000000000..18c88d1e4833
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -0,0 +1,431 @@
1/*
2 * cx18 I2C functions
3 *
4 * Derived from ivtv-i2c.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-cards.h"
26#include "cx18-gpio.h"
27#include "cx18-av-core.h"
28
29#include <media/ir-kbd-i2c.h>
30
31#define CX18_REG_I2C_1_WR 0xf15000
32#define CX18_REG_I2C_1_RD 0xf15008
33#define CX18_REG_I2C_2_WR 0xf25100
34#define CX18_REG_I2C_2_RD 0xf25108
35
36#define SETSCL_BIT 0x0001
37#define SETSDL_BIT 0x0002
38#define GETSCL_BIT 0x0004
39#define GETSDL_BIT 0x0008
40
41#ifndef I2C_ADAP_CLASS_TV_ANALOG
42#define I2C_ADAP_CLASS_TV_ANALOG I2C_CLASS_TV_ANALOG
43#endif
44
45#define CX18_CS5345_I2C_ADDR 0x4c
46
47/* This array should match the CX18_HW_ defines */
48static const u8 hw_driverids[] = {
49 I2C_DRIVERID_TUNER,
50 I2C_DRIVERID_TVEEPROM,
51 I2C_DRIVERID_CS5345,
52 0, /* CX18_HW_GPIO dummy driver ID */
53 0 /* CX18_HW_CX23418 dummy driver ID */
54};
55
56/* This array should match the CX18_HW_ defines */
57static const u8 hw_addrs[] = {
58 0,
59 0,
60 CX18_CS5345_I2C_ADDR,
61 0, /* CX18_HW_GPIO dummy driver ID */
62 0, /* CX18_HW_CX23418 dummy driver ID */
63};
64
65/* This array should match the CX18_HW_ defines */
66/* This might well become a card-specific array */
67static const u8 hw_bus[] = {
68 0,
69 0,
70 0,
71 0, /* CX18_HW_GPIO dummy driver ID */
72 0, /* CX18_HW_CX23418 dummy driver ID */
73};
74
75/* This array should match the CX18_HW_ defines */
76static const char * const hw_drivernames[] = {
77 "tuner",
78 "tveeprom",
79 "cs5345",
80 "gpio",
81 "cx23418",
82};
83
84int cx18_i2c_register(struct cx18 *cx, unsigned idx)
85{
86 struct i2c_board_info info;
87 struct i2c_client *c;
88 u8 id, bus;
89 int i;
90
91 CX18_DEBUG_I2C("i2c client register\n");
92 if (idx >= ARRAY_SIZE(hw_driverids) || hw_driverids[idx] == 0)
93 return -1;
94 id = hw_driverids[idx];
95 bus = hw_bus[idx];
96 memset(&info, 0, sizeof(info));
97 strlcpy(info.driver_name, hw_drivernames[idx],
98 sizeof(info.driver_name));
99 info.addr = hw_addrs[idx];
100 for (i = 0; i < I2C_CLIENTS_MAX; i++)
101 if (cx->i2c_clients[i] == NULL)
102 break;
103
104 if (i == I2C_CLIENTS_MAX) {
105 CX18_ERR("insufficient room for new I2C client!\n");
106 return -ENOMEM;
107 }
108
109 if (id != I2C_DRIVERID_TUNER) {
110 c = i2c_new_device(&cx->i2c_adap[bus], &info);
111 if (c->driver == NULL)
112 i2c_unregister_device(c);
113 else
114 cx->i2c_clients[i] = c;
115 return cx->i2c_clients[i] ? 0 : -ENODEV;
116 }
117
118 /* special tuner handling */
119 c = i2c_new_probed_device(&cx->i2c_adap[1], &info, cx->card_i2c->radio);
120 if (c && c->driver == NULL)
121 i2c_unregister_device(c);
122 else if (c)
123 cx->i2c_clients[i++] = c;
124 c = i2c_new_probed_device(&cx->i2c_adap[1], &info, cx->card_i2c->demod);
125 if (c && c->driver == NULL)
126 i2c_unregister_device(c);
127 else if (c)
128 cx->i2c_clients[i++] = c;
129 c = i2c_new_probed_device(&cx->i2c_adap[1], &info, cx->card_i2c->tv);
130 if (c && c->driver == NULL)
131 i2c_unregister_device(c);
132 else if (c)
133 cx->i2c_clients[i++] = c;
134 return 0;
135}
136
137static int attach_inform(struct i2c_client *client)
138{
139 return 0;
140}
141
142static int detach_inform(struct i2c_client *client)
143{
144 int i;
145 struct cx18 *cx = (struct cx18 *)i2c_get_adapdata(client->adapter);
146
147 CX18_DEBUG_I2C("i2c client detach\n");
148 for (i = 0; i < I2C_CLIENTS_MAX; i++) {
149 if (cx->i2c_clients[i] == client) {
150 cx->i2c_clients[i] = NULL;
151 break;
152 }
153 }
154 CX18_DEBUG_I2C("i2c detach [client=%s,%s]\n",
155 client->name, (i < I2C_CLIENTS_MAX) ? "ok" : "failed");
156
157 return 0;
158}
159
160static void cx18_setscl(void *data, int state)
161{
162 struct cx18 *cx = ((struct cx18_i2c_algo_callback_data *)data)->cx;
163 int bus_index = ((struct cx18_i2c_algo_callback_data *)data)->bus_index;
164 u32 addr = bus_index ? CX18_REG_I2C_2_WR : CX18_REG_I2C_1_WR;
165 u32 r = read_reg(addr);
166
167 if (state)
168 write_reg_sync(r | SETSCL_BIT, addr);
169 else
170 write_reg_sync(r & ~SETSCL_BIT, addr);
171}
172
173static void cx18_setsda(void *data, int state)
174{
175 struct cx18 *cx = ((struct cx18_i2c_algo_callback_data *)data)->cx;
176 int bus_index = ((struct cx18_i2c_algo_callback_data *)data)->bus_index;
177 u32 addr = bus_index ? CX18_REG_I2C_2_WR : CX18_REG_I2C_1_WR;
178 u32 r = read_reg(addr);
179
180 if (state)
181 write_reg_sync(r | SETSDL_BIT, addr);
182 else
183 write_reg_sync(r & ~SETSDL_BIT, addr);
184}
185
186static int cx18_getscl(void *data)
187{
188 struct cx18 *cx = ((struct cx18_i2c_algo_callback_data *)data)->cx;
189 int bus_index = ((struct cx18_i2c_algo_callback_data *)data)->bus_index;
190 u32 addr = bus_index ? CX18_REG_I2C_2_RD : CX18_REG_I2C_1_RD;
191
192 return read_reg(addr) & GETSCL_BIT;
193}
194
195static int cx18_getsda(void *data)
196{
197 struct cx18 *cx = ((struct cx18_i2c_algo_callback_data *)data)->cx;
198 int bus_index = ((struct cx18_i2c_algo_callback_data *)data)->bus_index;
199 u32 addr = bus_index ? CX18_REG_I2C_2_RD : CX18_REG_I2C_1_RD;
200
201 return read_reg(addr) & GETSDL_BIT;
202}
203
204/* template for i2c-bit-algo */
205static struct i2c_adapter cx18_i2c_adap_template = {
206 .name = "cx18 i2c driver",
207 .id = I2C_HW_B_CX2341X,
208 .algo = NULL, /* set by i2c-algo-bit */
209 .algo_data = NULL, /* filled from template */
210 .client_register = attach_inform,
211 .client_unregister = detach_inform,
212 .owner = THIS_MODULE,
213};
214
215#define CX18_SCL_PERIOD (10) /* usecs; 10 usec is the period of a 100 kHz clock */
216#define CX18_ALGO_BIT_TIMEOUT (2) /* seconds */
217
218static struct i2c_algo_bit_data cx18_i2c_algo_template = {
219 .setsda = cx18_setsda,
220 .setscl = cx18_setscl,
221 .getsda = cx18_getsda,
222 .getscl = cx18_getscl,
223 .udelay = CX18_SCL_PERIOD/2, /* 1/2 clock period in usec*/
224 .timeout = CX18_ALGO_BIT_TIMEOUT*HZ /* jiffies */
225};
226
227static struct i2c_client cx18_i2c_client_template = {
228 .name = "cx18 internal",
229};
230
231int cx18_call_i2c_client(struct cx18 *cx, int addr, unsigned cmd, void *arg)
232{
233 struct i2c_client *client;
234 int retval;
235 int i;
236
237 CX18_DEBUG_I2C("call_i2c_client addr=%02x\n", addr);
238 for (i = 0; i < I2C_CLIENTS_MAX; i++) {
239 client = cx->i2c_clients[i];
240 if (client == NULL || client->driver == NULL ||
241 client->driver->command == NULL)
242 continue;
243 if (addr == client->addr) {
244 retval = client->driver->command(client, cmd, arg);
245 return retval;
246 }
247 }
248 if (cmd != VIDIOC_G_CHIP_IDENT)
249 CX18_ERR("i2c addr 0x%02x not found for cmd 0x%x!\n",
250 addr, cmd);
251 return -ENODEV;
252}
253
254/* Find the i2c device based on the driver ID and return
255 its i2c address or -ENODEV if no matching device was found. */
256static int cx18_i2c_id_addr(struct cx18 *cx, u32 id)
257{
258 struct i2c_client *client;
259 int retval = -ENODEV;
260 int i;
261
262 for (i = 0; i < I2C_CLIENTS_MAX; i++) {
263 client = cx->i2c_clients[i];
264 if (client == NULL || client->driver == NULL)
265 continue;
266 if (id == client->driver->id) {
267 retval = client->addr;
268 break;
269 }
270 }
271 return retval;
272}
273
274/* Find the i2c device name matching the DRIVERID */
275static const char *cx18_i2c_id_name(u32 id)
276{
277 int i;
278
279 for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
280 if (hw_driverids[i] == id)
281 return hw_drivernames[i];
282 return "unknown device";
283}
284
285/* Find the i2c device name matching the CX18_HW_ flag */
286static const char *cx18_i2c_hw_name(u32 hw)
287{
288 int i;
289
290 for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
291 if (1 << i == hw)
292 return hw_drivernames[i];
293 return "unknown device";
294}
295
296/* Find the i2c device matching the CX18_HW_ flag and return
297 its i2c address or -ENODEV if no matching device was found. */
298int cx18_i2c_hw_addr(struct cx18 *cx, u32 hw)
299{
300 int i;
301
302 for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
303 if (1 << i == hw)
304 return cx18_i2c_id_addr(cx, hw_driverids[i]);
305 return -ENODEV;
306}
307
308/* Calls i2c device based on CX18_HW_ flag. If hw == 0, then do nothing.
309 If hw == CX18_HW_GPIO then call the gpio handler. */
310int cx18_i2c_hw(struct cx18 *cx, u32 hw, unsigned int cmd, void *arg)
311{
312 int addr;
313
314 if (hw == CX18_HW_GPIO || hw == 0)
315 return 0;
316 if (hw == CX18_HW_CX23418)
317 return cx18_av_cmd(cx, cmd, arg);
318
319 addr = cx18_i2c_hw_addr(cx, hw);
320 if (addr < 0) {
321 CX18_ERR("i2c hardware 0x%08x (%s) not found for cmd 0x%x!\n",
322 hw, cx18_i2c_hw_name(hw), cmd);
323 return addr;
324 }
325 return cx18_call_i2c_client(cx, addr, cmd, arg);
326}
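As an illustration of how a CX18_HW_ bit indexes the tables near the top of this file: per the array comments, bit 2 corresponds to the "cs5345" client registered at i2c address 0x4c, so a command can be routed to it as below (the wrapper name, and the assumption that the CX18_HW_ define for the CS5345 is 1 << 2, are hypothetical):

static int example_route_to_cs5345(struct cx18 *cx, unsigned int cmd, void *arg)
{
	/* bit 2 -> hw_driverids[2]/hw_addrs[2], i.e. the cs5345 client */
	return cx18_i2c_hw(cx, 1 << 2, cmd, arg);
}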
327
328/* Calls i2c device based on I2C driver ID. */
329int cx18_i2c_id(struct cx18 *cx, u32 id, unsigned int cmd, void *arg)
330{
331 int addr;
332
333 addr = cx18_i2c_id_addr(cx, id);
334 if (addr < 0) {
335 if (cmd != VIDIOC_G_CHIP_IDENT)
336 CX18_ERR("i2c ID 0x%08x (%s) not found for cmd 0x%x!\n",
337 id, cx18_i2c_id_name(id), cmd);
338 return addr;
339 }
340 return cx18_call_i2c_client(cx, addr, cmd, arg);
341}
342
343/* broadcast cmd for all I2C clients and for the gpio subsystem */
344void cx18_call_i2c_clients(struct cx18 *cx, unsigned int cmd, void *arg)
345{
346 if (cx->i2c_adap[0].algo == NULL || cx->i2c_adap[1].algo == NULL) {
347 CX18_ERR("adapter is not set\n");
348 return;
349 }
350 cx18_av_cmd(cx, cmd, arg);
351 i2c_clients_command(&cx->i2c_adap[0], cmd, arg);
352 i2c_clients_command(&cx->i2c_adap[1], cmd, arg);
353}
354
355/* init + register i2c algo-bit adapter */
356int init_cx18_i2c(struct cx18 *cx)
357{
358 int i;
359 CX18_DEBUG_I2C("i2c init\n");
360
361 for (i = 0; i < 2; i++) {
362 memcpy(&cx->i2c_adap[i], &cx18_i2c_adap_template,
363 sizeof(struct i2c_adapter));
364 memcpy(&cx->i2c_algo[i], &cx18_i2c_algo_template,
365 sizeof(struct i2c_algo_bit_data));
366 cx->i2c_algo_cb_data[i].cx = cx;
367 cx->i2c_algo_cb_data[i].bus_index = i;
368 cx->i2c_algo[i].data = &cx->i2c_algo_cb_data[i];
369 cx->i2c_adap[i].algo_data = &cx->i2c_algo[i];
370
371 sprintf(cx->i2c_adap[i].name + strlen(cx->i2c_adap[i].name),
372 " #%d-%d", cx->num, i);
373 i2c_set_adapdata(&cx->i2c_adap[i], cx);
374
375 memcpy(&cx->i2c_client[i], &cx18_i2c_client_template,
376 sizeof(struct i2c_client));
377 sprintf(cx->i2c_client[i].name +
378 strlen(cx->i2c_client[i].name), "%d", i);
379 cx->i2c_client[i].adapter = &cx->i2c_adap[i];
380 cx->i2c_adap[i].dev.parent = &cx->dev->dev;
381 }
382
383 if (read_reg(CX18_REG_I2C_2_WR) != 0x0003c02f) {
384 /* Reset/Unreset I2C hardware block */
385 write_reg(0x10000000, 0xc71004); /* Clock select 220MHz */
386 write_reg_sync(0x10001000, 0xc71024); /* Clock Enable */
387 }
388 /* courtesy of Steven Toth <stoth@hauppauge.com> */
389 write_reg_sync(0x00c00000, 0xc7001c);
390 mdelay(10);
391 write_reg_sync(0x00c000c0, 0xc7001c);
392 mdelay(10);
393 write_reg_sync(0x00c00000, 0xc7001c);
394
395 write_reg_sync(0x00c00000, 0xc730c8); /* Set to edge-triggered intrs. */
396 write_reg_sync(0x00c00000, 0xc730c4); /* Clear any stale intrs */
397
398 /* Hw I2C1 Clock Freq ~100kHz */
399 write_reg_sync(0x00021c0f & ~4, CX18_REG_I2C_1_WR);
400 cx18_setscl(&cx->i2c_algo_cb_data[0], 1);
401 cx18_setsda(&cx->i2c_algo_cb_data[0], 1);
402
403 /* Hw I2C2 Clock Freq ~100kHz */
404 write_reg_sync(0x00021c0f & ~4, CX18_REG_I2C_2_WR);
405 cx18_setscl(&cx->i2c_algo_cb_data[1], 1);
406 cx18_setsda(&cx->i2c_algo_cb_data[1], 1);
407
408 return i2c_bit_add_bus(&cx->i2c_adap[0]) ||
409 i2c_bit_add_bus(&cx->i2c_adap[1]);
410}
411
412void exit_cx18_i2c(struct cx18 *cx)
413{
414 int i;
415 CX18_DEBUG_I2C("i2c exit\n");
416 write_reg(read_reg(CX18_REG_I2C_1_WR) | 4, CX18_REG_I2C_1_WR);
417 write_reg(read_reg(CX18_REG_I2C_2_WR) | 4, CX18_REG_I2C_2_WR);
418
419 for (i = 0; i < 2; i++) {
420 i2c_del_adapter(&cx->i2c_adap[i]);
421 }
422}
423
424/*
425 Hauppauge HVR1600 should have:
426 32 cx24227
427 98 unknown
428 a0 eeprom
429 c2 tuner
430 e? zilog ir
431 */
diff --git a/drivers/media/video/cx18/cx18-i2c.h b/drivers/media/video/cx18/cx18-i2c.h
new file mode 100644
index 000000000000..113c3f9a2cc0
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-i2c.h
@@ -0,0 +1,33 @@
1/*
2 * cx18 I2C functions
3 *
4 * Derived from ivtv-i2c.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24int cx18_i2c_hw_addr(struct cx18 *cx, u32 hw);
25int cx18_i2c_hw(struct cx18 *cx, u32 hw, unsigned int cmd, void *arg);
26int cx18_i2c_id(struct cx18 *cx, u32 id, unsigned int cmd, void *arg);
27int cx18_call_i2c_client(struct cx18 *cx, int addr, unsigned cmd, void *arg);
28void cx18_call_i2c_clients(struct cx18 *cx, unsigned int cmd, void *arg);
29int cx18_i2c_register(struct cx18 *cx, unsigned idx);
30
31/* init + register i2c algo-bit adapter */
32int init_cx18_i2c(struct cx18 *cx);
33void exit_cx18_i2c(struct cx18 *cx);
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
new file mode 100644
index 000000000000..dbdcb86ec5aa
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -0,0 +1,851 @@
1/*
2 * cx18 ioctl system call
3 *
4 * Derived from ivtv-ioctl.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-version.h"
26#include "cx18-mailbox.h"
27#include "cx18-i2c.h"
28#include "cx18-queue.h"
29#include "cx18-fileops.h"
30#include "cx18-vbi.h"
31#include "cx18-audio.h"
32#include "cx18-video.h"
33#include "cx18-streams.h"
34#include "cx18-ioctl.h"
35#include "cx18-gpio.h"
36#include "cx18-controls.h"
37#include "cx18-cards.h"
38#include "cx18-av-core.h"
39#include <media/tveeprom.h>
40#include <media/v4l2-chip-ident.h>
41#include <linux/i2c-id.h>
42
43u16 cx18_service2vbi(int type)
44{
45 switch (type) {
46 case V4L2_SLICED_TELETEXT_B:
47 return CX18_SLICED_TYPE_TELETEXT_B;
48 case V4L2_SLICED_CAPTION_525:
49 return CX18_SLICED_TYPE_CAPTION_525;
50 case V4L2_SLICED_WSS_625:
51 return CX18_SLICED_TYPE_WSS_625;
52 case V4L2_SLICED_VPS:
53 return CX18_SLICED_TYPE_VPS;
54 default:
55 return 0;
56 }
57}
58
59static int valid_service_line(int field, int line, int is_pal)
60{
61 return (is_pal && line >= 6 && (line != 23 || field == 0)) ||
62 (!is_pal && line >= 10 && line < 22);
63}
64
65static u16 select_service_from_set(int field, int line, u16 set, int is_pal)
66{
67 u16 valid_set = (is_pal ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525);
68 int i;
69
70 set = set & valid_set;
71 if (set == 0 || !valid_service_line(field, line, is_pal))
72 return 0;
73 if (!is_pal) {
74 if (line == 21 && (set & V4L2_SLICED_CAPTION_525))
75 return V4L2_SLICED_CAPTION_525;
76 } else {
77 if (line == 16 && field == 0 && (set & V4L2_SLICED_VPS))
78 return V4L2_SLICED_VPS;
79 if (line == 23 && field == 0 && (set & V4L2_SLICED_WSS_625))
80 return V4L2_SLICED_WSS_625;
81 if (line == 23)
82 return 0;
83 }
84 for (i = 0; i < 32; i++) {
85 if ((1 << i) & set)
86 return 1 << i;
87 }
88 return 0;
89}
90
91void cx18_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
92{
93 u16 set = fmt->service_set;
94 int f, l;
95
96 fmt->service_set = 0;
97 for (f = 0; f < 2; f++) {
98 for (l = 0; l < 24; l++)
99 fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal);
100 }
101}
102
103static int check_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
104{
105 int f, l;
106 u16 set = 0;
107
108 for (f = 0; f < 2; f++) {
109 for (l = 0; l < 24; l++) {
110 fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal);
111 set |= fmt->service_lines[f][l];
112 }
113 }
114 return set != 0;
115}
116
117u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt)
118{
119 int f, l;
120 u16 set = 0;
121
122 for (f = 0; f < 2; f++) {
123 for (l = 0; l < 24; l++)
124 set |= fmt->service_lines[f][l];
125 }
126 return set;
127}
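A brief worked example of the line mapping above, assuming a PAL (50 Hz) input and the hypothetical helper below: requesting WSS plus teletext places WSS on field 0, line 23 and teletext on the remaining valid lines, and cx18_get_service_set() then reports the union of what was actually placed.

static u16 example_expand_pal_services(struct v4l2_sliced_vbi_format *fmt)
{
	fmt->service_set = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
	cx18_expand_service_set(fmt, 1 /* is_pal */);
	/* fmt->service_lines[0][23] now holds V4L2_SLICED_WSS_625 and
	   lines 6-22 of both fields hold V4L2_SLICED_TELETEXT_B */
	return cx18_get_service_set(fmt);	/* union of the placed services */
}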
128
129static const struct {
130 v4l2_std_id std;
131 char *name;
132} enum_stds[] = {
133 { V4L2_STD_PAL_BG | V4L2_STD_PAL_H, "PAL-BGH" },
134 { V4L2_STD_PAL_DK, "PAL-DK" },
135 { V4L2_STD_PAL_I, "PAL-I" },
136 { V4L2_STD_PAL_M, "PAL-M" },
137 { V4L2_STD_PAL_N, "PAL-N" },
138 { V4L2_STD_PAL_Nc, "PAL-Nc" },
139 { V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, "SECAM-BGH" },
140 { V4L2_STD_SECAM_DK, "SECAM-DK" },
141 { V4L2_STD_SECAM_L, "SECAM-L" },
142 { V4L2_STD_SECAM_LC, "SECAM-L'" },
143 { V4L2_STD_NTSC_M, "NTSC-M" },
144 { V4L2_STD_NTSC_M_JP, "NTSC-J" },
145 { V4L2_STD_NTSC_M_KR, "NTSC-K" },
146};
147
148static const struct v4l2_standard cx18_std_60hz = {
149 .frameperiod = {.numerator = 1001, .denominator = 30000},
150 .framelines = 525,
151};
152
153static const struct v4l2_standard cx18_std_50hz = {
154 .frameperiod = { .numerator = 1, .denominator = 25 },
155 .framelines = 625,
156};
157
158static int cx18_cxc(struct cx18 *cx, unsigned int cmd, void *arg)
159{
160 struct v4l2_register *regs = arg;
161 unsigned long flags;
162
163 if (!capable(CAP_SYS_ADMIN))
164 return -EPERM;
165 if (regs->reg >= CX18_MEM_OFFSET + CX18_MEM_SIZE)
166 return -EINVAL;
167
168 spin_lock_irqsave(&cx18_cards_lock, flags);
169 if (cmd == VIDIOC_DBG_G_REGISTER)
170 regs->val = read_enc(regs->reg);
171 else
172 write_enc(regs->val, regs->reg);
173 spin_unlock_irqrestore(&cx18_cards_lock, flags);
174 return 0;
175}
176
177static int cx18_get_fmt(struct cx18 *cx, int streamtype, struct v4l2_format *fmt)
178{
179 switch (fmt->type) {
180 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
181 fmt->fmt.pix.width = cx->params.width;
182 fmt->fmt.pix.height = cx->params.height;
183 fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
184 fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
185 if (streamtype == CX18_ENC_STREAM_TYPE_YUV) {
186 fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_HM12;
187 /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */
188 fmt->fmt.pix.sizeimage =
189 fmt->fmt.pix.height * fmt->fmt.pix.width +
190 fmt->fmt.pix.height * (fmt->fmt.pix.width / 2);
191 } else {
192 fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
193 fmt->fmt.pix.sizeimage = 128 * 1024;
194 }
195 break;
196
197 case V4L2_BUF_TYPE_VBI_CAPTURE:
198 fmt->fmt.vbi.sampling_rate = 27000000;
199 fmt->fmt.vbi.offset = 248;
200 fmt->fmt.vbi.samples_per_line = cx->vbi.raw_decoder_line_size - 4;
201 fmt->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
202 fmt->fmt.vbi.start[0] = cx->vbi.start[0];
203 fmt->fmt.vbi.start[1] = cx->vbi.start[1];
204 fmt->fmt.vbi.count[0] = fmt->fmt.vbi.count[1] = cx->vbi.count;
205 break;
206
207 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
208 {
209 struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
210
211 vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
212 memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved));
213 memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines));
214
215 cx18_av_cmd(cx, VIDIOC_G_FMT, fmt);
216 vbifmt->service_set = cx18_get_service_set(vbifmt);
217 break;
218 }
219 default:
220 return -EINVAL;
221 }
222 return 0;
223}
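A worked instance of the HM12 size computation above, using the driver's default resolutions:

	/* 50 Hz: 720 * 576 + 576 * (720 / 2) = 414720 + 207360 = 622080 bytes */
	/* 60 Hz: 720 * 480 + 480 * (720 / 2) = 345600 + 172800 = 518400 bytes */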
224
225static int cx18_try_or_set_fmt(struct cx18 *cx, int streamtype,
226 struct v4l2_format *fmt, int set_fmt)
227{
228 struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
229 u16 set;
230
231 /* set window size */
232 if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
233 int w = fmt->fmt.pix.width;
234 int h = fmt->fmt.pix.height;
235
236 if (w > 720)
237 w = 720;
238 else if (w < 1)
239 w = 1;
240 if (h > (cx->is_50hz ? 576 : 480))
241 h = (cx->is_50hz ? 576 : 480);
242 else if (h < 2)
243 h = 2;
244 cx18_get_fmt(cx, streamtype, fmt);
245 fmt->fmt.pix.width = w;
246 fmt->fmt.pix.height = h;
247
248 if (!set_fmt || (cx->params.width == w && cx->params.height == h))
249 return 0;
250 if (atomic_read(&cx->capturing) > 0)
251 return -EBUSY;
252
253 cx->params.width = w;
254 cx->params.height = h;
255 if (w != 720 || h != (cx->is_50hz ? 576 : 480))
256 cx->params.video_temporal_filter = 0;
257 else
258 cx->params.video_temporal_filter = 8;
259 cx18_av_cmd(cx, VIDIOC_S_FMT, fmt);
260 return cx18_get_fmt(cx, streamtype, fmt);
261 }
262
263 /* set raw VBI format */
264 if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
265 if (set_fmt && streamtype == CX18_ENC_STREAM_TYPE_VBI &&
266 cx->vbi.sliced_in->service_set &&
267 atomic_read(&cx->capturing) > 0)
268 return -EBUSY;
269 if (set_fmt) {
270 cx->vbi.sliced_in->service_set = 0;
271 cx18_av_cmd(cx, VIDIOC_S_FMT, &cx->vbi.in);
272 }
273 return cx18_get_fmt(cx, streamtype, fmt);
274 }
275
276	/* anything other than sliced VBI capture is an error */
277 if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
278 return -EINVAL;
279
280 /* TODO: implement sliced VBI, for now silently return 0 */
281 return 0;
282
283 /* set sliced VBI capture format */
284 vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
285 memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved));
286
287 if (vbifmt->service_set)
288 cx18_expand_service_set(vbifmt, cx->is_50hz);
289 set = check_service_set(vbifmt, cx->is_50hz);
290 vbifmt->service_set = cx18_get_service_set(vbifmt);
291
292 if (!set_fmt)
293 return 0;
294 if (set == 0)
295 return -EINVAL;
296 if (atomic_read(&cx->capturing) > 0 && cx->vbi.sliced_in->service_set == 0)
297 return -EBUSY;
298 cx18_av_cmd(cx, VIDIOC_S_FMT, fmt);
299 memcpy(cx->vbi.sliced_in, vbifmt, sizeof(*cx->vbi.sliced_in));
300 return 0;
301}
302
303static int cx18_debug_ioctls(struct file *filp, unsigned int cmd, void *arg)
304{
305 struct cx18_open_id *id = (struct cx18_open_id *)filp->private_data;
306 struct cx18 *cx = id->cx;
307 struct v4l2_register *reg = arg;
308
309 switch (cmd) {
310 /* ioctls to allow direct access to the encoder registers for testing */
311 case VIDIOC_DBG_G_REGISTER:
312 if (v4l2_chip_match_host(reg->match_type, reg->match_chip))
313 return cx18_cxc(cx, cmd, arg);
314 if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER)
315 return cx18_i2c_id(cx, reg->match_chip, cmd, arg);
316 return cx18_call_i2c_client(cx, reg->match_chip, cmd, arg);
317
318 case VIDIOC_DBG_S_REGISTER:
319 if (v4l2_chip_match_host(reg->match_type, reg->match_chip))
320 return cx18_cxc(cx, cmd, arg);
321 if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER)
322 return cx18_i2c_id(cx, reg->match_chip, cmd, arg);
323 return cx18_call_i2c_client(cx, reg->match_chip, cmd, arg);
324
325 case VIDIOC_G_CHIP_IDENT: {
326 struct v4l2_chip_ident *chip = arg;
327
328 chip->ident = V4L2_IDENT_NONE;
329 chip->revision = 0;
330 if (reg->match_type == V4L2_CHIP_MATCH_HOST) {
331 if (v4l2_chip_match_host(reg->match_type, reg->match_chip)) {
332 struct v4l2_chip_ident *chip = arg;
333
334 chip->ident = V4L2_IDENT_CX23418;
335 }
336 return 0;
337 }
338 if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER)
339 return cx18_i2c_id(cx, reg->match_chip, cmd, arg);
340 if (reg->match_type == V4L2_CHIP_MATCH_I2C_ADDR)
341 return cx18_call_i2c_client(cx, reg->match_chip, cmd, arg);
342 return -EINVAL;
343 }
344
345 case VIDIOC_INT_S_AUDIO_ROUTING: {
346 struct v4l2_routing *route = arg;
347
348 cx18_audio_set_route(cx, route);
349 break;
350 }
351
352 default:
353 return -EINVAL;
354 }
355 return 0;
356}
357
358int cx18_v4l2_ioctls(struct cx18 *cx, struct file *filp, unsigned cmd, void *arg)
359{
360 struct cx18_open_id *id = NULL;
361
362 if (filp)
363 id = (struct cx18_open_id *)filp->private_data;
364
365 switch (cmd) {
366 case VIDIOC_G_PRIORITY:
367 {
368 enum v4l2_priority *p = arg;
369
370 *p = v4l2_prio_max(&cx->prio);
371 break;
372 }
373
374 case VIDIOC_S_PRIORITY:
375 {
376 enum v4l2_priority *prio = arg;
377
378 return v4l2_prio_change(&cx->prio, &id->prio, *prio);
379 }
380
381 case VIDIOC_QUERYCAP:{
382 struct v4l2_capability *vcap = arg;
383
384 memset(vcap, 0, sizeof(*vcap));
385 strlcpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver));
386 strlcpy(vcap->card, cx->card_name, sizeof(vcap->card));
387 strlcpy(vcap->bus_info, pci_name(cx->dev), sizeof(vcap->bus_info));
388 vcap->version = CX18_DRIVER_VERSION; /* version */
389 vcap->capabilities = cx->v4l2_cap; /* capabilities */
390
391			/* reserved fields must be set to 0 */
392 vcap->reserved[0] = vcap->reserved[1] =
393 vcap->reserved[2] = vcap->reserved[3] = 0;
394 break;
395 }
396
397 case VIDIOC_ENUMAUDIO:{
398 struct v4l2_audio *vin = arg;
399
400 return cx18_get_audio_input(cx, vin->index, vin);
401 }
402
403 case VIDIOC_G_AUDIO:{
404 struct v4l2_audio *vin = arg;
405
406 vin->index = cx->audio_input;
407 return cx18_get_audio_input(cx, vin->index, vin);
408 }
409
410 case VIDIOC_S_AUDIO:{
411 struct v4l2_audio *vout = arg;
412
413 if (vout->index >= cx->nof_audio_inputs)
414 return -EINVAL;
415 cx->audio_input = vout->index;
416 cx18_audio_set_io(cx);
417 break;
418 }
419
420 case VIDIOC_ENUMINPUT:{
421 struct v4l2_input *vin = arg;
422
423 /* set it to defaults from our table */
424 return cx18_get_input(cx, vin->index, vin);
425 }
426
427 case VIDIOC_TRY_FMT:
428 case VIDIOC_S_FMT: {
429 struct v4l2_format *fmt = arg;
430
431 return cx18_try_or_set_fmt(cx, id->type, fmt, cmd == VIDIOC_S_FMT);
432 }
433
434 case VIDIOC_G_FMT: {
435 struct v4l2_format *fmt = arg;
436 int type = fmt->type;
437
438 memset(fmt, 0, sizeof(*fmt));
439 fmt->type = type;
440 return cx18_get_fmt(cx, id->type, fmt);
441 }
442
443 case VIDIOC_CROPCAP: {
444 struct v4l2_cropcap *cropcap = arg;
445
446 if (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
447 return -EINVAL;
448 cropcap->bounds.top = cropcap->bounds.left = 0;
449 cropcap->bounds.width = 720;
450 cropcap->bounds.height = cx->is_50hz ? 576 : 480;
451 cropcap->pixelaspect.numerator = cx->is_50hz ? 59 : 10;
452 cropcap->pixelaspect.denominator = cx->is_50hz ? 54 : 11;
453 cropcap->defrect = cropcap->bounds;
454 return 0;
455 }
456
457 case VIDIOC_S_CROP: {
458 struct v4l2_crop *crop = arg;
459
460 if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
461 return -EINVAL;
462 return cx18_av_cmd(cx, VIDIOC_S_CROP, arg);
463 }
464
465 case VIDIOC_G_CROP: {
466 struct v4l2_crop *crop = arg;
467
468 if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
469 return -EINVAL;
470 return cx18_av_cmd(cx, VIDIOC_G_CROP, arg);
471 }
472
473 case VIDIOC_ENUM_FMT: {
474 static struct v4l2_fmtdesc formats[] = {
475 { 0, 0, 0,
476 "HM12 (YUV 4:1:1)", V4L2_PIX_FMT_HM12,
477 { 0, 0, 0, 0 }
478 },
479 { 1, 0, V4L2_FMT_FLAG_COMPRESSED,
480 "MPEG", V4L2_PIX_FMT_MPEG,
481 { 0, 0, 0, 0 }
482 }
483 };
484 struct v4l2_fmtdesc *fmt = arg;
485 enum v4l2_buf_type type = fmt->type;
486
487 switch (type) {
488 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
489 break;
490 default:
491 return -EINVAL;
492 }
493 if (fmt->index > 1)
494 return -EINVAL;
495 *fmt = formats[fmt->index];
496 fmt->type = type;
497 return 0;
498 }
499
500 case VIDIOC_G_INPUT:{
501 *(int *)arg = cx->active_input;
502 break;
503 }
504
505 case VIDIOC_S_INPUT:{
506 int inp = *(int *)arg;
507
508 if (inp < 0 || inp >= cx->nof_inputs)
509 return -EINVAL;
510
511 if (inp == cx->active_input) {
512 CX18_DEBUG_INFO("Input unchanged\n");
513 break;
514 }
515 CX18_DEBUG_INFO("Changing input from %d to %d\n",
516 cx->active_input, inp);
517
518 cx->active_input = inp;
519 /* Set the audio input to whatever is appropriate for the
520 input type. */
521 cx->audio_input = cx->card->video_inputs[inp].audio_index;
522
523 /* prevent others from messing with the streams until
524 we're finished changing inputs. */
525 cx18_mute(cx);
526 cx18_video_set_io(cx);
527 cx18_audio_set_io(cx);
528 cx18_unmute(cx);
529 break;
530 }
531
532 case VIDIOC_G_FREQUENCY:{
533 struct v4l2_frequency *vf = arg;
534
535 if (vf->tuner != 0)
536 return -EINVAL;
537 cx18_call_i2c_clients(cx, cmd, arg);
538 break;
539 }
540
541 case VIDIOC_S_FREQUENCY:{
542 struct v4l2_frequency vf = *(struct v4l2_frequency *)arg;
543
544 if (vf.tuner != 0)
545 return -EINVAL;
546
547 cx18_mute(cx);
548 CX18_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf.frequency);
549 cx18_call_i2c_clients(cx, cmd, &vf);
550 cx18_unmute(cx);
551 break;
552 }
553
554 case VIDIOC_ENUMSTD:{
555 struct v4l2_standard *vs = arg;
556 int idx = vs->index;
557
558 if (idx < 0 || idx >= ARRAY_SIZE(enum_stds))
559 return -EINVAL;
560
561 *vs = (enum_stds[idx].std & V4L2_STD_525_60) ?
562 cx18_std_60hz : cx18_std_50hz;
563 vs->index = idx;
564 vs->id = enum_stds[idx].std;
565 strlcpy(vs->name, enum_stds[idx].name, sizeof(vs->name));
566 break;
567 }
568
569 case VIDIOC_G_STD:{
570 *(v4l2_std_id *) arg = cx->std;
571 break;
572 }
573
574 case VIDIOC_S_STD: {
575 v4l2_std_id std = *(v4l2_std_id *) arg;
576
577 if ((std & V4L2_STD_ALL) == 0)
578 return -EINVAL;
579
580 if (std == cx->std)
581 break;
582
583 if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags) ||
584 atomic_read(&cx->capturing) > 0) {
585 /* Switching the standard would turn off the radio or
586 disrupt already running streams; prevent that by
587 returning EBUSY. */
588 return -EBUSY;
589 }
590
591 cx->std = std;
592 cx->is_60hz = (std & V4L2_STD_525_60) ? 1 : 0;
593 cx->params.is_50hz = cx->is_50hz = !cx->is_60hz;
594 cx->params.width = 720;
595 cx->params.height = cx->is_50hz ? 576 : 480;
596 cx->vbi.count = cx->is_50hz ? 18 : 12;
597 cx->vbi.start[0] = cx->is_50hz ? 6 : 10;
598 cx->vbi.start[1] = cx->is_50hz ? 318 : 273;
599 cx->vbi.sliced_decoder_line_size = cx->is_60hz ? 272 : 284;
600 CX18_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)cx->std);
601
602 /* Tuner */
603 cx18_call_i2c_clients(cx, VIDIOC_S_STD, &cx->std);
604 break;
605 }
606
607 case VIDIOC_S_TUNER: { /* Setting tuner can only set audio mode */
608 struct v4l2_tuner *vt = arg;
609
610 if (vt->index != 0)
611 return -EINVAL;
612
613 cx18_call_i2c_clients(cx, VIDIOC_S_TUNER, vt);
614 break;
615 }
616
617 case VIDIOC_G_TUNER: {
618 struct v4l2_tuner *vt = arg;
619
620 if (vt->index != 0)
621 return -EINVAL;
622
623 memset(vt, 0, sizeof(*vt));
624 cx18_call_i2c_clients(cx, VIDIOC_G_TUNER, vt);
625
626 if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
627 strlcpy(vt->name, "cx18 Radio Tuner", sizeof(vt->name));
628 vt->type = V4L2_TUNER_RADIO;
629 } else {
630 strlcpy(vt->name, "cx18 TV Tuner", sizeof(vt->name));
631 vt->type = V4L2_TUNER_ANALOG_TV;
632 }
633 break;
634 }
635
636 case VIDIOC_G_SLICED_VBI_CAP: {
637 struct v4l2_sliced_vbi_cap *cap = arg;
638 int set = cx->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
639 int f, l;
640 enum v4l2_buf_type type = cap->type;
641
642 memset(cap, 0, sizeof(*cap));
643 cap->type = type;
644 if (type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
645 for (f = 0; f < 2; f++) {
646 for (l = 0; l < 24; l++) {
647 if (valid_service_line(f, l, cx->is_50hz))
648 cap->service_lines[f][l] = set;
649 }
650 }
651 return 0;
652 }
653 return -EINVAL;
654 }
655
656 case VIDIOC_ENCODER_CMD:
657 case VIDIOC_TRY_ENCODER_CMD: {
658 struct v4l2_encoder_cmd *enc = arg;
659 int try = cmd == VIDIOC_TRY_ENCODER_CMD;
660
661 memset(&enc->raw, 0, sizeof(enc->raw));
662 switch (enc->cmd) {
663 case V4L2_ENC_CMD_START:
664 enc->flags = 0;
665 if (try)
666 return 0;
667 return cx18_start_capture(id);
668
669 case V4L2_ENC_CMD_STOP:
670 enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END;
671 if (try)
672 return 0;
673 cx18_stop_capture(id, enc->flags & V4L2_ENC_CMD_STOP_AT_GOP_END);
674 return 0;
675
676 case V4L2_ENC_CMD_PAUSE:
677 enc->flags = 0;
678 if (try)
679 return 0;
680 if (!atomic_read(&cx->capturing))
681 return -EPERM;
682 if (test_and_set_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags))
683 return 0;
684 cx18_mute(cx);
685 cx18_vapi(cx, CX18_CPU_CAPTURE_PAUSE, 1, cx18_find_handle(cx));
686 break;
687
688 case V4L2_ENC_CMD_RESUME:
689 enc->flags = 0;
690 if (try)
691 return 0;
692 if (!atomic_read(&cx->capturing))
693 return -EPERM;
694 if (!test_and_clear_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags))
695 return 0;
696 cx18_vapi(cx, CX18_CPU_CAPTURE_RESUME, 1, cx18_find_handle(cx));
697 cx18_unmute(cx);
698 break;
699 default:
700 return -EINVAL;
701 }
702 break;
703 }
704
705 case VIDIOC_LOG_STATUS:
706 {
707 struct v4l2_input vidin;
708 struct v4l2_audio audin;
709 int i;
710
711 CX18_INFO("================= START STATUS CARD #%d =================\n", cx->num);
712 if (cx->hw_flags & CX18_HW_TVEEPROM) {
713 struct tveeprom tv;
714
715 cx18_read_eeprom(cx, &tv);
716 }
717 cx18_call_i2c_clients(cx, VIDIOC_LOG_STATUS, NULL);
718 cx18_get_input(cx, cx->active_input, &vidin);
719 cx18_get_audio_input(cx, cx->audio_input, &audin);
720 CX18_INFO("Video Input: %s\n", vidin.name);
721 CX18_INFO("Audio Input: %s\n", audin.name);
722 CX18_INFO("Tuner: %s\n",
723 test_bit(CX18_F_I_RADIO_USER, &cx->i_flags) ?
724 "Radio" : "TV");
725 cx2341x_log_status(&cx->params, cx->name);
726 CX18_INFO("Status flags: 0x%08lx\n", cx->i_flags);
727 for (i = 0; i < CX18_MAX_STREAMS; i++) {
728 struct cx18_stream *s = &cx->streams[i];
729
730 if (s->v4l2dev == NULL || s->buffers == 0)
731 continue;
732 CX18_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n",
733 s->name, s->s_flags,
734 (s->buffers - s->q_free.buffers) * 100 / s->buffers,
735 (s->buffers * s->buf_size) / 1024, s->buffers);
736 }
737 CX18_INFO("Read MPEG/VBI: %lld/%lld bytes\n",
738 (long long)cx->mpg_data_received,
739 (long long)cx->vbi_data_inserted);
740 CX18_INFO("================== END STATUS CARD #%d ==================\n", cx->num);
741 break;
742 }
743
744 default:
745 return -EINVAL;
746 }
747 return 0;
748}
749
750static int cx18_v4l2_do_ioctl(struct inode *inode, struct file *filp,
751 unsigned int cmd, void *arg)
752{
753 struct cx18_open_id *id = (struct cx18_open_id *)filp->private_data;
754 struct cx18 *cx = id->cx;
755 int ret;
756
757 /* check priority */
758 switch (cmd) {
759 case VIDIOC_S_CTRL:
760 case VIDIOC_S_STD:
761 case VIDIOC_S_INPUT:
762 case VIDIOC_S_TUNER:
763 case VIDIOC_S_FREQUENCY:
764 case VIDIOC_S_FMT:
765 case VIDIOC_S_CROP:
766 case VIDIOC_S_EXT_CTRLS:
767 ret = v4l2_prio_check(&cx->prio, &id->prio);
768 if (ret)
769 return ret;
770 }
771
772 switch (cmd) {
773 case VIDIOC_DBG_G_REGISTER:
774 case VIDIOC_DBG_S_REGISTER:
775 case VIDIOC_G_CHIP_IDENT:
776 case VIDIOC_INT_S_AUDIO_ROUTING:
777 case VIDIOC_INT_RESET:
778 if (cx18_debug & CX18_DBGFLG_IOCTL) {
779 printk(KERN_INFO "cx18%d ioctl: ", cx->num);
780 v4l_printk_ioctl(cmd);
781 }
782 return cx18_debug_ioctls(filp, cmd, arg);
783
784 case VIDIOC_G_PRIORITY:
785 case VIDIOC_S_PRIORITY:
786 case VIDIOC_QUERYCAP:
787 case VIDIOC_ENUMINPUT:
788 case VIDIOC_G_INPUT:
789 case VIDIOC_S_INPUT:
790 case VIDIOC_G_FMT:
791 case VIDIOC_S_FMT:
792 case VIDIOC_TRY_FMT:
793 case VIDIOC_ENUM_FMT:
794 case VIDIOC_CROPCAP:
795 case VIDIOC_G_CROP:
796 case VIDIOC_S_CROP:
797 case VIDIOC_G_FREQUENCY:
798 case VIDIOC_S_FREQUENCY:
799 case VIDIOC_ENUMSTD:
800 case VIDIOC_G_STD:
801 case VIDIOC_S_STD:
802 case VIDIOC_S_TUNER:
803 case VIDIOC_G_TUNER:
804 case VIDIOC_ENUMAUDIO:
805 case VIDIOC_S_AUDIO:
806 case VIDIOC_G_AUDIO:
807 case VIDIOC_G_SLICED_VBI_CAP:
808 case VIDIOC_LOG_STATUS:
809 case VIDIOC_G_ENC_INDEX:
810 case VIDIOC_ENCODER_CMD:
811 case VIDIOC_TRY_ENCODER_CMD:
812 if (cx18_debug & CX18_DBGFLG_IOCTL) {
813 printk(KERN_INFO "cx18%d ioctl: ", cx->num);
814 v4l_printk_ioctl(cmd);
815 }
816 return cx18_v4l2_ioctls(cx, filp, cmd, arg);
817
818 case VIDIOC_QUERYMENU:
819 case VIDIOC_QUERYCTRL:
820 case VIDIOC_S_CTRL:
821 case VIDIOC_G_CTRL:
822 case VIDIOC_S_EXT_CTRLS:
823 case VIDIOC_G_EXT_CTRLS:
824 case VIDIOC_TRY_EXT_CTRLS:
825 if (cx18_debug & CX18_DBGFLG_IOCTL) {
826 printk(KERN_INFO "cx18%d ioctl: ", cx->num);
827 v4l_printk_ioctl(cmd);
828 }
829 return cx18_control_ioctls(cx, cmd, arg);
830
831 case 0x00005401: /* Handle isatty() calls */
832 return -EINVAL;
833 default:
834 return v4l_compat_translate_ioctl(inode, filp, cmd, arg,
835 cx18_v4l2_do_ioctl);
836 }
837 return 0;
838}
839
840int cx18_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
841 unsigned long arg)
842{
843 struct cx18_open_id *id = (struct cx18_open_id *)filp->private_data;
844 struct cx18 *cx = id->cx;
845 int res;
846
847 mutex_lock(&cx->serialize_lock);
848 res = video_usercopy(inode, filp, cmd, arg, cx18_v4l2_do_ioctl);
849 mutex_unlock(&cx->serialize_lock);
850 return res;
851}
diff --git a/drivers/media/video/cx18/cx18-ioctl.h b/drivers/media/video/cx18/cx18-ioctl.h
new file mode 100644
index 000000000000..9f4c7eb2897f
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-ioctl.h
@@ -0,0 +1,30 @@
1/*
2 * cx18 ioctl system call
3 *
4 * Derived from ivtv-ioctl.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24u16 cx18_service2vbi(int type);
25void cx18_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
26u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt);
27int cx18_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
28 unsigned long arg);
29int cx18_v4l2_ioctls(struct cx18 *cx, struct file *filp, unsigned cmd,
30 void *arg);
diff --git a/drivers/media/video/cx18/cx18-irq.c b/drivers/media/video/cx18/cx18-irq.c
new file mode 100644
index 000000000000..6e14f8bda559
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-irq.c
@@ -0,0 +1,179 @@
1/*
2 * cx18 interrupt handling
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#include "cx18-driver.h"
23#include "cx18-firmware.h"
24#include "cx18-fileops.h"
25#include "cx18-queue.h"
26#include "cx18-irq.h"
27#include "cx18-ioctl.h"
28#include "cx18-mailbox.h"
29#include "cx18-vbi.h"
30#include "cx18-scb.h"
31
32#define DMA_MAGIC_COOKIE 0x000001fe
33
34static void epu_dma_done(struct cx18 *cx, struct cx18_mailbox *mb)
35{
36 u32 handle = mb->args[0];
37 struct cx18_stream *s = NULL;
38 struct cx18_buffer *buf;
39 u32 off;
40 int i;
41 int id;
42
43 for (i = 0; i < CX18_MAX_STREAMS; i++) {
44 s = &cx->streams[i];
45 if ((handle == s->handle) && (s->dvb.enabled))
46 break;
47 if (s->v4l2dev && handle == s->handle)
48 break;
49 }
50 if (i == CX18_MAX_STREAMS) {
51 CX18_WARN("DMA done for unknown handle %d for stream %s\n",
52 handle, s->name);
53 mb->error = CXERR_NOT_OPEN;
54 mb->cmd = 0;
55 cx18_mb_ack(cx, mb);
56 return;
57 }
58
59 off = mb->args[1];
60 if (mb->args[2] != 1)
61 CX18_WARN("Ack struct = %d for %s\n",
62 mb->args[2], s->name);
63 id = read_enc(off);
64 buf = cx18_queue_find_buf(s, id, read_enc(off + 4));
65 CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
66 if (buf) {
67 cx18_buf_sync_for_cpu(s, buf);
68 if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
69 /* process the buffer here */
70 CX18_DEBUG_HI_DMA("TS recv and sent bytesused=%d\n",
71 buf->bytesused);
72
73 dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
74 buf->bytesused);
75
76 cx18_buf_sync_for_device(s, buf);
77 cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
78 (void *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
79 1, buf->id, s->buf_size);
80 } else
81 set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
82 } else {
83 CX18_WARN("Could not find buf %d for stream %s\n",
84 read_enc(off), s->name);
85 }
86 mb->error = 0;
87 mb->cmd = 0;
88 cx18_mb_ack(cx, mb);
89 wake_up(&cx->dma_waitq);
90 if (s->id != -1)
91 wake_up(&s->waitq);
92}
93
94static void epu_debug(struct cx18 *cx, struct cx18_mailbox *mb)
95{
96 char str[256] = { 0 };
97 char *p;
98
99 if (mb->args[1]) {
100 setup_page(mb->args[1]);
101 memcpy_fromio(str, cx->enc_mem + mb->args[1], 252);
102 str[252] = 0;
103 }
104 cx18_mb_ack(cx, mb);
105 CX18_DEBUG_INFO("%x %s\n", mb->args[0], str);
106 p = strchr(str, '.');
107 if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
108 CX18_INFO("FW version: %s\n", p - 1);
109}
110
111static void hpu_cmd(struct cx18 *cx, u32 sw1)
112{
113 struct cx18_mailbox mb;
114
115 if (sw1 & IRQ_CPU_TO_EPU) {
116 memcpy_fromio(&mb, &cx->scb->cpu2epu_mb, sizeof(mb));
117 mb.error = 0;
118
119 switch (mb.cmd) {
120 case CX18_EPU_DMA_DONE:
121 epu_dma_done(cx, &mb);
122 break;
123 case CX18_EPU_DEBUG:
124 epu_debug(cx, &mb);
125 break;
126 default:
127 CX18_WARN("Unexpected mailbox command %08x\n", mb.cmd);
128 break;
129 }
130 }
131 if (sw1 & (IRQ_APU_TO_EPU | IRQ_HPU_TO_EPU))
132 CX18_WARN("Unexpected interrupt %08x\n", sw1);
133}
134
135irqreturn_t cx18_irq_handler(int irq, void *dev_id)
136{
137 struct cx18 *cx = (struct cx18 *)dev_id;
138 u32 sw1, sw1_mask;
139 u32 sw2, sw2_mask;
140 u32 hw2, hw2_mask;
141
142 spin_lock(&cx->dma_reg_lock);
143
144 hw2_mask = read_reg(HW2_INT_MASK5_PCI);
145 hw2 = read_reg(HW2_INT_CLR_STATUS) & hw2_mask;
146 sw2_mask = read_reg(SW2_INT_ENABLE_PCI) | IRQ_EPU_TO_HPU_ACK;
147 sw2 = read_reg(SW2_INT_STATUS) & sw2_mask;
148 sw1_mask = read_reg(SW1_INT_ENABLE_PCI) | IRQ_EPU_TO_HPU;
149 sw1 = read_reg(SW1_INT_STATUS) & sw1_mask;
150
151 write_reg(sw2&sw2_mask, SW2_INT_STATUS);
152 write_reg(sw1&sw1_mask, SW1_INT_STATUS);
153 write_reg(hw2&hw2_mask, HW2_INT_CLR_STATUS);
154
155 if (sw1 || sw2 || hw2)
156 CX18_DEBUG_HI_IRQ("SW1: %x SW2: %x HW2: %x\n", sw1, sw2, hw2);
157
158 /* To do: interrupt-based I2C handling
159 if (hw2 & 0x00c00000) {
160 }
161 */
162
163 if (sw2) {
164 if (sw2 & (cx->scb->cpu2hpu_irq_ack | cx->scb->cpu2epu_irq_ack))
165 wake_up(&cx->mb_cpu_waitq);
166 if (sw2 & (cx->scb->apu2hpu_irq_ack | cx->scb->apu2epu_irq_ack))
167 wake_up(&cx->mb_apu_waitq);
168 if (sw2 & cx->scb->epu2hpu_irq_ack)
169 wake_up(&cx->mb_epu_waitq);
170 if (sw2 & cx->scb->hpu2epu_irq_ack)
171 wake_up(&cx->mb_hpu_waitq);
172 }
173
174 if (sw1)
175 hpu_cmd(cx, sw1);
176 spin_unlock(&cx->dma_reg_lock);
177
178 return (hw2 | sw1 | sw2) ? IRQ_HANDLED : IRQ_NONE;
179}
diff --git a/drivers/media/video/cx18/cx18-irq.h b/drivers/media/video/cx18/cx18-irq.h
new file mode 100644
index 000000000000..379f704f5cba
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-irq.h
@@ -0,0 +1,37 @@
1/*
2 * cx18 interrupt handling
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#define HW2_I2C1_INT (1 << 22)
23#define HW2_I2C2_INT (1 << 23)
24#define HW2_INT_CLR_STATUS 0xc730c4
25#define HW2_INT_MASK5_PCI 0xc730e4
26#define SW1_INT_SET 0xc73100
27#define SW1_INT_STATUS 0xc73104
28#define SW1_INT_ENABLE_PCI 0xc7311c
29#define SW2_INT_SET 0xc73140
30#define SW2_INT_STATUS 0xc73144
31#define SW2_INT_ENABLE_PCI 0xc7315c
32
33irqreturn_t cx18_irq_handler(int irq, void *dev_id);
34
35void cx18_irq_work_handler(struct work_struct *work);
36void cx18_dma_stream_dec_prepare(struct cx18_stream *s, u32 offset, int lock);
37void cx18_unfinished_dma(unsigned long arg);
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
new file mode 100644
index 000000000000..0c5f328bca54
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -0,0 +1,372 @@
1/*
2 * cx18 mailbox functions
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#include <stdarg.h>
23
24#include "cx18-driver.h"
25#include "cx18-scb.h"
26#include "cx18-irq.h"
27#include "cx18-mailbox.h"
28
29#define API_FAST (1 << 2) /* Short timeout */
30#define API_SLOW (1 << 3) /* Additional 300ms timeout */
31
32#define APU 0
33#define CPU 1
34#define EPU 2
35#define HPU 3
36
37struct cx18_api_info {
38 u32 cmd;
39 u8 flags; /* Flags, see above */
40 u8 rpu; /* Processing unit */
41 const char *name; /* The name of the command */
42};
43
44#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }
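/* Editor's note (not part of the original patch): the '#x' above stringizes
   the command name, so e.g. API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW)
   expands to { CX18_CPU_CAPTURE_START, API_SLOW, CPU, "CX18_CPU_CAPTURE_START" },
   which gives find_api_info() a human-readable name for the log messages. */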
45
46static const struct cx18_api_info api_info[] = {
47 /* MPEG encoder API */
48 API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
49 API_ENTRY(CPU, CX18_EPU_DEBUG, 0),
50 API_ENTRY(CPU, CX18_CREATE_TASK, 0),
51 API_ENTRY(CPU, CX18_DESTROY_TASK, 0),
52 API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW),
53 API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW),
54 API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0),
55 API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0),
56 API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
57 API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0),
58 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0),
59 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0),
60 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0),
61 API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0),
62 API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0),
63 API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0),
64 API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0),
65 API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0),
66 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0),
67 API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0),
68 API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0),
69 API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW),
70 API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0),
71 API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0),
72 API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0),
73 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0),
74 API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0),
75 API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0),
76 API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0),
77 API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0),
78 API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0),
79 API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0),
80 API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0),
81 API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
82 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
83 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
84 API_ENTRY(0, 0, 0),
85};
86
87static const struct cx18_api_info *find_api_info(u32 cmd)
88{
89 int i;
90
91 for (i = 0; api_info[i].cmd; i++)
92 if (api_info[i].cmd == cmd)
93 return &api_info[i];
94 return NULL;
95}
96
97static struct cx18_mailbox *cx18_mb_is_complete(struct cx18 *cx, int rpu,
98 u32 *state, u32 *irq, u32 *req)
99{
100 struct cx18_mailbox *mb = NULL;
101 int wait_count = 0;
102 u32 ack;
103
104 switch (rpu) {
105 case APU:
106 mb = &cx->scb->epu2apu_mb;
107 *state = readl(&cx->scb->apu_state);
108 *irq = readl(&cx->scb->epu2apu_irq);
109 break;
110
111 case CPU:
112 mb = &cx->scb->epu2cpu_mb;
113 *state = readl(&cx->scb->cpu_state);
114 *irq = readl(&cx->scb->epu2cpu_irq);
115 break;
116
117 case HPU:
118 mb = &cx->scb->epu2hpu_mb;
119 *state = readl(&cx->scb->hpu_state);
120 *irq = readl(&cx->scb->epu2hpu_irq);
121 break;
122 }
123
124 if (mb == NULL)
125 return mb;
126
127 do {
128 *req = readl(&mb->request);
129 ack = readl(&mb->ack);
130 wait_count++;
131 } while (*req != ack && wait_count < 600);
132
133 if (*req == ack) {
134 (*req)++;
135 if (*req == 0 || *req == 0xffffffff)
136 *req = 1;
137 return mb;
138 }
139 return NULL;
140}
141
142long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb)
143{
144 const struct cx18_api_info *info = find_api_info(mb->cmd);
145 struct cx18_mailbox *ack_mb;
146 u32 ack_irq;
147 u8 rpu = CPU;
148
149 if (info == NULL && mb->cmd) {
150 CX18_WARN("Cannot ack unknown command %x\n", mb->cmd);
151 return -EINVAL;
152 }
153 if (info)
154 rpu = info->rpu;
155
156 switch (rpu) {
157 case HPU:
158 ack_irq = IRQ_EPU_TO_HPU_ACK;
159 ack_mb = &cx->scb->hpu2epu_mb;
160 break;
161 case APU:
162 ack_irq = IRQ_EPU_TO_APU_ACK;
163 ack_mb = &cx->scb->apu2epu_mb;
164 break;
165 case CPU:
166 ack_irq = IRQ_EPU_TO_CPU_ACK;
167 ack_mb = &cx->scb->cpu2epu_mb;
168 break;
169 default:
170 CX18_WARN("Unknown RPU for command %x\n", mb->cmd);
171 return -EINVAL;
172 }
173
174 setup_page(SCB_OFFSET);
175 write_sync(mb->request, &ack_mb->ack);
176 write_reg(ack_irq, SW2_INT_SET);
177 return 0;
178}
179
180
181static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
182{
183 const struct cx18_api_info *info = find_api_info(cmd);
184 u32 state = 0, irq = 0, req, oldreq, err;
185 struct cx18_mailbox *mb;
186 wait_queue_head_t *waitq;
187 int timeout = 100;
188 int cnt = 0;
189 int sig = 0;
190 int i;
191
192 if (info == NULL) {
193 CX18_WARN("unknown cmd %x\n", cmd);
194 return -EINVAL;
195 }
196
197 if (cmd == CX18_CPU_DE_SET_MDL)
198 CX18_DEBUG_HI_API("%s\n", info->name);
199 else
200 CX18_DEBUG_API("%s\n", info->name);
201 setup_page(SCB_OFFSET);
202 mb = cx18_mb_is_complete(cx, info->rpu, &state, &irq, &req);
203
204 if (mb == NULL) {
205 CX18_ERR("mb %s busy\n", info->name);
206 return -EBUSY;
207 }
208
209 oldreq = req - 1;
210 writel(cmd, &mb->cmd);
211 for (i = 0; i < args; i++)
212 writel(data[i], &mb->args[i]);
213 writel(0, &mb->error);
214 writel(req, &mb->request);
215
216 switch (info->rpu) {
217 case APU: waitq = &cx->mb_apu_waitq; break;
218 case CPU: waitq = &cx->mb_cpu_waitq; break;
219 case EPU: waitq = &cx->mb_epu_waitq; break;
220 case HPU: waitq = &cx->mb_hpu_waitq; break;
221 default: return -EINVAL;
222 }
223 if (info->flags & API_FAST)
224 timeout /= 2;
225 write_reg(irq, SW1_INT_SET);
226
227 while (!sig && readl(&mb->ack) != readl(&mb->request) && cnt < 660) {
228 if (cnt > 200 && !in_atomic())
229 sig = cx18_msleep_timeout(10, 1);
230 cnt++;
231 }
232 if (sig)
233 return -EINTR;
234 if (cnt == 660) {
235 writel(oldreq, &mb->request);
236 CX18_ERR("mb %s failed\n", info->name);
237 return -EINVAL;
238 }
239 for (i = 0; i < MAX_MB_ARGUMENTS; i++)
240 data[i] = readl(&mb->args[i]);
241 err = readl(&mb->error);
242 if (!in_atomic() && (info->flags & API_SLOW))
243 cx18_msleep_timeout(300, 0);
244 if (err)
245 CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
246 info->name);
247 return err ? -EIO : 0;
248}
249
250int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
251{
252 int res = cx18_api_call(cx, cmd, args, data);
253
254 /* Allow a single retry, though it is probably already too late.
255 If there is no free mailbox, that usually indicates
256 a more serious problem. */
257 return (res == -EBUSY) ? cx18_api_call(cx, cmd, args, data) : res;
258}
259
260static int cx18_set_filter_param(struct cx18_stream *s)
261{
262 struct cx18 *cx = s->cx;
263 u32 mode;
264 int ret;
265
266 mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
267 ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
268 s->handle, 1, mode, cx->spatial_strength);
269 mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
270 ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
271 s->handle, 0, mode, cx->temporal_strength);
272 ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
273 s->handle, 2, cx->filter_mode >> 2, 0);
274 return ret;
275}
276
277int cx18_api_func(void *priv, u32 cmd, int in, int out,
278 u32 data[CX2341X_MBOX_MAX_DATA])
279{
280 struct cx18 *cx = priv;
281 struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_MPG];
282
283 switch (cmd) {
284 case CX2341X_ENC_SET_OUTPUT_PORT:
285 return 0;
286 case CX2341X_ENC_SET_FRAME_RATE:
287 return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
288 s->handle, 0, 0, 0, 0, data[0]);
289 case CX2341X_ENC_SET_FRAME_SIZE:
290 return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
291 s->handle, data[1], data[0]);
292 case CX2341X_ENC_SET_STREAM_TYPE:
293 return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
294 s->handle, data[0]);
295 case CX2341X_ENC_SET_ASPECT_RATIO:
296 return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
297 s->handle, data[0]);
298
299 case CX2341X_ENC_SET_GOP_PROPERTIES:
300 return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
301 s->handle, data[0], data[1]);
302 case CX2341X_ENC_SET_GOP_CLOSURE:
303 return 0;
304 case CX2341X_ENC_SET_AUDIO_PROPERTIES:
305 return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
306 s->handle, data[0]);
307 case CX2341X_ENC_MUTE_AUDIO:
308 return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
309 s->handle, data[0]);
310 case CX2341X_ENC_SET_BIT_RATE:
311 return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
312 s->handle, data[0], data[1], data[2], data[3]);
313 case CX2341X_ENC_MUTE_VIDEO:
314 return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
315 s->handle, data[0]);
316 case CX2341X_ENC_SET_FRAME_DROP_RATE:
317 return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
318 s->handle, data[0]);
319 case CX2341X_ENC_MISC:
320 return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
321 s->handle, data[0], data[1], data[2]);
322 case CX2341X_ENC_SET_DNR_FILTER_MODE:
323 cx->filter_mode = (data[0] & 3) | (data[1] << 2);
324 return cx18_set_filter_param(s);
325 case CX2341X_ENC_SET_DNR_FILTER_PROPS:
326 cx->spatial_strength = data[0];
327 cx->temporal_strength = data[1];
328 return cx18_set_filter_param(s);
329 case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
330 return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
331 s->handle, data[0], data[1]);
332 case CX2341X_ENC_SET_CORING_LEVELS:
333 return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
334 s->handle, data[0], data[1], data[2], data[3]);
335 }
336 CX18_WARN("Unknown cmd %x\n", cmd);
337 return 0;
338}
339
340int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
341 u32 cmd, int args, ...)
342{
343 va_list ap;
344 int i;
345
346 va_start(ap, args);
347 for (i = 0; i < args; i++)
348 data[i] = va_arg(ap, u32);
349 va_end(ap);
350 return cx18_api(cx, cmd, args, data);
351}
352
353int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
354{
355 u32 data[MAX_MB_ARGUMENTS];
356 va_list ap;
357 int i;
358
359 if (cx == NULL) {
360 CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
361 return 0;
362 }
363 if (args > MAX_MB_ARGUMENTS) {
364 CX18_ERR("args too big (cmd=%x)\n", cmd);
365 args = MAX_MB_ARGUMENTS;
366 }
367 va_start(ap, args);
368 for (i = 0; i < args; i++)
369 data[i] = va_arg(ap, u32);
370 va_end(ap);
371 return cx18_api(cx, cmd, args, data);
372}
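/*
 * Editor's illustrative sketch (not part of the original patch): one way a
 * caller might use the two vararg wrappers above.  cx18_vapi() discards the
 * mailbox results, while cx18_vapi_result() hands them back in the caller's
 * data[] array.  The CX18_CPU_GET_ENC_PTS usage and the args[0]/args[1]
 * layout of the returned PTS are assumptions for illustration only.
 */
static int __maybe_unused cx18_example_get_pts(struct cx18 *cx, u32 handle,
					       u64 *pts)
{
	u32 data[MAX_MB_ARGUMENTS];
	int ret;

	/* Ask the encoder firmware for the current PTS of this stream */
	ret = cx18_vapi_result(cx, data, CX18_CPU_GET_ENC_PTS, 1, handle);
	if (ret)
		return ret;
	/* Assumed result layout: low word in args[0], high word in args[1] */
	*pts = ((u64)data[1] << 32) | data[0];
	return 0;
}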
diff --git a/drivers/media/video/cx18/cx18-mailbox.h b/drivers/media/video/cx18/cx18-mailbox.h
new file mode 100644
index 000000000000..d995641536b3
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-mailbox.h
@@ -0,0 +1,73 @@
1/*
2 * cx18 mailbox functions
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#ifndef _CX18_MAILBOX_H_
23#define _CX18_MAILBOX_H_
24
25/* mailbox max args */
26#define MAX_MB_ARGUMENTS 6
27/* compatibility, should be the same as the define in cx2341x.h */
28#define CX2341X_MBOX_MAX_DATA 16
29
30#define MB_RESERVED_HANDLE_0 0
31#define MB_RESERVED_HANDLE_1 0xFFFFFFFF
32
33struct cx18;
34
35/* The cx18_mailbox struct is used for passing messages between
36 processors */
37struct cx18_mailbox {
38 /* The sender sets a handle in 'request' after it fills in the command.
39 'request' should be different from 'ack'. The sender also generates
40 an interrupt on XPU2YPU_irq, where XPU is the sender and YPU is the
41 receiver. */
42 u32 request;
43 /* The receiver detects a new command when 'req' differs from 'ack'.
44 It sets 'ack' to the same value as 'req' to clear the command, and it
45 also generates an interrupt on YPU2XPU_irq, where XPU is the sender
46 and YPU is the receiver. */
47 u32 ack;
48 u32 reserved[6];
49 /* 'cmd' identifies the command. The list of these commands is in
50 cx23418.h */
51 u32 cmd;
52 /* Each command can have up to 6 arguments */
53 u32 args[MAX_MB_ARGUMENTS];
54 /* The return code is one of the codes in the file cx23418.h. If the
55 command completed successfully, the error will be ERR_SYS_SUCCESS.
56 If it is pending, the code is ERR_SYS_PENDING. If it failed, the error
57 code indicates the task from which the error originated and will
58 be one of the errors in cx23418.h; in that case the following
59 holds: ((error & 0xff) != 0).
60 If the command is pending, the result will be passed in an MB from
61 the receiver to the sender, and 'req' will be returned in args[0]. */
62 u32 error;
63};
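/*
 * Editor's illustrative sketch (not part of the original patch): the
 * request/ack handshake described in the comments above, seen from the
 * sending side.  The mailbox lives in ioremapped device memory, hence
 * readl()/writel().  Real code goes through cx18_api()/cx18_mb_ack() in
 * cx18-mailbox.c; this only restates the protocol.
 */
static inline void cx18_mb_example_post(struct cx18_mailbox __iomem *mb,
					u32 cmd, u32 arg0)
{
	u32 req = readl(&mb->ack) + 1;	/* new handle, different from 'ack' */

	if (req == MB_RESERVED_HANDLE_0 || req == MB_RESERVED_HANDLE_1)
		req = 1;		/* skip the two reserved handles */
	writel(cmd, &mb->cmd);
	writel(arg0, &mb->args[0]);
	writel(0, &mb->error);
	writel(req, &mb->request);	/* receiver now sees request != ack */
	/* The sender would then raise the matching XPU2YPU interrupt. */
}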
64
65int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[]);
66int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS], u32 cmd,
67 int args, ...);
68int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...);
69int cx18_api_func(void *priv, u32 cmd, int in, int out,
70 u32 data[CX2341X_MBOX_MAX_DATA]);
71long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb);
72
73#endif
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
new file mode 100644
index 000000000000..65af1bb507ca
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-queue.c
@@ -0,0 +1,282 @@
1/*
2 * cx18 buffer queues
3 *
4 * Derived from ivtv-queue.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-streams.h"
26#include "cx18-queue.h"
27#include "cx18-scb.h"
28
29int cx18_buf_copy_from_user(struct cx18_stream *s, struct cx18_buffer *buf,
30 const char __user *src, int copybytes)
31{
32 if (s->buf_size - buf->bytesused < copybytes)
33 copybytes = s->buf_size - buf->bytesused;
34 if (copy_from_user(buf->buf + buf->bytesused, src, copybytes))
35 return -EFAULT;
36 buf->bytesused += copybytes;
37 return copybytes;
38}
39
40void cx18_buf_swap(struct cx18_buffer *buf)
41{
42 int i;
43
44 for (i = 0; i < buf->bytesused; i += 4)
45 swab32s((u32 *)(buf->buf + i));
46}
47
48void cx18_queue_init(struct cx18_queue *q)
49{
50 INIT_LIST_HEAD(&q->list);
51 q->buffers = 0;
52 q->length = 0;
53 q->bytesused = 0;
54}
55
56void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
57 struct cx18_queue *q)
58{
59 unsigned long flags = 0;
60
61 /* clear the buffer if it is going to be enqueued to the free queue */
62 if (q == &s->q_free) {
63 buf->bytesused = 0;
64 buf->readpos = 0;
65 buf->b_flags = 0;
66 }
67 spin_lock_irqsave(&s->qlock, flags);
68 list_add_tail(&buf->list, &q->list);
69 q->buffers++;
70 q->length += s->buf_size;
71 q->bytesused += buf->bytesused - buf->readpos;
72 spin_unlock_irqrestore(&s->qlock, flags);
73}
74
75struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
76{
77 struct cx18_buffer *buf = NULL;
78 unsigned long flags = 0;
79
80 spin_lock_irqsave(&s->qlock, flags);
81 if (!list_empty(&q->list)) {
82 buf = list_entry(q->list.next, struct cx18_buffer, list);
83 list_del_init(q->list.next);
84 q->buffers--;
85 q->length -= s->buf_size;
86 q->bytesused -= buf->bytesused - buf->readpos;
87 }
88 spin_unlock_irqrestore(&s->qlock, flags);
89 return buf;
90}
91
92struct cx18_buffer *cx18_queue_find_buf(struct cx18_stream *s, u32 id,
93 u32 bytesused)
94{
95 struct cx18 *cx = s->cx;
96 struct list_head *p;
97
98 list_for_each(p, &s->q_free.list) {
99 struct cx18_buffer *buf =
100 list_entry(p, struct cx18_buffer, list);
101
102 if (buf->id != id)
103 continue;
104 buf->bytesused = bytesused;
105 /* the transport buffers are handled differently,
106 so there is no need to move them to the full queue */
107 if (s->type == CX18_ENC_STREAM_TYPE_TS)
108 return buf;
109 s->q_free.buffers--;
110 s->q_free.length -= s->buf_size;
111 s->q_full.buffers++;
112 s->q_full.length += s->buf_size;
113 s->q_full.bytesused += buf->bytesused;
114 list_move_tail(&buf->list, &s->q_full.list);
115 return buf;
116 }
117 CX18_ERR("Cannot find buffer %d for stream %s\n", id, s->name);
118 return NULL;
119}
120
121static void cx18_queue_move_buf(struct cx18_stream *s, struct cx18_queue *from,
122 struct cx18_queue *to, int clear, int full)
123{
124 struct cx18_buffer *buf =
125 list_entry(from->list.next, struct cx18_buffer, list);
126
127 list_move_tail(from->list.next, &to->list);
128 from->buffers--;
129 from->length -= s->buf_size;
130 from->bytesused -= buf->bytesused - buf->readpos;
131 /* special handling for q_free */
132 if (clear)
133 buf->bytesused = buf->readpos = buf->b_flags = 0;
134 else if (full) {
135 /* special handling for stolen buffers, assume
136 all bytes are used. */
137 buf->bytesused = s->buf_size;
138 buf->readpos = buf->b_flags = 0;
139 }
140 to->buffers++;
141 to->length += s->buf_size;
142 to->bytesused += buf->bytesused - buf->readpos;
143}
144
145/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
146 If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
147 If 'steal' != NULL, then buffers may also be taken from that queue if
148 needed.
149
150 The buffer is automatically cleared if it goes to the free queue. It is
151 also cleared if buffers need to be taken from the 'steal' queue and
152 the 'from' queue is the free queue.
153
154 When 'from' is q_free, then needed_bytes is compared to the total
155 available buffer length, otherwise needed_bytes is compared to the
156 bytesused value. For the 'steal' queue the total available buffer
157 length is always used.
158
159 Returns -ENOMEM if the buffers could not be obtained, 0 if all
160 buffers were obtained from the 'from' list, and otherwise the
161 number of buffers stolen from 'steal'. */
162int cx18_queue_move(struct cx18_stream *s, struct cx18_queue *from,
163 struct cx18_queue *steal, struct cx18_queue *to, int needed_bytes)
164{
165 unsigned long flags;
166 int rc = 0;
167 int from_free = from == &s->q_free;
168 int to_free = to == &s->q_free;
169 int bytes_available;
170
171 spin_lock_irqsave(&s->qlock, flags);
172 if (needed_bytes == 0) {
173 from_free = 1;
174 needed_bytes = from->length;
175 }
176
177 bytes_available = from_free ? from->length : from->bytesused;
178 bytes_available += steal ? steal->length : 0;
179
180 if (bytes_available < needed_bytes) {
181 spin_unlock_irqrestore(&s->qlock, flags);
182 return -ENOMEM;
183 }
184 if (from_free) {
185 u32 old_length = to->length;
186
187 while (to->length - old_length < needed_bytes) {
188 if (list_empty(&from->list))
189 from = steal;
190 if (from == steal)
191 rc++; /* keep track of 'stolen' buffers */
192 cx18_queue_move_buf(s, from, to, 1, 0);
193 }
194 } else {
195 u32 old_bytesused = to->bytesused;
196
197 while (to->bytesused - old_bytesused < needed_bytes) {
198 if (list_empty(&from->list))
199 from = steal;
200 if (from == steal)
201 rc++; /* keep track of 'stolen' buffers */
202 cx18_queue_move_buf(s, from, to, to_free, rc);
203 }
204 }
205 spin_unlock_irqrestore(&s->qlock, flags);
206 return rc;
207}
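/*
 * Editor's illustrative sketch (not part of the original patch): one way a
 * caller could use cx18_queue_move() and its return value, per the comment
 * above.  The particular queues and the byte count are examples only.
 */
static int __maybe_unused cx18_example_claim_bytes(struct cx18_stream *s,
						   int needed_bytes)
{
	struct cx18 *cx = s->cx;
	int stolen;

	/* Fill q_io from q_free, stealing (and clearing) full buffers
	   from q_full if the free queue alone cannot cover needed_bytes. */
	stolen = cx18_queue_move(s, &s->q_free, &s->q_full, &s->q_io,
				 needed_bytes);
	if (stolen < 0)
		return stolen;	/* -ENOMEM: not enough buffers anywhere */
	if (stolen > 0)
		CX18_DEBUG_INFO("stole %d full buffers for %s\n",
				stolen, s->name);
	return 0;
}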
208
209void cx18_flush_queues(struct cx18_stream *s)
210{
211 cx18_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
212 cx18_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
213}
214
215int cx18_stream_alloc(struct cx18_stream *s)
216{
217 struct cx18 *cx = s->cx;
218 int i;
219
220 if (s->buffers == 0)
221 return 0;
222
223 CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%dkB total)\n",
224 s->name, s->buffers, s->buf_size,
225 s->buffers * s->buf_size / 1024);
226
227 if (((char *)&cx->scb->cpu_mdl[cx->mdl_offset + s->buffers] -
228 (char *)cx->scb) > SCB_RESERVED_SIZE) {
229 unsigned bufsz = (((char *)cx->scb) + SCB_RESERVED_SIZE -
230 ((char *)cx->scb->cpu_mdl));
231
232 CX18_ERR("Too many buffers, cannot fit in SCB area\n");
233 CX18_ERR("Max buffers = %zd\n",
234 bufsz / sizeof(struct cx18_mdl));
235 return -ENOMEM;
236 }
237
238 s->mdl_offset = cx->mdl_offset;
239
240 /* allocate stream buffers. Initially all buffers are in q_free. */
241 for (i = 0; i < s->buffers; i++) {
242 struct cx18_buffer *buf =
243 kzalloc(sizeof(struct cx18_buffer), GFP_KERNEL);
244
245 if (buf == NULL)
246 break;
247 buf->buf = kmalloc(s->buf_size, GFP_KERNEL);
248 if (buf->buf == NULL) {
249 kfree(buf);
250 break;
251 }
252 buf->id = cx->buffer_id++;
253 INIT_LIST_HEAD(&buf->list);
254 buf->dma_handle = pci_map_single(s->cx->dev,
255 buf->buf, s->buf_size, s->dma);
256 cx18_buf_sync_for_cpu(s, buf);
257 cx18_enqueue(s, buf, &s->q_free);
258 }
259 if (i == s->buffers) {
260 cx->mdl_offset += s->buffers;
261 return 0;
262 }
263 CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
264 cx18_stream_free(s);
265 return -ENOMEM;
266}
267
268void cx18_stream_free(struct cx18_stream *s)
269{
270 struct cx18_buffer *buf;
271
272 /* move all buffers to q_free */
273 cx18_flush_queues(s);
274
275 /* empty q_free */
276 while ((buf = cx18_dequeue(s, &s->q_free))) {
277 pci_unmap_single(s->cx->dev, buf->dma_handle,
278 s->buf_size, s->dma);
279 kfree(buf->buf);
280 kfree(buf);
281 }
282}
diff --git a/drivers/media/video/cx18/cx18-queue.h b/drivers/media/video/cx18/cx18-queue.h
new file mode 100644
index 000000000000..f86c8a6fa6e7
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-queue.h
@@ -0,0 +1,59 @@
1/*
2 * cx18 buffer queues
3 *
4 * Derived from ivtv-queue.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#define CX18_DMA_UNMAPPED ((u32) -1)
25
26/* cx18_buffer utility functions */
27
28static inline void cx18_buf_sync_for_cpu(struct cx18_stream *s,
29 struct cx18_buffer *buf)
30{
31 pci_dma_sync_single_for_cpu(s->cx->dev, buf->dma_handle,
32 s->buf_size, s->dma);
33}
34
35static inline void cx18_buf_sync_for_device(struct cx18_stream *s,
36 struct cx18_buffer *buf)
37{
38 pci_dma_sync_single_for_device(s->cx->dev, buf->dma_handle,
39 s->buf_size, s->dma);
40}
41
42int cx18_buf_copy_from_user(struct cx18_stream *s, struct cx18_buffer *buf,
43 const char __user *src, int copybytes);
44void cx18_buf_swap(struct cx18_buffer *buf);
45
46/* cx18_queue utility functions */
47void cx18_queue_init(struct cx18_queue *q);
48void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
49 struct cx18_queue *q);
50struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
51int cx18_queue_move(struct cx18_stream *s, struct cx18_queue *from,
52 struct cx18_queue *steal, struct cx18_queue *to, int needed_bytes);
53struct cx18_buffer *cx18_queue_find_buf(struct cx18_stream *s, u32 id,
54 u32 bytesused);
55void cx18_flush_queues(struct cx18_stream *s);
56
57/* cx18_stream utility functions */
58int cx18_stream_alloc(struct cx18_stream *s);
59void cx18_stream_free(struct cx18_stream *s);
diff --git a/drivers/media/video/cx18/cx18-scb.c b/drivers/media/video/cx18/cx18-scb.c
new file mode 100644
index 000000000000..30bc803e30da
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-scb.c
@@ -0,0 +1,121 @@
1/*
2 * cx18 System Control Block initialization
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#include "cx18-driver.h"
23#include "cx18-scb.h"
24
25void cx18_init_scb(struct cx18 *cx)
26{
27 setup_page(SCB_OFFSET);
28 memset_io(cx->scb, 0, 0x10000);
29
30 writel(IRQ_APU_TO_CPU, &cx->scb->apu2cpu_irq);
31 writel(IRQ_CPU_TO_APU_ACK, &cx->scb->cpu2apu_irq_ack);
32 writel(IRQ_HPU_TO_CPU, &cx->scb->hpu2cpu_irq);
33 writel(IRQ_CPU_TO_HPU_ACK, &cx->scb->cpu2hpu_irq_ack);
34 writel(IRQ_PPU_TO_CPU, &cx->scb->ppu2cpu_irq);
35 writel(IRQ_CPU_TO_PPU_ACK, &cx->scb->cpu2ppu_irq_ack);
36 writel(IRQ_EPU_TO_CPU, &cx->scb->epu2cpu_irq);
37 writel(IRQ_CPU_TO_EPU_ACK, &cx->scb->cpu2epu_irq_ack);
38
39 writel(IRQ_CPU_TO_APU, &cx->scb->cpu2apu_irq);
40 writel(IRQ_APU_TO_CPU_ACK, &cx->scb->apu2cpu_irq_ack);
41 writel(IRQ_HPU_TO_APU, &cx->scb->hpu2apu_irq);
42 writel(IRQ_APU_TO_HPU_ACK, &cx->scb->apu2hpu_irq_ack);
43 writel(IRQ_PPU_TO_APU, &cx->scb->ppu2apu_irq);
44 writel(IRQ_APU_TO_PPU_ACK, &cx->scb->apu2ppu_irq_ack);
45 writel(IRQ_EPU_TO_APU, &cx->scb->epu2apu_irq);
46 writel(IRQ_APU_TO_EPU_ACK, &cx->scb->apu2epu_irq_ack);
47
48 writel(IRQ_CPU_TO_HPU, &cx->scb->cpu2hpu_irq);
49 writel(IRQ_HPU_TO_CPU_ACK, &cx->scb->hpu2cpu_irq_ack);
50 writel(IRQ_APU_TO_HPU, &cx->scb->apu2hpu_irq);
51 writel(IRQ_HPU_TO_APU_ACK, &cx->scb->hpu2apu_irq_ack);
52 writel(IRQ_PPU_TO_HPU, &cx->scb->ppu2hpu_irq);
53 writel(IRQ_HPU_TO_PPU_ACK, &cx->scb->hpu2ppu_irq_ack);
54 writel(IRQ_EPU_TO_HPU, &cx->scb->epu2hpu_irq);
55 writel(IRQ_HPU_TO_EPU_ACK, &cx->scb->hpu2epu_irq_ack);
56
57 writel(IRQ_CPU_TO_PPU, &cx->scb->cpu2ppu_irq);
58 writel(IRQ_PPU_TO_CPU_ACK, &cx->scb->ppu2cpu_irq_ack);
59 writel(IRQ_APU_TO_PPU, &cx->scb->apu2ppu_irq);
60 writel(IRQ_PPU_TO_APU_ACK, &cx->scb->ppu2apu_irq_ack);
61 writel(IRQ_HPU_TO_PPU, &cx->scb->hpu2ppu_irq);
62 writel(IRQ_PPU_TO_HPU_ACK, &cx->scb->ppu2hpu_irq_ack);
63 writel(IRQ_EPU_TO_PPU, &cx->scb->epu2ppu_irq);
64 writel(IRQ_PPU_TO_EPU_ACK, &cx->scb->ppu2epu_irq_ack);
65
66 writel(IRQ_CPU_TO_EPU, &cx->scb->cpu2epu_irq);
67 writel(IRQ_EPU_TO_CPU_ACK, &cx->scb->epu2cpu_irq_ack);
68 writel(IRQ_APU_TO_EPU, &cx->scb->apu2epu_irq);
69 writel(IRQ_EPU_TO_APU_ACK, &cx->scb->epu2apu_irq_ack);
70 writel(IRQ_HPU_TO_EPU, &cx->scb->hpu2epu_irq);
71 writel(IRQ_EPU_TO_HPU_ACK, &cx->scb->epu2hpu_irq_ack);
72 writel(IRQ_PPU_TO_EPU, &cx->scb->ppu2epu_irq);
73 writel(IRQ_EPU_TO_PPU_ACK, &cx->scb->epu2ppu_irq_ack);
74
75 writel(SCB_OFFSET + offsetof(struct cx18_scb, apu2cpu_mb),
76 &cx->scb->apu2cpu_mb_offset);
77 writel(SCB_OFFSET + offsetof(struct cx18_scb, hpu2cpu_mb),
78 &cx->scb->hpu2cpu_mb_offset);
79 writel(SCB_OFFSET + offsetof(struct cx18_scb, ppu2cpu_mb),
80 &cx->scb->ppu2cpu_mb_offset);
81 writel(SCB_OFFSET + offsetof(struct cx18_scb, epu2cpu_mb),
82 &cx->scb->epu2cpu_mb_offset);
83 writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu2apu_mb),
84 &cx->scb->cpu2apu_mb_offset);
85 writel(SCB_OFFSET + offsetof(struct cx18_scb, hpu2apu_mb),
86 &cx->scb->hpu2apu_mb_offset);
87 writel(SCB_OFFSET + offsetof(struct cx18_scb, ppu2apu_mb),
88 &cx->scb->ppu2apu_mb_offset);
89 writel(SCB_OFFSET + offsetof(struct cx18_scb, epu2apu_mb),
90 &cx->scb->epu2apu_mb_offset);
91 writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu2hpu_mb),
92 &cx->scb->cpu2hpu_mb_offset);
93 writel(SCB_OFFSET + offsetof(struct cx18_scb, apu2hpu_mb),
94 &cx->scb->apu2hpu_mb_offset);
95 writel(SCB_OFFSET + offsetof(struct cx18_scb, ppu2hpu_mb),
96 &cx->scb->ppu2hpu_mb_offset);
97 writel(SCB_OFFSET + offsetof(struct cx18_scb, epu2hpu_mb),
98 &cx->scb->epu2hpu_mb_offset);
99 writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu2ppu_mb),
100 &cx->scb->cpu2ppu_mb_offset);
101 writel(SCB_OFFSET + offsetof(struct cx18_scb, apu2ppu_mb),
102 &cx->scb->apu2ppu_mb_offset);
103 writel(SCB_OFFSET + offsetof(struct cx18_scb, hpu2ppu_mb),
104 &cx->scb->hpu2ppu_mb_offset);
105 writel(SCB_OFFSET + offsetof(struct cx18_scb, epu2ppu_mb),
106 &cx->scb->epu2ppu_mb_offset);
107 writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu2epu_mb),
108 &cx->scb->cpu2epu_mb_offset);
109 writel(SCB_OFFSET + offsetof(struct cx18_scb, apu2epu_mb),
110 &cx->scb->apu2epu_mb_offset);
111 writel(SCB_OFFSET + offsetof(struct cx18_scb, hpu2epu_mb),
112 &cx->scb->hpu2epu_mb_offset);
113 writel(SCB_OFFSET + offsetof(struct cx18_scb, ppu2epu_mb),
114 &cx->scb->ppu2epu_mb_offset);
115
116 writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu_state),
117 &cx->scb->ipc_offset);
118
119 writel(1, &cx->scb->hpu_state);
120 writel(1, &cx->scb->epu_state);
121}
diff --git a/drivers/media/video/cx18/cx18-scb.h b/drivers/media/video/cx18/cx18-scb.h
new file mode 100644
index 000000000000..86b4cb15d163
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-scb.h
@@ -0,0 +1,285 @@
1/*
2 * cx18 System Control Block initialization
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#ifndef CX18_SCB_H
23#define CX18_SCB_H
24
25#include "cx18-mailbox.h"
26
27/* NOTE: All ACK interrupts are in the SW2 register. All non-ACK interrupts
28 are in the SW1 register. */
29
30#define IRQ_APU_TO_CPU 0x00000001
31#define IRQ_CPU_TO_APU_ACK 0x00000001
32#define IRQ_HPU_TO_CPU 0x00000002
33#define IRQ_CPU_TO_HPU_ACK 0x00000002
34#define IRQ_PPU_TO_CPU 0x00000004
35#define IRQ_CPU_TO_PPU_ACK 0x00000004
36#define IRQ_EPU_TO_CPU 0x00000008
37#define IRQ_CPU_TO_EPU_ACK 0x00000008
38
39#define IRQ_CPU_TO_APU 0x00000010
40#define IRQ_APU_TO_CPU_ACK 0x00000010
41#define IRQ_HPU_TO_APU 0x00000020
42#define IRQ_APU_TO_HPU_ACK 0x00000020
43#define IRQ_PPU_TO_APU 0x00000040
44#define IRQ_APU_TO_PPU_ACK 0x00000040
45#define IRQ_EPU_TO_APU 0x00000080
46#define IRQ_APU_TO_EPU_ACK 0x00000080
47
48#define IRQ_CPU_TO_HPU 0x00000100
49#define IRQ_HPU_TO_CPU_ACK 0x00000100
50#define IRQ_APU_TO_HPU 0x00000200
51#define IRQ_HPU_TO_APU_ACK 0x00000200
52#define IRQ_PPU_TO_HPU 0x00000400
53#define IRQ_HPU_TO_PPU_ACK 0x00000400
54#define IRQ_EPU_TO_HPU 0x00000800
55#define IRQ_HPU_TO_EPU_ACK 0x00000800
56
57#define IRQ_CPU_TO_PPU 0x00001000
58#define IRQ_PPU_TO_CPU_ACK 0x00001000
59#define IRQ_APU_TO_PPU 0x00002000
60#define IRQ_PPU_TO_APU_ACK 0x00002000
61#define IRQ_HPU_TO_PPU 0x00004000
62#define IRQ_PPU_TO_HPU_ACK 0x00004000
63#define IRQ_EPU_TO_PPU 0x00008000
64#define IRQ_PPU_TO_EPU_ACK 0x00008000
65
66#define IRQ_CPU_TO_EPU 0x00010000
67#define IRQ_EPU_TO_CPU_ACK 0x00010000
68#define IRQ_APU_TO_EPU 0x00020000
69#define IRQ_EPU_TO_APU_ACK 0x00020000
70#define IRQ_HPU_TO_EPU 0x00040000
71#define IRQ_EPU_TO_HPU_ACK 0x00040000
72#define IRQ_PPU_TO_EPU 0x00080000
73#define IRQ_EPU_TO_PPU_ACK 0x00080000
74
75#define SCB_OFFSET 0xDC0000
76
77/* If the firmware uses a fixed memory map, it shall not allocate the area
78 between SCB_OFFSET and SCB_OFFSET+SCB_RESERVED_SIZE-1 inclusive */
79#define SCB_RESERVED_SIZE 0x10000
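/* Editor's note (not part of the original patch): with the values above the
   reserved window is 0xDC0000 through 0xDCFFFF inclusive, i.e. 64 KiB. */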
80
81
82/* This structure is used by the EPU to provide memory descriptors in its memory */
83struct cx18_mdl {
84 u32 paddr; /* Physical address of a buffer segment */
85 u32 length; /* Length of the buffer segment */
86};
87
88/* This structure is used by the CPU to report information about completed buffers */
89struct cx18_mdl_ack {
90 u32 id; /* ID of a completed MDL */
91 u32 data_used; /* Total data filled in the MDL for buffer 'id' */
92};
93
94struct cx18_scb {
95 /* These fields form the System Control Block, which is used at boot time
96 to locate the IPC data as well as the code positions for all
97 processors. The offsets are from the start of this struct. */
98
99 /* Offset where to find the Inter-Processor Communication data */
100 u32 ipc_offset;
101 u32 reserved01[7];
102 /* Offset where to find the start of the CPU code */
103 u32 cpu_code_offset;
104 u32 reserved02[3];
105 /* Offset where to find the start of the APU code */
106 u32 apu_code_offset;
107 u32 reserved03[3];
108 /* Offset where to find the start of the HPU code */
109 u32 hpu_code_offset;
110 u32 reserved04[3];
111 /* Offset where to find the start of the PPU code */
112 u32 ppu_code_offset;
113 u32 reserved05[3];
114
115 /* These fields form the Inter-Processor Communication data, which each
116 processor uses to locate the information needed for communicating
117 with the other processors */
118
119 /* Fields for CPU: */
120
121 /* bit 0: 1/0 processor ready/not ready. Set other bits to 0. */
122 u32 cpu_state;
123 u32 reserved1[7];
124 /* Offset to the mailbox used for sending commands from APU to CPU */
125 u32 apu2cpu_mb_offset;
126 /* Value to write to the SW1 register set (0xC7003100) once the
127 command is ready */
128 u32 apu2cpu_irq;
129 /* Value to write to the SW2 register set (0xC7003140) once the
130 command has been cleared */
131 u32 apu2cpu_irq_ack;
132 u32 reserved2[13];
133
134 u32 hpu2cpu_mb_offset;
135 u32 hpu2cpu_irq;
136 u32 hpu2cpu_irq_ack;
137 u32 reserved3[13];
138
139 u32 ppu2cpu_mb_offset;
140 u32 ppu2cpu_irq;
141 u32 ppu2cpu_irq_ack;
142 u32 reserved4[13];
143
144 u32 epu2cpu_mb_offset;
145 u32 epu2cpu_irq;
146 u32 epu2cpu_irq_ack;
147 u32 reserved5[13];
148 u32 reserved6[8];
149
150 /* Fields for APU: */
151
152 u32 apu_state;
153 u32 reserved11[7];
154 u32 cpu2apu_mb_offset;
155 u32 cpu2apu_irq;
156 u32 cpu2apu_irq_ack;
157 u32 reserved12[13];
158
159 u32 hpu2apu_mb_offset;
160 u32 hpu2apu_irq;
161 u32 hpu2apu_irq_ack;
162 u32 reserved13[13];
163
164 u32 ppu2apu_mb_offset;
165 u32 ppu2apu_irq;
166 u32 ppu2apu_irq_ack;
167 u32 reserved14[13];
168
169 u32 epu2apu_mb_offset;
170 u32 epu2apu_irq;
171 u32 epu2apu_irq_ack;
172 u32 reserved15[13];
173 u32 reserved16[8];
174
175 /* Fields for HPU: */
176
177 u32 hpu_state;
178 u32 reserved21[7];
179 u32 cpu2hpu_mb_offset;
180 u32 cpu2hpu_irq;
181 u32 cpu2hpu_irq_ack;
182 u32 reserved22[13];
183
184 u32 apu2hpu_mb_offset;
185 u32 apu2hpu_irq;
186 u32 apu2hpu_irq_ack;
187 u32 reserved23[13];
188
189 u32 ppu2hpu_mb_offset;
190 u32 ppu2hpu_irq;
191 u32 ppu2hpu_irq_ack;
192 u32 reserved24[13];
193
194 u32 epu2hpu_mb_offset;
195 u32 epu2hpu_irq;
196 u32 epu2hpu_irq_ack;
197 u32 reserved25[13];
198 u32 reserved26[8];
199
200 /* Fields for PPU: */
201
202 u32 ppu_state;
203 u32 reserved31[7];
204 u32 cpu2ppu_mb_offset;
205 u32 cpu2ppu_irq;
206 u32 cpu2ppu_irq_ack;
207 u32 reserved32[13];
208
209 u32 apu2ppu_mb_offset;
210 u32 apu2ppu_irq;
211 u32 apu2ppu_irq_ack;
212 u32 reserved33[13];
213
214 u32 hpu2ppu_mb_offset;
215 u32 hpu2ppu_irq;
216 u32 hpu2ppu_irq_ack;
217 u32 reserved34[13];
218
219 u32 epu2ppu_mb_offset;
220 u32 epu2ppu_irq;
221 u32 epu2ppu_irq_ack;
222 u32 reserved35[13];
223 u32 reserved36[8];
224
225 /* Fields for EPU: */
226
227 u32 epu_state;
228 u32 reserved41[7];
229 u32 cpu2epu_mb_offset;
230 u32 cpu2epu_irq;
231 u32 cpu2epu_irq_ack;
232 u32 reserved42[13];
233
234 u32 apu2epu_mb_offset;
235 u32 apu2epu_irq;
236 u32 apu2epu_irq_ack;
237 u32 reserved43[13];
238
239 u32 hpu2epu_mb_offset;
240 u32 hpu2epu_irq;
241 u32 hpu2epu_irq_ack;
242 u32 reserved44[13];
243
244 u32 ppu2epu_mb_offset;
245 u32 ppu2epu_irq;
246 u32 ppu2epu_irq_ack;
247 u32 reserved45[13];
248 u32 reserved46[8];
249
250 u32 semaphores[8]; /* Semaphores */
251
252 u32 reserved50[32]; /* Reserved for future use */
253
254 struct cx18_mailbox apu2cpu_mb;
255 struct cx18_mailbox hpu2cpu_mb;
256 struct cx18_mailbox ppu2cpu_mb;
257 struct cx18_mailbox epu2cpu_mb;
258
259 struct cx18_mailbox cpu2apu_mb;
260 struct cx18_mailbox hpu2apu_mb;
261 struct cx18_mailbox ppu2apu_mb;
262 struct cx18_mailbox epu2apu_mb;
263
264 struct cx18_mailbox cpu2hpu_mb;
265 struct cx18_mailbox apu2hpu_mb;
266 struct cx18_mailbox ppu2hpu_mb;
267 struct cx18_mailbox epu2hpu_mb;
268
269 struct cx18_mailbox cpu2ppu_mb;
270 struct cx18_mailbox apu2ppu_mb;
271 struct cx18_mailbox hpu2ppu_mb;
272 struct cx18_mailbox epu2ppu_mb;
273
274 struct cx18_mailbox cpu2epu_mb;
275 struct cx18_mailbox apu2epu_mb;
276 struct cx18_mailbox hpu2epu_mb;
277 struct cx18_mailbox ppu2epu_mb;
278
279 struct cx18_mdl_ack cpu_mdl_ack[CX18_MAX_STREAMS][2];
280 struct cx18_mdl cpu_mdl[1];
281};
282
283void cx18_init_scb(struct cx18 *cx);
284
285#endif
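
As an aside on how the System Control Block above is consumed: every offset it publishes is relative to the start of the SCB itself, and cx18-streams.c later computes such offsets as pointer differences against cx->enc_mem, which implies the SCB lives inside the ioremapped encoder memory. A minimal sketch under that assumption (the helper name is invented):

/* Sketch only: turn a boot-time offset published in the SCB into an
 * __iomem pointer.  Assumes cx->scb points at the ioremapped SCB, as the
 * MDL code in cx18-streams.c implies; cx18_scb_ptr() is hypothetical. */
static inline void __iomem *cx18_scb_ptr(struct cx18 *cx, u32 offset)
{
	return (u8 __iomem *)cx->scb + offset;
}

/* e.g. the APU->CPU command mailbox:
 *	void __iomem *mb = cx18_scb_ptr(cx, readl(&cx->scb->apu2cpu_mb_offset));
 */
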
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
new file mode 100644
index 000000000000..afb141b2027a
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -0,0 +1,566 @@
1/*
2 * cx18 init/start/stop/exit stream functions
3 *
4 * Derived from ivtv-streams.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-fileops.h"
26#include "cx18-mailbox.h"
27#include "cx18-i2c.h"
28#include "cx18-queue.h"
29#include "cx18-ioctl.h"
30#include "cx18-streams.h"
31#include "cx18-cards.h"
32#include "cx18-scb.h"
33#include "cx18-av-core.h"
34#include "cx18-dvb.h"
35
36#define CX18_DSP0_INTERRUPT_MASK 0xd0004C
37
38static struct file_operations cx18_v4l2_enc_fops = {
39 .owner = THIS_MODULE,
40 .read = cx18_v4l2_read,
41 .open = cx18_v4l2_open,
42 .ioctl = cx18_v4l2_ioctl,
43 .release = cx18_v4l2_close,
44 .poll = cx18_v4l2_enc_poll,
45};
46
47/* offset from 0 to register ts v4l2 minors on */
48#define CX18_V4L2_ENC_TS_OFFSET 16
49/* offset from 0 to register pcm v4l2 minors on */
50#define CX18_V4L2_ENC_PCM_OFFSET 24
51/* offset from 0 to register yuv v4l2 minors on */
52#define CX18_V4L2_ENC_YUV_OFFSET 32
53
54static struct {
55 const char *name;
56 int vfl_type;
57 int minor_offset;
58 int dma;
59 enum v4l2_buf_type buf_type;
60 struct file_operations *fops;
61} cx18_stream_info[] = {
62 { /* CX18_ENC_STREAM_TYPE_MPG */
63 "encoder MPEG",
64 VFL_TYPE_GRABBER, 0,
65 PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
66 &cx18_v4l2_enc_fops
67 },
68 { /* CX18_ENC_STREAM_TYPE_TS */
69 "TS",
70 VFL_TYPE_GRABBER, -1,
71 PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
72 &cx18_v4l2_enc_fops
73 },
74 { /* CX18_ENC_STREAM_TYPE_YUV */
75 "encoder YUV",
76 VFL_TYPE_GRABBER, CX18_V4L2_ENC_YUV_OFFSET,
77 PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
78 &cx18_v4l2_enc_fops
79 },
80 { /* CX18_ENC_STREAM_TYPE_VBI */
81 "encoder VBI",
82 VFL_TYPE_VBI, 0,
83 PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VBI_CAPTURE,
84 &cx18_v4l2_enc_fops
85 },
86 { /* CX18_ENC_STREAM_TYPE_PCM */
87 "encoder PCM audio",
88 VFL_TYPE_GRABBER, CX18_V4L2_ENC_PCM_OFFSET,
89 PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_PRIVATE,
90 &cx18_v4l2_enc_fops
91 },
92 { /* CX18_ENC_STREAM_TYPE_IDX */
93 "encoder IDX",
94 VFL_TYPE_GRABBER, -1,
95 PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
96 &cx18_v4l2_enc_fops
97 },
98 { /* CX18_ENC_STREAM_TYPE_RAD */
99 "encoder radio",
100 VFL_TYPE_RADIO, 0,
101 PCI_DMA_NONE, V4L2_BUF_TYPE_PRIVATE,
102 &cx18_v4l2_enc_fops
103 },
104};
105
106static void cx18_stream_init(struct cx18 *cx, int type)
107{
108 struct cx18_stream *s = &cx->streams[type];
109 struct video_device *dev = s->v4l2dev;
110 u32 max_size = cx->options.megabytes[type] * 1024 * 1024;
111
112 /* we need to keep v4l2dev, so restore it afterwards */
113 memset(s, 0, sizeof(*s));
114 s->v4l2dev = dev;
115
116 /* initialize cx18_stream fields */
117 s->cx = cx;
118 s->type = type;
119 s->name = cx18_stream_info[type].name;
120 s->handle = 0xffffffff;
121
122 s->dma = cx18_stream_info[type].dma;
123 s->buf_size = cx->stream_buf_size[type];
124 if (s->buf_size)
125 s->buffers = max_size / s->buf_size;
126 if (s->buffers > 63) {
127 /* Each stream has a maximum of 63 buffers,
128 ensure we do not exceed that. */
129 s->buffers = 63;
130 s->buf_size = (max_size / s->buffers) & ~0xfff;
131 }
132 spin_lock_init(&s->qlock);
133 init_waitqueue_head(&s->waitq);
134 s->id = -1;
135 cx18_queue_init(&s->q_free);
136 cx18_queue_init(&s->q_full);
137 cx18_queue_init(&s->q_io);
138}
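
The sizing logic above caps every stream at 63 buffers and, when the cap is hit, re-derives the buffer size and rounds it down to a 4 KiB boundary. A standalone walk-through with invented numbers (4 MiB of buffer memory, 32 KiB initial buffers) to show both steps:

/* Illustrative userspace sketch; only the arithmetic mirrors
 * cx18_stream_init() above, the numbers are made up. */
#include <stdio.h>

int main(void)
{
	unsigned int max_size = 4 * 1024 * 1024;	/* megabytes[type] = 4 */
	unsigned int buf_size = 32 * 1024;		/* stream_buf_size[type] */
	unsigned int buffers = max_size / buf_size;	/* 128, over the cap */

	if (buffers > 63) {
		buffers = 63;
		buf_size = (max_size / buffers) & ~0xfff;	/* 66576 -> 65536 */
	}
	printf("%u buffers of %u bytes\n", buffers, buf_size);	/* 63 x 65536 */
	return 0;
}
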
139
140static int cx18_prep_dev(struct cx18 *cx, int type)
141{
142 struct cx18_stream *s = &cx->streams[type];
143 u32 cap = cx->v4l2_cap;
144 int minor_offset = cx18_stream_info[type].minor_offset;
145 int minor;
146
147 /* These four fields are always initialized. If v4l2dev == NULL, then
148 this stream is not in use. In that case no other fields but these
149 four can be used. */
150 s->v4l2dev = NULL;
151 s->cx = cx;
152 s->type = type;
153 s->name = cx18_stream_info[type].name;
154
155 /* Check whether the radio is supported */
156 if (type == CX18_ENC_STREAM_TYPE_RAD && !(cap & V4L2_CAP_RADIO))
157 return 0;
158
159 /* Check whether VBI is supported */
160 if (type == CX18_ENC_STREAM_TYPE_VBI &&
161 !(cap & (V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE)))
162 return 0;
163
164 /* card number + user defined offset + device offset */
165 minor = cx->num + cx18_first_minor + minor_offset;
166
167 /* User explicitly selected 0 buffers for these streams, so don't
168 create them. */
169 if (cx18_stream_info[type].dma != PCI_DMA_NONE &&
170 cx->options.megabytes[type] == 0) {
171 CX18_INFO("Disabled %s device\n", cx18_stream_info[type].name);
172 return 0;
173 }
174
175 cx18_stream_init(cx, type);
176
177 if (minor_offset == -1)
178 return 0;
179
180 /* allocate and initialize the v4l2 video device structure */
181 s->v4l2dev = video_device_alloc();
182 if (s->v4l2dev == NULL) {
183 CX18_ERR("Couldn't allocate v4l2 video_device for %s\n",
184 s->name);
185 return -ENOMEM;
186 }
187
188 s->v4l2dev->type =
189 VID_TYPE_CAPTURE | VID_TYPE_TUNER | VID_TYPE_TELETEXT |
190 VID_TYPE_CLIPPING | VID_TYPE_SCALES | VID_TYPE_MPEG_ENCODER;
191 snprintf(s->v4l2dev->name, sizeof(s->v4l2dev->name), "cx18%d %s",
192 cx->num, s->name);
193
194 s->v4l2dev->minor = minor;
195 s->v4l2dev->dev = &cx->dev->dev;
196 s->v4l2dev->fops = cx18_stream_info[type].fops;
197 s->v4l2dev->release = video_device_release;
198
199 return 0;
200}
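
With the CX18_V4L2_ENC_*_OFFSET values defined at the top of this file, the minor computed above places each stream on a predictable device node. A small sketch, assuming card number 0 and assuming the cx18_first_minor option defaults to 0:

/* Sketch: mirrors the "card number + user defined offset + device offset"
 * sum in cx18_prep_dev(); the function itself is hypothetical. */
static int cx18_example_minor(int card, int first_minor, int offset)
{
	return card + first_minor + offset;
}

/* cx18_example_minor(0, 0, 0)                        -> /dev/video0  (MPG)
 * cx18_example_minor(0, 0, CX18_V4L2_ENC_PCM_OFFSET) -> /dev/video24 (PCM)
 * cx18_example_minor(0, 0, CX18_V4L2_ENC_YUV_OFFSET) -> /dev/video32 (YUV)
 */
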
201
202/* Initialize v4l2 variables and register v4l2 devices */
203int cx18_streams_setup(struct cx18 *cx)
204{
205 int type;
206
207 /* Setup V4L2 Devices */
208 for (type = 0; type < CX18_MAX_STREAMS; type++) {
209 /* Prepare device */
210 if (cx18_prep_dev(cx, type))
211 break;
212
213 /* Allocate Stream */
214 if (cx18_stream_alloc(&cx->streams[type]))
215 break;
216 }
217 if (type == CX18_MAX_STREAMS)
218 return 0;
219
220 /* One or more streams could not be initialized. Clean 'em all up. */
221 cx18_streams_cleanup(cx);
222 return -ENOMEM;
223}
224
225static int cx18_reg_dev(struct cx18 *cx, int type)
226{
227 struct cx18_stream *s = &cx->streams[type];
228 int vfl_type = cx18_stream_info[type].vfl_type;
229 int minor;
230
231 /* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something?
232 * We need a VFL_TYPE_TS defined.
233 */
234 if (strcmp("TS", s->name) == 0) {
235 /* just return if no DVB is supported */
236 if ((cx->card->hw_all & CX18_HW_DVB) == 0)
237 return 0;
238 if (cx18_dvb_register(s) < 0) {
239 CX18_ERR("DVB failed to register\n");
240 return -EINVAL;
241 }
242 }
243
244 if (s->v4l2dev == NULL)
245 return 0;
246
247 minor = s->v4l2dev->minor;
248
249 /* Register device. First try the desired minor, then any free one. */
250 if (video_register_device(s->v4l2dev, vfl_type, minor) &&
251 video_register_device(s->v4l2dev, vfl_type, -1)) {
252 CX18_ERR("Couldn't register v4l2 device for %s minor %d\n",
253 s->name, minor);
254 video_device_release(s->v4l2dev);
255 s->v4l2dev = NULL;
256 return -ENOMEM;
257 }
258 minor = s->v4l2dev->minor;
259
260 switch (vfl_type) {
261 case VFL_TYPE_GRABBER:
262 CX18_INFO("Registered device video%d for %s (%d MB)\n",
263 minor, s->name, cx->options.megabytes[type]);
264 break;
265
266 case VFL_TYPE_RADIO:
267 CX18_INFO("Registered device radio%d for %s\n",
268 minor - MINOR_VFL_TYPE_RADIO_MIN, s->name);
269 break;
270
271 case VFL_TYPE_VBI:
272 if (cx->options.megabytes[type])
273 CX18_INFO("Registered device vbi%d for %s (%d MB)\n",
274 minor - MINOR_VFL_TYPE_VBI_MIN,
275 s->name, cx->options.megabytes[type]);
276 else
277 CX18_INFO("Registered device vbi%d for %s\n",
278 minor - MINOR_VFL_TYPE_VBI_MIN, s->name);
279 break;
280 }
281
282 return 0;
283}
284
285/* Register v4l2 devices */
286int cx18_streams_register(struct cx18 *cx)
287{
288 int type;
289 int err = 0;
290
291 /* Register V4L2 devices */
292 for (type = 0; type < CX18_MAX_STREAMS; type++)
293 err |= cx18_reg_dev(cx, type);
294
295 if (err == 0)
296 return 0;
297
298 /* One or more streams could not be initialized. Clean 'em all up. */
299 cx18_streams_cleanup(cx);
300 return -ENOMEM;
301}
302
303/* Unregister v4l2 devices */
304void cx18_streams_cleanup(struct cx18 *cx)
305{
306 struct video_device *vdev;
307 int type;
308
309 /* Teardown all streams */
310 for (type = 0; type < CX18_MAX_STREAMS; type++) {
311 if (cx->streams[type].dvb.enabled)
312 cx18_dvb_unregister(&cx->streams[type]);
313
314 vdev = cx->streams[type].v4l2dev;
315
316 cx->streams[type].v4l2dev = NULL;
317 if (vdev == NULL)
318 continue;
319
320 cx18_stream_free(&cx->streams[type]);
321
322 /* Unregister device */
323 video_unregister_device(vdev);
324 }
325}
326
327static void cx18_vbi_setup(struct cx18_stream *s)
328{
329 struct cx18 *cx = s->cx;
330 int raw = cx->vbi.sliced_in->service_set == 0;
331 u32 data[CX2341X_MBOX_MAX_DATA];
332 int lines;
333
334 if (cx->is_60hz) {
335 cx->vbi.count = 12;
336 cx->vbi.start[0] = 10;
337 cx->vbi.start[1] = 273;
338 } else { /* PAL/SECAM */
339 cx->vbi.count = 18;
340 cx->vbi.start[0] = 6;
341 cx->vbi.start[1] = 318;
342 }
343
344 /* setup VBI registers */
345 cx18_av_cmd(cx, VIDIOC_S_FMT, &cx->vbi.in);
346
347 /* determine number of lines and total number of VBI bytes.
348 A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1
349 The '- 1' byte is probably an unused U or V byte. Or something...
350 A sliced line takes 51 bytes: 4 byte frame header, 4 byte internal
351 header, 42 data bytes + checksum (to be confirmed) */
352 if (raw) {
353 lines = cx->vbi.count * 2;
354 } else {
355 lines = cx->is_60hz ? 24 : 38;
356 if (cx->is_60hz)
357 lines += 2;
358 }
359
360 cx->vbi.enc_size = lines *
361 (raw ? cx->vbi.raw_size : cx->vbi.sliced_size);
362
363 data[0] = s->handle;
364 /* Lines per field */
365 data[1] = (lines / 2) | ((lines / 2) << 16);
366 /* bytes per line */
367 data[2] = (raw ? cx->vbi.raw_size : cx->vbi.sliced_size);
368 /* A VBI interrupt arrives every X frames
369 (frames as in 25 or 30 fps) */
370 data[3] = 1;
371 /* Setup VBI for the cx25840 digitizer */
372 if (raw) {
373 data[4] = 0x20602060;
374 data[5] = 0x30703070;
375 } else {
376 data[4] = 0xB0F0B0F0;
377 data[5] = 0xA0E0A0E0;
378 }
379
380 CX18_DEBUG_INFO("Setup VBI h: %d lines %x bpl %d fr %d %x %x\n",
381 data[0], data[1], data[2], data[3], data[4], data[5]);
382
383 if (s->type == CX18_ENC_STREAM_TYPE_VBI)
384 cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data);
385}
386
387int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
388{
389 u32 data[MAX_MB_ARGUMENTS];
390 struct cx18 *cx = s->cx;
391 struct list_head *p;
392 int ts = 0;
393 int captype = 0;
394
395 if (s->v4l2dev == NULL && s->dvb.enabled == 0)
396 return -EINVAL;
397
398 CX18_DEBUG_INFO("Start encoder stream %s\n", s->name);
399
400 switch (s->type) {
401 case CX18_ENC_STREAM_TYPE_MPG:
402 captype = CAPTURE_CHANNEL_TYPE_MPEG;
403 cx->mpg_data_received = cx->vbi_data_inserted = 0;
404 cx->dualwatch_jiffies = jiffies;
405 cx->dualwatch_stereo_mode = cx->params.audio_properties & 0x300;
406 cx->search_pack_header = 0;
407 break;
408
409 case CX18_ENC_STREAM_TYPE_TS:
410 captype = CAPTURE_CHANNEL_TYPE_TS;
411 ts = 1;
412 break;
413 case CX18_ENC_STREAM_TYPE_YUV:
414 captype = CAPTURE_CHANNEL_TYPE_YUV;
415 break;
416 case CX18_ENC_STREAM_TYPE_PCM:
417 captype = CAPTURE_CHANNEL_TYPE_PCM;
418 break;
419 case CX18_ENC_STREAM_TYPE_VBI:
420 captype = cx->vbi.sliced_in->service_set ?
421 CAPTURE_CHANNEL_TYPE_SLICED_VBI : CAPTURE_CHANNEL_TYPE_VBI;
422 cx->vbi.frame = 0;
423 cx->vbi.inserted_frame = 0;
424 memset(cx->vbi.sliced_mpeg_size,
425 0, sizeof(cx->vbi.sliced_mpeg_size));
426 break;
427 default:
428 return -EINVAL;
429 }
430 s->buffers_stolen = 0;
431
432 /* mute/unmute video */
433 cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
434 s->handle, !!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags));
435
436 /* Clear Streamoff flags in case they were left over from the last capture */
437 clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);
438
439 cx18_vapi_result(cx, data, CX18_CREATE_TASK, 1, CPU_CMD_MASK_CAPTURE);
440 s->handle = data[0];
441 cx18_vapi(cx, CX18_CPU_SET_CHANNEL_TYPE, 2, s->handle, captype);
442
443 if (atomic_read(&cx->capturing) == 0 && !ts) {
444 /* Stuff from Windows, we don't know what it is */
445 cx18_vapi(cx, CX18_CPU_SET_VER_CROP_LINE, 2, s->handle, 0);
446 cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 3, 1);
447 cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 8, 0);
448 cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 4, 1);
449 cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2, s->handle, 12);
450
451 cx18_vapi(cx, CX18_CPU_SET_CAPTURE_LINE_NO, 3,
452 s->handle, cx->digitizer, cx->digitizer);
453
454 /* Setup VBI */
455 if (cx->v4l2_cap & V4L2_CAP_VBI_CAPTURE)
456 cx18_vbi_setup(s);
457
458 /* assign program index info.
459 Mask 7: select I/P/B, Num_req: 400 max */
460 cx18_vapi_result(cx, data, CX18_CPU_SET_INDEXTABLE, 1, 0);
461
462 /* Setup API for Stream */
463 cx2341x_update(cx, cx18_api_func, NULL, &cx->params);
464 }
465
466 if (atomic_read(&cx->capturing) == 0) {
467 clear_bit(CX18_F_I_EOS, &cx->i_flags);
468 write_reg(7, CX18_DSP0_INTERRUPT_MASK);
469 }
470
471 cx18_vapi(cx, CX18_CPU_DE_SET_MDL_ACK, 3, s->handle,
472 (void *)&cx->scb->cpu_mdl_ack[s->type][0] - cx->enc_mem,
473 (void *)&cx->scb->cpu_mdl_ack[s->type][1] - cx->enc_mem);
474
475 list_for_each(p, &s->q_free.list) {
476 struct cx18_buffer *buf = list_entry(p, struct cx18_buffer, list);
477
478 writel(buf->dma_handle, &cx->scb->cpu_mdl[buf->id].paddr);
479 writel(s->buf_size, &cx->scb->cpu_mdl[buf->id].length);
480 cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
481 (void *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem, 1,
482 buf->id, s->buf_size);
483 }
484 /* begin_capture */
485 if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) {
486 CX18_DEBUG_WARN("Error starting capture!\n");
487 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
488 return -EINVAL;
489 }
490
491 /* you're live! sit back and await interrupts :) */
492 atomic_inc(&cx->capturing);
493 return 0;
494}
495
496void cx18_stop_all_captures(struct cx18 *cx)
497{
498 int i;
499
500 for (i = CX18_MAX_STREAMS - 1; i >= 0; i--) {
501 struct cx18_stream *s = &cx->streams[i];
502
503 if (s->v4l2dev == NULL && s->dvb.enabled == 0)
504 continue;
505 if (test_bit(CX18_F_S_STREAMING, &s->s_flags))
506 cx18_stop_v4l2_encode_stream(s, 0);
507 }
508}
509
510int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
511{
512 struct cx18 *cx = s->cx;
513 unsigned long then;
514
515 if (s->v4l2dev == NULL && s->dvb.enabled == 0)
516 return -EINVAL;
517
518 /* This function assumes that you are allowed to stop the capture
519 and that we are actually capturing */
520
521 CX18_DEBUG_INFO("Stop Capture\n");
522
523 if (atomic_read(&cx->capturing) == 0)
524 return 0;
525
526 if (s->type == CX18_ENC_STREAM_TYPE_MPG)
527 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, !gop_end);
528 else
529 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);
530
531 then = jiffies;
532
533 if (s->type == CX18_ENC_STREAM_TYPE_MPG && gop_end) {
534 CX18_INFO("ignoring gop_end: not (yet?) supported by the firmware\n");
535 }
536
537 atomic_dec(&cx->capturing);
538
539 /* Clear capture and no-read bits */
540 clear_bit(CX18_F_S_STREAMING, &s->s_flags);
541
542 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
543 s->handle = 0xffffffff;
544
545 if (atomic_read(&cx->capturing) > 0)
546 return 0;
547
548 write_reg(5, CX18_DSP0_INTERRUPT_MASK);
549 wake_up(&s->waitq);
550
551 return 0;
552}
553
554u32 cx18_find_handle(struct cx18 *cx)
555{
556 int i;
557
558 /* find first available handle to be used for global settings */
559 for (i = 0; i < CX18_MAX_STREAMS; i++) {
560 struct cx18_stream *s = &cx->streams[i];
561
562 if (s->v4l2dev && s->handle)
563 return s->handle;
564 }
565 return 0;
566}
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
new file mode 100644
index 000000000000..8c7ba7d2fa79
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -0,0 +1,33 @@
1/*
2 * cx18 init/start/stop/exit stream functions
3 *
4 * Derived from ivtv-streams.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24u32 cx18_find_handle(struct cx18 *cx);
25int cx18_streams_setup(struct cx18 *cx);
26int cx18_streams_register(struct cx18 *cx);
27void cx18_streams_cleanup(struct cx18 *cx);
28
29/* Capture related */
30int cx18_start_v4l2_encode_stream(struct cx18_stream *s);
31int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end);
32
33void cx18_stop_all_captures(struct cx18 *cx);
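
Taken together, the entry points above suggest a natural ordering; a hedged sketch of how a caller might drive them during probe and remove (the wrapper names are invented; cx18_streams_setup() and cx18_streams_register() already clean up after themselves on error, as cx18-streams.c shows):

/* Sketch only: plausible probe/remove sequencing for the API above. */
static int cx18_probe_streams(struct cx18 *cx)
{
	int ret;

	ret = cx18_streams_setup(cx);		/* prepare and allocate all streams */
	if (ret)
		return ret;			/* setup cleaned up on failure */

	return cx18_streams_register(cx);	/* expose the v4l2/DVB devices */
}

static void cx18_remove_streams(struct cx18 *cx)
{
	cx18_stop_all_captures(cx);		/* stop anything still streaming */
	cx18_streams_cleanup(cx);		/* unregister and free everything */
}
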
diff --git a/drivers/media/video/cx18/cx18-vbi.c b/drivers/media/video/cx18/cx18-vbi.c
new file mode 100644
index 000000000000..22e76ee3f447
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-vbi.c
@@ -0,0 +1,208 @@
1/*
2 * cx18 Vertical Blank Interval support functions
3 *
4 * Derived from ivtv-vbi.c
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24#include "cx18-driver.h"
25#include "cx18-vbi.h"
26#include "cx18-ioctl.h"
27#include "cx18-queue.h"
28#include "cx18-av-core.h"
29
30static void copy_vbi_data(struct cx18 *cx, int lines, u32 pts_stamp)
31{
32 int line = 0;
33 int i;
34 u32 linemask[2] = { 0, 0 };
35 unsigned short size;
36 static const u8 mpeg_hdr_data[] = {
37 0x00, 0x00, 0x01, 0xba, 0x44, 0x00, 0x0c, 0x66,
38 0x24, 0x01, 0x01, 0xd1, 0xd3, 0xfa, 0xff, 0xff,
39 0x00, 0x00, 0x01, 0xbd, 0x00, 0x1a, 0x84, 0x80,
40 0x07, 0x21, 0x00, 0x5d, 0x63, 0xa7, 0xff, 0xff
41 };
42 const int sd = sizeof(mpeg_hdr_data); /* start of vbi data */
43 int idx = cx->vbi.frame % CX18_VBI_FRAMES;
44 u8 *dst = &cx->vbi.sliced_mpeg_data[idx][0];
45
46 for (i = 0; i < lines; i++) {
47 struct v4l2_sliced_vbi_data *sdata = cx->vbi.sliced_data + i;
48 int f, l;
49
50 if (sdata->id == 0)
51 continue;
52
53 l = sdata->line - 6;
54 f = sdata->field;
55 if (f)
56 l += 18;
57 if (l < 32)
58 linemask[0] |= (1 << l);
59 else
60 linemask[1] |= (1 << (l - 32));
61 dst[sd + 12 + line * 43] = cx18_service2vbi(sdata->id);
62 memcpy(dst + sd + 12 + line * 43 + 1, sdata->data, 42);
63 line++;
64 }
65 memcpy(dst, mpeg_hdr_data, sizeof(mpeg_hdr_data));
66 if (line == 36) {
67 /* All lines are used, so there is no space for the linemask
68 (the max size of the VBI data is 36 * 43 + 4 bytes).
69 So in this case we use the magic number 'ITV0'. */
70 memcpy(dst + sd, "ITV0", 4);
71 memcpy(dst + sd + 4, dst + sd + 12, line * 43);
72 size = 4 + ((43 * line + 3) & ~3);
73 } else {
74 memcpy(dst + sd, "cx0", 4);
75 memcpy(dst + sd + 4, &linemask[0], 8);
76 size = 12 + ((43 * line + 3) & ~3);
77 }
78 dst[4+16] = (size + 10) >> 8;
79 dst[5+16] = (size + 10) & 0xff;
80 dst[9+16] = 0x21 | ((pts_stamp >> 29) & 0x6);
81 dst[10+16] = (pts_stamp >> 22) & 0xff;
82 dst[11+16] = 1 | ((pts_stamp >> 14) & 0xff);
83 dst[12+16] = (pts_stamp >> 7) & 0xff;
84 dst[13+16] = 1 | ((pts_stamp & 0x7f) << 1);
85 cx->vbi.sliced_mpeg_size[idx] = sd + size;
86}
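
For reference, the private-stream payload built above is self-describing: a 32-byte header copied from mpeg_hdr_data, a 4-byte magic, then either an 8-byte line mask ("cx0") or no mask at all ("ITV0"). A sketch of a hypothetical consumer locating the 43-byte sliced lines, derived from the layout above:

/* Hypothetical reader of the packet produced by copy_vbi_data().
 * Returns the byte offset of the first sliced line, or -1. */
#include <string.h>

static int cx18_vbi_payload_offset(const unsigned char *pkt)
{
	const int sd = 32;			/* sizeof(mpeg_hdr_data) above */

	if (memcmp(pkt + sd, "ITV0", 4) == 0)
		return sd + 4;			/* all 36 lines, no line mask */
	if (memcmp(pkt + sd, "cx0", 4) == 0)
		return sd + 12;			/* line mask at pkt[sd+4..sd+11] */
	return -1;				/* not a cx18 sliced VBI packet */
}
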
87
88/* Compress raw VBI format, removes leading SAV codes and surplus space
89 after the field.
90 Returns new compressed size. */
91static u32 compress_raw_buf(struct cx18 *cx, u8 *buf, u32 size)
92{
93 u32 line_size = cx->vbi.raw_decoder_line_size;
94 u32 lines = cx->vbi.count;
95 u8 sav1 = cx->vbi.raw_decoder_sav_odd_field;
96 u8 sav2 = cx->vbi.raw_decoder_sav_even_field;
97 u8 *q = buf;
98 u8 *p;
99 int i;
100
101 for (i = 0; i < lines; i++) {
102 p = buf + i * line_size;
103
104 /* Look for SAV code */
105 if (p[0] != 0xff || p[1] || p[2] ||
106 (p[3] != sav1 && p[3] != sav2))
107 break;
108 memcpy(q, p + 4, line_size - 4);
109 q += line_size - 4;
110 }
111 return lines * (line_size - 4);
112}
113
114
115/* Compress sliced VBI format: all found sliced blocks are put next to one another.
116 Returns new compressed size */
117static u32 compress_sliced_buf(struct cx18 *cx, u32 line, u8 *buf,
118 u32 size, u8 sav)
119{
120 u32 line_size = cx->vbi.sliced_decoder_line_size;
121 struct v4l2_decode_vbi_line vbi;
122 int i;
123
124 /* find the first valid line */
125 for (i = 0; i < size; i++, buf++) {
126 if (buf[0] == 0xff && !buf[1] && !buf[2] && buf[3] == sav)
127 break;
128 }
129
130 size -= i;
131 if (size < line_size)
132 return line;
133 for (i = 0; i < size / line_size; i++) {
134 u8 *p = buf + i * line_size;
135
136 /* Look for SAV code */
137 if (p[0] != 0xff || p[1] || p[2] || p[3] != sav)
138 continue;
139 vbi.p = p + 4;
140 cx18_av_cmd(cx, VIDIOC_INT_DECODE_VBI_LINE, &vbi);
141 if (vbi.type) {
142 cx->vbi.sliced_data[line].id = vbi.type;
143 cx->vbi.sliced_data[line].field = vbi.is_second_field;
144 cx->vbi.sliced_data[line].line = vbi.line;
145 memcpy(cx->vbi.sliced_data[line].data, vbi.p, 42);
146 line++;
147 }
148 }
149 return line;
150}
151
152void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf,
153 u64 pts_stamp, int streamtype)
154{
155 u8 *p = (u8 *) buf->buf;
156 u32 size = buf->bytesused;
157 int lines;
158
159 if (streamtype != CX18_ENC_STREAM_TYPE_VBI)
160 return;
161
162 /* Raw VBI data */
163 if (cx->vbi.sliced_in->service_set == 0) {
164 u8 type;
165
166 cx18_buf_swap(buf);
167
168 type = p[3];
169
170 size = buf->bytesused = compress_raw_buf(cx, p, size);
171
172 /* second field of the frame? */
173 if (type == cx->vbi.raw_decoder_sav_even_field) {
174 /* Dirty hack needed for backwards
175 compatibility of old VBI software. */
176 p += size - 4;
177 memcpy(p, &cx->vbi.frame, 4);
178 cx->vbi.frame++;
179 }
180 return;
181 }
182
183 /* Sliced VBI data with data insertion */
184 cx18_buf_swap(buf);
185
186 /* first field */
187 lines = compress_sliced_buf(cx, 0, p, size / 2,
188 cx->vbi.sliced_decoder_sav_odd_field);
189 /* second field */
190 /* experimentation shows that the second half does not always
191 begin at the exact address. So start a bit earlier
192 (hence 32). */
193 lines = compress_sliced_buf(cx, lines, p + size / 2 - 32,
194 size / 2 + 32, cx->vbi.sliced_decoder_sav_even_field);
195 /* always return at least one empty line */
196 if (lines == 0) {
197 cx->vbi.sliced_data[0].id = 0;
198 cx->vbi.sliced_data[0].line = 0;
199 cx->vbi.sliced_data[0].field = 0;
200 lines = 1;
201 }
202 buf->bytesused = size = lines * sizeof(cx->vbi.sliced_data[0]);
203 memcpy(p, &cx->vbi.sliced_data[0], size);
204
205 if (cx->vbi.insert_mpeg)
206 copy_vbi_data(cx, lines, pts_stamp);
207 cx->vbi.frame++;
208}
diff --git a/drivers/media/video/cx18/cx18-vbi.h b/drivers/media/video/cx18/cx18-vbi.h
new file mode 100644
index 000000000000..c56ff7d28f20
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-vbi.h
@@ -0,0 +1,26 @@
1/*
2 * cx18 Vertical Blank Interval support functions
3 *
4 * Derived from ivtv-vbi.h
5 *
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 * 02111-1307 USA
22 */
23
24void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf,
25 u64 pts_stamp, int streamtype);
26int cx18_used_line(struct cx18 *cx, int line, int field);
diff --git a/drivers/media/video/cx18/cx18-version.h b/drivers/media/video/cx18/cx18-version.h
new file mode 100644
index 000000000000..d5c7a6f968dd
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-version.h
@@ -0,0 +1,34 @@
1/*
2 * cx18 driver version information
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#ifndef CX18_VERSION_H
23#define CX18_VERSION_H
24
25#define CX18_DRIVER_NAME "cx18"
26#define CX18_DRIVER_VERSION_MAJOR 1
27#define CX18_DRIVER_VERSION_MINOR 0
28#define CX18_DRIVER_VERSION_PATCHLEVEL 0
29
30#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL)
31#define CX18_DRIVER_VERSION KERNEL_VERSION(CX18_DRIVER_VERSION_MAJOR, \
32 CX18_DRIVER_VERSION_MINOR, CX18_DRIVER_VERSION_PATCHLEVEL)
33
34#endif
diff --git a/drivers/media/video/cx18/cx18-video.c b/drivers/media/video/cx18/cx18-video.c
new file mode 100644
index 000000000000..2e5c41939330
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-video.c
@@ -0,0 +1,45 @@
1/*
2 * cx18 video interface functions
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#include "cx18-driver.h"
23#include "cx18-video.h"
24#include "cx18-av-core.h"
25#include "cx18-cards.h"
26
27void cx18_video_set_io(struct cx18 *cx)
28{
29 struct v4l2_routing route;
30 int inp = cx->active_input;
31 u32 type;
32
33 route.input = cx->card->video_inputs[inp].video_input;
34 route.output = 0;
35 cx18_av_cmd(cx, VIDIOC_INT_S_VIDEO_ROUTING, &route);
36
37 type = cx->card->video_inputs[inp].video_type;
38
39 if (type == CX18_CARD_INPUT_VID_TUNER)
40 route.input = 0; /* Tuner */
41 else if (type < CX18_CARD_INPUT_COMPOSITE1)
42 route.input = 2; /* S-Video */
43 else
44 route.input = 1; /* Composite */
45}
diff --git a/drivers/media/video/cx18/cx18-video.h b/drivers/media/video/cx18/cx18-video.h
new file mode 100644
index 000000000000..529006a06e5c
--- /dev/null
+++ b/drivers/media/video/cx18/cx18-video.h
@@ -0,0 +1,22 @@
1/*
2 * cx18 video interface functions
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22void cx18_video_set_io(struct cx18 *cx);
diff --git a/drivers/media/video/cx18/cx23418.h b/drivers/media/video/cx18/cx23418.h
new file mode 100644
index 000000000000..33f78da9dba8
--- /dev/null
+++ b/drivers/media/video/cx18/cx23418.h
@@ -0,0 +1,458 @@
1/*
2 * cx18 header containing common defines.
3 *
4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * 02111-1307 USA
20 */
21
22#ifndef CX23418_H
23#define CX23418_H
24
25#include <media/cx2341x.h>
26
27#define MGR_CMD_MASK 0x40000000
28/* The MSB of the command code indicates that this is the completion of a
29 command */
30#define MGR_CMD_MASK_ACK (MGR_CMD_MASK | 0x80000000)
31
32/* Description: This command creates a new instance of a certain task
33 IN[0] - Task ID. This is one of the XPU_CMD_MASK_YYY where XPU is
34 the processor on which the task YYY will be created
35 OUT[0] - Task handle. This handle is passed along with commands to
36 dispatch to the right instance of the task
37 ReturnCode - One of the ERR_SYS_... */
38#define CX18_CREATE_TASK (MGR_CMD_MASK | 0x0001)
39
40/* Description: This command destroys an instance of a task
41 IN[0] - Task handle. Handle of the task to destroy
42 ReturnCode - One of the ERR_SYS_... */
43#define CX18_DESTROY_TASK (MGR_CMD_MASK | 0x0002)
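
The two commands above define the task lifecycle that the rest of this header builds on. The round trip looks like this with the cx18_vapi()/cx18_vapi_result() helpers, exactly as cx18-streams.c uses them (a struct cx18 *cx is assumed to be in scope; the intermediate capture commands are elided):

/* Sketch: create a capture task, use its handle, then destroy it. */
u32 data[MAX_MB_ARGUMENTS];
u32 handle;

cx18_vapi_result(cx, data, CX18_CREATE_TASK, 1, CPU_CMD_MASK_CAPTURE);
handle = data[0];				/* OUT[0]: the task handle */

/* ... dispatch CPU_CMD_MASK_CAPTURE commands against 'handle' here ... */

cx18_vapi(cx, CX18_DESTROY_TASK, 1, handle);
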
44
45/* All commands for CPU have the following mask set */
46#define CPU_CMD_MASK 0x20000000
47#define CPU_CMD_MASK_ACK (CPU_CMD_MASK | 0x80000000)
48#define CPU_CMD_MASK_CAPTURE (CPU_CMD_MASK | 0x00020000)
49#define CPU_CMD_MASK_TS (CPU_CMD_MASK | 0x00040000)
50
51#define EPU_CMD_MASK 0x02000000
52#define EPU_CMD_MASK_DEBUG (EPU_CMD_MASK | 0x000000)
53#define EPU_CMD_MASK_DE (EPU_CMD_MASK | 0x040000)
54
55/* Description: This command indicates that a Memory Descriptor List has been
56 filled with the requested channel type
57 IN[0] - Task handle. Handle of the task
58 IN[1] - Offset of the MDL_ACK from the beginning of the local DDR.
59 IN[2] - Number of CNXT_MDL_ACK structures in the array pointed to by IN[1]
60 ReturnCode - One of the ERR_DE_... */
61#define CX18_EPU_DMA_DONE (EPU_CMD_MASK_DE | 0x0001)
62
63/* Something interesting happened
64 IN[0] - A value to log
65 IN[1] - An offset of a string in the MiniMe memory;
66 0/zero/NULL means "I have nothing to say" */
67#define CX18_EPU_DEBUG (EPU_CMD_MASK_DEBUG | 0x0003)
68
69/* Description: This command starts streaming with the set channel type
70 IN[0] - Task handle. Handle of the task to start
71 ReturnCode - One of the ERR_CAPTURE_... */
72#define CX18_CPU_CAPTURE_START (CPU_CMD_MASK_CAPTURE | 0x0002)
73
74/* Description: This command stops streaming with the set channel type
75 IN[0] - Task handle. Handle of the task to stop
76 IN[1] - 0 = stop at end of GOP, 1 = stop at end of frame (MPEG only)
77 ReturnCode - One of the ERR_CAPTURE_... */
78#define CX18_CPU_CAPTURE_STOP (CPU_CMD_MASK_CAPTURE | 0x0003)
79
80/* Description: This command pauses streaming with the set channel type
81 IN[0] - Task handle. Handle of the task to pause
82 ReturnCode - One of the ERR_CAPTURE_... */
83#define CX18_CPU_CAPTURE_PAUSE (CPU_CMD_MASK_CAPTURE | 0x0007)
84
85/* Description: This command resumes streaming with the set channel type
86 IN[0] - Task handle. Handle of the task to resume
87 ReturnCode - One of the ERR_CAPTURE_... */
88#define CX18_CPU_CAPTURE_RESUME (CPU_CMD_MASK_CAPTURE | 0x0008)
89
90#define CAPTURE_CHANNEL_TYPE_NONE 0
91#define CAPTURE_CHANNEL_TYPE_MPEG 1
92#define CAPTURE_CHANNEL_TYPE_INDEX 2
93#define CAPTURE_CHANNEL_TYPE_YUV 3
94#define CAPTURE_CHANNEL_TYPE_PCM 4
95#define CAPTURE_CHANNEL_TYPE_VBI 5
96#define CAPTURE_CHANNEL_TYPE_SLICED_VBI 6
97#define CAPTURE_CHANNEL_TYPE_TS 7
98#define CAPTURE_CHANNEL_TYPE_MAX 15
99
100/* Description: This command sets the channel type. This can only be done
101 when stopped.
102 IN[0] - Task handle. Handle of the task to start
103 IN[1] - Channel Type. See Below.
104 ReturnCode - One of the ERR_CAPTURE_... */
105#define CX18_CPU_SET_CHANNEL_TYPE (CPU_CMD_MASK_CAPTURE + 1)
106
107/* Description: Set stream output type
108 IN[0] - task handle. Handle of the task to start
109 IN[1] - type
110 ReturnCode - One of the ERR_CAPTURE_... */
111#define CX18_CPU_SET_STREAM_OUTPUT_TYPE (CPU_CMD_MASK_CAPTURE | 0x0012)
112
113/* Description: Set video input resolution and frame rate
114 IN[0] - task handle
115 IN[1] - reserved
116 IN[2] - reserved
117 IN[3] - reserved
118 IN[4] - reserved
119 IN[5] - frame rate, 0 - 29.97f/s, 1 - 25f/s
120 ReturnCode - One of the ERR_CAPTURE_... */
121#define CX18_CPU_SET_VIDEO_IN (CPU_CMD_MASK_CAPTURE | 0x0004)
122
123/* Description: Set video bit rate
124 IN[0] - task handle. Handle of the task to start
125 IN[1] - video bit rate mode
126 IN[2] - video average rate
127 IN[3] - video peak rate
128 IN[4] - system mux rate
129 ReturnCode - One of the ERR_CAPTURE_... */
130#define CX18_CPU_SET_VIDEO_RATE (CPU_CMD_MASK_CAPTURE | 0x0005)
131
132/* Description: Set video output resolution
133 IN[0] - task handle
134 IN[1] - horizontal size
135 IN[2] - vertical size
136 ReturnCode - One of the ERR_CAPTURE_... */
137#define CX18_CPU_SET_VIDEO_RESOLUTION (CPU_CMD_MASK_CAPTURE | 0x0006)
138
139/* Description: This command sets filter parameters
140 IN[0] - Task handle. Handle of the task
141 IN[1] - type, 0 - temporal, 1 - spatial, 2 - median
142 IN[2] - mode, temporal/spatial: 0 - disable, 1 - static, 2 - dynamic
143 median: 0 = disable, 1 = horizontal, 2 = vertical,
144 3 = horizontal/vertical, 4 = diagonal
145 IN[3] - strength, temporal 0 - 31, spatial 0 - 15
146 ReturnCode - One of the ERR_CAPTURE_... */
147#define CX18_CPU_SET_FILTER_PARAM (CPU_CMD_MASK_CAPTURE | 0x0009)
148
149/* Description: This command sets the spatial filter type
150 IN[0] - Task handle.
151 IN[1] - luma type: 0 = disable, 1 = 1D horizontal only, 2 = 1D vertical only,
152 3 = 2D H/V separable, 4 = 2D symmetric non-separable
153 IN[2] - chroma type: 0 = disable, 1 = 1D horizontal
154 ReturnCode - One of the ERR_CAPTURE_... */
155#define CX18_CPU_SET_SPATIAL_FILTER_TYPE (CPU_CMD_MASK_CAPTURE | 0x000C)
156
157/* Description: This command sets coring levels for the median filter
158 IN[0] - Task handle.
159 IN[1] - luma_high
160 IN[2] - luma_low
161 IN[3] - chroma_high
162 IN[4] - chroma_low
163 ReturnCode - One of the ERR_CAPTURE_... */
164#define CX18_CPU_SET_MEDIAN_CORING (CPU_CMD_MASK_CAPTURE | 0x000E)
165
166/* Description: This command sets the picture type mask for the index file
167 IN[0] - 0 = disable index file output
168 1 = output I picture
169 2 = P picture
170 4 = B picture
171 other = illegal */
172#define CX18_CPU_SET_INDEXTABLE (CPU_CMD_MASK_CAPTURE | 0x0010)
173
174/* Description: Set audio parameters
175 IN[0] - task handle. Handle of the task to start
176 IN[1] - audio parameter
177 ReturnCode - One of the ERR_CAPTURE_... */
178#define CX18_CPU_SET_AUDIO_PARAMETERS (CPU_CMD_MASK_CAPTURE | 0x0011)
179
180/* Description: Set video mute
181 IN[0] - task handle. Handle of the task to start
182 IN[1] - bit31-24: muteYvalue
183 bit23-16: muteUvalue
184 bit15-8: muteVvalue
185 bit0: 1:mute, 0: unmute
186 ReturnCode - One of the ERR_CAPTURE_... */
187#define CX18_CPU_SET_VIDEO_MUTE (CPU_CMD_MASK_CAPTURE | 0x0013)
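
Given the bit layout above, IN[1] packs three pixel values and the mute flag into a single word. A hedged sketch of a packing helper (the helper name and the Y/U/V values are invented; the driver itself only passes the mute bit, see cx18_start_v4l2_encode_stream()):

/* Sketch: build the IN[1] argument for CX18_CPU_SET_VIDEO_MUTE. */
static inline u32 cx18_video_mute_arg(u8 y, u8 u, u8 v, int mute)
{
	return ((u32)y << 24) | ((u32)u << 16) | ((u32)v << 8) | (mute ? 1 : 0);
}

/* e.g. mute to a black-ish YUV value:
 *	cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle,
 *		  cx18_video_mute_arg(0x10, 0x80, 0x80, 1));
 */
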
188
189/* Description: Set audio mute
190 IN[0] - task handle. Handle of the task to start
191 IN[1] - mute/unmute
192 ReturnCode - One of the ERR_CAPTURE_... */
193#define CX18_CPU_SET_AUDIO_MUTE (CPU_CMD_MASK_CAPTURE | 0x0014)
194
195/* Description: Set miscellaneous encoder parameters
196 IN[0] - task handle. Handle of the task to start
197 IN[1] - subType
198 SET_INITIAL_SCR 1
199 SET_QUALITY_MODE 2
200 SET_VIM_PROTECT_MODE 3
201 SET_PTS_CORRECTION 4
202 SET_USB_FLUSH_MODE 5
203 SET_MERAQPAR_ENABLE 6
204 SET_NAV_PACK_INSERTION 7
205 SET_SCENE_CHANGE_ENABLE 8
206 IN[2] - parameter 1
207 IN[3] - parameter 2
208 ReturnCode - One of the ERR_CAPTURE_... */
209#define CX18_CPU_SET_MISC_PARAMETERS (CPU_CMD_MASK_CAPTURE | 0x0015)
210
211/* Description: Set raw VBI parameters
212 IN[0] - Task handle
213 IN[1] - No. of input lines per field:
214 bit[15:0]: field 1,
215 bit[31:16]: field 2
216 IN[2] - No. of input bytes per line
217 IN[3] - No. of output frames per transfer
218 IN[4] - start code
219 IN[5] - stop code
220 ReturnCode */
221#define CX18_CPU_SET_RAW_VBI_PARAM (CPU_CMD_MASK_CAPTURE | 0x0016)
222
223/* Description: Set capture line No.
224 IN[0] - task handle. Handle of the task to start
225 IN[1] - height1
226 IN[2] - height2
227 ReturnCode - One of the ERR_CAPTURE_... */
228#define CX18_CPU_SET_CAPTURE_LINE_NO (CPU_CMD_MASK_CAPTURE | 0x0017)
229
230/* Description: Set copyright
231 IN[0] - task handle. Handle of the task to start
232 IN[1] - copyright
233 ReturnCode - One of the ERR_CAPTURE_... */
234#define CX18_CPU_SET_COPYRIGHT (CPU_CMD_MASK_CAPTURE | 0x0018)
235
236/* Description: Set audio PID
237 IN[0] - task handle. Handle of the task to start
238 IN[1] - PID
239 ReturnCode - One of the ERR_CAPTURE_... */
240#define CX18_CPU_SET_AUDIO_PID (CPU_CMD_MASK_CAPTURE | 0x0019)
241
242/* Description: Set video PID
243 IN[0] - task handle. Handle of the task to start
244 IN[1] - PID
245 ReturnCode - One of the ERR_CAPTURE_... */
246#define CX18_CPU_SET_VIDEO_PID (CPU_CMD_MASK_CAPTURE | 0x001A)
247
248/* Description: Set Vertical Crop Line
249 IN[0] - task handle. Handle of the task to start
250 IN[1] - Line
251 ReturnCode - One of the ERR_CAPTURE_... */
252#define CX18_CPU_SET_VER_CROP_LINE (CPU_CMD_MASK_CAPTURE | 0x001B)
253
254/* Description: Set GOP structure
255 IN[0] - task handle. Handle of the task to start
256 IN[1] - M
257 IN[2] - N
258 ReturnCode - One of the ERR_CAPTURE_... */
259#define CX18_CPU_SET_GOP_STRUCTURE (CPU_CMD_MASK_CAPTURE | 0x001C)
260
261/* Description: Set Scene Change Detection
262 IN[0] - task handle. Handle of the task to start
263 IN[1] - scene change
264 ReturnCode - One of the ERR_CAPTURE_... */
265#define CX18_CPU_SET_SCENE_CHANGE_DETECTION (CPU_CMD_MASK_CAPTURE | 0x001D)
266
267/* Description: Set Aspect Ratio
268 IN[0] - task handle. Handle of the task to start
269 IN[1] - AspectRatio
270 ReturnCode - One of the ERR_CAPTURE_... */
271#define CX18_CPU_SET_ASPECT_RATIO (CPU_CMD_MASK_CAPTURE | 0x001E)
272
273/* Description: Set Skip Input Frame
274 IN[0] - task handle. Handle of the task to start
275 IN[1] - skip input frames
276 ReturnCode - One of the ERR_CAPTURE_... */
277#define CX18_CPU_SET_SKIP_INPUT_FRAME (CPU_CMD_MASK_CAPTURE | 0x001F)
278
279/* Description: Set sliced VBI parameters.
280 Note: this API only applies to MPEG and sliced VBI channels
281 IN[0] - Task handle
282 IN[1] - output type, 0 - CC, 1 - Moji, 2 - Teletext
283 IN[2] - start / stop line
284 bit[15:0] start line number
285 bit[31:16] stop line number
286 IN[3] - number of output frames per interrupt
287 IN[4] - VBI insertion mode
288 bit 0: output user data, 1 - enable
289 bit 1: output private stream, 1 - enable
290 bit 2: mux option, 0 - in GOP, 1 - in picture
291 bit[7:0] private stream ID
292 IN[5] - insertion period while mux option is in picture
293 ReturnCode - VBI data offset */
294#define CX18_CPU_SET_SLICED_VBI_PARAM (CPU_CMD_MASK_CAPTURE | 0x0020)
295
296/* Description: Set the user data place holder
297 IN[0] - type of data (0 for user)
298 IN[1] - Stuffing period
299 IN[2] - ID data size in word (less than 10)
300 IN[3] - Pointer to ID buffer */
301#define CX18_CPU_SET_USERDATA_PLACE_HOLDER (CPU_CMD_MASK_CAPTURE | 0x0021)
302
303
304/* Description:
305 In[0] Task Handle
306 return parameter:
307 Out[0] Reserved
308 Out[1] Video PTS bit[32:2] of last output video frame.
309 Out[2] Video PTS bit[ 1:0] of last output video frame.
310 Out[3] Hardware Video PTS counter bit[31:0],
311 these bits get incremented on every 90kHz clock tick.
312 Out[4] Hardware Video PTS counter bit32,
313 these bits get incremented on every 90kHz clock tick.
314 ReturnCode */
315#define CX18_CPU_GET_ENC_PTS (CPU_CMD_MASK_CAPTURE | 0x0022)
316
317/* Below is the list of commands related to the data exchange */
318#define CPU_CMD_MASK_DE (CPU_CMD_MASK | 0x040000)
319
320/* Description: This command provides the physical base address of the local
321 DDR as viewed by EPU
322 IN[0] - Physical offset where EPU has the local DDR mapped
323 ReturnCode - One of the ERR_DE_... */
324#define CPU_CMD_DE_SetBase (CPU_CMD_MASK_DE | 0x0001)
325
326/* Description: This command provides the offsets in the device memory where
327 the 2 cx18_mdl_ack blocks reside
328 IN[0] - Task handle. Handle of the task to start
329 IN[1] - Offset of the first cx18_mdl_ack from the beginning of the
330 local DDR.
331 IN[2] - Offset of the second cx18_mdl_ack from the beginning of the
332 local DDR.
333 ReturnCode - One of the ERR_DE_... */
334#define CX18_CPU_DE_SET_MDL_ACK (CPU_CMD_MASK_DE | 0x0002)
335
336/* Description: This command provides the offset to a Memory Descriptor List
337 IN[0] - Task handle. Handle of the task to start
338 IN[1] - Offset of the MDL from the beginning of the local DDR.
339 IN[2] - Number of cx18_mdl structures in the array pointed to by IN[1]
340 IN[3] - Buffer ID
341 IN[4] - Total buffer length
342 ReturnCode - One of the ERR_DE_... */
343#define CX18_CPU_DE_SET_MDL (CPU_CMD_MASK_DE | 0x0005)
344
345/* Description: This command requests return of all current Memory
346 Descriptor Lists to the driver
347 IN[0] - Task handle. Handle of the task to start
348 ReturnCode - One of the ERR_DE_... */
349/* #define CX18_CPU_DE_ReleaseMDL (CPU_CMD_MASK_DE | 0x0006) */
350
351/* Description: This command signals the CPU that the data buffer has been
352 consumed and is ready for re-use.
353 IN[0] - Task handle. Handle of the task
354 IN[1] - Offset of the data block from the beginning of the local DDR.
355 IN[2] - Number of bytes in the data block
356 ReturnCode - One of the ERR_DE_... */
357/* #define CX18_CPU_DE_RELEASE_BUFFER (CPU_CMD_MASK_DE | 0x0007) */
358
359/* No Error / Success */
360#define CNXT_OK 0x000000
361
362/* Received unknown command */
363#define CXERR_UNK_CMD 0x000001
364
365/* First parameter in the command is invalid */
366#define CXERR_INVALID_PARAM1 0x000002
367
368/* Second parameter in the command is invalid */
369#define CXERR_INVALID_PARAM2 0x000003
370
371/* Device interface is not open/found */
372#define CXERR_DEV_NOT_FOUND 0x000004
373
374/* Requested function is not implemented/available */
375#define CXERR_NOTSUPPORTED 0x000005
376
377/* Invalid pointer is provided */
378#define CXERR_BADPTR 0x000006
379
380/* Unable to allocate memory */
381#define CXERR_NOMEM 0x000007
382
383/* Object/Link not found */
384#define CXERR_LINK 0x000008
385
386/* Device busy, command cannot be executed */
387#define CXERR_BUSY 0x000009
388
389/* File/device/handle is not open. */
390#define CXERR_NOT_OPEN 0x00000A
391
392/* Value is out of range */
393#define CXERR_OUTOFRANGE 0x00000B
394
395/* Buffer overflow */
396#define CXERR_OVERFLOW 0x00000C
397
398/* Version mismatch */
399#define CXERR_BADVER 0x00000D
400
401/* Operation timed out */
402#define CXERR_TIMEOUT 0x00000E
403
404/* Operation aborted */
405#define CXERR_ABORT 0x00000F
406
407/* Specified I2C device not found for read/write */
408#define CXERR_I2CDEV_NOTFOUND 0x000010
409
410/* Error in I2C data xfer (but I2C device is present) */
411#define CXERR_I2CDEV_XFERERR 0x000011
412
413/* Channel changing component not ready */
414#define CXERR_CHANNELNOTREADY 0x000012
415
416/* PPU (Presentation/Decoder) mail box is corrupted */
417#define CXERR_PPU_MB_CORRUPT 0x000013
418
419/* CPU (Capture/Encoder) mail box is corrupted */
420#define CXERR_CPU_MB_CORRUPT 0x000014
421
422/* APU (Audio) mail box is corrupted */
423#define CXERR_APU_MB_CORRUPT 0x000015
424
425/* Unable to open file for reading */
426#define CXERR_FILE_OPEN_READ 0x000016
427
428/* Unable to open file for writing */
429#define CXERR_FILE_OPEN_WRITE 0x000017
430
431/* Unable to find the I2C section specified */
432#define CXERR_I2C_BADSECTION 0x000018
433
434/* I2C data line is stuck low (but I2C device is present) */
435#define CXERR_I2CDEV_DATALOW 0x000019
436
437/* I2C clock line is stuck low (but I2C device is present) */
438#define CXERR_I2CDEV_CLOCKLOW 0x00001A
439
440/* No Interrupt received from HW (for I2C access) */
441#define CXERR_NO_HW_I2C_INTR 0x00001B
442
443/* RPU is not ready to accept commands! */
444#define CXERR_RPU_NOT_READY 0x00001C
445
446/* RPU did not acknowledge the command! */
447#define CXERR_RPU_NO_ACK 0x00001D
448
449/* There are no buffers ready. Try again soon! */
450#define CXERR_NODATA_AGAIN 0x00001E
451
452/* The stream is stopping. Function not allowed now! */
453#define CXERR_STOPPING_STATUS 0x00001F
454
455/* Trying to access hardware when the power is turned OFF */
456#define CXERR_DEVPOWER_OFF 0x000020
457
458#endif /* CX23418_H */
diff --git a/drivers/media/video/cx23885/Kconfig b/drivers/media/video/cx23885/Kconfig
index ca5fbce3a909..cadf936c3673 100644
--- a/drivers/media/video/cx23885/Kconfig
+++ b/drivers/media/video/cx23885/Kconfig
@@ -4,19 +4,19 @@ config VIDEO_CX23885
4 select I2C_ALGOBIT 4 select I2C_ALGOBIT
5 select FW_LOADER 5 select FW_LOADER
6 select VIDEO_BTCX 6 select VIDEO_BTCX
7 select VIDEO_TUNER 7 select MEDIA_TUNER
8 select VIDEO_TVEEPROM 8 select VIDEO_TVEEPROM
9 select VIDEO_IR 9 select VIDEO_IR
10 select VIDEOBUF_DVB 10 select VIDEOBUF_DVB
11 select VIDEO_CX25840 11 select VIDEO_CX25840
12 select DVB_TUNER_MT2131 if !DVB_FE_CUSTOMISE 12 select MEDIA_TUNER_MT2131 if !DVB_FE_CUSTOMISE
13 select DVB_S5H1409 if !DVB_FE_CUSTOMISE 13 select DVB_S5H1409 if !DVB_FE_CUSTOMISE
14 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 14 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
15 select DVB_PLL if !DVB_FE_CUSTOMISE 15 select DVB_PLL if !DVB_FE_CUSTOMISE
16 select TUNER_XC2028 if !DVB_FE_CUSTOMIZE 16 select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMIZE
17 select TUNER_TDA8290 if !DVB_FE_CUSTOMIZE 17 select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE
18 select DVB_TDA18271 if !DVB_FE_CUSTOMIZE 18 select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE
19 select DVB_TUNER_XC5000 if !DVB_FE_CUSTOMIZE 19 select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
20 select DVB_TDA10048 if !DVB_FE_CUSTOMIZE 20 select DVB_TDA10048 if !DVB_FE_CUSTOMIZE
21 ---help--- 21 ---help---
22 This is a video4linux driver for Conexant 23885 based 22 This is a video4linux driver for Conexant 23885 based
diff --git a/drivers/media/video/cx23885/Makefile b/drivers/media/video/cx23885/Makefile
index d7b0721af062..29c23b44c13c 100644
--- a/drivers/media/video/cx23885/Makefile
+++ b/drivers/media/video/cx23885/Makefile
@@ -3,6 +3,7 @@ cx23885-objs := cx23885-cards.o cx23885-video.o cx23885-vbi.o cx23885-core.o cx2
3obj-$(CONFIG_VIDEO_CX23885) += cx23885.o 3obj-$(CONFIG_VIDEO_CX23885) += cx23885.o
4 4
5EXTRA_CFLAGS += -Idrivers/media/video 5EXTRA_CFLAGS += -Idrivers/media/video
6EXTRA_CFLAGS += -Idrivers/media/common/tuners
6EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 7EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
7EXTRA_CFLAGS += -Idrivers/media/dvb/frontends 8EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
8 9
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 7fde678b2c4a..88823810497c 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1209,7 +1209,8 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
1209 1209
1210/* ----------------------------------------------------------------------- */ 1210/* ----------------------------------------------------------------------- */
1211 1211
1212static int cx25840_probe(struct i2c_client *client) 1212static int cx25840_probe(struct i2c_client *client,
1213 const struct i2c_device_id *did)
1213{ 1214{
1214 struct cx25840_state *state; 1215 struct cx25840_state *state;
1215 u32 id; 1216 u32 id;
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 27635cdcbaf2..b0d7d6a7a4cc 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -5,7 +5,7 @@ config VIDEO_CX88
5 select FW_LOADER 5 select FW_LOADER
6 select VIDEO_BTCX 6 select VIDEO_BTCX
7 select VIDEOBUF_DMA_SG 7 select VIDEOBUF_DMA_SG
8 select VIDEO_TUNER 8 select MEDIA_TUNER
9 select VIDEO_TVEEPROM 9 select VIDEO_TVEEPROM
10 select VIDEO_IR 10 select VIDEO_IR
11 select VIDEO_WM8775 if VIDEO_HELPER_CHIPS_AUTO 11 select VIDEO_WM8775 if VIDEO_HELPER_CHIPS_AUTO
@@ -57,7 +57,7 @@ config VIDEO_CX88_DVB
57 select DVB_NXT200X if !DVB_FE_CUSTOMISE 57 select DVB_NXT200X if !DVB_FE_CUSTOMISE
58 select DVB_CX24123 if !DVB_FE_CUSTOMISE 58 select DVB_CX24123 if !DVB_FE_CUSTOMISE
59 select DVB_ISL6421 if !DVB_FE_CUSTOMISE 59 select DVB_ISL6421 if !DVB_FE_CUSTOMISE
60 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 60 select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
61 select DVB_S5H1411 if !DVB_FE_CUSTOMISE 61 select DVB_S5H1411 if !DVB_FE_CUSTOMISE
62 ---help--- 62 ---help---
63 This adds support for DVB/ATSC cards based on the 63 This adds support for DVB/ATSC cards based on the
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 532cee35eb3c..6ec30f242578 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
10obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o 10obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
11 11
12EXTRA_CFLAGS += -Idrivers/media/video 12EXTRA_CFLAGS += -Idrivers/media/video
13EXTRA_CFLAGS += -Idrivers/media/common/tuners
13EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 14EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
14EXTRA_CFLAGS += -Idrivers/media/dvb/frontends 15EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 2b6b283cda15..aeba26dc0a37 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -57,6 +57,9 @@ MODULE_PARM_DESC(latency,"pci latency timer");
57/* ------------------------------------------------------------------ */ 57/* ------------------------------------------------------------------ */
58/* board config info */ 58/* board config info */
59 59
60/* If radio_type !=UNSET, radio_addr should be specified
61 */
62
60static const struct cx88_board cx88_boards[] = { 63static const struct cx88_board cx88_boards[] = {
61 [CX88_BOARD_UNKNOWN] = { 64 [CX88_BOARD_UNKNOWN] = {
62 .name = "UNKNOWN/GENERIC", 65 .name = "UNKNOWN/GENERIC",
@@ -2446,25 +2449,31 @@ EXPORT_SYMBOL_GPL(cx88_setup_xc3028);
2446static void cx88_card_setup(struct cx88_core *core) 2449static void cx88_card_setup(struct cx88_core *core)
2447{ 2450{
2448 static u8 eeprom[256]; 2451 static u8 eeprom[256];
2452 struct tuner_setup tun_setup;
2453 unsigned int mode_mask = T_RADIO |
2454 T_ANALOG_TV |
2455 T_DIGITAL_TV;
2456
2457 memset(&tun_setup, 0, sizeof(tun_setup));
2449 2458
2450 if (0 == core->i2c_rc) { 2459 if (0 == core->i2c_rc) {
2451 core->i2c_client.addr = 0xa0 >> 1; 2460 core->i2c_client.addr = 0xa0 >> 1;
2452 tveeprom_read(&core->i2c_client,eeprom,sizeof(eeprom)); 2461 tveeprom_read(&core->i2c_client, eeprom, sizeof(eeprom));
2453 } 2462 }
2454 2463
2455 switch (core->boardnr) { 2464 switch (core->boardnr) {
2456 case CX88_BOARD_HAUPPAUGE: 2465 case CX88_BOARD_HAUPPAUGE:
2457 case CX88_BOARD_HAUPPAUGE_ROSLYN: 2466 case CX88_BOARD_HAUPPAUGE_ROSLYN:
2458 if (0 == core->i2c_rc) 2467 if (0 == core->i2c_rc)
2459 hauppauge_eeprom(core,eeprom+8); 2468 hauppauge_eeprom(core, eeprom+8);
2460 break; 2469 break;
2461 case CX88_BOARD_GDI: 2470 case CX88_BOARD_GDI:
2462 if (0 == core->i2c_rc) 2471 if (0 == core->i2c_rc)
2463 gdi_eeprom(core,eeprom); 2472 gdi_eeprom(core, eeprom);
2464 break; 2473 break;
2465 case CX88_BOARD_WINFAST2000XP_EXPERT: 2474 case CX88_BOARD_WINFAST2000XP_EXPERT:
2466 if (0 == core->i2c_rc) 2475 if (0 == core->i2c_rc)
2467 leadtek_eeprom(core,eeprom); 2476 leadtek_eeprom(core, eeprom);
2468 break; 2477 break;
2469 case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1: 2478 case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
2470 case CX88_BOARD_HAUPPAUGE_NOVASE2_S1: 2479 case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
@@ -2474,7 +2483,7 @@ static void cx88_card_setup(struct cx88_core *core)
2474 case CX88_BOARD_HAUPPAUGE_HVR3000: 2483 case CX88_BOARD_HAUPPAUGE_HVR3000:
2475 case CX88_BOARD_HAUPPAUGE_HVR1300: 2484 case CX88_BOARD_HAUPPAUGE_HVR1300:
2476 if (0 == core->i2c_rc) 2485 if (0 == core->i2c_rc)
2477 hauppauge_eeprom(core,eeprom); 2486 hauppauge_eeprom(core, eeprom);
2478 break; 2487 break;
2479 case CX88_BOARD_KWORLD_DVBS_100: 2488 case CX88_BOARD_KWORLD_DVBS_100:
2480 cx_write(MO_GP0_IO, 0x000007f8); 2489 cx_write(MO_GP0_IO, 0x000007f8);
@@ -2555,6 +2564,35 @@ static void cx88_card_setup(struct cx88_core *core)
2555 2564
2556 cx88_call_i2c_clients(core, TUNER_SET_CONFIG, &tea5767_cfg); 2565 cx88_call_i2c_clients(core, TUNER_SET_CONFIG, &tea5767_cfg);
2557 } 2566 }
 2567 } /* end switch() */
2568
2569
2570 /* Setup tuners */
2571 if ((core->board.radio_type != UNSET)) {
2572 tun_setup.mode_mask = T_RADIO;
2573 tun_setup.type = core->board.radio_type;
2574 tun_setup.addr = core->board.radio_addr;
2575 tun_setup.tuner_callback = cx88_tuner_callback;
2576 cx88_call_i2c_clients(core, TUNER_SET_TYPE_ADDR, &tun_setup);
2577 mode_mask &= ~T_RADIO;
2578 }
2579
2580 if (core->board.tuner_type != TUNER_ABSENT) {
2581 tun_setup.mode_mask = mode_mask;
2582 tun_setup.type = core->board.tuner_type;
2583 tun_setup.addr = core->board.tuner_addr;
2584 tun_setup.tuner_callback = cx88_tuner_callback;
2585
2586 cx88_call_i2c_clients(core, TUNER_SET_TYPE_ADDR, &tun_setup);
2587 }
2588
2589 if (core->board.tda9887_conf) {
2590 struct v4l2_priv_tun_config tda9887_cfg;
2591
2592 tda9887_cfg.tuner = TUNER_TDA9887;
2593 tda9887_cfg.priv = &core->board.tda9887_conf;
2594
2595 cx88_call_i2c_clients(core, TUNER_SET_CONFIG, &tda9887_cfg);
2558 } 2596 }
2559 2597
2560 if (core->board.tuner_type == TUNER_XC2028) { 2598 if (core->board.tuner_type == TUNER_XC2028) {
@@ -2572,6 +2610,7 @@ static void cx88_card_setup(struct cx88_core *core)
2572 ctl.fname); 2610 ctl.fname);
2573 cx88_call_i2c_clients(core, TUNER_SET_CONFIG, &xc2028_cfg); 2611 cx88_call_i2c_clients(core, TUNER_SET_CONFIG, &xc2028_cfg);
2574 } 2612 }
2613 cx88_call_i2c_clients (core, TUNER_SET_STANDBY, NULL);
2575} 2614}
2576 2615
2577/* ------------------------------------------------------------------ */ 2616/* ------------------------------------------------------------------ */
@@ -2710,7 +2749,6 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
2710 if (TUNER_ABSENT != core->board.tuner_type) 2749 if (TUNER_ABSENT != core->board.tuner_type)
2711 request_module("tuner"); 2750 request_module("tuner");
2712 2751
2713 cx88_call_i2c_clients (core, TUNER_SET_STANDBY, NULL);
2714 cx88_card_setup(core); 2752 cx88_card_setup(core);
2715 cx88_ir_init(core, pci); 2753 cx88_ir_init(core, pci);
2716 2754
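
The cx88-cards.c hunks above, together with the cx88-i2c.c removal that follows, move tuner, radio and tda9887 configuration out of the i2c attach_inform() hook and into cx88_card_setup(), broadcast with cx88_call_i2c_clients(). A minimal stand-alone sketch of the mode_mask bookkeeping the new code relies on; the flag values below are illustrative only, the real ones come from tuner.h:

    #include <stdio.h>

    /* Illustrative stand-ins for the tuner.h mode flags. */
    #define T_RADIO      (1 << 0)
    #define T_ANALOG_TV  (1 << 1)
    #define T_DIGITAL_TV (1 << 2)

    int main(void)
    {
        unsigned int mode_mask = T_RADIO | T_ANALOG_TV | T_DIGITAL_TV;
        int have_separate_radio_tuner = 1;   /* models radio_type != UNSET */

        if (have_separate_radio_tuner) {
            /* The dedicated radio tuner claims T_RADIO, so the TV tuner
             * is configured without it, as the new cx88_card_setup()
             * does after its TUNER_SET_TYPE_ADDR call for the radio. */
            mode_mask &= ~T_RADIO;
        }

        printf("TV tuner mode_mask = %#x\n", mode_mask);
        return 0;
    }
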
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index c6b44732a082..cb6a096069c7 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -99,42 +99,11 @@ static int cx8800_bit_getsda(void *data)
99 99
100static int attach_inform(struct i2c_client *client) 100static int attach_inform(struct i2c_client *client)
101{ 101{
102 struct tuner_setup tun_setup;
103 struct cx88_core *core = i2c_get_adapdata(client->adapter); 102 struct cx88_core *core = i2c_get_adapdata(client->adapter);
104 103
105 dprintk(1, "%s i2c attach [addr=0x%x,client=%s]\n", 104 dprintk(1, "%s i2c attach [addr=0x%x,client=%s]\n",
106 client->driver->driver.name, client->addr, client->name); 105 client->driver->driver.name, client->addr, client->name);
107 if (!client->driver->command)
108 return 0;
109
110 if (core->board.radio_type != UNSET) {
111 if ((core->board.radio_addr==ADDR_UNSET)||(core->board.radio_addr==client->addr)) {
112 tun_setup.mode_mask = T_RADIO;
113 tun_setup.type = core->board.radio_type;
114 tun_setup.addr = core->board.radio_addr;
115 tun_setup.tuner_callback = cx88_tuner_callback;
116 client->driver->command (client, TUNER_SET_TYPE_ADDR, &tun_setup);
117 }
118 }
119 if (core->board.tuner_type != UNSET) {
120 if ((core->board.tuner_addr==ADDR_UNSET)||(core->board.tuner_addr==client->addr)) {
121
122 tun_setup.mode_mask = T_ANALOG_TV;
123 tun_setup.type = core->board.tuner_type;
124 tun_setup.addr = core->board.tuner_addr;
125 tun_setup.tuner_callback = cx88_tuner_callback;
126 client->driver->command (client,TUNER_SET_TYPE_ADDR, &tun_setup);
127 }
128 }
129
130 if (core->board.tda9887_conf) {
131 struct v4l2_priv_tun_config tda9887_cfg;
132 106
133 tda9887_cfg.tuner = TUNER_TDA9887;
134 tda9887_cfg.priv = &core->board.tda9887_conf;
135
136 client->driver->command(client, TUNER_SET_CONFIG, &tda9887_cfg);
137 }
138 return 0; 107 return 0;
139} 108}
140 109
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index 9caffed2b6b8..c7c2896bbd8b 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -1,7 +1,7 @@
1config VIDEO_EM28XX 1config VIDEO_EM28XX
2 tristate "Empia EM28xx USB video capture support" 2 tristate "Empia EM28xx USB video capture support"
3 depends on VIDEO_DEV && I2C && INPUT 3 depends on VIDEO_DEV && I2C && INPUT
4 select VIDEO_TUNER 4 select MEDIA_TUNER
5 select VIDEO_TVEEPROM 5 select VIDEO_TVEEPROM
6 select VIDEO_IR 6 select VIDEO_IR
7 select VIDEOBUF_VMALLOC 7 select VIDEOBUF_VMALLOC
diff --git a/drivers/media/video/em28xx/Makefile b/drivers/media/video/em28xx/Makefile
index 3d1c3cc337fe..8137a8c94bfc 100644
--- a/drivers/media/video/em28xx/Makefile
+++ b/drivers/media/video/em28xx/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_VIDEO_EM28XX_ALSA) += em28xx-alsa.o
8obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o 8obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o
9 9
10EXTRA_CFLAGS += -Idrivers/media/video 10EXTRA_CFLAGS += -Idrivers/media/video
11EXTRA_CFLAGS += -Idrivers/media/common/tuners
11EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 12EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
12EXTRA_CFLAGS += -Idrivers/media/dvb/frontends 13EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
13 14
diff --git a/drivers/media/video/ivtv/Kconfig b/drivers/media/video/ivtv/Kconfig
index b6171702c4d0..eec115bf9517 100644
--- a/drivers/media/video/ivtv/Kconfig
+++ b/drivers/media/video/ivtv/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_IVTV
4 select I2C_ALGOBIT 4 select I2C_ALGOBIT
5 select FW_LOADER 5 select FW_LOADER
6 select VIDEO_IR 6 select VIDEO_IR
7 select VIDEO_TUNER 7 select MEDIA_TUNER
8 select VIDEO_TVEEPROM 8 select VIDEO_TVEEPROM
9 select VIDEO_CX2341X 9 select VIDEO_CX2341X
10 select VIDEO_CX25840 10 select VIDEO_CX25840
diff --git a/drivers/media/video/ivtv/Makefile b/drivers/media/video/ivtv/Makefile
index a0389014fa88..26ce0d6eaee1 100644
--- a/drivers/media/video/ivtv/Makefile
+++ b/drivers/media/video/ivtv/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_VIDEO_IVTV) += ivtv.o
8obj-$(CONFIG_VIDEO_FB_IVTV) += ivtvfb.o 8obj-$(CONFIG_VIDEO_FB_IVTV) += ivtvfb.o
9 9
10EXTRA_CFLAGS += -Idrivers/media/video 10EXTRA_CFLAGS += -Idrivers/media/video
11EXTRA_CFLAGS += -Idrivers/media/common/tuners
11EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 12EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
12EXTRA_CFLAGS += -Idrivers/media/dvb/frontends 13EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
13 14
diff --git a/drivers/media/video/ivtv/ivtv-cards.c b/drivers/media/video/ivtv/ivtv-cards.c
index e908649ea37c..4fb8faefe2ce 100644
--- a/drivers/media/video/ivtv/ivtv-cards.c
+++ b/drivers/media/video/ivtv/ivtv-cards.c
@@ -40,6 +40,8 @@
40#define MSP_MONO MSP_INPUT(MSP_IN_MONO, MSP_IN_TUNER1, \ 40#define MSP_MONO MSP_INPUT(MSP_IN_MONO, MSP_IN_TUNER1, \
41 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART) 41 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
42 42
43#define V4L2_STD_NOT_MN (V4L2_STD_PAL|V4L2_STD_SECAM)
44
43/* usual i2c tuner addresses to probe */ 45/* usual i2c tuner addresses to probe */
44static struct ivtv_card_tuner_i2c ivtv_i2c_std = { 46static struct ivtv_card_tuner_i2c ivtv_i2c_std = {
45 .radio = { I2C_CLIENT_END }, 47 .radio = { I2C_CLIENT_END },
@@ -298,7 +300,7 @@ static const struct ivtv_card ivtv_card_mpg600 = {
298 .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 }, 300 .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 },
299 .tuners = { 301 .tuners = {
300 /* The PAL tuner is confirmed */ 302 /* The PAL tuner is confirmed */
301 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME }, 303 { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FQ1216ME },
302 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 }, 304 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
303 }, 305 },
304 .pci_list = ivtv_pci_mpg600, 306 .pci_list = ivtv_pci_mpg600,
@@ -339,7 +341,7 @@ static const struct ivtv_card ivtv_card_mpg160 = {
339 .lang1 = 0x0004, .lang2 = 0x0000, .both = 0x0008 }, 341 .lang1 = 0x0004, .lang2 = 0x0000, .both = 0x0008 },
340 .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 }, 342 .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 },
341 .tuners = { 343 .tuners = {
342 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME }, 344 { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FQ1216ME },
343 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 }, 345 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
344 }, 346 },
345 .pci_list = ivtv_pci_mpg160, 347 .pci_list = ivtv_pci_mpg160,
@@ -375,7 +377,7 @@ static const struct ivtv_card ivtv_card_pg600 = {
375 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL }, 377 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL },
376 }, 378 },
377 .tuners = { 379 .tuners = {
378 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME }, 380 { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FQ1216ME },
379 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 }, 381 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
380 }, 382 },
381 .pci_list = ivtv_pci_pg600, 383 .pci_list = ivtv_pci_pg600,
@@ -416,7 +418,7 @@ static const struct ivtv_card ivtv_card_avc2410 = {
416 on the country/region setting of the user to decide which tuner 418 on the country/region setting of the user to decide which tuner
417 is available. */ 419 is available. */
418 .tuners = { 420 .tuners = {
419 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, 421 { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
420 { .std = V4L2_STD_ALL - V4L2_STD_NTSC_M_JP, 422 { .std = V4L2_STD_ALL - V4L2_STD_NTSC_M_JP,
421 .tuner = TUNER_PHILIPS_FM1236_MK3 }, 423 .tuner = TUNER_PHILIPS_FM1236_MK3 },
422 { .std = V4L2_STD_NTSC_M_JP, .tuner = TUNER_PHILIPS_FQ1286 }, 424 { .std = V4L2_STD_NTSC_M_JP, .tuner = TUNER_PHILIPS_FQ1286 },
@@ -490,7 +492,7 @@ static const struct ivtv_card ivtv_card_tg5000tv = {
490 .gpio_video_input = { .mask = 0x0030, .tuner = 0x0000, 492 .gpio_video_input = { .mask = 0x0030, .tuner = 0x0000,
491 .composite = 0x0010, .svideo = 0x0020 }, 493 .composite = 0x0010, .svideo = 0x0020 },
492 .tuners = { 494 .tuners = {
493 { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 }, 495 { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PHILIPS_FQ1286 },
494 }, 496 },
495 .pci_list = ivtv_pci_tg5000tv, 497 .pci_list = ivtv_pci_tg5000tv,
496 .i2c = &ivtv_i2c_std, 498 .i2c = &ivtv_i2c_std,
@@ -521,7 +523,7 @@ static const struct ivtv_card ivtv_card_va2000 = {
521 { IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER }, 523 { IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER },
522 }, 524 },
523 .tuners = { 525 .tuners = {
524 { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 }, 526 { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PHILIPS_FQ1286 },
525 }, 527 },
526 .pci_list = ivtv_pci_va2000, 528 .pci_list = ivtv_pci_va2000,
527 .i2c = &ivtv_i2c_std, 529 .i2c = &ivtv_i2c_std,
@@ -565,7 +567,7 @@ static const struct ivtv_card ivtv_card_cx23416gyc = {
565 .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000, 567 .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000,
566 .f44100 = 0x4000, .f48000 = 0x8000 }, 568 .f44100 = 0x4000, .f48000 = 0x8000 },
567 .tuners = { 569 .tuners = {
568 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, 570 { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
569 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 }, 571 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
570 }, 572 },
571 .pci_list = ivtv_pci_cx23416gyc, 573 .pci_list = ivtv_pci_cx23416gyc,
@@ -597,7 +599,7 @@ static const struct ivtv_card ivtv_card_cx23416gyc_nogr = {
597 .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000, 599 .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000,
598 .f44100 = 0x4000, .f48000 = 0x8000 }, 600 .f44100 = 0x4000, .f48000 = 0x8000 },
599 .tuners = { 601 .tuners = {
600 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, 602 { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
601 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 }, 603 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
602 }, 604 },
603 .i2c = &ivtv_i2c_std, 605 .i2c = &ivtv_i2c_std,
@@ -627,7 +629,7 @@ static const struct ivtv_card ivtv_card_cx23416gyc_nogrycs = {
627 .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000, 629 .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000,
628 .f44100 = 0x4000, .f48000 = 0x8000 }, 630 .f44100 = 0x4000, .f48000 = 0x8000 },
629 .tuners = { 631 .tuners = {
630 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, 632 { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
631 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 }, 633 { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
632 }, 634 },
633 .i2c = &ivtv_i2c_std, 635 .i2c = &ivtv_i2c_std,
@@ -667,7 +669,7 @@ static const struct ivtv_card ivtv_card_gv_mvprx = {
667 .gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 }, 669 .gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 },
668 .tuners = { 670 .tuners = {
669 /* This card has the Panasonic VP27 tuner */ 671 /* This card has the Panasonic VP27 tuner */
670 { .std = V4L2_STD_525_60, .tuner = TUNER_PANASONIC_VP27 }, 672 { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PANASONIC_VP27 },
671 }, 673 },
672 .pci_list = ivtv_pci_gv_mvprx, 674 .pci_list = ivtv_pci_gv_mvprx,
673 .i2c = &ivtv_i2c_std, 675 .i2c = &ivtv_i2c_std,
@@ -704,7 +706,7 @@ static const struct ivtv_card ivtv_card_gv_mvprx2e = {
704 .gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 }, 706 .gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 },
705 .tuners = { 707 .tuners = {
706 /* This card has the Panasonic VP27 tuner */ 708 /* This card has the Panasonic VP27 tuner */
707 { .std = V4L2_STD_525_60, .tuner = TUNER_PANASONIC_VP27 }, 709 { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PANASONIC_VP27 },
708 }, 710 },
709 .pci_list = ivtv_pci_gv_mvprx2e, 711 .pci_list = ivtv_pci_gv_mvprx2e,
710 .i2c = &ivtv_i2c_std, 712 .i2c = &ivtv_i2c_std,
@@ -739,7 +741,7 @@ static const struct ivtv_card ivtv_card_gotview_pci_dvd = {
739 .gpio_init = { .direction = 0xf000, .initial_value = 0xA000 }, 741 .gpio_init = { .direction = 0xf000, .initial_value = 0xA000 },
740 .tuners = { 742 .tuners = {
741 /* This card has a Philips FQ1216ME MK3 tuner */ 743 /* This card has a Philips FQ1216ME MK3 tuner */
742 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, 744 { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
743 }, 745 },
744 .pci_list = ivtv_pci_gotview_pci_dvd, 746 .pci_list = ivtv_pci_gotview_pci_dvd,
745 .i2c = &ivtv_i2c_std, 747 .i2c = &ivtv_i2c_std,
@@ -778,7 +780,7 @@ static const struct ivtv_card ivtv_card_gotview_pci_dvd2 = {
778 .gpio_audio_input = { .mask = 0x0800, .tuner = 0, .linein = 0, .radio = 0x0800 }, 780 .gpio_audio_input = { .mask = 0x0800, .tuner = 0, .linein = 0, .radio = 0x0800 },
779 .tuners = { 781 .tuners = {
780 /* This card has a Philips FQ1216ME MK5 tuner */ 782 /* This card has a Philips FQ1216ME MK5 tuner */
781 { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, 783 { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
782 }, 784 },
783 .pci_list = ivtv_pci_gotview_pci_dvd2, 785 .pci_list = ivtv_pci_gotview_pci_dvd2,
784 .i2c = &ivtv_i2c_std, 786 .i2c = &ivtv_i2c_std,
@@ -856,7 +858,7 @@ static const struct ivtv_card ivtv_card_dctmvtvp1 = {
856 .gpio_video_input = { .mask = 0x0030, .tuner = 0x0000, 858 .gpio_video_input = { .mask = 0x0030, .tuner = 0x0000,
857 .composite = 0x0010, .svideo = 0x0020}, 859 .composite = 0x0010, .svideo = 0x0020},
858 .tuners = { 860 .tuners = {
859 { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 }, 861 { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PHILIPS_FQ1286 },
860 }, 862 },
861 .pci_list = ivtv_pci_dctmvtvp1, 863 .pci_list = ivtv_pci_dctmvtvp1,
862 .i2c = &ivtv_i2c_std, 864 .i2c = &ivtv_i2c_std,
@@ -875,6 +877,7 @@ static const struct ivtv_card_pci_info ivtv_pci_pg600v2[] = {
875static const struct ivtv_card ivtv_card_pg600v2 = { 877static const struct ivtv_card ivtv_card_pg600v2 = {
876 .type = IVTV_CARD_PG600V2, 878 .type = IVTV_CARD_PG600V2,
877 .name = "Yuan PG600-2, GotView PCI DVD Lite", 879 .name = "Yuan PG600-2, GotView PCI DVD Lite",
880 .comment = "only Composite and S-Video inputs are supported, not the tuner\n",
878 .v4l2_capabilities = IVTV_CAP_ENCODER, 881 .v4l2_capabilities = IVTV_CAP_ENCODER,
879 .hw_video = IVTV_HW_CX25840, 882 .hw_video = IVTV_HW_CX25840,
880 .hw_audio = IVTV_HW_CX25840, 883 .hw_audio = IVTV_HW_CX25840,
@@ -921,6 +924,7 @@ static const struct ivtv_card ivtv_card_club3d = {
921 }, 924 },
922 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 }, 925 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
923 .gpio_init = { .direction = 0x1000, .initial_value = 0x1000 }, /* tuner reset */ 926 .gpio_init = { .direction = 0x1000, .initial_value = 0x1000 }, /* tuner reset */
927 .xceive_pin = 12,
924 .tuners = { 928 .tuners = {
925 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, 929 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
926 }, 930 },
@@ -944,15 +948,22 @@ static const struct ivtv_card ivtv_card_avertv_mce116 = {
944 .hw_video = IVTV_HW_CX25840, 948 .hw_video = IVTV_HW_CX25840,
945 .hw_audio = IVTV_HW_CX25840, 949 .hw_audio = IVTV_HW_CX25840,
946 .hw_audio_ctrl = IVTV_HW_CX25840, 950 .hw_audio_ctrl = IVTV_HW_CX25840,
947 .hw_all = IVTV_HW_CX25840 | IVTV_HW_WM8739, 951 .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER | IVTV_HW_WM8739,
948 .video_inputs = { 952 .video_inputs = {
949 { IVTV_CARD_INPUT_SVIDEO1, 0, CX25840_SVIDEO3 }, 953 { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 },
950 { IVTV_CARD_INPUT_COMPOSITE1, 0, CX25840_COMPOSITE1 }, 954 { IVTV_CARD_INPUT_SVIDEO1, 1, CX25840_SVIDEO3 },
955 { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
951 }, 956 },
952 .audio_inputs = { 957 .audio_inputs = {
958 { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
953 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 }, 959 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 },
954 }, 960 },
955 .gpio_init = { .direction = 0xe000, .initial_value = 0x4000 }, /* enable line-in */ 961 /* enable line-in */
962 .gpio_init = { .direction = 0xe400, .initial_value = 0x4400 },
963 .xceive_pin = 10,
964 .tuners = {
965 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
966 },
956 .pci_list = ivtv_pci_avertv_mce116, 967 .pci_list = ivtv_pci_avertv_mce116,
957 .i2c = &ivtv_i2c_std, 968 .i2c = &ivtv_i2c_std,
958}; 969};
@@ -990,7 +1001,7 @@ static const struct ivtv_card ivtv_card_aver_pvr150 = {
990 .gpio_audio_input = { .mask = 0x0800, .tuner = 0, .linein = 0, .radio = 0x0800 }, 1001 .gpio_audio_input = { .mask = 0x0800, .tuner = 0, .linein = 0, .radio = 0x0800 },
991 .tuners = { 1002 .tuners = {
992 /* This card has a Partsnic PTI-5NF05 tuner */ 1003 /* This card has a Partsnic PTI-5NF05 tuner */
993 { .std = V4L2_STD_525_60, .tuner = TUNER_TCL_2002N }, 1004 { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_TCL_2002N },
994 }, 1005 },
995 .pci_list = ivtv_pci_aver_pvr150, 1006 .pci_list = ivtv_pci_aver_pvr150,
996 .i2c = &ivtv_i2c_radio, 1007 .i2c = &ivtv_i2c_radio,
@@ -1058,12 +1069,48 @@ static const struct ivtv_card ivtv_card_asus_falcon2 = {
1058 }, 1069 },
1059 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, M52790_IN_TUNER }, 1070 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, M52790_IN_TUNER },
1060 .tuners = { 1071 .tuners = {
1061 { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FM1236_MK3 }, 1072 { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PHILIPS_FM1236_MK3 },
1062 }, 1073 },
1063 .pci_list = ivtv_pci_asus_falcon2, 1074 .pci_list = ivtv_pci_asus_falcon2,
1064 .i2c = &ivtv_i2c_std, 1075 .i2c = &ivtv_i2c_std,
1065}; 1076};
1066 1077
1078/* ------------------------------------------------------------------------- */
1079
1080/* AVerMedia M104 miniPCI card */
1081
1082static const struct ivtv_card_pci_info ivtv_pci_aver_m104[] = {
1083 { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xc136 },
1084 { 0, 0, 0 }
1085};
1086
1087static const struct ivtv_card ivtv_card_aver_m104 = {
1088 .type = IVTV_CARD_AVER_M104,
1089 .name = "AVerMedia M104",
1090 .comment = "Not yet supported!\n",
1091 .v4l2_capabilities = 0, /*IVTV_CAP_ENCODER,*/
1092 .hw_video = IVTV_HW_CX25840,
1093 .hw_audio = IVTV_HW_CX25840,
1094 .hw_audio_ctrl = IVTV_HW_CX25840,
1095 .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER | IVTV_HW_WM8739,
1096 .video_inputs = {
1097 { IVTV_CARD_INPUT_SVIDEO1, 0, CX25840_SVIDEO3 },
1098 { IVTV_CARD_INPUT_COMPOSITE1, 0, CX25840_COMPOSITE1 },
1099 },
1100 .audio_inputs = {
1101 { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 },
1102 },
1103 .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, 2 },
1104 /* enable line-in + reset tuner */
1105 .gpio_init = { .direction = 0xe400, .initial_value = 0x4000 },
1106 .xceive_pin = 10,
1107 .tuners = {
1108 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
1109 },
1110 .pci_list = ivtv_pci_aver_m104,
1111 .i2c = &ivtv_i2c_std,
1112};
1113
1067static const struct ivtv_card *ivtv_card_list[] = { 1114static const struct ivtv_card *ivtv_card_list[] = {
1068 &ivtv_card_pvr250, 1115 &ivtv_card_pvr250,
1069 &ivtv_card_pvr350, 1116 &ivtv_card_pvr350,
@@ -1089,6 +1136,7 @@ static const struct ivtv_card *ivtv_card_list[] = {
1089 &ivtv_card_asus_falcon2, 1136 &ivtv_card_asus_falcon2,
1090 &ivtv_card_aver_pvr150, 1137 &ivtv_card_aver_pvr150,
1091 &ivtv_card_aver_ezmaker, 1138 &ivtv_card_aver_ezmaker,
1139 &ivtv_card_aver_m104,
1092 1140
1093 /* Variations of standard cards but with the same PCI IDs. 1141 /* Variations of standard cards but with the same PCI IDs.
1094 These cards must come last in this list. */ 1142 These cards must come last in this list. */
@@ -1120,7 +1168,8 @@ int ivtv_get_input(struct ivtv *itv, u16 index, struct v4l2_input *input)
1120 if (index >= itv->nof_inputs) 1168 if (index >= itv->nof_inputs)
1121 return -EINVAL; 1169 return -EINVAL;
1122 input->index = index; 1170 input->index = index;
1123 strcpy(input->name, input_strs[card_input->video_type - 1]); 1171 strlcpy(input->name, input_strs[card_input->video_type - 1],
1172 sizeof(input->name));
1124 input->type = (card_input->video_type == IVTV_CARD_INPUT_VID_TUNER ? 1173 input->type = (card_input->video_type == IVTV_CARD_INPUT_VID_TUNER ?
1125 V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA); 1174 V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA);
1126 input->audioset = (1 << itv->nof_audio_inputs) - 1; 1175 input->audioset = (1 << itv->nof_audio_inputs) - 1;
@@ -1137,7 +1186,7 @@ int ivtv_get_output(struct ivtv *itv, u16 index, struct v4l2_output *output)
1137 if (index >= itv->card->nof_outputs) 1186 if (index >= itv->card->nof_outputs)
1138 return -EINVAL; 1187 return -EINVAL;
1139 output->index = index; 1188 output->index = index;
1140 strcpy(output->name, card_output->name); 1189 strlcpy(output->name, card_output->name, sizeof(output->name));
1141 output->type = V4L2_OUTPUT_TYPE_ANALOG; 1190 output->type = V4L2_OUTPUT_TYPE_ANALOG;
1142 output->audioset = 1; 1191 output->audioset = 1;
1143 output->std = V4L2_STD_ALL; 1192 output->std = V4L2_STD_ALL;
@@ -1156,7 +1205,8 @@ int ivtv_get_audio_input(struct ivtv *itv, u16 index, struct v4l2_audio *audio)
1156 memset(audio, 0, sizeof(*audio)); 1205 memset(audio, 0, sizeof(*audio));
1157 if (index >= itv->nof_audio_inputs) 1206 if (index >= itv->nof_audio_inputs)
1158 return -EINVAL; 1207 return -EINVAL;
1159 strcpy(audio->name, input_strs[aud_input->audio_type - 1]); 1208 strlcpy(audio->name, input_strs[aud_input->audio_type - 1],
1209 sizeof(audio->name));
1160 audio->index = index; 1210 audio->index = index;
1161 audio->capability = V4L2_AUDCAP_STEREO; 1211 audio->capability = V4L2_AUDCAP_STEREO;
1162 return 0; 1212 return 0;
@@ -1167,6 +1217,6 @@ int ivtv_get_audio_output(struct ivtv *itv, u16 index, struct v4l2_audioout *aud
1167 memset(aud_output, 0, sizeof(*aud_output)); 1217 memset(aud_output, 0, sizeof(*aud_output));
1168 if (itv->card->video_outputs == NULL || index != 0) 1218 if (itv->card->video_outputs == NULL || index != 0)
1169 return -EINVAL; 1219 return -EINVAL;
1170 strcpy(aud_output->name, "A/V Audio Out"); 1220 strlcpy(aud_output->name, "A/V Audio Out", sizeof(aud_output->name));
1171 return 0; 1221 return 0;
1172} 1222}
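
Several hunks above replace strcpy() into fixed-size V4L2 name fields with strlcpy(). A small user-space sketch of the semantics the driver now depends on; strlcpy() is not in glibc, so a local stand-in with the kernel's behaviour (truncate and always NUL-terminate) is used here:

    #include <stdio.h>
    #include <string.h>

    /* Minimal stand-in for the kernel's strlcpy(): copies at most
     * size-1 bytes, always NUL-terminates, returns strlen(src). */
    static size_t strlcpy_demo(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t copy = (len >= size) ? size - 1 : len;

            memcpy(dst, src, copy);
            dst[copy] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char name[16];   /* models a fixed-size V4L2 name field */

        /* An over-long card name is truncated instead of overflowing
         * the destination, which is what the patch buys over strcpy(). */
        strlcpy_demo(name, "Yuan MPG600, Kuroutoshikou ITVC16-STVLP", sizeof(name));
        printf("%s\n", name);
        return 0;
    }
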
diff --git a/drivers/media/video/ivtv/ivtv-cards.h b/drivers/media/video/ivtv/ivtv-cards.h
index 9186fa2ee5fc..748485dcebbd 100644
--- a/drivers/media/video/ivtv/ivtv-cards.h
+++ b/drivers/media/video/ivtv/ivtv-cards.h
@@ -48,7 +48,8 @@
48#define IVTV_CARD_ASUS_FALCON2 21 /* ASUS Falcon2 */ 48#define IVTV_CARD_ASUS_FALCON2 21 /* ASUS Falcon2 */
49#define IVTV_CARD_AVER_PVR150PLUS 22 /* AVerMedia PVR-150 Plus */ 49#define IVTV_CARD_AVER_PVR150PLUS 22 /* AVerMedia PVR-150 Plus */
50#define IVTV_CARD_AVER_EZMAKER 23 /* AVerMedia EZMaker PCI Deluxe */ 50#define IVTV_CARD_AVER_EZMAKER 23 /* AVerMedia EZMaker PCI Deluxe */
51#define IVTV_CARD_LAST 23 51#define IVTV_CARD_AVER_M104 24 /* AverMedia M104 miniPCI card */
52#define IVTV_CARD_LAST 24
52 53
53/* Variants of existing cards but with the same PCI IDs. The driver 54/* Variants of existing cards but with the same PCI IDs. The driver
54 detects these based on other device information. 55 detects these based on other device information.
@@ -244,6 +245,7 @@ struct ivtv_card_tuner_i2c {
244struct ivtv_card { 245struct ivtv_card {
245 int type; 246 int type;
246 char *name; 247 char *name;
248 char *comment;
247 u32 v4l2_capabilities; 249 u32 v4l2_capabilities;
248 u32 hw_video; /* hardware used to process video */ 250 u32 hw_video; /* hardware used to process video */
249 u32 hw_audio; /* hardware used to process audio */ 251 u32 hw_audio; /* hardware used to process audio */
@@ -256,6 +258,7 @@ struct ivtv_card {
256 int nof_outputs; 258 int nof_outputs;
257 const struct ivtv_card_output *video_outputs; 259 const struct ivtv_card_output *video_outputs;
258 u8 gr_config; /* config byte for the ghost reduction device */ 260 u8 gr_config; /* config byte for the ghost reduction device */
261 u8 xceive_pin; /* XCeive tuner GPIO reset pin */
259 262
260 /* GPIO card-specific settings */ 263 /* GPIO card-specific settings */
261 struct ivtv_gpio_init gpio_init; 264 struct ivtv_gpio_init gpio_init;
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 065df53f80fd..ed020f722b05 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -190,6 +190,7 @@ MODULE_PARM_DESC(cardtype,
190 "\t\t\t22 = ASUS Falcon2\n" 190 "\t\t\t22 = ASUS Falcon2\n"
191 "\t\t\t23 = AverMedia PVR-150 Plus\n" 191 "\t\t\t23 = AverMedia PVR-150 Plus\n"
192 "\t\t\t24 = AverMedia EZMaker PCI Deluxe\n" 192 "\t\t\t24 = AverMedia EZMaker PCI Deluxe\n"
193 "\t\t\t25 = AverMedia M104 (not yet working)\n"
193 "\t\t\t 0 = Autodetect (default)\n" 194 "\t\t\t 0 = Autodetect (default)\n"
194 "\t\t\t-1 = Ignore this card\n\t\t"); 195 "\t\t\t-1 = Ignore this card\n\t\t");
195MODULE_PARM_DESC(pal, "Set PAL standard: BGH, DK, I, M, N, Nc, 60"); 196MODULE_PARM_DESC(pal, "Set PAL standard: BGH, DK, I, M, N, Nc, 60");
@@ -871,7 +872,7 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
871 unsigned i; 872 unsigned i;
872 873
873 /* load modules */ 874 /* load modules */
874#ifndef CONFIG_VIDEO_TUNER 875#ifndef CONFIG_MEDIA_TUNER
875 hw = ivtv_request_module(itv, hw, "tuner", IVTV_HW_TUNER); 876 hw = ivtv_request_module(itv, hw, "tuner", IVTV_HW_TUNER);
876#endif 877#endif
877#ifndef CONFIG_VIDEO_CX25840 878#ifndef CONFIG_VIDEO_CX25840
@@ -1048,7 +1049,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1048 IVTV_ENCODER_SIZE); 1049 IVTV_ENCODER_SIZE);
1049 if (!itv->enc_mem) { 1050 if (!itv->enc_mem) {
1050 IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n"); 1051 IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
1051 IVTV_ERR("or disabling CONFIG_HIMEM4G into the kernel would help\n"); 1052 IVTV_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n");
1052 retval = -ENOMEM; 1053 retval = -ENOMEM;
1053 goto free_mem; 1054 goto free_mem;
1054 } 1055 }
@@ -1060,7 +1061,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1060 IVTV_DECODER_SIZE); 1061 IVTV_DECODER_SIZE);
1061 if (!itv->dec_mem) { 1062 if (!itv->dec_mem) {
1062 IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n"); 1063 IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
1063 IVTV_ERR("or disabling CONFIG_HIMEM4G into the kernel would help\n"); 1064 IVTV_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n");
1064 retval = -ENOMEM; 1065 retval = -ENOMEM;
1065 goto free_mem; 1066 goto free_mem;
1066 } 1067 }
@@ -1076,7 +1077,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1076 ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); 1077 ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
1077 if (!itv->reg_mem) { 1078 if (!itv->reg_mem) {
1078 IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n"); 1079 IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
1079 IVTV_ERR("or disabling CONFIG_HIMEM4G into the kernel would help\n"); 1080 IVTV_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n");
1080 retval = -ENOMEM; 1081 retval = -ENOMEM;
1081 goto free_io; 1082 goto free_io;
1082 } 1083 }
@@ -1097,6 +1098,13 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1097 The PCI IDs are not always reliable. */ 1098 The PCI IDs are not always reliable. */
1098 ivtv_process_eeprom(itv); 1099 ivtv_process_eeprom(itv);
1099 } 1100 }
1101 if (itv->card->comment)
1102 IVTV_INFO("%s", itv->card->comment);
1103 if (itv->card->v4l2_capabilities == 0) {
1104 /* card was detected but is not supported */
1105 retval = -ENODEV;
1106 goto free_i2c;
1107 }
1100 1108
1101 if (itv->std == 0) { 1109 if (itv->std == 0) {
1102 itv->std = V4L2_STD_NTSC_M; 1110 itv->std = V4L2_STD_NTSC_M;
@@ -1195,13 +1203,6 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1195 ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std); 1203 ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std);
1196 } 1204 }
1197 1205
1198 retval = ivtv_streams_setup(itv);
1199 if (retval) {
1200 IVTV_ERR("Error %d setting up streams\n", retval);
1201 goto free_i2c;
1202 }
1203
1204 IVTV_DEBUG_IRQ("Masking interrupts\n");
1205 /* clear interrupt mask, effectively disabling interrupts */ 1206 /* clear interrupt mask, effectively disabling interrupts */
1206 ivtv_set_irq_mask(itv, 0xffffffff); 1207 ivtv_set_irq_mask(itv, 0xffffffff);
1207 1208
@@ -1210,32 +1211,38 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1210 IRQF_SHARED | IRQF_DISABLED, itv->name, (void *)itv); 1211 IRQF_SHARED | IRQF_DISABLED, itv->name, (void *)itv);
1211 if (retval) { 1212 if (retval) {
1212 IVTV_ERR("Failed to register irq %d\n", retval); 1213 IVTV_ERR("Failed to register irq %d\n", retval);
1213 goto free_streams; 1214 goto free_i2c;
1215 }
1216
1217 retval = ivtv_streams_setup(itv);
1218 if (retval) {
1219 IVTV_ERR("Error %d setting up streams\n", retval);
1220 goto free_irq;
1214 } 1221 }
1215 retval = ivtv_streams_register(itv); 1222 retval = ivtv_streams_register(itv);
1216 if (retval) { 1223 if (retval) {
1217 IVTV_ERR("Error %d registering devices\n", retval); 1224 IVTV_ERR("Error %d registering devices\n", retval);
1218 goto free_irq; 1225 goto free_streams;
1219 } 1226 }
1220 IVTV_INFO("Initialized card #%d: %s\n", itv->num, itv->card_name); 1227 IVTV_INFO("Initialized card #%d: %s\n", itv->num, itv->card_name);
1221 return 0; 1228 return 0;
1222 1229
1223 free_irq: 1230free_streams:
1224 free_irq(itv->dev->irq, (void *)itv);
1225 free_streams:
1226 ivtv_streams_cleanup(itv); 1231 ivtv_streams_cleanup(itv);
1227 free_i2c: 1232free_irq:
1233 free_irq(itv->dev->irq, (void *)itv);
1234free_i2c:
1228 exit_ivtv_i2c(itv); 1235 exit_ivtv_i2c(itv);
1229 free_io: 1236free_io:
1230 ivtv_iounmap(itv); 1237 ivtv_iounmap(itv);
1231 free_mem: 1238free_mem:
1232 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); 1239 release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
1233 release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); 1240 release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
1234 if (itv->has_cx23415) 1241 if (itv->has_cx23415)
1235 release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); 1242 release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
1236 free_workqueue: 1243free_workqueue:
1237 destroy_workqueue(itv->irq_work_queues); 1244 destroy_workqueue(itv->irq_work_queues);
1238 err: 1245err:
1239 if (retval == 0) 1246 if (retval == 0)
1240 retval = -ENODEV; 1247 retval = -ENODEV;
1241 IVTV_ERR("Error %d on initialization\n", retval); 1248 IVTV_ERR("Error %d on initialization\n", retval);
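
The ivtv_probe() reordering above requests the IRQ before setting up streams, so the error labels are reshuffled to unwind strictly in reverse order of setup. A compact sketch of that goto-unwind pattern; the helper names are invented for the example, standing in for request_irq(), ivtv_streams_setup() and ivtv_streams_register():

    #include <stdio.h>

    static int  setup_irq(void)        { puts("irq on");      return 0; }
    static void teardown_irq(void)     { puts("irq off"); }
    static int  setup_streams(void)    { puts("streams on");  return 0; }
    static void teardown_streams(void) { puts("streams off"); }
    static int  register_devs(void)    { puts("devices on");  return -1; /* simulated failure */ }

    static int probe(void)
    {
        int ret;

        ret = setup_irq();
        if (ret)
            goto err;
        ret = setup_streams();
        if (ret)
            goto free_irq;        /* undo only what already succeeded */
        ret = register_devs();
        if (ret)
            goto free_streams;
        return 0;

    free_streams:
        teardown_streams();
    free_irq:
        teardown_irq();
    err:
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }
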
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index a7640c49f1d8..2b74b0ab1477 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -755,8 +755,10 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
755 IVTV_DEBUG_HI_FILE("Encoder poll\n"); 755 IVTV_DEBUG_HI_FILE("Encoder poll\n");
756 poll_wait(filp, &s->waitq, wait); 756 poll_wait(filp, &s->waitq, wait);
757 757
758 if (eof || s->q_full.length || s->q_io.length) 758 if (s->q_full.length || s->q_io.length)
759 return POLLIN | POLLRDNORM; 759 return POLLIN | POLLRDNORM;
760 if (eof)
761 return POLLHUP;
760 return 0; 762 return 0;
761} 763}
762 764
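
The poll() change above stops reporting a stale POLLIN once the encoder stream has ended and signals POLLHUP instead. The decision logic, restated as a tiny user-space function using the same poll flags:

    #define _XOPEN_SOURCE 600   /* for POLLRDNORM on glibc */
    #include <poll.h>
    #include <stdio.h>

    /* Buffered data wins; end-of-stream alone now reports POLLHUP
     * instead of a spurious read-ready indication. */
    static unsigned int enc_poll_mask(int queued_bytes, int at_eof)
    {
        if (queued_bytes)
            return POLLIN | POLLRDNORM;
        if (at_eof)
            return POLLHUP;
        return 0;
    }

    int main(void)
    {
        printf("%#x\n", enc_poll_mask(0, 1));    /* POLLHUP */
        printf("%#x\n", enc_poll_mask(512, 0));  /* POLLIN | POLLRDNORM */
        return 0;
    }
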
diff --git a/drivers/media/video/ivtv/ivtv-gpio.c b/drivers/media/video/ivtv/ivtv-gpio.c
index 688cd3856685..d8ac09f3cce6 100644
--- a/drivers/media/video/ivtv/ivtv-gpio.c
+++ b/drivers/media/video/ivtv/ivtv-gpio.c
@@ -128,20 +128,17 @@ int ivtv_reset_tuner_gpio(void *dev, int cmd, int value)
128{ 128{
129 struct i2c_algo_bit_data *algo = dev; 129 struct i2c_algo_bit_data *algo = dev;
130 struct ivtv *itv = algo->data; 130 struct ivtv *itv = algo->data;
131 int curdir, curout; 131 u32 curout;
132 132
133 if (cmd != XC2028_TUNER_RESET) 133 if (cmd != XC2028_TUNER_RESET)
134 return 0; 134 return 0;
135 IVTV_DEBUG_INFO("Resetting tuner\n"); 135 IVTV_DEBUG_INFO("Resetting tuner\n");
136 curout = read_reg(IVTV_REG_GPIO_OUT); 136 curout = read_reg(IVTV_REG_GPIO_OUT);
137 curdir = read_reg(IVTV_REG_GPIO_DIR); 137 curout &= ~(1 << itv->card->xceive_pin);
138 curdir |= (1 << 12); /* GPIO bit 12 */
139
140 curout &= ~(1 << 12);
141 write_reg(curout, IVTV_REG_GPIO_OUT); 138 write_reg(curout, IVTV_REG_GPIO_OUT);
142 schedule_timeout_interruptible(msecs_to_jiffies(1)); 139 schedule_timeout_interruptible(msecs_to_jiffies(1));
143 140
144 curout |= (1 << 12); 141 curout |= 1 << itv->card->xceive_pin;
145 write_reg(curout, IVTV_REG_GPIO_OUT); 142 write_reg(curout, IVTV_REG_GPIO_OUT);
146 schedule_timeout_interruptible(msecs_to_jiffies(1)); 143 schedule_timeout_interruptible(msecs_to_jiffies(1));
147 return 0; 144 return 0;
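
ivtv_reset_tuner_gpio() above now pulses whichever GPIO line the card description names in xceive_pin instead of hard-coding bit 12. The bit manipulation, modelled on a plain variable; the real driver does this through read_reg()/write_reg() on IVTV_REG_GPIO_OUT:

    #include <stdio.h>

    /* Simulated GPIO output register. */
    static unsigned int gpio_out = 0xffffffff;

    static void pulse_reset(unsigned int pin)
    {
        gpio_out &= ~(1u << pin);    /* drive the tuner reset line low */
        printf("low : %#010x\n", gpio_out);
        /* the driver sleeps about 1 ms here via
         * schedule_timeout_interruptible(msecs_to_jiffies(1)) */
        gpio_out |= 1u << pin;       /* release reset */
        printf("high: %#010x\n", gpio_out);
    }

    int main(void)
    {
        pulse_reset(12);   /* Club3D card: xceive_pin = 12 */
        pulse_reset(10);   /* AVerTV MCE 116 Plus and M104: xceive_pin = 10 */
        return 0;
    }
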
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 9824eafee021..771adf47e944 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -167,7 +167,8 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
167 return -1; 167 return -1;
168 id = hw_driverids[idx]; 168 id = hw_driverids[idx];
169 memset(&info, 0, sizeof(info)); 169 memset(&info, 0, sizeof(info));
170 strcpy(info.driver_name, hw_drivernames[idx]); 170 strlcpy(info.driver_name, hw_drivernames[idx],
171 sizeof(info.driver_name));
171 info.addr = hw_addrs[idx]; 172 info.addr = hw_addrs[idx];
172 for (i = 0; itv->i2c_clients[i] && i < I2C_CLIENTS_MAX; i++) {} 173 for (i = 0; itv->i2c_clients[i] && i < I2C_CLIENTS_MAX; i++) {}
173 174
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 15cac1812122..d508b5d0538c 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -243,20 +243,31 @@ static int ivtv_validate_speed(int cur_speed, int new_speed)
243 int fact = new_speed < 0 ? -1 : 1; 243 int fact = new_speed < 0 ? -1 : 1;
244 int s; 244 int s;
245 245
246 if (new_speed < 0) new_speed = -new_speed; 246 if (cur_speed == 0)
247 if (cur_speed < 0) cur_speed = -cur_speed; 247 cur_speed = 1000;
248 if (new_speed < 0)
249 new_speed = -new_speed;
250 if (cur_speed < 0)
251 cur_speed = -cur_speed;
248 252
249 if (cur_speed <= new_speed) { 253 if (cur_speed <= new_speed) {
250 if (new_speed > 1500) return fact * 2000; 254 if (new_speed > 1500)
251 if (new_speed > 1000) return fact * 1500; 255 return fact * 2000;
256 if (new_speed > 1000)
257 return fact * 1500;
252 } 258 }
253 else { 259 else {
254 if (new_speed >= 2000) return fact * 2000; 260 if (new_speed >= 2000)
255 if (new_speed >= 1500) return fact * 1500; 261 return fact * 2000;
256 if (new_speed >= 1000) return fact * 1000; 262 if (new_speed >= 1500)
257 } 263 return fact * 1500;
258 if (new_speed == 0) return 1000; 264 if (new_speed >= 1000)
259 if (new_speed == 1 || new_speed == 1000) return fact * new_speed; 265 return fact * 1000;
266 }
267 if (new_speed == 0)
268 return 1000;
269 if (new_speed == 1 || new_speed == 1000)
270 return fact * new_speed;
260 271
261 s = new_speed; 272 s = new_speed;
262 new_speed = 1000 / new_speed; 273 new_speed = 1000 / new_speed;
@@ -741,10 +752,9 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void
741 struct v4l2_capability *vcap = arg; 752 struct v4l2_capability *vcap = arg;
742 753
743 memset(vcap, 0, sizeof(*vcap)); 754 memset(vcap, 0, sizeof(*vcap));
744 strcpy(vcap->driver, IVTV_DRIVER_NAME); /* driver name */ 755 strlcpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
745 strncpy(vcap->card, itv->card_name, 756 strlcpy(vcap->card, itv->card_name, sizeof(vcap->card));
746 sizeof(vcap->card)-1); /* card type */ 757 strlcpy(vcap->bus_info, pci_name(itv->dev), sizeof(vcap->bus_info));
747 strcpy(vcap->bus_info, pci_name(itv->dev)); /* bus info... */
748 vcap->version = IVTV_DRIVER_VERSION; /* version */ 758 vcap->version = IVTV_DRIVER_VERSION; /* version */
749 vcap->capabilities = itv->v4l2_cap; /* capabilities */ 759 vcap->capabilities = itv->v4l2_cap; /* capabilities */
750 760
@@ -1018,7 +1028,7 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void
1018 ivtv_std_60hz : ivtv_std_50hz; 1028 ivtv_std_60hz : ivtv_std_50hz;
1019 vs->index = idx; 1029 vs->index = idx;
1020 vs->id = enum_stds[idx].std; 1030 vs->id = enum_stds[idx].std;
1021 strcpy(vs->name, enum_stds[idx].name); 1031 strlcpy(vs->name, enum_stds[idx].name, sizeof(vs->name));
1022 break; 1032 break;
1023 } 1033 }
1024 1034
@@ -1102,10 +1112,10 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void
1102 ivtv_call_i2c_clients(itv, VIDIOC_G_TUNER, vt); 1112 ivtv_call_i2c_clients(itv, VIDIOC_G_TUNER, vt);
1103 1113
1104 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) { 1114 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
1105 strcpy(vt->name, "ivtv Radio Tuner"); 1115 strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name));
1106 vt->type = V4L2_TUNER_RADIO; 1116 vt->type = V4L2_TUNER_RADIO;
1107 } else { 1117 } else {
1108 strcpy(vt->name, "ivtv TV Tuner"); 1118 strlcpy(vt->name, "ivtv TV Tuner", sizeof(vt->name));
1109 vt->type = V4L2_TUNER_ANALOG_TV; 1119 vt->type = V4L2_TUNER_ANALOG_TV;
1110 } 1120 }
1111 break; 1121 break;
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index a329c4689dbf..d8ba3a4a8761 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -384,7 +384,7 @@ static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
384 ivtv_stream_sync_for_device(s); 384 ivtv_stream_sync_for_device(s);
385 write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR); 385 write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
386 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER); 386 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
387 itv->dma_timer.expires = jiffies + msecs_to_jiffies(100); 387 itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
388 add_timer(&itv->dma_timer); 388 add_timer(&itv->dma_timer);
389} 389}
390 390
@@ -400,7 +400,7 @@ static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
400 ivtv_stream_sync_for_device(s); 400 ivtv_stream_sync_for_device(s);
401 write_reg(s->sg_handle, IVTV_REG_DECDMAADDR); 401 write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
402 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER); 402 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
403 itv->dma_timer.expires = jiffies + msecs_to_jiffies(100); 403 itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
404 add_timer(&itv->dma_timer); 404 add_timer(&itv->dma_timer);
405} 405}
406 406
diff --git a/drivers/media/video/ivtv/ivtv-version.h b/drivers/media/video/ivtv/ivtv-version.h
index 0f1d4cc4b4d9..02c5ab071d1b 100644
--- a/drivers/media/video/ivtv/ivtv-version.h
+++ b/drivers/media/video/ivtv/ivtv-version.h
@@ -23,7 +23,7 @@
23#define IVTV_DRIVER_NAME "ivtv" 23#define IVTV_DRIVER_NAME "ivtv"
24#define IVTV_DRIVER_VERSION_MAJOR 1 24#define IVTV_DRIVER_VERSION_MAJOR 1
25#define IVTV_DRIVER_VERSION_MINOR 2 25#define IVTV_DRIVER_VERSION_MINOR 2
26#define IVTV_DRIVER_VERSION_PATCHLEVEL 0 26#define IVTV_DRIVER_VERSION_PATCHLEVEL 1
27 27
28#define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL) 28#define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL)
29#define IVTV_DRIVER_VERSION KERNEL_VERSION(IVTV_DRIVER_VERSION_MAJOR,IVTV_DRIVER_VERSION_MINOR,IVTV_DRIVER_VERSION_PATCHLEVEL) 29#define IVTV_DRIVER_VERSION KERNEL_VERSION(IVTV_DRIVER_VERSION_MAJOR,IVTV_DRIVER_VERSION_MINOR,IVTV_DRIVER_VERSION_PATCHLEVEL)
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index 393d917cd672..62f70bd5e3cb 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -1098,8 +1098,8 @@ void ivtv_yuv_setup_stream_frame(struct ivtv *itv)
1098 ivtv_yuv_next_free(itv); 1098 ivtv_yuv_next_free(itv);
1099 1099
1100 /* Copy V4L2 parameters to an ivtv_dma_frame struct... */ 1100 /* Copy V4L2 parameters to an ivtv_dma_frame struct... */
1101 dma_args.y_source = 0L; 1101 dma_args.y_source = NULL;
1102 dma_args.uv_source = 0L; 1102 dma_args.uv_source = NULL;
1103 dma_args.src.left = 0; 1103 dma_args.src.left = 0;
1104 dma_args.src.top = 0; 1104 dma_args.src.top = 0;
1105 dma_args.src.width = yi->v4l2_src_w; 1105 dma_args.src.width = yi->v4l2_src_w;
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 3b23fc05f7c4..df789f683e63 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -532,7 +532,7 @@ static int ivtvfb_get_fix(struct ivtv *itv, struct fb_fix_screeninfo *fix)
532 532
533 IVTVFB_DEBUG_INFO("ivtvfb_get_fix\n"); 533 IVTVFB_DEBUG_INFO("ivtvfb_get_fix\n");
534 memset(fix, 0, sizeof(struct fb_fix_screeninfo)); 534 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
535 strcpy(fix->id, "cx23415 TV out"); 535 strlcpy(fix->id, "cx23415 TV out", sizeof(fix->id));
536 fix->smem_start = oi->video_pbase; 536 fix->smem_start = oi->video_pbase;
537 fix->smem_len = oi->video_buffer_size; 537 fix->smem_len = oi->video_buffer_size;
538 fix->type = FB_TYPE_PACKED_PIXELS; 538 fix->type = FB_TYPE_PACKED_PIXELS;
diff --git a/drivers/media/video/m52790.c b/drivers/media/video/m52790.c
index d4bf14c284ef..5b9dfa2c51b4 100644
--- a/drivers/media/video/m52790.c
+++ b/drivers/media/video/m52790.c
@@ -126,7 +126,8 @@ static int m52790_command(struct i2c_client *client, unsigned int cmd,
126 126
127/* i2c implementation */ 127/* i2c implementation */
128 128
129static int m52790_probe(struct i2c_client *client) 129static int m52790_probe(struct i2c_client *client,
130 const struct i2c_device_id *id)
130{ 131{
131 struct m52790_state *state; 132 struct m52790_state *state;
132 133
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index b73c740f7fb2..e6273162e123 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -805,7 +805,7 @@ static int msp_resume(struct i2c_client *client)
805 805
806/* ----------------------------------------------------------------------- */ 806/* ----------------------------------------------------------------------- */
807 807
808static int msp_probe(struct i2c_client *client) 808static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
809{ 809{
810 struct msp_state *state; 810 struct msp_state *state;
811 int (*thread_func)(void *data) = NULL; 811 int (*thread_func)(void *data) = NULL;
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 3fb5f63df1e6..179e47049a45 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -372,7 +372,7 @@ static int mt9m001_set_register(struct soc_camera_device *icd,
372} 372}
373#endif 373#endif
374 374
375const struct v4l2_queryctrl mt9m001_controls[] = { 375static const struct v4l2_queryctrl mt9m001_controls[] = {
376 { 376 {
377 .id = V4L2_CID_VFLIP, 377 .id = V4L2_CID_VFLIP,
378 .type = V4L2_CTRL_TYPE_BOOLEAN, 378 .type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -620,7 +620,8 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
620 soc_camera_video_stop(&mt9m001->icd); 620 soc_camera_video_stop(&mt9m001->icd);
621} 621}
622 622
623static int mt9m001_probe(struct i2c_client *client) 623static int mt9m001_probe(struct i2c_client *client,
624 const struct i2c_device_id *did)
624{ 625{
625 struct mt9m001 *mt9m001; 626 struct mt9m001 *mt9m001;
626 struct soc_camera_device *icd; 627 struct soc_camera_device *icd;
@@ -696,12 +697,19 @@ static int mt9m001_remove(struct i2c_client *client)
696 return 0; 697 return 0;
697} 698}
698 699
700static const struct i2c_device_id mt9m001_id[] = {
701 { "mt9m001", 0 },
702 { }
703};
704MODULE_DEVICE_TABLE(i2c, mt9m001_id);
705
699static struct i2c_driver mt9m001_i2c_driver = { 706static struct i2c_driver mt9m001_i2c_driver = {
700 .driver = { 707 .driver = {
701 .name = "mt9m001", 708 .name = "mt9m001",
702 }, 709 },
703 .probe = mt9m001_probe, 710 .probe = mt9m001_probe,
704 .remove = mt9m001_remove, 711 .remove = mt9m001_remove,
712 .id_table = mt9m001_id,
705}; 713};
706 714
707static int __init mt9m001_mod_init(void) 715static int __init mt9m001_mod_init(void)
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index d4b9e2744343..d1391ac55096 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -452,7 +452,7 @@ static int mt9v022_set_register(struct soc_camera_device *icd,
452} 452}
453#endif 453#endif
454 454
455const struct v4l2_queryctrl mt9v022_controls[] = { 455static const struct v4l2_queryctrl mt9v022_controls[] = {
456 { 456 {
457 .id = V4L2_CID_VFLIP, 457 .id = V4L2_CID_VFLIP,
458 .type = V4L2_CTRL_TYPE_BOOLEAN, 458 .type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -745,7 +745,8 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
745 soc_camera_video_stop(&mt9v022->icd); 745 soc_camera_video_stop(&mt9v022->icd);
746} 746}
747 747
748static int mt9v022_probe(struct i2c_client *client) 748static int mt9v022_probe(struct i2c_client *client,
749 const struct i2c_device_id *did)
749{ 750{
750 struct mt9v022 *mt9v022; 751 struct mt9v022 *mt9v022;
751 struct soc_camera_device *icd; 752 struct soc_camera_device *icd;
@@ -818,12 +819,19 @@ static int mt9v022_remove(struct i2c_client *client)
818 return 0; 819 return 0;
819} 820}
820 821
822static const struct i2c_device_id mt9v022_id[] = {
823 { "mt9v022", 0 },
824 { }
825};
826MODULE_DEVICE_TABLE(i2c, mt9v022_id);
827
821static struct i2c_driver mt9v022_i2c_driver = { 828static struct i2c_driver mt9v022_i2c_driver = {
822 .driver = { 829 .driver = {
823 .name = "mt9v022", 830 .name = "mt9v022",
824 }, 831 },
825 .probe = mt9v022_probe, 832 .probe = mt9v022_probe,
826 .remove = mt9v022_remove, 833 .remove = mt9v022_remove,
834 .id_table = mt9v022_id,
827}; 835};
828 836
829static int __init mt9v022_mod_init(void) 837static int __init mt9v022_mod_init(void)
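
Both sensor drivers above (mt9m001 and mt9v022, like m52790 and msp3400 earlier) are converted to the new-style i2c binding: probe() gains a const struct i2c_device_id * argument and the driver exports an id table so the module can be autoloaded. A skeleton of that shape with a made-up device name, roughly as it looked in this kernel's time frame:

    #include <linux/module.h>
    #include <linux/i2c.h>

    /* "foo" is a placeholder device name for this sketch only. */
    static int foo_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
    {
        dev_info(&client->dev, "bound as %s\n", id->name);
        return 0;
    }

    static int foo_remove(struct i2c_client *client)
    {
        return 0;
    }

    static const struct i2c_device_id foo_id[] = {
        { "foo", 0 },
        { }
    };
    MODULE_DEVICE_TABLE(i2c, foo_id);

    static struct i2c_driver foo_driver = {
        .driver   = { .name = "foo" },
        .probe    = foo_probe,
        .remove   = foo_remove,
        .id_table = foo_id,
    };

    static int __init foo_init(void)
    {
        return i2c_add_driver(&foo_driver);
    }

    static void __exit foo_exit(void)
    {
        i2c_del_driver(&foo_driver);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");
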
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig
index 158b3d0c6532..9620c67fae77 100644
--- a/drivers/media/video/pvrusb2/Kconfig
+++ b/drivers/media/video/pvrusb2/Kconfig
@@ -1,14 +1,15 @@
1config VIDEO_PVRUSB2 1config VIDEO_PVRUSB2
2 tristate "Hauppauge WinTV-PVR USB2 support" 2 tristate "Hauppauge WinTV-PVR USB2 support"
3 depends on VIDEO_V4L2 && I2C && EXPERIMENTAL 3 depends on VIDEO_V4L2 && I2C
4 select FW_LOADER 4 select FW_LOADER
5 select VIDEO_TUNER 5 select MEDIA_TUNER
6 select VIDEO_TVEEPROM 6 select VIDEO_TVEEPROM
7 select VIDEO_CX2341X 7 select VIDEO_CX2341X
8 select VIDEO_SAA711X 8 select VIDEO_SAA711X
9 select VIDEO_CX25840 9 select VIDEO_CX25840
10 select VIDEO_MSP3400 10 select VIDEO_MSP3400
11 select VIDEO_WM8775 11 select VIDEO_WM8775
12 select VIDEO_CS53L32A
12 ---help--- 13 ---help---
13 This is a video4linux driver for Conexant 23416 based 14 This is a video4linux driver for Conexant 23416 based
14 usb2 personal video recorder devices. 15 usb2 personal video recorder devices.
@@ -16,32 +17,6 @@ config VIDEO_PVRUSB2
16 To compile this driver as a module, choose M here: the 17 To compile this driver as a module, choose M here: the
17 module will be called pvrusb2 18 module will be called pvrusb2
18 19
19config VIDEO_PVRUSB2_ONAIR_CREATOR
20 bool "pvrusb2 driver support for OnAir Creator model"
21 depends on VIDEO_PVRUSB2 && EXPERIMENTAL
22 select VIDEO_SAA711X
23 select VIDEO_CS53L32A
24 ---help---
25
26 This option enables support for the OnAir Creator USB tuner
27 device. This is a hybrid device, however currently only
28 analog mode is supported.
29
30 If you are in doubt, say Y.
31
32config VIDEO_PVRUSB2_ONAIR_USB2
33 bool "pvrusb2 driver support for OnAir USB2 model"
34 depends on VIDEO_PVRUSB2 && EXPERIMENTAL
35 select VIDEO_SAA711X
36 select VIDEO_CS53L32A
37 ---help---
38
39 This option enables support for the OnAir USB2 tuner device
40 (also known as the Sasem tuner). This is a hybrid device,
41 however currently only analog mode is supported.
42
43 If you are in doubt, say Y.
44
45config VIDEO_PVRUSB2_SYSFS 20config VIDEO_PVRUSB2_SYSFS
46 bool "pvrusb2 sysfs support (EXPERIMENTAL)" 21 bool "pvrusb2 sysfs support (EXPERIMENTAL)"
47 default y 22 default y
@@ -59,29 +34,23 @@ config VIDEO_PVRUSB2_SYSFS
59 Note: This feature is experimental and subject to change. 34 Note: This feature is experimental and subject to change.
60 35
61config VIDEO_PVRUSB2_DVB 36config VIDEO_PVRUSB2_DVB
62 bool "pvrusb2 DVB support (EXPERIMENTAL)" 37 bool "pvrusb2 ATSC/DVB support (EXPERIMENTAL)"
63 default n 38 default y
64 depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL 39 depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL
65 select DVB_LGDT330X if !DVB_FE_CUSTOMISE 40 select DVB_LGDT330X if !DVB_FE_CUSTOMISE
66 select DVB_S5H1409 if !DVB_FE_CUSTOMISE 41 select DVB_S5H1409 if !DVB_FE_CUSTOMISE
67 select DVB_S5H1411 if !DVB_FE_CUSTOMISE 42 select DVB_S5H1411 if !DVB_FE_CUSTOMISE
68 select DVB_TDA10048 if !DVB_FE_CUSTOMIZE 43 select DVB_TDA10048 if !DVB_FE_CUSTOMIZE
69 select DVB_TDA18271 if !DVB_FE_CUSTOMIZE 44 select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE
70 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 45 select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
71 select TUNER_TDA8290 if !DVB_FE_CUSTOMIZE 46 select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE
72 ---help--- 47 ---help---
73 48
74 This option enables compilation of a DVB interface for the 49 This option enables a DVB interface for the pvrusb2 driver.
75 pvrusb2 driver. Currently this is very very experimental. 50 If your device does not support digital television, this
76 It is also limiting - the DVB interface can only access the 51 feature will have no effect on the driver's operation.
77 digital side of hybrid devices, and there are going to be
78 issues if you attempt to mess with the V4L side at the same
79 time. Don't turn this on unless you know what you are
80 doing.
81
82 If you are in doubt, say N.
83 52
84 Note: This feature is very experimental and might break 53 If you are in doubt, say Y.
85 54
86config VIDEO_PVRUSB2_DEBUGIFC 55config VIDEO_PVRUSB2_DEBUGIFC
87 bool "pvrusb2 debug interface" 56 bool "pvrusb2 debug interface"
diff --git a/drivers/media/video/pvrusb2/Makefile b/drivers/media/video/pvrusb2/Makefile
index 5b3083c89aa9..4fda2de69ab7 100644
--- a/drivers/media/video/pvrusb2/Makefile
+++ b/drivers/media/video/pvrusb2/Makefile
@@ -16,5 +16,6 @@ pvrusb2-objs := pvrusb2-i2c-core.o pvrusb2-i2c-cmd-v4l2.o \
16obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2.o 16obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2.o
17 17
18EXTRA_CFLAGS += -Idrivers/media/video 18EXTRA_CFLAGS += -Idrivers/media/video
19EXTRA_CFLAGS += -Idrivers/media/common/tuners
19EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 20EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
20EXTRA_CFLAGS += -Idrivers/media/dvb/frontends 21EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c
index 9a7c8e9c3e8b..8d859ccd48ec 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c
@@ -75,7 +75,7 @@ static void set_stereo(struct pvr2_msp3400_handler *ctxt)
75 pvr2_trace(PVR2_TRACE_CHIPS,"i2c msp3400 v4l2 set_stereo"); 75 pvr2_trace(PVR2_TRACE_CHIPS,"i2c msp3400 v4l2 set_stereo");
76 76
77 if ((sid < ARRAY_SIZE(routing_schemes)) && 77 if ((sid < ARRAY_SIZE(routing_schemes)) &&
78 ((sp = routing_schemes + sid) != 0) && 78 ((sp = routing_schemes + sid) != NULL) &&
79 (hdw->input_val >= 0) && 79 (hdw->input_val >= 0) &&
80 (hdw->input_val < sp->cnt)) { 80 (hdw->input_val < sp->cnt)) {
81 route.input = sp->def[hdw->input_val]; 81 route.input = sp->def[hdw->input_val];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
index b5db6a5bab31..73dcb1c57ae6 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -195,7 +195,7 @@ static int pvr2_context_thread_func(void *foo)
195int pvr2_context_global_init(void) 195int pvr2_context_global_init(void)
196{ 196{
197 pvr2_context_thread_ptr = kthread_run(pvr2_context_thread_func, 197 pvr2_context_thread_ptr = kthread_run(pvr2_context_thread_func,
198 0, 198 NULL,
199 "pvrusb2-context"); 199 "pvrusb2-context");
200 return (pvr2_context_thread_ptr ? 0 : -ENOMEM); 200 return (pvr2_context_thread_ptr ? 0 : -ENOMEM);
201} 201}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
index 97350b048b8d..29d50597c88a 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -123,7 +123,7 @@ static void set_input(struct pvr2_v4l_cx2584x *ctxt)
123 memset(&route,0,sizeof(route)); 123 memset(&route,0,sizeof(route));
124 124
125 if ((sid < ARRAY_SIZE(routing_schemes)) && 125 if ((sid < ARRAY_SIZE(routing_schemes)) &&
126 ((sp = routing_schemes + sid) != 0) && 126 ((sp = routing_schemes + sid) != NULL) &&
127 (hdw->input_val >= 0) && 127 (hdw->input_val >= 0) &&
128 (hdw->input_val < sp->cnt)) { 128 (hdw->input_val < sp->cnt)) {
129 vid_input = sp->def[hdw->input_val].vid; 129 vid_input = sp->def[hdw->input_val].vid;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debug.h b/drivers/media/video/pvrusb2/pvrusb2-debug.h
index 11537ddf8aa3..707d2d9635d7 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-debug.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-debug.h
@@ -54,6 +54,7 @@ extern int pvrusb2_debug;
54#define PVR2_TRACE_DATA_FLOW (1 << 25) /* Track data flow */ 54#define PVR2_TRACE_DATA_FLOW (1 << 25) /* Track data flow */
55#define PVR2_TRACE_DEBUGIFC (1 << 26) /* Debug interface actions */ 55#define PVR2_TRACE_DEBUGIFC (1 << 26) /* Debug interface actions */
56#define PVR2_TRACE_GPIO (1 << 27) /* GPIO state bit changes */ 56#define PVR2_TRACE_GPIO (1 << 27) /* GPIO state bit changes */
57#define PVR2_TRACE_DVB_FEED (1 << 28) /* DVB transport feed debug */
57 58
58 59
59#endif /* __PVRUSB2_HDW_INTERNAL_H */ 60#endif /* __PVRUSB2_HDW_INTERNAL_H */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
index 3a141d93e1a9..5bf6d8fda1f9 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
@@ -153,7 +153,6 @@ static const struct pvr2_device_desc pvr2_device_gotview_2d = {
153 153
154 154
155 155
156#ifdef CONFIG_VIDEO_PVRUSB2_ONAIR_CREATOR
157/*------------------------------------------------------------------------*/ 156/*------------------------------------------------------------------------*/
158/* OnAir Creator */ 157/* OnAir Creator */
159 158
@@ -212,11 +211,9 @@ static const struct pvr2_device_desc pvr2_device_onair_creator = {
212 .dvb_props = &pvr2_onair_creator_fe_props, 211 .dvb_props = &pvr2_onair_creator_fe_props,
213#endif 212#endif
214}; 213};
215#endif
216 214
217 215
218 216
219#ifdef CONFIG_VIDEO_PVRUSB2_ONAIR_USB2
220/*------------------------------------------------------------------------*/ 217/*------------------------------------------------------------------------*/
221/* OnAir USB 2.0 */ 218/* OnAir USB 2.0 */
222 219
@@ -274,7 +271,6 @@ static const struct pvr2_device_desc pvr2_device_onair_usb2 = {
274 .dvb_props = &pvr2_onair_usb2_fe_props, 271 .dvb_props = &pvr2_onair_usb2_fe_props,
275#endif 272#endif
276}; 273};
277#endif
278 274
279 275
280 276
@@ -497,14 +493,10 @@ struct usb_device_id pvr2_device_table[] = {
497 .driver_info = (kernel_ulong_t)&pvr2_device_gotview_2}, 493 .driver_info = (kernel_ulong_t)&pvr2_device_gotview_2},
498 { USB_DEVICE(0x1164, 0x0602), 494 { USB_DEVICE(0x1164, 0x0602),
499 .driver_info = (kernel_ulong_t)&pvr2_device_gotview_2d}, 495 .driver_info = (kernel_ulong_t)&pvr2_device_gotview_2d},
500#ifdef CONFIG_VIDEO_PVRUSB2_ONAIR_CREATOR
501 { USB_DEVICE(0x11ba, 0x1003), 496 { USB_DEVICE(0x11ba, 0x1003),
502 .driver_info = (kernel_ulong_t)&pvr2_device_onair_creator}, 497 .driver_info = (kernel_ulong_t)&pvr2_device_onair_creator},
503#endif
504#ifdef CONFIG_VIDEO_PVRUSB2_ONAIR_USB2
505 { USB_DEVICE(0x11ba, 0x1001), 498 { USB_DEVICE(0x11ba, 0x1001),
506 .driver_info = (kernel_ulong_t)&pvr2_device_onair_usb2}, 499 .driver_info = (kernel_ulong_t)&pvr2_device_onair_usb2},
507#endif
508 { USB_DEVICE(0x2040, 0x7300), 500 { USB_DEVICE(0x2040, 0x7300),
509 .driver_info = (kernel_ulong_t)&pvr2_device_73xxx}, 501 .driver_info = (kernel_ulong_t)&pvr2_device_73xxx},
510 { USB_DEVICE(0x2040, 0x7500), 502 { USB_DEVICE(0x2040, 0x7500),
diff --git a/drivers/media/video/pvrusb2/pvrusb2-dvb.c b/drivers/media/video/pvrusb2/pvrusb2-dvb.c
index 2e64f98d1241..6ec4bf81fc7f 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-dvb.c
@@ -21,6 +21,7 @@
21#include <linux/kthread.h> 21#include <linux/kthread.h>
22#include <linux/freezer.h> 22#include <linux/freezer.h>
23#include "dvbdev.h" 23#include "dvbdev.h"
24#include "pvrusb2-debug.h"
24#include "pvrusb2-hdw-internal.h" 25#include "pvrusb2-hdw-internal.h"
25#include "pvrusb2-hdw.h" 26#include "pvrusb2-hdw.h"
26#include "pvrusb2-io.h" 27#include "pvrusb2-io.h"
@@ -35,7 +36,7 @@ static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap)
35 struct pvr2_buffer *bp; 36 struct pvr2_buffer *bp;
36 struct pvr2_stream *stream; 37 struct pvr2_stream *stream;
37 38
38 printk(KERN_DEBUG "dvb thread started\n"); 39 pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread started");
39 set_freezable(); 40 set_freezable();
40 41
41 stream = adap->channel.stream->stream; 42 stream = adap->channel.stream->stream;
@@ -82,7 +83,7 @@ static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap)
82 /* If we get here and ret is < 0, then an error has occurred. 83 /* If we get here and ret is < 0, then an error has occurred.
83 Probably would be a good idea to communicate that to DVB core... */ 84 Probably would be a good idea to communicate that to DVB core... */
84 85
85 printk(KERN_DEBUG "dvb thread stopped\n"); 86 pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread stopped");
86 87
87 return 0; 88 return 0;
88} 89}
@@ -130,7 +131,7 @@ static void pvr2_dvb_stream_end(struct pvr2_dvb_adapter *adap)
130 for (idx = 0; idx < PVR2_DVB_BUFFER_COUNT; idx++) { 131 for (idx = 0; idx < PVR2_DVB_BUFFER_COUNT; idx++) {
131 if (!(adap->buffer_storage[idx])) continue; 132 if (!(adap->buffer_storage[idx])) continue;
132 kfree(adap->buffer_storage[idx]); 133 kfree(adap->buffer_storage[idx]);
133 adap->buffer_storage[idx] = 0; 134 adap->buffer_storage[idx] = NULL;
134 } 135 }
135 adap->stream_run = 0; 136 adap->stream_run = 0;
136 } 137 }
@@ -142,7 +143,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
142 unsigned int idx; 143 unsigned int idx;
143 int ret; 144 int ret;
144 struct pvr2_buffer *bp; 145 struct pvr2_buffer *bp;
145 struct pvr2_stream *stream = 0; 146 struct pvr2_stream *stream = NULL;
146 147
147 if (adap->stream_run) return -EIO; 148 if (adap->stream_run) return -EIO;
148 149
@@ -174,7 +175,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
174 ret = pvr2_hdw_set_streaming(adap->channel.hdw, 1); 175 ret = pvr2_hdw_set_streaming(adap->channel.hdw, 1);
175 if (ret < 0) return ret; 176 if (ret < 0) return ret;
176 177
177 while ((bp = pvr2_stream_get_idle_buffer(stream)) != 0) { 178 while ((bp = pvr2_stream_get_idle_buffer(stream)) != NULL) {
178 ret = pvr2_buffer_queue(bp); 179 ret = pvr2_buffer_queue(bp);
179 if (ret < 0) return ret; 180 if (ret < 0) return ret;
180 } 181 }
@@ -210,7 +211,8 @@ static int pvr2_dvb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
210 do { 211 do {
211 if (onoff) { 212 if (onoff) {
212 if (!adap->feedcount) { 213 if (!adap->feedcount) {
213 printk(KERN_DEBUG "start feeding\n"); 214 pvr2_trace(PVR2_TRACE_DVB_FEED,
215 "start feeding demux");
214 ret = pvr2_dvb_stream_start(adap); 216 ret = pvr2_dvb_stream_start(adap);
215 if (ret < 0) break; 217 if (ret < 0) break;
216 } 218 }
@@ -218,7 +220,8 @@ static int pvr2_dvb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
218 } else if (adap->feedcount > 0) { 220 } else if (adap->feedcount > 0) {
219 (adap->feedcount)--; 221 (adap->feedcount)--;
220 if (!adap->feedcount) { 222 if (!adap->feedcount) {
221 printk(KERN_DEBUG "stop feeding\n"); 223 pvr2_trace(PVR2_TRACE_DVB_FEED,
224 "stop feeding demux");
222 pvr2_dvb_stream_end(adap); 225 pvr2_dvb_stream_end(adap);
223 } 226 }
224 } 227 }
@@ -230,15 +233,13 @@ static int pvr2_dvb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
230 233
231static int pvr2_dvb_start_feed(struct dvb_demux_feed *dvbdmxfeed) 234static int pvr2_dvb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
232{ 235{
233 printk(KERN_DEBUG "start pid: 0x%04x, feedtype: %d\n", 236 pvr2_trace(PVR2_TRACE_DVB_FEED, "start pid: 0x%04x", dvbdmxfeed->pid);
234 dvbdmxfeed->pid, dvbdmxfeed->type);
235 return pvr2_dvb_ctrl_feed(dvbdmxfeed, 1); 237 return pvr2_dvb_ctrl_feed(dvbdmxfeed, 1);
236} 238}
237 239
238static int pvr2_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed) 240static int pvr2_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
239{ 241{
240 printk(KERN_DEBUG "stop pid: 0x%04x, feedtype: %d\n", 242 pvr2_trace(PVR2_TRACE_DVB_FEED, "stop pid: 0x%04x", dvbdmxfeed->pid);
241 dvbdmxfeed->pid, dvbdmxfeed->type);
242 return pvr2_dvb_ctrl_feed(dvbdmxfeed, 0); 243 return pvr2_dvb_ctrl_feed(dvbdmxfeed, 0);
243} 244}
244 245
@@ -259,7 +260,8 @@ static int pvr2_dvb_adapter_init(struct pvr2_dvb_adapter *adap)
259 &adap->channel.hdw->usb_dev->dev, 260 &adap->channel.hdw->usb_dev->dev,
260 adapter_nr); 261 adapter_nr);
261 if (ret < 0) { 262 if (ret < 0) {
262 err("dvb_register_adapter failed: error %d", ret); 263 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
264 "dvb_register_adapter failed: error %d", ret);
263 goto err; 265 goto err;
264 } 266 }
265 adap->dvb_adap.priv = adap; 267 adap->dvb_adap.priv = adap;
@@ -276,7 +278,8 @@ static int pvr2_dvb_adapter_init(struct pvr2_dvb_adapter *adap)
276 278
277 ret = dvb_dmx_init(&adap->demux); 279 ret = dvb_dmx_init(&adap->demux);
278 if (ret < 0) { 280 if (ret < 0) {
279 err("dvb_dmx_init failed: error %d", ret); 281 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
282 "dvb_dmx_init failed: error %d", ret);
280 goto err_dmx; 283 goto err_dmx;
281 } 284 }
282 285
@@ -286,7 +289,8 @@ static int pvr2_dvb_adapter_init(struct pvr2_dvb_adapter *adap)
286 289
287 ret = dvb_dmxdev_init(&adap->dmxdev, &adap->dvb_adap); 290 ret = dvb_dmxdev_init(&adap->dmxdev, &adap->dvb_adap);
288 if (ret < 0) { 291 if (ret < 0) {
289 err("dvb_dmxdev_init failed: error %d", ret); 292 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
293 "dvb_dmxdev_init failed: error %d", ret);
290 goto err_dmx_dev; 294 goto err_dmx_dev;
291 } 295 }
292 296
@@ -304,7 +308,7 @@ err:
304 308
305static int pvr2_dvb_adapter_exit(struct pvr2_dvb_adapter *adap) 309static int pvr2_dvb_adapter_exit(struct pvr2_dvb_adapter *adap)
306{ 310{
307 printk(KERN_DEBUG "unregistering DVB devices\n"); 311 pvr2_trace(PVR2_TRACE_INFO, "unregistering DVB devices");
308 dvb_net_release(&adap->dvb_net); 312 dvb_net_release(&adap->dvb_net);
309 adap->demux.dmx.close(&adap->demux.dmx); 313 adap->demux.dmx.close(&adap->demux.dmx);
310 dvb_dmxdev_release(&adap->dmxdev); 314 dvb_dmxdev_release(&adap->dmxdev);
@@ -320,7 +324,7 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
320 int ret = 0; 324 int ret = 0;
321 325
322 if (dvb_props == NULL) { 326 if (dvb_props == NULL) {
323 err("fe_props not defined!"); 327 pvr2_trace(PVR2_TRACE_ERROR_LEGS, "fe_props not defined!");
324 return -EINVAL; 328 return -EINVAL;
325 } 329 }
326 330
@@ -328,13 +332,15 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
328 &adap->channel, 332 &adap->channel,
329 (1 << PVR2_CVAL_INPUT_DTV)); 333 (1 << PVR2_CVAL_INPUT_DTV));
330 if (ret) { 334 if (ret) {
331 err("failed to grab control of dtv input (code=%d)", 335 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
336 "failed to grab control of dtv input (code=%d)",
332 ret); 337 ret);
333 return ret; 338 return ret;
334 } 339 }
335 340
336 if (dvb_props->frontend_attach == NULL) { 341 if (dvb_props->frontend_attach == NULL) {
337 err("frontend_attach not defined!"); 342 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
343 "frontend_attach not defined!");
338 ret = -EINVAL; 344 ret = -EINVAL;
339 goto done; 345 goto done;
340 } 346 }
@@ -342,7 +348,8 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
342 if ((dvb_props->frontend_attach(adap) == 0) && (adap->fe)) { 348 if ((dvb_props->frontend_attach(adap) == 0) && (adap->fe)) {
343 349
344 if (dvb_register_frontend(&adap->dvb_adap, adap->fe)) { 350 if (dvb_register_frontend(&adap->dvb_adap, adap->fe)) {
345 err("frontend registration failed!"); 351 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
352 "frontend registration failed!");
346 dvb_frontend_detach(adap->fe); 353 dvb_frontend_detach(adap->fe);
347 adap->fe = NULL; 354 adap->fe = NULL;
348 ret = -ENODEV; 355 ret = -ENODEV;
@@ -359,7 +366,8 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
359 adap->fe->ops.ts_bus_ctrl = pvr2_dvb_bus_ctrl; 366 adap->fe->ops.ts_bus_ctrl = pvr2_dvb_bus_ctrl;
360 367
361 } else { 368 } else {
362 err("no frontend was attached!"); 369 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
370 "no frontend was attached!");
363 ret = -ENODEV; 371 ret = -ENODEV;
364 return ret; 372 return ret;
365 } 373 }
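
The hunks above switch the DVB feed path from bare printk(KERN_DEBUG ...) calls to the driver's bitmask-gated tracing, keyed on the new PVR2_TRACE_DVB_FEED bit added in pvrusb2-debug.h. As a rough sketch of that pattern, assuming a macro along the lines of the real pvr2_trace() (whose exact definition in pvrusb2-debug.h may differ), each message carries a category bit that is tested against the module-wide debug mask before anything reaches the log:

/* Sketch only; the real pvr2_trace() lives in pvrusb2-debug.h and may
 * differ in detail. */
#include <linux/kernel.h>

extern int pvrusb2_debug;                       /* bitmask module parameter */

#define pvr2_trace_sketch(msk, fmt, arg...) do {                        \
		if ((msk) & pvrusb2_debug)                              \
			printk(KERN_INFO "pvrusb2: " fmt "\n", ##arg);  \
	} while (0)

With something of that shape in place, a call such as pvr2_trace(PVR2_TRACE_DVB_FEED, "start feeding demux") costs little more than a mask test unless the corresponding bit is enabled in the debug mask.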
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 087a18245560..e9b5d4e91327 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -1261,7 +1261,7 @@ struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp)
1261 fail: 1261 fail:
1262 pvr2_trace(PVR2_TRACE_STRUCT,"Failure creating pvr2_v4l2 id=%p",vp); 1262 pvr2_trace(PVR2_TRACE_STRUCT,"Failure creating pvr2_v4l2 id=%p",vp);
1263 pvr2_v4l2_destroy_no_lock(vp); 1263 pvr2_v4l2_destroy_no_lock(vp);
1264 return 0; 1264 return NULL;
1265} 1265}
1266 1266
1267/* 1267/*
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
index 7c47345501b6..2433a3160041 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
@@ -81,7 +81,7 @@ static void set_input(struct pvr2_v4l_decoder *ctxt)
81 pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_input(%d)",hdw->input_val); 81 pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_input(%d)",hdw->input_val);
82 82
83 if ((sid < ARRAY_SIZE(routing_schemes)) && 83 if ((sid < ARRAY_SIZE(routing_schemes)) &&
84 ((sp = routing_schemes + sid) != 0) && 84 ((sp = routing_schemes + sid) != NULL) &&
85 (hdw->input_val >= 0) && 85 (hdw->input_val >= 0) &&
86 (hdw->input_val < sp->cnt)) { 86 (hdw->input_val < sp->cnt)) {
87 route.input = sp->def[hdw->input_val]; 87 route.input = sp->def[hdw->input_val];
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index 416d05d4a969..e684108637ad 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1450,7 +1450,8 @@ static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *ar
1450 1450
1451/* ----------------------------------------------------------------------- */ 1451/* ----------------------------------------------------------------------- */
1452 1452
1453static int saa7115_probe(struct i2c_client *client) 1453static int saa7115_probe(struct i2c_client *client,
1454 const struct i2c_device_id *id)
1454{ 1455{
1455 struct saa711x_state *state; 1456 struct saa711x_state *state;
1456 int i; 1457 int i;
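
Here and in the probe routines below, the prototype gains a second const struct i2c_device_id * argument to match the new-style i2c binding model; the legacy helper changed in the v4l2-common.c hunk further down simply passes NULL for it. A minimal sketch of a driver written against that model, using hypothetical foo_* names that are not part of this patch:

/* Minimal sketch of new-style i2c binding; the foo_* names are
 * hypothetical and not part of this patch. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	/* 'id' points at the foo_id[] entry that matched; legacy attach
	 * helpers may pass NULL here. */
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver   = { .name = "foo" },
	.probe    = foo_probe,
	.id_table = foo_id,
};

static int __init foo_init(void)
{
	return i2c_add_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	i2c_del_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");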
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index 06c88db656b4..e750cd65c1c3 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -661,7 +661,8 @@ static int saa7127_command(struct i2c_client *client,
661 661
662/* ----------------------------------------------------------------------- */ 662/* ----------------------------------------------------------------------- */
663 663
664static int saa7127_probe(struct i2c_client *client) 664static int saa7127_probe(struct i2c_client *client,
665 const struct i2c_device_id *id)
665{ 666{
666 struct saa7127_state *state; 667 struct saa7127_state *state;
667 struct v4l2_sliced_vbi_data vbi = { 0, 0, 0, 0 }; /* set to disabled */ 668 struct v4l2_sliced_vbi_data vbi = { 0, 0, 0, 0 }; /* set to disabled */
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index e086f14d5663..40e4c3bd2cb9 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_SAA7134
3 depends on VIDEO_DEV && PCI && I2C && INPUT 3 depends on VIDEO_DEV && PCI && I2C && INPUT
4 select VIDEOBUF_DMA_SG 4 select VIDEOBUF_DMA_SG
5 select VIDEO_IR 5 select VIDEO_IR
6 select VIDEO_TUNER 6 select MEDIA_TUNER
7 select VIDEO_TVEEPROM 7 select VIDEO_TVEEPROM
8 select CRC32 8 select CRC32
9 ---help--- 9 ---help---
@@ -35,9 +35,9 @@ config VIDEO_SAA7134_DVB
35 select DVB_NXT200X if !DVB_FE_CUSTOMISE 35 select DVB_NXT200X if !DVB_FE_CUSTOMISE
36 select DVB_TDA10086 if !DVB_FE_CUSTOMISE 36 select DVB_TDA10086 if !DVB_FE_CUSTOMISE
37 select DVB_TDA826X if !DVB_FE_CUSTOMISE 37 select DVB_TDA826X if !DVB_FE_CUSTOMISE
38 select DVB_TDA827X if !DVB_FE_CUSTOMISE 38 select MEDIA_TUNER_TDA827X if !DVB_FE_CUSTOMISE
39 select DVB_ISL6421 if !DVB_FE_CUSTOMISE 39 select DVB_ISL6421 if !DVB_FE_CUSTOMISE
40 select TUNER_SIMPLE if !DVB_FE_CUSTOMISE 40 select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
41 ---help--- 41 ---help---
42 This adds support for DVB cards based on the 42 This adds support for DVB cards based on the
43 Philips saa7134 chip. 43 Philips saa7134 chip.
diff --git a/drivers/media/video/saa7134/Makefile b/drivers/media/video/saa7134/Makefile
index 9aff937ba7a5..3dbaa19a6d00 100644
--- a/drivers/media/video/saa7134/Makefile
+++ b/drivers/media/video/saa7134/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o
11obj-$(CONFIG_VIDEO_SAA7134_DVB) += saa7134-dvb.o 11obj-$(CONFIG_VIDEO_SAA7134_DVB) += saa7134-dvb.o
12 12
13EXTRA_CFLAGS += -Idrivers/media/video 13EXTRA_CFLAGS += -Idrivers/media/video
14EXTRA_CFLAGS += -Idrivers/media/common/tuners
14EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 15EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
15EXTRA_CFLAGS += -Idrivers/media/dvb/frontends 16EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 98375955a84b..b111903aa322 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -47,6 +47,9 @@ static char name_svideo[] = "S-Video";
47/* ------------------------------------------------------------------ */ 47/* ------------------------------------------------------------------ */
48/* board config info */ 48/* board config info */
49 49
50/* If radio_type !=UNSET, radio_addr should be specified
51 */
52
50struct saa7134_board saa7134_boards[] = { 53struct saa7134_board saa7134_boards[] = {
51 [SAA7134_BOARD_UNKNOWN] = { 54 [SAA7134_BOARD_UNKNOWN] = {
52 .name = "UNKNOWN/GENERIC", 55 .name = "UNKNOWN/GENERIC",
@@ -3087,7 +3090,7 @@ struct saa7134_board saa7134_boards[] = {
3087 .tuner_type = TUNER_PHILIPS_TD1316, /* untested */ 3090 .tuner_type = TUNER_PHILIPS_TD1316, /* untested */
3088 .radio_type = TUNER_TEA5767, /* untested */ 3091 .radio_type = TUNER_TEA5767, /* untested */
3089 .tuner_addr = ADDR_UNSET, 3092 .tuner_addr = ADDR_UNSET,
3090 .radio_addr = ADDR_UNSET, 3093 .radio_addr = 0x60,
3091 .tda9887_conf = TDA9887_PRESENT, 3094 .tda9887_conf = TDA9887_PRESENT,
3092 .mpeg = SAA7134_MPEG_DVB, 3095 .mpeg = SAA7134_MPEG_DVB,
3093 .inputs = {{ 3096 .inputs = {{
@@ -4247,6 +4250,36 @@ struct saa7134_board saa7134_boards[] = {
4247 .amux = LINE1, 4250 .amux = LINE1,
4248 } }, 4251 } },
4249 }, 4252 },
4253 [SAA7134_BOARD_BEHOLD_H6] = {
4254 /* Igor Kuznetsov <igk@igk.ru> */
4255 .name = "Beholder BeholdTV H6",
4256 .audio_clock = 0x00187de7,
4257 .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
4258 .radio_type = UNSET,
4259 .tuner_addr = ADDR_UNSET,
4260 .radio_addr = ADDR_UNSET,
4261 .tda9887_conf = TDA9887_PRESENT,
4262 .inputs = {{
4263 .name = name_tv,
4264 .vmux = 3,
4265 .amux = TV,
4266 .tv = 1,
4267 }, {
4268 .name = name_comp1,
4269 .vmux = 1,
4270 .amux = LINE1,
4271 }, {
4272 .name = name_svideo,
4273 .vmux = 8,
4274 .amux = LINE1,
4275 } },
4276 .radio = {
4277 .name = name_radio,
4278 .amux = LINE2,
4279 },
4280 /* no DVB support for now */
4281 /* .mpeg = SAA7134_MPEG_DVB, */
4282 },
4250}; 4283};
4251 4284
4252const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards); 4285const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -5197,6 +5230,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
5197 .subvendor = 0x5ace, 5230 .subvendor = 0x5ace,
5198 .subdevice = 0x6193, 5231 .subdevice = 0x6193,
5199 .driver_data = SAA7134_BOARD_BEHOLD_M6, 5232 .driver_data = SAA7134_BOARD_BEHOLD_M6,
5233 }, {
5234 .vendor = PCI_VENDOR_ID_PHILIPS,
5235 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5236 .subvendor = 0x5ace,
5237 .subdevice = 0x6191,
5238 .driver_data = SAA7134_BOARD_BEHOLD_M6,
5200 },{ 5239 },{
5201 .vendor = PCI_VENDOR_ID_PHILIPS, 5240 .vendor = PCI_VENDOR_ID_PHILIPS,
5202 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 5241 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
@@ -5246,6 +5285,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
5246 .subdevice = 0xc900, 5285 .subdevice = 0xc900,
5247 .driver_data = SAA7134_BOARD_VIDEOMATE_T750, 5286 .driver_data = SAA7134_BOARD_VIDEOMATE_T750,
5248 }, { 5287 }, {
5288 .vendor = PCI_VENDOR_ID_PHILIPS,
5289 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5290 .subvendor = 0x5ace,
5291 .subdevice = 0x6290,
5292 .driver_data = SAA7134_BOARD_BEHOLD_H6,
5293 }, {
5249 /* --- boards without eeprom + subsystem ID --- */ 5294 /* --- boards without eeprom + subsystem ID --- */
5250 .vendor = PCI_VENDOR_ID_PHILIPS, 5295 .vendor = PCI_VENDOR_ID_PHILIPS,
5251 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 5296 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -5577,20 +5622,87 @@ int saa7134_board_init1(struct saa7134_dev *dev)
5577 return 0; 5622 return 0;
5578} 5623}
5579 5624
5625static void saa7134_tuner_setup(struct saa7134_dev *dev)
5626{
5627 struct tuner_setup tun_setup;
5628 unsigned int mode_mask = T_RADIO |
5629 T_ANALOG_TV |
5630 T_DIGITAL_TV;
5631
5632 memset(&tun_setup, 0, sizeof(tun_setup));
5633 tun_setup.tuner_callback = saa7134_tuner_callback;
5634
5635 if (saa7134_boards[dev->board].radio_type != UNSET) {
5636 tun_setup.type = saa7134_boards[dev->board].radio_type;
5637 tun_setup.addr = saa7134_boards[dev->board].radio_addr;
5638
5639 tun_setup.mode_mask = T_RADIO;
5640
5641 saa7134_i2c_call_clients(dev, TUNER_SET_TYPE_ADDR, &tun_setup);
5642 mode_mask &= ~T_RADIO;
5643 }
5644
5645 if ((dev->tuner_type != TUNER_ABSENT) && (dev->tuner_type != UNSET)) {
5646 tun_setup.type = dev->tuner_type;
5647 tun_setup.addr = dev->tuner_addr;
5648 tun_setup.config = saa7134_boards[dev->board].tuner_config;
5649 tun_setup.tuner_callback = saa7134_tuner_callback;
5650
5651 tun_setup.mode_mask = mode_mask;
5652
5653 saa7134_i2c_call_clients(dev, TUNER_SET_TYPE_ADDR, &tun_setup);
5654 }
5655
5656 if (dev->tda9887_conf) {
5657 struct v4l2_priv_tun_config tda9887_cfg;
5658
5659 tda9887_cfg.tuner = TUNER_TDA9887;
5660 tda9887_cfg.priv = &dev->tda9887_conf;
5661
5662 saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG,
5663 &tda9887_cfg);
5664 }
5665
5666 if (dev->tuner_type == TUNER_XC2028) {
5667 struct v4l2_priv_tun_config xc2028_cfg;
5668 struct xc2028_ctrl ctl;
5669
5670 memset(&xc2028_cfg, 0, sizeof(ctl));
5671 memset(&ctl, 0, sizeof(ctl));
5672
5673 ctl.fname = XC2028_DEFAULT_FIRMWARE;
5674 ctl.max_len = 64;
5675
5676 switch (dev->board) {
5677 case SAA7134_BOARD_AVERMEDIA_A16D:
5678 ctl.demod = XC3028_FE_ZARLINK456;
5679 break;
5680 default:
5681 ctl.demod = XC3028_FE_OREN538;
5682 ctl.mts = 1;
5683 }
5684
5685 xc2028_cfg.tuner = TUNER_XC2028;
5686 xc2028_cfg.priv = &ctl;
5687
5688 saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, &xc2028_cfg);
5689 }
5690}
5691
5580/* stuff which needs working i2c */ 5692/* stuff which needs working i2c */
5581int saa7134_board_init2(struct saa7134_dev *dev) 5693int saa7134_board_init2(struct saa7134_dev *dev)
5582{ 5694{
5583 unsigned char buf; 5695 unsigned char buf;
5584 int board; 5696 int board;
5585 struct tuner_setup tun_setup; 5697
5586 tun_setup.config = 0; 5698 dev->tuner_type = saa7134_boards[dev->board].tuner_type;
5587 tun_setup.tuner_callback = saa7134_tuner_callback; 5699 dev->tuner_addr = saa7134_boards[dev->board].tuner_addr;
5588 5700
5589 switch (dev->board) { 5701 switch (dev->board) {
5590 case SAA7134_BOARD_BMK_MPEX_NOTUNER: 5702 case SAA7134_BOARD_BMK_MPEX_NOTUNER:
5591 case SAA7134_BOARD_BMK_MPEX_TUNER: 5703 case SAA7134_BOARD_BMK_MPEX_TUNER:
5592 dev->i2c_client.addr = 0x60; 5704 dev->i2c_client.addr = 0x60;
5593 board = (i2c_master_recv(&dev->i2c_client,&buf,0) < 0) 5705 board = (i2c_master_recv(&dev->i2c_client, &buf, 0) < 0)
5594 ? SAA7134_BOARD_BMK_MPEX_NOTUNER 5706 ? SAA7134_BOARD_BMK_MPEX_NOTUNER
5595 : SAA7134_BOARD_BMK_MPEX_TUNER; 5707 : SAA7134_BOARD_BMK_MPEX_TUNER;
5596 if (board == dev->board) 5708 if (board == dev->board)
@@ -5600,21 +5712,9 @@ int saa7134_board_init2(struct saa7134_dev *dev)
5600 saa7134_boards[dev->board].name); 5712 saa7134_boards[dev->board].name);
5601 dev->tuner_type = saa7134_boards[dev->board].tuner_type; 5713 dev->tuner_type = saa7134_boards[dev->board].tuner_type;
5602 5714
5603 if (TUNER_ABSENT != dev->tuner_type) {
5604 tun_setup.mode_mask = T_RADIO |
5605 T_ANALOG_TV |
5606 T_DIGITAL_TV;
5607 tun_setup.type = dev->tuner_type;
5608 tun_setup.addr = ADDR_UNSET;
5609 tun_setup.tuner_callback = saa7134_tuner_callback;
5610
5611 saa7134_i2c_call_clients(dev,
5612 TUNER_SET_TYPE_ADDR,
5613 &tun_setup);
5614 }
5615 break; 5715 break;
5616 case SAA7134_BOARD_MD7134: 5716 case SAA7134_BOARD_MD7134:
5617 { 5717 {
5618 u8 subaddr; 5718 u8 subaddr;
5619 u8 data[3]; 5719 u8 data[3];
5620 int ret, tuner_t; 5720 int ret, tuner_t;
@@ -5667,30 +5767,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
5667 } 5767 }
5668 5768
5669 printk(KERN_INFO "%s Tuner type is %d\n", dev->name, dev->tuner_type); 5769 printk(KERN_INFO "%s Tuner type is %d\n", dev->name, dev->tuner_type);
5670 if (dev->tuner_type == TUNER_PHILIPS_FMD1216ME_MK3) {
5671 struct v4l2_priv_tun_config tda9887_cfg;
5672
5673 tda9887_cfg.tuner = TUNER_TDA9887;
5674 tda9887_cfg.priv = &dev->tda9887_conf;
5675
5676 dev->tda9887_conf = TDA9887_PRESENT |
5677 TDA9887_PORT1_ACTIVE |
5678 TDA9887_PORT2_ACTIVE;
5679
5680 saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG,
5681 &tda9887_cfg);
5682 }
5683
5684 tun_setup.mode_mask = T_RADIO |
5685 T_ANALOG_TV |
5686 T_DIGITAL_TV;
5687 tun_setup.type = dev->tuner_type;
5688 tun_setup.addr = ADDR_UNSET;
5689
5690 saa7134_i2c_call_clients(dev,
5691 TUNER_SET_TYPE_ADDR, &tun_setup);
5692 }
5693 break; 5770 break;
5771 }
5694 case SAA7134_BOARD_PHILIPS_EUROPA: 5772 case SAA7134_BOARD_PHILIPS_EUROPA:
5695 if (dev->autodetected && (dev->eedata[0x41] == 0x1c)) { 5773 if (dev->autodetected && (dev->eedata[0x41] == 0x1c)) {
5696 /* Reconfigure board as Snake reference design */ 5774 /* Reconfigure board as Snake reference design */
@@ -5702,43 +5780,43 @@ int saa7134_board_init2(struct saa7134_dev *dev)
5702 } 5780 }
5703 case SAA7134_BOARD_VIDEOMATE_DVBT_300: 5781 case SAA7134_BOARD_VIDEOMATE_DVBT_300:
5704 case SAA7134_BOARD_ASUS_EUROPA2_HYBRID: 5782 case SAA7134_BOARD_ASUS_EUROPA2_HYBRID:
5783 {
5784
5705 /* The Philips EUROPA based hybrid boards have the tuner connected through 5785 /* The Philips EUROPA based hybrid boards have the tuner connected through
5706 * the channel decoder. We have to make it transparent to find it 5786 * the channel decoder. We have to make it transparent to find it
5707 */ 5787 */
5708 {
5709 u8 data[] = { 0x07, 0x02}; 5788 u8 data[] = { 0x07, 0x02};
5710 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 5789 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
5711 i2c_transfer(&dev->i2c_adap, &msg, 1); 5790 i2c_transfer(&dev->i2c_adap, &msg, 1);
5712 5791
5713 tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV;
5714 tun_setup.type = dev->tuner_type;
5715 tun_setup.addr = dev->tuner_addr;
5716
5717 saa7134_i2c_call_clients (dev, TUNER_SET_TYPE_ADDR,&tun_setup);
5718 }
5719 break; 5792 break;
5793 }
5720 case SAA7134_BOARD_PHILIPS_TIGER: 5794 case SAA7134_BOARD_PHILIPS_TIGER:
5721 case SAA7134_BOARD_PHILIPS_TIGER_S: 5795 case SAA7134_BOARD_PHILIPS_TIGER_S:
5722 { 5796 {
5723 u8 data[] = { 0x3c, 0x33, 0x60}; 5797 u8 data[] = { 0x3c, 0x33, 0x60};
5724 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 5798 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
5725 if(dev->autodetected && (dev->eedata[0x49] == 0x50)) { 5799 if (dev->autodetected && (dev->eedata[0x49] == 0x50)) {
5726 dev->board = SAA7134_BOARD_PHILIPS_TIGER_S; 5800 dev->board = SAA7134_BOARD_PHILIPS_TIGER_S;
5727 printk(KERN_INFO "%s: Reconfigured board as %s\n", 5801 printk(KERN_INFO "%s: Reconfigured board as %s\n",
5728 dev->name, saa7134_boards[dev->board].name); 5802 dev->name, saa7134_boards[dev->board].name);
5729 } 5803 }
5730 if(dev->board == SAA7134_BOARD_PHILIPS_TIGER_S) { 5804 if (dev->board == SAA7134_BOARD_PHILIPS_TIGER_S) {
5731 tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; 5805 dev->tuner_type = TUNER_PHILIPS_TDA8290;
5732 tun_setup.type = TUNER_PHILIPS_TDA8290; 5806
5733 tun_setup.addr = 0x4b; 5807 saa7134_tuner_setup(dev);
5734 tun_setup.config = 2;
5735 5808
5736 saa7134_i2c_call_clients (dev, TUNER_SET_TYPE_ADDR,&tun_setup);
5737 data[2] = 0x68; 5809 data[2] = 0x68;
5810 i2c_transfer(&dev->i2c_adap, &msg, 1);
5811
5812 /* Tuner setup is handled before I2C transfer.
5813 Due to that, there's no need to do it later
5814 */
5815 return 0;
5738 } 5816 }
5739 i2c_transfer(&dev->i2c_adap, &msg, 1); 5817 i2c_transfer(&dev->i2c_adap, &msg, 1);
5740 }
5741 break; 5818 break;
5819 }
5742 case SAA7134_BOARD_HAUPPAUGE_HVR1110: 5820 case SAA7134_BOARD_HAUPPAUGE_HVR1110:
5743 hauppauge_eeprom(dev, dev->eedata+0x80); 5821 hauppauge_eeprom(dev, dev->eedata+0x80);
5744 /* break intentionally omitted */ 5822 /* break intentionally omitted */
@@ -5751,52 +5829,55 @@ int saa7134_board_init2(struct saa7134_dev *dev)
5751 case SAA7134_BOARD_AVERMEDIA_SUPER_007: 5829 case SAA7134_BOARD_AVERMEDIA_SUPER_007:
5752 case SAA7134_BOARD_TWINHAN_DTV_DVB_3056: 5830 case SAA7134_BOARD_TWINHAN_DTV_DVB_3056:
5753 case SAA7134_BOARD_CREATIX_CTX953: 5831 case SAA7134_BOARD_CREATIX_CTX953:
5832 {
5754 /* this is a hybrid board, initialize to analog mode 5833 /* this is a hybrid board, initialize to analog mode
5755 * and configure firmware eeprom address 5834 * and configure firmware eeprom address
5756 */ 5835 */
5757 {
5758 u8 data[] = { 0x3c, 0x33, 0x60}; 5836 u8 data[] = { 0x3c, 0x33, 0x60};
5759 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 5837 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
5760 i2c_transfer(&dev->i2c_adap, &msg, 1); 5838 i2c_transfer(&dev->i2c_adap, &msg, 1);
5761 }
5762 break; 5839 break;
5840 }
5763 case SAA7134_BOARD_FLYDVB_TRIO: 5841 case SAA7134_BOARD_FLYDVB_TRIO:
5764 { 5842 {
5765 u8 data[] = { 0x3c, 0x33, 0x62}; 5843 u8 data[] = { 0x3c, 0x33, 0x62};
5766 struct i2c_msg msg = {.addr=0x09, .flags=0, .buf=data, .len = sizeof(data)}; 5844 struct i2c_msg msg = {.addr=0x09, .flags=0, .buf=data, .len = sizeof(data)};
5767 i2c_transfer(&dev->i2c_adap, &msg, 1); 5845 i2c_transfer(&dev->i2c_adap, &msg, 1);
5768 }
5769 break; 5846 break;
5847 }
5770 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331: 5848 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
5771 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS: 5849 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
5850 {
5772 /* initialize analog mode */ 5851 /* initialize analog mode */
5773 {
5774 u8 data[] = { 0x3c, 0x33, 0x6a}; 5852 u8 data[] = { 0x3c, 0x33, 0x6a};
5775 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 5853 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
5776 i2c_transfer(&dev->i2c_adap, &msg, 1); 5854 i2c_transfer(&dev->i2c_adap, &msg, 1);
5777 }
5778 break; 5855 break;
5856 }
5779 case SAA7134_BOARD_CINERGY_HT_PCMCIA: 5857 case SAA7134_BOARD_CINERGY_HT_PCMCIA:
5780 case SAA7134_BOARD_CINERGY_HT_PCI: 5858 case SAA7134_BOARD_CINERGY_HT_PCI:
5859 {
5781 /* initialize analog mode */ 5860 /* initialize analog mode */
5782 {
5783 u8 data[] = { 0x3c, 0x33, 0x68}; 5861 u8 data[] = { 0x3c, 0x33, 0x68};
5784 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; 5862 struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
5785 i2c_transfer(&dev->i2c_adap, &msg, 1); 5863 i2c_transfer(&dev->i2c_adap, &msg, 1);
5786 }
5787 break; 5864 break;
5865 }
5788 case SAA7134_BOARD_KWORLD_ATSC110: 5866 case SAA7134_BOARD_KWORLD_ATSC110:
5789 { 5867 {
5790 /* enable tuner */ 5868 /* enable tuner */
5791 int i; 5869 int i;
5792 static const u8 buffer [] = { 0x10,0x12,0x13,0x04,0x16,0x00,0x14,0x04,0x017,0x00 }; 5870 static const u8 buffer [] = { 0x10, 0x12, 0x13, 0x04, 0x16,
5793 dev->i2c_client.addr = 0x0a; 5871 0x00, 0x14, 0x04, 0x17, 0x00 };
5794 for (i = 0; i < 5; i++) 5872 dev->i2c_client.addr = 0x0a;
5795 if (2 != i2c_master_send(&dev->i2c_client,&buffer[i*2],2)) 5873 for (i = 0; i < 5; i++)
5796 printk(KERN_WARNING "%s: Unable to enable tuner(%i).\n", 5874 if (2 != i2c_master_send(&dev->i2c_client,
5797 dev->name, i); 5875 &buffer[i*2], 2))
5798 } 5876 printk(KERN_WARNING
5877 "%s: Unable to enable tuner(%i).\n",
5878 dev->name, i);
5799 break; 5879 break;
5880 }
5800 case SAA7134_BOARD_VIDEOMATE_DVBT_200: 5881 case SAA7134_BOARD_VIDEOMATE_DVBT_200:
5801 case SAA7134_BOARD_VIDEOMATE_DVBT_200A: 5882 case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
5802 /* The T200 and the T200A share the same pci id. Consequently, 5883 /* The T200 and the T200A share the same pci id. Consequently,
@@ -5821,7 +5902,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
5821 } 5902 }
5822 break; 5903 break;
5823 case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM: 5904 case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM:
5824 { 5905 {
5825 struct v4l2_priv_tun_config tea5767_cfg; 5906 struct v4l2_priv_tun_config tea5767_cfg;
5826 struct tea5767_ctrl ctl; 5907 struct tea5767_ctrl ctl;
5827 5908
@@ -5832,34 +5913,11 @@ int saa7134_board_init2(struct saa7134_dev *dev)
5832 tea5767_cfg.tuner = TUNER_TEA5767; 5913 tea5767_cfg.tuner = TUNER_TEA5767;
5833 tea5767_cfg.priv = &ctl; 5914 tea5767_cfg.priv = &ctl;
5834 saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, &tea5767_cfg); 5915 saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, &tea5767_cfg);
5835 }
5836 break; 5916 break;
5837 } 5917 }
5918 } /* switch() */
5838 5919
5839 if (dev->tuner_type == TUNER_XC2028) { 5920 saa7134_tuner_setup(dev);
5840 struct v4l2_priv_tun_config xc2028_cfg;
5841 struct xc2028_ctrl ctl;
5842
5843 memset(&xc2028_cfg, 0, sizeof(ctl));
5844 memset(&ctl, 0, sizeof(ctl));
5845
5846 ctl.fname = XC2028_DEFAULT_FIRMWARE;
5847 ctl.max_len = 64;
5848
5849 switch (dev->board) {
5850 case SAA7134_BOARD_AVERMEDIA_A16D:
5851 ctl.demod = XC3028_FE_ZARLINK456;
5852 break;
5853 default:
5854 ctl.demod = XC3028_FE_OREN538;
5855 ctl.mts = 1;
5856 }
5857
5858 xc2028_cfg.tuner = TUNER_XC2028;
5859 xc2028_cfg.priv = &ctl;
5860
5861 saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, &xc2028_cfg);
5862 }
5863 5921
5864 return 0; 5922 return 0;
5865} 5923}
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index 2ccfaba0c490..d8af3863f2d3 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -324,8 +324,6 @@ static u32 functionality(struct i2c_adapter *adap)
324static int attach_inform(struct i2c_client *client) 324static int attach_inform(struct i2c_client *client)
325{ 325{
326 struct saa7134_dev *dev = client->adapter->algo_data; 326 struct saa7134_dev *dev = client->adapter->algo_data;
327 int tuner = dev->tuner_type;
328 struct tuner_setup tun_setup;
329 327
330 d1printk( "%s i2c attach [addr=0x%x,client=%s]\n", 328 d1printk( "%s i2c attach [addr=0x%x,client=%s]\n",
331 client->driver->driver.name, client->addr, client->name); 329 client->driver->driver.name, client->addr, client->name);
@@ -346,46 +344,6 @@ static int attach_inform(struct i2c_client *client)
346 } 344 }
347 } 345 }
348 346
349 if (!client->driver->command)
350 return 0;
351
352 if (saa7134_boards[dev->board].radio_type != UNSET) {
353
354 tun_setup.type = saa7134_boards[dev->board].radio_type;
355 tun_setup.addr = saa7134_boards[dev->board].radio_addr;
356
357 if ((tun_setup.addr == ADDR_UNSET) || (tun_setup.addr == client->addr)) {
358 tun_setup.mode_mask = T_RADIO;
359
360 client->driver->command(client, TUNER_SET_TYPE_ADDR, &tun_setup);
361 }
362 }
363
364 if (tuner != UNSET) {
365 tun_setup.type = tuner;
366 tun_setup.addr = saa7134_boards[dev->board].tuner_addr;
367 tun_setup.config = saa7134_boards[dev->board].tuner_config;
368 tun_setup.tuner_callback = saa7134_tuner_callback;
369
370 if ((tun_setup.addr == ADDR_UNSET)||(tun_setup.addr == client->addr)) {
371
372 tun_setup.mode_mask = T_ANALOG_TV;
373
374 client->driver->command(client,TUNER_SET_TYPE_ADDR, &tun_setup);
375 }
376
377 if (tuner == TUNER_TDA9887) {
378 struct v4l2_priv_tun_config tda9887_cfg;
379
380 tda9887_cfg.tuner = TUNER_TDA9887;
381 tda9887_cfg.priv = &dev->tda9887_conf;
382
383 client->driver->command(client, TUNER_SET_CONFIG,
384 &tda9887_cfg);
385 }
386 }
387
388
389 return 0; 347 return 0;
390} 348}
391 349
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 767ff30832f2..919632b10aae 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -531,6 +531,7 @@ void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir)
531 break; 531 break;
532 case SAA7134_BOARD_BEHOLD_607_9FM: 532 case SAA7134_BOARD_BEHOLD_607_9FM:
533 case SAA7134_BOARD_BEHOLD_M6: 533 case SAA7134_BOARD_BEHOLD_M6:
534 case SAA7134_BOARD_BEHOLD_H6:
534 snprintf(ir->c.name, sizeof(ir->c.name), "BeholdTV"); 535 snprintf(ir->c.name, sizeof(ir->c.name), "BeholdTV");
535 ir->get_key = get_key_beholdm6xx; 536 ir->get_key = get_key_beholdm6xx;
536 ir->ir_codes = ir_codes_behold; 537 ir->ir_codes = ir_codes_behold;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 924ffd13637e..34ff0d4998f3 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -263,6 +263,7 @@ struct saa7134_format {
263#define SAA7134_BOARD_VIDEOMATE_T750 139 263#define SAA7134_BOARD_VIDEOMATE_T750 139
264#define SAA7134_BOARD_AVERMEDIA_A700_PRO 140 264#define SAA7134_BOARD_AVERMEDIA_A700_PRO 140
265#define SAA7134_BOARD_AVERMEDIA_A700_HYBRID 141 265#define SAA7134_BOARD_AVERMEDIA_A700_HYBRID 141
266#define SAA7134_BOARD_BEHOLD_H6 142
266 267
267 268
268#define SAA7134_MAXBOARDS 8 269#define SAA7134_MAXBOARDS 8
diff --git a/drivers/media/video/saa717x.c b/drivers/media/video/saa717x.c
index 53c5edbcf7ea..72c4081feff5 100644
--- a/drivers/media/video/saa717x.c
+++ b/drivers/media/video/saa717x.c
@@ -1418,7 +1418,8 @@ static int saa717x_command(struct i2c_client *client, unsigned cmd, void *arg)
1418/* i2c implementation */ 1418/* i2c implementation */
1419 1419
1420/* ----------------------------------------------------------------------- */ 1420/* ----------------------------------------------------------------------- */
1421static int saa717x_probe(struct i2c_client *client) 1421static int saa717x_probe(struct i2c_client *client,
1422 const struct i2c_device_id *did)
1422{ 1423{
1423 struct saa717x_state *decoder; 1424 struct saa717x_state *decoder;
1424 u8 id = 0; 1425 u8 id = 0;
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c
index 6943b447a1bd..e57a64605778 100644
--- a/drivers/media/video/tcm825x.c
+++ b/drivers/media/video/tcm825x.c
@@ -840,7 +840,8 @@ static struct v4l2_int_device tcm825x_int_device = {
840 }, 840 },
841}; 841};
842 842
843static int tcm825x_probe(struct i2c_client *client) 843static int tcm825x_probe(struct i2c_client *client,
844 const struct i2c_device_id *did)
844{ 845{
845 struct tcm825x_sensor *sensor = &tcm825x; 846 struct tcm825x_sensor *sensor = &tcm825x;
846 int rval; 847 int rval;
diff --git a/drivers/media/video/tlv320aic23b.c b/drivers/media/video/tlv320aic23b.c
index dc7b9c220b90..f1db54202dea 100644
--- a/drivers/media/video/tlv320aic23b.c
+++ b/drivers/media/video/tlv320aic23b.c
@@ -125,7 +125,8 @@ static int tlv320aic23b_command(struct i2c_client *client,
125 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' 125 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
126 */ 126 */
127 127
128static int tlv320aic23b_probe(struct i2c_client *client) 128static int tlv320aic23b_probe(struct i2c_client *client,
129 const struct i2c_device_id *id)
129{ 130{
130 struct tlv320aic23b_state *state; 131 struct tlv320aic23b_state *state;
131 132
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 2b72e10e6b9f..6bf104ea051d 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -33,6 +33,46 @@
33 33
34#define PREFIX t->i2c->driver->driver.name 34#define PREFIX t->i2c->driver->driver.name
35 35
36/** This macro allows us to probe dynamically, avoiding static links */
37#ifdef CONFIG_MEDIA_ATTACH
38#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
39 int __r = -EINVAL; \
40 typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
41 if (__a) { \
42 __r = (int) __a(ARGS); \
43 } else { \
44 printk(KERN_ERR "TUNER: Unable to find " \
45 "symbol "#FUNCTION"()\n"); \
46 } \
47 symbol_put(FUNCTION); \
48 __r; \
49})
50
51static void tuner_detach(struct dvb_frontend *fe)
52{
53 if (fe->ops.tuner_ops.release) {
54 fe->ops.tuner_ops.release(fe);
55 symbol_put_addr(fe->ops.tuner_ops.release);
56 }
57 if (fe->ops.analog_ops.release) {
58 fe->ops.analog_ops.release(fe);
59 symbol_put_addr(fe->ops.analog_ops.release);
60 }
61}
62#else
63#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
64 FUNCTION(ARGS); \
65})
66
67static void tuner_detach(struct dvb_frontend *fe)
68{
69 if (fe->ops.tuner_ops.release)
70 fe->ops.tuner_ops.release(fe);
71 if (fe->ops.analog_ops.release)
72 fe->ops.analog_ops.release(fe);
73}
74#endif
75
36struct tuner { 76struct tuner {
37 /* device */ 77 /* device */
38 struct dvb_frontend fe; 78 struct dvb_frontend fe;
@@ -56,7 +96,7 @@ struct tuner {
56 96
57/* standard i2c insmod options */ 97/* standard i2c insmod options */
58static unsigned short normal_i2c[] = { 98static unsigned short normal_i2c[] = {
59#if defined(CONFIG_TUNER_TEA5761) || (defined(CONFIG_TUNER_TEA5761_MODULE) && defined(MODULE)) 99#if defined(CONFIG_MEDIA_TUNER_TEA5761) || (defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE) && defined(MODULE))
60 0x10, 100 0x10,
61#endif 101#endif
62 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */ 102 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
@@ -139,22 +179,6 @@ static void fe_set_params(struct dvb_frontend *fe,
139 fe_tuner_ops->set_analog_params(fe, params); 179 fe_tuner_ops->set_analog_params(fe, params);
140} 180}
141 181
142static void fe_release(struct dvb_frontend *fe)
143{
144 if (fe->ops.tuner_ops.release)
145 fe->ops.tuner_ops.release(fe);
146
147 /* DO NOT kfree(fe->analog_demod_priv)
148 *
149 * If we are in this function, analog_demod_priv contains a pointer
150 * to struct tuner *t. This will be kfree'd in tuner_detach().
151 *
152 * Otherwise, fe->ops.analog_demod_ops->release will
153 * handle the cleanup for analog demodulator modules.
154 */
155 fe->analog_demod_priv = NULL;
156}
157
158static void fe_standby(struct dvb_frontend *fe) 182static void fe_standby(struct dvb_frontend *fe)
159{ 183{
160 struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; 184 struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
@@ -191,7 +215,6 @@ static void tuner_status(struct dvb_frontend *fe);
191static struct analog_demod_ops tuner_core_ops = { 215static struct analog_demod_ops tuner_core_ops = {
192 .set_params = fe_set_params, 216 .set_params = fe_set_params,
193 .standby = fe_standby, 217 .standby = fe_standby,
194 .release = fe_release,
195 .has_signal = fe_has_signal, 218 .has_signal = fe_has_signal,
196 .set_config = fe_set_config, 219 .set_config = fe_set_config,
197 .tuner_status = tuner_status 220 .tuner_status = tuner_status
@@ -323,7 +346,8 @@ static void attach_tda829x(struct tuner *t)
323 .lna_cfg = t->config, 346 .lna_cfg = t->config,
324 .tuner_callback = t->tuner_callback, 347 .tuner_callback = t->tuner_callback,
325 }; 348 };
326 tda829x_attach(&t->fe, t->i2c->adapter, t->i2c->addr, &cfg); 349 dvb_attach(tda829x_attach,
350 &t->fe, t->i2c->adapter, t->i2c->addr, &cfg);
327} 351}
328 352
329static struct xc5000_config xc5000_cfg; 353static struct xc5000_config xc5000_cfg;
@@ -356,12 +380,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
356 } 380 }
357 381
358 /* discard private data, in case set_type() was previously called */ 382 /* discard private data, in case set_type() was previously called */
359 if (analog_ops->release) 383 tuner_detach(&t->fe);
360 analog_ops->release(&t->fe); 384 t->fe.analog_demod_priv = NULL;
361 385
362 switch (t->type) { 386 switch (t->type) {
363 case TUNER_MT2032: 387 case TUNER_MT2032:
364 microtune_attach(&t->fe, t->i2c->adapter, t->i2c->addr); 388 dvb_attach(microtune_attach,
389 &t->fe, t->i2c->adapter, t->i2c->addr);
365 break; 390 break;
366 case TUNER_PHILIPS_TDA8290: 391 case TUNER_PHILIPS_TDA8290:
367 { 392 {
@@ -369,12 +394,14 @@ static void set_type(struct i2c_client *c, unsigned int type,
369 break; 394 break;
370 } 395 }
371 case TUNER_TEA5767: 396 case TUNER_TEA5767:
372 if (!tea5767_attach(&t->fe, t->i2c->adapter, t->i2c->addr)) 397 if (!dvb_attach(tea5767_attach, &t->fe,
398 t->i2c->adapter, t->i2c->addr))
373 goto attach_failed; 399 goto attach_failed;
374 t->mode_mask = T_RADIO; 400 t->mode_mask = T_RADIO;
375 break; 401 break;
376 case TUNER_TEA5761: 402 case TUNER_TEA5761:
377 if (!tea5761_attach(&t->fe, t->i2c->adapter, t->i2c->addr)) 403 if (!dvb_attach(tea5761_attach, &t->fe,
404 t->i2c->adapter, t->i2c->addr))
378 goto attach_failed; 405 goto attach_failed;
379 t->mode_mask = T_RADIO; 406 t->mode_mask = T_RADIO;
380 break; 407 break;
@@ -388,8 +415,8 @@ static void set_type(struct i2c_client *c, unsigned int type,
388 buffer[2] = 0x86; 415 buffer[2] = 0x86;
389 buffer[3] = 0x54; 416 buffer[3] = 0x54;
390 i2c_master_send(c, buffer, 4); 417 i2c_master_send(c, buffer, 4);
391 if (!simple_tuner_attach(&t->fe, t->i2c->adapter, t->i2c->addr, 418 if (!dvb_attach(simple_tuner_attach, &t->fe,
392 t->type)) 419 t->i2c->adapter, t->i2c->addr, t->type))
393 goto attach_failed; 420 goto attach_failed;
394 break; 421 break;
395 case TUNER_PHILIPS_TD1316: 422 case TUNER_PHILIPS_TD1316:
@@ -397,9 +424,9 @@ static void set_type(struct i2c_client *c, unsigned int type,
397 buffer[1] = 0xdc; 424 buffer[1] = 0xdc;
398 buffer[2] = 0x86; 425 buffer[2] = 0x86;
399 buffer[3] = 0xa4; 426 buffer[3] = 0xa4;
400 i2c_master_send(c,buffer,4); 427 i2c_master_send(c, buffer, 4);
401 if (!simple_tuner_attach(&t->fe, t->i2c->adapter, 428 if (!dvb_attach(simple_tuner_attach, &t->fe,
402 t->i2c->addr, t->type)) 429 t->i2c->adapter, t->i2c->addr, t->type))
403 goto attach_failed; 430 goto attach_failed;
404 break; 431 break;
405 case TUNER_XC2028: 432 case TUNER_XC2028:
@@ -409,12 +436,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
409 .i2c_addr = t->i2c->addr, 436 .i2c_addr = t->i2c->addr,
410 .callback = t->tuner_callback, 437 .callback = t->tuner_callback,
411 }; 438 };
412 if (!xc2028_attach(&t->fe, &cfg)) 439 if (!dvb_attach(xc2028_attach, &t->fe, &cfg))
413 goto attach_failed; 440 goto attach_failed;
414 break; 441 break;
415 } 442 }
416 case TUNER_TDA9887: 443 case TUNER_TDA9887:
417 tda9887_attach(&t->fe, t->i2c->adapter, t->i2c->addr); 444 dvb_attach(tda9887_attach,
445 &t->fe, t->i2c->adapter, t->i2c->addr);
418 break; 446 break;
419 case TUNER_XC5000: 447 case TUNER_XC5000:
420 { 448 {
@@ -424,7 +452,8 @@ static void set_type(struct i2c_client *c, unsigned int type,
424 xc5000_cfg.if_khz = 5380; 452 xc5000_cfg.if_khz = 5380;
425 xc5000_cfg.priv = c->adapter->algo_data; 453 xc5000_cfg.priv = c->adapter->algo_data;
426 xc5000_cfg.tuner_callback = t->tuner_callback; 454 xc5000_cfg.tuner_callback = t->tuner_callback;
427 if (!xc5000_attach(&t->fe, t->i2c->adapter, &xc5000_cfg)) 455 if (!dvb_attach(xc5000_attach,
456 &t->fe, t->i2c->adapter, &xc5000_cfg))
428 goto attach_failed; 457 goto attach_failed;
429 458
430 xc_tuner_ops = &t->fe.ops.tuner_ops; 459 xc_tuner_ops = &t->fe.ops.tuner_ops;
@@ -433,8 +462,8 @@ static void set_type(struct i2c_client *c, unsigned int type,
433 break; 462 break;
434 } 463 }
435 default: 464 default:
436 if (!simple_tuner_attach(&t->fe, t->i2c->adapter, 465 if (!dvb_attach(simple_tuner_attach, &t->fe,
437 t->i2c->addr, t->type)) 466 t->i2c->adapter, t->i2c->addr, t->type))
438 goto attach_failed; 467 goto attach_failed;
439 468
440 break; 469 break;
@@ -442,12 +471,14 @@ static void set_type(struct i2c_client *c, unsigned int type,
442 471
443 if ((NULL == analog_ops->set_params) && 472 if ((NULL == analog_ops->set_params) &&
444 (fe_tuner_ops->set_analog_params)) { 473 (fe_tuner_ops->set_analog_params)) {
474
445 strlcpy(t->i2c->name, fe_tuner_ops->info.name, 475 strlcpy(t->i2c->name, fe_tuner_ops->info.name,
446 sizeof(t->i2c->name)); 476 sizeof(t->i2c->name));
447 477
448 t->fe.analog_demod_priv = t; 478 t->fe.analog_demod_priv = t;
449 memcpy(analog_ops, &tuner_core_ops, 479 memcpy(analog_ops, &tuner_core_ops,
450 sizeof(struct analog_demod_ops)); 480 sizeof(struct analog_demod_ops));
481
451 } else { 482 } else {
452 strlcpy(t->i2c->name, analog_ops->info.name, 483 strlcpy(t->i2c->name, analog_ops->info.name,
453 sizeof(t->i2c->name)); 484 sizeof(t->i2c->name));
@@ -645,8 +676,8 @@ static void tuner_status(struct dvb_frontend *fe)
645{ 676{
646 struct tuner *t = fe->analog_demod_priv; 677 struct tuner *t = fe->analog_demod_priv;
647 unsigned long freq, freq_fraction; 678 unsigned long freq, freq_fraction;
648 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; 679 struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
649 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; 680 struct analog_demod_ops *analog_ops = &fe->ops.analog_ops;
650 const char *p; 681 const char *p;
651 682
652 switch (t->mode) { 683 switch (t->mode) {
@@ -730,8 +761,10 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
730 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; 761 struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
731 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; 762 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
732 763
733 if (tuner_debug>1) 764 if (tuner_debug > 1) {
734 v4l_i2c_print_ioctl(client,cmd); 765 v4l_i2c_print_ioctl(client,cmd);
766 printk("\n");
767 }
735 768
736 switch (cmd) { 769 switch (cmd) {
737 /* --- configuration --- */ 770 /* --- configuration --- */
@@ -1073,7 +1106,8 @@ static void tuner_lookup(struct i2c_adapter *adap,
1073/* During client attach, set_type is called by adapter's attach_inform callback. 1106/* During client attach, set_type is called by adapter's attach_inform callback.
1074 set_type must then be completed by tuner_probe. 1107 set_type must then be completed by tuner_probe.
1075 */ 1108 */
1076static int tuner_probe(struct i2c_client *client) 1109static int tuner_probe(struct i2c_client *client,
1110 const struct i2c_device_id *id)
1077{ 1111{
1078 struct tuner *t; 1112 struct tuner *t;
1079 struct tuner *radio; 1113 struct tuner *radio;
@@ -1111,8 +1145,9 @@ static int tuner_probe(struct i2c_client *client)
1111 if (!no_autodetect) { 1145 if (!no_autodetect) {
1112 switch (client->addr) { 1146 switch (client->addr) {
1113 case 0x10: 1147 case 0x10:
1114 if (tea5761_autodetection(t->i2c->adapter, 1148 if (tuner_symbol_probe(tea5761_autodetection,
1115 t->i2c->addr) >= 0) { 1149 t->i2c->adapter,
1150 t->i2c->addr) >= 0) {
1116 t->type = TUNER_TEA5761; 1151 t->type = TUNER_TEA5761;
1117 t->mode_mask = T_RADIO; 1152 t->mode_mask = T_RADIO;
1118 t->mode = T_STANDBY; 1153 t->mode = T_STANDBY;
@@ -1131,8 +1166,8 @@ static int tuner_probe(struct i2c_client *client)
1131 case 0x4b: 1166 case 0x4b:
1132 /* If chip is not tda8290, don't register. 1167 /* If chip is not tda8290, don't register.
1133 since it can be tda9887*/ 1168 since it can be tda9887*/
1134 if (tda829x_probe(t->i2c->adapter, 1169 if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
1135 t->i2c->addr) == 0) { 1170 t->i2c->addr) == 0) {
1136 tuner_dbg("tda829x detected\n"); 1171 tuner_dbg("tda829x detected\n");
1137 } else { 1172 } else {
1138 /* Default is being tda9887 */ 1173 /* Default is being tda9887 */
@@ -1144,7 +1179,8 @@ static int tuner_probe(struct i2c_client *client)
1144 } 1179 }
1145 break; 1180 break;
1146 case 0x60: 1181 case 0x60:
1147 if (tea5767_autodetection(t->i2c->adapter, t->i2c->addr) 1182 if (tuner_symbol_probe(tea5767_autodetection,
1183 t->i2c->adapter, t->i2c->addr)
1148 != EINVAL) { 1184 != EINVAL) {
1149 t->type = TUNER_TEA5767; 1185 t->type = TUNER_TEA5767;
1150 t->mode_mask = T_RADIO; 1186 t->mode_mask = T_RADIO;
@@ -1233,10 +1269,9 @@ static int tuner_legacy_probe(struct i2c_adapter *adap)
1233static int tuner_remove(struct i2c_client *client) 1269static int tuner_remove(struct i2c_client *client)
1234{ 1270{
1235 struct tuner *t = i2c_get_clientdata(client); 1271 struct tuner *t = i2c_get_clientdata(client);
1236 struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
1237 1272
1238 if (analog_ops->release) 1273 tuner_detach(&t->fe);
1239 analog_ops->release(&t->fe); 1274 t->fe.analog_demod_priv = NULL;
1240 1275
1241 list_del(&t->list); 1276 list_del(&t->list);
1242 kfree(t); 1277 kfree(t);
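
When CONFIG_MEDIA_ATTACH is set, the tuner core above now binds its sub-drivers through dvb_attach() and the new tuner_symbol_probe() helper instead of direct calls, so the sub-tuner modules become soft dependencies resolved at run time, and tuner_detach() drops the references taken by the attach path. A rough, self-contained sketch of the underlying symbol_request()/symbol_put() pattern, with a hypothetical exported my_attach() standing in for a real attach routine:

/* Sketch of the symbol_request()/symbol_put() pattern wrapped by
 * dvb_attach() and tuner_symbol_probe(); my_attach() is a placeholder
 * for an exported attach/probe function, not something in this patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmod.h>

extern int my_attach(int arg);          /* hypothetical exported symbol */

static int call_my_attach(int arg)
{
	typeof(&my_attach) fn = symbol_request(my_attach);
	int ret = -EINVAL;

	if (!fn) {
		printk(KERN_ERR "unable to resolve my_attach()\n");
		return ret;
	}
	ret = fn(arg);                  /* owning module is pinned here */
	symbol_put(my_attach);          /* one-shot probe: drop it again */
	return ret;
}

dvb_attach() differs from this one-shot form in that it keeps the module reference on success, which is why the tuner_detach() added above pairs each release hook with symbol_put_addr().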
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index f29a2cd0f2f2..6f9945b04e1f 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -1461,7 +1461,7 @@ static struct CHIPDESC chiplist[] = {
1461/* ---------------------------------------------------------------------- */ 1461/* ---------------------------------------------------------------------- */
1462/* i2c registration */ 1462/* i2c registration */
1463 1463
1464static int chip_probe(struct i2c_client *client) 1464static int chip_probe(struct i2c_client *client, const struct i2c_device_id *id)
1465{ 1465{
1466 struct CHIPSTATE *chip; 1466 struct CHIPSTATE *chip;
1467 struct CHIPDESC *desc; 1467 struct CHIPDESC *desc;
diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c
index bd201397a2ac..93bfd19dec7d 100644
--- a/drivers/media/video/upd64031a.c
+++ b/drivers/media/video/upd64031a.c
@@ -195,7 +195,8 @@ static int upd64031a_command(struct i2c_client *client, unsigned cmd, void *arg)
195 195
196/* i2c implementation */ 196/* i2c implementation */
197 197
198static int upd64031a_probe(struct i2c_client *client) 198static int upd64031a_probe(struct i2c_client *client,
199 const struct i2c_device_id *id)
199{ 200{
200 struct upd64031a_state *state; 201 struct upd64031a_state *state;
201 int i; 202 int i;
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
index 2d9a88f70c85..9ab712a56ce0 100644
--- a/drivers/media/video/upd64083.c
+++ b/drivers/media/video/upd64083.c
@@ -172,7 +172,8 @@ static int upd64083_command(struct i2c_client *client, unsigned cmd, void *arg)
172 172
173/* i2c implementation */ 173/* i2c implementation */
174 174
175static int upd64083_probe(struct i2c_client *client) 175static int upd64083_probe(struct i2c_client *client,
176 const struct i2c_device_id *id)
176{ 177{
177 struct upd64083_state *state; 178 struct upd64083_state *state;
178 int i; 179 int i;
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 64819353276a..17f542dfb366 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -70,12 +70,6 @@
70 70
71#define VICAM_HEADER_SIZE 64 71#define VICAM_HEADER_SIZE 64
72 72
73#define clamp( x, l, h ) max_t( __typeof__( x ), \
74 ( l ), \
75 min_t( __typeof__( x ), \
76 ( h ), \
77 ( x ) ) )
78
79/* Not sure what all the bytes in these char 73/* Not sure what all the bytes in these char
80 * arrays do, but they're necessary to make 74 * arrays do, but they're necessary to make
81 * the camera work. 75 * the camera work.
diff --git a/drivers/media/video/usbvision/Kconfig b/drivers/media/video/usbvision/Kconfig
index fc24ef05b3f3..74e1d3075a20 100644
--- a/drivers/media/video/usbvision/Kconfig
+++ b/drivers/media/video/usbvision/Kconfig
@@ -1,7 +1,7 @@
1config VIDEO_USBVISION 1config VIDEO_USBVISION
2 tristate "USB video devices based on Nogatech NT1003/1004/1005" 2 tristate "USB video devices based on Nogatech NT1003/1004/1005"
3 depends on I2C && VIDEO_V4L2 3 depends on I2C && VIDEO_V4L2
4 select VIDEO_TUNER 4 select MEDIA_TUNER
5 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO 5 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
6 ---help--- 6 ---help---
7 There are more than 50 different USB video devices based on 7 There are more than 50 different USB video devices based on
diff --git a/drivers/media/video/usbvision/Makefile b/drivers/media/video/usbvision/Makefile
index 9ac92a80c645..338718750945 100644
--- a/drivers/media/video/usbvision/Makefile
+++ b/drivers/media/video/usbvision/Makefile
@@ -3,3 +3,4 @@ usbvision-objs := usbvision-core.o usbvision-video.o usbvision-i2c.o usbvision-
3obj-$(CONFIG_VIDEO_USBVISION) += usbvision.o 3obj-$(CONFIG_VIDEO_USBVISION) += usbvision.o
4 4
5EXTRA_CFLAGS += -Idrivers/media/video 5EXTRA_CFLAGS += -Idrivers/media/video
6EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 34deb68ae568..e9dd996fd5df 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -710,13 +710,14 @@ EXPORT_SYMBOL(v4l2_chip_ident_i2c_client);
710/* Helper function for I2C legacy drivers */ 710/* Helper function for I2C legacy drivers */
711 711
712int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver *driver, 712int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver *driver,
713 const char *name, int (*probe)(struct i2c_client *)) 713 const char *name,
714 int (*probe)(struct i2c_client *, const struct i2c_device_id *))
714{ 715{
715 struct i2c_client *client; 716 struct i2c_client *client;
716 int err; 717 int err;
717 718
718 client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); 719 client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
719 if (client == 0) 720 if (!client)
720 return -ENOMEM; 721 return -ENOMEM;
721 722
722 client->addr = address; 723 client->addr = address;
@@ -724,7 +725,7 @@ int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver
724 client->driver = driver; 725 client->driver = driver;
725 strlcpy(client->name, name, sizeof(client->name)); 726 strlcpy(client->name, name, sizeof(client->name));
726 727
727 err = probe(client); 728 err = probe(client, NULL);
728 if (err == 0) { 729 if (err == 0) {
729 i2c_attach_client(client); 730 i2c_attach_client(client);
730 } else { 731 } else {
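The legacy v4l2_i2c_attach() path now invokes the new two-argument probe with a NULL i2c_device_id, matching the probe signatures converted throughout these media drivers; a probe reached this way therefore must tolerate id == NULL. A minimal sketch of the pattern (foo_probe is hypothetical):

    #include <linux/i2c.h>

    static int foo_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
    {
            /* id is NULL when the call arrives via the legacy attach
             * helper, so only use it when the i2c core supplied one */
            if (id)
                    dev_info(&client->dev, "probed as %s\n", id->name);
            return 0;
    }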
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index fc51e4918bbf..982f4463896c 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -97,7 +97,10 @@ int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
97void *videobuf_queue_to_vmalloc (struct videobuf_queue *q, 97void *videobuf_queue_to_vmalloc (struct videobuf_queue *q,
98 struct videobuf_buffer *buf) 98 struct videobuf_buffer *buf)
99{ 99{
100 return CALL(q, vmalloc, buf); 100 if (q->int_ops->vmalloc)
101 return q->int_ops->vmalloc(buf);
102 else
103 return NULL;
101} 104}
102EXPORT_SYMBOL_GPL(videobuf_queue_to_vmalloc); 105EXPORT_SYMBOL_GPL(videobuf_queue_to_vmalloc);
103 106
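videobuf_queue_to_vmalloc() now checks the optional vmalloc op explicitly and returns NULL when the backend does not implement it, instead of going through the CALL() wrapper. A generic sketch of guarding an optional op-table entry, with hypothetical names:

    struct my_buffer;                                /* opaque for the sketch */

    struct my_int_ops {
            void *(*vmalloc)(struct my_buffer *buf); /* optional hook */
    };

    struct my_queue {
            const struct my_int_ops *int_ops;
    };

    static void *queue_to_vaddr(struct my_queue *q, struct my_buffer *buf)
    {
            if (!q->int_ops || !q->int_ops->vmalloc)
                    return NULL;    /* backend does not provide the op */
            return q->int_ops->vmalloc(buf);
    }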
diff --git a/drivers/media/video/vp27smpx.c b/drivers/media/video/vp27smpx.c
index 282c81403c97..fac0deba24af 100644
--- a/drivers/media/video/vp27smpx.c
+++ b/drivers/media/video/vp27smpx.c
@@ -121,7 +121,8 @@ static int vp27smpx_command(struct i2c_client *client, unsigned cmd, void *arg)
121 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' 121 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
122 */ 122 */
123 123
124static int vp27smpx_probe(struct i2c_client *client) 124static int vp27smpx_probe(struct i2c_client *client,
125 const struct i2c_device_id *id)
125{ 126{
126 struct vp27smpx_state *state; 127 struct vp27smpx_state *state;
127 128
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
index 31795b4f8b63..0f8ed8461fba 100644
--- a/drivers/media/video/wm8739.c
+++ b/drivers/media/video/wm8739.c
@@ -261,7 +261,8 @@ static int wm8739_command(struct i2c_client *client, unsigned cmd, void *arg)
261 261
262/* i2c implementation */ 262/* i2c implementation */
263 263
264static int wm8739_probe(struct i2c_client *client) 264static int wm8739_probe(struct i2c_client *client,
265 const struct i2c_device_id *id)
265{ 266{
266 struct wm8739_state *state; 267 struct wm8739_state *state;
267 268
diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
index 869f9e7946b6..67a409e60c46 100644
--- a/drivers/media/video/wm8775.c
+++ b/drivers/media/video/wm8775.c
@@ -159,7 +159,8 @@ static int wm8775_command(struct i2c_client *client, unsigned cmd, void *arg)
159 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' 159 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
160 */ 160 */
161 161
162static int wm8775_probe(struct i2c_client *client) 162static int wm8775_probe(struct i2c_client *client,
163 const struct i2c_device_id *id)
163{ 164{
164 struct wm8775_state *state; 165 struct wm8775_state *state;
165 166
diff --git a/drivers/media/video/zoran_procfs.c b/drivers/media/video/zoran_procfs.c
index 328ed6e7ac6a..870bc5a70e3f 100644
--- a/drivers/media/video/zoran_procfs.c
+++ b/drivers/media/video/zoran_procfs.c
@@ -180,6 +180,7 @@ static ssize_t zoran_write(struct file *file, const char __user *buffer,
180} 180}
181 181
182static const struct file_operations zoran_operations = { 182static const struct file_operations zoran_operations = {
183 .owner = THIS_MODULE,
183 .open = zoran_open, 184 .open = zoran_open,
184 .read = seq_read, 185 .read = seq_read,
185 .write = zoran_write, 186 .write = zoran_write,
@@ -195,10 +196,8 @@ zoran_proc_init (struct zoran *zr)
195 char name[8]; 196 char name[8];
196 197
197 snprintf(name, 7, "zoran%d", zr->id); 198 snprintf(name, 7, "zoran%d", zr->id);
198 if ((zr->zoran_proc = create_proc_entry(name, 0, NULL))) { 199 zr->zoran_proc = proc_create_data(name, 0, NULL, &zoran_operations, zr);
199 zr->zoran_proc->data = zr; 200 if (zr->zoran_proc != NULL) {
200 zr->zoran_proc->owner = THIS_MODULE;
201 zr->zoran_proc->proc_fops = &zoran_operations;
202 dprintk(2, 201 dprintk(2,
203 KERN_INFO 202 KERN_INFO
204 "%s: procfs entry /proc/%s allocated. data=%p\n", 203 "%s: procfs entry /proc/%s allocated. data=%p\n",
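This zoran conversion, like the i2o_proc and hdpuftrs hunks further down, replaces create_proc_entry() plus manual assignment of ->data, ->owner and ->proc_fops with a single proc_create_data() (or proc_create()) call, so the entry never becomes visible before its file_operations are in place. A minimal sketch of the new-style registration, with hypothetical names:

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int foo_proc_open(struct inode *inode, struct file *file);  /* hypothetical */

    static const struct file_operations foo_proc_fops = {
            .owner   = THIS_MODULE,
            .open    = foo_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* one call registers the entry with its fops and ->data already set;
     * it returns NULL on failure, so no manual field assignment remains */
    static int foo_proc_init(void *priv)
    {
            if (!proc_create_data("foo", 0444, NULL, &foo_proc_fops, priv))
                    return -ENOMEM;
            return 0;
    }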
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a95314897402..81483de8c0fd 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -371,7 +371,7 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
371 /* connect the i2o_block_request to the request */ 371 /* connect the i2o_block_request to the request */
372 if (!req->special) { 372 if (!req->special) {
373 ireq = i2o_block_request_alloc(); 373 ireq = i2o_block_request_alloc();
374 if (unlikely(IS_ERR(ireq))) { 374 if (IS_ERR(ireq)) {
375 osm_debug("unable to allocate i2o_block_request!\n"); 375 osm_debug("unable to allocate i2o_block_request!\n");
376 return BLKPREP_DEFER; 376 return BLKPREP_DEFER;
377 } 377 }
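The unlikely() wrapper around IS_ERR() goes away presumably because the branch hint is already built into the helper, making the extra annotation redundant. Abridged from include/linux/err.h of roughly this vintage (exact wording may differ slightly):

    #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

    static inline long IS_ERR(const void *ptr)
    {
            return IS_ERR_VALUE((unsigned long)ptr);
    }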
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 6fdd072201f9..54a3016ff45d 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -1893,13 +1893,11 @@ static int i2o_proc_create_entries(struct proc_dir_entry *dir,
1893 struct proc_dir_entry *tmp; 1893 struct proc_dir_entry *tmp;
1894 1894
1895 while (i2o_pe->name) { 1895 while (i2o_pe->name) {
1896 tmp = create_proc_entry(i2o_pe->name, i2o_pe->mode, dir); 1896 tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir,
1897 i2o_pe->fops, data);
1897 if (!tmp) 1898 if (!tmp)
1898 return -1; 1899 return -1;
1899 1900
1900 tmp->data = data;
1901 tmp->proc_fops = i2o_pe->fops;
1902
1903 i2o_pe++; 1901 i2o_pe++;
1904 } 1902 }
1905 1903
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index f6f2d960cadb..ef8a492766a7 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -132,7 +132,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
132 132
133 if (iter >= MAX_ASIC_ISR_LOOPS) 133 if (iter >= MAX_ASIC_ISR_LOOPS)
134 printk(KERN_ERR "%s: interrupt processing overrun\n", 134 printk(KERN_ERR "%s: interrupt processing overrun\n",
135 __FUNCTION__); 135 __func__);
136} 136}
137 137
138static inline int asic3_irq_to_bank(struct asic3 *asic, int irq) 138static inline int asic3_irq_to_bank(struct asic3 *asic, int irq)
@@ -409,7 +409,7 @@ int asic3_gpio_get_value(struct asic3 *asic, unsigned gpio)
409 return asic3_get_gpio_d(asic, Status) & mask; 409 return asic3_get_gpio_d(asic, Status) & mask;
410 default: 410 default:
411 printk(KERN_ERR "%s: invalid GPIO value 0x%x", 411 printk(KERN_ERR "%s: invalid GPIO value 0x%x",
412 __FUNCTION__, gpio); 412 __func__, gpio);
413 return -EINVAL; 413 return -EINVAL;
414 } 414 }
415} 415}
@@ -437,7 +437,7 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val)
437 return; 437 return;
438 default: 438 default:
439 printk(KERN_ERR "%s: invalid GPIO value 0x%x", 439 printk(KERN_ERR "%s: invalid GPIO value 0x%x",
440 __FUNCTION__, gpio); 440 __func__, gpio);
441 return; 441 return;
442 } 442 }
443} 443}
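The __FUNCTION__ to __func__ conversions here, and in the sm501, ibmasm and ioc4 hunks below, swap the GCC-specific spelling for the standard C99 predefined identifier; both expand to the name of the enclosing function. For example (my_handler is hypothetical):

    #include <linux/kernel.h>

    static void my_handler(void)
    {
            /* prints "my_handler: unexpected state" */
            printk(KERN_WARNING "%s: unexpected state\n", __func__);
    }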
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 4edc120a6359..633cbba072f0 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -132,8 +132,9 @@ static struct ds1wm_platform_data ds1wm_pdata = {
132 .disable = ds1wm_disable, 132 .disable = ds1wm_disable,
133}; 133};
134 134
135static int ds1wm_device_add(struct device *pasic3_dev, int bus_shift) 135static int ds1wm_device_add(struct platform_device *pasic3_pdev, int bus_shift)
136{ 136{
137 struct device *pasic3_dev = &pasic3_pdev->dev;
137 struct pasic3_data *asic = pasic3_dev->driver_data; 138 struct pasic3_data *asic = pasic3_dev->driver_data;
138 struct platform_device *pdev; 139 struct platform_device *pdev;
139 int ret; 140 int ret;
@@ -144,8 +145,8 @@ static int ds1wm_device_add(struct device *pasic3_dev, int bus_shift)
144 return -ENOMEM; 145 return -ENOMEM;
145 } 146 }
146 147
147 ret = platform_device_add_resources(pdev, pdev->resource, 148 ret = platform_device_add_resources(pdev, pasic3_pdev->resource,
148 pdev->num_resources); 149 pasic3_pdev->num_resources);
149 if (ret < 0) { 150 if (ret < 0) {
150 dev_dbg(pasic3_dev, "failed to add DS1WM resources\n"); 151 dev_dbg(pasic3_dev, "failed to add DS1WM resources\n");
151 goto exit_pdev_put; 152 goto exit_pdev_put;
@@ -207,7 +208,7 @@ static int __init pasic3_probe(struct platform_device *pdev)
207 return -ENOMEM; 208 return -ENOMEM;
208 } 209 }
209 210
210 ret = ds1wm_device_add(dev, asic->bus_shift); 211 ret = ds1wm_device_add(pdev, asic->bus_shift);
211 if (ret < 0) 212 if (ret < 0)
212 dev_warn(dev, "failed to register DS1WM\n"); 213 dev_warn(dev, "failed to register DS1WM\n");
213 214
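Beyond the changed parameter type, the fix copies the DS1WM resources from the parent pasic3 platform device instead of from the freshly allocated child, which has no resources of its own at that point. A minimal sketch of the corrected pattern (add_child is hypothetical):

    #include <linux/platform_device.h>

    /* create a child device that reuses the parent's resource table */
    static int add_child(struct platform_device *parent)
    {
            struct platform_device *child;
            int ret;

            child = platform_device_alloc("ds1wm", -1);
            if (!child)
                    return -ENOMEM;

            ret = platform_device_add_resources(child, parent->resource,
                                                parent->num_resources);
            if (ret < 0)
                    goto err_put;

            ret = platform_device_add(child);
            if (ret < 0)
                    goto err_put;

            return 0;

    err_put:
            platform_device_put(child);
            return ret;
    }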
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 6e655b4c6682..2fe64734d8af 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -349,11 +349,11 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
349 mode &= 3; /* get current power mode */ 349 mode &= 3; /* get current power mode */
350 350
351 if (unit >= ARRAY_SIZE(sm->unit_power)) { 351 if (unit >= ARRAY_SIZE(sm->unit_power)) {
352 dev_err(dev, "%s: bad unit %d\n", __FUNCTION__, unit); 352 dev_err(dev, "%s: bad unit %d\n", __func__, unit);
353 goto already; 353 goto already;
354 } 354 }
355 355
356 dev_dbg(sm->dev, "%s: unit %d, cur %d, to %d\n", __FUNCTION__, unit, 356 dev_dbg(sm->dev, "%s: unit %d, cur %d, to %d\n", __func__, unit,
357 sm->unit_power[unit], to); 357 sm->unit_power[unit], to);
358 358
359 if (to == 0 && sm->unit_power[unit] == 0) { 359 if (to == 0 && sm->unit_power[unit] == 0) {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 297a48f85446..636af2862308 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -140,6 +140,7 @@ config ACER_WMI
140 depends on EXPERIMENTAL 140 depends on EXPERIMENTAL
141 depends on ACPI 141 depends on ACPI
142 depends on LEDS_CLASS 142 depends on LEDS_CLASS
143 depends on NEW_LEDS
143 depends on BACKLIGHT_CLASS_DEVICE 144 depends on BACKLIGHT_CLASS_DEVICE
144 depends on SERIO_I8042 145 depends on SERIO_I8042
145 select ACPI_WMI 146 select ACPI_WMI
@@ -160,6 +161,7 @@ config ASUS_LAPTOP
160 depends on ACPI 161 depends on ACPI
161 depends on EXPERIMENTAL && !ACPI_ASUS 162 depends on EXPERIMENTAL && !ACPI_ASUS
162 depends on LEDS_CLASS 163 depends on LEDS_CLASS
164 depends on NEW_LEDS
163 depends on BACKLIGHT_CLASS_DEVICE 165 depends on BACKLIGHT_CLASS_DEVICE
164 ---help--- 166 ---help---
165 This is the new Linux driver for Asus laptops. It may also support some 167 This is the new Linux driver for Asus laptops. It may also support some
@@ -241,10 +243,13 @@ config SONYPI_COMPAT
241config THINKPAD_ACPI 243config THINKPAD_ACPI
242 tristate "ThinkPad ACPI Laptop Extras" 244 tristate "ThinkPad ACPI Laptop Extras"
243 depends on X86 && ACPI 245 depends on X86 && ACPI
246 select BACKLIGHT_LCD_SUPPORT
244 select BACKLIGHT_CLASS_DEVICE 247 select BACKLIGHT_CLASS_DEVICE
245 select HWMON 248 select HWMON
246 select NVRAM 249 select NVRAM
247 depends on INPUT 250 select INPUT
251 select NEW_LEDS
252 select LEDS_CLASS
248 ---help--- 253 ---help---
249 This is a driver for the IBM and Lenovo ThinkPad laptops. It adds 254 This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
250 support for Fn-Fx key combinations, Bluetooth control, video 255 support for Fn-Fx key combinations, Bluetooth control, video
@@ -344,6 +349,7 @@ config ATMEL_SSC
344config INTEL_MENLOW 349config INTEL_MENLOW
345 tristate "Thermal Management driver for Intel menlow platform" 350 tristate "Thermal Management driver for Intel menlow platform"
346 depends on ACPI_THERMAL 351 depends on ACPI_THERMAL
352 select THERMAL
347 depends on X86 353 depends on X86
348 ---help--- 354 ---help---
349 ACPI thermal management enhancement driver on 355 ACPI thermal management enhancement driver on
@@ -351,6 +357,19 @@ config INTEL_MENLOW
351 357
352 If unsure, say N. 358 If unsure, say N.
353 359
360config EEEPC_LAPTOP
361 tristate "Eee PC Hotkey Driver (EXPERIMENTAL)"
362 depends on X86
363 depends on ACPI
364 depends on BACKLIGHT_CLASS_DEVICE
365 depends on HWMON
366 depends on EXPERIMENTAL
367 ---help---
368 This driver supports the Fn-Fx keys on Eee PC laptops.
369 It also adds the ability to switch camera/wlan on/off.
370
371 If you have an Eee PC laptop, say Y or M here.
372
354config ENCLOSURE_SERVICES 373config ENCLOSURE_SERVICES
355 tristate "Enclosure Services" 374 tristate "Enclosure Services"
356 default n 375 default n
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5914da434854..1952875a272e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -7,7 +7,8 @@ obj-$(CONFIG_IBM_ASM) += ibmasm/
7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ 7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o 8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
9obj-$(CONFIG_ACER_WMI) += acer-wmi.o 9obj-$(CONFIG_ACER_WMI) += acer-wmi.o
10obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o 10obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
11obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
11obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o 12obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
12obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o 13obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
13obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o 14obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
new file mode 100644
index 000000000000..6d727609097f
--- /dev/null
+++ b/drivers/misc/eeepc-laptop.c
@@ -0,0 +1,666 @@
1/*
 2 * eeepc-laptop.c - Asus Eee PC extras
3 *
4 * Based on asus_acpi.c as patched for the Eee PC by Asus:
5 * ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar
6 * Based on eee.c from eeepc-linux
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/types.h>
23#include <linux/platform_device.h>
24#include <linux/backlight.h>
25#include <linux/fb.h>
26#include <linux/hwmon.h>
27#include <linux/hwmon-sysfs.h>
28#include <acpi/acpi_drivers.h>
29#include <acpi/acpi_bus.h>
30#include <linux/uaccess.h>
31
32#define EEEPC_LAPTOP_VERSION "0.1"
33
34#define EEEPC_HOTK_NAME "Eee PC Hotkey Driver"
35#define EEEPC_HOTK_FILE "eeepc"
36#define EEEPC_HOTK_CLASS "hotkey"
37#define EEEPC_HOTK_DEVICE_NAME "Hotkey"
38#define EEEPC_HOTK_HID "ASUS010"
39
40#define EEEPC_LOG EEEPC_HOTK_FILE ": "
41#define EEEPC_ERR KERN_ERR EEEPC_LOG
42#define EEEPC_WARNING KERN_WARNING EEEPC_LOG
43#define EEEPC_NOTICE KERN_NOTICE EEEPC_LOG
44#define EEEPC_INFO KERN_INFO EEEPC_LOG
45
46/*
47 * Definitions for Asus EeePC
48 */
49#define NOTIFY_WLAN_ON 0x10
50#define NOTIFY_BRN_MIN 0x20
51#define NOTIFY_BRN_MAX 0x2f
52
53enum {
54 DISABLE_ASL_WLAN = 0x0001,
55 DISABLE_ASL_BLUETOOTH = 0x0002,
56 DISABLE_ASL_IRDA = 0x0004,
57 DISABLE_ASL_CAMERA = 0x0008,
58 DISABLE_ASL_TV = 0x0010,
59 DISABLE_ASL_GPS = 0x0020,
60 DISABLE_ASL_DISPLAYSWITCH = 0x0040,
61 DISABLE_ASL_MODEM = 0x0080,
62 DISABLE_ASL_CARDREADER = 0x0100
63};
64
65enum {
66 CM_ASL_WLAN = 0,
67 CM_ASL_BLUETOOTH,
68 CM_ASL_IRDA,
69 CM_ASL_1394,
70 CM_ASL_CAMERA,
71 CM_ASL_TV,
72 CM_ASL_GPS,
73 CM_ASL_DVDROM,
74 CM_ASL_DISPLAYSWITCH,
75 CM_ASL_PANELBRIGHT,
76 CM_ASL_BIOSFLASH,
77 CM_ASL_ACPIFLASH,
78 CM_ASL_CPUFV,
79 CM_ASL_CPUTEMPERATURE,
80 CM_ASL_FANCPU,
81 CM_ASL_FANCHASSIS,
82 CM_ASL_USBPORT1,
83 CM_ASL_USBPORT2,
84 CM_ASL_USBPORT3,
85 CM_ASL_MODEM,
86 CM_ASL_CARDREADER,
87 CM_ASL_LID
88};
89
90const char *cm_getv[] = {
91 "WLDG", NULL, NULL, NULL,
92 "CAMG", NULL, NULL, NULL,
93 NULL, "PBLG", NULL, NULL,
94 "CFVG", NULL, NULL, NULL,
95 "USBG", NULL, NULL, "MODG",
96 "CRDG", "LIDG"
97};
98
99const char *cm_setv[] = {
100 "WLDS", NULL, NULL, NULL,
101 "CAMS", NULL, NULL, NULL,
102 "SDSP", "PBLS", "HDPS", NULL,
103 "CFVS", NULL, NULL, NULL,
104 "USBG", NULL, NULL, "MODS",
105 "CRDS", NULL
106};
107
108#define EEEPC_EC "\\_SB.PCI0.SBRG.EC0."
109
110#define EEEPC_EC_FAN_PWM EEEPC_EC "SC02" /* Fan PWM duty cycle (%) */
111#define EEEPC_EC_SC02 0x63
112#define EEEPC_EC_FAN_HRPM EEEPC_EC "SC05" /* High byte, fan speed (RPM) */
113#define EEEPC_EC_FAN_LRPM EEEPC_EC "SC06" /* Low byte, fan speed (RPM) */
114#define EEEPC_EC_FAN_CTRL EEEPC_EC "SFB3" /* Byte containing SF25 */
115#define EEEPC_EC_SFB3 0xD3
116
117/*
118 * This is the main structure, we can use it to store useful information
119 * about the hotk device
120 */
121struct eeepc_hotk {
122 struct acpi_device *device; /* the device we are in */
123 acpi_handle handle; /* the handle of the hotk device */
124 u32 cm_supported; /* the control methods supported
125 by this BIOS */
126 uint init_flag; /* Init flags */
127 u16 event_count[128]; /* count for each event */
128};
129
130/* The actual device the driver binds to */
131static struct eeepc_hotk *ehotk;
132
133/* Platform device/driver */
134static struct platform_driver platform_driver = {
135 .driver = {
136 .name = EEEPC_HOTK_FILE,
137 .owner = THIS_MODULE,
138 }
139};
140
141static struct platform_device *platform_device;
142
143/*
144 * The hotkey driver declaration
145 */
146static int eeepc_hotk_add(struct acpi_device *device);
147static int eeepc_hotk_remove(struct acpi_device *device, int type);
148
149static const struct acpi_device_id eeepc_device_ids[] = {
150 {EEEPC_HOTK_HID, 0},
151 {"", 0},
152};
153MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
154
155static struct acpi_driver eeepc_hotk_driver = {
156 .name = EEEPC_HOTK_NAME,
157 .class = EEEPC_HOTK_CLASS,
158 .ids = eeepc_device_ids,
159 .ops = {
160 .add = eeepc_hotk_add,
161 .remove = eeepc_hotk_remove,
162 },
163};
164
165/* The backlight device /sys/class/backlight */
166static struct backlight_device *eeepc_backlight_device;
167
168/* The hwmon device */
169static struct device *eeepc_hwmon_device;
170
171/*
172 * The backlight class declaration
173 */
174static int read_brightness(struct backlight_device *bd);
175static int update_bl_status(struct backlight_device *bd);
176static struct backlight_ops eeepcbl_ops = {
177 .get_brightness = read_brightness,
178 .update_status = update_bl_status,
179};
180
181MODULE_AUTHOR("Corentin Chary, Eric Cooper");
182MODULE_DESCRIPTION(EEEPC_HOTK_NAME);
183MODULE_LICENSE("GPL");
184
185/*
186 * ACPI Helpers
187 */
188static int write_acpi_int(acpi_handle handle, const char *method, int val,
189 struct acpi_buffer *output)
190{
191 struct acpi_object_list params;
192 union acpi_object in_obj;
193 acpi_status status;
194
195 params.count = 1;
196 params.pointer = &in_obj;
197 in_obj.type = ACPI_TYPE_INTEGER;
198 in_obj.integer.value = val;
199
200 status = acpi_evaluate_object(handle, (char *)method, &params, output);
201 return (status == AE_OK ? 0 : -1);
202}
203
204static int read_acpi_int(acpi_handle handle, const char *method, int *val)
205{
206 acpi_status status;
207 ulong result;
208
209 status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
210 if (ACPI_FAILURE(status)) {
211 *val = -1;
212 return -1;
213 } else {
214 *val = result;
215 return 0;
216 }
217}
218
219static int set_acpi(int cm, int value)
220{
221 if (ehotk->cm_supported & (0x1 << cm)) {
222 const char *method = cm_setv[cm];
223 if (method == NULL)
224 return -ENODEV;
225 if (write_acpi_int(ehotk->handle, method, value, NULL))
226 printk(EEEPC_WARNING "Error writing %s\n", method);
227 }
228 return 0;
229}
230
231static int get_acpi(int cm)
232{
233 int value = -1;
234 if ((ehotk->cm_supported & (0x1 << cm))) {
235 const char *method = cm_getv[cm];
236 if (method == NULL)
237 return -ENODEV;
238 if (read_acpi_int(ehotk->handle, method, &value))
239 printk(EEEPC_WARNING "Error reading %s\n", method);
240 }
241 return value;
242}
243
244/*
245 * Backlight
246 */
247static int read_brightness(struct backlight_device *bd)
248{
249 return get_acpi(CM_ASL_PANELBRIGHT);
250}
251
252static int set_brightness(struct backlight_device *bd, int value)
253{
254 value = max(0, min(15, value));
255 return set_acpi(CM_ASL_PANELBRIGHT, value);
256}
257
258static int update_bl_status(struct backlight_device *bd)
259{
260 return set_brightness(bd, bd->props.brightness);
261}
262
263/*
264 * Sys helpers
265 */
266static int parse_arg(const char *buf, unsigned long count, int *val)
267{
268 if (!count)
269 return 0;
270 if (sscanf(buf, "%i", val) != 1)
271 return -EINVAL;
272 return count;
273}
274
275static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
276{
277 int rv, value;
278
279 rv = parse_arg(buf, count, &value);
280 if (rv > 0)
281 set_acpi(cm, value);
282 return rv;
283}
284
285static ssize_t show_sys_acpi(int cm, char *buf)
286{
287 return sprintf(buf, "%d\n", get_acpi(cm));
288}
289
290#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \
291 static ssize_t show_##_name(struct device *dev, \
292 struct device_attribute *attr, \
293 char *buf) \
294 { \
295 return show_sys_acpi(_cm, buf); \
296 } \
297 static ssize_t store_##_name(struct device *dev, \
298 struct device_attribute *attr, \
299 const char *buf, size_t count) \
300 { \
301 return store_sys_acpi(_cm, buf, count); \
302 } \
303 static struct device_attribute dev_attr_##_name = { \
304 .attr = { \
305 .name = __stringify(_name), \
306 .mode = 0644 }, \
307 .show = show_##_name, \
308 .store = store_##_name, \
309 }
310
311EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
312EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
313EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
314EEEPC_CREATE_DEVICE_ATTR(wlan, CM_ASL_WLAN);
315
316static struct attribute *platform_attributes[] = {
317 &dev_attr_camera.attr,
318 &dev_attr_cardr.attr,
319 &dev_attr_disp.attr,
320 &dev_attr_wlan.attr,
321 NULL
322};
323
324static struct attribute_group platform_attribute_group = {
325 .attrs = platform_attributes
326};
327
328/*
329 * Hotkey functions
330 */
331static int eeepc_hotk_check(void)
332{
333 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
334 int result;
335
336 result = acpi_bus_get_status(ehotk->device);
337 if (result)
338 return result;
339 if (ehotk->device->status.present) {
340 if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
341 &buffer)) {
342 printk(EEEPC_ERR "Hotkey initialization failed\n");
343 return -ENODEV;
344 } else {
345 printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n",
346 ehotk->init_flag);
347 }
348 /* get control methods supported */
 349 if (read_acpi_int(ehotk->handle, "CMSG",
 350 &ehotk->cm_supported)) {
351 printk(EEEPC_ERR
352 "Get control methods supported failed\n");
353 return -ENODEV;
354 } else {
355 printk(EEEPC_INFO
356 "Get control methods supported: 0x%x\n",
357 ehotk->cm_supported);
358 }
359 } else {
360 printk(EEEPC_ERR "Hotkey device not present, aborting\n");
361 return -EINVAL;
362 }
363 return 0;
364}
365
366static void notify_wlan(u32 *event)
367{
368 /* if DISABLE_ASL_WLAN is set, the notify code for fn+f2
369 will always be 0x10 */
370 if (ehotk->cm_supported & (0x1 << CM_ASL_WLAN)) {
371 const char *method = cm_getv[CM_ASL_WLAN];
372 int value;
373 if (read_acpi_int(ehotk->handle, method, &value))
374 printk(EEEPC_WARNING "Error reading %s\n",
375 method);
376 else if (value == 1)
377 *event = 0x11;
378 }
379}
380
381static void notify_brn(void)
382{
383 struct backlight_device *bd = eeepc_backlight_device;
384 bd->props.brightness = read_brightness(bd);
385}
386
387static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
388{
389 if (!ehotk)
390 return;
391 if (event == NOTIFY_WLAN_ON && (DISABLE_ASL_WLAN & ehotk->init_flag))
392 notify_wlan(&event);
393 if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
394 notify_brn();
395 acpi_bus_generate_proc_event(ehotk->device, event,
396 ehotk->event_count[event % 128]++);
397}
398
399static int eeepc_hotk_add(struct acpi_device *device)
400{
401 acpi_status status = AE_OK;
402 int result;
403
404 if (!device)
405 return -EINVAL;
406 printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n");
407 ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
408 if (!ehotk)
409 return -ENOMEM;
410 ehotk->init_flag = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
411 ehotk->handle = device->handle;
412 strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
413 strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
414 acpi_driver_data(device) = ehotk;
415 ehotk->device = device;
416 result = eeepc_hotk_check();
417 if (result)
418 goto end;
419 status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
420 eeepc_hotk_notify, ehotk);
421 if (ACPI_FAILURE(status))
422 printk(EEEPC_ERR "Error installing notify handler\n");
423 end:
424 if (result) {
425 kfree(ehotk);
426 ehotk = NULL;
427 }
428 return result;
429}
430
431static int eeepc_hotk_remove(struct acpi_device *device, int type)
432{
433 acpi_status status = 0;
434
435 if (!device || !acpi_driver_data(device))
436 return -EINVAL;
437 status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
438 eeepc_hotk_notify);
439 if (ACPI_FAILURE(status))
440 printk(EEEPC_ERR "Error removing notify handler\n");
441 kfree(ehotk);
442 return 0;
443}
444
445/*
446 * Hwmon
447 */
448static int eeepc_get_fan_pwm(void)
449{
450 int value = 0;
451
452 read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value);
453 return (value);
454}
455
456static void eeepc_set_fan_pwm(int value)
457{
458 value = SENSORS_LIMIT(value, 0, 100);
459 ec_write(EEEPC_EC_SC02, value);
460}
461
462static int eeepc_get_fan_rpm(void)
463{
464 int high = 0;
465 int low = 0;
466
467 read_acpi_int(NULL, EEEPC_EC_FAN_HRPM, &high);
468 read_acpi_int(NULL, EEEPC_EC_FAN_LRPM, &low);
469 return (high << 8 | low);
470}
471
472static int eeepc_get_fan_ctrl(void)
473{
474 int value = 0;
475
476 read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
477 return ((value & 0x02 ? 1 : 0));
478}
479
480static void eeepc_set_fan_ctrl(int manual)
481{
482 int value = 0;
483
484 read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
485 if (manual)
486 value |= 0x02;
487 else
488 value &= ~0x02;
489 ec_write(EEEPC_EC_SFB3, value);
490}
491
492static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
493{
494 int rv, value;
495
496 rv = parse_arg(buf, count, &value);
497 if (rv > 0)
498 set(value);
499 return rv;
500}
501
502static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
503{
504 return sprintf(buf, "%d\n", get());
505}
506
 507#define EEEPC_CREATE_SENSOR_ATTR(_name, _mode, _get, _set) \
508 static ssize_t show_##_name(struct device *dev, \
509 struct device_attribute *attr, \
510 char *buf) \
511 { \
 512 return show_sys_hwmon(_get, buf); \
513 } \
514 static ssize_t store_##_name(struct device *dev, \
515 struct device_attribute *attr, \
516 const char *buf, size_t count) \
517 { \
 518 return store_sys_hwmon(_set, buf, count); \
519 } \
520 static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
521
522EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
523EEEPC_CREATE_SENSOR_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
524 eeepc_get_fan_pwm, eeepc_set_fan_pwm);
525EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
526 eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
527
528static struct attribute *hwmon_attributes[] = {
529 &sensor_dev_attr_fan1_pwm.dev_attr.attr,
530 &sensor_dev_attr_fan1_input.dev_attr.attr,
531 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
532 NULL
533};
534
535static struct attribute_group hwmon_attribute_group = {
536 .attrs = hwmon_attributes
537};
538
539/*
540 * exit/init
541 */
542static void eeepc_backlight_exit(void)
543{
544 if (eeepc_backlight_device)
545 backlight_device_unregister(eeepc_backlight_device);
546 eeepc_backlight_device = NULL;
547}
548
549static void eeepc_hwmon_exit(void)
550{
551 struct device *hwmon;
552
553 hwmon = eeepc_hwmon_device;
554 if (!hwmon)
 555 return;
556 hwmon_device_unregister(hwmon);
557 sysfs_remove_group(&hwmon->kobj,
558 &hwmon_attribute_group);
559 eeepc_hwmon_device = NULL;
560}
561
562static void __exit eeepc_laptop_exit(void)
563{
564 eeepc_backlight_exit();
565 eeepc_hwmon_exit();
566 acpi_bus_unregister_driver(&eeepc_hotk_driver);
567 sysfs_remove_group(&platform_device->dev.kobj,
568 &platform_attribute_group);
569 platform_device_unregister(platform_device);
570 platform_driver_unregister(&platform_driver);
571}
572
573static int eeepc_backlight_init(struct device *dev)
574{
575 struct backlight_device *bd;
576
577 bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
578 NULL, &eeepcbl_ops);
579 if (IS_ERR(bd)) {
580 printk(EEEPC_ERR
581 "Could not register eeepc backlight device\n");
582 eeepc_backlight_device = NULL;
583 return PTR_ERR(bd);
584 }
585 eeepc_backlight_device = bd;
586 bd->props.max_brightness = 15;
587 bd->props.brightness = read_brightness(NULL);
588 bd->props.power = FB_BLANK_UNBLANK;
589 backlight_update_status(bd);
590 return 0;
591}
592
593static int eeepc_hwmon_init(struct device *dev)
594{
595 struct device *hwmon;
596 int result;
597
598 hwmon = hwmon_device_register(dev);
599 if (IS_ERR(hwmon)) {
600 printk(EEEPC_ERR
601 "Could not register eeepc hwmon device\n");
602 eeepc_hwmon_device = NULL;
603 return PTR_ERR(hwmon);
604 }
605 eeepc_hwmon_device = hwmon;
606 result = sysfs_create_group(&hwmon->kobj,
607 &hwmon_attribute_group);
608 if (result)
609 eeepc_hwmon_exit();
610 return result;
611}
612
613static int __init eeepc_laptop_init(void)
614{
615 struct device *dev;
616 int result;
617
618 if (acpi_disabled)
619 return -ENODEV;
620 result = acpi_bus_register_driver(&eeepc_hotk_driver);
621 if (result < 0)
622 return result;
623 if (!ehotk) {
624 acpi_bus_unregister_driver(&eeepc_hotk_driver);
625 return -ENODEV;
626 }
627 dev = acpi_get_physical_device(ehotk->device->handle);
628 result = eeepc_backlight_init(dev);
629 if (result)
630 goto fail_backlight;
631 result = eeepc_hwmon_init(dev);
632 if (result)
633 goto fail_hwmon;
634 /* Register platform stuff */
635 result = platform_driver_register(&platform_driver);
636 if (result)
637 goto fail_platform_driver;
638 platform_device = platform_device_alloc(EEEPC_HOTK_FILE, -1);
639 if (!platform_device) {
640 result = -ENOMEM;
641 goto fail_platform_device1;
642 }
643 result = platform_device_add(platform_device);
644 if (result)
645 goto fail_platform_device2;
646 result = sysfs_create_group(&platform_device->dev.kobj,
647 &platform_attribute_group);
648 if (result)
649 goto fail_sysfs;
650 return 0;
651fail_sysfs:
652 platform_device_del(platform_device);
653fail_platform_device2:
654 platform_device_put(platform_device);
655fail_platform_device1:
656 platform_driver_unregister(&platform_driver);
657fail_platform_driver:
658 eeepc_hwmon_exit();
659fail_hwmon:
660 eeepc_backlight_exit();
661fail_backlight:
662 return result;
663}
664
665module_init(eeepc_laptop_init);
666module_exit(eeepc_laptop_exit);
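Once the driver binds, the camera/cardr/disp/wlan attributes and the fan files are exposed through sysfs and hwmon. The exact paths depend on how the platform and hwmon cores enumerate the devices, so the ones below are illustrative only; the platform device name "eeepc" comes from EEEPC_HOTK_FILE. A small userspace sketch under those assumptions:

    #include <stdio.h>

    int main(void)
    {
            /* illustrative paths, not guaranteed by the driver itself */
            FILE *cam = fopen("/sys/devices/platform/eeepc/camera", "w");
            FILE *fan = fopen("/sys/class/hwmon/hwmon0/fan1_input", "r");
            int rpm = 0;

            if (cam) {
                    fputs("1", cam);  /* writing 1 presumably enables the camera */
                    fclose(cam);
            }
            if (fan) {
                    if (fscanf(fan, "%d", &rpm) == 1)
                            printf("fan speed: %d rpm\n", rpm);
                    fclose(fan);
            }
            return 0;
    }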
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
index 302e92418bbe..ff51ab67231c 100644
--- a/drivers/misc/hdpuftrs/hdpu_cpustate.c
+++ b/drivers/misc/hdpuftrs/hdpu_cpustate.c
@@ -210,13 +210,10 @@ static int hdpu_cpustate_probe(struct platform_device *pdev)
210 return ret; 210 return ret;
211 } 211 }
212 212
213 proc_de = create_proc_entry("sky_cpustate", 0666, &proc_root); 213 proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate);
214 if (!proc_de) { 214 if (!proc_de) {
215 printk(KERN_WARNING "sky_cpustate: " 215 printk(KERN_WARNING "sky_cpustate: "
216 "Unable to create proc entry\n"); 216 "Unable to create proc entry\n");
217 } else {
218 proc_de->proc_fops = &proc_cpustate;
219 proc_de->owner = THIS_MODULE;
220 } 217 }
221 218
222 printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); 219 printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
index 2fa36f7a6eb3..08e26beefe64 100644
--- a/drivers/misc/hdpuftrs/hdpu_nexus.c
+++ b/drivers/misc/hdpuftrs/hdpu_nexus.c
@@ -102,22 +102,17 @@ static int hdpu_nexus_probe(struct platform_device *pdev)
102 printk(KERN_ERR "sky_nexus: Could not map slot id\n"); 102 printk(KERN_ERR "sky_nexus: Could not map slot id\n");
103 } 103 }
104 104
105 hdpu_slot_id = create_proc_entry("sky_slot_id", 0666, &proc_root); 105 hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id);
106 if (!hdpu_slot_id) { 106 if (!hdpu_slot_id) {
107 printk(KERN_WARNING "sky_nexus: " 107 printk(KERN_WARNING "sky_nexus: "
108 "Unable to create proc dir entry: sky_slot_id\n"); 108 "Unable to create proc dir entry: sky_slot_id\n");
109 } else {
110 hdpu_slot_id->proc_fops = &proc_slot_id;
111 hdpu_slot_id->owner = THIS_MODULE;
112 } 109 }
113 110
114 hdpu_chassis_id = create_proc_entry("sky_chassis_id", 0666, &proc_root); 111 hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL,
115 if (!hdpu_chassis_id) { 112 &proc_chassis_id);
113 if (!hdpu_chassis_id)
116 printk(KERN_WARNING "sky_nexus: " 114 printk(KERN_WARNING "sky_nexus: "
117 "Unable to create proc dir entry: sky_chassis_id\n"); 115 "Unable to create proc dir entry: sky_chassis_id\n");
118 } else {
119 hdpu_chassis_id->proc_fops = &proc_chassis_id;
120 hdpu_chassis_id->owner = THIS_MODULE;
121 } 116 }
122 117
123 return 0; 118 return 0;
@@ -128,8 +123,8 @@ static int hdpu_nexus_remove(struct platform_device *pdev)
128 slot_id = -1; 123 slot_id = -1;
129 chassis_id = -1; 124 chassis_id = -1;
130 125
131 remove_proc_entry("sky_slot_id", &proc_root); 126 remove_proc_entry("sky_slot_id", NULL);
132 remove_proc_entry("sky_chassis_id", &proc_root); 127 remove_proc_entry("sky_chassis_id", NULL);
133 128
134 hdpu_slot_id = 0; 129 hdpu_slot_id = 0;
135 hdpu_chassis_id = 0; 130 hdpu_chassis_id = 0;
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 1a0e7978226a..276d3fb68094 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -96,7 +96,7 @@ static inline void do_exec_command(struct service_processor *sp)
96{ 96{
97 char tsbuf[32]; 97 char tsbuf[32];
98 98
99 dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); 99 dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
100 100
101 if (ibmasm_send_i2o_message(sp)) { 101 if (ibmasm_send_i2o_message(sp)) {
102 sp->current_command->status = IBMASM_CMD_FAILED; 102 sp->current_command->status = IBMASM_CMD_FAILED;
@@ -119,7 +119,7 @@ void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
119 unsigned long flags; 119 unsigned long flags;
120 char tsbuf[32]; 120 char tsbuf[32];
121 121
122 dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); 122 dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
123 123
124 spin_lock_irqsave(&sp->lock, flags); 124 spin_lock_irqsave(&sp->lock, flags);
125 125
@@ -139,7 +139,7 @@ static void exec_next_command(struct service_processor *sp)
139 unsigned long flags; 139 unsigned long flags;
140 char tsbuf[32]; 140 char tsbuf[32];
141 141
142 dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); 142 dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
143 143
144 spin_lock_irqsave(&sp->lock, flags); 144 spin_lock_irqsave(&sp->lock, flags);
145 sp->current_command = dequeue_command(sp); 145 sp->current_command = dequeue_command(sp);
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 3036e785b3e4..1bc4306572a4 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -75,9 +75,9 @@ void ibmasm_heartbeat_exit(struct service_processor *sp)
75{ 75{
76 char tsbuf[32]; 76 char tsbuf[32];
77 77
78 dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); 78 dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
79 ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL); 79 ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL);
80 dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); 80 dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
81 suspend_heartbeats = 1; 81 suspend_heartbeats = 1;
82 command_put(sp->heartbeat); 82 command_put(sp->heartbeat);
83} 83}
@@ -88,7 +88,7 @@ void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size
88 struct dot_command_header *header = (struct dot_command_header *)cmd->buffer; 88 struct dot_command_header *header = (struct dot_command_header *)cmd->buffer;
89 char tsbuf[32]; 89 char tsbuf[32];
90 90
91 dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); 91 dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
92 if (suspend_heartbeats) 92 if (suspend_heartbeats)
93 return; 93 return;
94 94
diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c
index 0c0bb3093e07..80a136352408 100644
--- a/drivers/misc/intel_menlow.c
+++ b/drivers/misc/intel_menlow.c
@@ -175,19 +175,17 @@ static int intel_menlow_memory_add(struct acpi_device *device)
175 goto end; 175 goto end;
176 } 176 }
177 177
178 if (cdev) { 178 acpi_driver_data(device) = cdev;
179 acpi_driver_data(device) = cdev; 179 result = sysfs_create_link(&device->dev.kobj,
180 result = sysfs_create_link(&device->dev.kobj, 180 &cdev->device.kobj, "thermal_cooling");
181 &cdev->device.kobj, "thermal_cooling"); 181 if (result)
182 if (result) 182 goto unregister;
183 goto unregister; 183
184 184 result = sysfs_create_link(&cdev->device.kobj,
185 result = sysfs_create_link(&cdev->device.kobj, 185 &device->dev.kobj, "device");
186 &device->dev.kobj, "device"); 186 if (result) {
187 if (result) { 187 sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
188 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 188 goto unregister;
189 goto unregister;
190 }
191 } 189 }
192 190
193 end: 191 end:
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 05172d2613d6..6f76573e7c8a 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -75,7 +75,7 @@ ioc4_register_submodule(struct ioc4_submodule *is)
75 printk(KERN_WARNING 75 printk(KERN_WARNING
76 "%s: IOC4 submodule %s probe failed " 76 "%s: IOC4 submodule %s probe failed "
77 "for pci_dev %s", 77 "for pci_dev %s",
78 __FUNCTION__, module_name(is->is_owner), 78 __func__, module_name(is->is_owner),
79 pci_name(idd->idd_pdev)); 79 pci_name(idd->idd_pdev));
80 } 80 }
81 } 81 }
@@ -102,7 +102,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is)
102 printk(KERN_WARNING 102 printk(KERN_WARNING
103 "%s: IOC4 submodule %s remove failed " 103 "%s: IOC4 submodule %s remove failed "
104 "for pci_dev %s.\n", 104 "for pci_dev %s.\n",
105 __FUNCTION__, module_name(is->is_owner), 105 __func__, module_name(is->is_owner),
106 pci_name(idd->idd_pdev)); 106 pci_name(idd->idd_pdev));
107 } 107 }
108 } 108 }
@@ -282,7 +282,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
282 if ((ret = pci_enable_device(pdev))) { 282 if ((ret = pci_enable_device(pdev))) {
283 printk(KERN_WARNING 283 printk(KERN_WARNING
284 "%s: Failed to enable IOC4 device for pci_dev %s.\n", 284 "%s: Failed to enable IOC4 device for pci_dev %s.\n",
285 __FUNCTION__, pci_name(pdev)); 285 __func__, pci_name(pdev));
286 goto out; 286 goto out;
287 } 287 }
288 pci_set_master(pdev); 288 pci_set_master(pdev);
@@ -292,7 +292,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
292 if (!idd) { 292 if (!idd) {
293 printk(KERN_WARNING 293 printk(KERN_WARNING
294 "%s: Failed to allocate IOC4 data for pci_dev %s.\n", 294 "%s: Failed to allocate IOC4 data for pci_dev %s.\n",
295 __FUNCTION__, pci_name(pdev)); 295 __func__, pci_name(pdev));
296 ret = -ENODEV; 296 ret = -ENODEV;
297 goto out_idd; 297 goto out_idd;
298 } 298 }
@@ -307,7 +307,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
307 printk(KERN_WARNING 307 printk(KERN_WARNING
308 "%s: Unable to find IOC4 misc resource " 308 "%s: Unable to find IOC4 misc resource "
309 "for pci_dev %s.\n", 309 "for pci_dev %s.\n",
310 __FUNCTION__, pci_name(idd->idd_pdev)); 310 __func__, pci_name(idd->idd_pdev));
311 ret = -ENODEV; 311 ret = -ENODEV;
312 goto out_pci; 312 goto out_pci;
313 } 313 }
@@ -316,7 +316,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
316 printk(KERN_WARNING 316 printk(KERN_WARNING
317 "%s: Unable to request IOC4 misc region " 317 "%s: Unable to request IOC4 misc region "
318 "for pci_dev %s.\n", 318 "for pci_dev %s.\n",
319 __FUNCTION__, pci_name(idd->idd_pdev)); 319 __func__, pci_name(idd->idd_pdev));
320 ret = -ENODEV; 320 ret = -ENODEV;
321 goto out_pci; 321 goto out_pci;
322 } 322 }
@@ -326,7 +326,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
326 printk(KERN_WARNING 326 printk(KERN_WARNING
327 "%s: Unable to remap IOC4 misc region " 327 "%s: Unable to remap IOC4 misc region "
328 "for pci_dev %s.\n", 328 "for pci_dev %s.\n",
329 __FUNCTION__, pci_name(idd->idd_pdev)); 329 __func__, pci_name(idd->idd_pdev));
330 ret = -ENODEV; 330 ret = -ENODEV;
331 goto out_misc_region; 331 goto out_misc_region;
332 } 332 }
@@ -372,7 +372,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
372 printk(KERN_WARNING 372 printk(KERN_WARNING
373 "%s: IOC4 submodule 0x%s probe failed " 373 "%s: IOC4 submodule 0x%s probe failed "
374 "for pci_dev %s.\n", 374 "for pci_dev %s.\n",
375 __FUNCTION__, module_name(is->is_owner), 375 __func__, module_name(is->is_owner),
376 pci_name(idd->idd_pdev)); 376 pci_name(idd->idd_pdev));
377 } 377 }
378 } 378 }
@@ -406,7 +406,7 @@ ioc4_remove(struct pci_dev *pdev)
406 printk(KERN_WARNING 406 printk(KERN_WARNING
407 "%s: IOC4 submodule 0x%s remove failed " 407 "%s: IOC4 submodule 0x%s remove failed "
408 "for pci_dev %s.\n", 408 "for pci_dev %s.\n",
409 __FUNCTION__, module_name(is->is_owner), 409 __func__, module_name(is->is_owner),
410 pci_name(idd->idd_pdev)); 410 pci_name(idd->idd_pdev));
411 } 411 }
412 } 412 }
@@ -418,7 +418,7 @@ ioc4_remove(struct pci_dev *pdev)
418 printk(KERN_WARNING 418 printk(KERN_WARNING
419 "%s: Unable to get IOC4 misc mapping for pci_dev %s. " 419 "%s: Unable to get IOC4 misc mapping for pci_dev %s. "
420 "Device removal may be incomplete.\n", 420 "Device removal may be incomplete.\n",
421 __FUNCTION__, pci_name(idd->idd_pdev)); 421 __func__, pci_name(idd->idd_pdev));
422 } 422 }
423 release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); 423 release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
424 424
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 6d6286c4eeac..fa394104339c 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -47,6 +47,7 @@
47 * to test the HW NMI watchdog 47 * to test the HW NMI watchdog
48 * F## = Break at do_fork for ## iterations 48 * F## = Break at do_fork for ## iterations
49 * S## = Break at sys_open for ## iterations 49 * S## = Break at sys_open for ## iterations
50 * I## = Run the single step test ## iterations
50 * 51 *
51 * NOTE: that the do_fork and sys_open tests are mutually exclusive. 52 * NOTE: that the do_fork and sys_open tests are mutually exclusive.
52 * 53 *
@@ -132,7 +133,7 @@ static int send_ack;
132static int final_ack; 133static int final_ack;
133static int hw_break_val; 134static int hw_break_val;
134static int hw_break_val2; 135static int hw_break_val2;
135#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) 136#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
136static int arch_needs_sstep_emulation = 1; 137static int arch_needs_sstep_emulation = 1;
137#else 138#else
138static int arch_needs_sstep_emulation; 139static int arch_needs_sstep_emulation;
@@ -375,7 +376,7 @@ static void emul_sstep_get(char *arg)
375 break; 376 break;
376 case 1: 377 case 1:
377 /* set breakpoint */ 378 /* set breakpoint */
378 break_helper("Z0", 0, sstep_addr); 379 break_helper("Z0", NULL, sstep_addr);
379 break; 380 break;
380 case 2: 381 case 2:
381 /* Continue */ 382 /* Continue */
@@ -383,7 +384,7 @@ static void emul_sstep_get(char *arg)
383 break; 384 break;
384 case 3: 385 case 3:
385 /* Clear breakpoint */ 386 /* Clear breakpoint */
386 break_helper("z0", 0, sstep_addr); 387 break_helper("z0", NULL, sstep_addr);
387 break; 388 break;
388 default: 389 default:
389 eprintk("kgdbts: ERROR failed sstep get emulation\n"); 390 eprintk("kgdbts: ERROR failed sstep get emulation\n");
@@ -465,11 +466,11 @@ static struct test_struct sw_breakpoint_test[] = {
465 { "?", "S0*" }, /* Clear break points */ 466 { "?", "S0*" }, /* Clear break points */
466 { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ 467 { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
467 { "c", "T0*", }, /* Continue */ 468 { "c", "T0*", }, /* Continue */
468 { "g", "kgdbts_break_test", 0, check_and_rewind_pc }, 469 { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
469 { "write", "OK", write_regs }, 470 { "write", "OK", write_regs },
470 { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ 471 { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
471 { "D", "OK" }, /* Detach */ 472 { "D", "OK" }, /* Detach */
472 { "D", "OK", 0, got_break }, /* If the test worked we made it here */ 473 { "D", "OK", NULL, got_break }, /* On success we made it here */
473 { "", "" }, 474 { "", "" },
474}; 475};
475 476
@@ -499,14 +500,14 @@ static struct test_struct singlestep_break_test[] = {
499 { "?", "S0*" }, /* Clear break points */ 500 { "?", "S0*" }, /* Clear break points */
500 { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ 501 { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
501 { "c", "T0*", }, /* Continue */ 502 { "c", "T0*", }, /* Continue */
502 { "g", "kgdbts_break_test", 0, check_and_rewind_pc }, 503 { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
503 { "write", "OK", write_regs }, /* Write registers */ 504 { "write", "OK", write_regs }, /* Write registers */
504 { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ 505 { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
505 { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ 506 { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
506 { "g", "kgdbts_break_test", 0, check_single_step }, 507 { "g", "kgdbts_break_test", NULL, check_single_step },
507 { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ 508 { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
508 { "c", "T0*", }, /* Continue */ 509 { "c", "T0*", }, /* Continue */
509 { "g", "kgdbts_break_test", 0, check_and_rewind_pc }, 510 { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
510 { "write", "OK", write_regs }, /* Write registers */ 511 { "write", "OK", write_regs }, /* Write registers */
511 { "D", "OK" }, /* Remove all breakpoints and continues */ 512 { "D", "OK" }, /* Remove all breakpoints and continues */
512 { "", "" }, 513 { "", "" },
@@ -520,14 +521,14 @@ static struct test_struct do_fork_test[] = {
520 { "?", "S0*" }, /* Clear break points */ 521 { "?", "S0*" }, /* Clear break points */
521 { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ 522 { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
522 { "c", "T0*", }, /* Continue */ 523 { "c", "T0*", }, /* Continue */
523 { "g", "do_fork", 0, check_and_rewind_pc }, /* check location */ 524 { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
524 { "write", "OK", write_regs }, /* Write registers */ 525 { "write", "OK", write_regs }, /* Write registers */
525 { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */ 526 { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */
526 { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ 527 { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
527 { "g", "do_fork", 0, check_single_step }, 528 { "g", "do_fork", NULL, check_single_step },
528 { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ 529 { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
529 { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ 530 { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
530 { "D", "OK", 0, final_ack_set }, /* detach and unregister I/O */ 531 { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
531 { "", "" }, 532 { "", "" },
532}; 533};
533 534
@@ -538,14 +539,14 @@ static struct test_struct sys_open_test[] = {
538 { "?", "S0*" }, /* Clear break points */ 539 { "?", "S0*" }, /* Clear break points */
539 { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ 540 { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
540 { "c", "T0*", }, /* Continue */ 541 { "c", "T0*", }, /* Continue */
541 { "g", "sys_open", 0, check_and_rewind_pc }, /* check location */ 542 { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
542 { "write", "OK", write_regs }, /* Write registers */ 543 { "write", "OK", write_regs }, /* Write registers */
543 { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */ 544 { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */
544 { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ 545 { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
545 { "g", "sys_open", 0, check_single_step }, 546 { "g", "sys_open", NULL, check_single_step },
546 { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ 547 { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
547 { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ 548 { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
548 { "D", "OK", 0, final_ack_set }, /* detach and unregister I/O */ 549 { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
549 { "", "" }, 550 { "", "" },
550}; 551};
551 552
@@ -556,11 +557,11 @@ static struct test_struct hw_breakpoint_test[] = {
556 { "?", "S0*" }, /* Clear break points */ 557 { "?", "S0*" }, /* Clear break points */
557 { "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */ 558 { "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */
558 { "c", "T0*", }, /* Continue */ 559 { "c", "T0*", }, /* Continue */
559 { "g", "kgdbts_break_test", 0, check_and_rewind_pc }, 560 { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
560 { "write", "OK", write_regs }, 561 { "write", "OK", write_regs },
561 { "kgdbts_break_test", "OK", hw_rem_break }, /*remove breakpoint */ 562 { "kgdbts_break_test", "OK", hw_rem_break }, /*remove breakpoint */
562 { "D", "OK" }, /* Detach */ 563 { "D", "OK" }, /* Detach */
563 { "D", "OK", 0, got_break }, /* If the test worked we made it here */ 564 { "D", "OK", NULL, got_break }, /* On success we made it here */
564 { "", "" }, 565 { "", "" },
565}; 566};
566 567
@@ -570,12 +571,12 @@ static struct test_struct hw_breakpoint_test[] = {
570static struct test_struct hw_write_break_test[] = { 571static struct test_struct hw_write_break_test[] = {
571 { "?", "S0*" }, /* Clear break points */ 572 { "?", "S0*" }, /* Clear break points */
572 { "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */ 573 { "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */
573 { "c", "T0*", 0, got_break }, /* Continue */ 574 { "c", "T0*", NULL, got_break }, /* Continue */
574 { "g", "silent", 0, check_and_rewind_pc }, 575 { "g", "silent", NULL, check_and_rewind_pc },
575 { "write", "OK", write_regs }, 576 { "write", "OK", write_regs },
576 { "hw_break_val", "OK", hw_rem_write_break }, /*remove breakpoint */ 577 { "hw_break_val", "OK", hw_rem_write_break }, /*remove breakpoint */
577 { "D", "OK" }, /* Detach */ 578 { "D", "OK" }, /* Detach */
578 { "D", "OK", 0, got_break }, /* If the test worked we made it here */ 579 { "D", "OK", NULL, got_break }, /* On success we made it here */
579 { "", "" }, 580 { "", "" },
580}; 581};
581 582
@@ -585,12 +586,12 @@ static struct test_struct hw_write_break_test[] = {
585static struct test_struct hw_access_break_test[] = { 586static struct test_struct hw_access_break_test[] = {
586 { "?", "S0*" }, /* Clear break points */ 587 { "?", "S0*" }, /* Clear break points */
587 { "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */ 588 { "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */
588 { "c", "T0*", 0, got_break }, /* Continue */ 589 { "c", "T0*", NULL, got_break }, /* Continue */
589 { "g", "silent", 0, check_and_rewind_pc }, 590 { "g", "silent", NULL, check_and_rewind_pc },
590 { "write", "OK", write_regs }, 591 { "write", "OK", write_regs },
591 { "hw_break_val", "OK", hw_rem_access_break }, /*remove breakpoint */ 592 { "hw_break_val", "OK", hw_rem_access_break }, /*remove breakpoint */
592 { "D", "OK" }, /* Detach */ 593 { "D", "OK" }, /* Detach */
593 { "D", "OK", 0, got_break }, /* If the test worked we made it here */ 594 { "D", "OK", NULL, got_break }, /* On success we made it here */
594 { "", "" }, 595 { "", "" },
595}; 596};
596 597
@@ -599,9 +600,9 @@ static struct test_struct hw_access_break_test[] = {
599 */ 600 */
600static struct test_struct nmi_sleep_test[] = { 601static struct test_struct nmi_sleep_test[] = {
601 { "?", "S0*" }, /* Clear break points */ 602 { "?", "S0*" }, /* Clear break points */
602 { "c", "T0*", 0, got_break }, /* Continue */ 603 { "c", "T0*", NULL, got_break }, /* Continue */
603 { "D", "OK" }, /* Detach */ 604 { "D", "OK" }, /* Detach */
604 { "D", "OK", 0, got_break }, /* If the test worked we made it here */ 605 { "D", "OK", NULL, got_break }, /* On success we made it here */
605 { "", "" }, 606 { "", "" },
606}; 607};
607 608
@@ -874,18 +875,23 @@ static void kgdbts_run_tests(void)
874{ 875{
875 char *ptr; 876 char *ptr;
876 int fork_test = 0; 877 int fork_test = 0;
877 int sys_open_test = 0; 878 int do_sys_open_test = 0;
879 int sstep_test = 1000;
878 int nmi_sleep = 0; 880 int nmi_sleep = 0;
881 int i;
879 882
880 ptr = strstr(config, "F"); 883 ptr = strstr(config, "F");
881 if (ptr) 884 if (ptr)
882 fork_test = simple_strtol(ptr+1, NULL, 10); 885 fork_test = simple_strtol(ptr + 1, NULL, 10);
883 ptr = strstr(config, "S"); 886 ptr = strstr(config, "S");
884 if (ptr) 887 if (ptr)
885 sys_open_test = simple_strtol(ptr+1, NULL, 10); 888 do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
886 ptr = strstr(config, "N"); 889 ptr = strstr(config, "N");
887 if (ptr) 890 if (ptr)
888 nmi_sleep = simple_strtol(ptr+1, NULL, 10); 891 nmi_sleep = simple_strtol(ptr+1, NULL, 10);
892 ptr = strstr(config, "I");
893 if (ptr)
894 sstep_test = simple_strtol(ptr+1, NULL, 10);
889 895
890 /* required internal KGDB tests */ 896 /* required internal KGDB tests */
891 v1printk("kgdbts:RUN plant and detach test\n"); 897 v1printk("kgdbts:RUN plant and detach test\n");
@@ -894,8 +900,13 @@ static void kgdbts_run_tests(void)
894 run_breakpoint_test(0); 900 run_breakpoint_test(0);
895 v1printk("kgdbts:RUN bad memory access test\n"); 901 v1printk("kgdbts:RUN bad memory access test\n");
896 run_bad_read_test(); 902 run_bad_read_test();
897 v1printk("kgdbts:RUN singlestep breakpoint test\n"); 903 v1printk("kgdbts:RUN singlestep test %i iterations\n", sstep_test);
898 run_singlestep_break_test(); 904 for (i = 0; i < sstep_test; i++) {
905 run_singlestep_break_test();
906 if (i % 100 == 0)
907 v1printk("kgdbts:RUN singlestep [%i/%i]\n",
908 i, sstep_test);
909 }
899 910
900 /* ===Optional tests=== */ 911 /* ===Optional tests=== */
901 912
@@ -922,7 +933,7 @@ static void kgdbts_run_tests(void)
922 repeat_test = fork_test; 933 repeat_test = fork_test;
923 printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n", 934 printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n",
924 repeat_test); 935 repeat_test);
925 kthread_run(kgdbts_unreg_thread, 0, "kgdbts_unreg"); 936 kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
926 run_do_fork_test(); 937 run_do_fork_test();
927 return; 938 return;
928 } 939 }
@@ -931,11 +942,11 @@ static void kgdbts_run_tests(void)
931 * executed because a kernel thread will be spawned at the very 942 * executed because a kernel thread will be spawned at the very
932 * end to unregister the debug hooks. 943 * end to unregister the debug hooks.
933 */ 944 */
934 if (sys_open_test) { 945 if (do_sys_open_test) {
935 repeat_test = sys_open_test; 946 repeat_test = do_sys_open_test;
936 printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n", 947 printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n",
937 repeat_test); 948 repeat_test);
938 kthread_run(kgdbts_unreg_thread, 0, "kgdbts_unreg"); 949 kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
939 run_sys_open_test(); 950 run_sys_open_test();
940 return; 951 return;
941 } 952 }
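The kgdbts hunks above read their test counts (F fork breakpoints, S sys_open breakpoints, N nmi sleep, and the new I singlestep iteration count) straight out of the module's config string with strstr() plus simple_strtol(). A minimal userspace sketch of that parsing pattern, with strtol() standing in for the kernel's simple_strtol(); the helper name, option letters and sample string are illustrative, not taken from the driver:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the number following option letter 'opt', or 'def' if absent. */
static long get_opt(const char *config, int opt, long def)
{
	const char *p = strchr(config, opt);

	return p ? strtol(p + 1, NULL, 10) : def;
}

int main(void)
{
	const char config[] = "V1F1000,I5000";
	long fork_test  = get_opt(config, 'F', 0);
	long sstep_test = get_opt(config, 'I', 1000);	/* driver default */

	printf("fork_test=%ld sstep_test=%ld\n", fork_test, sstep_test);
	return 0;
}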
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 7fa61e907e1c..71d1c84e2fa8 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -12,6 +12,7 @@
12 * or alternatively, you might use OpenHaptics provided by Sensable. 12 * or alternatively, you might use OpenHaptics provided by Sensable.
13 */ 13 */
14 14
15#include <linux/compat.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/device.h> 18#include <linux/device.h>
@@ -91,11 +92,8 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
91 unsigned long flags; 92 unsigned long flags;
92 unsigned int i; 93 unsigned int i;
93 94
94 if (_IOC_TYPE(cmd) != PH_IOC_MAGIC ||
95 _IOC_NR(cmd) > PH_IOC_MAXNR)
96 return -ENOTTY;
97
98 switch (cmd) { 95 switch (cmd) {
96 case PHN_SETREG:
99 case PHN_SET_REG: 97 case PHN_SET_REG:
100 if (copy_from_user(&r, argp, sizeof(r))) 98 if (copy_from_user(&r, argp, sizeof(r)))
101 return -EFAULT; 99 return -EFAULT;
@@ -126,6 +124,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
126 phantom_status(dev, dev->status & ~PHB_RUNNING); 124 phantom_status(dev, dev->status & ~PHB_RUNNING);
127 spin_unlock_irqrestore(&dev->regs_lock, flags); 125 spin_unlock_irqrestore(&dev->regs_lock, flags);
128 break; 126 break;
127 case PHN_SETREGS:
129 case PHN_SET_REGS: 128 case PHN_SET_REGS:
130 if (copy_from_user(&rs, argp, sizeof(rs))) 129 if (copy_from_user(&rs, argp, sizeof(rs)))
131 return -EFAULT; 130 return -EFAULT;
@@ -143,6 +142,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
143 } 142 }
144 spin_unlock_irqrestore(&dev->regs_lock, flags); 143 spin_unlock_irqrestore(&dev->regs_lock, flags);
145 break; 144 break;
145 case PHN_GETREG:
146 case PHN_GET_REG: 146 case PHN_GET_REG:
147 if (copy_from_user(&r, argp, sizeof(r))) 147 if (copy_from_user(&r, argp, sizeof(r)))
148 return -EFAULT; 148 return -EFAULT;
@@ -155,6 +155,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
155 if (copy_to_user(argp, &r, sizeof(r))) 155 if (copy_to_user(argp, &r, sizeof(r)))
156 return -EFAULT; 156 return -EFAULT;
157 break; 157 break;
158 case PHN_GETREGS:
158 case PHN_GET_REGS: { 159 case PHN_GET_REGS: {
159 u32 m; 160 u32 m;
160 161
@@ -168,6 +169,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
168 for (i = 0; i < m; i++) 169 for (i = 0; i < m; i++)
169 if (rs.mask & BIT(i)) 170 if (rs.mask & BIT(i))
170 rs.values[i] = ioread32(dev->iaddr + i); 171 rs.values[i] = ioread32(dev->iaddr + i);
172 atomic_set(&dev->counter, 0);
171 spin_unlock_irqrestore(&dev->regs_lock, flags); 173 spin_unlock_irqrestore(&dev->regs_lock, flags);
172 174
173 if (copy_to_user(argp, &rs, sizeof(rs))) 175 if (copy_to_user(argp, &rs, sizeof(rs)))
@@ -191,6 +193,20 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
191 return 0; 193 return 0;
192} 194}
193 195
196#ifdef CONFIG_COMPAT
197static long phantom_compat_ioctl(struct file *filp, unsigned int cmd,
198 unsigned long arg)
199{
200 if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
201 cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
202 cmd |= sizeof(void *) << _IOC_SIZESHIFT;
203 }
204 return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
205}
206#else
207#define phantom_compat_ioctl NULL
208#endif
209
194static int phantom_open(struct inode *inode, struct file *file) 210static int phantom_open(struct inode *inode, struct file *file)
195{ 211{
196 struct phantom_device *dev = container_of(inode->i_cdev, 212 struct phantom_device *dev = container_of(inode->i_cdev,
@@ -239,11 +255,12 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait)
239 255
240 pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter)); 256 pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter));
241 poll_wait(file, &dev->wait, wait); 257 poll_wait(file, &dev->wait, wait);
242 if (atomic_read(&dev->counter)) { 258
259 if (!(dev->status & PHB_RUNNING))
260 mask = POLLERR;
261 else if (atomic_read(&dev->counter))
243 mask = POLLIN | POLLRDNORM; 262 mask = POLLIN | POLLRDNORM;
244 atomic_dec(&dev->counter); 263
245 } else if ((dev->status & PHB_RUNNING) == 0)
246 mask = POLLIN | POLLRDNORM | POLLERR;
247 pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); 264 pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter));
248 265
249 return mask; 266 return mask;
@@ -253,6 +270,7 @@ static struct file_operations phantom_file_ops = {
253 .open = phantom_open, 270 .open = phantom_open,
254 .release = phantom_release, 271 .release = phantom_release,
255 .unlocked_ioctl = phantom_ioctl, 272 .unlocked_ioctl = phantom_ioctl,
273 .compat_ioctl = phantom_compat_ioctl,
256 .poll = phantom_poll, 274 .poll = phantom_poll,
257}; 275};
258 276
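The new phantom_compat_ioctl() above copes with 32-bit userspace on a 64-bit kernel: the register get/set commands encode a pointer argument, so a 32-bit caller produces an ioctl number whose size field says 4 while the native handler expects sizeof(void *). The compat wrapper simply rewrites the size bits and forwards the call. A standalone sketch of that bit surgery, assuming the asm-generic ioctl layout (8 nr bits, 8 type bits, 14 size bits) used on most architectures; the macro and function names below mirror, but are not, the kernel's _IOC_* macros:

#include <stdio.h>
#include <stdint.h>

/* asm-generic/ioctl.h layout: bits 0-7 nr, 8-15 type, 16-29 size, 30-31 dir */
#define IOC_NRBITS	8
#define IOC_TYPEBITS	8
#define IOC_SIZEBITS	14
#define IOC_SIZESHIFT	(IOC_NRBITS + IOC_TYPEBITS)
#define IOC_SIZEMASK	((1U << IOC_SIZEBITS) - 1)
#define IOC_SIZE(cmd)	(((cmd) >> IOC_SIZESHIFT) & IOC_SIZEMASK)

/* Rewrite a 4-byte (compat pointer) size field to the native pointer size. */
static uint32_t fix_compat_cmd(uint32_t cmd)
{
	if (IOC_SIZE(cmd) == sizeof(uint32_t)) {
		cmd &= ~(IOC_SIZEMASK << IOC_SIZESHIFT);
		cmd |= (uint32_t)sizeof(void *) << IOC_SIZESHIFT;
	}
	return cmd;
}

int main(void)
{
	uint32_t cmd = 0xc0047001u;	/* made-up example: dir=RW, size=4 */

	printf("0x%08x -> 0x%08x\n", cmd, fix_compat_cmd(cmd));
	return 0;
}

The driver additionally restricts the rewrite to _IOC_NR(cmd) <= 3, i.e. to the commands that actually carry a pointer.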
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 27e200ec5826..acd3fd4285d7 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -211,7 +211,7 @@ xpc_rsvd_page_init(void)
211 */ 211 */
212 amos_page = xpc_vars->amos_page; 212 amos_page = xpc_vars->amos_page;
213 if (amos_page == NULL) { 213 if (amos_page == NULL) {
214 amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0)); 214 amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0, 1));
215 if (amos_page == NULL) { 215 if (amos_page == NULL) {
216 dev_err(xpc_part, "can't allocate page of AMOs\n"); 216 dev_err(xpc_part, "can't allocate page of AMOs\n");
217 return NULL; 217 return NULL;
@@ -230,7 +230,7 @@ xpc_rsvd_page_init(void)
230 dev_err(xpc_part, "can't change memory " 230 dev_err(xpc_part, "can't change memory "
231 "protections\n"); 231 "protections\n");
232 uncached_free_page(__IA64_UNCACHED_OFFSET | 232 uncached_free_page(__IA64_UNCACHED_OFFSET |
233 TO_PHYS((u64)amos_page)); 233 TO_PHYS((u64)amos_page), 1);
234 return NULL; 234 return NULL;
235 } 235 }
236 } 236 }
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index 02ff3d19b1cc..00e48e2a9c11 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -961,7 +961,7 @@ static int sony_nc_resume(struct acpi_device *device)
961 ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, 961 ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
962 item->value, NULL); 962 item->value, NULL);
963 if (ret < 0) { 963 if (ret < 0) {
964 printk("%s: %d\n", __FUNCTION__, ret); 964 printk("%s: %d\n", __func__, ret);
965 break; 965 break;
966 } 966 }
967 } 967 }
@@ -1453,7 +1453,7 @@ static struct sonypi_eventtypes type4_events[] = {
1453 udelay(1); \ 1453 udelay(1); \
1454 if (!n) \ 1454 if (!n) \
1455 dprintk("command failed at %s : %s (line %d)\n", \ 1455 dprintk("command failed at %s : %s (line %d)\n", \
1456 __FILE__, __FUNCTION__, __LINE__); \ 1456 __FILE__, __func__, __LINE__); \
1457} 1457}
1458 1458
1459static u8 sony_pic_call1(u8 dev) 1459static u8 sony_pic_call1(u8 dev)
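The sony-laptop hunks are a pure cleanup: __FUNCTION__ is a GCC-specific spelling, __func__ is the equivalent predefined identifier from C99, so the log output is unchanged. For illustration:

#include <stdio.h>

static void report(int err)
{
	/* __func__ expands to the name of the enclosing function, "report" */
	printf("%s: %d\n", __func__, err);
}

int main(void)
{
	report(-5);
	return 0;
}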
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index 6cb781262f94..3f28f6eabdbf 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -21,7 +21,7 @@
21 * 02110-1301, USA. 21 * 02110-1301, USA.
22 */ 22 */
23 23
24#define TPACPI_VERSION "0.19" 24#define TPACPI_VERSION "0.20"
25#define TPACPI_SYSFS_VERSION 0x020200 25#define TPACPI_SYSFS_VERSION 0x020200
26 26
27/* 27/*
@@ -67,6 +67,7 @@
67#include <linux/hwmon.h> 67#include <linux/hwmon.h>
68#include <linux/hwmon-sysfs.h> 68#include <linux/hwmon-sysfs.h>
69#include <linux/input.h> 69#include <linux/input.h>
70#include <linux/leds.h>
70#include <asm/uaccess.h> 71#include <asm/uaccess.h>
71 72
72#include <linux/dmi.h> 73#include <linux/dmi.h>
@@ -85,6 +86,8 @@
85#define TP_CMOS_VOLUME_MUTE 2 86#define TP_CMOS_VOLUME_MUTE 2
86#define TP_CMOS_BRIGHTNESS_UP 4 87#define TP_CMOS_BRIGHTNESS_UP 4
87#define TP_CMOS_BRIGHTNESS_DOWN 5 88#define TP_CMOS_BRIGHTNESS_DOWN 5
89#define TP_CMOS_THINKLIGHT_ON 12
90#define TP_CMOS_THINKLIGHT_OFF 13
88 91
89/* NVRAM Addresses */ 92/* NVRAM Addresses */
90enum tp_nvram_addr { 93enum tp_nvram_addr {
@@ -133,8 +136,12 @@ enum {
133#define TPACPI_PROC_DIR "ibm" 136#define TPACPI_PROC_DIR "ibm"
134#define TPACPI_ACPI_EVENT_PREFIX "ibm" 137#define TPACPI_ACPI_EVENT_PREFIX "ibm"
135#define TPACPI_DRVR_NAME TPACPI_FILE 138#define TPACPI_DRVR_NAME TPACPI_FILE
139#define TPACPI_DRVR_SHORTNAME "tpacpi"
136#define TPACPI_HWMON_DRVR_NAME TPACPI_NAME "_hwmon" 140#define TPACPI_HWMON_DRVR_NAME TPACPI_NAME "_hwmon"
137 141
142#define TPACPI_NVRAM_KTHREAD_NAME "ktpacpi_nvramd"
143#define TPACPI_WORKQUEUE_NAME "ktpacpid"
144
138#define TPACPI_MAX_ACPI_ARGS 3 145#define TPACPI_MAX_ACPI_ARGS 3
139 146
140/* Debugging */ 147/* Debugging */
@@ -225,6 +232,7 @@ static struct {
225 u32 light:1; 232 u32 light:1;
226 u32 light_status:1; 233 u32 light_status:1;
227 u32 bright_16levels:1; 234 u32 bright_16levels:1;
235 u32 bright_acpimode:1;
228 u32 wan:1; 236 u32 wan:1;
229 u32 fan_ctrl_status_undef:1; 237 u32 fan_ctrl_status_undef:1;
230 u32 input_device_registered:1; 238 u32 input_device_registered:1;
@@ -236,6 +244,11 @@ static struct {
236 u32 hotkey_poll_active:1; 244 u32 hotkey_poll_active:1;
237} tp_features; 245} tp_features;
238 246
247static struct {
248 u16 hotkey_mask_ff:1;
249 u16 bright_cmos_ec_unsync:1;
250} tp_warned;
251
239struct thinkpad_id_data { 252struct thinkpad_id_data {
240 unsigned int vendor; /* ThinkPad vendor: 253 unsigned int vendor; /* ThinkPad vendor:
241 * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */ 254 * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
@@ -246,7 +259,8 @@ struct thinkpad_id_data {
246 u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */ 259 u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */
247 u16 ec_model; 260 u16 ec_model;
248 261
249 char *model_str; 262 char *model_str; /* ThinkPad T43 */
263 char *nummodel_str; /* 9384A9C for a 9384-A9C model */
250}; 264};
251static struct thinkpad_id_data thinkpad_id; 265static struct thinkpad_id_data thinkpad_id;
252 266
@@ -259,6 +273,16 @@ static enum {
259static int experimental; 273static int experimental;
260static u32 dbg_level; 274static u32 dbg_level;
261 275
276static struct workqueue_struct *tpacpi_wq;
277
278/* Special LED class that can defer work */
279struct tpacpi_led_classdev {
280 struct led_classdev led_classdev;
281 struct work_struct work;
282 enum led_brightness new_brightness;
283 unsigned int led;
284};
285
262/**************************************************************************** 286/****************************************************************************
263 **************************************************************************** 287 ****************************************************************************
264 * 288 *
@@ -807,6 +831,80 @@ static int parse_strtoul(const char *buf,
807 return 0; 831 return 0;
808} 832}
809 833
834static int __init tpacpi_query_bcl_levels(acpi_handle handle)
835{
836 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
837 union acpi_object *obj;
838 int rc;
839
840 if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
841 obj = (union acpi_object *)buffer.pointer;
842 if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
843 printk(TPACPI_ERR "Unknown _BCL data, "
844 "please report this to %s\n", TPACPI_MAIL);
845 rc = 0;
846 } else {
847 rc = obj->package.count;
848 }
849 } else {
850 return 0;
851 }
852
853 kfree(buffer.pointer);
854 return rc;
855}
856
857static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
858 u32 lvl, void *context, void **rv)
859{
860 char name[ACPI_PATH_SEGMENT_LENGTH];
861 struct acpi_buffer buffer = { sizeof(name), &name };
862
863 if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
864 !strncmp("_BCL", name, sizeof(name) - 1)) {
865 BUG_ON(!rv || !*rv);
866 **(int **)rv = tpacpi_query_bcl_levels(handle);
867 return AE_CTRL_TERMINATE;
868 } else {
869 return AE_OK;
870 }
871}
872
873/*
874 * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
875 */
876static int __init tpacpi_check_std_acpi_brightness_support(void)
877{
878 int status;
879 int bcl_levels = 0;
880 void *bcl_ptr = &bcl_levels;
881
882 if (!vid_handle) {
883 TPACPI_ACPIHANDLE_INIT(vid);
884 }
885 if (!vid_handle)
886 return 0;
887
888 /*
889 * Search for a _BCL method, and execute it. This is safe on all
890 * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
891 * BIOS in ACPI backlight control mode. We do NOT have to care
892 * about calling the _BCL method in an enabled video device, any
893 * will do for our purposes.
894 */
895
896 status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
897 tpacpi_acpi_walk_find_bcl, NULL,
898 &bcl_ptr);
899
900 if (ACPI_SUCCESS(status) && bcl_levels > 2) {
901 tp_features.bright_acpimode = 1;
902 return (bcl_levels - 2);
903 }
904
905 return 0;
906}
907
810/************************************************************************* 908/*************************************************************************
811 * thinkpad-acpi driver attributes 909 * thinkpad-acpi driver attributes
812 */ 910 */
@@ -909,12 +1007,14 @@ static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
909 thinkpad_id.ec_version_str : "unknown"); 1007 thinkpad_id.ec_version_str : "unknown");
910 1008
911 if (thinkpad_id.vendor && thinkpad_id.model_str) 1009 if (thinkpad_id.vendor && thinkpad_id.model_str)
912 printk(TPACPI_INFO "%s %s\n", 1010 printk(TPACPI_INFO "%s %s, model %s\n",
913 (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ? 1011 (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
914 "IBM" : ((thinkpad_id.vendor == 1012 "IBM" : ((thinkpad_id.vendor ==
915 PCI_VENDOR_ID_LENOVO) ? 1013 PCI_VENDOR_ID_LENOVO) ?
916 "Lenovo" : "Unknown vendor"), 1014 "Lenovo" : "Unknown vendor"),
917 thinkpad_id.model_str); 1015 thinkpad_id.model_str,
1016 (thinkpad_id.nummodel_str) ?
1017 thinkpad_id.nummodel_str : "unknown");
918 1018
919 return 0; 1019 return 0;
920} 1020}
@@ -1107,6 +1207,19 @@ static int hotkey_mask_set(u32 mask)
1107 int rc = 0; 1207 int rc = 0;
1108 1208
1109 if (tp_features.hotkey_mask) { 1209 if (tp_features.hotkey_mask) {
1210 if (!tp_warned.hotkey_mask_ff &&
1211 (mask == 0xffff || mask == 0xffffff ||
1212 mask == 0xffffffff)) {
1213 tp_warned.hotkey_mask_ff = 1;
1214 printk(TPACPI_NOTICE
1215 "setting the hotkey mask to 0x%08x is likely "
1216 "not the best way to go about it\n", mask);
1217 printk(TPACPI_NOTICE
1218 "please consider using the driver defaults, "
1219 "and refer to up-to-date thinkpad-acpi "
1220 "documentation\n");
1221 }
1222
1110 HOTKEY_CONFIG_CRITICAL_START 1223 HOTKEY_CONFIG_CRITICAL_START
1111 for (i = 0; i < 32; i++) { 1224 for (i = 0; i < 32; i++) {
1112 u32 m = 1 << i; 1225 u32 m = 1 << i;
@@ -1427,8 +1540,7 @@ static void hotkey_poll_setup(int may_warn)
1427 (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) { 1540 (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) {
1428 if (!tpacpi_hotkey_task) { 1541 if (!tpacpi_hotkey_task) {
1429 tpacpi_hotkey_task = kthread_run(hotkey_kthread, 1542 tpacpi_hotkey_task = kthread_run(hotkey_kthread,
1430 NULL, 1543 NULL, TPACPI_NVRAM_KTHREAD_NAME);
1431 TPACPI_FILE "d");
1432 if (IS_ERR(tpacpi_hotkey_task)) { 1544 if (IS_ERR(tpacpi_hotkey_task)) {
1433 tpacpi_hotkey_task = NULL; 1545 tpacpi_hotkey_task = NULL;
1434 printk(TPACPI_ERR 1546 printk(TPACPI_ERR
@@ -1887,6 +1999,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
1887 KEY_UNKNOWN, /* 0x0D: FN+INSERT */ 1999 KEY_UNKNOWN, /* 0x0D: FN+INSERT */
1888 KEY_UNKNOWN, /* 0x0E: FN+DELETE */ 2000 KEY_UNKNOWN, /* 0x0E: FN+DELETE */
1889 2001
2002 /* These either have to go through ACPI video, or
 2003 * behave as they do on IBM ThinkPads, so don't ever
2004 * enable them by default */
1890 KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */ 2005 KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
1891 KEY_RESERVED, /* 0x10: FN+END (brightness down) */ 2006 KEY_RESERVED, /* 0x10: FN+END (brightness down) */
1892 2007
@@ -2091,6 +2206,32 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2091 set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit); 2206 set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
2092 } 2207 }
2093 2208
2209 /* Do not issue duplicate brightness change events to
2210 * userspace */
2211 if (!tp_features.bright_acpimode)
2212 /* update bright_acpimode... */
2213 tpacpi_check_std_acpi_brightness_support();
2214
2215 if (tp_features.bright_acpimode) {
2216 printk(TPACPI_INFO
2217 "This ThinkPad has standard ACPI backlight "
2218 "brightness control, supported by the ACPI "
2219 "video driver\n");
2220 printk(TPACPI_NOTICE
2221 "Disabling thinkpad-acpi brightness events "
2222 "by default...\n");
2223
2224 /* The hotkey_reserved_mask change below is not
2225 * necessary while the keys are at KEY_RESERVED in the
2226 * default map, but better safe than sorry, leave it
2227 * here as a marker of what we have to do, especially
2228 * when we finally become able to set this at runtime
 2229 * in response to X.org requests */
2230 hotkey_reserved_mask |=
2231 (1 << TP_ACPI_HOTKEYSCAN_FNHOME)
2232 | (1 << TP_ACPI_HOTKEYSCAN_FNEND);
2233 }
2234
2094 dbg_printk(TPACPI_DBG_INIT, 2235 dbg_printk(TPACPI_DBG_INIT,
2095 "enabling hot key handling\n"); 2236 "enabling hot key handling\n");
2096 res = hotkey_status_set(1); 2237 res = hotkey_status_set(1);
@@ -3110,13 +3251,82 @@ static struct ibm_struct video_driver_data = {
3110TPACPI_HANDLE(lght, root, "\\LGHT"); /* A21e, A2xm/p, T20-22, X20-21 */ 3251TPACPI_HANDLE(lght, root, "\\LGHT"); /* A21e, A2xm/p, T20-22, X20-21 */
3111TPACPI_HANDLE(ledb, ec, "LEDB"); /* G4x */ 3252TPACPI_HANDLE(ledb, ec, "LEDB"); /* G4x */
3112 3253
3254static int light_get_status(void)
3255{
3256 int status = 0;
3257
3258 if (tp_features.light_status) {
3259 if (!acpi_evalf(ec_handle, &status, "KBLT", "d"))
3260 return -EIO;
3261 return (!!status);
3262 }
3263
3264 return -ENXIO;
3265}
3266
3267static int light_set_status(int status)
3268{
3269 int rc;
3270
3271 if (tp_features.light) {
3272 if (cmos_handle) {
3273 rc = acpi_evalf(cmos_handle, NULL, NULL, "vd",
3274 (status)?
3275 TP_CMOS_THINKLIGHT_ON :
3276 TP_CMOS_THINKLIGHT_OFF);
3277 } else {
3278 rc = acpi_evalf(lght_handle, NULL, NULL, "vd",
3279 (status)? 1 : 0);
3280 }
3281 return (rc)? 0 : -EIO;
3282 }
3283
3284 return -ENXIO;
3285}
3286
3287static void light_set_status_worker(struct work_struct *work)
3288{
3289 struct tpacpi_led_classdev *data =
3290 container_of(work, struct tpacpi_led_classdev, work);
3291
3292 if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
3293 light_set_status((data->new_brightness != LED_OFF));
3294}
3295
3296static void light_sysfs_set(struct led_classdev *led_cdev,
3297 enum led_brightness brightness)
3298{
3299 struct tpacpi_led_classdev *data =
3300 container_of(led_cdev,
3301 struct tpacpi_led_classdev,
3302 led_classdev);
3303 data->new_brightness = brightness;
3304 queue_work(tpacpi_wq, &data->work);
3305}
3306
3307static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
3308{
3309 return (light_get_status() == 1)? LED_FULL : LED_OFF;
3310}
3311
3312static struct tpacpi_led_classdev tpacpi_led_thinklight = {
3313 .led_classdev = {
3314 .name = "tpacpi::thinklight",
3315 .brightness_set = &light_sysfs_set,
3316 .brightness_get = &light_sysfs_get,
3317 }
3318};
3319
3113static int __init light_init(struct ibm_init_struct *iibm) 3320static int __init light_init(struct ibm_init_struct *iibm)
3114{ 3321{
3322 int rc = 0;
3323
3115 vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n"); 3324 vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n");
3116 3325
3117 TPACPI_ACPIHANDLE_INIT(ledb); 3326 TPACPI_ACPIHANDLE_INIT(ledb);
3118 TPACPI_ACPIHANDLE_INIT(lght); 3327 TPACPI_ACPIHANDLE_INIT(lght);
3119 TPACPI_ACPIHANDLE_INIT(cmos); 3328 TPACPI_ACPIHANDLE_INIT(cmos);
3329 INIT_WORK(&tpacpi_led_thinklight.work, light_set_status_worker);
3120 3330
3121 /* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */ 3331 /* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */
3122 tp_features.light = (cmos_handle || lght_handle) && !ledb_handle; 3332 tp_features.light = (cmos_handle || lght_handle) && !ledb_handle;
@@ -3130,13 +3340,31 @@ static int __init light_init(struct ibm_init_struct *iibm)
3130 vdbg_printk(TPACPI_DBG_INIT, "light is %s\n", 3340 vdbg_printk(TPACPI_DBG_INIT, "light is %s\n",
3131 str_supported(tp_features.light)); 3341 str_supported(tp_features.light));
3132 3342
3133 return (tp_features.light)? 0 : 1; 3343 if (tp_features.light) {
3344 rc = led_classdev_register(&tpacpi_pdev->dev,
3345 &tpacpi_led_thinklight.led_classdev);
3346 }
3347
3348 if (rc < 0) {
3349 tp_features.light = 0;
3350 tp_features.light_status = 0;
3351 } else {
3352 rc = (tp_features.light)? 0 : 1;
3353 }
3354 return rc;
3355}
3356
3357static void light_exit(void)
3358{
3359 led_classdev_unregister(&tpacpi_led_thinklight.led_classdev);
3360 if (work_pending(&tpacpi_led_thinklight.work))
3361 flush_workqueue(tpacpi_wq);
3134} 3362}
3135 3363
3136static int light_read(char *p) 3364static int light_read(char *p)
3137{ 3365{
3138 int len = 0; 3366 int len = 0;
3139 int status = 0; 3367 int status;
3140 3368
3141 if (!tp_features.light) { 3369 if (!tp_features.light) {
3142 len += sprintf(p + len, "status:\t\tnot supported\n"); 3370 len += sprintf(p + len, "status:\t\tnot supported\n");
@@ -3144,8 +3372,9 @@ static int light_read(char *p)
3144 len += sprintf(p + len, "status:\t\tunknown\n"); 3372 len += sprintf(p + len, "status:\t\tunknown\n");
3145 len += sprintf(p + len, "commands:\ton, off\n"); 3373 len += sprintf(p + len, "commands:\ton, off\n");
3146 } else { 3374 } else {
3147 if (!acpi_evalf(ec_handle, &status, "KBLT", "d")) 3375 status = light_get_status();
3148 return -EIO; 3376 if (status < 0)
3377 return status;
3149 len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0)); 3378 len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0));
3150 len += sprintf(p + len, "commands:\ton, off\n"); 3379 len += sprintf(p + len, "commands:\ton, off\n");
3151 } 3380 }
@@ -3155,37 +3384,29 @@ static int light_read(char *p)
3155 3384
3156static int light_write(char *buf) 3385static int light_write(char *buf)
3157{ 3386{
3158 int cmos_cmd, lght_cmd;
3159 char *cmd; 3387 char *cmd;
3160 int success; 3388 int newstatus = 0;
3161 3389
3162 if (!tp_features.light) 3390 if (!tp_features.light)
3163 return -ENODEV; 3391 return -ENODEV;
3164 3392
3165 while ((cmd = next_cmd(&buf))) { 3393 while ((cmd = next_cmd(&buf))) {
3166 if (strlencmp(cmd, "on") == 0) { 3394 if (strlencmp(cmd, "on") == 0) {
3167 cmos_cmd = 0x0c; 3395 newstatus = 1;
3168 lght_cmd = 1;
3169 } else if (strlencmp(cmd, "off") == 0) { 3396 } else if (strlencmp(cmd, "off") == 0) {
3170 cmos_cmd = 0x0d; 3397 newstatus = 0;
3171 lght_cmd = 0;
3172 } else 3398 } else
3173 return -EINVAL; 3399 return -EINVAL;
3174
3175 success = cmos_handle ?
3176 acpi_evalf(cmos_handle, NULL, NULL, "vd", cmos_cmd) :
3177 acpi_evalf(lght_handle, NULL, NULL, "vd", lght_cmd);
3178 if (!success)
3179 return -EIO;
3180 } 3400 }
3181 3401
3182 return 0; 3402 return light_set_status(newstatus);
3183} 3403}
3184 3404
3185static struct ibm_struct light_driver_data = { 3405static struct ibm_struct light_driver_data = {
3186 .name = "light", 3406 .name = "light",
3187 .read = light_read, 3407 .read = light_read,
3188 .write = light_write, 3408 .write = light_write,
3409 .exit = light_exit,
3189}; 3410};
3190 3411
3191/************************************************************************* 3412/*************************************************************************
@@ -3583,6 +3804,12 @@ enum { /* For TPACPI_LED_OLD */
3583 TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */ 3804 TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */
3584}; 3805};
3585 3806
3807enum led_status_t {
3808 TPACPI_LED_OFF = 0,
3809 TPACPI_LED_ON,
3810 TPACPI_LED_BLINK,
3811};
3812
3586static enum led_access_mode led_supported; 3813static enum led_access_mode led_supported;
3587 3814
3588TPACPI_HANDLE(led, ec, "SLED", /* 570 */ 3815TPACPI_HANDLE(led, ec, "SLED", /* 570 */
@@ -3591,8 +3818,174 @@ TPACPI_HANDLE(led, ec, "SLED", /* 570 */
3591 "LED", /* all others */ 3818 "LED", /* all others */
3592 ); /* R30, R31 */ 3819 ); /* R30, R31 */
3593 3820
3821#define TPACPI_LED_NUMLEDS 8
3822static struct tpacpi_led_classdev *tpacpi_leds;
3823static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS];
 3824static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
3825 /* there's a limit of 19 chars + NULL before 2.6.26 */
3826 "tpacpi::power",
3827 "tpacpi:orange:batt",
3828 "tpacpi:green:batt",
3829 "tpacpi::dock_active",
3830 "tpacpi::bay_active",
3831 "tpacpi::dock_batt",
3832 "tpacpi::unknown_led",
3833 "tpacpi::standby",
3834};
3835
3836static int led_get_status(unsigned int led)
3837{
3838 int status;
3839 enum led_status_t led_s;
3840
3841 switch (led_supported) {
3842 case TPACPI_LED_570:
3843 if (!acpi_evalf(ec_handle,
3844 &status, "GLED", "dd", 1 << led))
3845 return -EIO;
3846 led_s = (status == 0)?
3847 TPACPI_LED_OFF :
3848 ((status == 1)?
3849 TPACPI_LED_ON :
3850 TPACPI_LED_BLINK);
3851 tpacpi_led_state_cache[led] = led_s;
3852 return led_s;
3853 default:
3854 return -ENXIO;
3855 }
3856
3857 /* not reached */
3858}
3859
3860static int led_set_status(unsigned int led, enum led_status_t ledstatus)
3861{
3862 /* off, on, blink. Index is led_status_t */
 3863 static const int led_sled_arg1[] = { 0, 1, 3 };
 3864 static const int led_exp_hlbl[] = { 0, 0, 1 }; /* led# * */
 3865 static const int led_exp_hlcl[] = { 0, 1, 1 }; /* led# * */
 3866 static const int led_led_arg1[] = { 0, 0x80, 0xc0 };
3867
3868 int rc = 0;
3869
3870 switch (led_supported) {
3871 case TPACPI_LED_570:
3872 /* 570 */
 3873 if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
 3874 1 << led,
 3875 led_sled_arg1[ledstatus]))
3876 rc = -EIO;
3877 break;
3878 case TPACPI_LED_OLD:
3879 /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
 3880 /* keep 'led' as the cache index; use the bitmask inline */
 3881 rc = ec_write(TPACPI_LED_EC_HLMS, 1 << led);
 3882 if (rc >= 0)
 3883 rc = ec_write(TPACPI_LED_EC_HLBL,
 3884 (1 << led) * led_exp_hlbl[ledstatus]);
 3885 if (rc >= 0)
 3886 rc = ec_write(TPACPI_LED_EC_HLCL,
 3887 (1 << led) * led_exp_hlcl[ledstatus]);
3888 break;
3889 case TPACPI_LED_NEW:
3890 /* all others */
3891 if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
3892 led, led_led_arg1[ledstatus]))
3893 rc = -EIO;
3894 break;
3895 default:
3896 rc = -ENXIO;
3897 }
3898
3899 if (!rc)
3900 tpacpi_led_state_cache[led] = ledstatus;
3901
3902 return rc;
3903}
3904
3905static void led_sysfs_set_status(unsigned int led,
3906 enum led_brightness brightness)
3907{
3908 led_set_status(led,
3909 (brightness == LED_OFF) ?
3910 TPACPI_LED_OFF :
3911 (tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ?
3912 TPACPI_LED_BLINK : TPACPI_LED_ON);
3913}
3914
3915static void led_set_status_worker(struct work_struct *work)
3916{
3917 struct tpacpi_led_classdev *data =
3918 container_of(work, struct tpacpi_led_classdev, work);
3919
3920 if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
3921 led_sysfs_set_status(data->led, data->new_brightness);
3922}
3923
3924static void led_sysfs_set(struct led_classdev *led_cdev,
3925 enum led_brightness brightness)
3926{
3927 struct tpacpi_led_classdev *data = container_of(led_cdev,
3928 struct tpacpi_led_classdev, led_classdev);
3929
3930 data->new_brightness = brightness;
3931 queue_work(tpacpi_wq, &data->work);
3932}
3933
3934static int led_sysfs_blink_set(struct led_classdev *led_cdev,
3935 unsigned long *delay_on, unsigned long *delay_off)
3936{
3937 struct tpacpi_led_classdev *data = container_of(led_cdev,
3938 struct tpacpi_led_classdev, led_classdev);
3939
3940 /* Can we choose the flash rate? */
3941 if (*delay_on == 0 && *delay_off == 0) {
3942 /* yes. set them to the hardware blink rate (1 Hz) */
3943 *delay_on = 500; /* ms */
3944 *delay_off = 500; /* ms */
3945 } else if ((*delay_on != 500) || (*delay_off != 500))
3946 return -EINVAL;
3947
3948 data->new_brightness = TPACPI_LED_BLINK;
3949 queue_work(tpacpi_wq, &data->work);
3950
3951 return 0;
3952}
3953
3954static enum led_brightness led_sysfs_get(struct led_classdev *led_cdev)
3955{
3956 int rc;
3957
3958 struct tpacpi_led_classdev *data = container_of(led_cdev,
3959 struct tpacpi_led_classdev, led_classdev);
3960
3961 rc = led_get_status(data->led);
3962
3963 if (rc == TPACPI_LED_OFF || rc < 0)
3964 rc = LED_OFF; /* no error handling in led class :( */
3965 else
3966 rc = LED_FULL;
3967
3968 return rc;
3969}
3970
3971static void led_exit(void)
3972{
3973 unsigned int i;
3974
3975 for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
3976 if (tpacpi_leds[i].led_classdev.name)
3977 led_classdev_unregister(&tpacpi_leds[i].led_classdev);
3978 }
3979
3980 kfree(tpacpi_leds);
3981 tpacpi_leds = NULL;
3982}
3983
3594static int __init led_init(struct ibm_init_struct *iibm) 3984static int __init led_init(struct ibm_init_struct *iibm)
3595{ 3985{
3986 unsigned int i;
3987 int rc;
3988
3596 vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n"); 3989 vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
3597 3990
3598 TPACPI_ACPIHANDLE_INIT(led); 3991 TPACPI_ACPIHANDLE_INIT(led);
@@ -3613,10 +4006,41 @@ static int __init led_init(struct ibm_init_struct *iibm)
3613 vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n", 4006 vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
3614 str_supported(led_supported), led_supported); 4007 str_supported(led_supported), led_supported);
3615 4008
4009 tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
4010 GFP_KERNEL);
4011 if (!tpacpi_leds) {
4012 printk(TPACPI_ERR "Out of memory for LED data\n");
4013 return -ENOMEM;
4014 }
4015
4016 for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
4017 tpacpi_leds[i].led = i;
4018
4019 tpacpi_leds[i].led_classdev.brightness_set = &led_sysfs_set;
4020 tpacpi_leds[i].led_classdev.blink_set = &led_sysfs_blink_set;
4021 if (led_supported == TPACPI_LED_570)
4022 tpacpi_leds[i].led_classdev.brightness_get =
4023 &led_sysfs_get;
4024
4025 tpacpi_leds[i].led_classdev.name = tpacpi_led_names[i];
4026
4027 INIT_WORK(&tpacpi_leds[i].work, led_set_status_worker);
4028
4029 rc = led_classdev_register(&tpacpi_pdev->dev,
4030 &tpacpi_leds[i].led_classdev);
4031 if (rc < 0) {
4032 tpacpi_leds[i].led_classdev.name = NULL;
4033 led_exit();
4034 return rc;
4035 }
4036 }
4037
3616 return (led_supported != TPACPI_LED_NONE)? 0 : 1; 4038 return (led_supported != TPACPI_LED_NONE)? 0 : 1;
3617} 4039}
3618 4040
3619#define led_status(s) ((s) == 0 ? "off" : ((s) == 1 ? "on" : "blinking")) 4041#define str_led_status(s) \
4042 ((s) == TPACPI_LED_OFF ? "off" : \
4043 ((s) == TPACPI_LED_ON ? "on" : "blinking"))
3620 4044
3621static int led_read(char *p) 4045static int led_read(char *p)
3622{ 4046{
@@ -3632,11 +4056,11 @@ static int led_read(char *p)
3632 /* 570 */ 4056 /* 570 */
3633 int i, status; 4057 int i, status;
3634 for (i = 0; i < 8; i++) { 4058 for (i = 0; i < 8; i++) {
3635 if (!acpi_evalf(ec_handle, 4059 status = led_get_status(i);
3636 &status, "GLED", "dd", 1 << i)) 4060 if (status < 0)
3637 return -EIO; 4061 return -EIO;
3638 len += sprintf(p + len, "%d:\t\t%s\n", 4062 len += sprintf(p + len, "%d:\t\t%s\n",
3639 i, led_status(status)); 4063 i, str_led_status(status));
3640 } 4064 }
3641 } 4065 }
3642 4066
@@ -3646,16 +4070,11 @@ static int led_read(char *p)
3646 return len; 4070 return len;
3647} 4071}
3648 4072
3649/* off, on, blink */
3650static const int led_sled_arg1[] = { 0, 1, 3 };
3651static const int led_exp_hlbl[] = { 0, 0, 1 }; /* led# * */
3652static const int led_exp_hlcl[] = { 0, 1, 1 }; /* led# * */
3653static const int led_led_arg1[] = { 0, 0x80, 0xc0 };
3654
3655static int led_write(char *buf) 4073static int led_write(char *buf)
3656{ 4074{
3657 char *cmd; 4075 char *cmd;
3658 int led, ind, ret; 4076 int led, rc;
4077 enum led_status_t s;
3659 4078
3660 if (!led_supported) 4079 if (!led_supported)
3661 return -ENODEV; 4080 return -ENODEV;
@@ -3665,38 +4084,18 @@ static int led_write(char *buf)
3665 return -EINVAL; 4084 return -EINVAL;
3666 4085
3667 if (strstr(cmd, "off")) { 4086 if (strstr(cmd, "off")) {
3668 ind = 0; 4087 s = TPACPI_LED_OFF;
3669 } else if (strstr(cmd, "on")) { 4088 } else if (strstr(cmd, "on")) {
3670 ind = 1; 4089 s = TPACPI_LED_ON;
3671 } else if (strstr(cmd, "blink")) { 4090 } else if (strstr(cmd, "blink")) {
3672 ind = 2; 4091 s = TPACPI_LED_BLINK;
3673 } else
3674 return -EINVAL;
3675
3676 if (led_supported == TPACPI_LED_570) {
3677 /* 570 */
3678 led = 1 << led;
3679 if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
3680 led, led_sled_arg1[ind]))
3681 return -EIO;
3682 } else if (led_supported == TPACPI_LED_OLD) {
3683 /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
3684 led = 1 << led;
3685 ret = ec_write(TPACPI_LED_EC_HLMS, led);
3686 if (ret >= 0)
3687 ret = ec_write(TPACPI_LED_EC_HLBL,
3688 led * led_exp_hlbl[ind]);
3689 if (ret >= 0)
3690 ret = ec_write(TPACPI_LED_EC_HLCL,
3691 led * led_exp_hlcl[ind]);
3692 if (ret < 0)
3693 return ret;
3694 } else { 4092 } else {
3695 /* all others */ 4093 return -EINVAL;
3696 if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
3697 led, led_led_arg1[ind]))
3698 return -EIO;
3699 } 4094 }
4095
4096 rc = led_set_status(led, s);
4097 if (rc < 0)
4098 return rc;
3700 } 4099 }
3701 4100
3702 return 0; 4101 return 0;
@@ -3706,6 +4105,7 @@ static struct ibm_struct led_driver_data = {
3706 .name = "led", 4105 .name = "led",
3707 .read = led_read, 4106 .read = led_read,
3708 .write = led_write, 4107 .write = led_write,
4108 .exit = led_exit,
3709}; 4109};
3710 4110
3711/************************************************************************* 4111/*************************************************************************
@@ -4170,8 +4570,16 @@ static struct ibm_struct ecdump_driver_data = {
4170 4570
4171#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen" 4571#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
4172 4572
4573enum {
4574 TP_EC_BACKLIGHT = 0x31,
4575
4576 /* TP_EC_BACKLIGHT bitmasks */
4577 TP_EC_BACKLIGHT_LVLMSK = 0x1F,
4578 TP_EC_BACKLIGHT_CMDMSK = 0xE0,
4579 TP_EC_BACKLIGHT_MAPSW = 0x20,
4580};
4581
4173static struct backlight_device *ibm_backlight_device; 4582static struct backlight_device *ibm_backlight_device;
4174static int brightness_offset = 0x31;
4175static int brightness_mode; 4583static int brightness_mode;
4176static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */ 4584static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
4177 4585
@@ -4180,16 +4588,24 @@ static struct mutex brightness_mutex;
4180/* 4588/*
4181 * ThinkPads can read brightness from two places: EC 0x31, or 4589 * ThinkPads can read brightness from two places: EC 0x31, or
4182 * CMOS NVRAM byte 0x5E, bits 0-3. 4590 * CMOS NVRAM byte 0x5E, bits 0-3.
4591 *
4592 * EC 0x31 has the following layout
4593 * Bit 7: unknown function
4594 * Bit 6: unknown function
4595 * Bit 5: Z: honour scale changes, NZ: ignore scale changes
4596 * Bit 4: must be set to zero to avoid problems
4597 * Bit 3-0: backlight brightness level
4598 *
4599 * brightness_get_raw returns status data in the EC 0x31 layout
4183 */ 4600 */
4184static int brightness_get(struct backlight_device *bd) 4601static int brightness_get_raw(int *status)
4185{ 4602{
4186 u8 lec = 0, lcmos = 0, level = 0; 4603 u8 lec = 0, lcmos = 0, level = 0;
4187 4604
4188 if (brightness_mode & 1) { 4605 if (brightness_mode & 1) {
4189 if (!acpi_ec_read(brightness_offset, &lec)) 4606 if (!acpi_ec_read(TP_EC_BACKLIGHT, &lec))
4190 return -EIO; 4607 return -EIO;
4191 lec &= (tp_features.bright_16levels)? 0x0f : 0x07; 4608 level = lec & TP_EC_BACKLIGHT_LVLMSK;
4192 level = lec;
4193 }; 4609 };
4194 if (brightness_mode & 2) { 4610 if (brightness_mode & 2) {
4195 lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS) 4611 lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
@@ -4199,16 +4615,27 @@ static int brightness_get(struct backlight_device *bd)
4199 level = lcmos; 4615 level = lcmos;
4200 } 4616 }
4201 4617
4202 if (brightness_mode == 3 && lec != lcmos) { 4618 if (brightness_mode == 3) {
4203 printk(TPACPI_ERR 4619 *status = lec; /* Prefer EC, CMOS is just a backing store */
4204 "CMOS NVRAM (%u) and EC (%u) do not agree " 4620 lec &= TP_EC_BACKLIGHT_LVLMSK;
4205 "on display brightness level\n", 4621 if (lec == lcmos)
4206 (unsigned int) lcmos, 4622 tp_warned.bright_cmos_ec_unsync = 0;
4207 (unsigned int) lec); 4623 else {
4208 return -EIO; 4624 if (!tp_warned.bright_cmos_ec_unsync) {
4625 printk(TPACPI_ERR
4626 "CMOS NVRAM (%u) and EC (%u) do not "
4627 "agree on display brightness level\n",
4628 (unsigned int) lcmos,
4629 (unsigned int) lec);
4630 tp_warned.bright_cmos_ec_unsync = 1;
4631 }
4632 return -EIO;
4633 }
4634 } else {
4635 *status = level;
4209 } 4636 }
4210 4637
4211 return level; 4638 return 0;
4212} 4639}
4213 4640
4214/* May return EINTR which can always be mapped to ERESTARTSYS */ 4641/* May return EINTR which can always be mapped to ERESTARTSYS */
@@ -4216,19 +4643,22 @@ static int brightness_set(int value)
4216{ 4643{
4217 int cmos_cmd, inc, i, res; 4644 int cmos_cmd, inc, i, res;
4218 int current_value; 4645 int current_value;
4646 int command_bits;
4219 4647
4220 if (value > ((tp_features.bright_16levels)? 15 : 7)) 4648 if (value > ((tp_features.bright_16levels)? 15 : 7) ||
4649 value < 0)
4221 return -EINVAL; 4650 return -EINVAL;
4222 4651
4223 res = mutex_lock_interruptible(&brightness_mutex); 4652 res = mutex_lock_interruptible(&brightness_mutex);
4224 if (res < 0) 4653 if (res < 0)
4225 return res; 4654 return res;
4226 4655
4227 current_value = brightness_get(NULL); 4656 res = brightness_get_raw(&current_value);
4228 if (current_value < 0) { 4657 if (res < 0)
4229 res = current_value;
4230 goto errout; 4658 goto errout;
4231 } 4659
4660 command_bits = current_value & TP_EC_BACKLIGHT_CMDMSK;
4661 current_value &= TP_EC_BACKLIGHT_LVLMSK;
4232 4662
4233 cmos_cmd = value > current_value ? 4663 cmos_cmd = value > current_value ?
4234 TP_CMOS_BRIGHTNESS_UP : 4664 TP_CMOS_BRIGHTNESS_UP :
@@ -4243,7 +4673,8 @@ static int brightness_set(int value)
4243 goto errout; 4673 goto errout;
4244 } 4674 }
4245 if ((brightness_mode & 1) && 4675 if ((brightness_mode & 1) &&
4246 !acpi_ec_write(brightness_offset, i + inc)) { 4676 !acpi_ec_write(TP_EC_BACKLIGHT,
4677 (i + inc) | command_bits)) {
4247 res = -EIO; 4678 res = -EIO;
4248 goto errout;; 4679 goto errout;;
4249 } 4680 }
@@ -4266,106 +4697,23 @@ static int brightness_update_status(struct backlight_device *bd)
4266 bd->props.brightness : 0); 4697 bd->props.brightness : 0);
4267} 4698}
4268 4699
4269static struct backlight_ops ibm_backlight_data = { 4700static int brightness_get(struct backlight_device *bd)
4270 .get_brightness = brightness_get,
4271 .update_status = brightness_update_status,
4272};
4273
4274/* --------------------------------------------------------------------- */
4275
4276static int __init tpacpi_query_bcll_levels(acpi_handle handle)
4277{
4278 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
4279 union acpi_object *obj;
4280 int rc;
4281
4282 if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
4283 obj = (union acpi_object *)buffer.pointer;
4284 if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
4285 printk(TPACPI_ERR "Unknown BCLL data, "
4286 "please report this to %s\n", TPACPI_MAIL);
4287 rc = 0;
4288 } else {
4289 rc = obj->package.count;
4290 }
4291 } else {
4292 return 0;
4293 }
4294
4295 kfree(buffer.pointer);
4296 return rc;
4297}
4298
4299static acpi_status __init brightness_find_bcll(acpi_handle handle, u32 lvl,
4300 void *context, void **rv)
4301{
4302 char name[ACPI_PATH_SEGMENT_LENGTH];
4303 struct acpi_buffer buffer = { sizeof(name), &name };
4304
4305 if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
4306 !strncmp("BCLL", name, sizeof(name) - 1)) {
4307 if (tpacpi_query_bcll_levels(handle) == 16) {
4308 *rv = handle;
4309 return AE_CTRL_TERMINATE;
4310 } else {
4311 return AE_OK;
4312 }
4313 } else {
4314 return AE_OK;
4315 }
4316}
4317
4318static int __init brightness_check_levels(void)
4319{ 4701{
4320 int status; 4702 int status, res;
4321 void *found_node = NULL;
4322 4703
4323 if (!vid_handle) { 4704 res = brightness_get_raw(&status);
4324 TPACPI_ACPIHANDLE_INIT(vid); 4705 if (res < 0)
4325 } 4706 return 0; /* FIXME: teach backlight about error handling */
4326 if (!vid_handle)
4327 return 0;
4328
4329 /* Search for a BCLL package with 16 levels */
4330 status = acpi_walk_namespace(ACPI_TYPE_PACKAGE, vid_handle, 3,
4331 brightness_find_bcll, NULL,
4332 &found_node);
4333
4334 return (ACPI_SUCCESS(status) && found_node != NULL);
4335}
4336
4337static acpi_status __init brightness_find_bcl(acpi_handle handle, u32 lvl,
4338 void *context, void **rv)
4339{
4340 char name[ACPI_PATH_SEGMENT_LENGTH];
4341 struct acpi_buffer buffer = { sizeof(name), &name };
4342 4707
4343 if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) && 4708 return status & TP_EC_BACKLIGHT_LVLMSK;
4344 !strncmp("_BCL", name, sizeof(name) - 1)) {
4345 *rv = handle;
4346 return AE_CTRL_TERMINATE;
4347 } else {
4348 return AE_OK;
4349 }
4350} 4709}
4351 4710
4352static int __init brightness_check_std_acpi_support(void) 4711static struct backlight_ops ibm_backlight_data = {
4353{ 4712 .get_brightness = brightness_get,
4354 int status; 4713 .update_status = brightness_update_status,
4355 void *found_node = NULL; 4714};
4356
4357 if (!vid_handle) {
4358 TPACPI_ACPIHANDLE_INIT(vid);
4359 }
4360 if (!vid_handle)
4361 return 0;
4362
4363 /* Search for a _BCL method, but don't execute it */
4364 status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
4365 brightness_find_bcl, NULL, &found_node);
4366 4715
4367 return (ACPI_SUCCESS(status) && found_node != NULL); 4716/* --------------------------------------------------------------------- */
4368}
4369 4717
4370static int __init brightness_init(struct ibm_init_struct *iibm) 4718static int __init brightness_init(struct ibm_init_struct *iibm)
4371{ 4719{
@@ -4375,13 +4723,19 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
4375 4723
4376 mutex_init(&brightness_mutex); 4724 mutex_init(&brightness_mutex);
4377 4725
4378 if (!brightness_enable) { 4726 /*
4379 dbg_printk(TPACPI_DBG_INIT, 4727 * We always attempt to detect acpi support, so as to switch
4380 "brightness support disabled by " 4728 * Lenovo Vista BIOS to ACPI brightness mode even if we are not
4381 "module parameter\n"); 4729 * going to publish a backlight interface
4382 return 1; 4730 */
4383 } else if (brightness_enable > 1) { 4731 b = tpacpi_check_std_acpi_brightness_support();
4384 if (brightness_check_std_acpi_support()) { 4732 if (b > 0) {
4733 if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) {
4734 printk(TPACPI_NOTICE
4735 "Lenovo BIOS switched to ACPI backlight "
4736 "control mode\n");
4737 }
4738 if (brightness_enable > 1) {
4385 printk(TPACPI_NOTICE 4739 printk(TPACPI_NOTICE
4386 "standard ACPI backlight interface " 4740 "standard ACPI backlight interface "
4387 "available, not loading native one...\n"); 4741 "available, not loading native one...\n");
@@ -4389,6 +4743,22 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
4389 } 4743 }
4390 } 4744 }
4391 4745
4746 if (!brightness_enable) {
4747 dbg_printk(TPACPI_DBG_INIT,
4748 "brightness support disabled by "
4749 "module parameter\n");
4750 return 1;
4751 }
4752
4753 if (b > 16) {
4754 printk(TPACPI_ERR
4755 "Unsupported brightness interface, "
4756 "please contact %s\n", TPACPI_MAIL);
4757 return 1;
4758 }
4759 if (b == 16)
4760 tp_features.bright_16levels = 1;
4761
4392 if (!brightness_mode) { 4762 if (!brightness_mode) {
4393 if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) 4763 if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO)
4394 brightness_mode = 2; 4764 brightness_mode = 2;
@@ -4402,12 +4772,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
4402 if (brightness_mode > 3) 4772 if (brightness_mode > 3)
4403 return -EINVAL; 4773 return -EINVAL;
4404 4774
4405 tp_features.bright_16levels = 4775 if (brightness_get_raw(&b) < 0)
4406 thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO &&
4407 brightness_check_levels();
4408
4409 b = brightness_get(NULL);
4410 if (b < 0)
4411 return 1; 4776 return 1;
4412 4777
4413 if (tp_features.bright_16levels) 4778 if (tp_features.bright_16levels)
@@ -4425,7 +4790,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
4425 4790
4426 ibm_backlight_device->props.max_brightness = 4791 ibm_backlight_device->props.max_brightness =
4427 (tp_features.bright_16levels)? 15 : 7; 4792 (tp_features.bright_16levels)? 15 : 7;
4428 ibm_backlight_device->props.brightness = b; 4793 ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
4429 backlight_update_status(ibm_backlight_device); 4794 backlight_update_status(ibm_backlight_device);
4430 4795
4431 return 0; 4796 return 0;
@@ -5046,11 +5411,11 @@ static void fan_watchdog_reset(void)
5046 if (fan_watchdog_maxinterval > 0 && 5411 if (fan_watchdog_maxinterval > 0 &&
5047 tpacpi_lifecycle != TPACPI_LIFE_EXITING) { 5412 tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
5048 fan_watchdog_active = 1; 5413 fan_watchdog_active = 1;
5049 if (!schedule_delayed_work(&fan_watchdog_task, 5414 if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
5050 msecs_to_jiffies(fan_watchdog_maxinterval 5415 msecs_to_jiffies(fan_watchdog_maxinterval
5051 * 1000))) { 5416 * 1000))) {
5052 printk(TPACPI_ERR 5417 printk(TPACPI_ERR
5053 "failed to schedule the fan watchdog, " 5418 "failed to queue the fan watchdog, "
5054 "watchdog will not trigger\n"); 5419 "watchdog will not trigger\n");
5055 } 5420 }
5056 } else 5421 } else
@@ -5420,7 +5785,7 @@ static void fan_exit(void)
5420 &driver_attr_fan_watchdog); 5785 &driver_attr_fan_watchdog);
5421 5786
5422 cancel_delayed_work(&fan_watchdog_task); 5787 cancel_delayed_work(&fan_watchdog_task);
5423 flush_scheduled_work(); 5788 flush_workqueue(tpacpi_wq);
5424} 5789}
5425 5790
5426static int fan_read(char *p) 5791static int fan_read(char *p)
@@ -5826,10 +6191,13 @@ static void __init get_thinkpad_model_data(struct thinkpad_id_data *tp)
5826 6191
5827 tp->model_str = kstrdup(dmi_get_system_info(DMI_PRODUCT_VERSION), 6192 tp->model_str = kstrdup(dmi_get_system_info(DMI_PRODUCT_VERSION),
5828 GFP_KERNEL); 6193 GFP_KERNEL);
5829 if (strnicmp(tp->model_str, "ThinkPad", 8) != 0) { 6194 if (tp->model_str && strnicmp(tp->model_str, "ThinkPad", 8) != 0) {
5830 kfree(tp->model_str); 6195 kfree(tp->model_str);
5831 tp->model_str = NULL; 6196 tp->model_str = NULL;
5832 } 6197 }
6198
6199 tp->nummodel_str = kstrdup(dmi_get_system_info(DMI_PRODUCT_NAME),
6200 GFP_KERNEL);
5833} 6201}
5834 6202
5835static int __init probe_for_thinkpad(void) 6203static int __init probe_for_thinkpad(void)
@@ -6071,6 +6439,9 @@ static void thinkpad_acpi_module_exit(void)
6071 if (proc_dir) 6439 if (proc_dir)
6072 remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir); 6440 remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
6073 6441
6442 if (tpacpi_wq)
6443 destroy_workqueue(tpacpi_wq);
6444
6074 kfree(thinkpad_id.bios_version_str); 6445 kfree(thinkpad_id.bios_version_str);
6075 kfree(thinkpad_id.ec_version_str); 6446 kfree(thinkpad_id.ec_version_str);
6076 kfree(thinkpad_id.model_str); 6447 kfree(thinkpad_id.model_str);
@@ -6101,6 +6472,12 @@ static int __init thinkpad_acpi_module_init(void)
6101 TPACPI_ACPIHANDLE_INIT(ecrd); 6472 TPACPI_ACPIHANDLE_INIT(ecrd);
6102 TPACPI_ACPIHANDLE_INIT(ecwr); 6473 TPACPI_ACPIHANDLE_INIT(ecwr);
6103 6474
6475 tpacpi_wq = create_singlethread_workqueue(TPACPI_WORKQUEUE_NAME);
6476 if (!tpacpi_wq) {
6477 thinkpad_acpi_module_exit();
6478 return -ENOMEM;
6479 }
6480
6104 proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir); 6481 proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
6105 if (!proc_dir) { 6482 if (!proc_dir) {
6106 printk(TPACPI_ERR 6483 printk(TPACPI_ERR
@@ -6223,6 +6600,8 @@ static int __init thinkpad_acpi_module_init(void)
6223/* Please remove this in year 2009 */ 6600/* Please remove this in year 2009 */
6224MODULE_ALIAS("ibm_acpi"); 6601MODULE_ALIAS("ibm_acpi");
6225 6602
6603MODULE_ALIAS(TPACPI_DRVR_SHORTNAME);
6604
6226/* 6605/*
6227 * DMI matching for module autoloading 6606 * DMI matching for module autoloading
6228 * 6607 *
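Much of the thinkpad_acpi 0.20 rework above hangs off one pattern: LED class brightness_set callbacks can be invoked from contexts that must not sleep, while the ThinkPad's EC/ACPI accesses can, so the driver records the requested state in its tpacpi_led_classdev wrapper and defers the hardware write to the new tpacpi_wq workqueue. A condensed sketch of that pattern against the 2.6.25-era LED and workqueue APIs; the struct, function and workqueue names here are illustrative, not the driver's, and the hardware write is stubbed out:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/workqueue.h>

struct deferred_led {
	struct led_classdev cdev;
	struct work_struct work;
	enum led_brightness new_brightness;
};

static struct workqueue_struct *led_wq;

static void led_work_fn(struct work_struct *work)
{
	struct deferred_led *led =
		container_of(work, struct deferred_led, work);

	/* Process context: sleeping EC/ACPI I/O would be safe here. */
	pr_debug("LED set to %d\n", led->new_brightness);
}

/* May be called from atomic context: record the request and defer it. */
static void deferred_led_set(struct led_classdev *cdev,
			     enum led_brightness value)
{
	struct deferred_led *led =
		container_of(cdev, struct deferred_led, cdev);

	led->new_brightness = value;
	queue_work(led_wq, &led->work);
}

static struct deferred_led example_led = {
	.cdev = {
		.name		= "sketch::example",
		.brightness_set	= deferred_led_set,
	},
};

static int deferred_led_register(struct device *parent)
{
	led_wq = create_singlethread_workqueue("sketch_ledd");
	if (!led_wq)
		return -ENOMEM;
	INIT_WORK(&example_led.work, led_work_fn);
	return led_classdev_register(parent, &example_led.cdev);
}

The same record-then-queue_work() shape is used for the ThinkLight (light_sysfs_set) and the status LEDs (led_sysfs_set, led_sysfs_blink_set), and fan_watchdog_reset() moves onto the same queue, so module exit only has one workqueue to flush and destroy.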
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 365024b83d3d..35508584ac2a 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -340,7 +340,7 @@ checkstatus:
340 340
341 /* SPI R3, R4, or R7 == R1 + 4 bytes */ 341 /* SPI R3, R4, or R7 == R1 + 4 bytes */
342 case MMC_RSP_SPI_R3: 342 case MMC_RSP_SPI_R3:
343 cmd->resp[1] = be32_to_cpu(get_unaligned((u32 *)cp)); 343 cmd->resp[1] = get_unaligned_be32(cp);
344 break; 344 break;
345 345
346 /* SPI R1 == just one status byte */ 346 /* SPI R1 == just one status byte */
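The mmc_spi change is an equivalence, not a behaviour change: be32_to_cpu(get_unaligned((u32 *)cp)) and get_unaligned_be32(cp) both read four possibly-unaligned bytes as a big-endian value; the new helper does it in one call and without the cast. A portable sketch of what such a helper boils down to (load_be32 is an illustrative stand-in, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

/* Assemble a big-endian u32 from four bytes at any alignment. */
static uint32_t load_be32(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	/* SPI R3/R4/R7: one R1 status byte followed by a 4-byte payload */
	uint8_t resp[5] = { 0x00, 0x01, 0x02, 0x03, 0x04 };

	printf("0x%08x\n", (unsigned)load_be32(resp + 1));	/* 0x01020304 */
	return 0;
}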
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 95244a7e7353..626ac083f4e0 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -213,9 +213,10 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
213 void __iomem *base = host->base; 213 void __iomem *base = host->base;
214 char *ptr = buffer; 214 char *ptr = buffer;
215 u32 status; 215 u32 status;
216 int host_remain = host->size;
216 217
217 do { 218 do {
218 int count = host->size - (readl(base + MMCIFIFOCNT) << 2); 219 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);
219 220
220 if (count > remain) 221 if (count > remain)
221 count = remain; 222 count = remain;
@@ -227,6 +228,7 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
227 228
228 ptr += count; 229 ptr += count;
229 remain -= count; 230 remain -= count;
231 host_remain -= count;
230 232
231 if (remain == 0) 233 if (remain == 0)
232 break; 234 break;
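The mmci_pio_read() fix caches host->size in a local host_remain and decrements it as data is copied out, so later passes around the loop compute the FIFO arithmetic from an up-to-date remainder instead of the unchanged host->size field. A toy model of the difference (the meaning given to the FIFO count here is an assumption made for illustration, not taken from PL18x documentation):

#include <stdio.h>

int main(void)
{
	int transfer_size = 512;		/* host->size on entry */
	int host_remain   = transfer_size;	/* what the fix tracks */
	int fifocnt_bytes = 512 - 64;		/* 64 bytes arrived so far */
	int drained       = 32;			/* copied out on pass one */

	host_remain -= drained;

	/* bytes believed to be waiting in the FIFO on the next pass: */
	printf("stale  host->size: %d\n", transfer_size - fifocnt_bytes);
	printf("tracked remainder: %d\n", host_remain - fifocnt_bytes);
	return 0;
}

With the stale figure the loop would try to read 64 bytes again even though half of them were already consumed; the tracked remainder yields the correct 32.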
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e812df607a5c..fcd1aeccdf93 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -82,9 +82,8 @@ static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
82static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **); 82static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
83 83
84static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, 84static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
85 size_t *retlen, u_char **mtdbuf); 85 size_t *retlen, void **virt, resource_size_t *phys);
86static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, 86static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
87 size_t len);
88 87
89static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 88static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
90static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 89static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
@@ -1240,7 +1239,8 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
1240 return ret; 1239 return ret;
1241} 1240}
1242 1241
1243static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf) 1242static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1243 size_t *retlen, void **virt, resource_size_t *phys)
1244{ 1244{
1245 struct map_info *map = mtd->priv; 1245 struct map_info *map = mtd->priv;
1246 struct cfi_private *cfi = map->fldrv_priv; 1246 struct cfi_private *cfi = map->fldrv_priv;
@@ -1257,8 +1257,10 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
1257 chipnum = (from >> cfi->chipshift); 1257 chipnum = (from >> cfi->chipshift);
1258 ofs = from - (chipnum << cfi->chipshift); 1258 ofs = from - (chipnum << cfi->chipshift);
1259 1259
1260 *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs; 1260 *virt = map->virt + cfi->chips[chipnum].start + ofs;
1261 *retlen = 0; 1261 *retlen = 0;
1262 if (phys)
1263 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1262 1264
1263 while (len) { 1265 while (len) {
1264 unsigned long thislen; 1266 unsigned long thislen;
@@ -1291,7 +1293,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
1291 return 0; 1293 return 0;
1292} 1294}
1293 1295
1294static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) 1296static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1295{ 1297{
1296 struct map_info *map = mtd->priv; 1298 struct map_info *map = mtd->priv;
1297 struct cfi_private *cfi = map->fldrv_priv; 1299 struct cfi_private *cfi = map->fldrv_priv;
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index bf485ff49457..0399be178620 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -48,18 +48,21 @@ static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
48} 48}
49 49
50static int ram_point(struct mtd_info *mtd, loff_t from, size_t len, 50static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
51 size_t *retlen, u_char **mtdbuf) 51 size_t *retlen, void **virt, resource_size_t *phys)
52{ 52{
53 if (from + len > mtd->size) 53 if (from + len > mtd->size)
54 return -EINVAL; 54 return -EINVAL;
55 55
56 *mtdbuf = mtd->priv + from; 56 /* can we return a physical address with this driver? */
57 if (phys)
58 return -EINVAL;
59
60 *virt = mtd->priv + from;
57 *retlen = len; 61 *retlen = len;
58 return 0; 62 return 0;
59} 63}
60 64
61static void ram_unpoint(struct mtd_info *mtd, u_char * addr, loff_t from, 65static void ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
62 size_t len)
63{ 66{
64} 67}
65 68
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 5f960182da95..c7987b1c5e01 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -57,20 +57,21 @@ static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
57} 57}
58 58
59static int phram_point(struct mtd_info *mtd, loff_t from, size_t len, 59static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
60 size_t *retlen, u_char **mtdbuf) 60 size_t *retlen, void **virt, resource_size_t *phys)
61{ 61{
62 u_char *start = mtd->priv;
63
64 if (from + len > mtd->size) 62 if (from + len > mtd->size)
65 return -EINVAL; 63 return -EINVAL;
66 64
67 *mtdbuf = start + from; 65 /* can we return a physical address with this driver? */
66 if (phys)
67 return -EINVAL;
68
69 *virt = mtd->priv + from;
68 *retlen = len; 70 *retlen = len;
69 return 0; 71 return 0;
70} 72}
71 73
72static void phram_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from, 74static void phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
73 size_t len)
74{ 75{
75} 76}
76 77
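The cfi_cmdset_0001, mtdram and phram hunks (and the pmc551 one below) all track the same MTD interface change: point() now returns its mapping through void **virt, optionally reports a physical address through resource_size_t *phys, and unpoint() is reduced to (mtd, from, len). A minimal sketch of a RAM-backed pair against the new prototypes, in the style of mtdram/phram (kernel code, illustrative only; it assumes mtd->priv points at the backing buffer):

#include <linux/mtd/mtd.h>

static int sketch_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	if (from + len > mtd->size)
		return -EINVAL;

	/* A vmalloc'ed buffer has no single physical address to report. */
	if (phys)
		return -EINVAL;

	*virt = mtd->priv + from;
	*retlen = len;
	return 0;
}

static void sketch_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	/* Nothing to release for a directly mapped RAM device. */
}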
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 7060a0895ce2..bc9981749064 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -134,7 +134,8 @@ static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
134 eoff_lo = end & (priv->asize - 1); 134 eoff_lo = end & (priv->asize - 1);
135 soff_lo = instr->addr & (priv->asize - 1); 135 soff_lo = instr->addr & (priv->asize - 1);
136 136
137 pmc551_point(mtd, instr->addr, instr->len, &retlen, &ptr); 137 pmc551_point(mtd, instr->addr, instr->len, &retlen,
138 (void **)&ptr, NULL);
138 139
139 if (soff_hi == eoff_hi || mtd->size == priv->asize) { 140 if (soff_hi == eoff_hi || mtd->size == priv->asize) {
140 /* The whole thing fits within one access, so just one shot 141 /* The whole thing fits within one access, so just one shot
@@ -154,7 +155,8 @@ static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
154 } 155 }
155 soff_hi += priv->asize; 156 soff_hi += priv->asize;
156 pmc551_point(mtd, (priv->base_map0 | soff_hi), 157 pmc551_point(mtd, (priv->base_map0 | soff_hi),
157 priv->asize, &retlen, &ptr); 158 priv->asize, &retlen,
159 (void **)&ptr, NULL);
158 } 160 }
159 memset(ptr, 0xff, eoff_lo); 161 memset(ptr, 0xff, eoff_lo);
160 } 162 }
@@ -170,7 +172,7 @@ static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
170} 172}
171 173
172static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len, 174static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
173 size_t * retlen, u_char ** mtdbuf) 175 size_t *retlen, void **virt, resource_size_t *phys)
174{ 176{
175 struct mypriv *priv = mtd->priv; 177 struct mypriv *priv = mtd->priv;
176 u32 soff_hi; 178 u32 soff_hi;
@@ -188,6 +190,10 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
188 return -EINVAL; 190 return -EINVAL;
189 } 191 }
190 192
193 /* can we return a physical address with this driver? */
194 if (phys)
195 return -EINVAL;
196
191 soff_hi = from & ~(priv->asize - 1); 197 soff_hi = from & ~(priv->asize - 1);
192 soff_lo = from & (priv->asize - 1); 198 soff_lo = from & (priv->asize - 1);
193 199
@@ -198,13 +204,12 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
198 priv->curr_map0 = soff_hi; 204 priv->curr_map0 = soff_hi;
199 } 205 }
200 206
201 *mtdbuf = priv->start + soff_lo; 207 *virt = priv->start + soff_lo;
202 *retlen = len; 208 *retlen = len;
203 return 0; 209 return 0;
204} 210}
205 211
206static void pmc551_unpoint(struct mtd_info *mtd, u_char * addr, loff_t from, 212static void pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
207 size_t len)
208{ 213{
209#ifdef CONFIG_MTD_PMC551_DEBUG 214#ifdef CONFIG_MTD_PMC551_DEBUG
210 printk(KERN_DEBUG "pmc551_unpoint()\n"); 215 printk(KERN_DEBUG "pmc551_unpoint()\n");
@@ -242,7 +247,7 @@ static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
242 soff_lo = from & (priv->asize - 1); 247 soff_lo = from & (priv->asize - 1);
243 eoff_lo = end & (priv->asize - 1); 248 eoff_lo = end & (priv->asize - 1);
244 249
245 pmc551_point(mtd, from, len, retlen, &ptr); 250 pmc551_point(mtd, from, len, retlen, (void **)&ptr, NULL);
246 251
247 if (soff_hi == eoff_hi) { 252 if (soff_hi == eoff_hi) {
248 /* The whole thing fits within one access, so just one shot 253 /* The whole thing fits within one access, so just one shot
@@ -263,7 +268,8 @@ static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
263 goto out; 268 goto out;
264 } 269 }
265 soff_hi += priv->asize; 270 soff_hi += priv->asize;
266 pmc551_point(mtd, soff_hi, priv->asize, retlen, &ptr); 271 pmc551_point(mtd, soff_hi, priv->asize, retlen,
272 (void **)&ptr, NULL);
267 } 273 }
268 memcpy(copyto, ptr, eoff_lo); 274 memcpy(copyto, ptr, eoff_lo);
269 copyto += eoff_lo; 275 copyto += eoff_lo;
@@ -308,7 +314,7 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
308 soff_lo = to & (priv->asize - 1); 314 soff_lo = to & (priv->asize - 1);
309 eoff_lo = end & (priv->asize - 1); 315 eoff_lo = end & (priv->asize - 1);
310 316
311 pmc551_point(mtd, to, len, retlen, &ptr); 317 pmc551_point(mtd, to, len, retlen, (void **)&ptr, NULL);
312 318
313 if (soff_hi == eoff_hi) { 319 if (soff_hi == eoff_hi) {
314 /* The whole thing fits within one access, so just one shot 320 /* The whole thing fits within one access, so just one shot
@@ -329,7 +335,8 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
329 goto out; 335 goto out;
330 } 336 }
331 soff_hi += priv->asize; 337 soff_hi += priv->asize;
332 pmc551_point(mtd, soff_hi, priv->asize, retlen, &ptr); 338 pmc551_point(mtd, soff_hi, priv->asize, retlen,
339 (void **)&ptr, NULL);
333 } 340 }
334 memcpy(ptr, copyfrom, eoff_lo); 341 memcpy(ptr, copyfrom, eoff_lo);
335 copyfrom += eoff_lo; 342 copyfrom += eoff_lo;
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index d293add1857c..cb86db746f28 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -76,8 +76,9 @@ static char *map;
76static slram_mtd_list_t *slram_mtdlist = NULL; 76static slram_mtd_list_t *slram_mtdlist = NULL;
77 77
78static int slram_erase(struct mtd_info *, struct erase_info *); 78static int slram_erase(struct mtd_info *, struct erase_info *);
79static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, u_char **); 79static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
80static void slram_unpoint(struct mtd_info *, u_char *, loff_t, size_t); 80 resource_size_t *);
81static void slram_unpoint(struct mtd_info *, loff_t, size_t);
81static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *); 82static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
82static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 83static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
83 84
@@ -104,19 +105,23 @@ static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
104} 105}
105 106
106static int slram_point(struct mtd_info *mtd, loff_t from, size_t len, 107static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
107 size_t *retlen, u_char **mtdbuf) 108 size_t *retlen, void **virt, resource_size_t *phys)
108{ 109{
109 slram_priv_t *priv = mtd->priv; 110 slram_priv_t *priv = mtd->priv;
110 111
112 /* can we return a physical address with this driver? */
113 if (phys)
114 return -EINVAL;
115
111 if (from + len > mtd->size) 116 if (from + len > mtd->size)
112 return -EINVAL; 117 return -EINVAL;
113 118
114 *mtdbuf = priv->start + from; 119 *virt = priv->start + from;
115 *retlen = len; 120 *retlen = len;
116 return(0); 121 return(0);
117} 122}
118 123
119static void slram_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) 124static void slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
120{ 125{
121} 126}
122 127
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index f0b10ca05029..3eb2643b2328 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -209,7 +209,7 @@ static int platram_probe(struct platform_device *pdev)
209 /* probe for the right mtd map driver 209 /* probe for the right mtd map driver
210 * supplied by the platform_data struct */ 210 * supplied by the platform_data struct */
211 211
212 if (pdata->map_probes != 0) { 212 if (pdata->map_probes) {
213 const char **map_probes = pdata->map_probes; 213 const char **map_probes = pdata->map_probes;
214 214
215 for ( ; !info->mtd && *map_probes; map_probes++) 215 for ( ; !info->mtd && *map_probes; map_probes++)
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 14ffb1a9302a..c42f4b83f686 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -40,10 +40,12 @@ struct mtd_partition uclinux_romfs[] = {
40/****************************************************************************/ 40/****************************************************************************/
41 41
42int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len, 42int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
43 size_t *retlen, u_char **mtdbuf) 43 size_t *retlen, void **virt, resource_size_t *phys)
44{ 44{
45 struct map_info *map = mtd->priv; 45 struct map_info *map = mtd->priv;
46 *mtdbuf = (u_char *) (map->virt + ((int) from)); 46 *virt = map->virt + from;
47 if (phys)
48 *phys = map->phys + from;
47 *retlen = len; 49 *retlen = len;
48 return(0); 50 return(0);
49} 51}
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index c66902df3171..07c701169344 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -68,7 +68,7 @@ static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
68} 68}
69 69
70static int part_point (struct mtd_info *mtd, loff_t from, size_t len, 70static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
71 size_t *retlen, u_char **buf) 71 size_t *retlen, void **virt, resource_size_t *phys)
72{ 72{
73 struct mtd_part *part = PART(mtd); 73 struct mtd_part *part = PART(mtd);
74 if (from >= mtd->size) 74 if (from >= mtd->size)
@@ -76,14 +76,14 @@ static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
76 else if (from + len > mtd->size) 76 else if (from + len > mtd->size)
77 len = mtd->size - from; 77 len = mtd->size - from;
78 return part->master->point (part->master, from + part->offset, 78 return part->master->point (part->master, from + part->offset,
79 len, retlen, buf); 79 len, retlen, virt, phys);
80} 80}
81 81
82static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) 82static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
83{ 83{
84 struct mtd_part *part = PART(mtd); 84 struct mtd_part *part = PART(mtd);
85 85
86 part->master->unpoint (part->master, addr, from + part->offset, len); 86 part->master->unpoint(part->master, from + part->offset, len);
87} 87}
88 88
89static int part_read_oob(struct mtd_info *mtd, loff_t from, 89static int part_read_oob(struct mtd_info *mtd, loff_t from,
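For completeness, a caller-side sketch under the same reworked prototypes; the helper name and error handling here are illustrative only, not code from the patch:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

static int example_with_pointed_region(struct mtd_info *mtd, loff_t ofs,
                                       size_t len)
{
        size_t retlen;
        void *virt;
        int ret;

        if (!mtd->point || !mtd->unpoint)
                return -EOPNOTSUPP;

        /* NULL for phys: this caller only needs the virtual mapping */
        ret = mtd->point(mtd, ofs, len, &retlen, &virt, NULL);
        if (ret)
                return ret;

        /* ... work on the retlen bytes at virt ... */

        mtd->unpoint(mtd, ofs, retlen);
        return 0;
}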
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index 414ceaecdb3a..0adb287027a2 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -94,6 +94,24 @@ struct at91_nand_host {
94}; 94};
95 95
96/* 96/*
97 * Enable NAND.
98 */
99static void at91_nand_enable(struct at91_nand_host *host)
100{
101 if (host->board->enable_pin)
102 at91_set_gpio_value(host->board->enable_pin, 0);
103}
104
105/*
106 * Disable NAND.
107 */
108static void at91_nand_disable(struct at91_nand_host *host)
109{
110 if (host->board->enable_pin)
111 at91_set_gpio_value(host->board->enable_pin, 1);
112}
113
114/*
97 * Hardware specific access to control-lines 115 * Hardware specific access to control-lines
98 */ 116 */
99static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 117static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
@@ -101,11 +119,11 @@ static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
101 struct nand_chip *nand_chip = mtd->priv; 119 struct nand_chip *nand_chip = mtd->priv;
102 struct at91_nand_host *host = nand_chip->priv; 120 struct at91_nand_host *host = nand_chip->priv;
103 121
104 if (host->board->enable_pin && (ctrl & NAND_CTRL_CHANGE)) { 122 if (ctrl & NAND_CTRL_CHANGE) {
105 if (ctrl & NAND_NCE) 123 if (ctrl & NAND_NCE)
106 at91_set_gpio_value(host->board->enable_pin, 0); 124 at91_nand_enable(host);
107 else 125 else
108 at91_set_gpio_value(host->board->enable_pin, 1); 126 at91_nand_disable(host);
109 } 127 }
110 if (cmd == NAND_CMD_NONE) 128 if (cmd == NAND_CMD_NONE)
111 return; 129 return;
@@ -128,24 +146,6 @@ static int at91_nand_device_ready(struct mtd_info *mtd)
128} 146}
129 147
130/* 148/*
131 * Enable NAND.
132 */
133static void at91_nand_enable(struct at91_nand_host *host)
134{
135 if (host->board->enable_pin)
136 at91_set_gpio_value(host->board->enable_pin, 0);
137}
138
139/*
140 * Disable NAND.
141 */
142static void at91_nand_disable(struct at91_nand_host *host)
143{
144 if (host->board->enable_pin)
145 at91_set_gpio_value(host->board->enable_pin, 1);
146}
147
148/*
149 * write oob for small pages 149 * write oob for small pages
150 */ 150 */
151static int at91_nand_write_oob_512(struct mtd_info *mtd, 151static int at91_nand_write_oob_512(struct mtd_info *mtd,
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 9c6573419f5a..fdfb2b2cb734 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -670,7 +670,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
670 memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length); 670 memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
671 } 671 }
672 skb->protocol = eth_type_trans(skb,dev); 672 skb->protocol = eth_type_trans(skb,dev);
673 adapter->stats.rx_bytes += skb->len; 673 dev->stats.rx_bytes += skb->len;
674 netif_rx(skb); 674 netif_rx(skb);
675 dev->last_rx = jiffies; 675 dev->last_rx = jiffies;
676 } 676 }
@@ -773,12 +773,12 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
773 * received board statistics 773 * received board statistics
774 */ 774 */
775 case CMD_NETWORK_STATISTICS_RESPONSE: 775 case CMD_NETWORK_STATISTICS_RESPONSE:
776 adapter->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv; 776 dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
777 adapter->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit; 777 dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
778 adapter->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC; 778 dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
779 adapter->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align; 779 dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
780 adapter->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun; 780 dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
781 adapter->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res; 781 dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
782 adapter->got[CMD_NETWORK_STATISTICS] = 1; 782 adapter->got[CMD_NETWORK_STATISTICS] = 1;
783 if (elp_debug >= 3) 783 if (elp_debug >= 3)
784 printk(KERN_DEBUG "%s: interrupt - statistics response received\n", dev->name); 784 printk(KERN_DEBUG "%s: interrupt - statistics response received\n", dev->name);
@@ -794,11 +794,11 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
794 break; 794 break;
795 switch (adapter->irx_pcb.data.xmit_resp.c_stat) { 795 switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
796 case 0xffff: 796 case 0xffff:
797 adapter->stats.tx_aborted_errors++; 797 dev->stats.tx_aborted_errors++;
798 printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name); 798 printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name);
799 break; 799 break;
800 case 0xfffe: 800 case 0xfffe:
801 adapter->stats.tx_fifo_errors++; 801 dev->stats.tx_fifo_errors++;
802 printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name); 802 printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name);
803 break; 803 break;
804 } 804 }
@@ -986,7 +986,7 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb)
986 return false; 986 return false;
987 } 987 }
988 988
989 adapter->stats.tx_bytes += nlen; 989 dev->stats.tx_bytes += nlen;
990 990
991 /* 991 /*
992 * send the adapter a transmit packet command. Ignore segment and offset 992 * send the adapter a transmit packet command. Ignore segment and offset
@@ -1041,7 +1041,6 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb)
1041 1041
1042static void elp_timeout(struct net_device *dev) 1042static void elp_timeout(struct net_device *dev)
1043{ 1043{
1044 elp_device *adapter = dev->priv;
1045 int stat; 1044 int stat;
1046 1045
1047 stat = inb_status(dev->base_addr); 1046 stat = inb_status(dev->base_addr);
@@ -1049,7 +1048,7 @@ static void elp_timeout(struct net_device *dev)
1049 if (elp_debug >= 1) 1048 if (elp_debug >= 1)
1050 printk(KERN_DEBUG "%s: status %#02x\n", dev->name, stat); 1049 printk(KERN_DEBUG "%s: status %#02x\n", dev->name, stat);
1051 dev->trans_start = jiffies; 1050 dev->trans_start = jiffies;
1052 adapter->stats.tx_dropped++; 1051 dev->stats.tx_dropped++;
1053 netif_wake_queue(dev); 1052 netif_wake_queue(dev);
1054} 1053}
1055 1054
@@ -1113,7 +1112,7 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev)
1113 /* If the device is closed, just return the latest stats we have, 1112 /* If the device is closed, just return the latest stats we have,
1114 - we cannot ask from the adapter without interrupts */ 1113 - we cannot ask from the adapter without interrupts */
1115 if (!netif_running(dev)) 1114 if (!netif_running(dev))
1116 return &adapter->stats; 1115 return &dev->stats;
1117 1116
1118 /* send a get statistics command to the board */ 1117 /* send a get statistics command to the board */
1119 adapter->tx_pcb.command = CMD_NETWORK_STATISTICS; 1118 adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
@@ -1126,12 +1125,12 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev)
1126 while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout)); 1125 while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout));
1127 if (time_after_eq(jiffies, timeout)) { 1126 if (time_after_eq(jiffies, timeout)) {
1128 TIMEOUT_MSG(__LINE__); 1127 TIMEOUT_MSG(__LINE__);
1129 return &adapter->stats; 1128 return &dev->stats;
1130 } 1129 }
1131 } 1130 }
1132 1131
1133 /* statistics are now up to date */ 1132 /* statistics are now up to date */
1134 return &adapter->stats; 1133 return &dev->stats;
1135} 1134}
1136 1135
1137 1136
@@ -1571,7 +1570,6 @@ static int __init elplus_setup(struct net_device *dev)
1571 dev->set_multicast_list = elp_set_mc_list; /* local */ 1570 dev->set_multicast_list = elp_set_mc_list; /* local */
1572 dev->ethtool_ops = &netdev_ethtool_ops; /* local */ 1571 dev->ethtool_ops = &netdev_ethtool_ops; /* local */
1573 1572
1574 memset(&(adapter->stats), 0, sizeof(struct net_device_stats));
1575 dev->mem_start = dev->mem_end = 0; 1573 dev->mem_start = dev->mem_end = 0;
1576 1574
1577 err = register_netdev(dev); 1575 err = register_netdev(dev);
diff --git a/drivers/net/3c505.h b/drivers/net/3c505.h
index 1910cb1dc787..04df2a9002b6 100644
--- a/drivers/net/3c505.h
+++ b/drivers/net/3c505.h
@@ -264,7 +264,6 @@ typedef struct {
264 pcb_struct rx_pcb; /* PCB for foreground receiving */ 264 pcb_struct rx_pcb; /* PCB for foreground receiving */
265 pcb_struct itx_pcb; /* PCB for background sending */ 265 pcb_struct itx_pcb; /* PCB for background sending */
266 pcb_struct irx_pcb; /* PCB for background receiving */ 266 pcb_struct irx_pcb; /* PCB for background receiving */
267 struct net_device_stats stats;
268 267
269 void *dma_buffer; 268 void *dma_buffer;
270 269
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 54dac0696d91..e6c545fe5f58 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -167,7 +167,6 @@ enum RxFilter {
167enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA }; 167enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA };
168 168
169struct el3_private { 169struct el3_private {
170 struct net_device_stats stats;
171 spinlock_t lock; 170 spinlock_t lock;
172 /* skb send-queue */ 171 /* skb send-queue */
173 int head, size; 172 int head, size;
@@ -794,7 +793,6 @@ el3_open(struct net_device *dev)
794static void 793static void
795el3_tx_timeout (struct net_device *dev) 794el3_tx_timeout (struct net_device *dev)
796{ 795{
797 struct el3_private *lp = netdev_priv(dev);
798 int ioaddr = dev->base_addr; 796 int ioaddr = dev->base_addr;
799 797
800 /* Transmitter timeout, serious problems. */ 798 /* Transmitter timeout, serious problems. */
@@ -802,7 +800,7 @@ el3_tx_timeout (struct net_device *dev)
802 "Tx FIFO room %d.\n", 800 "Tx FIFO room %d.\n",
803 dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), 801 dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
804 inw(ioaddr + TX_FREE)); 802 inw(ioaddr + TX_FREE));
805 lp->stats.tx_errors++; 803 dev->stats.tx_errors++;
806 dev->trans_start = jiffies; 804 dev->trans_start = jiffies;
807 /* Issue TX_RESET and TX_START commands. */ 805 /* Issue TX_RESET and TX_START commands. */
808 outw(TxReset, ioaddr + EL3_CMD); 806 outw(TxReset, ioaddr + EL3_CMD);
@@ -820,7 +818,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
820 818
821 netif_stop_queue (dev); 819 netif_stop_queue (dev);
822 820
823 lp->stats.tx_bytes += skb->len; 821 dev->stats.tx_bytes += skb->len;
824 822
825 if (el3_debug > 4) { 823 if (el3_debug > 4) {
826 printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n", 824 printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
@@ -881,7 +879,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
881 int i = 4; 879 int i = 4;
882 880
883 while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { 881 while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
884 if (tx_status & 0x38) lp->stats.tx_aborted_errors++; 882 if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
885 if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); 883 if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
886 if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); 884 if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
887 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ 885 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
@@ -931,12 +929,11 @@ el3_interrupt(int irq, void *dev_id)
931 outw(AckIntr | RxEarly, ioaddr + EL3_CMD); 929 outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
932 } 930 }
933 if (status & TxComplete) { /* Really Tx error. */ 931 if (status & TxComplete) { /* Really Tx error. */
934 struct el3_private *lp = netdev_priv(dev);
935 short tx_status; 932 short tx_status;
936 int i = 4; 933 int i = 4;
937 934
938 while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { 935 while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
939 if (tx_status & 0x38) lp->stats.tx_aborted_errors++; 936 if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
940 if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); 937 if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
941 if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); 938 if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
942 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ 939 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
@@ -1002,7 +999,7 @@ el3_get_stats(struct net_device *dev)
1002 spin_lock_irqsave(&lp->lock, flags); 999 spin_lock_irqsave(&lp->lock, flags);
1003 update_stats(dev); 1000 update_stats(dev);
1004 spin_unlock_irqrestore(&lp->lock, flags); 1001 spin_unlock_irqrestore(&lp->lock, flags);
1005 return &lp->stats; 1002 return &dev->stats;
1006} 1003}
1007 1004
1008/* Update statistics. We change to register window 6, so this should be run 1005/* Update statistics. We change to register window 6, so this should be run
@@ -1012,7 +1009,6 @@ el3_get_stats(struct net_device *dev)
1012 */ 1009 */
1013static void update_stats(struct net_device *dev) 1010static void update_stats(struct net_device *dev)
1014{ 1011{
1015 struct el3_private *lp = netdev_priv(dev);
1016 int ioaddr = dev->base_addr; 1012 int ioaddr = dev->base_addr;
1017 1013
1018 if (el3_debug > 5) 1014 if (el3_debug > 5)
@@ -1021,13 +1017,13 @@ static void update_stats(struct net_device *dev)
1021 outw(StatsDisable, ioaddr + EL3_CMD); 1017 outw(StatsDisable, ioaddr + EL3_CMD);
1022 /* Switch to the stats window, and read everything. */ 1018 /* Switch to the stats window, and read everything. */
1023 EL3WINDOW(6); 1019 EL3WINDOW(6);
1024 lp->stats.tx_carrier_errors += inb(ioaddr + 0); 1020 dev->stats.tx_carrier_errors += inb(ioaddr + 0);
1025 lp->stats.tx_heartbeat_errors += inb(ioaddr + 1); 1021 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
1026 /* Multiple collisions. */ inb(ioaddr + 2); 1022 /* Multiple collisions. */ inb(ioaddr + 2);
1027 lp->stats.collisions += inb(ioaddr + 3); 1023 dev->stats.collisions += inb(ioaddr + 3);
1028 lp->stats.tx_window_errors += inb(ioaddr + 4); 1024 dev->stats.tx_window_errors += inb(ioaddr + 4);
1029 lp->stats.rx_fifo_errors += inb(ioaddr + 5); 1025 dev->stats.rx_fifo_errors += inb(ioaddr + 5);
1030 lp->stats.tx_packets += inb(ioaddr + 6); 1026 dev->stats.tx_packets += inb(ioaddr + 6);
1031 /* Rx packets */ inb(ioaddr + 7); 1027 /* Rx packets */ inb(ioaddr + 7);
1032 /* Tx deferrals */ inb(ioaddr + 8); 1028 /* Tx deferrals */ inb(ioaddr + 8);
1033 inw(ioaddr + 10); /* Total Rx and Tx octets. */ 1029 inw(ioaddr + 10); /* Total Rx and Tx octets. */
@@ -1042,7 +1038,6 @@ static void update_stats(struct net_device *dev)
1042static int 1038static int
1043el3_rx(struct net_device *dev) 1039el3_rx(struct net_device *dev)
1044{ 1040{
1045 struct el3_private *lp = netdev_priv(dev);
1046 int ioaddr = dev->base_addr; 1041 int ioaddr = dev->base_addr;
1047 short rx_status; 1042 short rx_status;
1048 1043
@@ -1054,21 +1049,21 @@ el3_rx(struct net_device *dev)
1054 short error = rx_status & 0x3800; 1049 short error = rx_status & 0x3800;
1055 1050
1056 outw(RxDiscard, ioaddr + EL3_CMD); 1051 outw(RxDiscard, ioaddr + EL3_CMD);
1057 lp->stats.rx_errors++; 1052 dev->stats.rx_errors++;
1058 switch (error) { 1053 switch (error) {
1059 case 0x0000: lp->stats.rx_over_errors++; break; 1054 case 0x0000: dev->stats.rx_over_errors++; break;
1060 case 0x0800: lp->stats.rx_length_errors++; break; 1055 case 0x0800: dev->stats.rx_length_errors++; break;
1061 case 0x1000: lp->stats.rx_frame_errors++; break; 1056 case 0x1000: dev->stats.rx_frame_errors++; break;
1062 case 0x1800: lp->stats.rx_length_errors++; break; 1057 case 0x1800: dev->stats.rx_length_errors++; break;
1063 case 0x2000: lp->stats.rx_frame_errors++; break; 1058 case 0x2000: dev->stats.rx_frame_errors++; break;
1064 case 0x2800: lp->stats.rx_crc_errors++; break; 1059 case 0x2800: dev->stats.rx_crc_errors++; break;
1065 } 1060 }
1066 } else { 1061 } else {
1067 short pkt_len = rx_status & 0x7ff; 1062 short pkt_len = rx_status & 0x7ff;
1068 struct sk_buff *skb; 1063 struct sk_buff *skb;
1069 1064
1070 skb = dev_alloc_skb(pkt_len+5); 1065 skb = dev_alloc_skb(pkt_len+5);
1071 lp->stats.rx_bytes += pkt_len; 1066 dev->stats.rx_bytes += pkt_len;
1072 if (el3_debug > 4) 1067 if (el3_debug > 4)
1073 printk("Receiving packet size %d status %4.4x.\n", 1068 printk("Receiving packet size %d status %4.4x.\n",
1074 pkt_len, rx_status); 1069 pkt_len, rx_status);
@@ -1083,11 +1078,11 @@ el3_rx(struct net_device *dev)
1083 skb->protocol = eth_type_trans(skb,dev); 1078 skb->protocol = eth_type_trans(skb,dev);
1084 netif_rx(skb); 1079 netif_rx(skb);
1085 dev->last_rx = jiffies; 1080 dev->last_rx = jiffies;
1086 lp->stats.rx_packets++; 1081 dev->stats.rx_packets++;
1087 continue; 1082 continue;
1088 } 1083 }
1089 outw(RxDiscard, ioaddr + EL3_CMD); 1084 outw(RxDiscard, ioaddr + EL3_CMD);
1090 lp->stats.rx_dropped++; 1085 dev->stats.rx_dropped++;
1091 if (el3_debug) 1086 if (el3_debug)
1092 printk("%s: Couldn't allocate a sk_buff of size %d.\n", 1087 printk("%s: Couldn't allocate a sk_buff of size %d.\n",
1093 dev->name, pkt_len); 1088 dev->name, pkt_len);
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 6ab84b661d70..105a8c7ca7e9 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -310,7 +310,6 @@ struct corkscrew_private {
310 struct sk_buff *tx_skbuff[TX_RING_SIZE]; 310 struct sk_buff *tx_skbuff[TX_RING_SIZE];
311 unsigned int cur_rx, cur_tx; /* The next free ring entry */ 311 unsigned int cur_rx, cur_tx; /* The next free ring entry */
312 unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */ 312 unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */
313 struct net_device_stats stats;
314 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ 313 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
315 struct timer_list timer; /* Media selection timer. */ 314 struct timer_list timer; /* Media selection timer. */
316 int capabilities ; /* Adapter capabilities word. */ 315 int capabilities ; /* Adapter capabilities word. */
@@ -983,8 +982,8 @@ static void corkscrew_timeout(struct net_device *dev)
983 break; 982 break;
984 outw(TxEnable, ioaddr + EL3_CMD); 983 outw(TxEnable, ioaddr + EL3_CMD);
985 dev->trans_start = jiffies; 984 dev->trans_start = jiffies;
986 vp->stats.tx_errors++; 985 dev->stats.tx_errors++;
987 vp->stats.tx_dropped++; 986 dev->stats.tx_dropped++;
988 netif_wake_queue(dev); 987 netif_wake_queue(dev);
989} 988}
990 989
@@ -1050,7 +1049,7 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
1050 } 1049 }
1051 /* Put out the doubleword header... */ 1050 /* Put out the doubleword header... */
1052 outl(skb->len, ioaddr + TX_FIFO); 1051 outl(skb->len, ioaddr + TX_FIFO);
1053 vp->stats.tx_bytes += skb->len; 1052 dev->stats.tx_bytes += skb->len;
1054#ifdef VORTEX_BUS_MASTER 1053#ifdef VORTEX_BUS_MASTER
1055 if (vp->bus_master) { 1054 if (vp->bus_master) {
1056 /* Set the bus-master controller to transfer the packet. */ 1055 /* Set the bus-master controller to transfer the packet. */
@@ -1094,9 +1093,9 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
1094 printk("%s: Tx error, status %2.2x.\n", 1093 printk("%s: Tx error, status %2.2x.\n",
1095 dev->name, tx_status); 1094 dev->name, tx_status);
1096 if (tx_status & 0x04) 1095 if (tx_status & 0x04)
1097 vp->stats.tx_fifo_errors++; 1096 dev->stats.tx_fifo_errors++;
1098 if (tx_status & 0x38) 1097 if (tx_status & 0x38)
1099 vp->stats.tx_aborted_errors++; 1098 dev->stats.tx_aborted_errors++;
1100 if (tx_status & 0x30) { 1099 if (tx_status & 0x30) {
1101 int j; 1100 int j;
1102 outw(TxReset, ioaddr + EL3_CMD); 1101 outw(TxReset, ioaddr + EL3_CMD);
@@ -1257,7 +1256,6 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
1257 1256
1258static int corkscrew_rx(struct net_device *dev) 1257static int corkscrew_rx(struct net_device *dev)
1259{ 1258{
1260 struct corkscrew_private *vp = netdev_priv(dev);
1261 int ioaddr = dev->base_addr; 1259 int ioaddr = dev->base_addr;
1262 int i; 1260 int i;
1263 short rx_status; 1261 short rx_status;
@@ -1271,17 +1269,17 @@ static int corkscrew_rx(struct net_device *dev)
1271 if (corkscrew_debug > 2) 1269 if (corkscrew_debug > 2)
1272 printk(" Rx error: status %2.2x.\n", 1270 printk(" Rx error: status %2.2x.\n",
1273 rx_error); 1271 rx_error);
1274 vp->stats.rx_errors++; 1272 dev->stats.rx_errors++;
1275 if (rx_error & 0x01) 1273 if (rx_error & 0x01)
1276 vp->stats.rx_over_errors++; 1274 dev->stats.rx_over_errors++;
1277 if (rx_error & 0x02) 1275 if (rx_error & 0x02)
1278 vp->stats.rx_length_errors++; 1276 dev->stats.rx_length_errors++;
1279 if (rx_error & 0x04) 1277 if (rx_error & 0x04)
1280 vp->stats.rx_frame_errors++; 1278 dev->stats.rx_frame_errors++;
1281 if (rx_error & 0x08) 1279 if (rx_error & 0x08)
1282 vp->stats.rx_crc_errors++; 1280 dev->stats.rx_crc_errors++;
1283 if (rx_error & 0x10) 1281 if (rx_error & 0x10)
1284 vp->stats.rx_length_errors++; 1282 dev->stats.rx_length_errors++;
1285 } else { 1283 } else {
1286 /* The packet length: up to 4.5K!. */ 1284 /* The packet length: up to 4.5K!. */
1287 short pkt_len = rx_status & 0x1fff; 1285 short pkt_len = rx_status & 0x1fff;
@@ -1301,8 +1299,8 @@ static int corkscrew_rx(struct net_device *dev)
1301 skb->protocol = eth_type_trans(skb, dev); 1299 skb->protocol = eth_type_trans(skb, dev);
1302 netif_rx(skb); 1300 netif_rx(skb);
1303 dev->last_rx = jiffies; 1301 dev->last_rx = jiffies;
1304 vp->stats.rx_packets++; 1302 dev->stats.rx_packets++;
1305 vp->stats.rx_bytes += pkt_len; 1303 dev->stats.rx_bytes += pkt_len;
1306 /* Wait a limited time to go to next packet. */ 1304 /* Wait a limited time to go to next packet. */
1307 for (i = 200; i >= 0; i--) 1305 for (i = 200; i >= 0; i--)
1308 if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress)) 1306 if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
@@ -1312,7 +1310,7 @@ static int corkscrew_rx(struct net_device *dev)
1312 printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len); 1310 printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len);
1313 } 1311 }
1314 outw(RxDiscard, ioaddr + EL3_CMD); 1312 outw(RxDiscard, ioaddr + EL3_CMD);
1315 vp->stats.rx_dropped++; 1313 dev->stats.rx_dropped++;
1316 /* Wait a limited time to skip this packet. */ 1314 /* Wait a limited time to skip this packet. */
1317 for (i = 200; i >= 0; i--) 1315 for (i = 200; i >= 0; i--)
1318 if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) 1316 if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
@@ -1337,23 +1335,23 @@ static int boomerang_rx(struct net_device *dev)
1337 if (corkscrew_debug > 2) 1335 if (corkscrew_debug > 2)
1338 printk(" Rx error: status %2.2x.\n", 1336 printk(" Rx error: status %2.2x.\n",
1339 rx_error); 1337 rx_error);
1340 vp->stats.rx_errors++; 1338 dev->stats.rx_errors++;
1341 if (rx_error & 0x01) 1339 if (rx_error & 0x01)
1342 vp->stats.rx_over_errors++; 1340 dev->stats.rx_over_errors++;
1343 if (rx_error & 0x02) 1341 if (rx_error & 0x02)
1344 vp->stats.rx_length_errors++; 1342 dev->stats.rx_length_errors++;
1345 if (rx_error & 0x04) 1343 if (rx_error & 0x04)
1346 vp->stats.rx_frame_errors++; 1344 dev->stats.rx_frame_errors++;
1347 if (rx_error & 0x08) 1345 if (rx_error & 0x08)
1348 vp->stats.rx_crc_errors++; 1346 dev->stats.rx_crc_errors++;
1349 if (rx_error & 0x10) 1347 if (rx_error & 0x10)
1350 vp->stats.rx_length_errors++; 1348 dev->stats.rx_length_errors++;
1351 } else { 1349 } else {
1352 /* The packet length: up to 4.5K!. */ 1350 /* The packet length: up to 4.5K!. */
1353 short pkt_len = rx_status & 0x1fff; 1351 short pkt_len = rx_status & 0x1fff;
1354 struct sk_buff *skb; 1352 struct sk_buff *skb;
1355 1353
1356 vp->stats.rx_bytes += pkt_len; 1354 dev->stats.rx_bytes += pkt_len;
1357 if (corkscrew_debug > 4) 1355 if (corkscrew_debug > 4)
1358 printk("Receiving packet size %d status %4.4x.\n", 1356 printk("Receiving packet size %d status %4.4x.\n",
1359 pkt_len, rx_status); 1357 pkt_len, rx_status);
@@ -1388,7 +1386,7 @@ static int boomerang_rx(struct net_device *dev)
1388 skb->protocol = eth_type_trans(skb, dev); 1386 skb->protocol = eth_type_trans(skb, dev);
1389 netif_rx(skb); 1387 netif_rx(skb);
1390 dev->last_rx = jiffies; 1388 dev->last_rx = jiffies;
1391 vp->stats.rx_packets++; 1389 dev->stats.rx_packets++;
1392 } 1390 }
1393 entry = (++vp->cur_rx) % RX_RING_SIZE; 1391 entry = (++vp->cur_rx) % RX_RING_SIZE;
1394 } 1392 }
@@ -1475,7 +1473,7 @@ static struct net_device_stats *corkscrew_get_stats(struct net_device *dev)
1475 update_stats(dev->base_addr, dev); 1473 update_stats(dev->base_addr, dev);
1476 spin_unlock_irqrestore(&vp->lock, flags); 1474 spin_unlock_irqrestore(&vp->lock, flags);
1477 } 1475 }
1478 return &vp->stats; 1476 return &dev->stats;
1479} 1477}
1480 1478
1481/* Update statistics. 1479/* Update statistics.
@@ -1487,19 +1485,17 @@ static struct net_device_stats *corkscrew_get_stats(struct net_device *dev)
1487 */ 1485 */
1488static void update_stats(int ioaddr, struct net_device *dev) 1486static void update_stats(int ioaddr, struct net_device *dev)
1489{ 1487{
1490 struct corkscrew_private *vp = netdev_priv(dev);
1491
1492 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ 1488 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
1493 /* Switch to the stats window, and read everything. */ 1489 /* Switch to the stats window, and read everything. */
1494 EL3WINDOW(6); 1490 EL3WINDOW(6);
1495 vp->stats.tx_carrier_errors += inb(ioaddr + 0); 1491 dev->stats.tx_carrier_errors += inb(ioaddr + 0);
1496 vp->stats.tx_heartbeat_errors += inb(ioaddr + 1); 1492 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
1497 /* Multiple collisions. */ inb(ioaddr + 2); 1493 /* Multiple collisions. */ inb(ioaddr + 2);
1498 vp->stats.collisions += inb(ioaddr + 3); 1494 dev->stats.collisions += inb(ioaddr + 3);
1499 vp->stats.tx_window_errors += inb(ioaddr + 4); 1495 dev->stats.tx_window_errors += inb(ioaddr + 4);
1500 vp->stats.rx_fifo_errors += inb(ioaddr + 5); 1496 dev->stats.rx_fifo_errors += inb(ioaddr + 5);
1501 vp->stats.tx_packets += inb(ioaddr + 6); 1497 dev->stats.tx_packets += inb(ioaddr + 6);
1502 vp->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4; 1498 dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4;
1503 /* Rx packets */ inb(ioaddr + 7); 1499 /* Rx packets */ inb(ioaddr + 7);
1504 /* Must read to clear */ 1500 /* Must read to clear */
1505 /* Tx deferrals */ inb(ioaddr + 8); 1501 /* Tx deferrals */ inb(ioaddr + 8);
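The 3c505, 3c509 and 3c515 hunks above all apply the same conversion: the drivers drop their private struct net_device_stats copies and count directly into the statistics embedded in struct net_device, so get_stats() can simply return &dev->stats. Reduced to a sketch (example_* names are made up):

#include <linux/netdevice.h>

/* Counters go straight into the net_device-embedded statistics... */
static void example_count_rx(struct net_device *dev, unsigned int len)
{
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += len;
}

/* ...and the ->get_stats hook no longer needs driver-private state. */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
        return &dev->stats;
}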
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index a499e867f0f4..dc5d2584bd0c 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -34,7 +34,7 @@ struct net_device *__alloc_ei_netdev(int size)
34 34
35void NS8390_init(struct net_device *dev, int startp) 35void NS8390_init(struct net_device *dev, int startp)
36{ 36{
37 return __NS8390_init(dev, startp); 37 __NS8390_init(dev, startp);
38} 38}
39 39
40EXPORT_SYMBOL(ei_open); 40EXPORT_SYMBOL(ei_open);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f90a86ba7e2f..af46341827f2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2593,6 +2593,7 @@ config BNX2X
2593 To compile this driver as a module, choose M here: the module 2593 To compile this driver as a module, choose M here: the module
2594 will be called bnx2x. This is recommended. 2594 will be called bnx2x. This is recommended.
2595 2595
2596source "drivers/net/sfc/Kconfig"
2596 2597
2597endif # NETDEV_10000 2598endif # NETDEV_10000
2598 2599
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 2f1f3f2739fd..dcbfe8421154 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -253,3 +253,5 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
253obj-$(CONFIG_NETXEN_NIC) += netxen/ 253obj-$(CONFIG_NETXEN_NIC) += netxen/
254obj-$(CONFIG_NIU) += niu.o 254obj-$(CONFIG_NIU) += niu.o
255obj-$(CONFIG_VIRTIO_NET) += virtio_net.o 255obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
256obj-$(CONFIG_SFC) += sfc/
257
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index f9cc2b621fe2..8eda6eeb43b7 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -47,3 +47,11 @@ config EP93XX_ETH
47 help 47 help
48 This is a driver for the ethernet hardware included in EP93xx CPUs. 48 This is a driver for the ethernet hardware included in EP93xx CPUs.
49 Say Y if you are building a kernel for EP93xx based devices. 49 Say Y if you are building a kernel for EP93xx based devices.
50
51config IXP4XX_ETH
52 tristate "Intel IXP4xx Ethernet support"
53 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
54 select MII
55 help
56 Say Y here if you want to use built-in Ethernet ports
57 on IXP4xx processor.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
index a4c868278e11..7c812ac2b6a5 100644
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_ARM_ETHER3) += ether3.o
9obj-$(CONFIG_ARM_ETHER1) += ether1.o 9obj-$(CONFIG_ARM_ETHER1) += ether1.o
10obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o 10obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
11obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o 11obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
12obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index ba6bd03a015f..a637910b02dd 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -693,11 +693,15 @@ static int __init am79c961_probe(struct platform_device *pdev)
693 * done by the ether bootp loader. 693 * done by the ether bootp loader.
694 */ 694 */
695 dev->base_addr = res->start; 695 dev->base_addr = res->start;
696 dev->irq = platform_get_irq(pdev, 0); 696 ret = platform_get_irq(pdev, 0);
697 697
698 ret = -ENODEV; 698 if (ret < 0) {
699 if (dev->irq < 0) 699 ret = -ENODEV;
700 goto nodev; 700 goto nodev;
701 }
702 dev->irq = ret;
703
704 ret = -ENODEV;
701 if (!request_region(dev->base_addr, 0x18, dev->name)) 705 if (!request_region(dev->base_addr, 0x18, dev->name))
702 goto nodev; 706 goto nodev;
703 707
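The am79c961a hunk above fixes a common probe() mistake: platform_get_irq() reports failure through a negative return value, but once that value has been stored in the unsigned dev->irq field the old dev->irq < 0 test can never trigger. The check has to happen while the result is still a signed int, roughly like this (hypothetical helper for illustration):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int example_get_netdev_irq(struct platform_device *pdev,
                                  struct net_device *dev)
{
        int irq = platform_get_irq(pdev, 0);    /* negative errno on failure */

        if (irq < 0)
                return -ENODEV;

        dev->irq = irq;
        return 0;
}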
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
new file mode 100644
index 000000000000..c617b64c288e
--- /dev/null
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -0,0 +1,1265 @@
1/*
2 * Intel IXP4xx Ethernet driver for Linux
3 *
4 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * Ethernet port config (0x00 is not present on IXP42X):
11 *
12 * logical port 0x00 0x10 0x20
13 * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C)
14 * physical PortId 2 0 1
15 * TX queue 23 24 25
16 * RX-free queue 26 27 28
17 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
18 *
19 *
20 * Queue entries:
21 * bits 0 -> 1 - NPE ID (RX and TX-done)
22 * bits 0 -> 2 - priority (TX, per 802.1D)
23 * bits 3 -> 4 - port ID (user-set?)
24 * bits 5 -> 31 - physical descriptor address
25 */
26
27#include <linux/delay.h>
28#include <linux/dma-mapping.h>
29#include <linux/dmapool.h>
30#include <linux/etherdevice.h>
31#include <linux/io.h>
32#include <linux/kernel.h>
33#include <linux/mii.h>
34#include <linux/platform_device.h>
35#include <asm/arch/npe.h>
36#include <asm/arch/qmgr.h>
37
38#define DEBUG_QUEUES 0
39#define DEBUG_DESC 0
40#define DEBUG_RX 0
41#define DEBUG_TX 0
42#define DEBUG_PKT_BYTES 0
43#define DEBUG_MDIO 0
44#define DEBUG_CLOSE 0
45
46#define DRV_NAME "ixp4xx_eth"
47
48#define MAX_NPES 3
49
50#define RX_DESCS 64 /* also length of all RX queues */
51#define TX_DESCS 16 /* also length of all TX queues */
52#define TXDONE_QUEUE_LEN 64 /* dwords */
53
54#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
55#define REGS_SIZE 0x1000
56#define MAX_MRU 1536 /* 0x600 */
57#define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
58
59#define NAPI_WEIGHT 16
60#define MDIO_INTERVAL (3 * HZ)
61#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
62#define MAX_MII_RESET_RETRIES 100 /* mdio_read() cycles, typically 4 */
63#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
64
65#define NPE_ID(port_id) ((port_id) >> 4)
66#define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3)
67#define TX_QUEUE(port_id) (NPE_ID(port_id) + 23)
68#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
69#define TXDONE_QUEUE 31
70
71/* TX Control Registers */
72#define TX_CNTRL0_TX_EN 0x01
73#define TX_CNTRL0_HALFDUPLEX 0x02
74#define TX_CNTRL0_RETRY 0x04
75#define TX_CNTRL0_PAD_EN 0x08
76#define TX_CNTRL0_APPEND_FCS 0x10
77#define TX_CNTRL0_2DEFER 0x20
78#define TX_CNTRL0_RMII 0x40 /* reduced MII */
79#define TX_CNTRL1_RETRIES 0x0F /* 4 bits */
80
81/* RX Control Registers */
82#define RX_CNTRL0_RX_EN 0x01
83#define RX_CNTRL0_PADSTRIP_EN 0x02
84#define RX_CNTRL0_SEND_FCS 0x04
85#define RX_CNTRL0_PAUSE_EN 0x08
86#define RX_CNTRL0_LOOP_EN 0x10
87#define RX_CNTRL0_ADDR_FLTR_EN 0x20
88#define RX_CNTRL0_RX_RUNT_EN 0x40
89#define RX_CNTRL0_BCAST_DIS 0x80
90#define RX_CNTRL1_DEFER_EN 0x01
91
92/* Core Control Register */
93#define CORE_RESET 0x01
94#define CORE_RX_FIFO_FLUSH 0x02
95#define CORE_TX_FIFO_FLUSH 0x04
96#define CORE_SEND_JAM 0x08
97#define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */
98
99#define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \
100 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
101 TX_CNTRL0_2DEFER)
102#define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN
103#define DEFAULT_CORE_CNTRL CORE_MDC_EN
104
105
106/* NPE message codes */
107#define NPE_GETSTATUS 0x00
108#define NPE_EDB_SETPORTADDRESS 0x01
109#define NPE_EDB_GETMACADDRESSDATABASE 0x02
110#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
111#define NPE_GETSTATS 0x04
112#define NPE_RESETSTATS 0x05
113#define NPE_SETMAXFRAMELENGTHS 0x06
114#define NPE_VLAN_SETRXTAGMODE 0x07
115#define NPE_VLAN_SETDEFAULTRXVID 0x08
116#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
117#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
118#define NPE_VLAN_SETRXQOSENTRY 0x0B
119#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
120#define NPE_STP_SETBLOCKINGSTATE 0x0D
121#define NPE_FW_SETFIREWALLMODE 0x0E
122#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
123#define NPE_PC_SETAPMACTABLE 0x11
124#define NPE_SETLOOPBACK_MODE 0x12
125#define NPE_PC_SETBSSIDTABLE 0x13
126#define NPE_ADDRESS_FILTER_CONFIG 0x14
127#define NPE_APPENDFCSCONFIG 0x15
128#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
129#define NPE_MAC_RECOVERY_START 0x17
130
131
132#ifdef __ARMEB__
133typedef struct sk_buff buffer_t;
134#define free_buffer dev_kfree_skb
135#define free_buffer_irq dev_kfree_skb_irq
136#else
137typedef void buffer_t;
138#define free_buffer kfree
139#define free_buffer_irq kfree
140#endif
141
142struct eth_regs {
143 u32 tx_control[2], __res1[2]; /* 000 */
144 u32 rx_control[2], __res2[2]; /* 010 */
145 u32 random_seed, __res3[3]; /* 020 */
146 u32 partial_empty_threshold, __res4; /* 030 */
147 u32 partial_full_threshold, __res5; /* 038 */
148 u32 tx_start_bytes, __res6[3]; /* 040 */
149 u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
150 u32 tx_2part_deferral[2], __res8[2]; /* 060 */
151 u32 slot_time, __res9[3]; /* 070 */
152 u32 mdio_command[4]; /* 080 */
153 u32 mdio_status[4]; /* 090 */
154 u32 mcast_mask[6], __res10[2]; /* 0A0 */
155 u32 mcast_addr[6], __res11[2]; /* 0C0 */
156 u32 int_clock_threshold, __res12[3]; /* 0E0 */
157 u32 hw_addr[6], __res13[61]; /* 0F0 */
158 u32 core_control; /* 1FC */
159};
160
161struct port {
162 struct resource *mem_res;
163 struct eth_regs __iomem *regs;
164 struct npe *npe;
165 struct net_device *netdev;
166 struct napi_struct napi;
167 struct net_device_stats stat;
168 struct mii_if_info mii;
169 struct delayed_work mdio_thread;
170 struct eth_plat_info *plat;
171 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
172 struct desc *desc_tab; /* coherent */
173 u32 desc_tab_phys;
174 int id; /* logical port ID */
175 u16 mii_bmcr;
176};
177
178/* NPE message structure */
179struct msg {
180#ifdef __ARMEB__
181 u8 cmd, eth_id, byte2, byte3;
182 u8 byte4, byte5, byte6, byte7;
183#else
184 u8 byte3, byte2, eth_id, cmd;
185 u8 byte7, byte6, byte5, byte4;
186#endif
187};
188
189/* Ethernet packet descriptor */
190struct desc {
191 u32 next; /* pointer to next buffer, unused */
192
193#ifdef __ARMEB__
194 u16 buf_len; /* buffer length */
195 u16 pkt_len; /* packet length */
196 u32 data; /* pointer to data buffer in RAM */
197 u8 dest_id;
198 u8 src_id;
199 u16 flags;
200 u8 qos;
201 u8 padlen;
202 u16 vlan_tci;
203#else
204 u16 pkt_len; /* packet length */
205 u16 buf_len; /* buffer length */
206 u32 data; /* pointer to data buffer in RAM */
207 u16 flags;
208 u8 src_id;
209 u8 dest_id;
210 u16 vlan_tci;
211 u8 padlen;
212 u8 qos;
213#endif
214
215#ifdef __ARMEB__
216 u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
217 u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
218 u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
219#else
220 u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
221 u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
222 u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
223#endif
224};
225
226
227#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
228 (n) * sizeof(struct desc))
229#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
230
231#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
232 ((n) + RX_DESCS) * sizeof(struct desc))
233#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
234
235#ifndef __ARMEB__
236static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
237{
238 int i;
239 for (i = 0; i < cnt; i++)
240 dest[i] = swab32(src[i]);
241}
242#endif
243
244static spinlock_t mdio_lock;
245static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
246static int ports_open;
247static struct port *npe_port_tab[MAX_NPES];
248static struct dma_pool *dma_pool;
249
250
251static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
252 int write, u16 cmd)
253{
254 int cycles = 0;
255
256 if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
257 printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
258 return 0;
259 }
260
261 if (write) {
262 __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
263 __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
264 }
265 __raw_writel(((phy_id << 5) | location) & 0xFF,
266 &mdio_regs->mdio_command[2]);
267 __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
268 &mdio_regs->mdio_command[3]);
269
270 while ((cycles < MAX_MDIO_RETRIES) &&
271 (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
272 udelay(1);
273 cycles++;
274 }
275
276 if (cycles == MAX_MDIO_RETRIES) {
277 printk(KERN_ERR "%s: MII write failed\n", dev->name);
278 return 0;
279 }
280
281#if DEBUG_MDIO
282 printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
283 cycles);
284#endif
285
286 if (write)
287 return 0;
288
289 if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
290 printk(KERN_ERR "%s: MII read failed\n", dev->name);
291 return 0;
292 }
293
294 return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
295 (__raw_readl(&mdio_regs->mdio_status[1]) << 8);
296}
297
298static int mdio_read(struct net_device *dev, int phy_id, int location)
299{
300 unsigned long flags;
301 u16 val;
302
303 spin_lock_irqsave(&mdio_lock, flags);
304 val = mdio_cmd(dev, phy_id, location, 0, 0);
305 spin_unlock_irqrestore(&mdio_lock, flags);
306 return val;
307}
308
309static void mdio_write(struct net_device *dev, int phy_id, int location,
310 int val)
311{
312 unsigned long flags;
313
314 spin_lock_irqsave(&mdio_lock, flags);
315 mdio_cmd(dev, phy_id, location, 1, val);
316 spin_unlock_irqrestore(&mdio_lock, flags);
317}
318
319static void phy_reset(struct net_device *dev, int phy_id)
320{
321 struct port *port = netdev_priv(dev);
322 int cycles = 0;
323
324 mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
325
326 while (cycles < MAX_MII_RESET_RETRIES) {
327 if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
328#if DEBUG_MDIO
329 printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
330 dev->name, cycles);
331#endif
332 return;
333 }
334 udelay(1);
335 cycles++;
336 }
337
338 printk(KERN_ERR "%s: MII reset failed\n", dev->name);
339}
340
341static void eth_set_duplex(struct port *port)
342{
343 if (port->mii.full_duplex)
344 __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
345 &port->regs->tx_control[0]);
346 else
347 __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
348 &port->regs->tx_control[0]);
349}
350
351
352static void phy_check_media(struct port *port, int init)
353{
354 if (mii_check_media(&port->mii, 1, init))
355 eth_set_duplex(port);
356 if (port->mii.force_media) { /* mii_check_media() doesn't work */
357 struct net_device *dev = port->netdev;
358 int cur_link = mii_link_ok(&port->mii);
359 int prev_link = netif_carrier_ok(dev);
360
361 if (!prev_link && cur_link) {
362 printk(KERN_INFO "%s: link up\n", dev->name);
363 netif_carrier_on(dev);
364 } else if (prev_link && !cur_link) {
365 printk(KERN_INFO "%s: link down\n", dev->name);
366 netif_carrier_off(dev);
367 }
368 }
369}
370
371
372static void mdio_thread(struct work_struct *work)
373{
374 struct port *port = container_of(work, struct port, mdio_thread.work);
375
376 phy_check_media(port, 0);
377 schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
378}
379
380
381static inline void debug_pkt(struct net_device *dev, const char *func,
382 u8 *data, int len)
383{
384#if DEBUG_PKT_BYTES
385 int i;
386
387 printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
388 for (i = 0; i < len; i++) {
389 if (i >= DEBUG_PKT_BYTES)
390 break;
391 printk("%s%02X",
392 ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
393 data[i]);
394 }
395 printk("\n");
396#endif
397}
398
399
400static inline void debug_desc(u32 phys, struct desc *desc)
401{
402#if DEBUG_DESC
403 printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
404 " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
405 phys, desc->next, desc->buf_len, desc->pkt_len,
406 desc->data, desc->dest_id, desc->src_id, desc->flags,
407 desc->qos, desc->padlen, desc->vlan_tci,
408 desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
409 desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
410 desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
411 desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
412#endif
413}
414
415static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
416{
417#if DEBUG_QUEUES
418 static struct {
419 int queue;
420 char *name;
421 } names[] = {
422 { TX_QUEUE(0x10), "TX#0 " },
423 { TX_QUEUE(0x20), "TX#1 " },
424 { TX_QUEUE(0x00), "TX#2 " },
425 { RXFREE_QUEUE(0x10), "RX-free#0 " },
426 { RXFREE_QUEUE(0x20), "RX-free#1 " },
427 { RXFREE_QUEUE(0x00), "RX-free#2 " },
428 { TXDONE_QUEUE, "TX-done " },
429 };
430 int i;
431
432 for (i = 0; i < ARRAY_SIZE(names); i++)
433 if (names[i].queue == queue)
434 break;
435
436 printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
437 i < ARRAY_SIZE(names) ? names[i].name : "",
438 is_get ? "->" : "<-", phys);
439#endif
440}
441
442static inline u32 queue_get_entry(unsigned int queue)
443{
444 u32 phys = qmgr_get_entry(queue);
445 debug_queue(queue, 1, phys);
446 return phys;
447}
448
449static inline int queue_get_desc(unsigned int queue, struct port *port,
450 int is_tx)
451{
452 u32 phys, tab_phys, n_desc;
453 struct desc *tab;
454
455 if (!(phys = queue_get_entry(queue)))
456 return -1;
457
458 phys &= ~0x1F; /* mask out non-address bits */
459 tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
460 tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
461 n_desc = (phys - tab_phys) / sizeof(struct desc);
462 BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
463 debug_desc(phys, &tab[n_desc]);
464 BUG_ON(tab[n_desc].next);
465 return n_desc;
466}
467
468static inline void queue_put_desc(unsigned int queue, u32 phys,
469 struct desc *desc)
470{
471 debug_queue(queue, 0, phys);
472 debug_desc(phys, desc);
473 BUG_ON(phys & 0x1F);
474 qmgr_put_entry(queue, phys);
475 BUG_ON(qmgr_stat_overflow(queue));
476}
477
478
479static inline void dma_unmap_tx(struct port *port, struct desc *desc)
480{
481#ifdef __ARMEB__
482 dma_unmap_single(&port->netdev->dev, desc->data,
483 desc->buf_len, DMA_TO_DEVICE);
484#else
485 dma_unmap_single(&port->netdev->dev, desc->data & ~3,
486 ALIGN((desc->data & 3) + desc->buf_len, 4),
487 DMA_TO_DEVICE);
488#endif
489}
490
491
492static void eth_rx_irq(void *pdev)
493{
494 struct net_device *dev = pdev;
495 struct port *port = netdev_priv(dev);
496
497#if DEBUG_RX
498 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
499#endif
500 qmgr_disable_irq(port->plat->rxq);
501 netif_rx_schedule(dev, &port->napi);
502}
503
504static int eth_poll(struct napi_struct *napi, int budget)
505{
506 struct port *port = container_of(napi, struct port, napi);
507 struct net_device *dev = port->netdev;
508 unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
509 int received = 0;
510
511#if DEBUG_RX
512 printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
513#endif
514
515 while (received < budget) {
516 struct sk_buff *skb;
517 struct desc *desc;
518 int n;
519#ifdef __ARMEB__
520 struct sk_buff *temp;
521 u32 phys;
522#endif
523
524 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
525 received = 0; /* No packet received */
526#if DEBUG_RX
527 printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
528 dev->name);
529#endif
530 netif_rx_complete(dev, napi);
531 qmgr_enable_irq(rxq);
532 if (!qmgr_stat_empty(rxq) &&
533 netif_rx_reschedule(dev, napi)) {
534#if DEBUG_RX
535 printk(KERN_DEBUG "%s: eth_poll"
536 " netif_rx_reschedule successed\n",
537 dev->name);
538#endif
539 qmgr_disable_irq(rxq);
540 continue;
541 }
542#if DEBUG_RX
543 printk(KERN_DEBUG "%s: eth_poll all done\n",
544 dev->name);
545#endif
546 return 0; /* all work done */
547 }
548
549 desc = rx_desc_ptr(port, n);
550
551#ifdef __ARMEB__
552 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
553 phys = dma_map_single(&dev->dev, skb->data,
554 RX_BUFF_SIZE, DMA_FROM_DEVICE);
555 if (dma_mapping_error(phys)) {
556 dev_kfree_skb(skb);
557 skb = NULL;
558 }
559 }
560#else
561 skb = netdev_alloc_skb(dev,
562 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
563#endif
564
565 if (!skb) {
566 port->stat.rx_dropped++;
567 /* put the desc back on RX-ready queue */
568 desc->buf_len = MAX_MRU;
569 desc->pkt_len = 0;
570 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
571 continue;
572 }
573
574 /* process received frame */
575#ifdef __ARMEB__
576 temp = skb;
577 skb = port->rx_buff_tab[n];
578 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
579 RX_BUFF_SIZE, DMA_FROM_DEVICE);
580#else
581 dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
582 RX_BUFF_SIZE, DMA_FROM_DEVICE);
583 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
584 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
585#endif
586 skb_reserve(skb, NET_IP_ALIGN);
587 skb_put(skb, desc->pkt_len);
588
589 debug_pkt(dev, "eth_poll", skb->data, skb->len);
590
591 skb->protocol = eth_type_trans(skb, dev);
592 dev->last_rx = jiffies;
593 port->stat.rx_packets++;
594 port->stat.rx_bytes += skb->len;
595 netif_receive_skb(skb);
596
597 /* put the new buffer on RX-free queue */
598#ifdef __ARMEB__
599 port->rx_buff_tab[n] = temp;
600 desc->data = phys + NET_IP_ALIGN;
601#endif
602 desc->buf_len = MAX_MRU;
603 desc->pkt_len = 0;
604 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
605 received++;
606 }
607
608#if DEBUG_RX
609 printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
610#endif
611 return received; /* not all work done */
612}
613
614
615static void eth_txdone_irq(void *unused)
616{
617 u32 phys;
618
619#if DEBUG_TX
620 printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
621#endif
622 while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
623 u32 npe_id, n_desc;
624 struct port *port;
625 struct desc *desc;
626 int start;
627
628 npe_id = phys & 3;
629 BUG_ON(npe_id >= MAX_NPES);
630 port = npe_port_tab[npe_id];
631 BUG_ON(!port);
632 phys &= ~0x1F; /* mask out non-address bits */
633 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
634 BUG_ON(n_desc >= TX_DESCS);
635 desc = tx_desc_ptr(port, n_desc);
636 debug_desc(phys, desc);
637
638 if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
639 port->stat.tx_packets++;
640 port->stat.tx_bytes += desc->pkt_len;
641
642 dma_unmap_tx(port, desc);
643#if DEBUG_TX
644 printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
645 port->netdev->name, port->tx_buff_tab[n_desc]);
646#endif
647 free_buffer_irq(port->tx_buff_tab[n_desc]);
648 port->tx_buff_tab[n_desc] = NULL;
649 }
650
651 start = qmgr_stat_empty(port->plat->txreadyq);
652 queue_put_desc(port->plat->txreadyq, phys, desc);
653 if (start) {
654#if DEBUG_TX
655 printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
656 port->netdev->name);
657#endif
658 netif_wake_queue(port->netdev);
659 }
660 }
661}
662
663static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
664{
665 struct port *port = netdev_priv(dev);
666 unsigned int txreadyq = port->plat->txreadyq;
667 int len, offset, bytes, n;
668 void *mem;
669 u32 phys;
670 struct desc *desc;
671
672#if DEBUG_TX
673 printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
674#endif
675
676 if (unlikely(skb->len > MAX_MRU)) {
677 dev_kfree_skb(skb);
678 port->stat.tx_errors++;
679 return NETDEV_TX_OK;
680 }
681
682 debug_pkt(dev, "eth_xmit", skb->data, skb->len);
683
684 len = skb->len;
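	/*
	 * Big-endian: transmit straight from the skb.  Little-endian: copy the
	 * frame into a word-aligned bounce buffer with a 32-bit byte swap so it
	 * reaches the NPE in the byte order it expects, then free the skb.
	 */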
685#ifdef __ARMEB__
686 offset = 0; /* no need to keep alignment */
687 bytes = len;
688 mem = skb->data;
689#else
690 offset = (int)skb->data & 3; /* keep 32-bit alignment */
691 bytes = ALIGN(offset + len, 4);
692 if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
693 dev_kfree_skb(skb);
694 port->stat.tx_dropped++;
695 return NETDEV_TX_OK;
696 }
697 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
698 dev_kfree_skb(skb);
699#endif
700
701 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
702 if (dma_mapping_error(phys)) {
703#ifdef __ARMEB__
704 dev_kfree_skb(skb);
705#else
706 kfree(mem);
707#endif
708 port->stat.tx_dropped++;
709 return NETDEV_TX_OK;
710 }
711
712 n = queue_get_desc(txreadyq, port, 1);
713 BUG_ON(n < 0);
714 desc = tx_desc_ptr(port, n);
715
716#ifdef __ARMEB__
717 port->tx_buff_tab[n] = skb;
718#else
719 port->tx_buff_tab[n] = mem;
720#endif
721 desc->data = phys + offset;
722 desc->buf_len = desc->pkt_len = len;
723
724 /* NPE firmware pads short frames with zeros internally */
725 wmb();
726 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
727 dev->trans_start = jiffies;
728
729 if (qmgr_stat_empty(txreadyq)) {
730#if DEBUG_TX
731 printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
732#endif
733 netif_stop_queue(dev);
734 /* we could miss TX ready interrupt */
735 if (!qmgr_stat_empty(txreadyq)) {
736#if DEBUG_TX
737 printk(KERN_DEBUG "%s: eth_xmit ready again\n",
738 dev->name);
739#endif
740 netif_wake_queue(dev);
741 }
742 }
743
744#if DEBUG_TX
745 printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
746#endif
747 return NETDEV_TX_OK;
748}
749
750
751static struct net_device_stats *eth_stats(struct net_device *dev)
752{
753 struct port *port = netdev_priv(dev);
754 return &port->stat;
755}
756
757static void eth_set_mcast_list(struct net_device *dev)
758{
759 struct port *port = netdev_priv(dev);
760 struct dev_mc_list *mclist = dev->mc_list;
761 u8 diffs[ETH_ALEN], *addr;
762 int cnt = dev->mc_count, i;
763
764 if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
765 __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
766 &port->regs->rx_control[0]);
767 return;
768 }
769
770 memset(diffs, 0, ETH_ALEN);
771 addr = mclist->dmi_addr; /* first MAC address */
772
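	/*
	 * Build a single (address, mask) filter: a mask bit is set wherever all
	 * listed addresses agree with the first one, so the hardware accepts a
	 * superset of the requested multicast list.
	 */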
773 while (--cnt && (mclist = mclist->next))
774 for (i = 0; i < ETH_ALEN; i++)
775 diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
776
777 for (i = 0; i < ETH_ALEN; i++) {
778 __raw_writel(addr[i], &port->regs->mcast_addr[i]);
779 __raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
780 }
781
782 __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
783 &port->regs->rx_control[0]);
784}
785
786
787static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
788{
789 struct port *port = netdev_priv(dev);
790 unsigned int duplex_chg;
791 int err;
792
793 if (!netif_running(dev))
794 return -EINVAL;
795 err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
796 if (duplex_chg)
797 eth_set_duplex(port);
798 return err;
799}
800
801
802static int request_queues(struct port *port)
803{
804 int err;
805
806 err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
807 if (err)
808 return err;
809
810 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
811 if (err)
812 goto rel_rxfree;
813
814 err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
815 if (err)
816 goto rel_rx;
817
818 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
819 if (err)
820 goto rel_tx;
821
822 /* TX-done queue handles skbs sent out by the NPEs */
823 if (!ports_open) {
824 err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
825 if (err)
826 goto rel_txready;
827 }
828 return 0;
829
830rel_txready:
831 qmgr_release_queue(port->plat->txreadyq);
832rel_tx:
833 qmgr_release_queue(TX_QUEUE(port->id));
834rel_rx:
835 qmgr_release_queue(port->plat->rxq);
836rel_rxfree:
837 qmgr_release_queue(RXFREE_QUEUE(port->id));
838 printk(KERN_DEBUG "%s: unable to request hardware queues\n",
839 port->netdev->name);
840 return err;
841}
842
843static void release_queues(struct port *port)
844{
845 qmgr_release_queue(RXFREE_QUEUE(port->id));
846 qmgr_release_queue(port->plat->rxq);
847 qmgr_release_queue(TX_QUEUE(port->id));
848 qmgr_release_queue(port->plat->txreadyq);
849
850 if (!ports_open)
851 qmgr_release_queue(TXDONE_QUEUE);
852}
853
854static int init_queues(struct port *port)
855{
856 int i;
857
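	/*
	 * The DMA pool is shared by all ports; each port carves its descriptor
	 * table (desc_tab) out of a single POOL_ALLOC_SIZE block.
	 */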
858 if (!ports_open)
859 if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
860 POOL_ALLOC_SIZE, 32, 0)))
861 return -ENOMEM;
862
863 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
864 &port->desc_tab_phys)))
865 return -ENOMEM;
866 memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
867 memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* clear buffer tables */
868 memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
869
870 /* Setup RX buffers */
871 for (i = 0; i < RX_DESCS; i++) {
872 struct desc *desc = rx_desc_ptr(port, i);
873 buffer_t *buff; /* skb or kmalloc()ated memory */
874 void *data;
875#ifdef __ARMEB__
876 if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
877 return -ENOMEM;
878 data = buff->data;
879#else
880 if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
881 return -ENOMEM;
882 data = buff;
883#endif
884 desc->buf_len = MAX_MRU;
885 desc->data = dma_map_single(&port->netdev->dev, data,
886 RX_BUFF_SIZE, DMA_FROM_DEVICE);
887 if (dma_mapping_error(desc->data)) {
888 free_buffer(buff);
889 return -EIO;
890 }
891 desc->data += NET_IP_ALIGN;
892 port->rx_buff_tab[i] = buff;
893 }
894
895 return 0;
896}
897
898static void destroy_queues(struct port *port)
899{
900 int i;
901
902 if (port->desc_tab) {
903 for (i = 0; i < RX_DESCS; i++) {
904 struct desc *desc = rx_desc_ptr(port, i);
905 buffer_t *buff = port->rx_buff_tab[i];
906 if (buff) {
907 dma_unmap_single(&port->netdev->dev,
908 desc->data - NET_IP_ALIGN,
909 RX_BUFF_SIZE, DMA_FROM_DEVICE);
910 free_buffer(buff);
911 }
912 }
913 for (i = 0; i < TX_DESCS; i++) {
914 struct desc *desc = tx_desc_ptr(port, i);
915 buffer_t *buff = port->tx_buff_tab[i];
916 if (buff) {
917 dma_unmap_tx(port, desc);
918 free_buffer(buff);
919 }
920 }
921 dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
922 port->desc_tab = NULL;
923 }
924
925 if (!ports_open && dma_pool) {
926 dma_pool_destroy(dma_pool);
927 dma_pool = NULL;
928 }
929}
930
931static int eth_open(struct net_device *dev)
932{
933 struct port *port = netdev_priv(dev);
934 struct npe *npe = port->npe;
935 struct msg msg;
936 int i, err;
937
938 if (!npe_running(npe)) {
939 err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
940 if (err)
941 return err;
942
943 if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
944 printk(KERN_ERR "%s: %s not responding\n", dev->name,
945 npe_name(npe));
946 return -EIO;
947 }
948 }
949
950 mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
951
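	/* Point all eight RX QoS classes of this port at its single RX queue. */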
952 memset(&msg, 0, sizeof(msg));
953 msg.cmd = NPE_VLAN_SETRXQOSENTRY;
954 msg.eth_id = port->id;
955 msg.byte5 = port->plat->rxq | 0x80;
956 msg.byte7 = port->plat->rxq << 4;
957 for (i = 0; i < 8; i++) {
958 msg.byte3 = i;
959 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
960 return -EIO;
961 }
962
963 msg.cmd = NPE_EDB_SETPORTADDRESS;
964 msg.eth_id = PHYSICAL_ID(port->id);
965 msg.byte2 = dev->dev_addr[0];
966 msg.byte3 = dev->dev_addr[1];
967 msg.byte4 = dev->dev_addr[2];
968 msg.byte5 = dev->dev_addr[3];
969 msg.byte6 = dev->dev_addr[4];
970 msg.byte7 = dev->dev_addr[5];
971 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
972 return -EIO;
973
974 memset(&msg, 0, sizeof(msg));
975 msg.cmd = NPE_FW_SETFIREWALLMODE;
976 msg.eth_id = port->id;
977 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
978 return -EIO;
979
980 if ((err = request_queues(port)) != 0)
981 return err;
982
983 if ((err = init_queues(port)) != 0) {
984 destroy_queues(port);
985 release_queues(port);
986 return err;
987 }
988
989 for (i = 0; i < ETH_ALEN; i++)
990 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
991 __raw_writel(0x08, &port->regs->random_seed);
992 __raw_writel(0x12, &port->regs->partial_empty_threshold);
993 __raw_writel(0x30, &port->regs->partial_full_threshold);
994 __raw_writel(0x08, &port->regs->tx_start_bytes);
995 __raw_writel(0x15, &port->regs->tx_deferral);
996 __raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
997 __raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
998 __raw_writel(0x80, &port->regs->slot_time);
999 __raw_writel(0x01, &port->regs->int_clock_threshold);
1000
1001 /* Populate queues with buffers, no failure after this point */
1002 for (i = 0; i < TX_DESCS; i++)
1003 queue_put_desc(port->plat->txreadyq,
1004 tx_desc_phys(port, i), tx_desc_ptr(port, i));
1005
1006 for (i = 0; i < RX_DESCS; i++)
1007 queue_put_desc(RXFREE_QUEUE(port->id),
1008 rx_desc_phys(port, i), rx_desc_ptr(port, i));
1009
1010 __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
1011 __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
1012 __raw_writel(0, &port->regs->rx_control[1]);
1013 __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
1014
1015 napi_enable(&port->napi);
1016 phy_check_media(port, 1);
1017 eth_set_mcast_list(dev);
1018 netif_start_queue(dev);
1019 schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
1020
1021 qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
1022 eth_rx_irq, dev);
1023 if (!ports_open) {
1024 qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
1025 eth_txdone_irq, NULL);
1026 qmgr_enable_irq(TXDONE_QUEUE);
1027 }
1028 ports_open++;
1029 /* RX data may already be queued; the NAPI poll will re-enable the IRQ when done */
1030 netif_rx_schedule(dev, &port->napi);
1031 return 0;
1032}
1033
1034static int eth_close(struct net_device *dev)
1035{
1036 struct port *port = netdev_priv(dev);
1037 struct msg msg;
1038 int buffs = RX_DESCS; /* allocated RX buffers */
1039 int i;
1040
1041 ports_open--;
1042 qmgr_disable_irq(port->plat->rxq);
1043 napi_disable(&port->napi);
1044 netif_stop_queue(dev);
1045
1046 while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
1047 buffs--;
1048
1049 memset(&msg, 0, sizeof(msg));
1050 msg.cmd = NPE_SETLOOPBACK_MODE;
1051 msg.eth_id = port->id;
1052 msg.byte3 = 1;
1053 if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
1054 printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
1055
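	/*
	 * Drain RX: with internal loopback enabled above, queueing short dummy
	 * TX frames makes the NPE return any RX buffers it still holds, so they
	 * can be pulled off the RX queue until every buffer is accounted for or
	 * MAX_CLOSE_WAIT iterations have passed.
	 */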
1056 i = 0;
1057 do { /* drain RX buffers */
1058 while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
1059 buffs--;
1060 if (!buffs)
1061 break;
1062 if (qmgr_stat_empty(TX_QUEUE(port->id))) {
1063 /* we have to inject a dummy packet */
1064 struct desc *desc;
1065 u32 phys;
1066 int n = queue_get_desc(port->plat->txreadyq, port, 1);
1067 BUG_ON(n < 0);
1068 desc = tx_desc_ptr(port, n);
1069 phys = tx_desc_phys(port, n);
1070 desc->buf_len = desc->pkt_len = 1;
1071 wmb();
1072 queue_put_desc(TX_QUEUE(port->id), phys, desc);
1073 }
1074 udelay(1);
1075 } while (++i < MAX_CLOSE_WAIT);
1076
1077 if (buffs)
1078 printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
1079 " left in NPE\n", dev->name, buffs);
1080#if DEBUG_CLOSE
1081 if (!buffs)
1082 printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
1083#endif
1084
1085 buffs = TX_DESCS;
1086 while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
1087 buffs--; /* cancel TX */
1088
1089 i = 0;
1090 do {
1091 while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
1092 buffs--;
1093 if (!buffs)
1094 break;
1095 } while (++i < MAX_CLOSE_WAIT);
1096
1097 if (buffs)
1098 printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
1099 "left in NPE\n", dev->name, buffs);
1100#if DEBUG_CLOSE
1101 if (!buffs)
1102 printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
1103#endif
1104
1105 msg.byte3 = 0;
1106 if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
1107 printk(KERN_CRIT "%s: unable to disable loopback\n",
1108 dev->name);
1109
1110 port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
1111 ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
1112 mdio_write(dev, port->plat->phy, MII_BMCR,
1113 port->mii_bmcr | BMCR_PDOWN);
1114
1115 if (!ports_open)
1116 qmgr_disable_irq(TXDONE_QUEUE);
1117 cancel_rearming_delayed_work(&port->mdio_thread);
1118 destroy_queues(port);
1119 release_queues(port);
1120 return 0;
1121}
1122
1123static int __devinit eth_init_one(struct platform_device *pdev)
1124{
1125 struct port *port;
1126 struct net_device *dev;
1127 struct eth_plat_info *plat = pdev->dev.platform_data;
1128 u32 regs_phys;
1129 int err;
1130
1131 if (!(dev = alloc_etherdev(sizeof(struct port))))
1132 return -ENOMEM;
1133
1134 SET_NETDEV_DEV(dev, &pdev->dev);
1135 port = netdev_priv(dev);
1136 port->netdev = dev;
1137 port->id = pdev->id;
1138
1139 switch (port->id) {
1140 case IXP4XX_ETH_NPEA:
1141 port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
1142 regs_phys = IXP4XX_EthA_BASE_PHYS;
1143 break;
1144 case IXP4XX_ETH_NPEB:
1145 port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
1146 regs_phys = IXP4XX_EthB_BASE_PHYS;
1147 break;
1148 case IXP4XX_ETH_NPEC:
1149 port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
1150 regs_phys = IXP4XX_EthC_BASE_PHYS;
1151 break;
1152 default:
1153 err = -ENOSYS;
1154 goto err_free;
1155 }
1156
1157 dev->open = eth_open;
1158 dev->hard_start_xmit = eth_xmit;
1159 dev->stop = eth_close;
1160 dev->get_stats = eth_stats;
1161 dev->do_ioctl = eth_ioctl;
1162 dev->set_multicast_list = eth_set_mcast_list;
1163 dev->tx_queue_len = 100;
1164
1165 netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
1166
1167 if (!(port->npe = npe_request(NPE_ID(port->id)))) {
1168 err = -EIO;
1169 goto err_free;
1170 }
1171
1172 if (register_netdev(dev)) {
1173 err = -EIO;
1174 goto err_npe_rel;
1175 }
1176
1177 port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
1178 if (!port->mem_res) {
1179 err = -EBUSY;
1180 goto err_unreg;
1181 }
1182
1183 port->plat = plat;
1184 npe_port_tab[NPE_ID(port->id)] = port;
1185 memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
1186
1187 platform_set_drvdata(pdev, dev);
1188
1189 __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
1190 &port->regs->core_control);
1191 udelay(50);
1192 __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
1193 udelay(50);
1194
1195 port->mii.dev = dev;
1196 port->mii.mdio_read = mdio_read;
1197 port->mii.mdio_write = mdio_write;
1198 port->mii.phy_id = plat->phy;
1199 port->mii.phy_id_mask = 0x1F;
1200 port->mii.reg_num_mask = 0x1F;
1201
1202 printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
1203 npe_name(port->npe));
1204
1205 phy_reset(dev, plat->phy);
1206 port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
1207 ~(BMCR_RESET | BMCR_PDOWN);
1208 mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
1209
1210 INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
1211 return 0;
1212
1213err_unreg:
1214 unregister_netdev(dev);
1215err_npe_rel:
1216 npe_release(port->npe);
1217err_free:
1218 free_netdev(dev);
1219 return err;
1220}
1221
1222static int __devexit eth_remove_one(struct platform_device *pdev)
1223{
1224 struct net_device *dev = platform_get_drvdata(pdev);
1225 struct port *port = netdev_priv(dev);
1226
1227 unregister_netdev(dev);
1228 npe_port_tab[NPE_ID(port->id)] = NULL;
1229 platform_set_drvdata(pdev, NULL);
1230 npe_release(port->npe);
1231 release_resource(port->mem_res);
1232 free_netdev(dev);
1233 return 0;
1234}
1235
1236static struct platform_driver drv = {
1237 .driver.name = DRV_NAME,
1238 .probe = eth_init_one,
1239 .remove = eth_remove_one,
1240};
1241
1242static int __init eth_init_module(void)
1243{
1244 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
1245 return -ENOSYS;
1246
1247 /* All MII PHY accesses use NPE-B Ethernet registers */
1248 spin_lock_init(&mdio_lock);
1249 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
1250 __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
1251
1252 return platform_driver_register(&drv);
1253}
1254
1255static void __exit eth_cleanup_module(void)
1256{
1257 platform_driver_unregister(&drv);
1258}
1259
1260MODULE_AUTHOR("Krzysztof Halasa");
1261MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
1262MODULE_LICENSE("GPL v2");
1263MODULE_ALIAS("platform:ixp4xx_eth");
1264module_init(eth_init_module);
1265module_exit(eth_cleanup_module);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 4fec8581bfd7..89c0018132ec 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -27,6 +27,7 @@
27#include <linux/phy.h> 27#include <linux/phy.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/ethtool.h>
30#include <linux/skbuff.h> 31#include <linux/skbuff.h>
31#include <linux/platform_device.h> 32#include <linux/platform_device.h>
32 33
@@ -42,7 +43,7 @@
42#define DRV_NAME "bfin_mac" 43#define DRV_NAME "bfin_mac"
43#define DRV_VERSION "1.1" 44#define DRV_VERSION "1.1"
44#define DRV_AUTHOR "Bryan Wu, Luke Yang" 45#define DRV_AUTHOR "Bryan Wu, Luke Yang"
45#define DRV_DESC "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver" 46#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
46 47
47MODULE_AUTHOR(DRV_AUTHOR); 48MODULE_AUTHOR(DRV_AUTHOR);
48MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
@@ -73,8 +74,14 @@ static struct net_dma_desc_tx *current_tx_ptr;
73static struct net_dma_desc_tx *tx_desc; 74static struct net_dma_desc_tx *tx_desc;
74static struct net_dma_desc_rx *rx_desc; 75static struct net_dma_desc_rx *rx_desc;
75 76
76static void bf537mac_disable(void); 77#if defined(CONFIG_BFIN_MAC_RMII)
77static void bf537mac_enable(void); 78static u16 pin_req[] = P_RMII0;
79#else
80static u16 pin_req[] = P_MII0;
81#endif
82
83static void bfin_mac_disable(void);
84static void bfin_mac_enable(void);
78 85
79static void desc_list_free(void) 86static void desc_list_free(void)
80{ 87{
@@ -243,27 +250,6 @@ init_error:
243 250
244/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ 251/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
245 252
246/* Set FER regs to MUX in Ethernet pins */
247static int setup_pin_mux(int action)
248{
249#if defined(CONFIG_BFIN_MAC_RMII)
250 u16 pin_req[] = P_RMII0;
251#else
252 u16 pin_req[] = P_MII0;
253#endif
254
255 if (action) {
256 if (peripheral_request_list(pin_req, DRV_NAME)) {
257 printk(KERN_ERR DRV_NAME
258 ": Requesting Peripherals failed\n");
259 return -EFAULT;
260 }
261 } else
262 peripheral_free_list(pin_req);
263
264 return 0;
265}
266
267/* 253/*
268 * MII operations 254 * MII operations
269 */ 255 */
@@ -322,9 +308,9 @@ static int mdiobus_reset(struct mii_bus *bus)
322 return 0; 308 return 0;
323} 309}
324 310
325static void bf537_adjust_link(struct net_device *dev) 311static void bfin_mac_adjust_link(struct net_device *dev)
326{ 312{
327 struct bf537mac_local *lp = netdev_priv(dev); 313 struct bfin_mac_local *lp = netdev_priv(dev);
328 struct phy_device *phydev = lp->phydev; 314 struct phy_device *phydev = lp->phydev;
329 unsigned long flags; 315 unsigned long flags;
330 int new_state = 0; 316 int new_state = 0;
@@ -395,7 +381,7 @@ static void bf537_adjust_link(struct net_device *dev)
395 381
396static int mii_probe(struct net_device *dev) 382static int mii_probe(struct net_device *dev)
397{ 383{
398 struct bf537mac_local *lp = netdev_priv(dev); 384 struct bfin_mac_local *lp = netdev_priv(dev);
399 struct phy_device *phydev = NULL; 385 struct phy_device *phydev = NULL;
400 unsigned short sysctl; 386 unsigned short sysctl;
401 int i; 387 int i;
@@ -431,10 +417,10 @@ static int mii_probe(struct net_device *dev)
431 } 417 }
432 418
433#if defined(CONFIG_BFIN_MAC_RMII) 419#if defined(CONFIG_BFIN_MAC_RMII)
434 phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, 420 phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0,
435 PHY_INTERFACE_MODE_RMII); 421 PHY_INTERFACE_MODE_RMII);
436#else 422#else
437 phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, 423 phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0,
438 PHY_INTERFACE_MODE_MII); 424 PHY_INTERFACE_MODE_MII);
439#endif 425#endif
440 426
@@ -469,6 +455,51 @@ static int mii_probe(struct net_device *dev)
469 return 0; 455 return 0;
470} 456}
471 457
458/*
459 * Ethtool support
460 */
461
462static int
463bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
464{
465 struct bfin_mac_local *lp = netdev_priv(dev);
466
467 if (lp->phydev)
468 return phy_ethtool_gset(lp->phydev, cmd);
469
470 return -EINVAL;
471}
472
473static int
474bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
475{
476 struct bfin_mac_local *lp = netdev_priv(dev);
477
478 if (!capable(CAP_NET_ADMIN))
479 return -EPERM;
480
481 if (lp->phydev)
482 return phy_ethtool_sset(lp->phydev, cmd);
483
484 return -EINVAL;
485}
486
487static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
488 struct ethtool_drvinfo *info)
489{
490 strcpy(info->driver, DRV_NAME);
491 strcpy(info->version, DRV_VERSION);
492 strcpy(info->fw_version, "N/A");
493 strcpy(info->bus_info, dev->dev.bus_id);
494}
495
496static struct ethtool_ops bfin_mac_ethtool_ops = {
497 .get_settings = bfin_mac_ethtool_getsettings,
498 .set_settings = bfin_mac_ethtool_setsettings,
499 .get_link = ethtool_op_get_link,
500 .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
501};
502
472/**************************************************************************/ 503/**************************************************************************/
473void setup_system_regs(struct net_device *dev) 504void setup_system_regs(struct net_device *dev)
474{ 505{
@@ -511,7 +542,7 @@ static void setup_mac_addr(u8 *mac_addr)
511 bfin_write_EMAC_ADDRHI(addr_hi); 542 bfin_write_EMAC_ADDRHI(addr_hi);
512} 543}
513 544
514static int bf537mac_set_mac_address(struct net_device *dev, void *p) 545static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
515{ 546{
516 struct sockaddr *addr = p; 547 struct sockaddr *addr = p;
517 if (netif_running(dev)) 548 if (netif_running(dev))
@@ -573,7 +604,7 @@ adjust_head:
573 604
574} 605}
575 606
576static int bf537mac_hard_start_xmit(struct sk_buff *skb, 607static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
577 struct net_device *dev) 608 struct net_device *dev)
578{ 609{
579 unsigned int data; 610 unsigned int data;
@@ -631,7 +662,7 @@ out:
631 return 0; 662 return 0;
632} 663}
633 664
634static void bf537mac_rx(struct net_device *dev) 665static void bfin_mac_rx(struct net_device *dev)
635{ 666{
636 struct sk_buff *skb, *new_skb; 667 struct sk_buff *skb, *new_skb;
637 unsigned short len; 668 unsigned short len;
@@ -680,7 +711,7 @@ out:
680} 711}
681 712
682/* interrupt routine to handle rx and error signal */ 713/* interrupt routine to handle rx and error signal */
683static irqreturn_t bf537mac_interrupt(int irq, void *dev_id) 714static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
684{ 715{
685 struct net_device *dev = dev_id; 716 struct net_device *dev = dev_id;
686 int number = 0; 717 int number = 0;
@@ -700,21 +731,21 @@ get_one_packet:
700 } 731 }
701 732
702real_rx: 733real_rx:
703 bf537mac_rx(dev); 734 bfin_mac_rx(dev);
704 number++; 735 number++;
705 goto get_one_packet; 736 goto get_one_packet;
706} 737}
707 738
708#ifdef CONFIG_NET_POLL_CONTROLLER 739#ifdef CONFIG_NET_POLL_CONTROLLER
709static void bf537mac_poll(struct net_device *dev) 740static void bfin_mac_poll(struct net_device *dev)
710{ 741{
711 disable_irq(IRQ_MAC_RX); 742 disable_irq(IRQ_MAC_RX);
712 bf537mac_interrupt(IRQ_MAC_RX, dev); 743 bfin_mac_interrupt(IRQ_MAC_RX, dev);
713 enable_irq(IRQ_MAC_RX); 744 enable_irq(IRQ_MAC_RX);
714} 745}
715#endif /* CONFIG_NET_POLL_CONTROLLER */ 746#endif /* CONFIG_NET_POLL_CONTROLLER */
716 747
717static void bf537mac_disable(void) 748static void bfin_mac_disable(void)
718{ 749{
719 unsigned int opmode; 750 unsigned int opmode;
720 751
@@ -728,7 +759,7 @@ static void bf537mac_disable(void)
728/* 759/*
729 * Enable Interrupts, Receive, and Transmit 760 * Enable Interrupts, Receive, and Transmit
730 */ 761 */
731static void bf537mac_enable(void) 762static void bfin_mac_enable(void)
732{ 763{
733 u32 opmode; 764 u32 opmode;
734 765
@@ -766,23 +797,23 @@ static void bf537mac_enable(void)
766} 797}
767 798
768/* Our watchdog timed out. Called by the networking layer */ 799/* Our watchdog timed out. Called by the networking layer */
769static void bf537mac_timeout(struct net_device *dev) 800static void bfin_mac_timeout(struct net_device *dev)
770{ 801{
771 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 802 pr_debug("%s: %s\n", dev->name, __FUNCTION__);
772 803
773 bf537mac_disable(); 804 bfin_mac_disable();
774 805
775 /* reset tx queue */ 806 /* reset tx queue */
776 tx_list_tail = tx_list_head->next; 807 tx_list_tail = tx_list_head->next;
777 808
778 bf537mac_enable(); 809 bfin_mac_enable();
779 810
780 /* We can accept TX packets again */ 811 /* We can accept TX packets again */
781 dev->trans_start = jiffies; 812 dev->trans_start = jiffies;
782 netif_wake_queue(dev); 813 netif_wake_queue(dev);
783} 814}
784 815
785static void bf537mac_multicast_hash(struct net_device *dev) 816static void bfin_mac_multicast_hash(struct net_device *dev)
786{ 817{
787 u32 emac_hashhi, emac_hashlo; 818 u32 emac_hashhi, emac_hashlo;
788 struct dev_mc_list *dmi = dev->mc_list; 819 struct dev_mc_list *dmi = dev->mc_list;
@@ -821,7 +852,7 @@ static void bf537mac_multicast_hash(struct net_device *dev)
821 * promiscuous mode (for TCPDUMP and cousins) or accept 852 * promiscuous mode (for TCPDUMP and cousins) or accept
822 * a select set of multicast packets 853 * a select set of multicast packets
823 */ 854 */
824static void bf537mac_set_multicast_list(struct net_device *dev) 855static void bfin_mac_set_multicast_list(struct net_device *dev)
825{ 856{
826 u32 sysctl; 857 u32 sysctl;
827 858
@@ -840,7 +871,7 @@ static void bf537mac_set_multicast_list(struct net_device *dev)
840 sysctl = bfin_read_EMAC_OPMODE(); 871 sysctl = bfin_read_EMAC_OPMODE();
841 sysctl |= HM; 872 sysctl |= HM;
842 bfin_write_EMAC_OPMODE(sysctl); 873 bfin_write_EMAC_OPMODE(sysctl);
843 bf537mac_multicast_hash(dev); 874 bfin_mac_multicast_hash(dev);
844 } else { 875 } else {
845 /* clear promisc or multicast mode */ 876 /* clear promisc or multicast mode */
846 sysctl = bfin_read_EMAC_OPMODE(); 877 sysctl = bfin_read_EMAC_OPMODE();
@@ -852,7 +883,7 @@ static void bf537mac_set_multicast_list(struct net_device *dev)
852/* 883/*
853 * this puts the device in an inactive state 884 * this puts the device in an inactive state
854 */ 885 */
855static void bf537mac_shutdown(struct net_device *dev) 886static void bfin_mac_shutdown(struct net_device *dev)
856{ 887{
857 /* Turn off the EMAC */ 888 /* Turn off the EMAC */
858 bfin_write_EMAC_OPMODE(0x00000000); 889 bfin_write_EMAC_OPMODE(0x00000000);
@@ -866,9 +897,9 @@ static void bf537mac_shutdown(struct net_device *dev)
866 * 897 *
867 * Set up everything, reset the card, etc.. 898 * Set up everything, reset the card, etc..
868 */ 899 */
869static int bf537mac_open(struct net_device *dev) 900static int bfin_mac_open(struct net_device *dev)
870{ 901{
871 struct bf537mac_local *lp = netdev_priv(dev); 902 struct bfin_mac_local *lp = netdev_priv(dev);
872 int retval; 903 int retval;
873 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 904 pr_debug("%s: %s\n", dev->name, __FUNCTION__);
874 905
@@ -891,8 +922,8 @@ static int bf537mac_open(struct net_device *dev)
891 phy_start(lp->phydev); 922 phy_start(lp->phydev);
892 phy_write(lp->phydev, MII_BMCR, BMCR_RESET); 923 phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
893 setup_system_regs(dev); 924 setup_system_regs(dev);
894 bf537mac_disable(); 925 bfin_mac_disable();
895 bf537mac_enable(); 926 bfin_mac_enable();
896 pr_debug("hardware init finished\n"); 927 pr_debug("hardware init finished\n");
897 netif_start_queue(dev); 928 netif_start_queue(dev);
898 netif_carrier_on(dev); 929 netif_carrier_on(dev);
@@ -906,9 +937,9 @@ static int bf537mac_open(struct net_device *dev)
906 * and not talk to the outside world. Caused by 937 * and not talk to the outside world. Caused by
907 * an 'ifconfig ethX down' 938 * an 'ifconfig ethX down'
908 */ 939 */
909static int bf537mac_close(struct net_device *dev) 940static int bfin_mac_close(struct net_device *dev)
910{ 941{
911 struct bf537mac_local *lp = netdev_priv(dev); 942 struct bfin_mac_local *lp = netdev_priv(dev);
912 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 943 pr_debug("%s: %s\n", dev->name, __FUNCTION__);
913 944
914 netif_stop_queue(dev); 945 netif_stop_queue(dev);
@@ -918,7 +949,7 @@ static int bf537mac_close(struct net_device *dev)
918 phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN); 949 phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
919 950
920 /* clear everything */ 951 /* clear everything */
921 bf537mac_shutdown(dev); 952 bfin_mac_shutdown(dev);
922 953
923 /* free the rx/tx buffers */ 954 /* free the rx/tx buffers */
924 desc_list_free(); 955 desc_list_free();
@@ -926,46 +957,59 @@ static int bf537mac_close(struct net_device *dev)
926 return 0; 957 return 0;
927} 958}
928 959
929static int __init bf537mac_probe(struct net_device *dev) 960static int __init bfin_mac_probe(struct platform_device *pdev)
930{ 961{
931 struct bf537mac_local *lp = netdev_priv(dev); 962 struct net_device *ndev;
932 int retval; 963 struct bfin_mac_local *lp;
933 int i; 964 int rc, i;
965
966 ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
967 if (!ndev) {
968 dev_err(&pdev->dev, "Cannot allocate net device!\n");
969 return -ENOMEM;
970 }
971
972 SET_NETDEV_DEV(ndev, &pdev->dev);
973 platform_set_drvdata(pdev, ndev);
974 lp = netdev_priv(ndev);
934 975
935 /* Grab the MAC address in the MAC */ 976 /* Grab the MAC address in the MAC */
936 *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); 977 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
937 *(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); 978 *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
938 979
939 /* probe mac */ 980 /* probe mac */
940 /*todo: how to probe? which is revision_register */ 981 /*todo: how to probe? which is revision_register */
941 bfin_write_EMAC_ADDRLO(0x12345678); 982 bfin_write_EMAC_ADDRLO(0x12345678);
942 if (bfin_read_EMAC_ADDRLO() != 0x12345678) { 983 if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
943 pr_debug("can't detect bf537 mac!\n"); 984 dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
944 retval = -ENODEV; 985 rc = -ENODEV;
945 goto err_out; 986 goto out_err_probe_mac;
946 } 987 }
947 988
948 /* set the GPIO pins to Ethernet mode */ 989 /* set the GPIO pins to Ethernet mode */
949 retval = setup_pin_mux(1); 990 rc = peripheral_request_list(pin_req, DRV_NAME);
950 if (retval) 991 if (rc) {
951 return retval; 992 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
952 993 rc = -EFAULT;
953 /*Is it valid? (Did bootloader initialize it?) */ 994 goto out_err_setup_pin_mux;
954 if (!is_valid_ether_addr(dev->dev_addr)) {
955 /* Grab the MAC from the board somehow - this is done in the
956 arch/blackfin/mach-bf537/boards/eth_mac.c */
957 bfin_get_ether_addr(dev->dev_addr);
958 } 995 }
959 996
997 /*
998 * Is it valid? (Did bootloader initialize it?)
999 * Grab the MAC from the board somehow
1000 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
1001 */
1002 if (!is_valid_ether_addr(ndev->dev_addr))
1003 bfin_get_ether_addr(ndev->dev_addr);
1004
960 /* If still not valid, get a random one */ 1005 /* If still not valid, get a random one */
961 if (!is_valid_ether_addr(dev->dev_addr)) { 1006 if (!is_valid_ether_addr(ndev->dev_addr))
962 random_ether_addr(dev->dev_addr); 1007 random_ether_addr(ndev->dev_addr);
963 }
964 1008
965 setup_mac_addr(dev->dev_addr); 1009 setup_mac_addr(ndev->dev_addr);
966 1010
967 /* MDIO bus initial */ 1011 /* MDIO bus initial */
968 lp->mii_bus.priv = dev; 1012 lp->mii_bus.priv = ndev;
969 lp->mii_bus.read = mdiobus_read; 1013 lp->mii_bus.read = mdiobus_read;
970 lp->mii_bus.write = mdiobus_write; 1014 lp->mii_bus.write = mdiobus_write;
971 lp->mii_bus.reset = mdiobus_reset; 1015 lp->mii_bus.reset = mdiobus_reset;
@@ -975,86 +1019,86 @@ static int __init bf537mac_probe(struct net_device *dev)
975 for (i = 0; i < PHY_MAX_ADDR; ++i) 1019 for (i = 0; i < PHY_MAX_ADDR; ++i)
976 lp->mii_bus.irq[i] = PHY_POLL; 1020 lp->mii_bus.irq[i] = PHY_POLL;
977 1021
978 mdiobus_register(&lp->mii_bus); 1022 rc = mdiobus_register(&lp->mii_bus);
1023 if (rc) {
1024 dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1025 goto out_err_mdiobus_register;
1026 }
979 1027
980 retval = mii_probe(dev); 1028 rc = mii_probe(ndev);
981 if (retval) 1029 if (rc) {
982 return retval; 1030 dev_err(&pdev->dev, "MII Probe failed!\n");
1031 goto out_err_mii_probe;
1032 }
983 1033
984 /* Fill in the fields of the device structure with ethernet values. */ 1034 /* Fill in the fields of the device structure with ethernet values. */
985 ether_setup(dev); 1035 ether_setup(ndev);
986 1036
987 dev->open = bf537mac_open; 1037 ndev->open = bfin_mac_open;
988 dev->stop = bf537mac_close; 1038 ndev->stop = bfin_mac_close;
989 dev->hard_start_xmit = bf537mac_hard_start_xmit; 1039 ndev->hard_start_xmit = bfin_mac_hard_start_xmit;
990 dev->set_mac_address = bf537mac_set_mac_address; 1040 ndev->set_mac_address = bfin_mac_set_mac_address;
991 dev->tx_timeout = bf537mac_timeout; 1041 ndev->tx_timeout = bfin_mac_timeout;
992 dev->set_multicast_list = bf537mac_set_multicast_list; 1042 ndev->set_multicast_list = bfin_mac_set_multicast_list;
993#ifdef CONFIG_NET_POLL_CONTROLLER 1043#ifdef CONFIG_NET_POLL_CONTROLLER
994 dev->poll_controller = bf537mac_poll; 1044 ndev->poll_controller = bfin_mac_poll;
995#endif 1045#endif
1046 ndev->ethtool_ops = &bfin_mac_ethtool_ops;
996 1047
997 spin_lock_init(&lp->lock); 1048 spin_lock_init(&lp->lock);
998 1049
999 /* now, enable interrupts */ 1050 /* now, enable interrupts */
1000 /* register irq handler */ 1051 /* register irq handler */
1001 if (request_irq 1052 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
1002 (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED, 1053 IRQF_DISABLED | IRQF_SHARED, "EMAC_RX", ndev);
1003 "EMAC_RX", dev)) { 1054 if (rc) {
1004 printk(KERN_WARNING DRV_NAME 1055 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
1005 ": Unable to attach BlackFin MAC RX interrupt\n"); 1056 rc = -EBUSY;
1006 return -EBUSY; 1057 goto out_err_request_irq;
1007 } 1058 }
1008 1059
1009 1060 rc = register_netdev(ndev);
1010 retval = register_netdev(dev); 1061 if (rc) {
1011 if (retval == 0) { 1062 dev_err(&pdev->dev, "Cannot register net device!\n");
1012 /* now, print out the card info, in a short format.. */ 1063 goto out_err_reg_ndev;
1013 printk(KERN_INFO "%s: Version %s, %s\n",
1014 DRV_NAME, DRV_VERSION, DRV_DESC);
1015 }
1016
1017err_out:
1018 return retval;
1019}
1020
1021static int bfin_mac_probe(struct platform_device *pdev)
1022{
1023 struct net_device *ndev;
1024
1025 ndev = alloc_etherdev(sizeof(struct bf537mac_local));
1026 if (!ndev) {
1027 printk(KERN_WARNING DRV_NAME ": could not allocate device\n");
1028 return -ENOMEM;
1029 } 1064 }
1030 1065
1031 SET_NETDEV_DEV(ndev, &pdev->dev); 1066 /* now, print out the card info, in a short format.. */
1067 dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1032 1068
1033 platform_set_drvdata(pdev, ndev); 1069 return 0;
1034 1070
1035 if (bf537mac_probe(ndev) != 0) { 1071out_err_reg_ndev:
1036 platform_set_drvdata(pdev, NULL); 1072 free_irq(IRQ_MAC_RX, ndev);
1037 free_netdev(ndev); 1073out_err_request_irq:
1038 printk(KERN_WARNING DRV_NAME ": not found\n"); 1074out_err_mii_probe:
1039 return -ENODEV; 1075 mdiobus_unregister(&lp->mii_bus);
1040 } 1076out_err_mdiobus_register:
1077 peripheral_free_list(pin_req);
1078out_err_setup_pin_mux:
1079out_err_probe_mac:
1080 platform_set_drvdata(pdev, NULL);
1081 free_netdev(ndev);
1041 1082
1042 return 0; 1083 return rc;
1043} 1084}
1044 1085
1045static int bfin_mac_remove(struct platform_device *pdev) 1086static int bfin_mac_remove(struct platform_device *pdev)
1046{ 1087{
1047 struct net_device *ndev = platform_get_drvdata(pdev); 1088 struct net_device *ndev = platform_get_drvdata(pdev);
1089 struct bfin_mac_local *lp = netdev_priv(ndev);
1048 1090
1049 platform_set_drvdata(pdev, NULL); 1091 platform_set_drvdata(pdev, NULL);
1050 1092
1093 mdiobus_unregister(&lp->mii_bus);
1094
1051 unregister_netdev(ndev); 1095 unregister_netdev(ndev);
1052 1096
1053 free_irq(IRQ_MAC_RX, ndev); 1097 free_irq(IRQ_MAC_RX, ndev);
1054 1098
1055 free_netdev(ndev); 1099 free_netdev(ndev);
1056 1100
1057 setup_pin_mux(0); 1101 peripheral_free_list(pin_req);
1058 1102
1059 return 0; 1103 return 0;
1060} 1104}
@@ -1065,7 +1109,7 @@ static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
1065 struct net_device *net_dev = platform_get_drvdata(pdev); 1109 struct net_device *net_dev = platform_get_drvdata(pdev);
1066 1110
1067 if (netif_running(net_dev)) 1111 if (netif_running(net_dev))
1068 bf537mac_close(net_dev); 1112 bfin_mac_close(net_dev);
1069 1113
1070 return 0; 1114 return 0;
1071} 1115}
@@ -1075,7 +1119,7 @@ static int bfin_mac_resume(struct platform_device *pdev)
1075 struct net_device *net_dev = platform_get_drvdata(pdev); 1119 struct net_device *net_dev = platform_get_drvdata(pdev);
1076 1120
1077 if (netif_running(net_dev)) 1121 if (netif_running(net_dev))
1078 bf537mac_open(net_dev); 1122 bfin_mac_open(net_dev);
1079 1123
1080 return 0; 1124 return 0;
1081} 1125}
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index f774d5a36942..beff51064ff4 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -49,7 +49,7 @@ struct net_dma_desc_tx {
49 struct status_area_tx status; 49 struct status_area_tx status;
50}; 50};
51 51
52struct bf537mac_local { 52struct bfin_mac_local {
53 /* 53 /*
54 * these are things that the kernel wants me to keep, so users 54 * these are things that the kernel wants me to keep, so users
55 * can find out semi-useless statistics of how well the card is 55 * can find out semi-useless statistics of how well the card is
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 15853be4680a..4b46e68183e0 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
56 56
57#define DRV_MODULE_NAME "bnx2" 57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": " 58#define PFX DRV_MODULE_NAME ": "
59#define DRV_MODULE_VERSION "1.7.4" 59#define DRV_MODULE_VERSION "1.7.5"
60#define DRV_MODULE_RELDATE "February 18, 2008" 60#define DRV_MODULE_RELDATE "April 29, 2008"
61 61
62#define RUN_AT(x) (jiffies + (x)) 62#define RUN_AT(x) (jiffies + (x))
63 63
@@ -1631,8 +1631,10 @@ bnx2_set_default_remote_link(struct bnx2 *bp)
1631static void 1631static void
1632bnx2_set_default_link(struct bnx2 *bp) 1632bnx2_set_default_link(struct bnx2 *bp)
1633{ 1633{
1634 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) 1634 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1635 return bnx2_set_default_remote_link(bp); 1635 bnx2_set_default_remote_link(bp);
1636 return;
1637 }
1636 1638
1637 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; 1639 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1638 bp->req_line_speed = 0; 1640 bp->req_line_speed = 0;
@@ -1715,7 +1717,6 @@ bnx2_remote_phy_event(struct bnx2 *bp)
1715 break; 1717 break;
1716 } 1718 }
1717 1719
1718 spin_lock(&bp->phy_lock);
1719 bp->flow_ctrl = 0; 1720 bp->flow_ctrl = 0;
1720 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != 1721 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1721 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { 1722 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
@@ -1737,7 +1738,6 @@ bnx2_remote_phy_event(struct bnx2 *bp)
1737 if (old_port != bp->phy_port) 1738 if (old_port != bp->phy_port)
1738 bnx2_set_default_link(bp); 1739 bnx2_set_default_link(bp);
1739 1740
1740 spin_unlock(&bp->phy_lock);
1741 } 1741 }
1742 if (bp->link_up != link_up) 1742 if (bp->link_up != link_up)
1743 bnx2_report_link(bp); 1743 bnx2_report_link(bp);
@@ -2222,6 +2222,11 @@ bnx2_init_5709_context(struct bnx2 *bp)
2222 for (i = 0; i < bp->ctx_pages; i++) { 2222 for (i = 0; i < bp->ctx_pages; i++) {
2223 int j; 2223 int j;
2224 2224
2225 if (bp->ctx_blk[i])
2226 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2227 else
2228 return -ENOMEM;
2229
2225 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, 2230 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2226 (bp->ctx_blk_mapping[i] & 0xffffffff) | 2231 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2227 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); 2232 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
@@ -2445,14 +2450,15 @@ bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2445static void 2450static void
2446bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi) 2451bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2447{ 2452{
2448 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) { 2453 spin_lock(&bp->phy_lock);
2449 spin_lock(&bp->phy_lock); 2454
2455 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2450 bnx2_set_link(bp); 2456 bnx2_set_link(bp);
2451 spin_unlock(&bp->phy_lock);
2452 }
2453 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT)) 2457 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2454 bnx2_set_remote_link(bp); 2458 bnx2_set_remote_link(bp);
2455 2459
2460 spin_unlock(&bp->phy_lock);
2461
2456} 2462}
2457 2463
2458static inline u16 2464static inline u16
@@ -3174,6 +3180,12 @@ load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3174 int i; 3180 int i;
3175 u32 val; 3181 u32 val;
3176 3182
3183 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3184 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3185 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3186 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3187 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3188 }
3177 3189
3178 for (i = 0; i < rv2p_code_len; i += 8) { 3190 for (i = 0; i < rv2p_code_len; i += 8) {
3179 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code)); 3191 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
@@ -4215,13 +4227,6 @@ bnx2_init_remote_phy(struct bnx2 *bp)
4215 if (netif_running(bp->dev)) { 4227 if (netif_running(bp->dev)) {
4216 u32 sig; 4228 u32 sig;
4217 4229
4218 if (val & BNX2_LINK_STATUS_LINK_UP) {
4219 bp->link_up = 1;
4220 netif_carrier_on(bp->dev);
4221 } else {
4222 bp->link_up = 0;
4223 netif_carrier_off(bp->dev);
4224 }
4225 sig = BNX2_DRV_ACK_CAP_SIGNATURE | 4230 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4226 BNX2_FW_CAP_REMOTE_PHY_CAPABLE; 4231 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4227 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig); 4232 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
@@ -4878,6 +4883,8 @@ bnx2_init_nic(struct bnx2 *bp)
4878 spin_lock_bh(&bp->phy_lock); 4883 spin_lock_bh(&bp->phy_lock);
4879 bnx2_init_phy(bp); 4884 bnx2_init_phy(bp);
4880 bnx2_set_link(bp); 4885 bnx2_set_link(bp);
4886 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4887 bnx2_remote_phy_event(bp);
4881 spin_unlock_bh(&bp->phy_lock); 4888 spin_unlock_bh(&bp->phy_lock);
4882 return 0; 4889 return 0;
4883} 4890}
@@ -4920,7 +4927,7 @@ bnx2_test_registers(struct bnx2 *bp)
4920 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 }, 4927 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4921 4928
4922 { 0x1000, 0, 0x00000000, 0x00000001 }, 4929 { 0x1000, 0, 0x00000000, 0x00000001 },
4923 { 0x1004, 0, 0x00000000, 0x000f0001 }, 4930 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
4924 4931
4925 { 0x1408, 0, 0x01c00800, 0x00000000 }, 4932 { 0x1408, 0, 0x01c00800, 0x00000000 },
4926 { 0x149c, 0, 0x8000ffff, 0x00000000 }, 4933 { 0x149c, 0, 0x8000ffff, 0x00000000 },
diff --git a/drivers/net/bnx2_fw2.h b/drivers/net/bnx2_fw2.h
index e6ffa2769f3d..ed0514cba0ee 100644
--- a/drivers/net/bnx2_fw2.h
+++ b/drivers/net/bnx2_fw2.h
@@ -3173,251 +3173,267 @@ static struct fw_info bnx2_rxp_fw_09 = {
3173}; 3173};
3174 3174
3175static u8 bnx2_xi_rv2p_proc1[] = { 3175static u8 bnx2_xi_rv2p_proc1[] = {
3176 /* Date: 01/14/2008 15:44 */ 3176 /* Date: 04/25/2008 22:02 */
3177 0xc5, 0x56, 0xcd, 0x6b, 0x13, 0x51, 0x10, 0x9f, 0xdd, 0x7c, 0x6c, 0x9a, 3177 0xbd, 0x56, 0x4f, 0x68, 0x1c, 0x55, 0x18, 0xff, 0x76, 0x76, 0x77, 0x66,
3178 0x6c, 0xb2, 0xa1, 0x6a, 0x09, 0x35, 0xd2, 0x58, 0x7a, 0x30, 0x6d, 0xc4, 3178 0x33, 0x3b, 0xbb, 0xb3, 0xd8, 0x34, 0x4c, 0xb7, 0x2b, 0x59, 0x83, 0x97,
3179 0x56, 0x3d, 0x78, 0x28, 0x54, 0x7a, 0x11, 0xac, 0xa7, 0x1e, 0x44, 0xc4, 3179 0xdd, 0x6c, 0x69, 0xa2, 0x15, 0x04, 0x53, 0x5a, 0x72, 0x09, 0xd8, 0x9e,
3180 0xcf, 0x20, 0x05, 0xf5, 0x8f, 0x70, 0x51, 0xab, 0x20, 0x78, 0x28, 0x68, 3180 0x02, 0xb5, 0x52, 0x84, 0xb6, 0x8b, 0xf4, 0x52, 0x5a, 0x28, 0x78, 0x11,
3181 0xb4, 0x7e, 0xa0, 0x27, 0x15, 0xf1, 0x90, 0x1c, 0x04, 0x05, 0x45, 0x50, 3181 0x84, 0x0e, 0x6d, 0x93, 0x82, 0xe8, 0x61, 0xc1, 0x06, 0x12, 0x44, 0xa3,
3182 0xf0, 0xa4, 0x37, 0x41, 0xbd, 0x54, 0xc5, 0x0f, 0xf0, 0xe2, 0x45, 0x8f, 3182 0x07, 0x95, 0x60, 0x61, 0x07, 0x3c, 0x78, 0x10, 0x14, 0x15, 0x11, 0x6c,
3183 0xda, 0xf8, 0xde, 0xcc, 0xef, 0xd9, 0xdd, 0x4d, 0xd2, 0x14, 0x0f, 0x1a, 3183 0x0f, 0x85, 0x88, 0xf6, 0xd2, 0x54, 0x4b, 0x0b, 0x1e, 0x5b, 0x3c, 0xd6,
3184 0x68, 0x7f, 0xec, 0xdb, 0xdf, 0x9b, 0x37, 0xf3, 0x9b, 0x79, 0x33, 0x9b, 3184 0x8c, 0xef, 0xfb, 0xf3, 0x92, 0x99, 0x97, 0x9d, 0x24, 0xa7, 0x2e, 0xb4,
3185 0x27, 0x22, 0x9b, 0xfc, 0xc6, 0x80, 0x42, 0x72, 0xad, 0x58, 0x4a, 0x81, 3185 0x3f, 0xbe, 0x37, 0xdf, 0xbf, 0xf7, 0xfd, 0xf9, 0xbd, 0xd4, 0x00, 0xc0,
3186 0x45, 0x74, 0xcf, 0x65, 0xf4, 0x37, 0x91, 0xfc, 0x46, 0x04, 0xfc, 0x91, 3186 0x82, 0x30, 0x1a, 0x55, 0x08, 0x65, 0x2b, 0x5f, 0x52, 0x90, 0x03, 0xf8,
3187 0xbc, 0xfa, 0xff, 0x9d, 0x26, 0x4a, 0x1a, 0x63, 0x34, 0xb1, 0x5e, 0xe3, 3187 0x1a, 0xf8, 0x57, 0xf4, 0x48, 0x0e, 0x0f, 0x8a, 0x3c, 0xce, 0x10, 0x8e,
3188 0x24, 0x3d, 0x29, 0x15, 0x14, 0xfe, 0x6a, 0x92, 0xaf, 0x9f, 0x87, 0xea, 3188 0xd7, 0xd4, 0xff, 0x17, 0xe0, 0x48, 0x13, 0x31, 0x0f, 0x47, 0x5e, 0x40,
3189 0x0f, 0x1a, 0x19, 0xb6, 0xfb, 0x0e, 0xfb, 0xdf, 0xc4, 0x04, 0xb7, 0x55, 3189 0x3c, 0x0c, 0xdf, 0x37, 0x03, 0x85, 0xff, 0xc5, 0x10, 0xa2, 0x3c, 0xdc,
3190 0x52, 0x62, 0x07, 0x48, 0x1b, 0xf3, 0x0c, 0xaf, 0xe6, 0xf4, 0x73, 0xd1, 3190 0xff, 0x36, 0x2a, 0x93, 0xff, 0x35, 0xb1, 0xff, 0x33, 0xcf, 0xf8, 0x6a,
3191 0xf2, 0x37, 0xe2, 0x7c, 0x5b, 0xd6, 0x17, 0xe6, 0x3c, 0xbd, 0x4e, 0xef, 3191 0xa7, 0xc4, 0x7e, 0x04, 0xe1, 0x40, 0x8d, 0x60, 0xb5, 0x87, 0xf2, 0x89,
3192 0x27, 0xf5, 0xb3, 0x97, 0x3e, 0xdd, 0x48, 0xb1, 0x5d, 0x79, 0xdf, 0x9b, 3192 0x13, 0x60, 0xa3, 0x9f, 0x4f, 0x94, 0x02, 0xca, 0x8d, 0x5c, 0x78, 0x40,
3193 0x3e, 0xcd, 0xfb, 0x5c, 0x4b, 0xec, 0xa9, 0x3f, 0xde, 0xbf, 0x55, 0xd9, 3193 0xf2, 0xb2, 0x58, 0xef, 0x5e, 0xcf, 0xc7, 0x73, 0xb8, 0x3f, 0x8d, 0xf2,
3194 0x81, 0xdf, 0x24, 0x76, 0x0e, 0x96, 0xf4, 0xfa, 0x76, 0xf0, 0xc6, 0xc1, 3194 0x3e, 0xf7, 0x5a, 0x0f, 0x31, 0x80, 0x73, 0x25, 0x8f, 0xef, 0x33, 0xca,
3195 0x2b, 0xb6, 0xf0, 0x16, 0xe6, 0x34, 0x3a, 0x54, 0xad, 0xe8, 0x78, 0x06, 3195 0x6e, 0xd7, 0xda, 0x68, 0xa7, 0x74, 0xdb, 0xe2, 0xb7, 0x88, 0x7e, 0xff,
3196 0x49, 0xe2, 0x49, 0xd0, 0x4c, 0xca, 0x15, 0x9d, 0x06, 0x84, 0xfd, 0x6e, 3196 0x89, 0xd9, 0x2f, 0xfa, 0x4b, 0xfa, 0x69, 0x28, 0x3f, 0x78, 0x6e, 0x4b,
3197 0x58, 0xef, 0x57, 0xbe, 0x0d, 0x6b, 0xde, 0x82, 0x8a, 0xdb, 0xc4, 0x1b, 3197 0x5e, 0xb6, 0x91, 0x97, 0xad, 0xf2, 0x90, 0x3a, 0x80, 0xce, 0x03, 0x71,
3198 0xe6, 0x39, 0x15, 0x63, 0x57, 0xf3, 0xde, 0x2a, 0x9e, 0x89, 0x2f, 0x18, 3198 0xaf, 0x8a, 0x8b, 0x7e, 0x1f, 0xcb, 0xbd, 0x01, 0x4e, 0x37, 0xc5, 0x7f,
3199 0x57, 0x26, 0x10, 0x57, 0x24, 0xde, 0x96, 0xf8, 0x82, 0x7a, 0xa5, 0xda, 3199 0x84, 0xe8, 0xe5, 0xd8, 0x9f, 0xfa, 0x27, 0xf7, 0xd8, 0xea, 0x47, 0xd7,
3200 0xf8, 0xaf, 0xcf, 0x51, 0xbe, 0xf0, 0x39, 0x49, 0xe8, 0x9c, 0x8c, 0xec, 3200 0x29, 0x9d, 0xbf, 0xd3, 0xd1, 0xdf, 0x75, 0x3f, 0x30, 0xce, 0x1d, 0x15,
3201 0x4b, 0x76, 0x88, 0xfb, 0x93, 0x35, 0xb3, 0x21, 0xec, 0x3f, 0x91, 0xb6, 3201 0x27, 0xa9, 0x0f, 0x3b, 0xe8, 0xff, 0xa6, 0xf4, 0xd3, 0x7e, 0xf9, 0xfc,
3202 0xf7, 0x54, 0xf9, 0x8d, 0xf5, 0x72, 0x3b, 0x1d, 0x12, 0xd0, 0xe1, 0x31, 3202 0xd7, 0xcd, 0xf3, 0xd6, 0xa0, 0xba, 0x15, 0x8d, 0xba, 0xfd, 0x28, 0x75,
3203 0xe2, 0x9b, 0xa2, 0x21, 0xbb, 0xc0, 0xef, 0xe3, 0xbc, 0x7f, 0xad, 0xf2, 3203 0x9b, 0x81, 0x17, 0xad, 0x80, 0xf4, 0x0a, 0x80, 0xb8, 0x5f, 0x25, 0x80,
3204 0x47, 0xe3, 0x3a, 0xe0, 0x7a, 0xe0, 0x01, 0xe0, 0x7e, 0xe0, 0x1a, 0xe0, 3204 0xf8, 0xbc, 0xe0, 0x45, 0xc1, 0xcf, 0x04, 0x97, 0x05, 0xf7, 0x0a, 0x0e,
3205 0x6a, 0xe0, 0x2a, 0x60, 0x2f, 0xf0, 0x32, 0x30, 0x0f, 0xf4, 0x80, 0x39, 3205 0x0b, 0xee, 0x11, 0x7c, 0x4e, 0xf0, 0x6f, 0xc1, 0x9a, 0xa0, 0x2f, 0x58,
3206 0xe0, 0x05, 0xa0, 0x0b, 0xcc, 0x00, 0x6b, 0xc0, 0xab, 0xc0, 0x14, 0xf0, 3206 0x15, 0xbc, 0x27, 0xe8, 0x09, 0x96, 0x0d, 0x7f, 0x75, 0xc1, 0x92, 0x60,
3207 0x28, 0xf0, 0x21, 0xf0, 0x31, 0xf0, 0x0b, 0xf0, 0x1c, 0xd0, 0xb1, 0x60, 3207 0x24, 0xf8, 0x9a, 0x61, 0xef, 0xe6, 0x18, 0x57, 0x45, 0x3e, 0x28, 0xf2,
3208 0x0f, 0xa8, 0x7e, 0x3e, 0xee, 0x47, 0x48, 0xa7, 0xeb, 0xa8, 0x7f, 0xad, 3208 0x49, 0x91, 0xb1, 0xa0, 0x32, 0xf7, 0xa9, 0x7a, 0x7d, 0xbe, 0xd1, 0xdf,
3209 0x33, 0xde, 0x97, 0x0d, 0x0f, 0xf9, 0x65, 0x9d, 0x2e, 0x83, 0xd7, 0x5b, 3209 0xd5, 0x9e, 0x7c, 0x6f, 0x69, 0xbd, 0x12, 0xd5, 0x0f, 0xda, 0x49, 0xfd,
3210 0xbf, 0x19, 0xb9, 0x27, 0xa5, 0xae, 0xf7, 0x23, 0x9a, 0x37, 0x8f, 0xe3, 3210 0x8f, 0xb7, 0xd1, 0x67, 0xb5, 0xe9, 0xd6, 0x20, 0xbb, 0x1b, 0x31, 0xe7,
3211 0x39, 0xb4, 0xc3, 0xe3, 0x73, 0x72, 0x49, 0x59, 0x37, 0x6e, 0xed, 0xf1, 3211 0xf1, 0x91, 0xd8, 0x07, 0xfd, 0xef, 0x32, 0xf6, 0x68, 0xaa, 0x63, 0xce,
3212 0x04, 0x8f, 0xa4, 0x05, 0x3f, 0xa7, 0x7b, 0xd4, 0xff, 0x66, 0x73, 0x26, 3212 0xd7, 0xa0, 0x3d, 0x7a, 0x45, 0xf6, 0xe8, 0xd0, 0x96, 0xf9, 0xe5, 0x39,
3213 0x23, 0xcf, 0x87, 0xb3, 0x46, 0x67, 0x63, 0xc7, 0xf8, 0xd3, 0xcd, 0x8f, 3213 0x3d, 0x2a, 0xf6, 0x53, 0x32, 0x9f, 0x8d, 0x0c, 0xbd, 0x30, 0xb1, 0xaf,
3214 0x4e, 0xe7, 0x19, 0xbf, 0xba, 0x9d, 0x2b, 0x58, 0xb5, 0xc3, 0xf1, 0x5f, 3214 0x14, 0x2f, 0x63, 0x1f, 0x6e, 0xe6, 0xba, 0x1d, 0x8c, 0x5b, 0x94, 0xb8,
3215 0x19, 0x15, 0x8c, 0x8f, 0x31, 0x54, 0xdc, 0x64, 0x5c, 0xe3, 0x56, 0xf7, 3215 0x59, 0xf9, 0xa1, 0xbd, 0xcc, 0x6f, 0x4b, 0xcf, 0x71, 0x7a, 0x7e, 0x79,
3216 0xb9, 0x39, 0x47, 0xa3, 0x5b, 0xa8, 0xf1, 0x7d, 0x89, 0x53, 0x2d, 0xa9, 3216 0x0e, 0x6d, 0x63, 0x0e, 0x2f, 0xed, 0xd0, 0x87, 0xb2, 0x51, 0xcf, 0xf3,
3217 0xed, 0xfe, 0x6c, 0x9e, 0x17, 0x5e, 0xff, 0xe1, 0x97, 0x8c, 0x85, 0x2b, 3217 0x4a, 0x9f, 0x45, 0xcb, 0x62, 0x5c, 0x62, 0xec, 0x78, 0x76, 0x01, 0xf1,
3218 0x2f, 0x84, 0xff, 0xba, 0xe4, 0x32, 0xee, 0x1e, 0xa1, 0xc8, 0xcf, 0xbc, 3218 0x90, 0xf7, 0x0b, 0xfb, 0x1b, 0xa5, 0x7b, 0x78, 0xc1, 0x02, 0xed, 0x6d,
3219 0x97, 0xfb, 0xe8, 0xb3, 0xdf, 0x3f, 0x2c, 0xbf, 0x61, 0xce, 0xc1, 0xbe, 3219 0x01, 0x16, 0xec, 0x21, 0x85, 0x4f, 0xe3, 0x0f, 0x59, 0xaf, 0x5e, 0xbc,
3220 0xe3, 0x26, 0x8f, 0x79, 0xf6, 0x73, 0x90, 0xe4, 0x79, 0xba, 0x2c, 0xef, 3220 0x4d, 0x18, 0x2c, 0xdd, 0x62, 0xfd, 0x3f, 0x9a, 0x9c, 0xf7, 0x1b, 0xe3,
3221 0xa7, 0xcb, 0xb8, 0xcf, 0x83, 0xe1, 0x7a, 0x90, 0x7b, 0x11, 0x43, 0xbe, 3221 0x60, 0xfc, 0xf4, 0x77, 0xd9, 0x77, 0x1f, 0xe5, 0x7f, 0x73, 0x61, 0xa4,
3222 0xf7, 0xe2, 0x5e, 0x44, 0xef, 0x71, 0xaa, 0x7e, 0x73, 0x2e, 0x58, 0x2f, 3222 0xe3, 0x88, 0xdd, 0x79, 0xbd, 0x47, 0xfc, 0xbb, 0x62, 0xd7, 0xa8, 0x6e,
3223 0x05, 0xaa, 0x8e, 0xc1, 0x9f, 0x96, 0x3c, 0x9b, 0xbe, 0x6c, 0xea, 0x9d, 3223 0xef, 0x47, 0x24, 0x0e, 0x7b, 0xf3, 0xcc, 0xaf, 0x1f, 0x44, 0xfa, 0x3e,
3224 0x97, 0xeb, 0x7e, 0x2c, 0xa4, 0xdf, 0x76, 0xaa, 0x04, 0xf3, 0x64, 0xb5, 3224 0xc2, 0x2b, 0x6d, 0xb6, 0xab, 0x50, 0x9c, 0x3d, 0xfd, 0x65, 0x63, 0x3e,
3225 0xa9, 0x97, 0x6e, 0xe7, 0x84, 0xec, 0xe5, 0x54, 0x06, 0xa8, 0xb5, 0x8e, 3225 0x9a, 0xbb, 0xe2, 0xd7, 0x27, 0xf1, 0x26, 0xbf, 0x26, 0xef, 0xaf, 0xf9,
3226 0x1d, 0xc4, 0x35, 0x81, 0x3a, 0x5e, 0xdb, 0x52, 0xc7, 0xa6, 0xdf, 0x4b, 3226 0xb5, 0x04, 0x67, 0x66, 0x7c, 0x8a, 0x57, 0xb5, 0xd9, 0xcd, 0x9b, 0x3e,
3227 0x3d, 0x77, 0xea, 0x5f, 0x7f, 0xdf, 0xa7, 0x85, 0xe7, 0x07, 0xea, 0xd3, 3227 0xe3, 0xdb, 0x2e, 0xe3, 0x43, 0x17, 0xeb, 0x13, 0xc7, 0xe7, 0xca, 0x2c,
3228 0xf4, 0x43, 0xe8, 0xe4, 0x30, 0xaf, 0xb8, 0x70, 0x5f, 0xf2, 0x26, 0xfd, 3228 0x9f, 0xad, 0xe8, 0xbd, 0xd6, 0xf6, 0x3a, 0xaf, 0xed, 0xf2, 0xc1, 0xf8,
3229 0x5c, 0x15, 0xa3, 0x1f, 0xf6, 0xd3, 0x31, 0xf1, 0x0d, 0x04, 0xfb, 0xe7, 3229 0x3a, 0x8e, 0xce, 0x43, 0xc7, 0x4b, 0xcf, 0x43, 0x76, 0x5c, 0xc6, 0xae,
3230 0x50, 0x87, 0x7c, 0x05, 0xfb, 0x6e, 0x54, 0x97, 0x70, 0xdd, 0x4b, 0xfe, 3230 0x95, 0xae, 0xc3, 0xd2, 0x04, 0x63, 0x61, 0x12, 0xf3, 0xfa, 0x21, 0xde,
3231 0xd3, 0xd0, 0xa9, 0xbf, 0x4b, 0x5f, 0xe8, 0x01, 0x6f, 0xcd, 0x32, 0x3c, 3231 0xd8, 0xeb, 0x56, 0x8d, 0xf4, 0xc6, 0x80, 0xe5, 0x59, 0x99, 0xbf, 0x59,
3232 0xb1, 0x3b, 0x59, 0x0e, 0xf6, 0x11, 0xaf, 0x89, 0xfe, 0x87, 0x7d, 0x7d, 3232 0xda, 0x47, 0xc5, 0x37, 0x16, 0x62, 0x1d, 0x42, 0x7a, 0x6f, 0x2c, 0xf7,
3233 0xf5, 0x47, 0x1d, 0xf2, 0x30, 0xfe, 0x7f, 0xf3, 0x80, 0xf9, 0x52, 0xb4, 3233 0x67, 0x9a, 0x87, 0xbc, 0x9c, 0xab, 0xfa, 0x8f, 0xa5, 0xf7, 0x78, 0x8d,
3234 0x24, 0x0f, 0x09, 0x5a, 0x99, 0xbe, 0x84, 0xf8, 0xa9, 0x83, 0xbe, 0x49, 3234 0xe7, 0xad, 0x94, 0x9e, 0xd3, 0x46, 0x3c, 0x78, 0xfe, 0xdd, 0xfe, 0x72,
3235 0xe8, 0xf0, 0x6d, 0x71, 0x79, 0x7d, 0x33, 0xe0, 0x7d, 0x0d, 0xf0, 0xb8, 3235 0x6f, 0x50, 0x3f, 0x74, 0x7e, 0x01, 0x74, 0x27, 0xb3, 0xde, 0x09, 0xfd,
3236 0x2e, 0xc6, 0xe5, 0xfe, 0x39, 0xd5, 0x2f, 0x11, 0xdd, 0xc6, 0x2a, 0xba, 3236 0x3e, 0x6b, 0x9e, 0xa4, 0xe3, 0x7e, 0x98, 0x4f, 0xdd, 0xfb, 0x28, 0x74,
3237 0xaf, 0x9c, 0xa0, 0x06, 0xe2, 0x7a, 0x1b, 0x8a, 0x2f, 0xab, 0xfc, 0x93, 3237 0x06, 0xf9, 0xff, 0x46, 0xbf, 0x7b, 0x03, 0xf6, 0x76, 0xa7, 0xb8, 0x29,
3238 0xef, 0x84, 0x3b, 0x0d, 0xa3, 0x83, 0xbc, 0x2e, 0x55, 0x04, 0x6f, 0x33, 3238 0xff, 0x55, 0xb5, 0x39, 0xb0, 0x75, 0xef, 0x1c, 0x63, 0x4f, 0x9f, 0xae,
3239 0x3f, 0x1f, 0xd0, 0x23, 0xac, 0x9b, 0xe8, 0x91, 0xa7, 0x5b, 0x7f, 0xfa, 3239 0xf3, 0x9e, 0x36, 0xb6, 0xcc, 0xa7, 0xe6, 0xaf, 0xe6, 0xb6, 0xfc, 0xf5,
3240 0x8d, 0xc7, 0xf6, 0x46, 0xd1, 0xaf, 0x0f, 0xa1, 0x6f, 0x7e, 0x48, 0x4b, 3240 0xac, 0xf8, 0xca, 0x02, 0xe6, 0x2b, 0x7c, 0x4f, 0xd2, 0x79, 0x3a, 0xfa,
3241 0x5f, 0xae, 0x4e, 0x71, 0xff, 0xa4, 0x3e, 0xf4, 0xcf, 0x6a, 0x56, 0x9e, 3241 0x9e, 0x06, 0x2f, 0xf1, 0xfd, 0xee, 0xaf, 0xef, 0x8e, 0xdf, 0x92, 0x75,
3242 0xfb, 0xb3, 0xf2, 0x1d, 0x36, 0xea, 0xb8, 0xcc, 0xeb, 0xcf, 0x0a, 0xf6, 3242 0x1a, 0xc4, 0x6f, 0xae, 0xc1, 0x57, 0xbf, 0xaf, 0x6f, 0xf2, 0x1b, 0x7e,
3243 0x65, 0xf4, 0xbe, 0x02, 0x7d, 0xdc, 0xc5, 0xf4, 0xca, 0xbc, 0x2b, 0x7d, 3243 0x5f, 0x59, 0xe1, 0xfe, 0xbd, 0x97, 0x98, 0xdf, 0x64, 0xdd, 0x87, 0xa4,
3244 0x74, 0xfe, 0x05, 0xfa, 0xba, 0x67, 0x74, 0x42, 0xbc, 0x5b, 0xf4, 0x7a, 3244 0xee, 0x4a, 0x8f, 0xec, 0x6f, 0x1b, 0xf6, 0xba, 0xff, 0xef, 0x08, 0x6f,
3245 0x1f, 0x7f, 0xf2, 0x2c, 0xe9, 0xab, 0x38, 0xc3, 0xe2, 0xdf, 0x0d, 0x78, 3245 0x5a, 0x53, 0x3c, 0x7f, 0x4e, 0xf7, 0x91, 0xd1, 0x97, 0xc9, 0x0e, 0xee,
3246 0x5f, 0x32, 0xfb, 0x06, 0xb4, 0x9e, 0x4f, 0x16, 0xcd, 0xdc, 0x18, 0xdc, 3246 0xd5, 0x65, 0x88, 0xa4, 0x6e, 0x77, 0x53, 0xf5, 0xab, 0x08, 0x4f, 0x38,
3247 0xa1, 0xfd, 0xf1, 0x28, 0xe7, 0x48, 0x3e, 0x05, 0x15, 0xcf, 0x76, 0xf4, 3247 0xf0, 0x55, 0xa4, 0xeb, 0xac, 0xfb, 0xc8, 0xf8, 0x25, 0xe9, 0xd7, 0x76,
3248 0xb6, 0xe2, 0xac, 0x2d, 0xcf, 0xb3, 0x27, 0xd9, 0xcc, 0xae, 0x59, 0xb3, 3248 0xa8, 0x77, 0x0d, 0xbe, 0xd8, 0xe0, 0x41, 0x9f, 0xfc, 0x4d, 0x08, 0xaf,
3249 0x3e, 0xc9, 0x05, 0x3a, 0x7d, 0xf7, 0x19, 0xaf, 0xe7, 0x1a, 0x31, 0x59, 3249 0x9c, 0x91, 0xfd, 0xfe, 0xcb, 0x65, 0xfe, 0xe8, 0x1e, 0xa3, 0x3d, 0x87,
3250 0x77, 0xa6, 0x8c, 0x1e, 0x1e, 0xc7, 0x57, 0x13, 0x3d, 0xf6, 0x5d, 0x14, 3250 0x11, 0xd9, 0xf3, 0x6e, 0x85, 0xe5, 0x7a, 0x85, 0x79, 0x71, 0xc2, 0xf1,
3251 0xdc, 0x4b, 0x3b, 0x19, 0xd3, 0x35, 0x57, 0xe6, 0xca, 0xbc, 0x9b, 0x62, 3251 0x48, 0xaf, 0x5e, 0x61, 0x1c, 0x29, 0xa3, 0x5d, 0x00, 0x0f, 0x8e, 0x93,
3252 0x24, 0xd6, 0xc3, 0xde, 0x2c, 0xf3, 0x21, 0x81, 0xbe, 0xde, 0x13, 0xc8, 3252 0x7a, 0x67, 0xd1, 0x63, 0x7e, 0x5f, 0xbc, 0x25, 0xfc, 0xe3, 0xeb, 0xfa,
3253 0x53, 0x74, 0xde, 0xae, 0x34, 0x5f, 0xc1, 0x39, 0x60, 0xe6, 0x43, 0xb4, 3253 0xc9, 0x7d, 0x5f, 0xc6, 0xf3, 0x11, 0xb5, 0xcf, 0xc9, 0x7e, 0x28, 0x9d,
3254 0xdf, 0x67, 0x51, 0x67, 0xd7, 0xba, 0xd4, 0xa3, 0xe9, 0x9f, 0x97, 0x16, 3254 0x36, 0xe7, 0xf7, 0xa9, 0x64, 0xdf, 0xf4, 0x93, 0xf5, 0xd6, 0xf3, 0xbd,
3255 0xe5, 0x1e, 0xb4, 0x9b, 0xb3, 0x1a, 0x73, 0x1d, 0xbe, 0x0f, 0x8a, 0xa8, 3255 0x9c, 0xd1, 0xa7, 0x99, 0x58, 0xf3, 0xdf, 0xd8, 0x0c, 0xe6, 0xeb, 0x43,
3256 0x3f, 0x33, 0x0f, 0xdb, 0x7d, 0x07, 0x08, 0x7f, 0x65, 0xf3, 0x3f, 0xdf, 3256 0xd5, 0xe1, 0xf9, 0x60, 0x54, 0x7e, 0x2c, 0x07, 0xcd, 0x1a, 0x73, 0xc2,
3257 0x61, 0xfe, 0xff, 0xb3, 0x39, 0x5f, 0x58, 0xca, 0xa3, 0xa9, 0xd3, 0x60, 3257 0x27, 0x73, 0x57, 0xc8, 0xcd, 0xf1, 0x39, 0x7d, 0x3e, 0x4d, 0x0b, 0x32,
3258 0x1e, 0x83, 0xf5, 0x1a, 0x9d, 0xc3, 0xcb, 0xcd, 0xdf, 0x1c, 0x74, 0x3e, 3258 0xbb, 0xf2, 0x13, 0x9d, 0x57, 0xa3, 0x3c, 0x9f, 0x3b, 0xc7, 0x74, 0xbd,
3259 0x06, 0x9d, 0xe3, 0x94, 0x88, 0xb1, 0x30, 0x6e, 0xfc, 0x14, 0xdb, 0xb5, 3259 0x7c, 0xba, 0xff, 0x02, 0xd7, 0xeb, 0xad, 0x1b, 0x8c, 0xa7, 0xe0, 0x75,
3260 0x67, 0x6d, 0xa6, 0xbb, 0x89, 0x33, 0x96, 0xc6, 0x9c, 0x7b, 0x46, 0x78, 3260 0x42, 0x77, 0xc1, 0x63, 0x7e, 0x5c, 0xf4, 0x4a, 0x84, 0x40, 0xf5, 0xb2,
3261 0x71, 0x59, 0x2f, 0x18, 0x3c, 0x7b, 0x4a, 0xbe, 0xfb, 0x6c, 0xfa, 0x0d, 3261 0x5e, 0xe2, 0x77, 0xad, 0x28, 0xef, 0xd1, 0x50, 0xa2, 0x8f, 0xe6, 0xfb,
3262 0x6d, 0x29, 0x98, 0xe1, 0x30, 0x0d, 0x00, 0x00, 0x00 }; 3262 0xb1, 0xdb, 0x7e, 0x26, 0xf9, 0x54, 0xbf, 0x6b, 0x39, 0xe3, 0xef, 0xc8,
3263 0x8a, 0x31, 0x9f, 0xef, 0x66, 0xcc, 0x67, 0x33, 0x63, 0xbe, 0x4d, 0x5e,
3264 0xb9, 0x24, 0x7b, 0x57, 0x80, 0x62, 0x9e, 0x1e, 0x26, 0xaf, 0x70, 0x95,
3265 0xfa, 0x6b, 0xcd, 0xf1, 0xbb, 0xee, 0x15, 0xe7, 0x73, 0x54, 0x37, 0x6f,
3266 0x9e, 0xf5, 0x0a, 0x7c, 0x1e, 0x68, 0xbc, 0x7e, 0x95, 0xdf, 0x4f, 0x0b,
3267 0xfe, 0x07, 0x89, 0x6e, 0x1e, 0x13, 0x00, 0x0d, 0x00, 0x00, 0x00 };
3263 3268
3264static u8 bnx2_xi_rv2p_proc2[] = { 3269static u8 bnx2_xi_rv2p_proc2[] = {
3265 /* Date: 01/14/2008 15:44 */ 3270 /* Date: 04/25/2008 22:02 */
3266 0xad, 0x58, 0x5d, 0x6c, 0xd3, 0x55, 0x14, 0xbf, 0xfd, 0x58, 0xdb, 0x75, 3271#define XI_RV2P_PROC2_MAX_BD_PAGE_LOC 5
3267 0xff, 0xb6, 0x63, 0x9b, 0xdd, 0xa7, 0x6e, 0x6e, 0x61, 0x6c, 0xd8, 0xcd, 3272#define XI_RV2P_PROC2_BD_PAGE_SIZE_MSK 0xffff
3268 0xd1, 0x8d, 0x4f, 0x4d, 0x5c, 0x86, 0x19, 0x20, 0x26, 0x8c, 0x61, 0xd4, 3273#define XI_RV2P_PROC2_BD_PAGE_SIZE ((PAGE_SIZE / 16) - 1)
3269 0x37, 0xd8, 0x90, 0xb2, 0xb2, 0x8d, 0x2c, 0x8c, 0xf0, 0xc0, 0x8b, 0x0d, 3274 0xad, 0x58, 0x5b, 0x6c, 0x54, 0x55, 0x14, 0x3d, 0xf3, 0xe8, 0xcc, 0xed,
3270 0xd3, 0xf1, 0xd2, 0x07, 0x47, 0xb2, 0x0d, 0x8d, 0xc1, 0x45, 0x7d, 0x40, 3275 0xcc, 0x9d, 0x99, 0xd2, 0xd6, 0xe9, 0x8b, 0x48, 0x69, 0xa5, 0x74, 0x70,
3271 0x9f, 0xec, 0x83, 0x52, 0x30, 0xc6, 0xc4, 0xe8, 0x42, 0xf0, 0x01, 0x48, 3276 0x0a, 0x65, 0x5a, 0x1e, 0x3e, 0x12, 0x49, 0xd1, 0x02, 0x3e, 0x42, 0xa9,
3272 0x30, 0xc6, 0x68, 0x48, 0x08, 0xea, 0x32, 0x10, 0x75, 0x0c, 0xfb, 0x64, 3277 0x86, 0x98, 0x18, 0x03, 0x9d, 0x4a, 0xe9, 0x40, 0x4b, 0x2a, 0x25, 0x7c,
3273 0x98, 0xf7, 0x9e, 0xdf, 0xb9, 0xff, 0xfe, 0xff, 0x5d, 0x27, 0x18, 0xec, 3278 0xf0, 0xe3, 0x84, 0x62, 0xf9, 0x99, 0x44, 0x4b, 0x80, 0x16, 0x63, 0x48,
3274 0x43, 0x4f, 0xef, 0xbd, 0xe7, 0x9e, 0x7b, 0x3e, 0x7e, 0xe7, 0x9c, 0x7b, 3279 0x23, 0x3f, 0xc4, 0xbf, 0x26, 0x28, 0x45, 0x3f, 0x4c, 0x88, 0x36, 0x04,
3275 0x5b, 0x2c, 0x84, 0x70, 0x8a, 0x44, 0xaa, 0x56, 0x52, 0x61, 0x38, 0x5c, 3280 0x3e, 0xc0, 0x44, 0x63, 0xfc, 0x21, 0x12, 0xc4, 0x5a, 0xa0, 0xc1, 0x82,
3276 0x02, 0x9f, 0xb5, 0xc5, 0x44, 0xae, 0xa5, 0x7c, 0xf2, 0xbb, 0x40, 0xbc, 3281 0x36, 0xc6, 0x48, 0xeb, 0x3d, 0x7b, 0xed, 0x73, 0xe7, 0xde, 0xe9, 0x2d,
3277 0xe4, 0xac, 0xa0, 0xb1, 0x5b, 0x28, 0x1a, 0x12, 0x22, 0x61, 0xa5, 0xa5, 3282 0x8f, 0x48, 0x3f, 0x58, 0x9c, 0x73, 0xf7, 0x39, 0x67, 0xef, 0xb5, 0x1f,
3278 0x4c, 0xaf, 0x32, 0xfd, 0x9d, 0xe9, 0xe3, 0x0e, 0xd0, 0x2b, 0x3c, 0xde, 3283 0x67, 0x9f, 0x29, 0x10, 0x42, 0x78, 0x45, 0x7a, 0x64, 0x91, 0x81, 0x22,
3279 0xc2, 0xe3, 0x6b, 0x3c, 0xfe, 0x91, 0xe9, 0x46, 0x9e, 0xdf, 0xcc, 0x34, 3284 0xe8, 0xf6, 0x68, 0x06, 0xcc, 0x0a, 0x91, 0x57, 0x2a, 0xc7, 0xc2, 0x2d,
3280 0xc9, 0x74, 0x3b, 0xaf, 0xa7, 0x99, 0xca, 0x4f, 0xc2, 0x90, 0x5f, 0x72, 3285 0xf8, 0x6f, 0x59, 0x01, 0xc1, 0x0f, 0x23, 0xf2, 0xbb, 0x5f, 0xbc, 0xe5,
3281 0xb9, 0x59, 0xeb, 0x69, 0x60, 0xba, 0x19, 0xfa, 0xee, 0xa9, 0x53, 0x7c, 3286 0xc6, 0x77, 0xaf, 0x90, 0x18, 0x11, 0x22, 0x2d, 0xb1, 0x80, 0x31, 0xc6,
3282 0xf3, 0x4b, 0x59, 0x3e, 0xcc, 0x5f, 0x9f, 0x00, 0xad, 0xc5, 0xae, 0x8f, 3287 0xe8, 0x72, 0x01, 0x4b, 0x18, 0x5f, 0x61, 0x14, 0x8c, 0xba, 0x1b, 0xe8,
3283 0x13, 0x4f, 0xeb, 0xfd, 0x20, 0x7d, 0x01, 0xd0, 0x7e, 0xb6, 0xbf, 0x33, 3288 0x66, 0xf4, 0xaa, 0x79, 0x5e, 0xaf, 0xf1, 0x7c, 0x3b, 0xe3, 0x76, 0x9e,
3284 0x42, 0x24, 0xb9, 0xdf, 0x89, 0x71, 0x77, 0xa3, 0xf2, 0x43, 0x89, 0x70, 3289 0xff, 0xdd, 0x40, 0xa5, 0x97, 0x1c, 0x4f, 0xce, 0x8a, 0xb4, 0x8e, 0x6d,
3285 0x3b, 0x95, 0x9c, 0x56, 0x9f, 0xe7, 0x3c, 0xe6, 0x5f, 0x0d, 0x81, 0xbe, 3290 0x63, 0x6a, 0x5e, 0x27, 0x48, 0xc7, 0xa0, 0xf7, 0x9b, 0x95, 0xb4, 0xce,
3286 0xe6, 0x07, 0xfd, 0xc5, 0x5f, 0x28, 0xbf, 0x97, 0x96, 0x62, 0x45, 0x2c, 3291 0x41, 0x4e, 0xce, 0xdf, 0x98, 0x55, 0xfb, 0x1d, 0xf4, 0xc8, 0xf1, 0x2f,
3287 0xdf, 0x60, 0xb5, 0x8b, 0xb0, 0x7f, 0xd6, 0x80, 0x1e, 0x2f, 0xd7, 0x41, 3292 0xc6, 0xd8, 0x25, 0x87, 0xc5, 0x51, 0x6c, 0x53, 0x5c, 0x9a, 0x91, 0xfb,
3288 0xbf, 0xef, 0x9f, 0x52, 0xf3, 0x2e, 0x91, 0x60, 0x39, 0x42, 0x68, 0x3d, 3293 0x78, 0xc4, 0xd0, 0x80, 0x46, 0xac, 0x1c, 0xd7, 0x31, 0x4e, 0x13, 0x1f,
3289 0x79, 0x7d, 0x10, 0xfb, 0x56, 0xad, 0xc1, 0xea, 0x5b, 0x31, 0x8c, 0xab, 3294 0x2e, 0x63, 0xcc, 0xfb, 0x31, 0x96, 0x85, 0x70, 0xee, 0xd6, 0x4a, 0x9c,
3290 0x3f, 0x28, 0xa6, 0xb8, 0x9c, 0x4e, 0x69, 0xfe, 0x7c, 0x72, 0xdd, 0x52, 3295 0xf7, 0xd3, 0xb3, 0x90, 0x4b, 0x47, 0x14, 0xa1, 0xf8, 0xbe, 0x48, 0xf0,
3291 0x2e, 0xe4, 0x8b, 0x7a, 0x1f, 0x29, 0x93, 0x88, 0x80, 0x8a, 0x96, 0xdc, 3296 0xf7, 0xdd, 0x72, 0x3c, 0xe5, 0x2a, 0x72, 0x81, 0x57, 0xbf, 0x50, 0x7a,
3292 0x73, 0x20, 0x7f, 0x6a, 0xb5, 0x9a, 0x77, 0x8a, 0x5e, 0x97, 0x9a, 0xf7, 3297 0x42, 0xee, 0xea, 0x40, 0xae, 0x3c, 0x0d, 0x3f, 0x4f, 0xaf, 0x50, 0x76,
3293 0x88, 0xde, 0xb8, 0xf6, 0x2f, 0xd6, 0x63, 0x1e, 0x22, 0x15, 0x7d, 0xe3, 3298 0x00, 0xda, 0x42, 0xc0, 0xa4, 0x07, 0xb8, 0x2e, 0x4e, 0x90, 0x69, 0x67,
3294 0xca, 0xce, 0x90, 0xd8, 0xe7, 0x0c, 0x11, 0x3f, 0xfc, 0xe2, 0xf2, 0x19, 3299 0x47, 0x36, 0xd7, 0x48, 0x3d, 0x0a, 0x85, 0xd7, 0x2d, 0xf7, 0x5b, 0xa1,
3295 0x9f, 0x81, 0xff, 0xcb, 0x5a, 0x83, 0x6c, 0x89, 0xb5, 0x63, 0x5f, 0x59, 3300 0xf9, 0xce, 0x61, 0xfe, 0x6d, 0xd6, 0xeb, 0xbd, 0x00, 0xf0, 0x46, 0x20,
3296 0x14, 0x74, 0x32, 0x5a, 0xa0, 0x48, 0x24, 0x36, 0x4a, 0xc3, 0xd6, 0x9b, 3301 0xdf, 0xf8, 0x77, 0x76, 0xb6, 0x23, 0x68, 0xb7, 0x2b, 0x1d, 0xc4, 0xfa,
3297 0xeb, 0x7c, 0xc4, 0x97, 0x68, 0xd1, 0xf1, 0xd3, 0xf1, 0x52, 0x71, 0xfc, 3302 0x31, 0xdd, 0xc9, 0x3e, 0xcf, 0x03, 0xec, 0xc3, 0xba, 0x05, 0x4b, 0xf1,
3298 0x44, 0xc6, 0x91, 0xdd, 0xd2, 0x00, 0xbf, 0xfe, 0xba, 0x5a, 0xf1, 0x4b, 3303 0xf5, 0xe3, 0x0e, 0x8c, 0x2b, 0x4e, 0x49, 0x39, 0x9f, 0x18, 0x1a, 0x71,
3299 0xe7, 0xd6, 0xe3, 0x9c, 0xac, 0x7e, 0xd6, 0xf8, 0x7f, 0xf4, 0x1f, 0xe2, 3304 0xe2, 0x23, 0x77, 0x7f, 0x23, 0x4e, 0x23, 0x38, 0x47, 0x54, 0x69, 0xa4,
3300 0xaf, 0xe4, 0x75, 0xb2, 0x5f, 0xea, 0xa4, 0x5f, 0x14, 0xad, 0x71, 0x24, 3305 0x54, 0x3a, 0x0e, 0x14, 0x75, 0x8e, 0xfc, 0x88, 0xe3, 0x4b, 0xe4, 0xbc,
3301 0x5a, 0xec, 0xf1, 0xb8, 0x3e, 0x11, 0xa2, 0xdf, 0xb7, 0xba, 0x8a, 0xc9, 3306 0x5b, 0xb4, 0x7a, 0x74, 0x3a, 0xa7, 0x35, 0xa5, 0xe2, 0x02, 0xdf, 0x3b,
3302 0xaf, 0xbb, 0x30, 0x7f, 0xaa, 0xfb, 0x1c, 0xe2, 0xb1, 0x83, 0xec, 0x17, 3307 0x7c, 0x04, 0xa5, 0x6d, 0xfd, 0xd2, 0xde, 0x88, 0xd8, 0xee, 0x8e, 0x90,
3303 0xfe, 0x37, 0x3e, 0xc5, 0xae, 0xbe, 0x80, 0x1a, 0xbf, 0xd2, 0x11, 0xbb, 3308 0x3c, 0xf8, 0xf1, 0x68, 0xfa, 0x97, 0x90, 0xff, 0x66, 0x91, 0x4e, 0x36,
3304 0x80, 0xf5, 0x82, 0x31, 0xf8, 0x75, 0x17, 0x4b, 0xdd, 0xe1, 0x72, 0x28, 3309 0x75, 0x34, 0x60, 0x5d, 0x71, 0x02, 0x78, 0x2c, 0x91, 0x27, 0x21, 0xde,
3305 0x92, 0xf4, 0x8c, 0xd1, 0xd0, 0x98, 0xa5, 0x75, 0x43, 0x9c, 0x4c, 0x61, 3310 0xd1, 0x4b, 0xc3, 0xe5, 0xd7, 0x57, 0x6a, 0x24, 0x97, 0xae, 0x53, 0x71,
3306 0xfd, 0x70, 0x91, 0x1a, 0xef, 0x8a, 0xcc, 0x63, 0x1c, 0x89, 0x8f, 0xf3, 3311 0xa7, 0xfc, 0x27, 0xfd, 0x3a, 0x64, 0xc6, 0x99, 0xa8, 0x06, 0xbf, 0xbf,
3307 0x46, 0x27, 0xfc, 0x70, 0xcb, 0x09, 0x79, 0x0c, 0x2f, 0xbf, 0x9b, 0xe2, 3312 0x2d, 0x91, 0xf2, 0x06, 0xc9, 0x55, 0x38, 0x27, 0xab, 0x9f, 0x35, 0x6e,
3308 0xe0, 0x10, 0x46, 0x37, 0xe8, 0x9b, 0xb4, 0xfe, 0xb7, 0x23, 0x49, 0x76, 3313 0x07, 0x2c, 0x71, 0xfb, 0x78, 0xf1, 0xb0, 0x8e, 0xf8, 0x59, 0xc7, 0x3c,
3309 0x77, 0x07, 0xdd, 0xe7, 0xc0, 0xc8, 0xb8, 0x36, 0x71, 0xab, 0x71, 0xff, 3314 0x55, 0x1a, 0x3c, 0x49, 0x5c, 0xe8, 0x4a, 0xd7, 0xd9, 0xfd, 0x74, 0x75,
3310 0xb0, 0xf8, 0x1d, 0x37, 0x34, 0x5e, 0xd9, 0xff, 0xec, 0xdf, 0xf7, 0x44, 3315 0x20, 0x42, 0xff, 0xbf, 0xd9, 0x54, 0x40, 0x3c, 0x6f, 0xc6, 0xfc, 0x91,
3311 0x2e, 0x4e, 0x41, 0xbb, 0x1b, 0x41, 0x3d, 0x0d, 0xb9, 0x78, 0xd5, 0xf8, 3316 0xe6, 0xb3, 0xf0, 0xd3, 0x26, 0xe2, 0x43, 0x04, 0x0e, 0x7d, 0x81, 0x55,
3312 0xb4, 0xfb, 0x99, 0xe3, 0x63, 0xc1, 0x0b, 0x11, 0x89, 0x13, 0x1b, 0x6e, 3317 0x6d, 0x14, 0xdf, 0xdd, 0x8d, 0x1d, 0x5f, 0x61, 0x9c, 0xf4, 0xc8, 0xf1,
3313 0x18, 0xa7, 0x95, 0xd2, 0x5f, 0x3a, 0xfe, 0x4a, 0x90, 0x57, 0x0c, 0xb2, 3318 0x6e, 0xbd, 0x7d, 0x14, 0xf2, 0x79, 0x7d, 0xe0, 0x7d, 0x33, 0x9f, 0xb2,
3314 0xbc, 0x38, 0xdb, 0x35, 0xc4, 0x76, 0xdd, 0xf1, 0x6b, 0xbf, 0x6a, 0x7b, 3319 0xc9, 0x43, 0xf9, 0x94, 0xf1, 0xf5, 0xd1, 0x50, 0x1f, 0xa3, 0xef, 0x05,
3315 0x40, 0x4f, 0xda, 0xec, 0x71, 0x48, 0x3c, 0xd9, 0x71, 0xc8, 0xfa, 0x24, 3320 0xe2, 0xf0, 0x08, 0xbe, 0xef, 0x09, 0x4a, 0x3b, 0xdf, 0x37, 0xf3, 0x2e,
3316 0xbf, 0xa9, 0xc7, 0x8f, 0xea, 0x06, 0x50, 0xd3, 0xce, 0x46, 0xc5, 0xe7, 3321 0xe5, 0xc3, 0xfa, 0xcc, 0x00, 0xf2, 0x66, 0xe2, 0x8c, 0x1c, 0x6f, 0x8b,
3317 0x89, 0x4e, 0xa7, 0xec, 0x38, 0xd4, 0xf9, 0xb8, 0xa7, 0x4e, 0xcb, 0x57, 3322 0x4f, 0x40, 0x3e, 0x9e, 0xea, 0xe7, 0x8d, 0xdd, 0xe0, 0xf1, 0xa6, 0x1b,
3318 0xb8, 0xcc, 0x48, 0x5c, 0x22, 0x6e, 0xa7, 0x53, 0xd6, 0xfc, 0xac, 0xca, 3323 0xf2, 0x1c, 0xa6, 0x01, 0x2f, 0xf9, 0xd1, 0x25, 0xf4, 0x66, 0xe0, 0x87,
3319 0x93, 0x9f, 0xf6, 0xbc, 0xd0, 0x7e, 0x39, 0x1c, 0xa0, 0x02, 0xd5, 0x71, 3324 0xf4, 0xfd, 0x5f, 0x57, 0x86, 0x78, 0xdb, 0x1a, 0xf6, 0x9e, 0x55, 0xfc,
3320 0x79, 0xce, 0x7e, 0x1e, 0xf0, 0xed, 0x35, 0xf1, 0x53, 0xb6, 0x81, 0xfd, 3325 0x30, 0x46, 0x94, 0x5d, 0xc0, 0x47, 0xcd, 0x83, 0xfe, 0x79, 0xf3, 0x80,
3321 0xc7, 0x34, 0xbc, 0x51, 0xc9, 0xeb, 0x61, 0xf9, 0x6d, 0x2c, 0xdf, 0xb0, 3326 0xfd, 0x19, 0x7b, 0x58, 0x1e, 0x00, 0x9b, 0x6b, 0x80, 0xbe, 0x6a, 0x29,
3322 0xe4, 0x9d, 0xd2, 0xaf, 0xcb, 0xcc, 0x37, 0x1d, 0xb7, 0x6c, 0xde, 0x69, 3327 0x9f, 0xff, 0x18, 0xf9, 0xc0, 0x7a, 0xcd, 0xc9, 0x37, 0xcc, 0x66, 0xe3,
3323 0xff, 0xd1, 0xf9, 0x91, 0xcb, 0x73, 0x6a, 0x7f, 0xf5, 0x03, 0xf2, 0x70, 3328 0x94, 0xc0, 0x88, 0x4f, 0x5b, 0xbc, 0x72, 0x7e, 0x2c, 0x36, 0xfc, 0xa0,
3324 0x93, 0x29, 0xef, 0x3b, 0x33, 0xdf, 0xd4, 0x7a, 0x91, 0x78, 0x8e, 0x87, 3329 0xe2, 0x4e, 0x6e, 0xe8, 0x17, 0xbb, 0x79, 0xdf, 0x14, 0xf3, 0xd1, 0xc5,
3325 0xf6, 0x7a, 0xf2, 0xa7, 0xac, 0x27, 0x64, 0x87, 0xcf, 0x38, 0xc7, 0xf5, 3330 0x7c, 0x4c, 0x32, 0xee, 0x09, 0x2a, 0x1e, 0x80, 0x87, 0x75, 0x9c, 0xdb,
3326 0x63, 0x54, 0x9d, 0x53, 0xc1, 0x7a, 0x57, 0xb0, 0xde, 0xb2, 0x5f, 0xb5, 3331 0x9a, 0x92, 0x7e, 0x5c, 0xeb, 0x50, 0x3f, 0x55, 0x9d, 0x84, 0x9f, 0x86,
3327 0x70, 0x9d, 0xd9, 0x6b, 0xad, 0x17, 0x6b, 0x2d, 0x79, 0xaf, 0xc6, 0x4d, 3332 0xd8, 0xaf, 0x27, 0xcc, 0x7a, 0xa9, 0xf8, 0x9d, 0xaf, 0x6e, 0xda, 0xf3,
3328 0x4b, 0xcb, 0xfb, 0x85, 0xcd, 0x9f, 0x09, 0x41, 0xfe, 0xf7, 0x72, 0x7c, 3333 0x29, 0xc7, 0xce, 0xcc, 0x77, 0x55, 0x58, 0x5e, 0x58, 0x0d, 0x34, 0xf9,
3329 0x3c, 0x79, 0xfa, 0x8b, 0xe6, 0x07, 0xbe, 0xb6, 0x11, 0xbf, 0xcf, 0xc4, 3334 0xad, 0xa1, 0x3c, 0x0e, 0x97, 0x8c, 0x2a, 0xfb, 0xa4, 0x7e, 0xb7, 0x55,
3330 0xbf, 0xdd, 0xde, 0xaa, 0x3c, 0x75, 0x27, 0xd7, 0x7e, 0xf8, 0xb3, 0xcf, 3335 0x1e, 0x86, 0x07, 0x0f, 0x12, 0x06, 0x0a, 0x4f, 0xc9, 0x73, 0xca, 0x1d,
3331 0x19, 0x20, 0xbe, 0x1b, 0x23, 0x6a, 0xdf, 0x49, 0x87, 0xf6, 0x53, 0x27, 3336 0xea, 0x8a, 0x3d, 0x9f, 0x73, 0xf9, 0xdd, 0x13, 0xa2, 0x42, 0xdb, 0x78,
3332 0xea, 0x90, 0x03, 0xf6, 0xd6, 0xb0, 0xbd, 0x72, 0xb9, 0x85, 0xf0, 0xef, 3337 0x79, 0xdc, 0x9e, 0xb7, 0xc8, 0x4f, 0xbf, 0x19, 0xdf, 0xc5, 0xab, 0xd9,
3333 0xbb, 0x31, 0x62, 0xb5, 0xd7, 0xf8, 0x97, 0xf3, 0xec, 0xb8, 0x19, 0xe1, 3338 0x0f, 0x8c, 0xd1, 0x35, 0x72, 0xdf, 0x16, 0x3e, 0xa7, 0x9e, 0xcf, 0xd1,
3334 0x3e, 0xd6, 0x8f, 0xbc, 0xf0, 0xed, 0xff, 0x5c, 0xeb, 0xc3, 0xe7, 0x86, 3339 0x2d, 0x75, 0x43, 0xea, 0x59, 0x68, 0xd6, 0x0b, 0x15, 0x1f, 0xd9, 0xba,
3335 0xf4, 0xf9, 0x4a, 0x5e, 0xb5, 0x98, 0x1b, 0x55, 0xfb, 0x1f, 0x13, 0x0c, 3340 0xa1, 0xfc, 0x40, 0xe7, 0xc7, 0x2f, 0x8f, 0xcb, 0xf5, 0x15, 0x0f, 0xa9,
3336 0x33, 0x31, 0xdc, 0x84, 0xfa, 0x77, 0xe7, 0x00, 0xf4, 0x1f, 0x6e, 0xd4, 3341 0x23, 0x9a, 0xb9, 0xdf, 0x8f, 0x66, 0x9d, 0x90, 0xdf, 0x83, 0xe2, 0x25,
3337 0x7d, 0x1c, 0x38, 0x16, 0x5c, 0xff, 0xbf, 0x9e, 0xc8, 0xe7, 0x97, 0x41, 3342 0x1e, 0xda, 0xeb, 0xe1, 0x1f, 0x46, 0x3d, 0x24, 0x3b, 0x34, 0xfd, 0x2c,
3338 0x07, 0xf8, 0xca, 0xd8, 0xae, 0x62, 0xb6, 0x2b, 0x22, 0x72, 0xeb, 0xec, 3343 0xd7, 0xbf, 0x5e, 0x79, 0x4e, 0x29, 0xeb, 0x5d, 0xca, 0x7a, 0x1b, 0xd7,
3339 0x5e, 0xca, 0x97, 0x4e, 0xe6, 0x7b, 0x56, 0xd7, 0xe3, 0x65, 0x7c, 0xb0, 3344 0x73, 0x1d, 0xd7, 0xc9, 0x6d, 0xd6, 0x7a, 0xf7, 0xcf, 0x4c, 0xb6, 0x6e,
3340 0xbf, 0x80, 0xcf, 0xcf, 0xe7, 0xaf, 0x7c, 0x72, 0xd3, 0x8c, 0xa3, 0x01, 3345 0xc9, 0xf1, 0xdf, 0x33, 0x73, 0xef, 0x5f, 0x47, 0x5e, 0xd3, 0x22, 0x82,
3341 0xe6, 0x73, 0xe7, 0xa9, 0xf3, 0x18, 0x65, 0xd6, 0x50, 0x9d, 0x3f, 0x73, 3346 0x3e, 0x23, 0x1d, 0x51, 0x7d, 0x41, 0x6e, 0xfd, 0xcb, 0xb5, 0x03, 0xbc,
3342 0x3c, 0xad, 0xf8, 0x02, 0x26, 0xce, 0xed, 0x76, 0xfd, 0x74, 0xff, 0xd1, 3347 0xb4, 0xb9, 0x43, 0x24, 0x77, 0xad, 0xc7, 0xde, 0x27, 0xa0, 0x0e, 0x56,
3343 0xfd, 0xaf, 0xf8, 0xc2, 0xe2, 0x60, 0x70, 0x25, 0x3f, 0xbb, 0xd5, 0xf4, 3348 0xba, 0xa0, 0xf7, 0x42, 0xd6, 0xdb, 0xd8, 0xaf, 0x8e, 0xf2, 0x41, 0xbb,
3344 0xcc, 0x42, 0x5a, 0xc7, 0xc9, 0x20, 0x3b, 0xe7, 0x46, 0xd5, 0xf9, 0x1f, 3349 0xd6, 0x63, 0xd5, 0xfb, 0xfa, 0xcc, 0xfc, 0xe7, 0x39, 0xc7, 0x41, 0x0f,
3345 0xe6, 0xf8, 0xdf, 0x69, 0xf1, 0x3f, 0xf8, 0x9f, 0x88, 0x3c, 0xaa, 0xdf, 3350 0xdf, 0xaf, 0x49, 0xc4, 0xb1, 0xd6, 0xfe, 0xb5, 0xd2, 0x8b, 0xcf, 0x8f,
3346 0xf3, 0xf5, 0xe5, 0x2f, 0xee, 0x2f, 0xcf, 0x13, 0x35, 0x7f, 0xe1, 0xa1, 3351 0x28, 0x3d, 0x74, 0xf2, 0xd3, 0x78, 0xaf, 0xdc, 0xa7, 0x48, 0x70, 0xd8,
3347 0xfd, 0xb1, 0xbb, 0xdd, 0x6a, 0x7f, 0x83, 0x98, 0x4d, 0x21, 0xbf, 0x7a, 3352 0x88, 0xee, 0x5a, 0xd4, 0xdf, 0xc9, 0x1d, 0xb0, 0xa3, 0xbb, 0x46, 0xea,
3348 0x18, 0x87, 0xfb, 0xb8, 0x5e, 0xdf, 0xf0, 0xab, 0x09, 0x9f, 0xe8, 0xdf, 3353 0x61, 0x78, 0x25, 0x8d, 0x3c, 0x17, 0x7c, 0x1f, 0x7d, 0x3b, 0xe0, 0xe4,
3349 0x49, 0xfe, 0x10, 0xe1, 0x22, 0xf8, 0xa7, 0xff, 0x45, 0xed, 0x4f, 0xcc, 3354 0xd7, 0x4b, 0x2c, 0x57, 0xcc, 0xf6, 0x15, 0xb0, 0x7d, 0x71, 0x91, 0x5b,
3350 0x57, 0x51, 0xbf, 0x75, 0x89, 0x1e, 0xaf, 0x41, 0xfc, 0x55, 0x01, 0xd0, 3355 0xe7, 0xb7, 0x55, 0xd2, 0x7d, 0xc0, 0x72, 0x2f, 0xaa, 0xfb, 0xc0, 0x90,
3351 0x30, 0xd7, 0xf9, 0x59, 0xb3, 0x8f, 0x81, 0x9e, 0xf6, 0xe8, 0xba, 0x8c, 3356 0x93, 0xfb, 0xe4, 0xf1, 0x3e, 0x4e, 0x3c, 0x39, 0xed, 0x33, 0xca, 0x71,
3352 0x7e, 0xfe, 0x95, 0x47, 0x31, 0xc8, 0x20, 0x35, 0xa3, 0x3e, 0x77, 0x35, 3357 0xd0, 0xc9, 0x72, 0x5e, 0x87, 0x7b, 0x05, 0xa3, 0xe9, 0xa5, 0x74, 0xaf,
3353 0x1a, 0xb4, 0xde, 0xdb, 0x0c, 0x3c, 0x89, 0x7a, 0xdd, 0xe7, 0xf0, 0xe1, 3358 0x9c, 0x3c, 0x40, 0xf7, 0x43, 0xc8, 0x8c, 0x53, 0xbb, 0x1d, 0x67, 0x9e,
3354 0x3e, 0x50, 0x95, 0xed, 0x77, 0xd6, 0x7e, 0x58, 0x68, 0xe9, 0x07, 0xfa, 3359 0x20, 0xef, 0x52, 0x3e, 0x2a, 0x76, 0x86, 0xe7, 0xe3, 0xd7, 0x2b, 0xa7,
3355 0x3c, 0xed, 0x47, 0x2d, 0x97, 0x86, 0xb2, 0xaf, 0x58, 0xfb, 0xa1, 0xee, 3360 0x87, 0xa7, 0x46, 0x95, 0x7f, 0x74, 0xb2, 0x77, 0xbc, 0x57, 0xf5, 0xb1,
3356 0x13, 0x8b, 0xdc, 0x27, 0x4a, 0xc5, 0xc5, 0x14, 0xec, 0x9a, 0x4d, 0xe5, 3361 0x56, 0xde, 0xdd, 0x16, 0xde, 0x21, 0xff, 0x74, 0xfc, 0xff, 0xf2, 0xed,
3357 0xe2, 0x4f, 0x9f, 0xa7, 0xe5, 0x41, 0x6f, 0x6d, 0x47, 0x56, 0x3e, 0xce, 3362 0xd4, 0x1f, 0x1c, 0x9d, 0x31, 0xfb, 0x50, 0x9f, 0x53, 0xfd, 0x5f, 0x61,
3358 0x3f, 0xc0, 0x7a, 0xfe, 0x4c, 0xf7, 0xd8, 0x30, 0xdb, 0xa3, 0xe4, 0x62, 3363 0xc6, 0xcb, 0x5e, 0xee, 0xef, 0xa6, 0x75, 0xfa, 0x4f, 0x62, 0x32, 0x43,
3359 0x7e, 0x3b, 0xf7, 0xe7, 0x84, 0x39, 0xb6, 0xf7, 0xd5, 0x1e, 0xd2, 0xab, 3364 0x43, 0xbd, 0xec, 0xb4, 0x94, 0x5b, 0x96, 0xd8, 0xcb, 0x76, 0x5d, 0xf4,
3360 0x84, 0xf1, 0x16, 0xb6, 0xe4, 0x03, 0xf8, 0xcb, 0xda, 0x40, 0x27, 0xdb, 3365 0xc0, 0xee, 0xce, 0x1d, 0x18, 0x5f, 0xe6, 0x7a, 0x7d, 0x97, 0xeb, 0xe3,
3361 0x74, 0x1c, 0x74, 0xbc, 0x74, 0x7c, 0x10, 0xc7, 0xf0, 0x3a, 0x62, 0xeb, 3366 0x16, 0x0d, 0x38, 0x59, 0x4b, 0x7c, 0x24, 0xf6, 0x9e, 0x53, 0xfb, 0xd3,
3362 0xe8, 0x5f, 0x47, 0x7d, 0xa4, 0xad, 0x7f, 0x41, 0xe3, 0x0f, 0xfb, 0x77, 3367 0xbe, 0xfa, 0x34, 0xf3, 0xf9, 0xb2, 0x87, 0xed, 0xac, 0x22, 0x3f, 0x26,
3363 0x47, 0x14, 0xff, 0xeb, 0xe2, 0x2a, 0xe1, 0x50, 0x88, 0x1f, 0x98, 0x66, 3368 0xee, 0x50, 0x3d, 0xf0, 0x8a, 0xa6, 0xc5, 0x12, 0xcb, 0x0c, 0xde, 0x58,
3364 0xfb, 0x15, 0x07, 0xc0, 0xcc, 0x57, 0x8e, 0x5f, 0x01, 0x4f, 0xb7, 0xeb, 3369 0x9f, 0x17, 0x80, 0xad, 0x7e, 0xa6, 0x21, 0x96, 0xeb, 0x67, 0x4c, 0xfb,
3365 0x7a, 0xae, 0xe3, 0x65, 0xcd, 0xd7, 0xd8, 0x32, 0xdc, 0x66, 0xeb, 0xb2, 3370 0xaa, 0x78, 0x7d, 0x1b, 0xc6, 0x7e, 0xae, 0x67, 0x83, 0xac, 0xd7, 0xd1,
3366 0xb6, 0x53, 0xf1, 0x47, 0x18, 0x8f, 0x3e, 0xd1, 0xb5, 0x0d, 0xf7, 0xdc, 3371 0x5a, 0x60, 0x38, 0x86, 0x3e, 0x61, 0x9c, 0xee, 0x85, 0x48, 0xa2, 0x77,
3367 0xa0, 0x17, 0x75, 0x3f, 0xe8, 0xb5, 0xc6, 0x4b, 0xe2, 0xa2, 0xd0, 0xab, 3372 0x14, 0xf6, 0x74, 0xad, 0x87, 0xbd, 0xf7, 0x98, 0x07, 0xc6, 0xf0, 0x89,
3368 0x86, 0x35, 0x25, 0x85, 0x64, 0xcf, 0xa9, 0x4b, 0xdf, 0xd2, 0xf2, 0xfb, 3373 0x3e, 0xba, 0x77, 0xc2, 0xfd, 0xe8, 0x33, 0xc2, 0xbe, 0x3e, 0xd8, 0xd1,
3369 0xd3, 0x45, 0x98, 0x2f, 0xdf, 0x19, 0x22, 0x7f, 0x4c, 0x01, 0xcf, 0xef, 3374 0x35, 0x8d, 0xf1, 0xbd, 0xe7, 0x80, 0x7f, 0x3d, 0x8f, 0x75, 0xfb, 0x0e,
3370 0x4e, 0x82, 0xbe, 0x23, 0x5e, 0xc0, 0xfe, 0x92, 0x13, 0x74, 0x0f, 0xf4, 3375 0x30, 0x3f, 0xeb, 0x9d, 0xd7, 0x75, 0xfe, 0x09, 0xb9, 0xee, 0x5a, 0x79,
3371 0x95, 0x33, 0x3e, 0x2b, 0x50, 0x27, 0x92, 0xd3, 0x74, 0x2f, 0x59, 0x5a, 3376 0xfe, 0xbb, 0xc3, 0xdc, 0x7f, 0x88, 0x14, 0xf5, 0x3b, 0xef, 0xe8, 0xd3,
3372 0x12, 0x01, 0x45, 0x3d, 0x66, 0xbf, 0x01, 0x3e, 0xdd, 0x96, 0x38, 0x3f, 3377 0x3c, 0xde, 0xc5, 0xf7, 0xe2, 0x2d, 0xee, 0x17, 0xba, 0x72, 0xfa, 0x85,
3373 0x08, 0xaf, 0x74, 0xaf, 0x94, 0x78, 0xc4, 0x76, 0xc6, 0xad, 0x2f, 0x17, 3378 0x09, 0xd4, 0xe9, 0xe1, 0xe9, 0x8c, 0x9c, 0x30, 0xea, 0x65, 0xbe, 0x93,
3374 0xb7, 0xda, 0x1f, 0x15, 0xce, 0xbc, 0x38, 0xdd, 0x60, 0xc7, 0xa9, 0x87, 3379 0x7f, 0xf5, 0x44, 0x09, 0xfb, 0xad, 0x78, 0x15, 0xf0, 0xd8, 0x2a, 0xdc,
3375 0x71, 0x7a, 0xcf, 0xec, 0xef, 0xcb, 0xe5, 0xa2, 0xcf, 0x5f, 0xfc, 0xdf, 3380 0xd7, 0x5d, 0xfb, 0x99, 0x9f, 0x46, 0xf2, 0xd3, 0xf2, 0xa9, 0xd1, 0xdc,
3376 0x70, 0x0b, 0xba, 0xbd, 0x41, 0x9d, 0x5f, 0xbe, 0xac, 0x1e, 0xd7, 0xda, 3381 0xf5, 0x32, 0x7e, 0x3a, 0x8d, 0xf8, 0x51, 0xe7, 0x40, 0x3e, 0x19, 0x92,
3377 0xe2, 0xdc, 0x7c, 0x5f, 0xeb, 0x75, 0xc2, 0x63, 0x5d, 0x6f, 0x31, 0xfb, 3382 0xf3, 0xf7, 0xcc, 0xba, 0x3f, 0x41, 0x7a, 0x57, 0x0c, 0x4f, 0xd3, 0xfa,
3378 0xd9, 0x11, 0x7e, 0xe7, 0x65, 0x0c, 0xfa, 0x11, 0xbd, 0x93, 0xa4, 0xa1, 3383 0x72, 0x51, 0x44, 0xf1, 0x57, 0x16, 0x9e, 0x82, 0x1d, 0x89, 0x41, 0xb6,
3379 0x51, 0x79, 0x56, 0xf1, 0x35, 0x45, 0x8f, 0x70, 0xbd, 0xbd, 0xe4, 0x42, 3384 0xbf, 0x67, 0x2d, 0x70, 0x3f, 0xfb, 0x5f, 0xf9, 0xf5, 0xca, 0x1a, 0x9d,
3380 0xbd, 0x19, 0x38, 0x80, 0xf1, 0x65, 0xae, 0x1f, 0x77, 0xd7, 0x50, 0x5d, 3385 0xd6, 0x8d, 0xf7, 0xe2, 0x1c, 0x95, 0x3f, 0xb9, 0xfd, 0xb0, 0x8a, 0x87,
3381 0x8e, 0x1e, 0x39, 0xaf, 0xe5, 0x91, 0x1c, 0x23, 0xc3, 0x75, 0xfd, 0x79, 3386 0xb2, 0x06, 0x1a, 0x8b, 0xce, 0x7d, 0xf2, 0x9c, 0x90, 0xe1, 0x47, 0x19,
3382 0x17, 0xd7, 0x5b, 0xf2, 0x9b, 0x3b, 0xfa, 0x07, 0xdd, 0x67, 0xdc, 0xa2, 3387 0x47, 0x06, 0x47, 0x7c, 0x5f, 0xdb, 0xe3, 0x44, 0xc6, 0x91, 0x8a, 0x57,
3383 0xeb, 0x49, 0x45, 0x2b, 0x65, 0xfd, 0xe6, 0xf3, 0x9f, 0x01, 0xed, 0xf5, 3388 0x6b, 0x7c, 0x59, 0xe3, 0xc7, 0x1e, 0x37, 0x61, 0xba, 0x57, 0x8c, 0x22,
3384 0x82, 0x8a, 0x66, 0x7b, 0x3c, 0x84, 0x69, 0x17, 0x46, 0x9e, 0x7a, 0x96, 3389 0x90, 0xa6, 0x77, 0x44, 0x62, 0x70, 0xe0, 0xc1, 0xfc, 0x9d, 0x00, 0x7f,
3385 0xd3, 0x87, 0xb1, 0x97, 0xef, 0x65, 0xd3, 0xec, 0xa7, 0x20, 0xf9, 0xa3, 3390 0x09, 0xd6, 0x5b, 0x4f, 0x51, 0x1f, 0xfa, 0x94, 0xe8, 0x67, 0x3f, 0x4e,
3386 0x58, 0xda, 0xa9, 0x68, 0x28, 0x3a, 0x9a, 0x86, 0xfe, 0x43, 0x5b, 0x61, 3391 0xd4, 0x72, 0xde, 0x57, 0xc1, 0x8f, 0x3d, 0xcf, 0x40, 0x9f, 0x1e, 0xce,
3387 0xdf, 0x22, 0xdb, 0xcd, 0x34, 0xf8, 0xf6, 0x18, 0xe1, 0x2f, 0x38, 0x8e, 3392 0x9f, 0xdb, 0xdc, 0x5f, 0xc0, 0xff, 0x7e, 0xbd, 0x63, 0x94, 0xfd, 0xcd,
3388 0x77, 0x48, 0xd0, 0x33, 0x06, 0x3b, 0x86, 0x32, 0x18, 0x2f, 0x6e, 0x06, 3393 0x71, 0xb8, 0x8b, 0x79, 0xb8, 0x05, 0x1e, 0x74, 0xc5, 0x43, 0xca, 0xe4,
3389 0xfd, 0x6b, 0x0b, 0xf6, 0x1d, 0x3d, 0xce, 0xfe, 0xd8, 0x9a, 0x7f, 0xdf, 3394 0x41, 0xd5, 0x19, 0xeb, 0x3e, 0x85, 0x46, 0x1c, 0x49, 0x5c, 0xa0, 0x5f,
3390 0xc0, 0x3d, 0xf0, 0x0d, 0x37, 0xa9, 0xf3, 0x07, 0x67, 0xf8, 0xfd, 0x22, 3395 0xa1, 0x3e, 0x2c, 0x8f, 0xed, 0x36, 0xe4, 0x1a, 0xa4, 0x7d, 0x61, 0xb6,
3391 0xe2, 0x2e, 0x35, 0x8e, 0x1b, 0x19, 0x1e, 0x1f, 0xe2, 0xfa, 0x7e, 0x9b, 3396 0x2f, 0x24, 0x76, 0x2e, 0xb7, 0xae, 0x0b, 0xf2, 0xba, 0x80, 0xb1, 0x0e,
3392 0xdf, 0x1b, 0x43, 0x39, 0xef, 0x8d, 0x79, 0xdc, 0x33, 0x67, 0x32, 0x49, 3397 0xf3, 0xc8, 0x53, 0x7d, 0x1e, 0x7e, 0x25, 0x8f, 0x6a, 0xdf, 0xdc, 0x7c,
3393 0xe0, 0x22, 0x51, 0x98, 0xfb, 0x5e, 0x55, 0xe3, 0x9a, 0x68, 0x39, 0xc7, 3398 0xb4, 0xf2, 0x49, 0x15, 0x9a, 0xfe, 0x50, 0x87, 0x0c, 0xbf, 0x51, 0xbd,
3394 0xa9, 0x6c, 0x3d, 0xe8, 0xe4, 0x7a, 0xbc, 0x13, 0x86, 0x8e, 0xb1, 0x5f, 3399 0xd2, 0xcd, 0xfa, 0x73, 0x97, 0xea, 0x79, 0xe0, 0x64, 0x37, 0xea, 0xc5,
3395 0x3a, 0x28, 0x4e, 0xad, 0x0b, 0xe9, 0x95, 0xde, 0xc9, 0xe0, 0x9b, 0xe2, 3400 0xc9, 0xee, 0xd3, 0x7c, 0xff, 0x32, 0x2f, 0x2d, 0xf4, 0x6e, 0x30, 0xb8,
3396 0x73, 0xc3, 0xdc, 0x4f, 0xc2, 0xc8, 0x3f, 0x51, 0x91, 0xe4, 0x77, 0xcb, 3401 0xab, 0xb2, 0xd7, 0x1f, 0xbb, 0x1e, 0x15, 0x16, 0x3d, 0xd4, 0xb9, 0x0f,
3397 0x04, 0xee, 0x9d, 0x53, 0x06, 0x68, 0x38, 0xa0, 0xf5, 0x45, 0x3e, 0x26, 3402 0xeb, 0x0f, 0xd0, 0x9f, 0x6e, 0xa0, 0xfe, 0x40, 0x33, 0xfb, 0x6c, 0xfb,
3398 0x52, 0xc8, 0x3b, 0xac, 0x3b, 0x2c, 0xeb, 0x7c, 0x1f, 0x59, 0xf6, 0xce, 3403 0x3d, 0x72, 0xeb, 0xfe, 0xe3, 0xde, 0x23, 0x5b, 0x1a, 0xac, 0xe7, 0xc5,
3399 0x51, 0x74, 0xd1, 0x51, 0xea, 0xd0, 0x76, 0x62, 0xb5, 0x3f, 0xa0, 0xf0, 3404 0xc4, 0xd8, 0x08, 0xce, 0x69, 0xe1, 0x7b, 0x7b, 0x3b, 0xe7, 0xf7, 0xb5,
3400 0x7b, 0xd3, 0xcc, 0xab, 0x79, 0xf2, 0x5b, 0xf5, 0x4c, 0x86, 0xf4, 0xaf, 3405 0x40, 0x84, 0xce, 0x4d, 0xbe, 0x46, 0xf6, 0x8a, 0x68, 0x10, 0xf6, 0x25,
3401 0x12, 0xa5, 0xd4, 0x87, 0x2b, 0x83, 0x0b, 0xf0, 0x63, 0x74, 0x9a, 0xfd, 3406 0x5f, 0xc7, 0xf7, 0x64, 0x08, 0xf3, 0xe5, 0x21, 0xfc, 0x3e, 0xd0, 0xe2,
3402 0x3f, 0xb2, 0x09, 0xf4, 0x18, 0xe3, 0x4f, 0xe3, 0xea, 0xca, 0x46, 0x83, 3407 0xd7, 0x49, 0xbe, 0x3c, 0x04, 0x8c, 0x72, 0x5d, 0x18, 0x33, 0xdf, 0x11,
3403 0xf6, 0xcd, 0x8d, 0xe2, 0x1c, 0x7d, 0x8f, 0xc8, 0x7d, 0x8f, 0x6b, 0x3c, 3408 0xc0, 0x21, 0xdf, 0x7c, 0xef, 0x08, 0xbc, 0xc7, 0xce, 0xfb, 0x50, 0x37,
3404 0x56, 0xb6, 0xa3, 0x90, 0x0e, 0x1c, 0x55, 0xe7, 0x04, 0x24, 0x8e, 0x94, 3409 0x44, 0x0c, 0xfd, 0x73, 0x53, 0x8d, 0x4e, 0xdf, 0x5b, 0x63, 0xb8, 0x9f,
3405 0xfe, 0xd2, 0x27, 0x9c, 0x8f, 0x76, 0x9c, 0x2a, 0x1c, 0xeb, 0xfc, 0xb0, 3410 0x51, 0x97, 0xe7, 0xda, 0x55, 0x81, 0x78, 0x2d, 0xcf, 0xbe, 0x37, 0xac,
3406 0xe2, 0x3b, 0x37, 0xdf, 0xb3, 0x78, 0x0d, 0xd2, 0xbd, 0x5c, 0x16, 0xb9, 3411 0xef, 0x12, 0x3d, 0x31, 0x68, 0xf6, 0xfd, 0x76, 0x3d, 0x90, 0x67, 0x72,
3407 0x04, 0xbf, 0x9b, 0xf8, 0x7e, 0xb0, 0x52, 0xfc, 0xde, 0x46, 0xfc, 0xa2, 3412 0x7f, 0x1a, 0x1a, 0xfd, 0xf5, 0xa3, 0xbc, 0x47, 0x3c, 0xdc, 0xaf, 0xdd,
3408 0xac, 0xb7, 0x11, 0x1f, 0x43, 0x5f, 0x1c, 0x67, 0x1c, 0xcd, 0x37, 0xf1, 3413 0x99, 0x45, 0xbc, 0x16, 0x89, 0x0b, 0x23, 0xe0, 0x61, 0x6c, 0xc4, 0x29,
3409 0xfd, 0x87, 0xf5, 0xfb, 0x8d, 0xdf, 0x67, 0xc0, 0x9b, 0xd7, 0x88, 0xa5, 3414 0x8f, 0xa5, 0x1e, 0xea, 0x1c, 0xd8, 0xa5, 0xec, 0xcc, 0x9e, 0x0b, 0xbd,
3410 0x19, 0x5f, 0x8c, 0xfb, 0x43, 0x6c, 0xf7, 0x6d, 0xd8, 0x6d, 0x68, 0xbb, 3415 0x76, 0xb0, 0xfe, 0xbf, 0xd2, 0xef, 0x17, 0x51, 0xb6, 0x57, 0xee, 0x8b,
3411 0xe3, 0xa6, 0xdd, 0xfa, 0x7e, 0x65, 0x95, 0x53, 0x22, 0x71, 0xab, 0xe8, 3416 0xf9, 0x8d, 0xf4, 0x6e, 0xca, 0x13, 0x69, 0x73, 0x6c, 0x7f, 0xcf, 0xb4,
3412 0x2a, 0xe3, 0x0a, 0xd5, 0xb3, 0x02, 0xb6, 0x53, 0xf2, 0xb5, 0x2b, 0x7b, 3417 0x90, 0x5e, 0x85, 0x7c, 0xaf, 0x47, 0x2d, 0xfd, 0x07, 0xe4, 0x8b, 0xeb,
3413 0x82, 0x6c, 0x4f, 0x40, 0x1c, 0x6c, 0xb5, 0xee, 0x2b, 0xe2, 0x7d, 0x7e, 3418 0x81, 0xc7, 0xea, 0x95, 0xdf, 0x94, 0x7f, 0x95, 0x3f, 0xe1, 0xf7, 0xe8,
3414 0xb9, 0x0f, 0xf3, 0xa8, 0x0b, 0xc6, 0x0a, 0xfe, 0x54, 0x7e, 0xd3, 0x72, 3419 0x4a, 0x12, 0x6b, 0x4c, 0xae, 0xa4, 0x84, 0xaf, 0x4f, 0x4e, 0xa9, 0x7b,
3415 0x73, 0xf3, 0xde, 0xea, 0x3f, 0xba, 0x91, 0xd2, 0x07, 0xf5, 0x4d, 0xc6, 3420 0x1e, 0xeb, 0xb7, 0xc4, 0xa5, 0xfc, 0x07, 0xe2, 0xfb, 0x38, 0x2e, 0xd6,
3416 0xa9, 0x05, 0xff, 0x1f, 0xe8, 0xfa, 0x76, 0x97, 0xee, 0xaf, 0xfe, 0x33, 3421 0x9f, 0x19, 0xb3, 0xfd, 0xbd, 0xe0, 0x3f, 0xeb, 0x3b, 0xc1, 0x23, 0xce,
3417 0xc3, 0xa8, 0x4f, 0x67, 0x86, 0xcf, 0xf2, 0xbb, 0x83, 0xfd, 0xd2, 0x43, 3422 0xe7, 0xf1, 0x74, 0x83, 0xea, 0x7b, 0xe7, 0x7b, 0xef, 0x48, 0x3b, 0x3e,
3418 0xff, 0x5b, 0xc8, 0xd8, 0xd5, 0xdb, 0xeb, 0x9b, 0x5d, 0x8f, 0x6a, 0x8b, 3423 0xba, 0x9f, 0xdb, 0x47, 0x67, 0xfb, 0x60, 0x65, 0xaf, 0x5c, 0x57, 0xcf,
3419 0x1e, 0xfa, 0xdc, 0x7f, 0x00, 0x5a, 0x33, 0xe6, 0xc0, 0x30, 0x14, 0x00, 3424 0x71, 0xac, 0x89, 0xa6, 0x0d, 0xf8, 0x7d, 0x23, 0xec, 0x47, 0xde, 0x84,
3420 0x00, 0x00 }; 3425 0xfd, 0x4e, 0xef, 0x52, 0x23, 0x8e, 0xf2, 0xa9, 0x60, 0x2c, 0x2c, 0xcc,
3426 0x27, 0xfb, 0x8e, 0x5c, 0xbc, 0x44, 0x62, 0x9f, 0x0d, 0x06, 0x31, 0x5f,
3427 0xd2, 0x84, 0x63, 0xbc, 0x14, 0xf7, 0x1e, 0x71, 0x08, 0x79, 0xf1, 0xe9,
3428 0x31, 0xe0, 0x27, 0xe2, 0x55, 0xec, 0x53, 0x78, 0x90, 0xee, 0x53, 0xad,
3429 0x04, 0x34, 0x67, 0x06, 0x39, 0xde, 0x4b, 0xdd, 0xf4, 0xfb, 0xe2, 0xac,
3430 0x08, 0x49, 0xf4, 0x99, 0xf9, 0x8b, 0xb8, 0xf6, 0x5a, 0xfc, 0xff, 0xa8,
3431 0x71, 0x4e, 0xf5, 0xc9, 0x88, 0x63, 0x6c, 0xc3, 0xf1, 0xae, 0xe5, 0xc6,
3432 0xbb, 0xe2, 0xa9, 0xd4, 0xed, 0x18, 0xdf, 0xab, 0xe7, 0xc6, 0xb7, 0xd2,
3433 0x4f, 0xf2, 0x7d, 0xd7, 0xbc, 0x3f, 0xe7, 0xee, 0x8f, 0xf7, 0xd3, 0x85,
3434 0x27, 0x16, 0xdf, 0xc0, 0x8d, 0xd5, 0xf2, 0xfc, 0x92, 0x39, 0x7d, 0x72,
3435 0x6e, 0xfe, 0xa1, 0xde, 0xbd, 0x61, 0xc4, 0xc3, 0x7f, 0x69, 0x4a, 0x77,
3436 0x8f, 0xc8, 0x15, 0x00, 0x00, 0x00 };
3421 3437
3422static u8 bnx2_TPAT_b09FwText[] = { 3438static u8 bnx2_TPAT_b09FwText[] = {
3423 0xbd, 0x58, 0x5d, 0x6c, 0x1c, 0xd5, 0x15, 0x3e, 0x73, 0x67, 0xd6, 0x3b, 3439 0xbd, 0x58, 0x5d, 0x6c, 0x1c, 0xd5, 0x15, 0x3e, 0x73, 0x67, 0xd6, 0x3b,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6e91b4b7aabb..6425603bc379 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3282,17 +3282,14 @@ static int bond_create_proc_entry(struct bonding *bond)
3282 struct net_device *bond_dev = bond->dev; 3282 struct net_device *bond_dev = bond->dev;
3283 3283
3284 if (bond_proc_dir) { 3284 if (bond_proc_dir) {
3285 bond->proc_entry = create_proc_entry(bond_dev->name, 3285 bond->proc_entry = proc_create_data(bond_dev->name,
3286 S_IRUGO, 3286 S_IRUGO, bond_proc_dir,
3287 bond_proc_dir); 3287 &bond_info_fops, bond);
3288 if (bond->proc_entry == NULL) { 3288 if (bond->proc_entry == NULL) {
3289 printk(KERN_WARNING DRV_NAME 3289 printk(KERN_WARNING DRV_NAME
3290 ": Warning: Cannot create /proc/net/%s/%s\n", 3290 ": Warning: Cannot create /proc/net/%s/%s\n",
3291 DRV_NAME, bond_dev->name); 3291 DRV_NAME, bond_dev->name);
3292 } else { 3292 } else {
3293 bond->proc_entry->data = bond;
3294 bond->proc_entry->proc_fops = &bond_info_fops;
3295 bond->proc_entry->owner = THIS_MODULE;
3296 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); 3293 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
3297 } 3294 }
3298 } 3295 }
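The bonding hunk above replaces create_proc_entry() followed by hand-assignment of ->data, ->proc_fops and ->owner with a single proc_create_data() call that attaches the file_operations and private pointer at creation time. A minimal, self-contained sketch of that pattern in the 2.6.26-era API (not taken from this patch; the example_* names and the seq_file plumbing are illustrative only):

/* Sketch of the proc_create_data() registration pattern; all names invented. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	/* m->private is the cookie handed to single_open() below */
	seq_printf(m, "cookie: %s\n", (char *)m->private);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is the pointer registered via proc_create_data() */
	return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static char example_cookie[] = "hello";

static int __init example_init(void)
{
	/* fops and private data are attached atomically at creation time */
	if (!proc_create_data("example", S_IRUGO, NULL,
			      &example_fops, example_cookie))
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");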
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 229303ff6a39..a0177fc55e28 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -38,7 +38,7 @@
38#define DRV_VERSION "1.0-ko" 38#define DRV_VERSION "1.0-ko"
39 39
40/* Firmware version */ 40/* Firmware version */
41#define FW_VERSION_MAJOR 5 41#define FW_VERSION_MAJOR 6
42#define FW_VERSION_MINOR 0 42#define FW_VERSION_MINOR 0
43#define FW_VERSION_MICRO 0 43#define FW_VERSION_MICRO 0
44#endif /* __CHELSIO_VERSION_H */ 44#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 2d139ec79777..f3cba5e24ec5 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1802,7 +1802,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1802 * it is protected by the before last buffer's el bit being set */ 1802 * it is protected by the before last buffer's el bit being set */
1803 if (rx->prev->skb) { 1803 if (rx->prev->skb) {
1804 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; 1804 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1805 put_unaligned(cpu_to_le32(rx->dma_addr), &prev_rfd->link); 1805 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1806 } 1806 }
1807 1807
1808 return 0; 1808 return 0;
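The e100 change above folds the endian conversion into the accessor: put_unaligned_le32() stores a CPU-order value as little-endian at a possibly unaligned address, replacing the put_unaligned(cpu_to_le32(v), p) pair. A rough illustration of the helper (the demo_hdr layout and function names are invented for this sketch and are not part of the driver):

/* Hypothetical packed header with a misaligned 32-bit little-endian field. */
#include <linux/types.h>
#include <asm/unaligned.h>

struct demo_hdr {
	u8 tag;
	u8 payload[7];		/* a 32-bit length sits at payload[1] */
} __attribute__((packed));

static void demo_write_le_len(struct demo_hdr *h, u32 len)
{
	/* byte-swap (on big-endian) and unaligned store in one call */
	put_unaligned_le32(len, &h->payload[1]);
}

static u32 demo_read_le_len(const struct demo_hdr *h)
{
	return get_unaligned_le32(&h->payload[1]);
}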
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 83bda6ccde98..56f50491a453 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -633,7 +633,7 @@ static void __init printEEPROMInfo(struct net_device *dev)
633 printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC)); 633 printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC));
634 printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI)); 634 printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI));
635 printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber)); 635 printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber));
636 printk(KERN_DEBUG " AutoPort: %d\n", GetBit(!Word,ee_Jabber)); 636 printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort));
637 printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex)); 637 printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex));
638 } 638 }
639 639
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index d7a3ea88eddb..32a4f17d35fc 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -67,6 +67,10 @@
67#define FEC_MAX_PORTS 1 67#define FEC_MAX_PORTS 1
68#endif 68#endif
69 69
70#if defined(CONFIG_FADS) || defined(CONFIG_RPXCLASSIC) || defined(CONFIG_M5272)
71#define HAVE_mii_link_interrupt
72#endif
73
70/* 74/*
71 * Define the fixed address of the FEC hardware. 75 * Define the fixed address of the FEC hardware.
72 */ 76 */
@@ -205,7 +209,10 @@ struct fec_enet_private {
205 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ 209 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
206 cbd_t *dirty_tx; /* The ring entries to be free()ed. */ 210 cbd_t *dirty_tx; /* The ring entries to be free()ed. */
207 uint tx_full; 211 uint tx_full;
208 spinlock_t lock; 212 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
213 spinlock_t hw_lock;
214 /* hold while accessing the mii_list_t() elements */
215 spinlock_t mii_lock;
209 216
210 uint phy_id; 217 uint phy_id;
211 uint phy_id_done; 218 uint phy_id_done;
@@ -309,6 +316,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
309 volatile fec_t *fecp; 316 volatile fec_t *fecp;
310 volatile cbd_t *bdp; 317 volatile cbd_t *bdp;
311 unsigned short status; 318 unsigned short status;
319 unsigned long flags;
312 320
313 fep = netdev_priv(dev); 321 fep = netdev_priv(dev);
314 fecp = (volatile fec_t*)dev->base_addr; 322 fecp = (volatile fec_t*)dev->base_addr;
@@ -318,6 +326,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
318 return 1; 326 return 1;
319 } 327 }
320 328
329 spin_lock_irqsave(&fep->hw_lock, flags);
321 /* Fill in a Tx ring entry */ 330 /* Fill in a Tx ring entry */
322 bdp = fep->cur_tx; 331 bdp = fep->cur_tx;
323 332
@@ -328,6 +337,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
328 * This should not happen, since dev->tbusy should be set. 337 * This should not happen, since dev->tbusy should be set.
329 */ 338 */
330 printk("%s: tx queue full!.\n", dev->name); 339 printk("%s: tx queue full!.\n", dev->name);
340 spin_unlock_irqrestore(&fep->hw_lock, flags);
331 return 1; 341 return 1;
332 } 342 }
333#endif 343#endif
@@ -366,8 +376,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
366 flush_dcache_range((unsigned long)skb->data, 376 flush_dcache_range((unsigned long)skb->data,
367 (unsigned long)skb->data + skb->len); 377 (unsigned long)skb->data + skb->len);
368 378
369 spin_lock_irq(&fep->lock);
370
371 /* Send it on its way. Tell FEC it's ready, interrupt when done, 379 /* Send it on its way. Tell FEC it's ready, interrupt when done,
372 * it's the last BD of the frame, and to put the CRC on the end. 380 * it's the last BD of the frame, and to put the CRC on the end.
373 */ 381 */
@@ -396,7 +404,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
396 404
397 fep->cur_tx = (cbd_t *)bdp; 405 fep->cur_tx = (cbd_t *)bdp;
398 406
399 spin_unlock_irq(&fep->lock); 407 spin_unlock_irqrestore(&fep->hw_lock, flags);
400 408
401 return 0; 409 return 0;
402} 410}
@@ -454,19 +462,20 @@ fec_enet_interrupt(int irq, void * dev_id)
454 struct net_device *dev = dev_id; 462 struct net_device *dev = dev_id;
455 volatile fec_t *fecp; 463 volatile fec_t *fecp;
456 uint int_events; 464 uint int_events;
457 int handled = 0; 465 irqreturn_t ret = IRQ_NONE;
458 466
459 fecp = (volatile fec_t*)dev->base_addr; 467 fecp = (volatile fec_t*)dev->base_addr;
460 468
461 /* Get the interrupt events that caused us to be here. 469 /* Get the interrupt events that caused us to be here.
462 */ 470 */
463 while ((int_events = fecp->fec_ievent) != 0) { 471 do {
472 int_events = fecp->fec_ievent;
464 fecp->fec_ievent = int_events; 473 fecp->fec_ievent = int_events;
465 474
466 /* Handle receive event in its own function. 475 /* Handle receive event in its own function.
467 */ 476 */
468 if (int_events & FEC_ENET_RXF) { 477 if (int_events & FEC_ENET_RXF) {
469 handled = 1; 478 ret = IRQ_HANDLED;
470 fec_enet_rx(dev); 479 fec_enet_rx(dev);
471 } 480 }
472 481
@@ -475,17 +484,18 @@ fec_enet_interrupt(int irq, void * dev_id)
475 them as part of the transmit process. 484 them as part of the transmit process.
476 */ 485 */
477 if (int_events & FEC_ENET_TXF) { 486 if (int_events & FEC_ENET_TXF) {
478 handled = 1; 487 ret = IRQ_HANDLED;
479 fec_enet_tx(dev); 488 fec_enet_tx(dev);
480 } 489 }
481 490
482 if (int_events & FEC_ENET_MII) { 491 if (int_events & FEC_ENET_MII) {
483 handled = 1; 492 ret = IRQ_HANDLED;
484 fec_enet_mii(dev); 493 fec_enet_mii(dev);
485 } 494 }
486 495
487 } 496 } while (int_events);
488 return IRQ_RETVAL(handled); 497
498 return ret;
489} 499}
490 500
491 501
@@ -498,7 +508,7 @@ fec_enet_tx(struct net_device *dev)
498 struct sk_buff *skb; 508 struct sk_buff *skb;
499 509
500 fep = netdev_priv(dev); 510 fep = netdev_priv(dev);
501 spin_lock(&fep->lock); 511 spin_lock_irq(&fep->hw_lock);
502 bdp = fep->dirty_tx; 512 bdp = fep->dirty_tx;
503 513
504 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { 514 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
@@ -557,7 +567,7 @@ fec_enet_tx(struct net_device *dev)
557 } 567 }
558 } 568 }
559 fep->dirty_tx = (cbd_t *)bdp; 569 fep->dirty_tx = (cbd_t *)bdp;
560 spin_unlock(&fep->lock); 570 spin_unlock_irq(&fep->hw_lock);
561} 571}
562 572
563 573
@@ -584,6 +594,8 @@ fec_enet_rx(struct net_device *dev)
584 fep = netdev_priv(dev); 594 fep = netdev_priv(dev);
585 fecp = (volatile fec_t*)dev->base_addr; 595 fecp = (volatile fec_t*)dev->base_addr;
586 596
597 spin_lock_irq(&fep->hw_lock);
598
587 /* First, grab all of the stats for the incoming packet. 599 /* First, grab all of the stats for the incoming packet.
588 * These get messed up if we get called due to a busy condition. 600 * These get messed up if we get called due to a busy condition.
589 */ 601 */
@@ -689,6 +701,8 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
689 */ 701 */
690 fecp->fec_r_des_active = 0; 702 fecp->fec_r_des_active = 0;
691#endif 703#endif
704
705 spin_unlock_irq(&fep->hw_lock);
692} 706}
693 707
694 708
@@ -702,11 +716,11 @@ fec_enet_mii(struct net_device *dev)
702 uint mii_reg; 716 uint mii_reg;
703 717
704 fep = netdev_priv(dev); 718 fep = netdev_priv(dev);
719 spin_lock_irq(&fep->mii_lock);
720
705 ep = fep->hwp; 721 ep = fep->hwp;
706 mii_reg = ep->fec_mii_data; 722 mii_reg = ep->fec_mii_data;
707 723
708 spin_lock(&fep->lock);
709
710 if ((mip = mii_head) == NULL) { 724 if ((mip = mii_head) == NULL) {
711 printk("MII and no head!\n"); 725 printk("MII and no head!\n");
712 goto unlock; 726 goto unlock;
@@ -723,7 +737,7 @@ fec_enet_mii(struct net_device *dev)
723 ep->fec_mii_data = mip->mii_regval; 737 ep->fec_mii_data = mip->mii_regval;
724 738
725unlock: 739unlock:
726 spin_unlock(&fep->lock); 740 spin_unlock_irq(&fep->mii_lock);
727} 741}
728 742
729static int 743static int
@@ -737,12 +751,11 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
737 /* Add PHY address to register command. 751 /* Add PHY address to register command.
738 */ 752 */
739 fep = netdev_priv(dev); 753 fep = netdev_priv(dev);
740 regval |= fep->phy_addr << 23; 754 spin_lock_irqsave(&fep->mii_lock, flags);
741 755
756 regval |= fep->phy_addr << 23;
742 retval = 0; 757 retval = 0;
743 758
744 spin_lock_irqsave(&fep->lock,flags);
745
746 if ((mip = mii_free) != NULL) { 759 if ((mip = mii_free) != NULL) {
747 mii_free = mip->mii_next; 760 mii_free = mip->mii_next;
748 mip->mii_regval = regval; 761 mip->mii_regval = regval;
@@ -759,9 +772,8 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
759 retval = 1; 772 retval = 1;
760 } 773 }
761 774
762 spin_unlock_irqrestore(&fep->lock,flags); 775 spin_unlock_irqrestore(&fep->mii_lock, flags);
763 776 return retval;
764 return(retval);
765} 777}
766 778
767static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c) 779static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
@@ -1222,7 +1234,7 @@ static phy_info_t const * const phy_info[] = {
1222}; 1234};
1223 1235
1224/* ------------------------------------------------------------------------- */ 1236/* ------------------------------------------------------------------------- */
1225#if !defined(CONFIG_M532x) 1237#ifdef HAVE_mii_link_interrupt
1226#ifdef CONFIG_RPXCLASSIC 1238#ifdef CONFIG_RPXCLASSIC
1227static void 1239static void
1228mii_link_interrupt(void *dev_id); 1240mii_link_interrupt(void *dev_id);
@@ -1362,18 +1374,8 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
1362 unsigned short irq; 1374 unsigned short irq;
1363 } *idp, id[] = { 1375 } *idp, id[] = {
1364 { "fec(TXF)", 23 }, 1376 { "fec(TXF)", 23 },
1365 { "fec(TXB)", 24 },
1366 { "fec(TXFIFO)", 25 },
1367 { "fec(TXCR)", 26 },
1368 { "fec(RXF)", 27 }, 1377 { "fec(RXF)", 27 },
1369 { "fec(RXB)", 28 },
1370 { "fec(MII)", 29 }, 1378 { "fec(MII)", 29 },
1371 { "fec(LC)", 30 },
1372 { "fec(HBERR)", 31 },
1373 { "fec(GRA)", 32 },
1374 { "fec(EBERR)", 33 },
1375 { "fec(BABT)", 34 },
1376 { "fec(BABR)", 35 },
1377 { NULL }, 1379 { NULL },
1378 }; 1380 };
1379 1381
@@ -1533,18 +1535,8 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
1533 unsigned short irq; 1535 unsigned short irq;
1534 } *idp, id[] = { 1536 } *idp, id[] = {
1535 { "fec(TXF)", 23 }, 1537 { "fec(TXF)", 23 },
1536 { "fec(TXB)", 24 },
1537 { "fec(TXFIFO)", 25 },
1538 { "fec(TXCR)", 26 },
1539 { "fec(RXF)", 27 }, 1538 { "fec(RXF)", 27 },
1540 { "fec(RXB)", 28 },
1541 { "fec(MII)", 29 }, 1539 { "fec(MII)", 29 },
1542 { "fec(LC)", 30 },
1543 { "fec(HBERR)", 31 },
1544 { "fec(GRA)", 32 },
1545 { "fec(EBERR)", 33 },
1546 { "fec(BABT)", 34 },
1547 { "fec(BABR)", 35 },
1548 { NULL }, 1540 { NULL },
1549 }; 1541 };
1550 1542
@@ -1660,18 +1652,8 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
1660 unsigned short irq; 1652 unsigned short irq;
1661 } *idp, id[] = { 1653 } *idp, id[] = {
1662 { "fec(TXF)", 36 }, 1654 { "fec(TXF)", 36 },
1663 { "fec(TXB)", 37 },
1664 { "fec(TXFIFO)", 38 },
1665 { "fec(TXCR)", 39 },
1666 { "fec(RXF)", 40 }, 1655 { "fec(RXF)", 40 },
1667 { "fec(RXB)", 41 },
1668 { "fec(MII)", 42 }, 1656 { "fec(MII)", 42 },
1669 { "fec(LC)", 43 },
1670 { "fec(HBERR)", 44 },
1671 { "fec(GRA)", 45 },
1672 { "fec(EBERR)", 46 },
1673 { "fec(BABT)", 47 },
1674 { "fec(BABR)", 48 },
1675 { NULL }, 1657 { NULL },
1676 }; 1658 };
1677 1659
@@ -2126,6 +2108,7 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
2126 2108
2127/* This interrupt occurs when the PHY detects a link change. 2109/* This interrupt occurs when the PHY detects a link change.
2128*/ 2110*/
2111#ifdef HAVE_mii_link_interrupt
2129#ifdef CONFIG_RPXCLASSIC 2112#ifdef CONFIG_RPXCLASSIC
2130static void 2113static void
2131mii_link_interrupt(void *dev_id) 2114mii_link_interrupt(void *dev_id)
@@ -2148,6 +2131,7 @@ mii_link_interrupt(int irq, void * dev_id)
2148 2131
2149 return IRQ_HANDLED; 2132 return IRQ_HANDLED;
2150} 2133}
2134#endif
2151 2135
2152static int 2136static int
2153fec_enet_open(struct net_device *dev) 2137fec_enet_open(struct net_device *dev)
@@ -2243,13 +2227,13 @@ static void set_multicast_list(struct net_device *dev)
2243 /* Catch all multicast addresses, so set the 2227 /* Catch all multicast addresses, so set the
2244 * filter to all 1's. 2228 * filter to all 1's.
2245 */ 2229 */
2246 ep->fec_hash_table_high = 0xffffffff; 2230 ep->fec_grp_hash_table_high = 0xffffffff;
2247 ep->fec_hash_table_low = 0xffffffff; 2231 ep->fec_grp_hash_table_low = 0xffffffff;
2248 } else { 2232 } else {
2249 /* Clear filter and add the addresses in hash register. 2233 /* Clear filter and add the addresses in hash register.
2250 */ 2234 */
2251 ep->fec_hash_table_high = 0; 2235 ep->fec_grp_hash_table_high = 0;
2252 ep->fec_hash_table_low = 0; 2236 ep->fec_grp_hash_table_low = 0;
2253 2237
2254 dmi = dev->mc_list; 2238 dmi = dev->mc_list;
2255 2239
@@ -2280,9 +2264,9 @@ static void set_multicast_list(struct net_device *dev)
2280 hash = (crc >> (32 - HASH_BITS)) & 0x3f; 2264 hash = (crc >> (32 - HASH_BITS)) & 0x3f;
2281 2265
2282 if (hash > 31) 2266 if (hash > 31)
2283 ep->fec_hash_table_high |= 1 << (hash - 32); 2267 ep->fec_grp_hash_table_high |= 1 << (hash - 32);
2284 else 2268 else
2285 ep->fec_hash_table_low |= 1 << hash; 2269 ep->fec_grp_hash_table_low |= 1 << hash;
2286 } 2270 }
2287 } 2271 }
2288 } 2272 }
@@ -2332,6 +2316,9 @@ int __init fec_enet_init(struct net_device *dev)
2332 return -ENOMEM; 2316 return -ENOMEM;
2333 } 2317 }
2334 2318
2319 spin_lock_init(&fep->hw_lock);
2320 spin_lock_init(&fep->mii_lock);
2321
2335 /* Create an Ethernet device instance. 2322 /* Create an Ethernet device instance.
2336 */ 2323 */
2337 fecp = (volatile fec_t *) fec_hw[index]; 2324 fecp = (volatile fec_t *) fec_hw[index];
@@ -2430,11 +2417,15 @@ int __init fec_enet_init(struct net_device *dev)
2430 */ 2417 */
2431 fec_request_intrs(dev); 2418 fec_request_intrs(dev);
2432 2419
2433 fecp->fec_hash_table_high = 0; 2420 fecp->fec_grp_hash_table_high = 0;
2434 fecp->fec_hash_table_low = 0; 2421 fecp->fec_grp_hash_table_low = 0;
2435 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; 2422 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
2436 fecp->fec_ecntrl = 2; 2423 fecp->fec_ecntrl = 2;
2437 fecp->fec_r_des_active = 0; 2424 fecp->fec_r_des_active = 0;
2425#ifndef CONFIG_M5272
2426 fecp->fec_hash_table_high = 0;
2427 fecp->fec_hash_table_low = 0;
2428#endif
2438 2429
2439 dev->base_addr = (unsigned long)fecp; 2430 dev->base_addr = (unsigned long)fecp;
2440 2431
@@ -2455,8 +2446,7 @@ int __init fec_enet_init(struct net_device *dev)
2455 2446
2456 /* Clear and enable interrupts */ 2447 /* Clear and enable interrupts */
2457 fecp->fec_ievent = 0xffc00000; 2448 fecp->fec_ievent = 0xffc00000;
2458 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB | 2449 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
2459 FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
2460 2450
2461 /* Queue up command to detect the PHY and initialize the 2451 /* Queue up command to detect the PHY and initialize the
2462 * remainder of the interface. 2452 * remainder of the interface.
@@ -2500,8 +2490,8 @@ fec_restart(struct net_device *dev, int duplex)
2500 2490
2501 /* Reset all multicast. 2491 /* Reset all multicast.
2502 */ 2492 */
2503 fecp->fec_hash_table_high = 0; 2493 fecp->fec_grp_hash_table_high = 0;
2504 fecp->fec_hash_table_low = 0; 2494 fecp->fec_grp_hash_table_low = 0;
2505 2495
2506 /* Set maximum receive buffer size. 2496 /* Set maximum receive buffer size.
2507 */ 2497 */
@@ -2583,8 +2573,7 @@ fec_restart(struct net_device *dev, int duplex)
2583 2573
2584 /* Enable interrupts we wish to service. 2574 /* Enable interrupts we wish to service.
2585 */ 2575 */
2586 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB | 2576 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
2587 FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
2588} 2577}
2589 2578
2590static void 2579static void
@@ -2624,7 +2613,7 @@ fec_stop(struct net_device *dev)
2624static int __init fec_enet_module_init(void) 2613static int __init fec_enet_module_init(void)
2625{ 2614{
2626 struct net_device *dev; 2615 struct net_device *dev;
2627 int i, j, err; 2616 int i, err;
2628 DECLARE_MAC_BUF(mac); 2617 DECLARE_MAC_BUF(mac);
2629 2618
2630 printk("FEC ENET Version 0.2\n"); 2619 printk("FEC ENET Version 0.2\n");
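The fec.c hunks above split the driver's single fep->lock into a hw_lock for the TX/RX rings and a mii_lock for the MII request list, and move the transmit path to spin_lock_irqsave(). A minimal sketch of that locking pattern, using invented demo_* names rather than the driver's real structures:

/* Illustrative per-resource spinlocks; not the fec driver's actual layout. */
#include <linux/spinlock.h>

struct demo_priv {
	spinlock_t hw_lock;	/* protects ring indices */
	spinlock_t mii_lock;	/* protects the MII request queue */
	unsigned int tx_tail;
};

static void demo_init(struct demo_priv *p)
{
	spin_lock_init(&p->hw_lock);
	spin_lock_init(&p->mii_lock);
}

/* Caller's IRQ state is unknown: save it, disable, and restore on unlock. */
static void demo_queue_tx(struct demo_priv *p)
{
	unsigned long flags;

	spin_lock_irqsave(&p->hw_lock, flags);
	p->tx_tail++;
	spin_unlock_irqrestore(&p->hw_lock, flags);
}

/* Path known to run with IRQs enabled: plain disable/enable is sufficient. */
static void demo_reap_tx(struct demo_priv *p)
{
	spin_lock_irq(&p->hw_lock);
	p->tx_tail--;
	spin_unlock_irq(&p->hw_lock);
}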
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 1d421606984f..292719daceff 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -88,8 +88,8 @@ typedef struct fec {
88 unsigned long fec_reserved7[158]; 88 unsigned long fec_reserved7[158];
89 unsigned long fec_addr_low; /* Low 32bits MAC address */ 89 unsigned long fec_addr_low; /* Low 32bits MAC address */
90 unsigned long fec_addr_high; /* High 16bits MAC address */ 90 unsigned long fec_addr_high; /* High 16bits MAC address */
91 unsigned long fec_hash_table_high; /* High 32bits hash table */ 91 unsigned long fec_grp_hash_table_high;/* High 32bits hash table */
92 unsigned long fec_hash_table_low; /* Low 32bits hash table */ 92 unsigned long fec_grp_hash_table_low; /* Low 32bits hash table */
93 unsigned long fec_r_des_start; /* Receive descriptor ring */ 93 unsigned long fec_r_des_start; /* Receive descriptor ring */
94 unsigned long fec_x_des_start; /* Transmit descriptor ring */ 94 unsigned long fec_x_des_start; /* Transmit descriptor ring */
95 unsigned long fec_r_buff_size; /* Maximum receive buff size */ 95 unsigned long fec_r_buff_size; /* Maximum receive buff size */
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e5e6352556fa..5f9c42e7a7f1 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -43,6 +43,29 @@
43 43
44#define DRIVER_NAME "mpc52xx-fec" 44#define DRIVER_NAME "mpc52xx-fec"
45 45
46#define FEC5200_PHYADDR_NONE (-1)
47#define FEC5200_PHYADDR_7WIRE (-2)
48
49/* Private driver data structure */
50struct mpc52xx_fec_priv {
51 int duplex;
52 int speed;
53 int r_irq;
54 int t_irq;
55 struct mpc52xx_fec __iomem *fec;
56 struct bcom_task *rx_dmatsk;
57 struct bcom_task *tx_dmatsk;
58 spinlock_t lock;
59 int msg_enable;
60
61 /* MDIO link details */
62 int phy_addr;
63 unsigned int phy_speed;
64 struct phy_device *phydev;
65 enum phy_state link;
66};
67
68
46static irqreturn_t mpc52xx_fec_interrupt(int, void *); 69static irqreturn_t mpc52xx_fec_interrupt(int, void *);
47static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *); 70static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
48static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *); 71static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
@@ -223,7 +246,7 @@ static int mpc52xx_fec_phy_start(struct net_device *dev)
223 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 246 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
224 int err; 247 int err;
225 248
226 if (!priv->has_phy) 249 if (priv->phy_addr < 0)
227 return 0; 250 return 0;
228 251
229 err = mpc52xx_fec_init_phy(dev); 252 err = mpc52xx_fec_init_phy(dev);
@@ -243,7 +266,7 @@ static void mpc52xx_fec_phy_stop(struct net_device *dev)
243{ 266{
244 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 267 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
245 268
246 if (!priv->has_phy) 269 if (!priv->phydev)
247 return; 270 return;
248 271
249 phy_disconnect(priv->phydev); 272 phy_disconnect(priv->phydev);
@@ -255,7 +278,7 @@ static void mpc52xx_fec_phy_stop(struct net_device *dev)
255static int mpc52xx_fec_phy_mii_ioctl(struct mpc52xx_fec_priv *priv, 278static int mpc52xx_fec_phy_mii_ioctl(struct mpc52xx_fec_priv *priv,
256 struct mii_ioctl_data *mii_data, int cmd) 279 struct mii_ioctl_data *mii_data, int cmd)
257{ 280{
258 if (!priv->has_phy) 281 if (!priv->phydev)
259 return -ENOTSUPP; 282 return -ENOTSUPP;
260 283
261 return phy_mii_ioctl(priv->phydev, mii_data, cmd); 284 return phy_mii_ioctl(priv->phydev, mii_data, cmd);
@@ -265,7 +288,7 @@ static void mpc52xx_fec_phy_hw_init(struct mpc52xx_fec_priv *priv)
265{ 288{
266 struct mpc52xx_fec __iomem *fec = priv->fec; 289 struct mpc52xx_fec __iomem *fec = priv->fec;
267 290
268 if (!priv->has_phy) 291 if (priv->phydev)
269 return; 292 return;
270 293
271 out_be32(&fec->mii_speed, priv->phy_speed); 294 out_be32(&fec->mii_speed, priv->phy_speed);
@@ -491,20 +514,23 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
491 514
492 out_be32(&fec->ievent, ievent); /* clear pending events */ 515 out_be32(&fec->ievent, ievent); /* clear pending events */
493 516
494 if (ievent & ~(FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) { 517 /* on fifo error, soft-reset fec */
495 if (ievent & ~FEC_IEVENT_TFINT) 518 if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {
496 dev_dbg(&dev->dev, "ievent: %08x\n", ievent); 519
520 if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
521 dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
522 if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
523 dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
524
525 mpc52xx_fec_reset(dev);
526
527 netif_wake_queue(dev);
497 return IRQ_HANDLED; 528 return IRQ_HANDLED;
498 } 529 }
499 530
500 if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR)) 531 if (ievent & ~FEC_IEVENT_TFINT)
501 dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n"); 532 dev_dbg(&dev->dev, "ievent: %08x\n", ievent);
502 if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
503 dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
504
505 mpc52xx_fec_reset(dev);
506 533
507 netif_wake_queue(dev);
508 return IRQ_HANDLED; 534 return IRQ_HANDLED;
509} 535}
510 536
@@ -701,7 +727,7 @@ static void mpc52xx_fec_start(struct net_device *dev)
701 rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */ 727 rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */
702 rcntrl |= FEC_RCNTRL_FCE; 728 rcntrl |= FEC_RCNTRL_FCE;
703 729
704 if (priv->has_phy) 730 if (priv->phy_addr != FEC5200_PHYADDR_7WIRE)
705 rcntrl |= FEC_RCNTRL_MII_MODE; 731 rcntrl |= FEC_RCNTRL_MII_MODE;
706 732
707 if (priv->duplex == DUPLEX_FULL) 733 if (priv->duplex == DUPLEX_FULL)
@@ -861,7 +887,10 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
861 struct net_device *ndev; 887 struct net_device *ndev;
862 struct mpc52xx_fec_priv *priv = NULL; 888 struct mpc52xx_fec_priv *priv = NULL;
863 struct resource mem; 889 struct resource mem;
864 const phandle *ph; 890 struct device_node *phy_node;
891 const phandle *phy_handle;
892 const u32 *prop;
893 int prop_size;
865 894
866 phys_addr_t rx_fifo; 895 phys_addr_t rx_fifo;
867 phys_addr_t tx_fifo; 896 phys_addr_t tx_fifo;
@@ -945,26 +974,37 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
945 mpc52xx_fec_get_paddr(ndev, ndev->dev_addr); 974 mpc52xx_fec_get_paddr(ndev, ndev->dev_addr);
946 975
947 priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT); 976 priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
948 priv->duplex = DUPLEX_FULL;
949 977
950 /* is the phy present in device tree? */ 978 /*
951 ph = of_get_property(op->node, "phy-handle", NULL); 979 * Link mode configuration
952 if (ph) { 980 */
953 const unsigned int *prop;
954 struct device_node *phy_dn;
955 priv->has_phy = 1;
956
957 phy_dn = of_find_node_by_phandle(*ph);
958 prop = of_get_property(phy_dn, "reg", NULL);
959 priv->phy_addr = *prop;
960 981
961 of_node_put(phy_dn); 982 /* Start with safe defaults for link connection */
983 priv->phy_addr = FEC5200_PHYADDR_NONE;
984 priv->speed = 100;
985 priv->duplex = DUPLEX_HALF;
986 priv->phy_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1;
987
988 /* the 7-wire property means don't use MII mode */
989 if (of_find_property(op->node, "fsl,7-wire-mode", NULL))
990 priv->phy_addr = FEC5200_PHYADDR_7WIRE;
991
992 /* The current speed preconfigures the speed of the MII link */
993 prop = of_get_property(op->node, "current-speed", &prop_size);
994 if (prop && (prop_size >= sizeof(u32) * 2)) {
995 priv->speed = prop[0];
996 priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
997 }
962 998
963 /* Phy speed */ 999 /* If there is a phy handle, setup link to that phy */
964 priv->phy_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1; 1000 phy_handle = of_get_property(op->node, "phy-handle", &prop_size);
965 } else { 1001 if (phy_handle && (prop_size >= sizeof(phandle))) {
966 dev_info(&ndev->dev, "can't find \"phy-handle\" in device" 1002 phy_node = of_find_node_by_phandle(*phy_handle);
967 " tree, using 7-wire mode\n"); 1003 prop = of_get_property(phy_node, "reg", &prop_size);
1004 if (prop && (prop_size >= sizeof(u32)))
1005 if ((*prop >= 0) && (*prop < PHY_MAX_ADDR))
1006 priv->phy_addr = *prop;
1007 of_node_put(phy_node);
968 } 1008 }
969 1009
970 /* Hardware init */ 1010 /* Hardware init */
@@ -979,6 +1019,20 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
979 if (rv < 0) 1019 if (rv < 0)
980 goto probe_error; 1020 goto probe_error;
981 1021
1022 /* Now report the link setup */
1023 switch (priv->phy_addr) {
1024 case FEC5200_PHYADDR_NONE:
1025 dev_info(&ndev->dev, "Fixed speed MII link: %i%cD\n",
1026 priv->speed, priv->duplex ? 'F' : 'H');
1027 break;
1028 case FEC5200_PHYADDR_7WIRE:
1029 dev_info(&ndev->dev, "using 7-wire PHY mode\n");
1030 break;
1031 default:
1032 dev_info(&ndev->dev, "Using PHY at MDIO address %i\n",
1033 priv->phy_addr);
1034 }
1035
982 /* We're done ! */ 1036 /* We're done ! */
983 dev_set_drvdata(&op->dev, ndev); 1037 dev_set_drvdata(&op->dev, ndev);
984 1038
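The mpc52xx probe rework above starts from safe link defaults and only overrides them when the optional "current-speed" and "phy-handle" properties are present and long enough. A hedged sketch of that device-tree parsing style (the defaults and demo_* names are placeholders, not the driver's code):

/* Read an optional two-cell speed/duplex property, else keep defaults. */
#include <linux/types.h>
#include <linux/of.h>

static void demo_read_link_config(struct device_node *np,
				  int *speed, int *full_duplex)
{
	const u32 *prop;
	int len;

	*speed = 100;		/* conservative defaults */
	*full_duplex = 0;

	prop = of_get_property(np, "current-speed", &len);
	if (prop && len >= (int)(2 * sizeof(u32))) {
		*speed = prop[0];
		*full_duplex = prop[1] ? 1 : 0;
	}
}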
diff --git a/drivers/net/fec_mpc52xx.h b/drivers/net/fec_mpc52xx.h
index 8b1f75397b9a..a227a525bdbb 100644
--- a/drivers/net/fec_mpc52xx.h
+++ b/drivers/net/fec_mpc52xx.h
@@ -26,25 +26,6 @@
26 26
27#define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000) 27#define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000)
28 28
29struct mpc52xx_fec_priv {
30 int duplex;
31 int r_irq;
32 int t_irq;
33 struct mpc52xx_fec __iomem *fec;
34 struct bcom_task *rx_dmatsk;
35 struct bcom_task *tx_dmatsk;
36 spinlock_t lock;
37 int msg_enable;
38
39 int has_phy;
40 unsigned int phy_speed;
41 unsigned int phy_addr;
42 struct phy_device *phydev;
43 enum phy_state link;
44 int speed;
45};
46
47
48/* ======================================================================== */ 29/* ======================================================================== */
49/* Hardware register sets & bits */ 30/* Hardware register sets & bits */
50/* ======================================================================== */ 31/* ======================================================================== */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 99a4b990939f..587afe7be689 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -131,8 +131,6 @@ static void free_skb_resources(struct gfar_private *priv);
131static void gfar_set_multi(struct net_device *dev); 131static void gfar_set_multi(struct net_device *dev);
132static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 132static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
133static void gfar_configure_serdes(struct net_device *dev); 133static void gfar_configure_serdes(struct net_device *dev);
134extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value);
135extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
136#ifdef CONFIG_GFAR_NAPI 134#ifdef CONFIG_GFAR_NAPI
137static int gfar_poll(struct napi_struct *napi, int budget); 135static int gfar_poll(struct napi_struct *napi, int budget);
138#endif 136#endif
@@ -477,24 +475,30 @@ static int init_phy(struct net_device *dev)
477 return 0; 475 return 0;
478} 476}
479 477
478/*
479 * Initialize TBI PHY interface for communicating with the
480 * SERDES lynx PHY on the chip. We communicate with this PHY
481 * through the MDIO bus on each controller, treating it as a
482 * "normal" PHY at the address found in the TBIPA register. We assume
483 * that the TBIPA register is valid. Either the MDIO bus code will set
484 * it to a value that doesn't conflict with other PHYs on the bus, or the
485 * value doesn't matter, as there are no other PHYs on the bus.
486 */
480static void gfar_configure_serdes(struct net_device *dev) 487static void gfar_configure_serdes(struct net_device *dev)
481{ 488{
482 struct gfar_private *priv = netdev_priv(dev); 489 struct gfar_private *priv = netdev_priv(dev);
483 struct gfar_mii __iomem *regs = 490 struct gfar_mii __iomem *regs =
484 (void __iomem *)&priv->regs->gfar_mii_regs; 491 (void __iomem *)&priv->regs->gfar_mii_regs;
492 int tbipa = gfar_read(&priv->regs->tbipa);
485 493
486 /* Initialise TBI i/f to communicate with serdes (lynx phy) */ 494 /* Single clk mode, mii mode off(for serdes communication) */
495 gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);
487 496
488 /* Single clk mode, mii mode off(for aerdes communication) */ 497 gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE,
489 gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);
490
491 /* Supported pause and full-duplex, no half-duplex */
492 gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
493 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 498 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
494 ADVERTISE_1000XPSE_ASYM); 499 ADVERTISE_1000XPSE_ASYM);
495 500
496 /* ANEG enable, restart ANEG, full duplex mode, speed[1] set */ 501 gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
497 gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
498 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 502 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
499} 503}
500 504
@@ -541,9 +545,6 @@ static void init_registers(struct net_device *dev)
541 545
542 /* Initialize the Minimum Frame Length Register */ 546 /* Initialize the Minimum Frame Length Register */
543 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 547 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
544
545 /* Assign the TBI an address which won't conflict with the PHYs */
546 gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
547} 548}
548 549
549 550
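
The comment added to gfar_configure_serdes() explains that the on-chip TBI/SERDES interface is now addressed at whatever value the TBIPA register currently holds, rather than at the old hard-coded TBIPA_VALUE. The sketch below models the resulting three MDIO writes in userspace; local_mdio_write() is a mock that only logs the calls, and the TBICON offset and the literal bit values are spelled out purely for illustration (the driver uses the symbolic constants from mii.h and gianfar.h).

#include <stdio.h>
#include <stdint.h>

/* Register offsets as in <linux/mii.h>; MII_TBICON is gianfar-specific and
 * its value here is only an illustrative placeholder. */
#define MII_BMCR        0x00
#define MII_ADVERTISE   0x04
#define MII_TBICON      0x11

/* Mock of the controller's TBIPA register */
static unsigned int tbipa_reg = 0x1f;

/* Mock local MDIO write: just log what the driver would program */
static void local_mdio_write(int phy_addr, int regnum, uint16_t val)
{
	printf("mdio write: addr=%d reg=0x%02x val=0x%04x\n",
	       phy_addr, regnum, val);
}

static void configure_serdes(void)
{
	/* Address the TBI PHY at whatever TBIPA currently holds */
	int tbipa = tbipa_reg;

	local_mdio_write(tbipa, MII_TBICON, 0x0020);    /* clock select (illustrative bit) */
	local_mdio_write(tbipa, MII_ADVERTISE, 0x01a0); /* 1000X full duplex + pause       */
	local_mdio_write(tbipa, MII_BMCR, 0x1340);      /* ANEG enable/restart, FD, 1000   */
}

int main(void)
{
	configure_serdes();
	return 0;
}
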
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 0d0883609469..fd487be3993e 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -137,7 +137,6 @@ extern const char gfar_driver_version[];
137#define DEFAULT_RXCOUNT 0 137#define DEFAULT_RXCOUNT 0
138#endif /* CONFIG_GFAR_NAPI */ 138#endif /* CONFIG_GFAR_NAPI */
139 139
140#define TBIPA_VALUE 0x1f
141#define MIIMCFG_INIT_VALUE 0x00000007 140#define MIIMCFG_INIT_VALUE 0x00000007
142#define MIIMCFG_RESET 0x80000000 141#define MIIMCFG_RESET 0x80000000
143#define MIIMIND_BUSY 0x00000001 142#define MIIMIND_BUSY 0x00000001
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index b8898927236a..ebcfb27a904e 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -78,7 +78,6 @@ int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
78 * same as system mdio bus, used for controlling the external PHYs, for eg. 78 * same as system mdio bus, used for controlling the external PHYs, for eg.
79 */ 79 */
80int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum) 80int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum)
81
82{ 81{
83 u16 value; 82 u16 value;
84 83
@@ -122,7 +121,7 @@ int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
122} 121}
123 122
124/* Reset the MIIM registers, and wait for the bus to free */ 123/* Reset the MIIM registers, and wait for the bus to free */
125int gfar_mdio_reset(struct mii_bus *bus) 124static int gfar_mdio_reset(struct mii_bus *bus)
126{ 125{
127 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; 126 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
128 unsigned int timeout = PHY_INIT_TIMEOUT; 127 unsigned int timeout = PHY_INIT_TIMEOUT;
@@ -152,14 +151,15 @@ int gfar_mdio_reset(struct mii_bus *bus)
152} 151}
153 152
154 153
155int gfar_mdio_probe(struct device *dev) 154static int gfar_mdio_probe(struct device *dev)
156{ 155{
157 struct platform_device *pdev = to_platform_device(dev); 156 struct platform_device *pdev = to_platform_device(dev);
158 struct gianfar_mdio_data *pdata; 157 struct gianfar_mdio_data *pdata;
159 struct gfar_mii __iomem *regs; 158 struct gfar_mii __iomem *regs;
159 struct gfar __iomem *enet_regs;
160 struct mii_bus *new_bus; 160 struct mii_bus *new_bus;
161 struct resource *r; 161 struct resource *r;
162 int err = 0; 162 int i, err = 0;
163 163
164 if (NULL == dev) 164 if (NULL == dev)
165 return -EINVAL; 165 return -EINVAL;
@@ -199,6 +199,34 @@ int gfar_mdio_probe(struct device *dev)
199 new_bus->dev = dev; 199 new_bus->dev = dev;
200 dev_set_drvdata(dev, new_bus); 200 dev_set_drvdata(dev, new_bus);
201 201
202 /*
203 * This is mildly evil, but so is our hardware for doing this.
204 * Also, we have to cast back to struct gfar_mii because of
205 * definition weirdness done in gianfar.h.
206 */
207 enet_regs = (struct gfar __iomem *)
208 ((char *)regs - offsetof(struct gfar, gfar_mii_regs));
209
210 /* Scan the bus, looking for an empty spot for TBIPA */
211 gfar_write(&enet_regs->tbipa, 0);
212 for (i = PHY_MAX_ADDR; i > 0; i--) {
213 u32 phy_id;
214 int r;
215
216 r = get_phy_id(new_bus, i, &phy_id);
217 if (r)
218 return r;
219
220 if (phy_id == 0xffffffff)
221 break;
222 }
223
224 /* The bus is full. We don't support using 31 PHYs, sorry */
225 if (i == 0)
226 return -EBUSY;
227
228 gfar_write(&enet_regs->tbipa, i);
229
202 err = mdiobus_register(new_bus); 230 err = mdiobus_register(new_bus);
203 231
204 if (0 != err) { 232 if (0 != err) {
@@ -218,7 +246,7 @@ reg_map_fail:
218} 246}
219 247
220 248
221int gfar_mdio_remove(struct device *dev) 249static int gfar_mdio_remove(struct device *dev)
222{ 250{
223 struct mii_bus *bus = dev_get_drvdata(dev); 251 struct mii_bus *bus = dev_get_drvdata(dev);
224 252
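
The probe hunk above picks the TBI address dynamically: it clears TBIPA, scans the MDIO bus downward from PHY_MAX_ADDR, and keeps the first address that reads back an all-ones PHY ID (meaning nothing answers there), giving up with -EBUSY if the loop reaches zero. A self-contained model of that scan, with the bus mocked as a small table of PHY IDs and the IDs themselves being made-up examples:

#include <stdio.h>
#include <stdint.h>

#define PHY_MAX_ADDR 32

/* Mock bus: nonzero entries are addresses where a real PHY answers. */
static const uint32_t mock_phy_ids[PHY_MAX_ADDR + 1] = {
	[0] = 0x01410cc2,	/* an external PHY at address 0 (example ID) */
	[1] = 0x0007c0c0,	/* and another one at address 1 (example ID) */
};

/* Stand-in for get_phy_id(): an empty address reads back all ones. */
static int mock_get_phy_id(int addr, uint32_t *phy_id)
{
	*phy_id = mock_phy_ids[addr] ? mock_phy_ids[addr] : 0xffffffff;
	return 0;
}

/* Scan downward from the top of the bus for a free spot for the TBI PHY */
static int pick_tbipa(void)
{
	int i;

	for (i = PHY_MAX_ADDR; i > 0; i--) {
		uint32_t phy_id;

		if (mock_get_phy_id(i, &phy_id))
			return -1;	/* read error */
		if (phy_id == 0xffffffff)
			break;		/* nothing answers here: use it */
	}

	return i ? i : -1;		/* i == 0 means the bus is full */
}

int main(void)
{
	printf("TBIPA candidate address: %d\n", pick_tbipa());
	return 0;
}
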
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index b373091c7031..2af28b16a0e2 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -41,6 +41,9 @@ struct gfar_mii {
41 41
42int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum); 42int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
43int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); 43int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
44int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
45 int regnum, u16 value);
46int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
44int __init gfar_mdio_init(void); 47int __init gfar_mdio_init(void);
45void gfar_mdio_exit(void); 48void gfar_mdio_exit(void);
46#endif /* GIANFAR_PHY_H */ 49#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index b53f6b6491b3..e5c2380f50ca 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1508,7 +1508,7 @@ static int hamachi_rx(struct net_device *dev)
1508 hmp->rx_buf_sz, 1508 hmp->rx_buf_sz,
1509 PCI_DMA_FROMDEVICE); 1509 PCI_DMA_FROMDEVICE);
1510 buf_addr = (u8 *) hmp->rx_skbuff[entry]->data; 1510 buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
1511 frame_status = le32_to_cpu(get_unaligned((__le32*)&(buf_addr[data_size - 12]))); 1511 frame_status = get_unaligned_le32(&(buf_addr[data_size - 12]));
1512 if (hamachi_debug > 4) 1512 if (hamachi_debug > 4)
1513 printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n", 1513 printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",
1514 frame_status); 1514 frame_status);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 1da55dd2a5a0..9d5721287d6f 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -148,13 +148,13 @@ static void sp_xmit_on_air(unsigned long channel)
148 148
149 if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) { 149 if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) {
150 sp->led_state = 0x70; 150 sp->led_state = 0x70;
151 sp->tty->driver->write(sp->tty, &sp->led_state, 1); 151 sp->tty->ops->write(sp->tty, &sp->led_state, 1);
152 sp->tx_enable = 1; 152 sp->tx_enable = 1;
153 actual = sp->tty->driver->write(sp->tty, sp->xbuff, sp->status2); 153 actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2);
154 sp->xleft -= actual; 154 sp->xleft -= actual;
155 sp->xhead += actual; 155 sp->xhead += actual;
156 sp->led_state = 0x60; 156 sp->led_state = 0x60;
157 sp->tty->driver->write(sp->tty, &sp->led_state, 1); 157 sp->tty->ops->write(sp->tty, &sp->led_state, 1);
158 sp->status2 = 0; 158 sp->status2 = 0;
159 } else 159 } else
160 mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100); 160 mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100);
@@ -220,13 +220,13 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
220 */ 220 */
221 if (sp->duplex == 1) { 221 if (sp->duplex == 1) {
222 sp->led_state = 0x70; 222 sp->led_state = 0x70;
223 sp->tty->driver->write(sp->tty, &sp->led_state, 1); 223 sp->tty->ops->write(sp->tty, &sp->led_state, 1);
224 sp->tx_enable = 1; 224 sp->tx_enable = 1;
225 actual = sp->tty->driver->write(sp->tty, sp->xbuff, count); 225 actual = sp->tty->ops->write(sp->tty, sp->xbuff, count);
226 sp->xleft = count - actual; 226 sp->xleft = count - actual;
227 sp->xhead = sp->xbuff + actual; 227 sp->xhead = sp->xbuff + actual;
228 sp->led_state = 0x60; 228 sp->led_state = 0x60;
229 sp->tty->driver->write(sp->tty, &sp->led_state, 1); 229 sp->tty->ops->write(sp->tty, &sp->led_state, 1);
230 } else { 230 } else {
231 sp->xleft = count; 231 sp->xleft = count;
232 sp->xhead = sp->xbuff; 232 sp->xhead = sp->xbuff;
@@ -444,7 +444,7 @@ static void sixpack_write_wakeup(struct tty_struct *tty)
444 } 444 }
445 445
446 if (sp->tx_enable) { 446 if (sp->tx_enable) {
447 actual = tty->driver->write(tty, sp->xhead, sp->xleft); 447 actual = tty->ops->write(tty, sp->xhead, sp->xleft);
448 sp->xleft -= actual; 448 sp->xleft -= actual;
449 sp->xhead += actual; 449 sp->xhead += actual;
450 } 450 }
@@ -491,9 +491,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
491 sixpack_decode(sp, buf, count1); 491 sixpack_decode(sp, buf, count1);
492 492
493 sp_put(sp); 493 sp_put(sp);
494 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) 494 tty_unthrottle(tty);
495 && tty->driver->unthrottle)
496 tty->driver->unthrottle(tty);
497} 495}
498 496
499/* 497/*
@@ -554,8 +552,8 @@ static void resync_tnc(unsigned long channel)
554 /* resync the TNC */ 552 /* resync the TNC */
555 553
556 sp->led_state = 0x60; 554 sp->led_state = 0x60;
557 sp->tty->driver->write(sp->tty, &sp->led_state, 1); 555 sp->tty->ops->write(sp->tty, &sp->led_state, 1);
558 sp->tty->driver->write(sp->tty, &resync_cmd, 1); 556 sp->tty->ops->write(sp->tty, &resync_cmd, 1);
559 557
560 558
561 /* Start resync timer again -- the TNC might be still absent */ 559 /* Start resync timer again -- the TNC might be still absent */
@@ -573,7 +571,7 @@ static inline int tnc_init(struct sixpack *sp)
573 571
574 tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP); 572 tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP);
575 573
576 sp->tty->driver->write(sp->tty, &inbyte, 1); 574 sp->tty->ops->write(sp->tty, &inbyte, 1);
577 575
578 del_timer(&sp->resync_t); 576 del_timer(&sp->resync_t);
579 sp->resync_t.data = (unsigned long) sp; 577 sp->resync_t.data = (unsigned long) sp;
@@ -601,6 +599,8 @@ static int sixpack_open(struct tty_struct *tty)
601 599
602 if (!capable(CAP_NET_ADMIN)) 600 if (!capable(CAP_NET_ADMIN))
603 return -EPERM; 601 return -EPERM;
602 if (tty->ops->write == NULL)
603 return -EOPNOTSUPP;
604 604
605 dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup); 605 dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup);
606 if (!dev) { 606 if (!dev) {
@@ -914,9 +914,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
914 } else { /* output watchdog char if idle */ 914 } else { /* output watchdog char if idle */
915 if ((sp->status2 != 0) && (sp->duplex == 1)) { 915 if ((sp->status2 != 0) && (sp->duplex == 1)) {
916 sp->led_state = 0x70; 916 sp->led_state = 0x70;
917 sp->tty->driver->write(sp->tty, &sp->led_state, 1); 917 sp->tty->ops->write(sp->tty, &sp->led_state, 1);
918 sp->tx_enable = 1; 918 sp->tx_enable = 1;
919 actual = sp->tty->driver->write(sp->tty, sp->xbuff, sp->status2); 919 actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2);
920 sp->xleft -= actual; 920 sp->xleft -= actual;
921 sp->xhead += actual; 921 sp->xhead += actual;
922 sp->led_state = 0x60; 922 sp->led_state = 0x60;
@@ -926,7 +926,7 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
926 } 926 }
927 927
928 /* needed to trigger the TNC watchdog */ 928 /* needed to trigger the TNC watchdog */
929 sp->tty->driver->write(sp->tty, &sp->led_state, 1); 929 sp->tty->ops->write(sp->tty, &sp->led_state, 1);
930 930
931 /* if the state byte has been received, the TNC is present, 931 /* if the state byte has been received, the TNC is present,
932 so the resync timer can be reset. */ 932 so the resync timer can be reset. */
@@ -956,12 +956,12 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
956 if ((sp->status & SIXP_RX_DCD_MASK) == 956 if ((sp->status & SIXP_RX_DCD_MASK) ==
957 SIXP_RX_DCD_MASK) { 957 SIXP_RX_DCD_MASK) {
958 sp->led_state = 0x68; 958 sp->led_state = 0x68;
959 sp->tty->driver->write(sp->tty, &sp->led_state, 1); 959 sp->tty->ops->write(sp->tty, &sp->led_state, 1);
960 } 960 }
961 } else { 961 } else {
962 sp->led_state = 0x60; 962 sp->led_state = 0x60;
963 /* fill trailing bytes with zeroes */ 963 /* fill trailing bytes with zeroes */
964 sp->tty->driver->write(sp->tty, &sp->led_state, 1); 964 sp->tty->ops->write(sp->tty, &sp->led_state, 1);
965 rest = sp->rx_count; 965 rest = sp->rx_count;
966 if (rest != 0) 966 if (rest != 0)
967 for (i = rest; i <= 3; i++) 967 for (i = rest; i <= 3; i++)
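
A recurring pattern in the converted line disciplines (6pack here, mkiss and the PPP drivers below) is that tty->ops->write() may accept fewer bytes than requested, so the caller tracks the remainder in xleft/xhead and finishes the frame from the write-wakeup callback. Below is a small userspace model of that bookkeeping; the tty write is mocked as a sink that takes at most four bytes per call.

#include <stdio.h>
#include <string.h>

/* Mock tty write: accepts at most 4 bytes per call. */
static int mock_tty_write(const unsigned char *buf, int len)
{
	int accepted = len > 4 ? 4 : len;

	printf("wrote %d byte(s): %.*s\n", accepted, accepted, (const char *)buf);
	return accepted;
}

int main(void)
{
	unsigned char xbuff[] = "KISS-frame-payload";
	int count = (int)strlen((const char *)xbuff);

	/* Initial transmit attempt, as in ax_encaps()/sp_encaps() */
	int actual = mock_tty_write(xbuff, count);
	int xleft = count - actual;		/* bytes still to send */
	unsigned char *xhead = xbuff + actual;	/* next byte to send   */

	/* Each write-wakeup pushes whatever room the tty has now */
	while (xleft > 0) {
		actual = mock_tty_write(xhead, xleft);
		xleft -= actual;
		xhead += actual;
	}

	return 0;
}
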
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 30c9b3b0d131..65166035aca0 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -516,7 +516,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
516 spin_unlock_bh(&ax->buflock); 516 spin_unlock_bh(&ax->buflock);
517 517
518 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); 518 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
519 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); 519 actual = ax->tty->ops->write(ax->tty, ax->xbuff, count);
520 ax->stats.tx_packets++; 520 ax->stats.tx_packets++;
521 ax->stats.tx_bytes += actual; 521 ax->stats.tx_bytes += actual;
522 522
@@ -546,7 +546,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
546 } 546 }
547 547
548 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, 548 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
549 (ax->tty->driver->chars_in_buffer(ax->tty) || ax->xleft) ? 549 (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ?
550 "bad line quality" : "driver error"); 550 "bad line quality" : "driver error");
551 551
552 ax->xleft = 0; 552 ax->xleft = 0;
@@ -736,6 +736,8 @@ static int mkiss_open(struct tty_struct *tty)
736 736
737 if (!capable(CAP_NET_ADMIN)) 737 if (!capable(CAP_NET_ADMIN))
738 return -EPERM; 738 return -EPERM;
739 if (tty->ops->write == NULL)
740 return -EOPNOTSUPP;
739 741
740 dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup); 742 dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
741 if (!dev) { 743 if (!dev) {
@@ -754,8 +756,7 @@ static int mkiss_open(struct tty_struct *tty)
754 tty->disc_data = ax; 756 tty->disc_data = ax;
755 tty->receive_room = 65535; 757 tty->receive_room = 65535;
756 758
757 if (tty->driver->flush_buffer) 759 tty_driver_flush_buffer(tty);
758 tty->driver->flush_buffer(tty);
759 760
760 /* Restore default settings */ 761 /* Restore default settings */
761 dev->type = ARPHRD_AX25; 762 dev->type = ARPHRD_AX25;
@@ -935,9 +936,7 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
935 } 936 }
936 937
937 mkiss_put(ax); 938 mkiss_put(ax);
938 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) 939 tty_unthrottle(tty);
939 && tty->driver->unthrottle)
940 tty->driver->unthrottle(tty);
941} 940}
942 941
943/* 942/*
@@ -962,7 +961,7 @@ static void mkiss_write_wakeup(struct tty_struct *tty)
962 goto out; 961 goto out;
963 } 962 }
964 963
965 actual = tty->driver->write(tty, ax->xhead, ax->xleft); 964 actual = tty->ops->write(tty, ax->xhead, ax->xleft);
966 ax->xleft -= actual; 965 ax->xleft -= actual;
967 ax->xhead += actual; 966 ax->xhead += actual;
968 967
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index ce4fc2ec2fe4..00527805e4f1 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1302,13 +1302,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1302 if (ibmveth_proc_dir) { 1302 if (ibmveth_proc_dir) {
1303 char u_addr[10]; 1303 char u_addr[10];
1304 sprintf(u_addr, "%x", adapter->vdev->unit_address); 1304 sprintf(u_addr, "%x", adapter->vdev->unit_address);
1305 entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir); 1305 entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
1306 if (!entry) { 1306 &ibmveth_proc_fops, adapter);
1307 if (!entry)
1307 ibmveth_error_printk("Cannot create adapter proc entry"); 1308 ibmveth_error_printk("Cannot create adapter proc entry");
1308 } else {
1309 entry->data = (void *) adapter;
1310 entry->proc_fops = &ibmveth_proc_fops;
1311 }
1312 } 1309 }
1313 return; 1310 return;
1314} 1311}
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index fc753d7f674e..e6f40b7f9041 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -64,7 +64,7 @@ static int irtty_chars_in_buffer(struct sir_dev *dev)
64 IRDA_ASSERT(priv != NULL, return -1;); 64 IRDA_ASSERT(priv != NULL, return -1;);
65 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;); 65 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
66 66
67 return priv->tty->driver->chars_in_buffer(priv->tty); 67 return tty_chars_in_buffer(priv->tty);
68} 68}
69 69
70/* Wait (sleep) until underlaying hardware finished transmission 70/* Wait (sleep) until underlaying hardware finished transmission
@@ -93,10 +93,8 @@ static void irtty_wait_until_sent(struct sir_dev *dev)
93 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;); 93 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
94 94
95 tty = priv->tty; 95 tty = priv->tty;
96 if (tty->driver->wait_until_sent) { 96 if (tty->ops->wait_until_sent) {
97 lock_kernel(); 97 tty->ops->wait_until_sent(tty, msecs_to_jiffies(100));
98 tty->driver->wait_until_sent(tty, msecs_to_jiffies(100));
99 unlock_kernel();
100 } 98 }
101 else { 99 else {
102 msleep(USBSERIAL_TX_DONE_DELAY); 100 msleep(USBSERIAL_TX_DONE_DELAY);
@@ -125,48 +123,14 @@ static int irtty_change_speed(struct sir_dev *dev, unsigned speed)
125 123
126 tty = priv->tty; 124 tty = priv->tty;
127 125
128 lock_kernel(); 126 mutex_lock(&tty->termios_mutex);
129 old_termios = *(tty->termios); 127 old_termios = *(tty->termios);
130 cflag = tty->termios->c_cflag; 128 cflag = tty->termios->c_cflag;
131 129 tty_encode_baud_rate(tty, speed, speed);
132 cflag &= ~CBAUD; 130 if (tty->ops->set_termios)
133 131 tty->ops->set_termios(tty, &old_termios);
134 IRDA_DEBUG(2, "%s(), Setting speed to %d\n", __FUNCTION__, speed);
135
136 switch (speed) {
137 case 1200:
138 cflag |= B1200;
139 break;
140 case 2400:
141 cflag |= B2400;
142 break;
143 case 4800:
144 cflag |= B4800;
145 break;
146 case 19200:
147 cflag |= B19200;
148 break;
149 case 38400:
150 cflag |= B38400;
151 break;
152 case 57600:
153 cflag |= B57600;
154 break;
155 case 115200:
156 cflag |= B115200;
157 break;
158 case 9600:
159 default:
160 cflag |= B9600;
161 break;
162 }
163
164 tty->termios->c_cflag = cflag;
165 if (tty->driver->set_termios)
166 tty->driver->set_termios(tty, &old_termios);
167 unlock_kernel();
168
169 priv->io.speed = speed; 132 priv->io.speed = speed;
133 mutex_unlock(&tty->termios_mutex);
170 134
171 return 0; 135 return 0;
172} 136}
@@ -202,8 +166,8 @@ static int irtty_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
202 * This function is not yet defined for all tty driver, so 166 * This function is not yet defined for all tty driver, so
203 * let's be careful... Jean II 167 * let's be careful... Jean II
204 */ 168 */
205 IRDA_ASSERT(priv->tty->driver->tiocmset != NULL, return -1;); 169 IRDA_ASSERT(priv->tty->ops->tiocmset != NULL, return -1;);
206 priv->tty->driver->tiocmset(priv->tty, NULL, set, clear); 170 priv->tty->ops->tiocmset(priv->tty, NULL, set, clear);
207 171
208 return 0; 172 return 0;
209} 173}
@@ -225,17 +189,13 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
225 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;); 189 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
226 190
227 tty = priv->tty; 191 tty = priv->tty;
228 if (!tty->driver->write) 192 if (!tty->ops->write)
229 return 0; 193 return 0;
230 tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 194 tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
231 if (tty->driver->write_room) { 195 writelen = tty_write_room(tty);
232 writelen = tty->driver->write_room(tty); 196 if (writelen > len)
233 if (writelen > len)
234 writelen = len;
235 }
236 else
237 writelen = len; 197 writelen = len;
238 return tty->driver->write(tty, ptr, writelen); 198 return tty->ops->write(tty, ptr, writelen);
239} 199}
240 200
241/* ------------------------------------------------------- */ 201/* ------------------------------------------------------- */
@@ -321,7 +281,7 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
321 struct ktermios old_termios; 281 struct ktermios old_termios;
322 int cflag; 282 int cflag;
323 283
324 lock_kernel(); 284 mutex_lock(&tty->termios_mutex);
325 old_termios = *(tty->termios); 285 old_termios = *(tty->termios);
326 cflag = tty->termios->c_cflag; 286 cflag = tty->termios->c_cflag;
327 287
@@ -331,9 +291,9 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
331 cflag |= CREAD; 291 cflag |= CREAD;
332 292
333 tty->termios->c_cflag = cflag; 293 tty->termios->c_cflag = cflag;
334 if (tty->driver->set_termios) 294 if (tty->ops->set_termios)
335 tty->driver->set_termios(tty, &old_termios); 295 tty->ops->set_termios(tty, &old_termios);
336 unlock_kernel(); 296 mutex_unlock(&tty->termios_mutex);
337} 297}
338 298
339/*****************************************************************/ 299/*****************************************************************/
@@ -359,8 +319,8 @@ static int irtty_start_dev(struct sir_dev *dev)
359 319
360 tty = priv->tty; 320 tty = priv->tty;
361 321
362 if (tty->driver->start) 322 if (tty->ops->start)
363 tty->driver->start(tty); 323 tty->ops->start(tty);
364 /* Make sure we can receive more data */ 324 /* Make sure we can receive more data */
365 irtty_stop_receiver(tty, FALSE); 325 irtty_stop_receiver(tty, FALSE);
366 326
@@ -388,8 +348,8 @@ static int irtty_stop_dev(struct sir_dev *dev)
388 348
389 /* Make sure we don't receive more data */ 349 /* Make sure we don't receive more data */
390 irtty_stop_receiver(tty, TRUE); 350 irtty_stop_receiver(tty, TRUE);
391 if (tty->driver->stop) 351 if (tty->ops->stop)
392 tty->driver->stop(tty); 352 tty->ops->stop(tty);
393 353
394 mutex_unlock(&irtty_mutex); 354 mutex_unlock(&irtty_mutex);
395 355
@@ -483,11 +443,10 @@ static int irtty_open(struct tty_struct *tty)
483 443
484 /* stop the underlying driver */ 444 /* stop the underlying driver */
485 irtty_stop_receiver(tty, TRUE); 445 irtty_stop_receiver(tty, TRUE);
486 if (tty->driver->stop) 446 if (tty->ops->stop)
487 tty->driver->stop(tty); 447 tty->ops->stop(tty);
488 448
489 if (tty->driver->flush_buffer) 449 tty_driver_flush_buffer(tty);
490 tty->driver->flush_buffer(tty);
491 450
492 /* apply mtt override */ 451 /* apply mtt override */
493 sir_tty_drv.qos_mtt_bits = qos_mtt_bits; 452 sir_tty_drv.qos_mtt_bits = qos_mtt_bits;
@@ -564,8 +523,8 @@ static void irtty_close(struct tty_struct *tty)
564 /* Stop tty */ 523 /* Stop tty */
565 irtty_stop_receiver(tty, TRUE); 524 irtty_stop_receiver(tty, TRUE);
566 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 525 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
567 if (tty->driver->stop) 526 if (tty->ops->stop)
568 tty->driver->stop(tty); 527 tty->ops->stop(tty);
569 528
570 kfree(priv); 529 kfree(priv);
571 530
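
The speed-change path above drops the hand-rolled CBAUD switch in favour of tty_encode_baud_rate() plus the driver's set_termios hook, serialized by termios_mutex instead of the big kernel lock. There is no userspace equivalent of tty_encode_baud_rate(), but the same "let the termios API encode the rate" idea looks like this with the POSIX cfsetispeed()/cfsetospeed() helpers:

#include <stdio.h>
#include <string.h>
#include <termios.h>

int main(void)
{
	struct termios tio;

	/* Start from a zeroed termios; on a real device this would come
	 * from tcgetattr() on the open tty. */
	memset(&tio, 0, sizeof(tio));

	/* Instead of or-ing B115200 into c_cflag by hand, let the
	 * termios helpers encode the rate. */
	if (cfsetispeed(&tio, B115200) || cfsetospeed(&tio, B115200)) {
		perror("cfsetspeed");
		return 1;
	}

	printf("encoded output speed constant: %lu (B115200 = %lu)\n",
	       (unsigned long)cfgetospeed(&tio), (unsigned long)B115200);
	return 0;
}
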
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 93916cf33f29..ad92d3ff1c40 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -464,7 +464,7 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
464 } 464 }
465 465
466 fcs = ~(crc32_le(~0, buf, new_len)); 466 fcs = ~(crc32_le(~0, buf, new_len));
467 if(fcs != le32_to_cpu(get_unaligned((__le32 *)(buf+new_len)))) { 467 if(fcs != get_unaligned_le32(buf + new_len)) {
468 IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); 468 IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
469 mcs->stats.rx_errors++; 469 mcs->stats.rx_errors++;
470 mcs->stats.rx_crc_errors++; 470 mcs->stats.rx_crc_errors++;
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index e59c485bc497..051963782749 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -329,7 +329,7 @@ static void fir_eof(struct stir_cb *stir)
329 } 329 }
330 330
331 fcs = ~(crc32_le(~0, rx_buff->data, len)); 331 fcs = ~(crc32_le(~0, rx_buff->data, len));
332 if (fcs != le32_to_cpu(get_unaligned((__le32 *)(rx_buff->data+len)))) { 332 if (fcs != get_unaligned_le32(rx_buff->data + len)) {
333 pr_debug("crc error calc 0x%x len %d\n", fcs, len); 333 pr_debug("crc error calc 0x%x len %d\n", fcs, len);
334 stir->stats.rx_errors++; 334 stir->stats.rx_errors++;
335 stir->stats.rx_crc_errors++; 335 stir->stats.rx_crc_errors++;
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index acd082a96a4f..d15e00b8591e 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1674,13 +1674,12 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1674 if (vlsi_proc_root != NULL) { 1674 if (vlsi_proc_root != NULL) {
1675 struct proc_dir_entry *ent; 1675 struct proc_dir_entry *ent;
1676 1676
1677 ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root); 1677 ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
1678 vlsi_proc_root, VLSI_PROC_FOPS, ndev);
1678 if (!ent) { 1679 if (!ent) {
1679 IRDA_WARNING("%s: failed to create proc entry\n", 1680 IRDA_WARNING("%s: failed to create proc entry\n",
1680 __FUNCTION__); 1681 __FUNCTION__);
1681 } else { 1682 } else {
1682 ent->data = ndev;
1683 ent->proc_fops = VLSI_PROC_FOPS;
1684 ent->size = 0; 1683 ent->size = 0;
1685 } 1684 }
1686 idev->proc_entry = ent; 1685 idev->proc_entry = ent;
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 6fda0af9d0a6..95e87a2f8896 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -188,7 +188,8 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
188EXPORT_SYMBOL_GPL(mlx4_cq_resize); 188EXPORT_SYMBOL_GPL(mlx4_cq_resize);
189 189
190int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, 190int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
191 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq) 191 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
192 int collapsed)
192{ 193{
193 struct mlx4_priv *priv = mlx4_priv(dev); 194 struct mlx4_priv *priv = mlx4_priv(dev);
194 struct mlx4_cq_table *cq_table = &priv->cq_table; 195 struct mlx4_cq_table *cq_table = &priv->cq_table;
@@ -224,6 +225,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
224 cq_context = mailbox->buf; 225 cq_context = mailbox->buf;
225 memset(cq_context, 0, sizeof *cq_context); 226 memset(cq_context, 0, sizeof *cq_context);
226 227
228 cq_context->flags = cpu_to_be32(!!collapsed << 18);
227 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); 229 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
228 cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn; 230 cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
229 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; 231 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
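
mlx4_cq_alloc() gains a "collapsed" argument above, which ends up as a single bit (bit 18) in the big-endian flags word of the CQ context. The encoding reduces to the line below, shown with glibc's htobe32() standing in for the kernel's cpu_to_be32():

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	int collapsed = 1;

	/* !!collapsed normalises any nonzero value to 1 before shifting
	 * it into bit 18 of the (big-endian) CQ context flags word. */
	uint32_t flags_be = htobe32((uint32_t)(!!collapsed) << 18);

	printf("cq_context->flags = 0x%08x (cpu order 0x%08x)\n",
	       flags_be, be32toh(flags_be));
	return 0;
}
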
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 79b317b88c86..03a9abcce524 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -551,7 +551,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
551 u64 mtt_seg; 551 u64 mtt_seg;
552 int err = -ENOMEM; 552 int err = -ENOMEM;
553 553
554 if (page_shift < 12 || page_shift >= 32) 554 if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
555 return -EINVAL; 555 return -EINVAL;
556 556
557 /* All MTTs must fit in the same page */ 557 /* All MTTs must fit in the same page */
@@ -607,15 +607,9 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
607void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, 607void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
608 u32 *lkey, u32 *rkey) 608 u32 *lkey, u32 *rkey)
609{ 609{
610 u32 key;
611
612 if (!fmr->maps) 610 if (!fmr->maps)
613 return; 611 return;
614 612
615 key = key_to_hw_index(fmr->mr.key);
616 key &= dev->caps.num_mpts - 1;
617 *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
618
619 fmr->maps = 0; 613 fmr->maps = 0;
620 614
621 *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; 615 *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
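
mlx4_fmr_alloc() above replaces the hard-coded lower bound of 12 on page_shift with ffs(dev->caps.page_size_cap) - 1, i.e. the smallest page size the device reports in its capability bitmask. A standalone check of that expression, using ffs() from <strings.h> and a made-up capability mask:

#include <stdio.h>
#include <strings.h>

int main(void)
{
	/* Capability bitmask: bit n set means 2^n-byte pages are supported.
	 * 0xfffff000 is an example meaning "4 KiB and larger". */
	unsigned int page_size_cap = 0xfffff000u;
	int min_page_shift = ffs((int)page_size_cap) - 1;
	int page_shift = 9;	/* a request that should be rejected */

	printf("minimum supported page_shift = %d\n", min_page_shift);

	/* The driver then rejects page_shift < min_page_shift or >= 32 */
	printf("page_shift %d %s\n", page_shift,
	       (page_shift < min_page_shift || page_shift >= 32) ?
	       "rejected (-EINVAL)" : "accepted");
	return 0;
}
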
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index cead81e80f0c..ef63c8d2bd7e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2437,7 +2437,7 @@ static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev)
2437 int status; 2437 int status;
2438 2438
2439 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); 2439 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
2440 if (unlikely(IS_ERR(segs))) 2440 if (IS_ERR(segs))
2441 goto drop; 2441 goto drop;
2442 2442
2443 while (segs) { 2443 while (segs) {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3ac8529bb92c..6bf9e76b0a00 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -48,7 +48,7 @@ config VITESSE_PHY
48config SMSC_PHY 48config SMSC_PHY
49 tristate "Drivers for SMSC PHYs" 49 tristate "Drivers for SMSC PHYs"
50 ---help--- 50 ---help---
51 Currently supports the LAN83C185 PHY 51 Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
52 52
53config BROADCOM_PHY 53config BROADCOM_PHY
54 tristate "Drivers for Broadcom PHYs" 54 tristate "Drivers for Broadcom PHYs"
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ddf8d51832a6..ac3c01d28fdf 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -256,7 +256,7 @@ void phy_prepare_link(struct phy_device *phydev,
256/** 256/**
257 * phy_connect - connect an ethernet device to a PHY device 257 * phy_connect - connect an ethernet device to a PHY device
258 * @dev: the network device to connect 258 * @dev: the network device to connect
259 * @phy_id: the PHY device to connect 259 * @bus_id: the id string of the PHY device to connect
260 * @handler: callback function for state change notifications 260 * @handler: callback function for state change notifications
261 * @flags: PHY device's dev_flags 261 * @flags: PHY device's dev_flags
262 * @interface: PHY device's interface 262 * @interface: PHY device's interface
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index b1d8ed40ad98..73baa7a3bb0e 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -12,6 +12,8 @@
12 * Free Software Foundation; either version 2 of the License, or (at your 12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. 13 * option) any later version.
14 * 14 *
15 * Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@smsc.com
16 *
15 */ 17 */
16 18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -38,7 +40,7 @@
38 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4) 40 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
39 41
40 42
41static int lan83c185_config_intr(struct phy_device *phydev) 43static int smsc_phy_config_intr(struct phy_device *phydev)
42{ 44{
43 int rc = phy_write (phydev, MII_LAN83C185_IM, 45 int rc = phy_write (phydev, MII_LAN83C185_IM,
44 ((PHY_INTERRUPT_ENABLED == phydev->interrupts) 46 ((PHY_INTERRUPT_ENABLED == phydev->interrupts)
@@ -48,16 +50,16 @@ static int lan83c185_config_intr(struct phy_device *phydev)
48 return rc < 0 ? rc : 0; 50 return rc < 0 ? rc : 0;
49} 51}
50 52
51static int lan83c185_ack_interrupt(struct phy_device *phydev) 53static int smsc_phy_ack_interrupt(struct phy_device *phydev)
52{ 54{
53 int rc = phy_read (phydev, MII_LAN83C185_ISF); 55 int rc = phy_read (phydev, MII_LAN83C185_ISF);
54 56
55 return rc < 0 ? rc : 0; 57 return rc < 0 ? rc : 0;
56} 58}
57 59
58static int lan83c185_config_init(struct phy_device *phydev) 60static int smsc_phy_config_init(struct phy_device *phydev)
59{ 61{
60 return lan83c185_ack_interrupt (phydev); 62 return smsc_phy_ack_interrupt (phydev);
61} 63}
62 64
63 65
@@ -73,22 +75,87 @@ static struct phy_driver lan83c185_driver = {
73 /* basic functions */ 75 /* basic functions */
74 .config_aneg = genphy_config_aneg, 76 .config_aneg = genphy_config_aneg,
75 .read_status = genphy_read_status, 77 .read_status = genphy_read_status,
76 .config_init = lan83c185_config_init, 78 .config_init = smsc_phy_config_init,
77 79
78 /* IRQ related */ 80 /* IRQ related */
79 .ack_interrupt = lan83c185_ack_interrupt, 81 .ack_interrupt = smsc_phy_ack_interrupt,
80 .config_intr = lan83c185_config_intr, 82 .config_intr = smsc_phy_config_intr,
83
84 .driver = { .owner = THIS_MODULE, }
85};
86
87static struct phy_driver lan8187_driver = {
88 .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */
89 .phy_id_mask = 0xfffffff0,
90 .name = "SMSC LAN8187",
91
92 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
93 | SUPPORTED_Asym_Pause),
94 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
95
96 /* basic functions */
97 .config_aneg = genphy_config_aneg,
98 .read_status = genphy_read_status,
99 .config_init = smsc_phy_config_init,
100
101 /* IRQ related */
102 .ack_interrupt = smsc_phy_ack_interrupt,
103 .config_intr = smsc_phy_config_intr,
104
105 .driver = { .owner = THIS_MODULE, }
106};
107
108static struct phy_driver lan8700_driver = {
109 .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
110 .phy_id_mask = 0xfffffff0,
111 .name = "SMSC LAN8700",
112
113 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
114 | SUPPORTED_Asym_Pause),
115 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
116
117 /* basic functions */
118 .config_aneg = genphy_config_aneg,
119 .read_status = genphy_read_status,
120 .config_init = smsc_phy_config_init,
121
122 /* IRQ related */
123 .ack_interrupt = smsc_phy_ack_interrupt,
124 .config_intr = smsc_phy_config_intr,
81 125
82 .driver = { .owner = THIS_MODULE, } 126 .driver = { .owner = THIS_MODULE, }
83}; 127};
84 128
85static int __init smsc_init(void) 129static int __init smsc_init(void)
86{ 130{
87 return phy_driver_register (&lan83c185_driver); 131 int ret;
132
133 ret = phy_driver_register (&lan83c185_driver);
134 if (ret)
135 goto err1;
136
137 ret = phy_driver_register (&lan8187_driver);
138 if (ret)
139 goto err2;
140
141 ret = phy_driver_register (&lan8700_driver);
142 if (ret)
143 goto err3;
144
145 return 0;
146
147err3:
148 phy_driver_unregister (&lan8187_driver);
149err2:
150 phy_driver_unregister (&lan83c185_driver);
151err1:
152 return ret;
88} 153}
89 154
90static void __exit smsc_exit(void) 155static void __exit smsc_exit(void)
91{ 156{
157 phy_driver_unregister (&lan8700_driver);
158 phy_driver_unregister (&lan8187_driver);
92 phy_driver_unregister (&lan83c185_driver); 159 phy_driver_unregister (&lan83c185_driver);
93} 160}
94 161
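
smsc_init() above registers three phy_driver structures in sequence and unwinds the already-registered ones through the err3/err2/err1 labels when a later registration fails. The same pattern with mock register/unregister functions, forcing the third registration to fail so the unwind path runs:

#include <stdio.h>

/* Mock registration: fail for the driver named "C" to show the unwind */
static int mock_register(const char *name)
{
	int fail = (name[0] == 'C');	/* illustrative failure condition */

	printf("register %s: %s\n", name, fail ? "error" : "ok");
	return fail ? -1 : 0;
}

static void mock_unregister(const char *name)
{
	printf("unregister %s\n", name);
}

static int smsc_style_init(void)
{
	int ret;

	ret = mock_register("A");
	if (ret)
		goto err1;
	ret = mock_register("B");
	if (ret)
		goto err2;
	ret = mock_register("C");
	if (ret)
		goto err3;
	return 0;

err3:
	mock_unregister("B");	/* undo in reverse order of registration */
err2:
	mock_unregister("A");
err1:
	return ret;
}

int main(void)
{
	return smsc_style_init() ? 1 : 0;
}
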
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index f023d5b67e6e..f1a52def1241 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -158,6 +158,9 @@ ppp_asynctty_open(struct tty_struct *tty)
158 struct asyncppp *ap; 158 struct asyncppp *ap;
159 int err; 159 int err;
160 160
161 if (tty->ops->write == NULL)
162 return -EOPNOTSUPP;
163
161 err = -ENOMEM; 164 err = -ENOMEM;
162 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 165 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
163 if (!ap) 166 if (!ap)
@@ -358,9 +361,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
358 if (!skb_queue_empty(&ap->rqueue)) 361 if (!skb_queue_empty(&ap->rqueue))
359 tasklet_schedule(&ap->tsk); 362 tasklet_schedule(&ap->tsk);
360 ap_put(ap); 363 ap_put(ap);
361 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) 364 tty_unthrottle(tty);
362 && tty->driver->unthrottle)
363 tty->driver->unthrottle(tty);
364} 365}
365 366
366static void 367static void
@@ -676,7 +677,7 @@ ppp_async_push(struct asyncppp *ap)
676 if (!tty_stuffed && ap->optr < ap->olim) { 677 if (!tty_stuffed && ap->optr < ap->olim) {
677 avail = ap->olim - ap->optr; 678 avail = ap->olim - ap->optr;
678 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 679 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
679 sent = tty->driver->write(tty, ap->optr, avail); 680 sent = tty->ops->write(tty, ap->optr, avail);
680 if (sent < 0) 681 if (sent < 0)
681 goto flush; /* error, e.g. loss of CD */ 682 goto flush; /* error, e.g. loss of CD */
682 ap->optr += sent; 683 ap->optr += sent;
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 0d80fa546719..b8f0369a71e7 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -207,6 +207,9 @@ ppp_sync_open(struct tty_struct *tty)
207 struct syncppp *ap; 207 struct syncppp *ap;
208 int err; 208 int err;
209 209
210 if (tty->ops->write == NULL)
211 return -EOPNOTSUPP;
212
210 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 213 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
211 err = -ENOMEM; 214 err = -ENOMEM;
212 if (!ap) 215 if (!ap)
@@ -398,9 +401,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
398 if (!skb_queue_empty(&ap->rqueue)) 401 if (!skb_queue_empty(&ap->rqueue))
399 tasklet_schedule(&ap->tsk); 402 tasklet_schedule(&ap->tsk);
400 sp_put(ap); 403 sp_put(ap);
401 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) 404 tty_unthrottle(tty);
402 && tty->driver->unthrottle)
403 tty->driver->unthrottle(tty);
404} 405}
405 406
406static void 407static void
@@ -653,7 +654,7 @@ ppp_sync_push(struct syncppp *ap)
653 tty_stuffed = 0; 654 tty_stuffed = 0;
654 if (!tty_stuffed && ap->tpkt) { 655 if (!tty_stuffed && ap->tpkt) {
655 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 656 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
656 sent = tty->driver->write(tty, ap->tpkt->data, ap->tpkt->len); 657 sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
657 if (sent < 0) 658 if (sent < 0)
658 goto flush; /* error, e.g. loss of CD */ 659 goto flush; /* error, e.g. loss of CD */
659 if (sent < ap->tpkt->len) { 660 if (sent < ap->tpkt->len) {
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 4fad4ddb3504..58a26a47af29 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1052,11 +1052,9 @@ static int __init pppoe_proc_init(void)
1052{ 1052{
1053 struct proc_dir_entry *p; 1053 struct proc_dir_entry *p;
1054 1054
1055 p = create_proc_entry("pppoe", S_IRUGO, init_net.proc_net); 1055 p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops);
1056 if (!p) 1056 if (!p)
1057 return -ENOMEM; 1057 return -ENOMEM;
1058
1059 p->proc_fops = &pppoe_seq_fops;
1060 return 0; 1058 return 0;
1061} 1059}
1062#else /* CONFIG_PROC_FS */ 1060#else /* CONFIG_PROC_FS */
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 3d10ca050b79..244d7830c92a 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2469,12 +2469,12 @@ static int __init pppol2tp_init(void)
2469 goto out_unregister_pppol2tp_proto; 2469 goto out_unregister_pppol2tp_proto;
2470 2470
2471#ifdef CONFIG_PROC_FS 2471#ifdef CONFIG_PROC_FS
2472 pppol2tp_proc = create_proc_entry("pppol2tp", 0, init_net.proc_net); 2472 pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0,
2473 &pppol2tp_proc_fops);
2473 if (!pppol2tp_proc) { 2474 if (!pppol2tp_proc) {
2474 err = -ENOMEM; 2475 err = -ENOMEM;
2475 goto out_unregister_pppox_proto; 2476 goto out_unregister_pppox_proto;
2476 } 2477 }
2477 pppol2tp_proc->proc_fops = &pppol2tp_proc_fops;
2478#endif /* CONFIG_PROC_FS */ 2478#endif /* CONFIG_PROC_FS */
2479 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", 2479 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
2480 PPPOL2TP_DRV_VERSION); 2480 PPPOL2TP_DRV_VERSION);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 3acfeeabdee1..657242504621 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1617,6 +1617,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1617 SET_NETDEV_DEV(dev, &pdev->dev); 1617 SET_NETDEV_DEV(dev, &pdev->dev);
1618 tp = netdev_priv(dev); 1618 tp = netdev_priv(dev);
1619 tp->dev = dev; 1619 tp->dev = dev;
1620 tp->pci_dev = pdev;
1620 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); 1621 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1621 1622
1622 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 1623 /* enable device (incl. PCI PM wakeup and hotplug setup) */
@@ -1705,18 +1706,18 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1705 1706
1706 rtl8169_print_mac_version(tp); 1707 rtl8169_print_mac_version(tp);
1707 1708
1708 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { 1709 for (i = 0; i < ARRAY_SIZE(rtl_chip_info); i++) {
1709 if (tp->mac_version == rtl_chip_info[i].mac_version) 1710 if (tp->mac_version == rtl_chip_info[i].mac_version)
1710 break; 1711 break;
1711 } 1712 }
1712 if (i < 0) { 1713 if (i == ARRAY_SIZE(rtl_chip_info)) {
1713 /* Unknown chip: assume array element #0, original RTL-8169 */ 1714 /* Unknown chip: assume array element #0, original RTL-8169 */
1714 if (netif_msg_probe(tp)) { 1715 if (netif_msg_probe(tp)) {
1715 dev_printk(KERN_DEBUG, &pdev->dev, 1716 dev_printk(KERN_DEBUG, &pdev->dev,
1716 "unknown chip version, assuming %s\n", 1717 "unknown chip version, assuming %s\n",
1717 rtl_chip_info[0].name); 1718 rtl_chip_info[0].name);
1718 } 1719 }
1719 i++; 1720 i = 0;
1720 } 1721 }
1721 tp->chipset = i; 1722 tp->chipset = i;
1722 1723
@@ -1777,7 +1778,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1777#endif 1778#endif
1778 1779
1779 tp->intr_mask = 0xffff; 1780 tp->intr_mask = 0xffff;
1780 tp->pci_dev = pdev;
1781 tp->mmio_addr = ioaddr; 1781 tp->mmio_addr = ioaddr;
1782 tp->align = cfg->align; 1782 tp->align = cfg->align;
1783 tp->hw_start = cfg->hw_start; 1783 tp->hw_start = cfg->hw_start;
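
The r8169 probe now scans rtl_chip_info[] forward and falls back to entry 0 (the original RTL-8169) when the detected mac_version matches nothing, instead of the old backward scan that left i at -1. The lookup reduces to the sketch below; the abbreviated table contents are placeholders, not the driver's real table.

#include <stdio.h>

struct chip_info {
	int mac_version;
	const char *name;
};

/* Abbreviated stand-in for rtl_chip_info[]; entry 0 is the fallback. */
static const struct chip_info rtl_chip_info[] = {
	{ 1, "RTL8169"  },
	{ 2, "RTL8169s" },
	{ 3, "RTL8110s" },
};
#define NCHIPS (sizeof(rtl_chip_info) / sizeof(rtl_chip_info[0]))

static unsigned int lookup_chipset(int mac_version)
{
	unsigned int i;

	for (i = 0; i < NCHIPS; i++)
		if (rtl_chip_info[i].mac_version == mac_version)
			break;

	if (i == NCHIPS) {
		/* Unknown chip: assume array element #0 */
		printf("unknown chip version, assuming %s\n",
		       rtl_chip_info[0].name);
		i = 0;
	}
	return i;
}

int main(void)
{
	printf("chipset index: %u\n", lookup_chipset(3));
	printf("chipset index: %u\n", lookup_chipset(99));
	return 0;
}
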
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index e7fd08adbbac..2b8fd68bc516 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -77,7 +77,7 @@ static int rionet_capable = 1;
77 * could be made into a hash table to save memory depending 77 * could be made into a hash table to save memory depending
78 * on system trade-offs. 78 * on system trade-offs.
79 */ 79 */
80static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES]; 80static struct rio_dev **rionet_active;
81 81
82#define is_rionet_capable(pef, src_ops, dst_ops) \ 82#define is_rionet_capable(pef, src_ops, dst_ops) \
83 ((pef & RIO_PEF_INB_MBOX) && \ 83 ((pef & RIO_PEF_INB_MBOX) && \
@@ -195,7 +195,8 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
195 } 195 }
196 196
197 if (eth->h_dest[0] & 0x01) { 197 if (eth->h_dest[0] & 0x01) {
198 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++) 198 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
199 i++)
199 if (rionet_active[i]) 200 if (rionet_active[i])
200 rionet_queue_tx_msg(skb, ndev, 201 rionet_queue_tx_msg(skb, ndev,
201 rionet_active[i]); 202 rionet_active[i]);
@@ -385,6 +386,8 @@ static void rionet_remove(struct rio_dev *rdev)
385 struct net_device *ndev = NULL; 386 struct net_device *ndev = NULL;
386 struct rionet_peer *peer, *tmp; 387 struct rionet_peer *peer, *tmp;
387 388
389 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
390 __ilog2(sizeof(void *)) + 4 : 0);
388 unregister_netdev(ndev); 391 unregister_netdev(ndev);
389 kfree(ndev); 392 kfree(ndev);
390 393
@@ -443,6 +446,15 @@ static int rionet_setup_netdev(struct rio_mport *mport)
443 goto out; 446 goto out;
444 } 447 }
445 448
449 rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
450 mport->sys_size ? __ilog2(sizeof(void *)) + 4 : 0);
451 if (!rionet_active) {
452 rc = -ENOMEM;
453 goto out;
454 }
455 memset((void *)rionet_active, 0, sizeof(void *) *
456 RIO_MAX_ROUTE_ENTRIES(mport->sys_size));
457
446 /* Set up private area */ 458 /* Set up private area */
447 rnet = (struct rionet_private *)ndev->priv; 459 rnet = (struct rionet_private *)ndev->priv;
448 rnet->mport = mport; 460 rnet->mport = mport;
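
rionet_active becomes a dynamically allocated pointer array above, obtained from __get_free_pages() with order __ilog2(sizeof(void *)) + 4 for mports using large (16-bit) destination IDs and order 0 otherwise. The standalone calculation below shows what those orders buy, assuming 4 KiB pages; the implied route-entry counts are inferred from this sizing and should be read against the RIO_MAX_ROUTE_ENTRIES() definition rather than taken from the sketch.

#include <stdio.h>

/* Userspace stand-in for the kernel's __ilog2() */
static unsigned int ilog2u(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	int sys_size;

	for (sys_size = 0; sys_size <= 1; sys_size++) {
		/* Order used by rionet_setup_netdev()/rionet_remove() */
		unsigned int order = sys_size ? ilog2u(sizeof(void *)) + 4 : 0;
		unsigned long bytes = page_size << order;
		unsigned long entries = bytes / sizeof(void *);

		printf("sys_size=%d: order=%u -> %lu bytes, room for %lu"
		       " struct rio_dev pointers\n",
		       sys_size, order, bytes, entries);
	}
	return 0;
}
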
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 157fd932e951..523478ebfd69 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -86,7 +86,7 @@
86#include "s2io.h" 86#include "s2io.h"
87#include "s2io-regs.h" 87#include "s2io-regs.h"
88 88
89#define DRV_VERSION "2.0.26.22" 89#define DRV_VERSION "2.0.26.23"
90 90
91/* S2io Driver name & version. */ 91/* S2io Driver name & version. */
92static char s2io_driver_name[] = "Neterion"; 92static char s2io_driver_name[] = "Neterion";
@@ -809,6 +809,7 @@ static int init_shared_mem(struct s2io_nic *nic)
809 config->rx_cfg[i].num_rxd - 1; 809 config->rx_cfg[i].num_rxd - 1;
810 mac_control->rings[i].nic = nic; 810 mac_control->rings[i].nic = nic;
811 mac_control->rings[i].ring_no = i; 811 mac_control->rings[i].ring_no = i;
812 mac_control->rings[i].lro = lro_enable;
812 813
813 blk_cnt = config->rx_cfg[i].num_rxd / 814 blk_cnt = config->rx_cfg[i].num_rxd /
814 (rxd_count[nic->rxd_mode] + 1); 815 (rxd_count[nic->rxd_mode] + 1);
@@ -1560,113 +1561,112 @@ static int init_nic(struct s2io_nic *nic)
1560 writeq(val64, &bar0->tx_fifo_partition_0); 1561 writeq(val64, &bar0->tx_fifo_partition_0);
1561 1562
1562 /* Filling the Rx round robin registers as per the 1563 /* Filling the Rx round robin registers as per the
1563 * number of Rings and steering based on QoS. 1564 * number of Rings and steering based on QoS with
1564 */ 1565 * equal priority.
1566 */
1565 switch (config->rx_ring_num) { 1567 switch (config->rx_ring_num) {
1566 case 1: 1568 case 1:
1569 val64 = 0x0;
1570 writeq(val64, &bar0->rx_w_round_robin_0);
1571 writeq(val64, &bar0->rx_w_round_robin_1);
1572 writeq(val64, &bar0->rx_w_round_robin_2);
1573 writeq(val64, &bar0->rx_w_round_robin_3);
1574 writeq(val64, &bar0->rx_w_round_robin_4);
1575
1567 val64 = 0x8080808080808080ULL; 1576 val64 = 0x8080808080808080ULL;
1568 writeq(val64, &bar0->rts_qos_steering); 1577 writeq(val64, &bar0->rts_qos_steering);
1569 break; 1578 break;
1570 case 2: 1579 case 2:
1571 val64 = 0x0000010000010000ULL; 1580 val64 = 0x0001000100010001ULL;
1572 writeq(val64, &bar0->rx_w_round_robin_0); 1581 writeq(val64, &bar0->rx_w_round_robin_0);
1573 val64 = 0x0100000100000100ULL;
1574 writeq(val64, &bar0->rx_w_round_robin_1); 1582 writeq(val64, &bar0->rx_w_round_robin_1);
1575 val64 = 0x0001000001000001ULL;
1576 writeq(val64, &bar0->rx_w_round_robin_2); 1583 writeq(val64, &bar0->rx_w_round_robin_2);
1577 val64 = 0x0000010000010000ULL;
1578 writeq(val64, &bar0->rx_w_round_robin_3); 1584 writeq(val64, &bar0->rx_w_round_robin_3);
1579 val64 = 0x0100000000000000ULL; 1585 val64 = 0x0001000100000000ULL;
1580 writeq(val64, &bar0->rx_w_round_robin_4); 1586 writeq(val64, &bar0->rx_w_round_robin_4);
1581 1587
1582 val64 = 0x8080808040404040ULL; 1588 val64 = 0x8080808040404040ULL;
1583 writeq(val64, &bar0->rts_qos_steering); 1589 writeq(val64, &bar0->rts_qos_steering);
1584 break; 1590 break;
1585 case 3: 1591 case 3:
1586 val64 = 0x0001000102000001ULL; 1592 val64 = 0x0001020001020001ULL;
1587 writeq(val64, &bar0->rx_w_round_robin_0); 1593 writeq(val64, &bar0->rx_w_round_robin_0);
1588 val64 = 0x0001020000010001ULL; 1594 val64 = 0x0200010200010200ULL;
1589 writeq(val64, &bar0->rx_w_round_robin_1); 1595 writeq(val64, &bar0->rx_w_round_robin_1);
1590 val64 = 0x0200000100010200ULL; 1596 val64 = 0x0102000102000102ULL;
1591 writeq(val64, &bar0->rx_w_round_robin_2); 1597 writeq(val64, &bar0->rx_w_round_robin_2);
1592 val64 = 0x0001000102000001ULL; 1598 val64 = 0x0001020001020001ULL;
1593 writeq(val64, &bar0->rx_w_round_robin_3); 1599 writeq(val64, &bar0->rx_w_round_robin_3);
1594 val64 = 0x0001020000000000ULL; 1600 val64 = 0x0200010200000000ULL;
1595 writeq(val64, &bar0->rx_w_round_robin_4); 1601 writeq(val64, &bar0->rx_w_round_robin_4);
1596 1602
1597 val64 = 0x8080804040402020ULL; 1603 val64 = 0x8080804040402020ULL;
1598 writeq(val64, &bar0->rts_qos_steering); 1604 writeq(val64, &bar0->rts_qos_steering);
1599 break; 1605 break;
1600 case 4: 1606 case 4:
1601 val64 = 0x0001020300010200ULL; 1607 val64 = 0x0001020300010203ULL;
1602 writeq(val64, &bar0->rx_w_round_robin_0); 1608 writeq(val64, &bar0->rx_w_round_robin_0);
1603 val64 = 0x0100000102030001ULL;
1604 writeq(val64, &bar0->rx_w_round_robin_1); 1609 writeq(val64, &bar0->rx_w_round_robin_1);
1605 val64 = 0x0200010000010203ULL;
1606 writeq(val64, &bar0->rx_w_round_robin_2); 1610 writeq(val64, &bar0->rx_w_round_robin_2);
1607 val64 = 0x0001020001000001ULL;
1608 writeq(val64, &bar0->rx_w_round_robin_3); 1611 writeq(val64, &bar0->rx_w_round_robin_3);
1609 val64 = 0x0203000100000000ULL; 1612 val64 = 0x0001020300000000ULL;
1610 writeq(val64, &bar0->rx_w_round_robin_4); 1613 writeq(val64, &bar0->rx_w_round_robin_4);
1611 1614
1612 val64 = 0x8080404020201010ULL; 1615 val64 = 0x8080404020201010ULL;
1613 writeq(val64, &bar0->rts_qos_steering); 1616 writeq(val64, &bar0->rts_qos_steering);
1614 break; 1617 break;
1615 case 5: 1618 case 5:
1616 val64 = 0x0001000203000102ULL; 1619 val64 = 0x0001020304000102ULL;
1617 writeq(val64, &bar0->rx_w_round_robin_0); 1620 writeq(val64, &bar0->rx_w_round_robin_0);
1618 val64 = 0x0001020001030004ULL; 1621 val64 = 0x0304000102030400ULL;
1619 writeq(val64, &bar0->rx_w_round_robin_1); 1622 writeq(val64, &bar0->rx_w_round_robin_1);
1620 val64 = 0x0001000203000102ULL; 1623 val64 = 0x0102030400010203ULL;
1621 writeq(val64, &bar0->rx_w_round_robin_2); 1624 writeq(val64, &bar0->rx_w_round_robin_2);
1622 val64 = 0x0001020001030004ULL; 1625 val64 = 0x0400010203040001ULL;
1623 writeq(val64, &bar0->rx_w_round_robin_3); 1626 writeq(val64, &bar0->rx_w_round_robin_3);
1624 val64 = 0x0001000000000000ULL; 1627 val64 = 0x0203040000000000ULL;
1625 writeq(val64, &bar0->rx_w_round_robin_4); 1628 writeq(val64, &bar0->rx_w_round_robin_4);
1626 1629
1627 val64 = 0x8080404020201008ULL; 1630 val64 = 0x8080404020201008ULL;
1628 writeq(val64, &bar0->rts_qos_steering); 1631 writeq(val64, &bar0->rts_qos_steering);
1629 break; 1632 break;
1630 case 6: 1633 case 6:
1631 val64 = 0x0001020304000102ULL; 1634 val64 = 0x0001020304050001ULL;
1632 writeq(val64, &bar0->rx_w_round_robin_0); 1635 writeq(val64, &bar0->rx_w_round_robin_0);
1633 val64 = 0x0304050001020001ULL; 1636 val64 = 0x0203040500010203ULL;
1634 writeq(val64, &bar0->rx_w_round_robin_1); 1637 writeq(val64, &bar0->rx_w_round_robin_1);
1635 val64 = 0x0203000100000102ULL; 1638 val64 = 0x0405000102030405ULL;
1636 writeq(val64, &bar0->rx_w_round_robin_2); 1639 writeq(val64, &bar0->rx_w_round_robin_2);
1637 val64 = 0x0304000102030405ULL; 1640 val64 = 0x0001020304050001ULL;
1638 writeq(val64, &bar0->rx_w_round_robin_3); 1641 writeq(val64, &bar0->rx_w_round_robin_3);
1639 val64 = 0x0001000200000000ULL; 1642 val64 = 0x0203040500000000ULL;
1640 writeq(val64, &bar0->rx_w_round_robin_4); 1643 writeq(val64, &bar0->rx_w_round_robin_4);
1641 1644
1642 val64 = 0x8080404020100804ULL; 1645 val64 = 0x8080404020100804ULL;
1643 writeq(val64, &bar0->rts_qos_steering); 1646 writeq(val64, &bar0->rts_qos_steering);
1644 break; 1647 break;
1645 case 7: 1648 case 7:
1646 val64 = 0x0001020001020300ULL; 1649 val64 = 0x0001020304050600ULL;
1647 writeq(val64, &bar0->rx_w_round_robin_0); 1650 writeq(val64, &bar0->rx_w_round_robin_0);
1648 val64 = 0x0102030400010203ULL; 1651 val64 = 0x0102030405060001ULL;
1649 writeq(val64, &bar0->rx_w_round_robin_1); 1652 writeq(val64, &bar0->rx_w_round_robin_1);
1650 val64 = 0x0405060001020001ULL; 1653 val64 = 0x0203040506000102ULL;
1651 writeq(val64, &bar0->rx_w_round_robin_2); 1654 writeq(val64, &bar0->rx_w_round_robin_2);
1652 val64 = 0x0304050000010200ULL; 1655 val64 = 0x0304050600010203ULL;
1653 writeq(val64, &bar0->rx_w_round_robin_3); 1656 writeq(val64, &bar0->rx_w_round_robin_3);
1654 val64 = 0x0102030000000000ULL; 1657 val64 = 0x0405060000000000ULL;
1655 writeq(val64, &bar0->rx_w_round_robin_4); 1658 writeq(val64, &bar0->rx_w_round_robin_4);
1656 1659
1657 val64 = 0x8080402010080402ULL; 1660 val64 = 0x8080402010080402ULL;
1658 writeq(val64, &bar0->rts_qos_steering); 1661 writeq(val64, &bar0->rts_qos_steering);
1659 break; 1662 break;
1660 case 8: 1663 case 8:
1661 val64 = 0x0001020300040105ULL; 1664 val64 = 0x0001020304050607ULL;
1662 writeq(val64, &bar0->rx_w_round_robin_0); 1665 writeq(val64, &bar0->rx_w_round_robin_0);
1663 val64 = 0x0200030106000204ULL;
1664 writeq(val64, &bar0->rx_w_round_robin_1); 1666 writeq(val64, &bar0->rx_w_round_robin_1);
1665 val64 = 0x0103000502010007ULL;
1666 writeq(val64, &bar0->rx_w_round_robin_2); 1667 writeq(val64, &bar0->rx_w_round_robin_2);
1667 val64 = 0x0304010002060500ULL;
1668 writeq(val64, &bar0->rx_w_round_robin_3); 1668 writeq(val64, &bar0->rx_w_round_robin_3);
1669 val64 = 0x0103020400000000ULL; 1669 val64 = 0x0001020300000000ULL;
1670 writeq(val64, &bar0->rx_w_round_robin_4); 1670 writeq(val64, &bar0->rx_w_round_robin_4);
1671 1671
1672 val64 = 0x8040201008040201ULL; 1672 val64 = 0x8040201008040201ULL;
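
All of the rewritten rx_w_round_robin constants above follow one rule: 36 steering slots are filled with the ring numbers 0..N-1 repeating in order (equal priority), packed eight slots per 64-bit register with the first slot in the most significant byte, and the unused last four bytes of register 4 left zero. The generator below reproduces the values in the hunk, e.g. 0x0001020304000102 for five rings; it does not cover the separate rts_qos_steering weights.

#include <stdio.h>
#include <stdint.h>

/* Build the five rx_w_round_robin register values for 'rings' rx rings:
 * 36 slots, slot i steered to ring (i % rings), 8 slots per register,
 * first slot in the most significant byte. */
static void build_round_robin(unsigned int rings, uint64_t regs[5])
{
	unsigned int slot;

	for (slot = 0; slot < 5 * 8; slot++) {
		uint64_t ring = (slot < 36) ? (slot % rings) : 0;

		regs[slot / 8] = (regs[slot / 8] << 8) | ring;
	}
}

int main(void)
{
	unsigned int rings;

	for (rings = 1; rings <= 8; rings++) {
		uint64_t regs[5] = { 0, 0, 0, 0, 0 };
		int i;

		build_round_robin(rings, regs);
		printf("%u ring(s):", rings);
		for (i = 0; i < 5; i++)
			printf(" 0x%016llx", (unsigned long long)regs[i]);
		printf("\n");
	}
	return 0;
}
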
@@ -2499,8 +2499,7 @@ static void stop_nic(struct s2io_nic *nic)
2499 2499
2500/** 2500/**
2501 * fill_rx_buffers - Allocates the Rx side skbs 2501 * fill_rx_buffers - Allocates the Rx side skbs
2502 * @nic: device private variable 2502 * @ring_info: per ring structure
2503 * @ring_no: ring number
2504 * Description: 2503 * Description:
2505 * The function allocates Rx side skbs and puts the physical 2504 * The function allocates Rx side skbs and puts the physical
2506 * address of these buffers into the RxD buffer pointers, so that the NIC 2505 * address of these buffers into the RxD buffer pointers, so that the NIC
@@ -2518,103 +2517,94 @@ static void stop_nic(struct s2io_nic *nic)
2518 * SUCCESS on success or an appropriate -ve value on failure. 2517 * SUCCESS on success or an appropriate -ve value on failure.
2519 */ 2518 */
2520 2519
2521static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) 2520static int fill_rx_buffers(struct ring_info *ring)
2522{ 2521{
2523 struct net_device *dev = nic->dev;
2524 struct sk_buff *skb; 2522 struct sk_buff *skb;
2525 struct RxD_t *rxdp; 2523 struct RxD_t *rxdp;
2526 int off, off1, size, block_no, block_no1; 2524 int off, size, block_no, block_no1;
2527 u32 alloc_tab = 0; 2525 u32 alloc_tab = 0;
2528 u32 alloc_cnt; 2526 u32 alloc_cnt;
2529 struct mac_info *mac_control;
2530 struct config_param *config;
2531 u64 tmp; 2527 u64 tmp;
2532 struct buffAdd *ba; 2528 struct buffAdd *ba;
2533 struct RxD_t *first_rxdp = NULL; 2529 struct RxD_t *first_rxdp = NULL;
2534 u64 Buffer0_ptr = 0, Buffer1_ptr = 0; 2530 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2531 int rxd_index = 0;
2535 struct RxD1 *rxdp1; 2532 struct RxD1 *rxdp1;
2536 struct RxD3 *rxdp3; 2533 struct RxD3 *rxdp3;
2537 struct swStat *stats = &nic->mac_control.stats_info->sw_stat; 2534 struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
2538 2535
2539 mac_control = &nic->mac_control; 2536 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2540 config = &nic->config;
2541 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2542 atomic_read(&nic->rx_bufs_left[ring_no]);
2543 2537
2544 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index; 2538 block_no1 = ring->rx_curr_get_info.block_index;
2545 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2546 while (alloc_tab < alloc_cnt) { 2539 while (alloc_tab < alloc_cnt) {
2547 block_no = mac_control->rings[ring_no].rx_curr_put_info. 2540 block_no = ring->rx_curr_put_info.block_index;
2548 block_index;
2549 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2550 2541
2551 rxdp = mac_control->rings[ring_no]. 2542 off = ring->rx_curr_put_info.offset;
2552 rx_blocks[block_no].rxds[off].virt_addr; 2543
2544 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2545
2546 rxd_index = off + 1;
2547 if (block_no)
2548 rxd_index += (block_no * ring->rxd_count);
2553 2549
2554 if ((block_no == block_no1) && (off == off1) && 2550 if ((block_no == block_no1) &&
2555 (rxdp->Host_Control)) { 2551 (off == ring->rx_curr_get_info.offset) &&
2552 (rxdp->Host_Control)) {
2556 DBG_PRINT(INTR_DBG, "%s: Get and Put", 2553 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2557 dev->name); 2554 ring->dev->name);
2558 DBG_PRINT(INTR_DBG, " info equated\n"); 2555 DBG_PRINT(INTR_DBG, " info equated\n");
2559 goto end; 2556 goto end;
2560 } 2557 }
2561 if (off && (off == rxd_count[nic->rxd_mode])) { 2558 if (off && (off == ring->rxd_count)) {
2562 mac_control->rings[ring_no].rx_curr_put_info. 2559 ring->rx_curr_put_info.block_index++;
2563 block_index++; 2560 if (ring->rx_curr_put_info.block_index ==
2564 if (mac_control->rings[ring_no].rx_curr_put_info. 2561 ring->block_count)
2565 block_index == mac_control->rings[ring_no]. 2562 ring->rx_curr_put_info.block_index = 0;
2566 block_count) 2563 block_no = ring->rx_curr_put_info.block_index;
2567 mac_control->rings[ring_no].rx_curr_put_info. 2564 off = 0;
2568 block_index = 0; 2565 ring->rx_curr_put_info.offset = off;
2569 block_no = mac_control->rings[ring_no]. 2566 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2570 rx_curr_put_info.block_index;
2571 if (off == rxd_count[nic->rxd_mode])
2572 off = 0;
2573 mac_control->rings[ring_no].rx_curr_put_info.
2574 offset = off;
2575 rxdp = mac_control->rings[ring_no].
2576 rx_blocks[block_no].block_virt_addr;
2577 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2567 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2578 dev->name, rxdp); 2568 ring->dev->name, rxdp);
2569
2579 } 2570 }
2580 2571
2581 if ((rxdp->Control_1 & RXD_OWN_XENA) && 2572 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2582 ((nic->rxd_mode == RXD_MODE_3B) && 2573 ((ring->rxd_mode == RXD_MODE_3B) &&
2583 (rxdp->Control_2 & s2BIT(0)))) { 2574 (rxdp->Control_2 & s2BIT(0)))) {
2584 mac_control->rings[ring_no].rx_curr_put_info. 2575 ring->rx_curr_put_info.offset = off;
2585 offset = off;
2586 goto end; 2576 goto end;
2587 } 2577 }
2588 /* calculate size of skb based on ring mode */ 2578 /* calculate size of skb based on ring mode */
2589 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + 2579 size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2590 HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2580 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2591 if (nic->rxd_mode == RXD_MODE_1) 2581 if (ring->rxd_mode == RXD_MODE_1)
2592 size += NET_IP_ALIGN; 2582 size += NET_IP_ALIGN;
2593 else 2583 else
2594 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; 2584 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2595 2585
2596 /* allocate skb */ 2586 /* allocate skb */
2597 skb = dev_alloc_skb(size); 2587 skb = dev_alloc_skb(size);
2598 if(!skb) { 2588 if(!skb) {
2599 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); 2589 DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
2600 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n"); 2590 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2601 if (first_rxdp) { 2591 if (first_rxdp) {
2602 wmb(); 2592 wmb();
2603 first_rxdp->Control_1 |= RXD_OWN_XENA; 2593 first_rxdp->Control_1 |= RXD_OWN_XENA;
2604 } 2594 }
2605 nic->mac_control.stats_info->sw_stat. \ 2595 stats->mem_alloc_fail_cnt++;
2606 mem_alloc_fail_cnt++; 2596
2607 return -ENOMEM ; 2597 return -ENOMEM ;
2608 } 2598 }
2609 nic->mac_control.stats_info->sw_stat.mem_allocated 2599 stats->mem_allocated += skb->truesize;
2610 += skb->truesize; 2600
2611 if (nic->rxd_mode == RXD_MODE_1) { 2601 if (ring->rxd_mode == RXD_MODE_1) {
2612 /* 1 buffer mode - normal operation mode */ 2602 /* 1 buffer mode - normal operation mode */
2613 rxdp1 = (struct RxD1*)rxdp; 2603 rxdp1 = (struct RxD1*)rxdp;
2614 memset(rxdp, 0, sizeof(struct RxD1)); 2604 memset(rxdp, 0, sizeof(struct RxD1));
2615 skb_reserve(skb, NET_IP_ALIGN); 2605 skb_reserve(skb, NET_IP_ALIGN);
2616 rxdp1->Buffer0_ptr = pci_map_single 2606 rxdp1->Buffer0_ptr = pci_map_single
2617 (nic->pdev, skb->data, size - NET_IP_ALIGN, 2607 (ring->pdev, skb->data, size - NET_IP_ALIGN,
2618 PCI_DMA_FROMDEVICE); 2608 PCI_DMA_FROMDEVICE);
2619 if( (rxdp1->Buffer0_ptr == 0) || 2609 if( (rxdp1->Buffer0_ptr == 0) ||
2620 (rxdp1->Buffer0_ptr == 2610 (rxdp1->Buffer0_ptr ==
@@ -2623,8 +2613,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2623 2613
2624 rxdp->Control_2 = 2614 rxdp->Control_2 =
2625 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); 2615 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2626 2616 rxdp->Host_Control = (unsigned long) (skb);
2627 } else if (nic->rxd_mode == RXD_MODE_3B) { 2617 } else if (ring->rxd_mode == RXD_MODE_3B) {
2628 /* 2618 /*
2629 * 2 buffer mode - 2619 * 2 buffer mode -
2630 * 2 buffer mode provides 128 2620 * 2 buffer mode provides 128
@@ -2640,7 +2630,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2640 rxdp3->Buffer0_ptr = Buffer0_ptr; 2630 rxdp3->Buffer0_ptr = Buffer0_ptr;
2641 rxdp3->Buffer1_ptr = Buffer1_ptr; 2631 rxdp3->Buffer1_ptr = Buffer1_ptr;
2642 2632
2643 ba = &mac_control->rings[ring_no].ba[block_no][off]; 2633 ba = &ring->ba[block_no][off];
2644 skb_reserve(skb, BUF0_LEN); 2634 skb_reserve(skb, BUF0_LEN);
2645 tmp = (u64)(unsigned long) skb->data; 2635 tmp = (u64)(unsigned long) skb->data;
2646 tmp += ALIGN_SIZE; 2636 tmp += ALIGN_SIZE;
@@ -2650,10 +2640,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2650 2640
2651 if (!(rxdp3->Buffer0_ptr)) 2641 if (!(rxdp3->Buffer0_ptr))
2652 rxdp3->Buffer0_ptr = 2642 rxdp3->Buffer0_ptr =
2653 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2643 pci_map_single(ring->pdev, ba->ba_0,
2654 PCI_DMA_FROMDEVICE); 2644 BUF0_LEN, PCI_DMA_FROMDEVICE);
2655 else 2645 else
2656 pci_dma_sync_single_for_device(nic->pdev, 2646 pci_dma_sync_single_for_device(ring->pdev,
2657 (dma_addr_t) rxdp3->Buffer0_ptr, 2647 (dma_addr_t) rxdp3->Buffer0_ptr,
2658 BUF0_LEN, PCI_DMA_FROMDEVICE); 2648 BUF0_LEN, PCI_DMA_FROMDEVICE);
2659 if( (rxdp3->Buffer0_ptr == 0) || 2649 if( (rxdp3->Buffer0_ptr == 0) ||
@@ -2661,7 +2651,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2661 goto pci_map_failed; 2651 goto pci_map_failed;
2662 2652
2663 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2653 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2664 if (nic->rxd_mode == RXD_MODE_3B) { 2654 if (ring->rxd_mode == RXD_MODE_3B) {
2665 /* Two buffer mode */ 2655 /* Two buffer mode */
2666 2656
2667 /* 2657 /*
@@ -2669,39 +2659,42 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2669 * L4 payload 2659 * L4 payload
2670 */ 2660 */
2671 rxdp3->Buffer2_ptr = pci_map_single 2661 rxdp3->Buffer2_ptr = pci_map_single
2672 (nic->pdev, skb->data, dev->mtu + 4, 2662 (ring->pdev, skb->data, ring->mtu + 4,
2673 PCI_DMA_FROMDEVICE); 2663 PCI_DMA_FROMDEVICE);
2674 2664
2675 if( (rxdp3->Buffer2_ptr == 0) || 2665 if( (rxdp3->Buffer2_ptr == 0) ||
2676 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) 2666 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2677 goto pci_map_failed; 2667 goto pci_map_failed;
2678 2668
2679 rxdp3->Buffer1_ptr = 2669 if (!rxdp3->Buffer1_ptr)
2680 pci_map_single(nic->pdev, 2670 rxdp3->Buffer1_ptr =
2671 pci_map_single(ring->pdev,
2681 ba->ba_1, BUF1_LEN, 2672 ba->ba_1, BUF1_LEN,
2682 PCI_DMA_FROMDEVICE); 2673 PCI_DMA_FROMDEVICE);
2674
2683 if( (rxdp3->Buffer1_ptr == 0) || 2675 if( (rxdp3->Buffer1_ptr == 0) ||
2684 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { 2676 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2685 pci_unmap_single 2677 pci_unmap_single
2686 (nic->pdev, 2678 (ring->pdev,
2687 (dma_addr_t)rxdp3->Buffer2_ptr, 2679 (dma_addr_t)(unsigned long)
2688 dev->mtu + 4, 2680 skb->data,
2681 ring->mtu + 4,
2689 PCI_DMA_FROMDEVICE); 2682 PCI_DMA_FROMDEVICE);
2690 goto pci_map_failed; 2683 goto pci_map_failed;
2691 } 2684 }
2692 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2685 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2693 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2686 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2694 (dev->mtu + 4); 2687 (ring->mtu + 4);
2695 } 2688 }
2696 rxdp->Control_2 |= s2BIT(0); 2689 rxdp->Control_2 |= s2BIT(0);
2690 rxdp->Host_Control = (unsigned long) (skb);
2697 } 2691 }
2698 rxdp->Host_Control = (unsigned long) (skb);
2699 if (alloc_tab & ((1 << rxsync_frequency) - 1)) 2692 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2700 rxdp->Control_1 |= RXD_OWN_XENA; 2693 rxdp->Control_1 |= RXD_OWN_XENA;
2701 off++; 2694 off++;
2702 if (off == (rxd_count[nic->rxd_mode] + 1)) 2695 if (off == (ring->rxd_count + 1))
2703 off = 0; 2696 off = 0;
2704 mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2697 ring->rx_curr_put_info.offset = off;
2705 2698
2706 rxdp->Control_2 |= SET_RXD_MARKER; 2699 rxdp->Control_2 |= SET_RXD_MARKER;
2707 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { 2700 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
@@ -2711,7 +2704,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2711 } 2704 }
2712 first_rxdp = rxdp; 2705 first_rxdp = rxdp;
2713 } 2706 }
2714 atomic_inc(&nic->rx_bufs_left[ring_no]); 2707 ring->rx_bufs_left += 1;
2715 alloc_tab++; 2708 alloc_tab++;
2716 } 2709 }
2717 2710
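
One detail of fill_rx_buffers() that the hunk above shows only in pieces is the descriptor hand-off: RxDs are prepared first and made visible to the NIC afterwards. The following is a minimal, hypothetical reduction of that pattern — next_free_rxd() and fill_one_rxd() are invented stand-ins for the block/offset walk and the skb/DMA setup, while struct RxD_t, RXD_OWN_XENA, wmb() and rxsync_frequency are the driver's own names.

    u32 alloc_tab;
    struct RxD_t *rxdp, *first_rxdp = NULL;

    for (alloc_tab = 0; alloc_tab < alloc_cnt; alloc_tab++) {
            rxdp = next_free_rxd(ring);     /* invented: block/offset bookkeeping */
            fill_one_rxd(ring, rxdp);       /* invented: skb alloc + pci_map_single */

            if (alloc_tab & ((1 << rxsync_frequency) - 1)) {
                    /* Not a sync boundary: NIC may take this RxD immediately. */
                    rxdp->Control_1 |= RXD_OWN_XENA;
            } else {
                    /* Sync boundary: publish the previous batch head after a
                     * write barrier, then hold this one back as the new head. */
                    if (first_rxdp) {
                            wmb();
                            first_rxdp->Control_1 |= RXD_OWN_XENA;
                    }
                    first_rxdp = rxdp;
            }
    }
    /* The function's exit path performs the same wmb() + RXD_OWN_XENA flip on
     * the last remembered head (that code lies outside the hunk shown above). */
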
@@ -2783,7 +2776,7 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2783 } 2776 }
2784 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; 2777 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2785 dev_kfree_skb(skb); 2778 dev_kfree_skb(skb);
2786 atomic_dec(&sp->rx_bufs_left[ring_no]); 2779 mac_control->rings[ring_no].rx_bufs_left -= 1;
2787 } 2780 }
2788} 2781}
2789 2782
@@ -2814,7 +2807,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
2814 mac_control->rings[i].rx_curr_get_info.block_index = 0; 2807 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2815 mac_control->rings[i].rx_curr_put_info.offset = 0; 2808 mac_control->rings[i].rx_curr_put_info.offset = 0;
2816 mac_control->rings[i].rx_curr_get_info.offset = 0; 2809 mac_control->rings[i].rx_curr_get_info.offset = 0;
2817 atomic_set(&sp->rx_bufs_left[i], 0); 2810 mac_control->rings[i].rx_bufs_left = 0;
2818 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", 2811 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2819 dev->name, buf_cnt, i); 2812 dev->name, buf_cnt, i);
2820 } 2813 }
@@ -2864,7 +2857,7 @@ static int s2io_poll(struct napi_struct *napi, int budget)
2864 netif_rx_complete(dev, napi); 2857 netif_rx_complete(dev, napi);
2865 2858
2866 for (i = 0; i < config->rx_ring_num; i++) { 2859 for (i = 0; i < config->rx_ring_num; i++) {
2867 if (fill_rx_buffers(nic, i) == -ENOMEM) { 2860 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2868 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2861 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2869 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2862 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2870 break; 2863 break;
@@ -2877,7 +2870,7 @@ static int s2io_poll(struct napi_struct *napi, int budget)
2877 2870
2878no_rx: 2871no_rx:
2879 for (i = 0; i < config->rx_ring_num; i++) { 2872 for (i = 0; i < config->rx_ring_num; i++) {
2880 if (fill_rx_buffers(nic, i) == -ENOMEM) { 2873 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2881 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2874 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2882 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2875 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2883 break; 2876 break;
@@ -2928,7 +2921,7 @@ static void s2io_netpoll(struct net_device *dev)
2928 rx_intr_handler(&mac_control->rings[i]); 2921 rx_intr_handler(&mac_control->rings[i]);
2929 2922
2930 for (i = 0; i < config->rx_ring_num; i++) { 2923 for (i = 0; i < config->rx_ring_num; i++) {
2931 if (fill_rx_buffers(nic, i) == -ENOMEM) { 2924 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2932 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2925 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2933 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); 2926 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2934 break; 2927 break;
@@ -2953,8 +2946,6 @@ static void s2io_netpoll(struct net_device *dev)
2953 */ 2946 */
2954static void rx_intr_handler(struct ring_info *ring_data) 2947static void rx_intr_handler(struct ring_info *ring_data)
2955{ 2948{
2956 struct s2io_nic *nic = ring_data->nic;
2957 struct net_device *dev = (struct net_device *) nic->dev;
2958 int get_block, put_block; 2949 int get_block, put_block;
2959 struct rx_curr_get_info get_info, put_info; 2950 struct rx_curr_get_info get_info, put_info;
2960 struct RxD_t *rxdp; 2951 struct RxD_t *rxdp;
@@ -2977,33 +2968,34 @@ static void rx_intr_handler(struct ring_info *ring_data)
2977 */ 2968 */
2978 if ((get_block == put_block) && 2969 if ((get_block == put_block) &&
2979 (get_info.offset + 1) == put_info.offset) { 2970 (get_info.offset + 1) == put_info.offset) {
2980 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); 2971 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2972 ring_data->dev->name);
2981 break; 2973 break;
2982 } 2974 }
2983 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2975 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2984 if (skb == NULL) { 2976 if (skb == NULL) {
2985 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2977 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2986 dev->name); 2978 ring_data->dev->name);
2987 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 2979 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2988 return; 2980 return;
2989 } 2981 }
2990 if (nic->rxd_mode == RXD_MODE_1) { 2982 if (ring_data->rxd_mode == RXD_MODE_1) {
2991 rxdp1 = (struct RxD1*)rxdp; 2983 rxdp1 = (struct RxD1*)rxdp;
2992 pci_unmap_single(nic->pdev, (dma_addr_t) 2984 pci_unmap_single(ring_data->pdev, (dma_addr_t)
2993 rxdp1->Buffer0_ptr, 2985 rxdp1->Buffer0_ptr,
2994 dev->mtu + 2986 ring_data->mtu +
2995 HEADER_ETHERNET_II_802_3_SIZE + 2987 HEADER_ETHERNET_II_802_3_SIZE +
2996 HEADER_802_2_SIZE + 2988 HEADER_802_2_SIZE +
2997 HEADER_SNAP_SIZE, 2989 HEADER_SNAP_SIZE,
2998 PCI_DMA_FROMDEVICE); 2990 PCI_DMA_FROMDEVICE);
2999 } else if (nic->rxd_mode == RXD_MODE_3B) { 2991 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
3000 rxdp3 = (struct RxD3*)rxdp; 2992 rxdp3 = (struct RxD3*)rxdp;
3001 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2993 pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
3002 rxdp3->Buffer0_ptr, 2994 rxdp3->Buffer0_ptr,
3003 BUF0_LEN, PCI_DMA_FROMDEVICE); 2995 BUF0_LEN, PCI_DMA_FROMDEVICE);
3004 pci_unmap_single(nic->pdev, (dma_addr_t) 2996 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3005 rxdp3->Buffer2_ptr, 2997 rxdp3->Buffer2_ptr,
3006 dev->mtu + 4, 2998 ring_data->mtu + 4,
3007 PCI_DMA_FROMDEVICE); 2999 PCI_DMA_FROMDEVICE);
3008 } 3000 }
3009 prefetch(skb->data); 3001 prefetch(skb->data);
@@ -3012,7 +3004,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
3012 ring_data->rx_curr_get_info.offset = get_info.offset; 3004 ring_data->rx_curr_get_info.offset = get_info.offset;
3013 rxdp = ring_data->rx_blocks[get_block]. 3005 rxdp = ring_data->rx_blocks[get_block].
3014 rxds[get_info.offset].virt_addr; 3006 rxds[get_info.offset].virt_addr;
3015 if (get_info.offset == rxd_count[nic->rxd_mode]) { 3007 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3016 get_info.offset = 0; 3008 get_info.offset = 0;
3017 ring_data->rx_curr_get_info.offset = get_info.offset; 3009 ring_data->rx_curr_get_info.offset = get_info.offset;
3018 get_block++; 3010 get_block++;
@@ -3022,19 +3014,21 @@ static void rx_intr_handler(struct ring_info *ring_data)
3022 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 3014 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3023 } 3015 }
3024 3016
3025 nic->pkts_to_process -= 1; 3017 if(ring_data->nic->config.napi){
3026 if ((napi) && (!nic->pkts_to_process)) 3018 ring_data->nic->pkts_to_process -= 1;
3027 break; 3019 if (!ring_data->nic->pkts_to_process)
3020 break;
3021 }
3028 pkt_cnt++; 3022 pkt_cnt++;
3029 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 3023 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3030 break; 3024 break;
3031 } 3025 }
3032 if (nic->lro) { 3026 if (ring_data->lro) {
3033 /* Clear all LRO sessions before exiting */ 3027 /* Clear all LRO sessions before exiting */
3034 for (i=0; i<MAX_LRO_SESSIONS; i++) { 3028 for (i=0; i<MAX_LRO_SESSIONS; i++) {
3035 struct lro *lro = &nic->lro0_n[i]; 3029 struct lro *lro = &ring_data->lro0_n[i];
3036 if (lro->in_use) { 3030 if (lro->in_use) {
3037 update_L3L4_header(nic, lro); 3031 update_L3L4_header(ring_data->nic, lro);
3038 queue_rx_frame(lro->parent, lro->vlan_tag); 3032 queue_rx_frame(lro->parent, lro->vlan_tag);
3039 clear_lro_session(lro); 3033 clear_lro_session(lro);
3040 } 3034 }
@@ -4333,10 +4327,10 @@ s2io_alarm_handle(unsigned long data)
4333 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 4327 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4334} 4328}
4335 4329
4336static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n) 4330static int s2io_chk_rx_buffers(struct ring_info *ring)
4337{ 4331{
4338 if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { 4332 if (fill_rx_buffers(ring) == -ENOMEM) {
4339 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name); 4333 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
4340 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); 4334 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4341 } 4335 }
4342 return 0; 4336 return 0;
@@ -4351,7 +4345,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4351 return IRQ_HANDLED; 4345 return IRQ_HANDLED;
4352 4346
4353 rx_intr_handler(ring); 4347 rx_intr_handler(ring);
4354 s2io_chk_rx_buffers(sp, ring->ring_no); 4348 s2io_chk_rx_buffers(ring);
4355 4349
4356 return IRQ_HANDLED; 4350 return IRQ_HANDLED;
4357} 4351}
@@ -4809,7 +4803,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4809 */ 4803 */
4810 if (!config->napi) { 4804 if (!config->napi) {
4811 for (i = 0; i < config->rx_ring_num; i++) 4805 for (i = 0; i < config->rx_ring_num; i++)
4812 s2io_chk_rx_buffers(sp, i); 4806 s2io_chk_rx_buffers(&mac_control->rings[i]);
4813 } 4807 }
4814 writeq(sp->general_int_mask, &bar0->general_int_mask); 4808 writeq(sp->general_int_mask, &bar0->general_int_mask);
4815 readl(&bar0->general_int_status); 4809 readl(&bar0->general_int_status);
@@ -4866,6 +4860,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4866 struct s2io_nic *sp = dev->priv; 4860 struct s2io_nic *sp = dev->priv;
4867 struct mac_info *mac_control; 4861 struct mac_info *mac_control;
4868 struct config_param *config; 4862 struct config_param *config;
4863 int i;
4869 4864
4870 4865
4871 mac_control = &sp->mac_control; 4866 mac_control = &sp->mac_control;
@@ -4885,6 +4880,13 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4885 sp->stats.rx_length_errors = 4880 sp->stats.rx_length_errors =
4886 le64_to_cpu(mac_control->stats_info->rmac_long_frms); 4881 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4887 4882
4883 /* collect per-ring rx_packets and rx_bytes */
4884 sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4885 for (i = 0; i < config->rx_ring_num; i++) {
4886 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4887 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4888 }
4889
4888 return (&sp->stats); 4890 return (&sp->stats);
4889} 4891}
4890 4892
@@ -7157,7 +7159,9 @@ static int s2io_card_up(struct s2io_nic * sp)
7157 config = &sp->config; 7159 config = &sp->config;
7158 7160
7159 for (i = 0; i < config->rx_ring_num; i++) { 7161 for (i = 0; i < config->rx_ring_num; i++) {
7160 if ((ret = fill_rx_buffers(sp, i))) { 7162 mac_control->rings[i].mtu = dev->mtu;
7163 ret = fill_rx_buffers(&mac_control->rings[i]);
7164 if (ret) {
7161 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", 7165 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7162 dev->name); 7166 dev->name);
7163 s2io_reset(sp); 7167 s2io_reset(sp);
@@ -7165,7 +7169,7 @@ static int s2io_card_up(struct s2io_nic * sp)
7165 return -ENOMEM; 7169 return -ENOMEM;
7166 } 7170 }
7167 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, 7171 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7168 atomic_read(&sp->rx_bufs_left[i])); 7172 mac_control->rings[i].rx_bufs_left);
7169 } 7173 }
7170 7174
7171 /* Initialise napi */ 7175 /* Initialise napi */
@@ -7300,7 +7304,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
7300static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) 7304static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7301{ 7305{
7302 struct s2io_nic *sp = ring_data->nic; 7306 struct s2io_nic *sp = ring_data->nic;
7303 struct net_device *dev = (struct net_device *) sp->dev; 7307 struct net_device *dev = (struct net_device *) ring_data->dev;
7304 struct sk_buff *skb = (struct sk_buff *) 7308 struct sk_buff *skb = (struct sk_buff *)
7305 ((unsigned long) rxdp->Host_Control); 7309 ((unsigned long) rxdp->Host_Control);
7306 int ring_no = ring_data->ring_no; 7310 int ring_no = ring_data->ring_no;
@@ -7377,19 +7381,19 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7377 sp->mac_control.stats_info->sw_stat.mem_freed 7381 sp->mac_control.stats_info->sw_stat.mem_freed
7378 += skb->truesize; 7382 += skb->truesize;
7379 dev_kfree_skb(skb); 7383 dev_kfree_skb(skb);
7380 atomic_dec(&sp->rx_bufs_left[ring_no]); 7384 ring_data->rx_bufs_left -= 1;
7381 rxdp->Host_Control = 0; 7385 rxdp->Host_Control = 0;
7382 return 0; 7386 return 0;
7383 } 7387 }
7384 } 7388 }
7385 7389
7386 /* Updating statistics */ 7390 /* Updating statistics */
7387 sp->stats.rx_packets++; 7391 ring_data->rx_packets++;
7388 rxdp->Host_Control = 0; 7392 rxdp->Host_Control = 0;
7389 if (sp->rxd_mode == RXD_MODE_1) { 7393 if (sp->rxd_mode == RXD_MODE_1) {
7390 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); 7394 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7391 7395
7392 sp->stats.rx_bytes += len; 7396 ring_data->rx_bytes += len;
7393 skb_put(skb, len); 7397 skb_put(skb, len);
7394 7398
7395 } else if (sp->rxd_mode == RXD_MODE_3B) { 7399 } else if (sp->rxd_mode == RXD_MODE_3B) {
@@ -7400,13 +7404,13 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7400 unsigned char *buff = skb_push(skb, buf0_len); 7404 unsigned char *buff = skb_push(skb, buf0_len);
7401 7405
7402 struct buffAdd *ba = &ring_data->ba[get_block][get_off]; 7406 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7403 sp->stats.rx_bytes += buf0_len + buf2_len; 7407 ring_data->rx_bytes += buf0_len + buf2_len;
7404 memcpy(buff, ba->ba_0, buf0_len); 7408 memcpy(buff, ba->ba_0, buf0_len);
7405 skb_put(skb, buf2_len); 7409 skb_put(skb, buf2_len);
7406 } 7410 }
7407 7411
7408 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || 7412 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
7409 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && 7413 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7410 (sp->rx_csum)) { 7414 (sp->rx_csum)) {
7411 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); 7415 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7412 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); 7416 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
@@ -7417,14 +7421,14 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7417 * a flag in the RxD. 7421 * a flag in the RxD.
7418 */ 7422 */
7419 skb->ip_summed = CHECKSUM_UNNECESSARY; 7423 skb->ip_summed = CHECKSUM_UNNECESSARY;
7420 if (sp->lro) { 7424 if (ring_data->lro) {
7421 u32 tcp_len; 7425 u32 tcp_len;
7422 u8 *tcp; 7426 u8 *tcp;
7423 int ret = 0; 7427 int ret = 0;
7424 7428
7425 ret = s2io_club_tcp_session(skb->data, &tcp, 7429 ret = s2io_club_tcp_session(ring_data,
7426 &tcp_len, &lro, 7430 skb->data, &tcp, &tcp_len, &lro,
7427 rxdp, sp); 7431 rxdp, sp);
7428 switch (ret) { 7432 switch (ret) {
7429 case 3: /* Begin anew */ 7433 case 3: /* Begin anew */
7430 lro->parent = skb; 7434 lro->parent = skb;
@@ -7486,7 +7490,7 @@ send_up:
7486 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); 7490 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7487 dev->last_rx = jiffies; 7491 dev->last_rx = jiffies;
7488aggregate: 7492aggregate:
7489 atomic_dec(&sp->rx_bufs_left[ring_no]); 7493 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7490 return SUCCESS; 7494 return SUCCESS;
7491} 7495}
7492 7496
@@ -7603,12 +7607,14 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7603 tx_steering_type = NO_STEERING; 7607 tx_steering_type = NO_STEERING;
7604 } 7608 }
7605 7609
7606 if ( rx_ring_num > 8) { 7610 if (rx_ring_num > MAX_RX_RINGS) {
7607 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not " 7611 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7608 "supported\n"); 7612 "supported\n");
7609 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n"); 7613 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7610 rx_ring_num = 8; 7614 MAX_RX_RINGS);
7615 rx_ring_num = MAX_RX_RINGS;
7611 } 7616 }
7617
7612 if (*dev_intr_type != INTA) 7618 if (*dev_intr_type != INTA)
7613 napi = 0; 7619 napi = 0;
7614 7620
@@ -7836,10 +7842,15 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7836 7842
7837 /* Rx side parameters. */ 7843 /* Rx side parameters. */
7838 config->rx_ring_num = rx_ring_num; 7844 config->rx_ring_num = rx_ring_num;
7839 for (i = 0; i < MAX_RX_RINGS; i++) { 7845 for (i = 0; i < config->rx_ring_num; i++) {
7840 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 7846 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7841 (rxd_count[sp->rxd_mode] + 1); 7847 (rxd_count[sp->rxd_mode] + 1);
7842 config->rx_cfg[i].ring_priority = i; 7848 config->rx_cfg[i].ring_priority = i;
7849 mac_control->rings[i].rx_bufs_left = 0;
7850 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7851 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7852 mac_control->rings[i].pdev = sp->pdev;
7853 mac_control->rings[i].dev = sp->dev;
7843 } 7854 }
7844 7855
7845 for (i = 0; i < rx_ring_num; i++) { 7856 for (i = 0; i < rx_ring_num; i++) {
@@ -7854,10 +7865,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7854 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; 7865 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7855 7866
7856 7867
7857 /* Initialize Ring buffer parameters. */
7858 for (i = 0; i < config->rx_ring_num; i++)
7859 atomic_set(&sp->rx_bufs_left[i], 0);
7860
7861 /* initialize the shared memory used by the NIC and the host */ 7868 /* initialize the shared memory used by the NIC and the host */
7862 if (init_shared_mem(sp)) { 7869 if (init_shared_mem(sp)) {
7863 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 7870 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
@@ -8077,6 +8084,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8077 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, 8084 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8078 sp->config.tx_fifo_num); 8085 sp->config.tx_fifo_num);
8079 8086
8087 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8088 sp->config.rx_ring_num);
8089
8080 switch(sp->config.intr_type) { 8090 switch(sp->config.intr_type) {
8081 case INTA: 8091 case INTA:
8082 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); 8092 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -8391,8 +8401,9 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8391} 8401}
8392 8402
8393static int 8403static int
8394s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, 8404s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8395 struct RxD_t *rxdp, struct s2io_nic *sp) 8405 u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
8406 struct s2io_nic *sp)
8396{ 8407{
8397 struct iphdr *ip; 8408 struct iphdr *ip;
8398 struct tcphdr *tcph; 8409 struct tcphdr *tcph;
@@ -8410,7 +8421,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8410 tcph = (struct tcphdr *)*tcp; 8421 tcph = (struct tcphdr *)*tcp;
8411 *tcp_len = get_l4_pyld_length(ip, tcph); 8422 *tcp_len = get_l4_pyld_length(ip, tcph);
8412 for (i=0; i<MAX_LRO_SESSIONS; i++) { 8423 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8413 struct lro *l_lro = &sp->lro0_n[i]; 8424 struct lro *l_lro = &ring_data->lro0_n[i];
8414 if (l_lro->in_use) { 8425 if (l_lro->in_use) {
8415 if (check_for_socket_match(l_lro, ip, tcph)) 8426 if (check_for_socket_match(l_lro, ip, tcph))
8416 continue; 8427 continue;
@@ -8448,7 +8459,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8448 } 8459 }
8449 8460
8450 for (i=0; i<MAX_LRO_SESSIONS; i++) { 8461 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8451 struct lro *l_lro = &sp->lro0_n[i]; 8462 struct lro *l_lro = &ring_data->lro0_n[i];
8452 if (!(l_lro->in_use)) { 8463 if (!(l_lro->in_use)) {
8453 *lro = l_lro; 8464 *lro = l_lro;
8454 ret = 3; /* Begin anew */ 8465 ret = 3; /* Begin anew */
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index ce53a02105f2..0709ebae9139 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -678,11 +678,53 @@ struct rx_block_info {
678 struct rxd_info *rxds; 678 struct rxd_info *rxds;
679}; 679};
680 680
681/* Data structure to represent a LRO session */
682struct lro {
683 struct sk_buff *parent;
684 struct sk_buff *last_frag;
685 u8 *l2h;
686 struct iphdr *iph;
687 struct tcphdr *tcph;
688 u32 tcp_next_seq;
689 __be32 tcp_ack;
690 int total_len;
691 int frags_len;
692 int sg_num;
693 int in_use;
694 __be16 window;
695 u16 vlan_tag;
696 u32 cur_tsval;
697 __be32 cur_tsecr;
698 u8 saw_ts;
699} ____cacheline_aligned;
700
681/* Ring specific structure */ 701/* Ring specific structure */
682struct ring_info { 702struct ring_info {
683 /* The ring number */ 703 /* The ring number */
684 int ring_no; 704 int ring_no;
685 705
706 /* per-ring buffer counter */
707 u32 rx_bufs_left;
708
709 #define MAX_LRO_SESSIONS 32
710 struct lro lro0_n[MAX_LRO_SESSIONS];
711 u8 lro;
712
713 /* copy of sp->rxd_mode flag */
714 int rxd_mode;
715
716 /* Number of rxds per block for the rxd_mode */
717 int rxd_count;
718
719 /* copy of sp pointer */
720 struct s2io_nic *nic;
721
722 /* copy of sp->dev pointer */
723 struct net_device *dev;
724
725 /* copy of sp->pdev pointer */
726 struct pci_dev *pdev;
727
686 /* 728 /*
687 * Place holders for the virtual and physical addresses of 729 * Place holders for the virtual and physical addresses of
688 * all the Rx Blocks 730 * all the Rx Blocks
@@ -703,10 +745,16 @@ struct ring_info {
703 */ 745 */
704 struct rx_curr_get_info rx_curr_get_info; 746 struct rx_curr_get_info rx_curr_get_info;
705 747
748 /* interface MTU value */
749 unsigned mtu;
750
706 /* Buffer Address store. */ 751 /* Buffer Address store. */
707 struct buffAdd **ba; 752 struct buffAdd **ba;
708 struct s2io_nic *nic; 753
709}; 754 /* per-Ring statistics */
755 unsigned long rx_packets;
756 unsigned long rx_bytes;
757} ____cacheline_aligned;
710 758
711/* Fifo specific structure */ 759/* Fifo specific structure */
712struct fifo_info { 760struct fifo_info {
@@ -813,26 +861,6 @@ struct msix_info_st {
813 u64 data; 861 u64 data;
814}; 862};
815 863
816/* Data structure to represent a LRO session */
817struct lro {
818 struct sk_buff *parent;
819 struct sk_buff *last_frag;
820 u8 *l2h;
821 struct iphdr *iph;
822 struct tcphdr *tcph;
823 u32 tcp_next_seq;
824 __be32 tcp_ack;
825 int total_len;
826 int frags_len;
827 int sg_num;
828 int in_use;
829 __be16 window;
830 u16 vlan_tag;
831 u32 cur_tsval;
832 __be32 cur_tsecr;
833 u8 saw_ts;
834} ____cacheline_aligned;
835
836/* These flags represent the devices temporary state */ 864/* These flags represent the devices temporary state */
837enum s2io_device_state_t 865enum s2io_device_state_t
838{ 866{
@@ -872,8 +900,6 @@ struct s2io_nic {
872 /* Space to back up the PCI config space */ 900 /* Space to back up the PCI config space */
873 u32 config_space[256 / sizeof(u32)]; 901 u32 config_space[256 / sizeof(u32)];
874 902
875 atomic_t rx_bufs_left[MAX_RX_RINGS];
876
877#define PROMISC 1 903#define PROMISC 1
878#define ALL_MULTI 2 904#define ALL_MULTI 2
879 905
@@ -950,8 +976,6 @@ struct s2io_nic {
950#define XFRAME_II_DEVICE 2 976#define XFRAME_II_DEVICE 2
951 u8 device_type; 977 u8 device_type;
952 978
953#define MAX_LRO_SESSIONS 32
954 struct lro lro0_n[MAX_LRO_SESSIONS];
955 unsigned long clubbed_frms_cnt; 979 unsigned long clubbed_frms_cnt;
956 unsigned long sending_both; 980 unsigned long sending_both;
957 u8 lro; 981 u8 lro;
@@ -1118,9 +1142,9 @@ static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr);
1118static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset); 1142static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset);
1119static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr); 1143static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr);
1120 1144
1121static int 1145static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
1122s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, 1146 u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
1123 struct RxD_t *rxdp, struct s2io_nic *sp); 1147 struct s2io_nic *sp);
1124static void clear_lro_session(struct lro *lro); 1148static void clear_lro_session(struct lro *lro);
1125static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag); 1149static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag);
1126static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); 1150static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
new file mode 100644
index 000000000000..dbad95c295bd
--- /dev/null
+++ b/drivers/net/sfc/Kconfig
@@ -0,0 +1,12 @@
1config SFC
2 tristate "Solarflare Solarstorm SFC4000 support"
3 depends on PCI && INET
4 select MII
5 select INET_LRO
6 select CRC32
7 help
8 This driver supports 10-gigabit Ethernet cards based on
9 the Solarflare Communications Solarstorm SFC4000 controller.
10
11 To compile this driver as a module, choose M here. The module
12 will be called sfc.
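
Enabling the new driver follows the usual Kconfig/module flow; shown here only as a usage note, not as part of the patch:

    # .config fragment (CONFIG_SFC depends on PCI && INET; MII, INET_LRO and
    # CRC32 are selected automatically)
    CONFIG_SFC=m

    # after building and installing modules
    modprobe sfc
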
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
new file mode 100644
index 000000000000..0f023447eafd
--- /dev/null
+++ b/drivers/net/sfc/Makefile
@@ -0,0 +1,5 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
2 i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \
3 tenxpress.o boards.o sfe4001.o
4
5obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
new file mode 100644
index 000000000000..2806201644cc
--- /dev/null
+++ b/drivers/net/sfc/bitfield.h
@@ -0,0 +1,508 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_BITFIELD_H
12#define EFX_BITFIELD_H
13
14/*
15 * Efx bitfield access
16 *
17 * Efx NICs make extensive use of bitfields up to 128 bits
18 * wide. Since there is no native 128-bit datatype on most systems,
19 * and since 64-bit datatypes are inefficient on 32-bit systems and
20 * vice versa, we wrap accesses in a way that uses the most efficient
21 * datatype.
22 *
23 * The NICs are PCI devices and therefore little-endian. Since most
24 * of the quantities that we deal with are DMAed to/from host memory,
25 * we define our datatypes (efx_oword_t, efx_qword_t and
26 * efx_dword_t) to be little-endian.
27 */
28
29/* Lowest bit numbers and widths */
30#define EFX_DUMMY_FIELD_LBN 0
31#define EFX_DUMMY_FIELD_WIDTH 0
32#define EFX_DWORD_0_LBN 0
33#define EFX_DWORD_0_WIDTH 32
34#define EFX_DWORD_1_LBN 32
35#define EFX_DWORD_1_WIDTH 32
36#define EFX_DWORD_2_LBN 64
37#define EFX_DWORD_2_WIDTH 32
38#define EFX_DWORD_3_LBN 96
39#define EFX_DWORD_3_WIDTH 32
40
41/* Specified attribute (e.g. LBN) of the specified field */
42#define EFX_VAL(field, attribute) field ## _ ## attribute
43/* Low bit number of the specified field */
44#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
45/* Bit width of the specified field */
46#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
47/* High bit number of the specified field */
48#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
49/* Mask equal in width to the specified field.
50 *
51 * For example, a field with width 5 would have a mask of 0x1f.
52 *
53 * The maximum width mask that can be generated is 64 bits.
54 */
55#define EFX_MASK64(field) \
56 (EFX_WIDTH(field) == 64 ? ~((u64) 0) : \
57 (((((u64) 1) << EFX_WIDTH(field))) - 1))
58
59/* Mask equal in width to the specified field.
60 *
61 * For example, a field with width 5 would have a mask of 0x1f.
62 *
63 * The maximum width mask that can be generated is 32 bits. Use
64 * EFX_MASK64 for higher width fields.
65 */
66#define EFX_MASK32(field) \
67 (EFX_WIDTH(field) == 32 ? ~((u32) 0) : \
68 (((((u32) 1) << EFX_WIDTH(field))) - 1))
69
70/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
71typedef union efx_dword {
72 __le32 u32[1];
73} efx_dword_t;
74
75/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
76typedef union efx_qword {
77 __le64 u64[1];
78 __le32 u32[2];
79 efx_dword_t dword[2];
80} efx_qword_t;
81
82/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
83typedef union efx_oword {
84 __le64 u64[2];
85 efx_qword_t qword[2];
86 __le32 u32[4];
87 efx_dword_t dword[4];
88} efx_oword_t;
89
90/* Format string and value expanders for printk */
91#define EFX_DWORD_FMT "%08x"
92#define EFX_QWORD_FMT "%08x:%08x"
93#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
94#define EFX_DWORD_VAL(dword) \
95 ((unsigned int) le32_to_cpu((dword).u32[0]))
96#define EFX_QWORD_VAL(qword) \
97 ((unsigned int) le32_to_cpu((qword).u32[1])), \
98 ((unsigned int) le32_to_cpu((qword).u32[0]))
99#define EFX_OWORD_VAL(oword) \
100 ((unsigned int) le32_to_cpu((oword).u32[3])), \
101 ((unsigned int) le32_to_cpu((oword).u32[2])), \
102 ((unsigned int) le32_to_cpu((oword).u32[1])), \
103 ((unsigned int) le32_to_cpu((oword).u32[0]))
104
105/*
106 * Extract bit field portion [low,high) from the native-endian element
107 * which contains bits [min,max).
108 *
109 * For example, suppose "element" represents the high 32 bits of a
110 * 64-bit value, and we wish to extract the bits belonging to the bit
111 * field occupying bits 28-45 of this 64-bit value.
112 *
113 * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
114 *
115 * ( element ) << 4
116 *
117 * The result will contain the relevant bits filled in in the range
118 * [0,high-low), with garbage in bits [high-low+1,...).
119 */
120#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
121 (((low > max) || (high < min)) ? 0 : \
122 ((low > min) ? \
123 ((native_element) >> (low - min)) : \
124 ((native_element) << (min - low))))
125
126/*
127 * Extract bit field portion [low,high) from the 64-bit little-endian
128 * element which contains bits [min,max)
129 */
130#define EFX_EXTRACT64(element, min, max, low, high) \
131 EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
132
133/*
134 * Extract bit field portion [low,high) from the 32-bit little-endian
135 * element which contains bits [min,max)
136 */
137#define EFX_EXTRACT32(element, min, max, low, high) \
138 EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
139
140#define EFX_EXTRACT_OWORD64(oword, low, high) \
141 (EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
142 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high))
143
144#define EFX_EXTRACT_QWORD64(qword, low, high) \
145 EFX_EXTRACT64((qword).u64[0], 0, 63, low, high)
146
147#define EFX_EXTRACT_OWORD32(oword, low, high) \
148 (EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
149 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
150 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
151 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high))
152
153#define EFX_EXTRACT_QWORD32(qword, low, high) \
154 (EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
155 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high))
156
157#define EFX_EXTRACT_DWORD(dword, low, high) \
158 EFX_EXTRACT32((dword).u32[0], 0, 31, low, high)
159
160#define EFX_OWORD_FIELD64(oword, field) \
161 (EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
162 & EFX_MASK64(field))
163
164#define EFX_QWORD_FIELD64(qword, field) \
165 (EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
166 & EFX_MASK64(field))
167
168#define EFX_OWORD_FIELD32(oword, field) \
169 (EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
170 & EFX_MASK32(field))
171
172#define EFX_QWORD_FIELD32(qword, field) \
173 (EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
174 & EFX_MASK32(field))
175
176#define EFX_DWORD_FIELD(dword, field) \
177 (EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
178 & EFX_MASK32(field))
179
180#define EFX_OWORD_IS_ZERO64(oword) \
181 (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
182
183#define EFX_QWORD_IS_ZERO64(qword) \
184 (((qword).u64[0]) == (__force __le64) 0)
185
186#define EFX_OWORD_IS_ZERO32(oword) \
187 (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
188 == (__force __le32) 0)
189
190#define EFX_QWORD_IS_ZERO32(qword) \
191 (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
192
193#define EFX_DWORD_IS_ZERO(dword) \
194 (((dword).u32[0]) == (__force __le32) 0)
195
196#define EFX_OWORD_IS_ALL_ONES64(oword) \
197 (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
198
199#define EFX_QWORD_IS_ALL_ONES64(qword) \
200 ((qword).u64[0] == ~((__force __le64) 0))
201
202#define EFX_OWORD_IS_ALL_ONES32(oword) \
203 (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
204 == ~((__force __le32) 0))
205
206#define EFX_QWORD_IS_ALL_ONES32(qword) \
207 (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
208
209#define EFX_DWORD_IS_ALL_ONES(dword) \
210 ((dword).u32[0] == ~((__force __le32) 0))
211
212#if BITS_PER_LONG == 64
213#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
214#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
215#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
216#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
217#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
218#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
219#else
220#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
221#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
222#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
223#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
224#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
225#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
226#endif
227
228/*
229 * Construct bit field portion
230 *
231 * Creates the portion of the bit field [low,high) that lies within
232 * the range [min,max).
233 */
234#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
235 (((low > max) || (high < min)) ? 0 : \
236 ((low > min) ? \
237 (((u64) (value)) << (low - min)) : \
238 (((u64) (value)) >> (min - low))))
239
240#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
241 (((low > max) || (high < min)) ? 0 : \
242 ((low > min) ? \
243 (((u32) (value)) << (low - min)) : \
244 (((u32) (value)) >> (min - low))))
245
246#define EFX_INSERT_NATIVE(min, max, low, high, value) \
247 ((((max - min) >= 32) || ((high - low) >= 32)) ? \
248 EFX_INSERT_NATIVE64(min, max, low, high, value) : \
249 EFX_INSERT_NATIVE32(min, max, low, high, value))
250
251/*
252 * Construct bit field portion
253 *
254 * Creates the portion of the named bit field that lies within the
255 * range [min,max).
256 */
257#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
258 EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
259 EFX_HIGH_BIT(field), value)
260
261/*
262 * Construct bit field
263 *
264 * Creates the portion of the named bit fields that lie within the
265 * range [min,max).
266 */
267#define EFX_INSERT_FIELDS_NATIVE(min, max, \
268 field1, value1, \
269 field2, value2, \
270 field3, value3, \
271 field4, value4, \
272 field5, value5, \
273 field6, value6, \
274 field7, value7, \
275 field8, value8, \
276 field9, value9, \
277 field10, value10) \
278 (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
279 EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
280 EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
281 EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
282 EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
283 EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
284 EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
285 EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
286 EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
287 EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
288
289#define EFX_INSERT_FIELDS64(...) \
290 cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
291
292#define EFX_INSERT_FIELDS32(...) \
293 cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
294
295#define EFX_POPULATE_OWORD64(oword, ...) do { \
296 (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
297 (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
298 } while (0)
299
300#define EFX_POPULATE_QWORD64(qword, ...) do { \
301 (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
302 } while (0)
303
304#define EFX_POPULATE_OWORD32(oword, ...) do { \
305 (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
306 (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
307 (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
308 (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
309 } while (0)
310
311#define EFX_POPULATE_QWORD32(qword, ...) do { \
312 (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
313 (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
314 } while (0)
315
316#define EFX_POPULATE_DWORD(dword, ...) do { \
317 (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
318 } while (0)
319
320#if BITS_PER_LONG == 64
321#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
322#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
323#else
324#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
325#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
326#endif
327
328/* Populate an octword field with various numbers of arguments */
329#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
330#define EFX_POPULATE_OWORD_9(oword, ...) \
331 EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
332#define EFX_POPULATE_OWORD_8(oword, ...) \
333 EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
334#define EFX_POPULATE_OWORD_7(oword, ...) \
335 EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
336#define EFX_POPULATE_OWORD_6(oword, ...) \
337 EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
338#define EFX_POPULATE_OWORD_5(oword, ...) \
339 EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
340#define EFX_POPULATE_OWORD_4(oword, ...) \
341 EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
342#define EFX_POPULATE_OWORD_3(oword, ...) \
343 EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
344#define EFX_POPULATE_OWORD_2(oword, ...) \
345 EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
346#define EFX_POPULATE_OWORD_1(oword, ...) \
347 EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
348#define EFX_ZERO_OWORD(oword) \
349 EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
350#define EFX_SET_OWORD(oword) \
351 EFX_POPULATE_OWORD_4(oword, \
352 EFX_DWORD_0, 0xffffffff, \
353 EFX_DWORD_1, 0xffffffff, \
354 EFX_DWORD_2, 0xffffffff, \
355 EFX_DWORD_3, 0xffffffff)
356
357/* Populate a quadword field with various numbers of arguments */
358#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
359#define EFX_POPULATE_QWORD_9(qword, ...) \
360 EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
361#define EFX_POPULATE_QWORD_8(qword, ...) \
362 EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
363#define EFX_POPULATE_QWORD_7(qword, ...) \
364 EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
365#define EFX_POPULATE_QWORD_6(qword, ...) \
366 EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
367#define EFX_POPULATE_QWORD_5(qword, ...) \
368 EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
369#define EFX_POPULATE_QWORD_4(qword, ...) \
370 EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
371#define EFX_POPULATE_QWORD_3(qword, ...) \
372 EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
373#define EFX_POPULATE_QWORD_2(qword, ...) \
374 EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
375#define EFX_POPULATE_QWORD_1(qword, ...) \
376 EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
377#define EFX_ZERO_QWORD(qword) \
378 EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
379#define EFX_SET_QWORD(qword) \
380 EFX_POPULATE_QWORD_2(qword, \
381 EFX_DWORD_0, 0xffffffff, \
382 EFX_DWORD_1, 0xffffffff)
383
384/* Populate a dword field with various numbers of arguments */
385#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
386#define EFX_POPULATE_DWORD_9(dword, ...) \
387 EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
388#define EFX_POPULATE_DWORD_8(dword, ...) \
389 EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
390#define EFX_POPULATE_DWORD_7(dword, ...) \
391 EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
392#define EFX_POPULATE_DWORD_6(dword, ...) \
393 EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
394#define EFX_POPULATE_DWORD_5(dword, ...) \
395 EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
396#define EFX_POPULATE_DWORD_4(dword, ...) \
397 EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
398#define EFX_POPULATE_DWORD_3(dword, ...) \
399 EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
400#define EFX_POPULATE_DWORD_2(dword, ...) \
401 EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
402#define EFX_POPULATE_DWORD_1(dword, ...) \
403 EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
404#define EFX_ZERO_DWORD(dword) \
405 EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
406#define EFX_SET_DWORD(dword) \
407 EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
408
409/*
410 * Modify a named field within an already-populated structure. Used
411 * for read-modify-write operations.
412 *
413 */
414
415#define EFX_INVERT_OWORD(oword) do { \
416 (oword).u64[0] = ~((oword).u64[0]); \
417 (oword).u64[1] = ~((oword).u64[1]); \
418 } while (0)
419
420#define EFX_INSERT_FIELD64(...) \
421 cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
422
423#define EFX_INSERT_FIELD32(...) \
424 cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
425
426#define EFX_INPLACE_MASK64(min, max, field) \
427 EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field))
428
429#define EFX_INPLACE_MASK32(min, max, field) \
430 EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field))
431
432#define EFX_SET_OWORD_FIELD64(oword, field, value) do { \
433 (oword).u64[0] = (((oword).u64[0] \
434 & ~EFX_INPLACE_MASK64(0, 63, field)) \
435 | EFX_INSERT_FIELD64(0, 63, field, value)); \
436 (oword).u64[1] = (((oword).u64[1] \
437 & ~EFX_INPLACE_MASK64(64, 127, field)) \
438 | EFX_INSERT_FIELD64(64, 127, field, value)); \
439 } while (0)
440
441#define EFX_SET_QWORD_FIELD64(qword, field, value) do { \
442 (qword).u64[0] = (((qword).u64[0] \
443 & ~EFX_INPLACE_MASK64(0, 63, field)) \
444 | EFX_INSERT_FIELD64(0, 63, field, value)); \
445 } while (0)
446
447#define EFX_SET_OWORD_FIELD32(oword, field, value) do { \
448 (oword).u32[0] = (((oword).u32[0] \
449 & ~EFX_INPLACE_MASK32(0, 31, field)) \
450 | EFX_INSERT_FIELD32(0, 31, field, value)); \
451 (oword).u32[1] = (((oword).u32[1] \
452 & ~EFX_INPLACE_MASK32(32, 63, field)) \
453 | EFX_INSERT_FIELD32(32, 63, field, value)); \
454 (oword).u32[2] = (((oword).u32[2] \
455 & ~EFX_INPLACE_MASK32(64, 95, field)) \
456 | EFX_INSERT_FIELD32(64, 95, field, value)); \
457 (oword).u32[3] = (((oword).u32[3] \
458 & ~EFX_INPLACE_MASK32(96, 127, field)) \
459 | EFX_INSERT_FIELD32(96, 127, field, value)); \
460 } while (0)
461
462#define EFX_SET_QWORD_FIELD32(qword, field, value) do { \
463 (qword).u32[0] = (((qword).u32[0] \
464 & ~EFX_INPLACE_MASK32(0, 31, field)) \
465 | EFX_INSERT_FIELD32(0, 31, field, value)); \
466 (qword).u32[1] = (((qword).u32[1] \
467 & ~EFX_INPLACE_MASK32(32, 63, field)) \
468 | EFX_INSERT_FIELD32(32, 63, field, value)); \
469 } while (0)
470
471#define EFX_SET_DWORD_FIELD(dword, field, value) do { \
472 (dword).u32[0] = (((dword).u32[0] \
473 & ~EFX_INPLACE_MASK32(0, 31, field)) \
474 | EFX_INSERT_FIELD32(0, 31, field, value)); \
475 } while (0)
476
477#if BITS_PER_LONG == 64
478#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
479#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
480#else
481#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
482#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
483#endif
484
485#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
486 if (FALCON_REV(efx) >= FALCON_REV_B0) { \
487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
488 } else { \
489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
490 } \
491} while (0)
492
493#define EFX_QWORD_FIELD_VER(efx, qword, field) \
494 (FALCON_REV(efx) >= FALCON_REV_B0 ? \
495 EFX_QWORD_FIELD((qword), field##_B0) : \
496 EFX_QWORD_FIELD((qword), field##_A1))
497
498/* Used to avoid compiler warnings about shift range exceeding width
499 * of the data types when dma_addr_t is only 32 bits wide.
500 */
501#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
502#define EFX_DMA_TYPE_WIDTH(width) \
503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
504#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
505 ~((u64) 0) : ~((u32) 0))
506#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
507
508#endif /* EFX_BITFIELD_H */
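
Because bitfield.h is macro-only, a short usage sketch may be clearer than prose. This is a hypothetical example, not code from the patch: EXAMPLE_FIELD is an invented field that follows the required _LBN/_WIDTH naming convention; every macro and type used comes from the header above.

    #include <linux/kernel.h>
    #include "bitfield.h"

    /* Invented example field, following the _LBN/_WIDTH convention: bits 12..19 */
    #define EXAMPLE_FIELD_LBN 12
    #define EXAMPLE_FIELD_WIDTH 8

    static void example_bitfield_usage(void)
    {
            efx_oword_t reg;
            u64 val;

            /* Build a 128-bit value from named fields; unnamed bits become 0. */
            EFX_POPULATE_OWORD_2(reg,
                                 EXAMPLE_FIELD, 0xab,
                                 EFX_DWORD_3, 0xdeadbeef);

            /* Read a field back; the 32- or 64-bit variant is picked per arch. */
            val = EFX_OWORD_FIELD(reg, EXAMPLE_FIELD);      /* == 0xab */

            /* Read-modify-write of one named field, leaving the rest intact. */
            EFX_SET_OWORD_FIELD(reg, EXAMPLE_FIELD, val + 1);

            printk(KERN_DEBUG "reg = " EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
    }
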
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
new file mode 100644
index 000000000000..eecaa6d58584
--- /dev/null
+++ b/drivers/net/sfc/boards.c
@@ -0,0 +1,167 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "phy.h"
12#include "boards.h"
13#include "efx.h"
14
15/* Macros for unpacking the board revision */
16/* The revision info is in host byte order. */
17#define BOARD_TYPE(_rev) (_rev >> 8)
18#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
19#define BOARD_MINOR(_rev) (_rev & 0xf)
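/* For example, a revision word of 0x0213 unpacks as board type 0x02
 * (SFE4002), major 1 and minor 3, which efx_set_board_info() below
 * reports as "rev B3" ('A' + major, followed by minor).
 */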
20
21/* Blink support. If the PHY has no auto-blink mode, we hang it off a timer */
22#define BLINK_INTERVAL (HZ/2)
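/* With BLINK_INTERVAL set to HZ/2 the timer fires every half second,
 * so the LED completes one full on/off cycle per second regardless of
 * the kernel's HZ setting.
 */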
23
24static void blink_led_timer(unsigned long context)
25{
26 struct efx_nic *efx = (struct efx_nic *)context;
27 struct efx_blinker *bl = &efx->board_info.blinker;
28 efx->board_info.set_fault_led(efx, bl->state);
29 bl->state = !bl->state;
30 if (bl->resubmit) {
31 bl->timer.expires = jiffies + BLINK_INTERVAL;
32 add_timer(&bl->timer);
33 }
34}
35
36static void board_blink(struct efx_nic *efx, int blink)
37{
38 struct efx_blinker *blinker = &efx->board_info.blinker;
39
40 /* The rtnl mutex serialises all ethtool ioctls, so
41 * nothing special needs doing here. */
42 if (blink) {
43 blinker->resubmit = 1;
44 blinker->state = 0;
45 setup_timer(&blinker->timer, blink_led_timer,
46 (unsigned long)efx);
47 blinker->timer.expires = jiffies + BLINK_INTERVAL;
48 add_timer(&blinker->timer);
49 } else {
50 blinker->resubmit = 0;
51 if (blinker->timer.function)
52 del_timer_sync(&blinker->timer);
53 efx->board_info.set_fault_led(efx, 0);
54 }
55}
56
57/*****************************************************************************
58 * Support for the SFE4002
59 *
60 */
61/****************************************************************************/
62/* LED allocations. Note that on rev A0 boards the schematic and the reality
63 * differ: red and green are swapped. Below is the fixed (A1) layout (there
64 * are only 3 A0 boards in existence, so no real reason to make this
65 * conditional).
66 */
67#define SFE4002_FAULT_LED (2) /* Red */
68#define SFE4002_RX_LED (0) /* Green */
69#define SFE4002_TX_LED (1) /* Amber */
70
71static int sfe4002_init_leds(struct efx_nic *efx)
72{
73 /* Set the TX and RX LEDs to reflect status and activity, and the
74 * fault LED off */
75 xfp_set_led(efx, SFE4002_TX_LED,
76 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
77 xfp_set_led(efx, SFE4002_RX_LED,
78 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
79 xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
80 efx->board_info.blinker.led_num = SFE4002_FAULT_LED;
81 return 0;
82}
83
84static void sfe4002_fault_led(struct efx_nic *efx, int state)
85{
86 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
87 QUAKE_LED_OFF);
88}
89
90static int sfe4002_init(struct efx_nic *efx)
91{
92 efx->board_info.init_leds = sfe4002_init_leds;
93 efx->board_info.set_fault_led = sfe4002_fault_led;
94 efx->board_info.blink = board_blink;
95 return 0;
96}
97
98/* This will get expanded as board-specific details get moved out of the
99 * PHY drivers. */
100struct efx_board_data {
101 const char *ref_model;
102 const char *gen_type;
103 int (*init) (struct efx_nic *nic);
104};
105
106static int dummy_init(struct efx_nic *nic)
107{
108 return 0;
109}
110
111static struct efx_board_data board_data[] = {
112 [EFX_BOARD_INVALID] =
113 {NULL, NULL, dummy_init},
114 [EFX_BOARD_SFE4001] =
115 {"SFE4001", "10GBASE-T adapter", sfe4001_poweron},
116 [EFX_BOARD_SFE4002] =
117 {"SFE4002", "XFP adapter", sfe4002_init},
118};
119
120int efx_set_board_info(struct efx_nic *efx, u16 revision_info)
121{
122 int rc = 0;
123 struct efx_board_data *data;
124
125 if (BOARD_TYPE(revision_info) >= EFX_BOARD_MAX) {
126 EFX_ERR(efx, "squashing unknown board type %d\n",
127 BOARD_TYPE(revision_info));
128 revision_info = 0;
129 }
130
131 if (BOARD_TYPE(revision_info) == 0) {
132 efx->board_info.major = 0;
133 efx->board_info.minor = 0;
134		/* For early boards that don't have revision info, there is
135 * only 1 board for each PHY type, so we can work it out, with
136 * the exception of the PHY-less boards. */
137 switch (efx->phy_type) {
138 case PHY_TYPE_10XPRESS:
139 efx->board_info.type = EFX_BOARD_SFE4001;
140 break;
141 case PHY_TYPE_XFP:
142 efx->board_info.type = EFX_BOARD_SFE4002;
143 break;
144 default:
145 efx->board_info.type = 0;
146 break;
147 }
148 } else {
149 efx->board_info.type = BOARD_TYPE(revision_info);
150 efx->board_info.major = BOARD_MAJOR(revision_info);
151 efx->board_info.minor = BOARD_MINOR(revision_info);
152 }
153
154 data = &board_data[efx->board_info.type];
155
156 /* Report the board model number or generic type for recognisable
157 * boards. */
158 if (efx->board_info.type != 0)
159 EFX_INFO(efx, "board is %s rev %c%d\n",
160 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
161 ? data->ref_model : data->gen_type,
162 'A' + efx->board_info.major, efx->board_info.minor);
163
164 efx->board_info.init = data->init;
165
166 return rc;
167}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
new file mode 100644
index 000000000000..f56341d428e1
--- /dev/null
+++ b/drivers/net/sfc/boards.h
@@ -0,0 +1,26 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_BOARDS_H
11#define EFX_BOARDS_H
12
13/* Board IDs (must fit in 8 bits) */
14enum efx_board_type {
15 EFX_BOARD_INVALID = 0,
16 EFX_BOARD_SFE4001 = 1, /* SFE4001 (10GBASE-T) */
17 EFX_BOARD_SFE4002 = 2,
18 /* Insert new types before here */
19 EFX_BOARD_MAX
20};
21
22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
23extern int sfe4001_poweron(struct efx_nic *efx);
24extern void sfe4001_poweroff(struct efx_nic *efx);
25
26#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
new file mode 100644
index 000000000000..59edcf793c19
--- /dev/null
+++ b/drivers/net/sfc/efx.c
@@ -0,0 +1,2208 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/delay.h>
16#include <linux/notifier.h>
17#include <linux/ip.h>
18#include <linux/tcp.h>
19#include <linux/in.h>
20#include <linux/crc32.h>
21#include <linux/ethtool.h>
22#include "net_driver.h"
23#include "gmii.h"
24#include "ethtool.h"
25#include "tx.h"
26#include "rx.h"
27#include "efx.h"
28#include "mdio_10g.h"
29#include "falcon.h"
30#include "workarounds.h"
31#include "mac.h"
32
33#define EFX_MAX_MTU (9 * 1024)
34
35/* RX slow fill workqueue. If memory allocation fails in the fast path,
36 * a work item is pushed onto this work queue to retry the allocation later,
37 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
38 * workqueue, there is nothing to be gained in making it per-NIC.
39 */
40static struct workqueue_struct *refill_workqueue;
41
42/**************************************************************************
43 *
44 * Configurable values
45 *
46 *************************************************************************/
47
48/*
49 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
50 *
51 * This sets the default for new devices. It can be controlled later
52 * using ethtool.
53 */
54static int lro = 1;
55module_param(lro, int, 0644);
56MODULE_PARM_DESC(lro, "Large receive offload acceleration");
57
58/*
59 * Use separate channels for TX and RX events
60 *
61 * Set this to 1 to use separate channels for TX and RX. It allows us to
62 * apply a higher level of interrupt moderation to TX events.
63 *
64 * This is forced to 0 for MSI interrupt mode as the interrupt vector
65 * is not written
66 */
67static unsigned int separate_tx_and_rx_channels = 1;
68
69/* This is the weight assigned to each of the (per-channel) virtual
70 * NAPI devices.
71 */
72static int napi_weight = 64;
73
74/* This is the time (in jiffies) between invocations of the hardware
75 * monitor, which checks for known hardware bugs and resets the
76 * hardware and driver as necessary.
77 */
78unsigned int efx_monitor_interval = 1 * HZ;
79
80/* This controls whether or not the hardware monitor will trigger a
81 * reset when it detects an error condition.
82 */
83static unsigned int monitor_reset = 1;
84
85/* This controls whether or not the driver will initialise devices
86 * with invalid MAC addresses stored in the EEPROM or flash. If true,
87 * such devices will be initialised with a random locally-generated
88 * MAC address. This allows for loading the sfc_mtd driver to
89 * reprogram the flash, even if the flash contents (including the MAC
90 * address) have previously been erased.
91 */
92static unsigned int allow_bad_hwaddr;
93
94/* Initial interrupt moderation settings. They can be modified after
95 * module load with ethtool.
96 *
97 * The default for RX should strike a balance between increasing the
98 * round-trip latency and reducing overhead.
99 */
100static unsigned int rx_irq_mod_usec = 60;
101
102/* Initial interrupt moderation settings. They can be modified after
103 * module load with ethtool.
104 *
105 * This default is chosen to ensure that a 10G link does not go idle
106 * while a TX queue is stopped after it has become full. A queue is
107 * restarted when it drops below half full. The time this takes (assuming
108 * worst case 3 descriptors per packet and 1024 descriptors) is
109 * 512 / 3 * 1.2 = 205 usec.
110 */
111static unsigned int tx_irq_mod_usec = 150;
112
113/* This is the first interrupt mode to try out of:
114 * 0 => MSI-X
115 * 1 => MSI
116 * 2 => legacy
117 */
118static unsigned int interrupt_mode;
119
120/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
121 * i.e. the number of CPUs among which we may distribute simultaneous
122 * interrupt handling.
123 *
124 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
125 * The default (0) means to assign an interrupt to each package (level II cache)
126 */
127static unsigned int rss_cpus;
128module_param(rss_cpus, uint, 0444);
129MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
130
131/**************************************************************************
132 *
133 * Utility functions and prototypes
134 *
135 *************************************************************************/
136static void efx_remove_channel(struct efx_channel *channel);
137static void efx_remove_port(struct efx_nic *efx);
138static void efx_fini_napi(struct efx_nic *efx);
139static void efx_fini_channels(struct efx_nic *efx);
140
141#define EFX_ASSERT_RESET_SERIALISED(efx) \
142 do { \
143 if ((efx->state == STATE_RUNNING) || \
144 (efx->state == STATE_RESETTING)) \
145 ASSERT_RTNL(); \
146 } while (0)
147
148/**************************************************************************
149 *
150 * Event queue processing
151 *
152 *************************************************************************/
153
154/* Process channel's event queue
155 *
156 * This function is responsible for processing the event queue of a
157 * single channel. The caller must guarantee that this function will
158 * never be concurrently called more than once on the same channel,
159 * though different channels may be being processed concurrently.
160 */
161static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
162{
163 int rxdmaqs;
164 struct efx_rx_queue *rx_queue;
165
166 if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
167 !channel->enabled))
168 return rx_quota;
169
170 rxdmaqs = falcon_process_eventq(channel, &rx_quota);
171
172 /* Deliver last RX packet. */
173 if (channel->rx_pkt) {
174 __efx_rx_packet(channel, channel->rx_pkt,
175 channel->rx_pkt_csummed);
176 channel->rx_pkt = NULL;
177 }
178
179 efx_flush_lro(channel);
180 efx_rx_strategy(channel);
181
182 /* Refill descriptor rings as necessary */
183 rx_queue = &channel->efx->rx_queue[0];
184 while (rxdmaqs) {
185 if (rxdmaqs & 0x01)
186 efx_fast_push_rx_descriptors(rx_queue);
187 rx_queue++;
188 rxdmaqs >>= 1;
189 }
190
191 return rx_quota;
192}
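/* Note on the refill loop above: falcon_process_eventq() is assumed to
 * return a bitmask of RX DMA queues that consumed descriptors, so e.g.
 * rxdmaqs == 0x5 pushes fresh descriptors to rx_queue[0] and
 * rx_queue[2] and leaves the other queues untouched.
 */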
193
194/* Mark channel as finished processing
195 *
196 * Note that since we will not receive further interrupts for this
197 * channel before we finish processing and call the eventq_read_ack()
198 * method, there is no need to use the interrupt hold-off timers.
199 */
200static inline void efx_channel_processed(struct efx_channel *channel)
201{
202 /* Write to EVQ_RPTR_REG. If a new event arrived in a race
203 * with finishing processing, a new interrupt will be raised.
204 */
205 channel->work_pending = 0;
206 smp_wmb(); /* Ensure channel updated before any new interrupt. */
207 falcon_eventq_read_ack(channel);
208}
209
210/* NAPI poll handler
211 *
212 * NAPI guarantees serialisation of polls of the same device, which
213 * provides the guarantee required by efx_process_channel().
214 */
215static int efx_poll(struct napi_struct *napi, int budget)
216{
217 struct efx_channel *channel =
218 container_of(napi, struct efx_channel, napi_str);
219 struct net_device *napi_dev = channel->napi_dev;
220 int unused;
221 int rx_packets;
222
223 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
224 channel->channel, raw_smp_processor_id());
225
226 unused = efx_process_channel(channel, budget);
227 rx_packets = (budget - unused);
228
229 if (rx_packets < budget) {
230 /* There is no race here; although napi_disable() will
231 * only wait for netif_rx_complete(), this isn't a problem
232 * since efx_channel_processed() will have no effect if
233 * interrupts have already been disabled.
234 */
235 netif_rx_complete(napi_dev, napi);
236 efx_channel_processed(channel);
237 }
238
239 return rx_packets;
240}
241
242/* Process the eventq of the specified channel immediately on this CPU
243 *
244 * Disable hardware generated interrupts, wait for any existing
245 * processing to finish, then directly poll (and ack) the eventq.
246 * Finally reenable NAPI and interrupts.
247 *
248 * Since we are touching interrupts the caller should hold the suspend lock
249 */
250void efx_process_channel_now(struct efx_channel *channel)
251{
252 struct efx_nic *efx = channel->efx;
253
254 BUG_ON(!channel->used_flags);
255 BUG_ON(!channel->enabled);
256
257 /* Disable interrupts and wait for ISRs to complete */
258 falcon_disable_interrupts(efx);
259 if (efx->legacy_irq)
260 synchronize_irq(efx->legacy_irq);
261 if (channel->has_interrupt && channel->irq)
262 synchronize_irq(channel->irq);
263
264 /* Wait for any NAPI processing to complete */
265 napi_disable(&channel->napi_str);
266
267 /* Poll the channel */
268 (void) efx_process_channel(channel, efx->type->evq_size);
269
270 /* Ack the eventq. This may cause an interrupt to be generated
271	 * when interrupts are re-enabled */
272 efx_channel_processed(channel);
273
274 napi_enable(&channel->napi_str);
275 falcon_enable_interrupts(efx);
276}
277
278/* Create event queue
279 * Event queue memory allocations are done only once. If the channel
280 * is reset, the memory buffer will be reused; this guards against
281 * errors during channel reset and also simplifies interrupt handling.
282 */
283static int efx_probe_eventq(struct efx_channel *channel)
284{
285 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
286
287 return falcon_probe_eventq(channel);
288}
289
290/* Prepare channel's event queue */
291static int efx_init_eventq(struct efx_channel *channel)
292{
293 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
294
295 channel->eventq_read_ptr = 0;
296
297 return falcon_init_eventq(channel);
298}
299
300static void efx_fini_eventq(struct efx_channel *channel)
301{
302 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
303
304 falcon_fini_eventq(channel);
305}
306
307static void efx_remove_eventq(struct efx_channel *channel)
308{
309 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
310
311 falcon_remove_eventq(channel);
312}
313
314/**************************************************************************
315 *
316 * Channel handling
317 *
318 *************************************************************************/
319
320/* Setup per-NIC RX buffer parameters.
321 * Calculate the rx buffer allocation parameters required to support
322 * the current MTU, including padding for header alignment and overruns.
323 */
324static void efx_calc_rx_buffer_params(struct efx_nic *efx)
325{
326 unsigned int order, len;
327
328 len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
329 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
330 efx->type->rx_buffer_padding);
331
332 /* Calculate page-order */
333 for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
334 ;
335
336 efx->rx_buffer_len = len;
337 efx->rx_buffer_order = order;
338}
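/* Rough example (the exact figure depends on EFX_MAX_FRAME_LEN and the
 * per-NIC padding): with a 9000-byte MTU the buffer length comes to a
 * little over 9KB, so with 4KB pages the loop above settles on order 2,
 * i.e. 16KB allocations per RX buffer.
 */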
339
340static int efx_probe_channel(struct efx_channel *channel)
341{
342 struct efx_tx_queue *tx_queue;
343 struct efx_rx_queue *rx_queue;
344 int rc;
345
346 EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
347
348 rc = efx_probe_eventq(channel);
349 if (rc)
350 goto fail1;
351
352 efx_for_each_channel_tx_queue(tx_queue, channel) {
353 rc = efx_probe_tx_queue(tx_queue);
354 if (rc)
355 goto fail2;
356 }
357
358 efx_for_each_channel_rx_queue(rx_queue, channel) {
359 rc = efx_probe_rx_queue(rx_queue);
360 if (rc)
361 goto fail3;
362 }
363
364 channel->n_rx_frm_trunc = 0;
365
366 return 0;
367
368 fail3:
369 efx_for_each_channel_rx_queue(rx_queue, channel)
370 efx_remove_rx_queue(rx_queue);
371 fail2:
372 efx_for_each_channel_tx_queue(tx_queue, channel)
373 efx_remove_tx_queue(tx_queue);
374 fail1:
375 return rc;
376}
377
378
379/* Channels are shut down and reinitialised whilst the NIC is running
380 * to propagate configuration changes (mtu, checksum offload), or
381 * to clear hardware error conditions
382 */
383static int efx_init_channels(struct efx_nic *efx)
384{
385 struct efx_tx_queue *tx_queue;
386 struct efx_rx_queue *rx_queue;
387 struct efx_channel *channel;
388 int rc = 0;
389
390 efx_calc_rx_buffer_params(efx);
391
392 /* Initialise the channels */
393 efx_for_each_channel(channel, efx) {
394 EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
395
396 rc = efx_init_eventq(channel);
397 if (rc)
398 goto err;
399
400 efx_for_each_channel_tx_queue(tx_queue, channel) {
401 rc = efx_init_tx_queue(tx_queue);
402 if (rc)
403 goto err;
404 }
405
406 /* The rx buffer allocation strategy is MTU dependent */
407 efx_rx_strategy(channel);
408
409 efx_for_each_channel_rx_queue(rx_queue, channel) {
410 rc = efx_init_rx_queue(rx_queue);
411 if (rc)
412 goto err;
413 }
414
415 WARN_ON(channel->rx_pkt != NULL);
416 efx_rx_strategy(channel);
417 }
418
419 return 0;
420
421 err:
422 EFX_ERR(efx, "failed to initialise channel %d\n",
423 channel ? channel->channel : -1);
424 efx_fini_channels(efx);
425 return rc;
426}
427
428/* This enables event queue processing and packet transmission.
429 *
430 * Note that this function is not allowed to fail, since that would
431 * introduce too much complexity into the suspend/resume path.
432 */
433static void efx_start_channel(struct efx_channel *channel)
434{
435 struct efx_rx_queue *rx_queue;
436
437 EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
438
439 if (!(channel->efx->net_dev->flags & IFF_UP))
440 netif_napi_add(channel->napi_dev, &channel->napi_str,
441 efx_poll, napi_weight);
442
443 channel->work_pending = 0;
444 channel->enabled = 1;
445 smp_wmb(); /* ensure channel updated before first interrupt */
446
447 napi_enable(&channel->napi_str);
448
449 /* Load up RX descriptors */
450 efx_for_each_channel_rx_queue(rx_queue, channel)
451 efx_fast_push_rx_descriptors(rx_queue);
452}
453
454/* This disables event queue processing and packet transmission.
455 * This function does not guarantee that all queue processing
456 * (e.g. RX refill) is complete.
457 */
458static void efx_stop_channel(struct efx_channel *channel)
459{
460 struct efx_rx_queue *rx_queue;
461
462 if (!channel->enabled)
463 return;
464
465 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
466
467 channel->enabled = 0;
468 napi_disable(&channel->napi_str);
469
470 /* Ensure that any worker threads have exited or will be no-ops */
471 efx_for_each_channel_rx_queue(rx_queue, channel) {
472 spin_lock_bh(&rx_queue->add_lock);
473 spin_unlock_bh(&rx_queue->add_lock);
474 }
475}
476
477static void efx_fini_channels(struct efx_nic *efx)
478{
479 struct efx_channel *channel;
480 struct efx_tx_queue *tx_queue;
481 struct efx_rx_queue *rx_queue;
482
483 EFX_ASSERT_RESET_SERIALISED(efx);
484 BUG_ON(efx->port_enabled);
485
486 efx_for_each_channel(channel, efx) {
487 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
488
489 efx_for_each_channel_rx_queue(rx_queue, channel)
490 efx_fini_rx_queue(rx_queue);
491 efx_for_each_channel_tx_queue(tx_queue, channel)
492 efx_fini_tx_queue(tx_queue);
493 }
494
495 /* Do the event queues last so that we can handle flush events
496 * for all DMA queues. */
497 efx_for_each_channel(channel, efx) {
498 EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
499
500 efx_fini_eventq(channel);
501 }
502}
503
504static void efx_remove_channel(struct efx_channel *channel)
505{
506 struct efx_tx_queue *tx_queue;
507 struct efx_rx_queue *rx_queue;
508
509 EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
510
511 efx_for_each_channel_rx_queue(rx_queue, channel)
512 efx_remove_rx_queue(rx_queue);
513 efx_for_each_channel_tx_queue(tx_queue, channel)
514 efx_remove_tx_queue(tx_queue);
515 efx_remove_eventq(channel);
516
517 channel->used_flags = 0;
518}
519
520void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
521{
522 queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
523}
524
525/**************************************************************************
526 *
527 * Port handling
528 *
529 **************************************************************************/
530
531/* This ensures that the kernel is kept informed (via
532 * netif_carrier_on/off) of the link status, and also maintains the
533 * link status's stop on the port's TX queue.
534 */
535static void efx_link_status_changed(struct efx_nic *efx)
536{
537 int carrier_ok;
538
539 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
540 * that no events are triggered between unregister_netdev() and the
541 * driver unloading. A more general condition is that NETDEV_CHANGE
542 * can only be generated between NETDEV_UP and NETDEV_DOWN */
543 if (!netif_running(efx->net_dev))
544 return;
545
546 carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
547 if (efx->link_up != carrier_ok) {
548 efx->n_link_state_changes++;
549
550 if (efx->link_up)
551 netif_carrier_on(efx->net_dev);
552 else
553 netif_carrier_off(efx->net_dev);
554 }
555
556 /* Status message for kernel log */
557 if (efx->link_up) {
558 struct mii_if_info *gmii = &efx->mii;
559 unsigned adv, lpa;
560 /* NONE here means direct XAUI from the controller, with no
561 * MDIO-attached device we can query. */
562 if (efx->phy_type != PHY_TYPE_NONE) {
563 adv = gmii_advertised(gmii);
564 lpa = gmii_lpa(gmii);
565 } else {
566 lpa = GM_LPA_10000 | LPA_DUPLEX;
567 adv = lpa;
568 }
569 EFX_INFO(efx, "link up at %dMbps %s-duplex "
570 "(adv %04x lpa %04x) (MTU %d)%s\n",
571 (efx->link_options & GM_LPA_10000 ? 10000 :
572 (efx->link_options & GM_LPA_1000 ? 1000 :
573 (efx->link_options & GM_LPA_100 ? 100 :
574 10))),
575 (efx->link_options & GM_LPA_DUPLEX ?
576 "full" : "half"),
577 adv, lpa,
578 efx->net_dev->mtu,
579 (efx->promiscuous ? " [PROMISC]" : ""));
580 } else {
581 EFX_INFO(efx, "link down\n");
582 }
583
584}
585
586/* This call reinitialises the MAC to pick up new PHY settings. The
587 * caller must hold the mac_lock */
588static void __efx_reconfigure_port(struct efx_nic *efx)
589{
590 WARN_ON(!mutex_is_locked(&efx->mac_lock));
591
592 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
593 raw_smp_processor_id());
594
595 falcon_reconfigure_xmac(efx);
596
597 /* Inform kernel of loss/gain of carrier */
598 efx_link_status_changed(efx);
599}
600
601/* Reinitialise the MAC to pick up new PHY settings, even if the port is
602 * disabled. */
603void efx_reconfigure_port(struct efx_nic *efx)
604{
605 EFX_ASSERT_RESET_SERIALISED(efx);
606
607 mutex_lock(&efx->mac_lock);
608 __efx_reconfigure_port(efx);
609 mutex_unlock(&efx->mac_lock);
610}
611
612/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
613 * we don't call efx_reconfigure_port() if the port is disabled. Care is taken
614 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
615static void efx_reconfigure_work(struct work_struct *data)
616{
617 struct efx_nic *efx = container_of(data, struct efx_nic,
618 reconfigure_work);
619
620 mutex_lock(&efx->mac_lock);
621 if (efx->port_enabled)
622 __efx_reconfigure_port(efx);
623 mutex_unlock(&efx->mac_lock);
624}
625
626static int efx_probe_port(struct efx_nic *efx)
627{
628 int rc;
629
630 EFX_LOG(efx, "create port\n");
631
632 /* Connect up MAC/PHY operations table and read MAC address */
633 rc = falcon_probe_port(efx);
634 if (rc)
635 goto err;
636
637 /* Sanity check MAC address */
638 if (is_valid_ether_addr(efx->mac_address)) {
639 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
640 } else {
641 DECLARE_MAC_BUF(mac);
642
643 EFX_ERR(efx, "invalid MAC address %s\n",
644 print_mac(mac, efx->mac_address));
645 if (!allow_bad_hwaddr) {
646 rc = -EINVAL;
647 goto err;
648 }
649 random_ether_addr(efx->net_dev->dev_addr);
650 EFX_INFO(efx, "using locally-generated MAC %s\n",
651 print_mac(mac, efx->net_dev->dev_addr));
652 }
653
654 return 0;
655
656 err:
657 efx_remove_port(efx);
658 return rc;
659}
660
661static int efx_init_port(struct efx_nic *efx)
662{
663 int rc;
664
665 EFX_LOG(efx, "init port\n");
666
667 /* Initialise the MAC and PHY */
668 rc = falcon_init_xmac(efx);
669 if (rc)
670 return rc;
671
672 efx->port_initialized = 1;
673
674 /* Reconfigure port to program MAC registers */
675 falcon_reconfigure_xmac(efx);
676
677 return 0;
678}
679
680/* Allow efx_reconfigure_port() to be scheduled, and close the window
681 * between efx_stop_port and efx_flush_all whereby a previously scheduled
682 * efx_reconfigure_port() may have been cancelled */
683static void efx_start_port(struct efx_nic *efx)
684{
685 EFX_LOG(efx, "start port\n");
686 BUG_ON(efx->port_enabled);
687
688 mutex_lock(&efx->mac_lock);
689 efx->port_enabled = 1;
690 __efx_reconfigure_port(efx);
691 mutex_unlock(&efx->mac_lock);
692}
693
694/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
695 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
696 * efx_reconfigure_work can still be scheduled via NAPI processing
697 * until efx_flush_all() is called */
698static void efx_stop_port(struct efx_nic *efx)
699{
700 EFX_LOG(efx, "stop port\n");
701
702 mutex_lock(&efx->mac_lock);
703 efx->port_enabled = 0;
704 mutex_unlock(&efx->mac_lock);
705
706 /* Serialise against efx_set_multicast_list() */
707 if (NET_DEV_REGISTERED(efx)) {
708 netif_tx_lock_bh(efx->net_dev);
709 netif_tx_unlock_bh(efx->net_dev);
710 }
711}
712
713static void efx_fini_port(struct efx_nic *efx)
714{
715 EFX_LOG(efx, "shut down port\n");
716
717 if (!efx->port_initialized)
718 return;
719
720 falcon_fini_xmac(efx);
721 efx->port_initialized = 0;
722
723 efx->link_up = 0;
724 efx_link_status_changed(efx);
725}
726
727static void efx_remove_port(struct efx_nic *efx)
728{
729 EFX_LOG(efx, "destroying port\n");
730
731 falcon_remove_port(efx);
732}
733
734/**************************************************************************
735 *
736 * NIC handling
737 *
738 **************************************************************************/
739
740/* This configures the PCI device to enable I/O and DMA. */
741static int efx_init_io(struct efx_nic *efx)
742{
743 struct pci_dev *pci_dev = efx->pci_dev;
744 dma_addr_t dma_mask = efx->type->max_dma_mask;
745 int rc;
746
747 EFX_LOG(efx, "initialising I/O\n");
748
749 rc = pci_enable_device(pci_dev);
750 if (rc) {
751 EFX_ERR(efx, "failed to enable PCI device\n");
752 goto fail1;
753 }
754
755 pci_set_master(pci_dev);
756
757 /* Set the PCI DMA mask. Try all possibilities from our
758 * genuine mask down to 32 bits, because some architectures
759 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
760 * masks event though they reject 46 bit masks.
761	 * masks even though they reject 46 bit masks.
762 while (dma_mask > 0x7fffffffUL) {
763 if (pci_dma_supported(pci_dev, dma_mask) &&
764 ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
765 break;
766 dma_mask >>= 1;
767 }
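	/* For example, starting from a 46-bit mask (0x3fffffffffff) the
	 * loop tries 0x3fffffffffff, 0x1fffffffffff, ... halving each
	 * time until the platform accepts one, bottoming out at the
	 * 32-bit mask 0xffffffff.
	 */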
768 if (rc) {
769 EFX_ERR(efx, "could not find a suitable DMA mask\n");
770 goto fail2;
771 }
772 EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
773 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
774 if (rc) {
775 /* pci_set_consistent_dma_mask() is not *allowed* to
776 * fail with a mask that pci_set_dma_mask() accepted,
777 * but just in case...
778 */
779 EFX_ERR(efx, "failed to set consistent DMA mask\n");
780 goto fail2;
781 }
782
783 efx->membase_phys = pci_resource_start(efx->pci_dev,
784 efx->type->mem_bar);
785 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
786 if (rc) {
787 EFX_ERR(efx, "request for memory BAR failed\n");
788 rc = -EIO;
789 goto fail3;
790 }
791 efx->membase = ioremap_nocache(efx->membase_phys,
792 efx->type->mem_map_size);
793 if (!efx->membase) {
794 EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
795 efx->type->mem_bar, efx->membase_phys,
796 efx->type->mem_map_size);
797 rc = -ENOMEM;
798 goto fail4;
799 }
800 EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
801 efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
802 efx->membase);
803
804 return 0;
805
806 fail4:
807 release_mem_region(efx->membase_phys, efx->type->mem_map_size);
808 fail3:
809 efx->membase_phys = 0UL;
810 fail2:
811 pci_disable_device(efx->pci_dev);
812 fail1:
813 return rc;
814}
815
816static void efx_fini_io(struct efx_nic *efx)
817{
818 EFX_LOG(efx, "shutting down I/O\n");
819
820 if (efx->membase) {
821 iounmap(efx->membase);
822 efx->membase = NULL;
823 }
824
825 if (efx->membase_phys) {
826 pci_release_region(efx->pci_dev, efx->type->mem_bar);
827 efx->membase_phys = 0UL;
828 }
829
830 pci_disable_device(efx->pci_dev);
831}
832
833/* Probe the number and type of interrupts we are able to obtain. */
834static void efx_probe_interrupts(struct efx_nic *efx)
835{
836 int max_channel = efx->type->phys_addr_channels - 1;
837 struct msix_entry xentries[EFX_MAX_CHANNELS];
838 int rc, i;
839
840 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
841 BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
842
843 efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus();
844 efx->rss_queues = min(efx->rss_queues, max_channel + 1);
845 efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
846
847 /* Request maximum number of MSI interrupts, and fill out
848		 * the channel interrupt information from the allowed allocation */
849 for (i = 0; i < efx->rss_queues; i++)
850 xentries[i].entry = i;
851 rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
852 if (rc > 0) {
853 EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
854 efx->rss_queues = rc;
855 rc = pci_enable_msix(efx->pci_dev, xentries,
856 efx->rss_queues);
857 }
858
859 if (rc == 0) {
860 for (i = 0; i < efx->rss_queues; i++) {
861 efx->channel[i].has_interrupt = 1;
862 efx->channel[i].irq = xentries[i].vector;
863 }
864 } else {
865 /* Fall back to single channel MSI */
866 efx->interrupt_mode = EFX_INT_MODE_MSI;
867 EFX_ERR(efx, "could not enable MSI-X\n");
868 }
869 }
870
871 /* Try single interrupt MSI */
872 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
873 efx->rss_queues = 1;
874 rc = pci_enable_msi(efx->pci_dev);
875 if (rc == 0) {
876 efx->channel[0].irq = efx->pci_dev->irq;
877 efx->channel[0].has_interrupt = 1;
878 } else {
879 EFX_ERR(efx, "could not enable MSI\n");
880 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
881 }
882 }
883
884 /* Assume legacy interrupts */
885 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
886 efx->rss_queues = 1;
887 /* Every channel is interruptible */
888 for (i = 0; i < EFX_MAX_CHANNELS; i++)
889 efx->channel[i].has_interrupt = 1;
890 efx->legacy_irq = efx->pci_dev->irq;
891 }
892}
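/* The retry above relies on pci_enable_msix() returning a positive
 * count when fewer vectors are available than requested: e.g. asking
 * for 4 vectors on a system that can only grant 2 makes the first call
 * return 2, so the second call requests exactly 2; single-channel MSI
 * is used only if that also fails.
 */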
893
894static void efx_remove_interrupts(struct efx_nic *efx)
895{
896 struct efx_channel *channel;
897
898 /* Remove MSI/MSI-X interrupts */
899 efx_for_each_channel_with_interrupt(channel, efx)
900 channel->irq = 0;
901 pci_disable_msi(efx->pci_dev);
902 pci_disable_msix(efx->pci_dev);
903
904 /* Remove legacy interrupt */
905 efx->legacy_irq = 0;
906}
907
908/* Select number of used resources
909 * Should be called after probe_interrupts()
910 */
911static void efx_select_used(struct efx_nic *efx)
912{
913 struct efx_tx_queue *tx_queue;
914 struct efx_rx_queue *rx_queue;
915 int i;
916
917 /* TX queues. One per port per channel with TX capability
918 * (more than one per port won't work on Linux, due to out
919 * of order issues... but will be fine on Solaris)
920 */
921 tx_queue = &efx->tx_queue[0];
922
923 /* Perform this for each channel with TX capabilities.
924 * At the moment, we only support a single TX queue
925 */
926 tx_queue->used = 1;
927 if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
928 tx_queue->channel = &efx->channel[1];
929 else
930 tx_queue->channel = &efx->channel[0];
931 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
932 tx_queue++;
933
934 /* RX queues. Each has a dedicated channel. */
935 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
936 rx_queue = &efx->rx_queue[i];
937
938 if (i < efx->rss_queues) {
939 rx_queue->used = 1;
940 /* If we allow multiple RX queues per channel
941 * we need to decide that here
942 */
943 rx_queue->channel = &efx->channel[rx_queue->queue];
944 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
945 rx_queue++;
946 }
947 }
948}
949
950static int efx_probe_nic(struct efx_nic *efx)
951{
952 int rc;
953
954 EFX_LOG(efx, "creating NIC\n");
955
956 /* Carry out hardware-type specific initialisation */
957 rc = falcon_probe_nic(efx);
958 if (rc)
959 return rc;
960
961 /* Determine the number of channels and RX queues by trying to hook
962 * in MSI-X interrupts. */
963 efx_probe_interrupts(efx);
964
965 /* Determine number of RX queues and TX queues */
966 efx_select_used(efx);
967
968 /* Initialise the interrupt moderation settings */
969 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
970
971 return 0;
972}
973
974static void efx_remove_nic(struct efx_nic *efx)
975{
976 EFX_LOG(efx, "destroying NIC\n");
977
978 efx_remove_interrupts(efx);
979 falcon_remove_nic(efx);
980}
981
982/**************************************************************************
983 *
984 * NIC startup/shutdown
985 *
986 *************************************************************************/
987
988static int efx_probe_all(struct efx_nic *efx)
989{
990 struct efx_channel *channel;
991 int rc;
992
993 /* Create NIC */
994 rc = efx_probe_nic(efx);
995 if (rc) {
996 EFX_ERR(efx, "failed to create NIC\n");
997 goto fail1;
998 }
999
1000 /* Create port */
1001 rc = efx_probe_port(efx);
1002 if (rc) {
1003 EFX_ERR(efx, "failed to create port\n");
1004 goto fail2;
1005 }
1006
1007 /* Create channels */
1008 efx_for_each_channel(channel, efx) {
1009 rc = efx_probe_channel(channel);
1010 if (rc) {
1011 EFX_ERR(efx, "failed to create channel %d\n",
1012 channel->channel);
1013 goto fail3;
1014 }
1015 }
1016
1017 return 0;
1018
1019 fail3:
1020 efx_for_each_channel(channel, efx)
1021 efx_remove_channel(channel);
1022 efx_remove_port(efx);
1023 fail2:
1024 efx_remove_nic(efx);
1025 fail1:
1026 return rc;
1027}
1028
1029/* Called after previous invocation(s) of efx_stop_all, restarts the
1030 * port, kernel transmit queue, NAPI processing and hardware interrupts,
1031 * and ensures that the port is scheduled to be reconfigured.
1032 * This function is safe to call multiple times when the NIC is in any
1033 * state. */
1034static void efx_start_all(struct efx_nic *efx)
1035{
1036 struct efx_channel *channel;
1037
1038 EFX_ASSERT_RESET_SERIALISED(efx);
1039
1040 /* Check that it is appropriate to restart the interface. All
1041 * of these flags are safe to read under just the rtnl lock */
1042 if (efx->port_enabled)
1043 return;
1044 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1045 return;
1046 if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
1047 return;
1048
1049 /* Mark the port as enabled so port reconfigurations can start, then
1050 * restart the transmit interface early so the watchdog timer stops */
1051 efx_start_port(efx);
1052 efx_wake_queue(efx);
1053
1054 efx_for_each_channel(channel, efx)
1055 efx_start_channel(channel);
1056
1057 falcon_enable_interrupts(efx);
1058
1059 /* Start hardware monitor if we're in RUNNING */
1060 if (efx->state == STATE_RUNNING)
1061 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1062 efx_monitor_interval);
1063}
1064
1065/* Flush all delayed work. Should only be called when no more delayed work
1066 * will be scheduled. This doesn't flush pending online resets (efx_reset),
1067 * since we're holding the rtnl_lock at this point. */
1068static void efx_flush_all(struct efx_nic *efx)
1069{
1070 struct efx_rx_queue *rx_queue;
1071
1072 /* Make sure the hardware monitor is stopped */
1073 cancel_delayed_work_sync(&efx->monitor_work);
1074
1075 /* Ensure that all RX slow refills are complete. */
1076 efx_for_each_rx_queue(rx_queue, efx) {
1077 cancel_delayed_work_sync(&rx_queue->work);
1078 }
1079
1080 /* Stop scheduled port reconfigurations */
1081 cancel_work_sync(&efx->reconfigure_work);
1082
1083}
1084
1085/* Quiesce hardware and software without bringing the link down.
1086 * Safe to call multiple times, when the NIC and interface are in any
1087 * state. The caller is guaranteed to subsequently be in a position
1088 * to modify any hardware and software state they see fit without
1089 * taking locks. */
1090static void efx_stop_all(struct efx_nic *efx)
1091{
1092 struct efx_channel *channel;
1093
1094 EFX_ASSERT_RESET_SERIALISED(efx);
1095
1096 /* port_enabled can be read safely under the rtnl lock */
1097 if (!efx->port_enabled)
1098 return;
1099
1100 /* Disable interrupts and wait for ISR to complete */
1101 falcon_disable_interrupts(efx);
1102 if (efx->legacy_irq)
1103 synchronize_irq(efx->legacy_irq);
1104 efx_for_each_channel_with_interrupt(channel, efx)
1105 if (channel->irq)
1106 synchronize_irq(channel->irq);
1107
1108 /* Stop all NAPI processing and synchronous rx refills */
1109 efx_for_each_channel(channel, efx)
1110 efx_stop_channel(channel);
1111
1112 /* Stop all asynchronous port reconfigurations. Since all
1113 * event processing has already been stopped, there is no
1114	 * window to lose PHY events */
1115 efx_stop_port(efx);
1116
1117 /* Flush reconfigure_work, refill_workqueue, monitor_work */
1118 efx_flush_all(efx);
1119
1120 /* Isolate the MAC from the TX and RX engines, so that queue
1121 * flushes will complete in a timely fashion. */
1122 falcon_deconfigure_mac_wrapper(efx);
1123 falcon_drain_tx_fifo(efx);
1124
1125 /* Stop the kernel transmit interface late, so the watchdog
1126 * timer isn't ticking over the flush */
1127 efx_stop_queue(efx);
1128 if (NET_DEV_REGISTERED(efx)) {
1129 netif_tx_lock_bh(efx->net_dev);
1130 netif_tx_unlock_bh(efx->net_dev);
1131 }
1132}
1133
1134static void efx_remove_all(struct efx_nic *efx)
1135{
1136 struct efx_channel *channel;
1137
1138 efx_for_each_channel(channel, efx)
1139 efx_remove_channel(channel);
1140 efx_remove_port(efx);
1141 efx_remove_nic(efx);
1142}
1143
1144/* A convenience function to safely flush all the queues */
1145int efx_flush_queues(struct efx_nic *efx)
1146{
1147 int rc;
1148
1149 EFX_ASSERT_RESET_SERIALISED(efx);
1150
1151 efx_stop_all(efx);
1152
1153 efx_fini_channels(efx);
1154 rc = efx_init_channels(efx);
1155 if (rc) {
1156 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1157 return rc;
1158 }
1159
1160 efx_start_all(efx);
1161
1162 return 0;
1163}
1164
1165/**************************************************************************
1166 *
1167 * Interrupt moderation
1168 *
1169 **************************************************************************/
1170
1171/* Set interrupt moderation parameters */
1172void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
1173{
1174 struct efx_tx_queue *tx_queue;
1175 struct efx_rx_queue *rx_queue;
1176
1177 EFX_ASSERT_RESET_SERIALISED(efx);
1178
1179 efx_for_each_tx_queue(tx_queue, efx)
1180 tx_queue->channel->irq_moderation = tx_usecs;
1181
1182 efx_for_each_rx_queue(rx_queue, efx)
1183 rx_queue->channel->irq_moderation = rx_usecs;
1184}
1185
1186/**************************************************************************
1187 *
1188 * Hardware monitor
1189 *
1190 **************************************************************************/
1191
1192/* Run periodically off the general workqueue. Serialised against
1193 * efx_reconfigure_port via the mac_lock */
1194static void efx_monitor(struct work_struct *data)
1195{
1196 struct efx_nic *efx = container_of(data, struct efx_nic,
1197 monitor_work.work);
1198 int rc = 0;
1199
1200 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
1201 raw_smp_processor_id());
1202
1203
1204	/* If the mac_lock is already held then a port reconfiguration
1205	 * is probably already in progress, which will do most of the
1206	 * work of check_hw() anyway. */
1207 if (!mutex_trylock(&efx->mac_lock)) {
1208 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1209 efx_monitor_interval);
1210 return;
1211 }
1212
1213 if (efx->port_enabled)
1214 rc = falcon_check_xmac(efx);
1215 mutex_unlock(&efx->mac_lock);
1216
1217 if (rc) {
1218 if (monitor_reset) {
1219 EFX_ERR(efx, "hardware monitor detected a fault: "
1220 "triggering reset\n");
1221 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1222 } else {
1223 EFX_ERR(efx, "hardware monitor detected a fault, "
1224 "skipping reset\n");
1225 }
1226 }
1227
1228 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1229 efx_monitor_interval);
1230}
1231
1232/**************************************************************************
1233 *
1234 * ioctls
1235 *
1236 *************************************************************************/
1237
1238/* Net device ioctl
1239 * Context: process, rtnl_lock() held.
1240 */
1241static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1242{
1243 struct efx_nic *efx = net_dev->priv;
1244
1245 EFX_ASSERT_RESET_SERIALISED(efx);
1246
1247 return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
1248}
1249
1250/**************************************************************************
1251 *
1252 * NAPI interface
1253 *
1254 **************************************************************************/
1255
1256static int efx_init_napi(struct efx_nic *efx)
1257{
1258 struct efx_channel *channel;
1259 int rc;
1260
1261 efx_for_each_channel(channel, efx) {
1262 channel->napi_dev = efx->net_dev;
1263 rc = efx_lro_init(&channel->lro_mgr, efx);
1264 if (rc)
1265 goto err;
1266 }
1267 return 0;
1268 err:
1269 efx_fini_napi(efx);
1270 return rc;
1271}
1272
1273static void efx_fini_napi(struct efx_nic *efx)
1274{
1275 struct efx_channel *channel;
1276
1277 efx_for_each_channel(channel, efx) {
1278 efx_lro_fini(&channel->lro_mgr);
1279 channel->napi_dev = NULL;
1280 }
1281}
1282
1283/**************************************************************************
1284 *
1285 * Kernel netpoll interface
1286 *
1287 *************************************************************************/
1288
1289#ifdef CONFIG_NET_POLL_CONTROLLER
1290
1291/* Although in the common case interrupts will be disabled, this is not
1292 * guaranteed. However, all our work happens inside the NAPI callback,
1293 * so no locking is required.
1294 */
1295static void efx_netpoll(struct net_device *net_dev)
1296{
1297 struct efx_nic *efx = net_dev->priv;
1298 struct efx_channel *channel;
1299
1300 efx_for_each_channel_with_interrupt(channel, efx)
1301 efx_schedule_channel(channel);
1302}
1303
1304#endif
1305
1306/**************************************************************************
1307 *
1308 * Kernel net device interface
1309 *
1310 *************************************************************************/
1311
1312/* Context: process, rtnl_lock() held. */
1313static int efx_net_open(struct net_device *net_dev)
1314{
1315 struct efx_nic *efx = net_dev->priv;
1316 EFX_ASSERT_RESET_SERIALISED(efx);
1317
1318 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
1319 raw_smp_processor_id());
1320
1321 efx_start_all(efx);
1322 return 0;
1323}
1324
1325/* Context: process, rtnl_lock() held.
1326 * Note that the kernel will ignore our return code; this method
1327 * should really be a void.
1328 */
1329static int efx_net_stop(struct net_device *net_dev)
1330{
1331 struct efx_nic *efx = net_dev->priv;
1332 int rc;
1333
1334 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
1335 raw_smp_processor_id());
1336
1337 /* Stop the device and flush all the channels */
1338 efx_stop_all(efx);
1339 efx_fini_channels(efx);
1340 rc = efx_init_channels(efx);
1341 if (rc)
1342 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1343
1344 return 0;
1345}
1346
1347/* Context: process, dev_base_lock held, non-blocking. */
1348static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1349{
1350 struct efx_nic *efx = net_dev->priv;
1351 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1352 struct net_device_stats *stats = &net_dev->stats;
1353
1354 if (!spin_trylock(&efx->stats_lock))
1355 return stats;
1356 if (efx->state == STATE_RUNNING) {
1357 falcon_update_stats_xmac(efx);
1358 falcon_update_nic_stats(efx);
1359 }
1360 spin_unlock(&efx->stats_lock);
1361
1362 stats->rx_packets = mac_stats->rx_packets;
1363 stats->tx_packets = mac_stats->tx_packets;
1364 stats->rx_bytes = mac_stats->rx_bytes;
1365 stats->tx_bytes = mac_stats->tx_bytes;
1366 stats->multicast = mac_stats->rx_multicast;
1367 stats->collisions = mac_stats->tx_collision;
1368 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1369 mac_stats->rx_length_error);
1370 stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
1371 stats->rx_crc_errors = mac_stats->rx_bad;
1372 stats->rx_frame_errors = mac_stats->rx_align_error;
1373 stats->rx_fifo_errors = mac_stats->rx_overflow;
1374 stats->rx_missed_errors = mac_stats->rx_missed;
1375 stats->tx_window_errors = mac_stats->tx_late_collision;
1376
1377 stats->rx_errors = (stats->rx_length_errors +
1378 stats->rx_over_errors +
1379 stats->rx_crc_errors +
1380 stats->rx_frame_errors +
1381 stats->rx_fifo_errors +
1382 stats->rx_missed_errors +
1383 mac_stats->rx_symbol_error);
1384 stats->tx_errors = (stats->tx_window_errors +
1385 mac_stats->tx_bad);
1386
1387 return stats;
1388}
1389
1390/* Context: netif_tx_lock held, BHs disabled. */
1391static void efx_watchdog(struct net_device *net_dev)
1392{
1393 struct efx_nic *efx = net_dev->priv;
1394
1395 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
1396 atomic_read(&efx->netif_stop_count), efx->port_enabled,
1397 monitor_reset ? "resetting channels" : "skipping reset");
1398
1399 if (monitor_reset)
1400 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1401}
1402
1403
1404/* Context: process, rtnl_lock() held. */
1405static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1406{
1407 struct efx_nic *efx = net_dev->priv;
1408 int rc = 0;
1409
1410 EFX_ASSERT_RESET_SERIALISED(efx);
1411
1412 if (new_mtu > EFX_MAX_MTU)
1413 return -EINVAL;
1414
1415 efx_stop_all(efx);
1416
1417 EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
1418
1419 efx_fini_channels(efx);
1420 net_dev->mtu = new_mtu;
1421 rc = efx_init_channels(efx);
1422 if (rc)
1423 goto fail;
1424
1425 efx_start_all(efx);
1426 return rc;
1427
1428 fail:
1429 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1430 return rc;
1431}
1432
1433static int efx_set_mac_address(struct net_device *net_dev, void *data)
1434{
1435 struct efx_nic *efx = net_dev->priv;
1436 struct sockaddr *addr = data;
1437 char *new_addr = addr->sa_data;
1438
1439 EFX_ASSERT_RESET_SERIALISED(efx);
1440
1441 if (!is_valid_ether_addr(new_addr)) {
1442 DECLARE_MAC_BUF(mac);
1443 EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
1444 print_mac(mac, new_addr));
1445 return -EINVAL;
1446 }
1447
1448 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
1449
1450 /* Reconfigure the MAC */
1451 efx_reconfigure_port(efx);
1452
1453 return 0;
1454}
1455
1456/* Context: netif_tx_lock held, BHs disabled. */
1457static void efx_set_multicast_list(struct net_device *net_dev)
1458{
1459 struct efx_nic *efx = net_dev->priv;
1460 struct dev_mc_list *mc_list = net_dev->mc_list;
1461 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1462 int promiscuous;
1463 u32 crc;
1464 int bit;
1465 int i;
1466
1467 /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
1468 promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
1469 if (efx->promiscuous != promiscuous) {
1470 efx->promiscuous = promiscuous;
1471 /* Close the window between efx_stop_port() and efx_flush_all()
1472 * by only queuing work when the port is enabled. */
1473 if (efx->port_enabled)
1474 queue_work(efx->workqueue, &efx->reconfigure_work);
1475 }
1476
1477 /* Build multicast hash table */
1478 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
1479 memset(mc_hash, 0xff, sizeof(*mc_hash));
1480 } else {
1481 memset(mc_hash, 0x00, sizeof(*mc_hash));
1482 for (i = 0; i < net_dev->mc_count; i++) {
1483 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
1484 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
1485 set_bit_le(bit, mc_hash->byte);
1486 mc_list = mc_list->next;
1487 }
1488 }
1489
1490 /* Create and activate new global multicast hash table */
1491 falcon_set_multicast_hash(efx);
1492}
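/* Hash placement sketch: ether_crc_le() yields a 32-bit CRC of each
 * multicast address and the low bits select a bit in the hash register;
 * assuming EFX_MCAST_HASH_ENTRIES is 256, bit == crc & 0xff, so
 * different addresses may alias onto the same bit and the filter is
 * necessarily imperfect.
 */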
1493
1494static int efx_netdev_event(struct notifier_block *this,
1495 unsigned long event, void *ptr)
1496{
1497 struct net_device *net_dev = (struct net_device *)ptr;
1498
1499 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
1500 struct efx_nic *efx = net_dev->priv;
1501
1502 strcpy(efx->name, net_dev->name);
1503 }
1504
1505 return NOTIFY_DONE;
1506}
1507
1508static struct notifier_block efx_netdev_notifier = {
1509 .notifier_call = efx_netdev_event,
1510};
1511
1512static int efx_register_netdev(struct efx_nic *efx)
1513{
1514 struct net_device *net_dev = efx->net_dev;
1515 int rc;
1516
1517 net_dev->watchdog_timeo = 5 * HZ;
1518 net_dev->irq = efx->pci_dev->irq;
1519 net_dev->open = efx_net_open;
1520 net_dev->stop = efx_net_stop;
1521 net_dev->get_stats = efx_net_stats;
1522 net_dev->tx_timeout = &efx_watchdog;
1523 net_dev->hard_start_xmit = efx_hard_start_xmit;
1524 net_dev->do_ioctl = efx_ioctl;
1525 net_dev->change_mtu = efx_change_mtu;
1526 net_dev->set_mac_address = efx_set_mac_address;
1527 net_dev->set_multicast_list = efx_set_multicast_list;
1528#ifdef CONFIG_NET_POLL_CONTROLLER
1529 net_dev->poll_controller = efx_netpoll;
1530#endif
1531 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
1532 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1533
1534 /* Always start with carrier off; PHY events will detect the link */
1535 netif_carrier_off(efx->net_dev);
1536
1537 /* Clear MAC statistics */
1538 falcon_update_stats_xmac(efx);
1539 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
1540
1541 rc = register_netdev(net_dev);
1542 if (rc) {
1543 EFX_ERR(efx, "could not register net dev\n");
1544 return rc;
1545 }
1546 strcpy(efx->name, net_dev->name);
1547
1548 return 0;
1549}
1550
1551static void efx_unregister_netdev(struct efx_nic *efx)
1552{
1553 struct efx_tx_queue *tx_queue;
1554
1555 if (!efx->net_dev)
1556 return;
1557
1558 BUG_ON(efx->net_dev->priv != efx);
1559
1560 /* Free up any skbs still remaining. This has to happen before
1561	 * we try to unregister the netdev, as running their destructors
1562	 * may be needed to get the device reference count to 0. */
1563 efx_for_each_tx_queue(tx_queue, efx)
1564 efx_release_tx_buffers(tx_queue);
1565
1566 if (NET_DEV_REGISTERED(efx)) {
1567 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
1568 unregister_netdev(efx->net_dev);
1569 }
1570}
1571
1572/**************************************************************************
1573 *
1574 * Device reset and suspend
1575 *
1576 **************************************************************************/
1577
1578/* The final hardware and software teardown before a reset. */
1579static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1580{
1581 int rc;
1582
1583 EFX_ASSERT_RESET_SERIALISED(efx);
1584
1585 rc = falcon_xmac_get_settings(efx, ecmd);
1586 if (rc) {
1587 EFX_ERR(efx, "could not back up PHY settings\n");
1588 goto fail;
1589 }
1590
1591 efx_fini_channels(efx);
1592 return 0;
1593
1594 fail:
1595 return rc;
1596}
1597
1598/* The first part of software initialisation after a hardware reset.
1599 * This function does not handle serialisation with the kernel; it
1600 * assumes the caller has done this */
1601static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1602{
1603 int rc;
1604
1605 rc = efx_init_channels(efx);
1606 if (rc)
1607 goto fail1;
1608
1609 /* Restore MAC and PHY settings. */
1610 rc = falcon_xmac_set_settings(efx, ecmd);
1611 if (rc) {
1612 EFX_ERR(efx, "could not restore PHY settings\n");
1613 goto fail2;
1614 }
1615
1616 return 0;
1617
1618 fail2:
1619 efx_fini_channels(efx);
1620 fail1:
1621 return rc;
1622}
1623
1624/* Reset the NIC as transparently as possible. Do not reset the PHY.
1625 * Note that the reset may fail, in which case the card will be left
1626 * in a most-probably-unusable state.
1627 *
1628 * This function will sleep. You cannot reset from within an atomic
1629 * state; use efx_schedule_reset() instead.
1630 *
1631 * Grabs the rtnl_lock.
1632 */
1633static int efx_reset(struct efx_nic *efx)
1634{
1635 struct ethtool_cmd ecmd;
1636 enum reset_type method = efx->reset_pending;
1637 int rc;
1638
1639 /* Serialise with kernel interfaces */
1640 rtnl_lock();
1641
1642 /* If we're not RUNNING then don't reset. Leave the reset_pending
1643 * flag set so that efx_pci_probe_main will be retried */
1644 if (efx->state != STATE_RUNNING) {
1645 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
1646 goto unlock_rtnl;
1647 }
1648
1649 efx->state = STATE_RESETTING;
1650 EFX_INFO(efx, "resetting (%d)\n", method);
1651
1652 /* The net_dev->get_stats handler is quite slow, and will fail
1653 * if a fetch is pending over reset. Serialise against it. */
1654 spin_lock(&efx->stats_lock);
1655 spin_unlock(&efx->stats_lock);
1656
1657 efx_stop_all(efx);
1658 mutex_lock(&efx->mac_lock);
1659
1660 rc = efx_reset_down(efx, &ecmd);
1661 if (rc)
1662 goto fail1;
1663
1664 rc = falcon_reset_hw(efx, method);
1665 if (rc) {
1666 EFX_ERR(efx, "failed to reset hardware\n");
1667 goto fail2;
1668 }
1669
1670 /* Allow resets to be rescheduled. */
1671 efx->reset_pending = RESET_TYPE_NONE;
1672
1673 /* Reinitialise bus-mastering, which may have been turned off before
1674 * the reset was scheduled. This is still appropriate, even in the
1675	 * RESET_TYPE_DISABLE case, since this driver generally assumes the hardware
1676 * can respond to requests. */
1677 pci_set_master(efx->pci_dev);
1678
1679 /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
1680 * case so the driver can talk to external SRAM */
1681 rc = falcon_init_nic(efx);
1682 if (rc) {
1683 EFX_ERR(efx, "failed to initialise NIC\n");
1684 goto fail3;
1685 }
1686
1687 /* Leave device stopped if necessary */
1688 if (method == RESET_TYPE_DISABLE) {
1689 /* Reinitialise the device anyway so the driver unload sequence
1690 * can talk to the external SRAM */
1691 (void) falcon_init_nic(efx);
1692 rc = -EIO;
1693 goto fail4;
1694 }
1695
1696 rc = efx_reset_up(efx, &ecmd);
1697 if (rc)
1698 goto fail5;
1699
1700 mutex_unlock(&efx->mac_lock);
1701 EFX_LOG(efx, "reset complete\n");
1702
1703 efx->state = STATE_RUNNING;
1704 efx_start_all(efx);
1705
1706 unlock_rtnl:
1707 rtnl_unlock();
1708 return 0;
1709
1710 fail5:
1711 fail4:
1712 fail3:
1713 fail2:
1714 fail1:
1715 EFX_ERR(efx, "has been disabled\n");
1716 efx->state = STATE_DISABLED;
1717
1718 mutex_unlock(&efx->mac_lock);
1719 rtnl_unlock();
1720 efx_unregister_netdev(efx);
1721 efx_fini_port(efx);
1722 return rc;
1723}
1724
1725/* The worker thread exists so that code that cannot sleep can
1726 * schedule a reset for later.
1727 */
1728static void efx_reset_work(struct work_struct *data)
1729{
1730 struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
1731
1732 efx_reset(nic);
1733}
1734
1735void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1736{
1737 enum reset_type method;
1738
1739 if (efx->reset_pending != RESET_TYPE_NONE) {
1740 EFX_INFO(efx, "quenching already scheduled reset\n");
1741 return;
1742 }
1743
1744 switch (type) {
1745 case RESET_TYPE_INVISIBLE:
1746 case RESET_TYPE_ALL:
1747 case RESET_TYPE_WORLD:
1748 case RESET_TYPE_DISABLE:
1749 method = type;
1750 break;
1751 case RESET_TYPE_RX_RECOVERY:
1752 case RESET_TYPE_RX_DESC_FETCH:
1753 case RESET_TYPE_TX_DESC_FETCH:
1754 case RESET_TYPE_TX_SKIP:
1755 method = RESET_TYPE_INVISIBLE;
1756 break;
1757 default:
1758 method = RESET_TYPE_ALL;
1759 break;
1760 }
1761
1762 if (method != type)
1763 EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
1764 else
1765 EFX_LOG(efx, "scheduling reset (%d)\n", method);
1766
1767 efx->reset_pending = method;
1768
1769 queue_work(efx->workqueue, &efx->reset_work);
1770}
1771
1772/**************************************************************************
1773 *
1774 * List of NICs we support
1775 *
1776 **************************************************************************/
1777
1778/* PCI device ID table */
1779static struct pci_device_id efx_pci_table[] __devinitdata = {
1780 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1781 .driver_data = (unsigned long) &falcon_a_nic_type},
1782 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
1783 .driver_data = (unsigned long) &falcon_b_nic_type},
1784 {0} /* end of list */
1785};
1786
1787/**************************************************************************
1788 *
1789 * Dummy PHY/MAC/Board operations
1790 *
1791 * Can be used where the MAC does not implement this operation
1792 * Needed so all function pointers are valid and do not have to be tested
1793 * before use
1794 *
1795 **************************************************************************/
1796int efx_port_dummy_op_int(struct efx_nic *efx)
1797{
1798 return 0;
1799}
1800void efx_port_dummy_op_void(struct efx_nic *efx) {}
1801void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
1802
1803static struct efx_phy_operations efx_dummy_phy_operations = {
1804 .init = efx_port_dummy_op_int,
1805 .reconfigure = efx_port_dummy_op_void,
1806 .check_hw = efx_port_dummy_op_int,
1807 .fini = efx_port_dummy_op_void,
1808 .clear_interrupt = efx_port_dummy_op_void,
1809 .reset_xaui = efx_port_dummy_op_void,
1810};
1811
1812/* Dummy board operations */
1813static int efx_nic_dummy_op_int(struct efx_nic *nic)
1814{
1815 return 0;
1816}
1817
1818static struct efx_board efx_dummy_board_info = {
1819 .init = efx_nic_dummy_op_int,
1820 .init_leds = efx_port_dummy_op_int,
1821 .set_fault_led = efx_port_dummy_op_blink,
1822};
1823
1824/**************************************************************************
1825 *
1826 * Data housekeeping
1827 *
1828 **************************************************************************/
1829
1830/* This zeroes out and then fills in the invariants in a struct
1831 * efx_nic (including all sub-structures).
1832 */
1833static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1834 struct pci_dev *pci_dev, struct net_device *net_dev)
1835{
1836 struct efx_channel *channel;
1837 struct efx_tx_queue *tx_queue;
1838 struct efx_rx_queue *rx_queue;
1839 int i, rc;
1840
1841 /* Initialise common structures */
1842 memset(efx, 0, sizeof(*efx));
1843 spin_lock_init(&efx->biu_lock);
1844 spin_lock_init(&efx->phy_lock);
1845 INIT_WORK(&efx->reset_work, efx_reset_work);
1846 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
1847 efx->pci_dev = pci_dev;
1848 efx->state = STATE_INIT;
1849 efx->reset_pending = RESET_TYPE_NONE;
1850 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
1851 efx->board_info = efx_dummy_board_info;
1852
1853 efx->net_dev = net_dev;
1854 efx->rx_checksum_enabled = 1;
1855 spin_lock_init(&efx->netif_stop_lock);
1856 spin_lock_init(&efx->stats_lock);
1857 mutex_init(&efx->mac_lock);
1858 efx->phy_op = &efx_dummy_phy_operations;
1859 efx->mii.dev = net_dev;
1860 INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
1861 atomic_set(&efx->netif_stop_count, 1);
1862
1863 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
1864 channel = &efx->channel[i];
1865 channel->efx = efx;
1866 channel->channel = i;
1867 channel->evqnum = i;
1868 channel->work_pending = 0;
1869 }
1870 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
1871 tx_queue = &efx->tx_queue[i];
1872 tx_queue->efx = efx;
1873 tx_queue->queue = i;
1874 tx_queue->buffer = NULL;
1875 tx_queue->channel = &efx->channel[0]; /* for safety */
1876 }
1877 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
1878 rx_queue = &efx->rx_queue[i];
1879 rx_queue->efx = efx;
1880 rx_queue->queue = i;
1881 rx_queue->channel = &efx->channel[0]; /* for safety */
1882 rx_queue->buffer = NULL;
1883 spin_lock_init(&rx_queue->add_lock);
1884 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
1885 }
1886
1887 efx->type = type;
1888
1889 /* Sanity-check NIC type */
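	/* A mask of the form 2^n - 1 satisfies mask & (mask + 1) == 0, and a
	 * power-of-two size satisfies size & (size - 1) == 0; the checks
	 * below rely on exactly these identities. */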
1890 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1891 (efx->type->txd_ring_mask + 1));
1892 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1893 (efx->type->rxd_ring_mask + 1));
1894 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1895 (efx->type->evq_size - 1));
1896 /* As close as we can get to guaranteeing that we don't overflow */
1897 EFX_BUG_ON_PARANOID(efx->type->evq_size <
1898 (efx->type->txd_ring_mask + 1 +
1899 efx->type->rxd_ring_mask + 1));
1900 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1901
1902 /* Higher numbered interrupt modes are less capable! */
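	/* max() therefore picks the less capable of the requested mode and
	 * the NIC's limit, so neither is exceeded. */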
1903 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
1904 interrupt_mode);
1905
1906 efx->workqueue = create_singlethread_workqueue("sfc_work");
1907 if (!efx->workqueue) {
1908 rc = -ENOMEM;
1909 goto fail1;
1910 }
1911
1912 return 0;
1913
1914 fail1:
1915 return rc;
1916}
1917
1918static void efx_fini_struct(struct efx_nic *efx)
1919{
1920 if (efx->workqueue) {
1921 destroy_workqueue(efx->workqueue);
1922 efx->workqueue = NULL;
1923 }
1924}
1925
1926/**************************************************************************
1927 *
1928 * PCI interface
1929 *
1930 **************************************************************************/
1931
1932/* Main body of final NIC shutdown code
1933 * This is called only at module unload (or hotplug removal).
1934 */
1935static void efx_pci_remove_main(struct efx_nic *efx)
1936{
1937 EFX_ASSERT_RESET_SERIALISED(efx);
1938
1939 /* Skip everything if we never obtained a valid membase */
1940 if (!efx->membase)
1941 return;
1942
1943 efx_fini_channels(efx);
1944 efx_fini_port(efx);
1945
1946	/* Shut down the board, then the NIC and board state */
1947 falcon_fini_interrupt(efx);
1948
1949 efx_fini_napi(efx);
1950 efx_remove_all(efx);
1951}
1952
1953/* Final NIC shutdown
1954 * This is called only at module unload (or hotplug removal).
1955 */
1956static void efx_pci_remove(struct pci_dev *pci_dev)
1957{
1958 struct efx_nic *efx;
1959
1960 efx = pci_get_drvdata(pci_dev);
1961 if (!efx)
1962 return;
1963
1964 /* Mark the NIC as fini, then stop the interface */
1965 rtnl_lock();
1966 efx->state = STATE_FINI;
1967 dev_close(efx->net_dev);
1968
1969 /* Allow any queued efx_resets() to complete */
1970 rtnl_unlock();
1971
1972 if (efx->membase == NULL)
1973 goto out;
1974
1975 efx_unregister_netdev(efx);
1976
1977 /* Wait for any scheduled resets to complete. No more will be
1978 * scheduled from this point because efx_stop_all() has been
1979 * called, we are no longer registered with driverlink, and
1980	 * the net_devices have been removed. */
1981 flush_workqueue(efx->workqueue);
1982
1983 efx_pci_remove_main(efx);
1984
1985out:
1986 efx_fini_io(efx);
1987 EFX_LOG(efx, "shutdown successful\n");
1988
1989 pci_set_drvdata(pci_dev, NULL);
1990 efx_fini_struct(efx);
1991 free_netdev(efx->net_dev);
1992};
1993
1994/* Main body of NIC initialisation
1995 * This is called at module load (or hotplug insertion, theoretically).
1996 */
1997static int efx_pci_probe_main(struct efx_nic *efx)
1998{
1999 int rc;
2000
2001 /* Do start-of-day initialisation */
2002 rc = efx_probe_all(efx);
2003 if (rc)
2004 goto fail1;
2005
2006 rc = efx_init_napi(efx);
2007 if (rc)
2008 goto fail2;
2009
2010 /* Initialise the board */
2011 rc = efx->board_info.init(efx);
2012 if (rc) {
2013 EFX_ERR(efx, "failed to initialise board\n");
2014 goto fail3;
2015 }
2016
2017 rc = falcon_init_nic(efx);
2018 if (rc) {
2019 EFX_ERR(efx, "failed to initialise NIC\n");
2020 goto fail4;
2021 }
2022
2023 rc = efx_init_port(efx);
2024 if (rc) {
2025 EFX_ERR(efx, "failed to initialise port\n");
2026 goto fail5;
2027 }
2028
2029 rc = efx_init_channels(efx);
2030 if (rc)
2031 goto fail6;
2032
2033 rc = falcon_init_interrupt(efx);
2034 if (rc)
2035 goto fail7;
2036
2037 return 0;
2038
2039 fail7:
2040 efx_fini_channels(efx);
2041 fail6:
2042 efx_fini_port(efx);
2043 fail5:
2044 fail4:
2045 fail3:
2046 efx_fini_napi(efx);
2047 fail2:
2048 efx_remove_all(efx);
2049 fail1:
2050 return rc;
2051}
2052
2053/* NIC initialisation
2054 *
2055 * This is called at module load (or hotplug insertion,
2056 * theoretically). It sets up PCI mappings, tests and resets the NIC,
2057 * sets up and registers the network devices with the kernel and hooks
2058 * the interrupt service routine. It does not prepare the device for
2059 * transmission; this is left to the first time one of the network
2060 * interfaces is brought up (i.e. efx_net_open).
2061 */
2062static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2063 const struct pci_device_id *entry)
2064{
2065 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
2066 struct net_device *net_dev;
2067 struct efx_nic *efx;
2068 int i, rc;
2069
2070 /* Allocate and initialise a struct net_device and struct efx_nic */
2071 net_dev = alloc_etherdev(sizeof(*efx));
2072 if (!net_dev)
2073 return -ENOMEM;
2074 net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
2075 if (lro)
2076 net_dev->features |= NETIF_F_LRO;
2077 efx = net_dev->priv;
2078 pci_set_drvdata(pci_dev, efx);
2079 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2080 if (rc)
2081 goto fail1;
2082
2083 EFX_INFO(efx, "Solarflare Communications NIC detected\n");
2084
2085 /* Set up basic I/O (BAR mappings etc) */
2086 rc = efx_init_io(efx);
2087 if (rc)
2088 goto fail2;
2089
2090 /* No serialisation is required with the reset path because
2091 * we're in STATE_INIT. */
2092 for (i = 0; i < 5; i++) {
2093 rc = efx_pci_probe_main(efx);
2094 if (rc == 0)
2095 break;
2096
2097 /* Serialise against efx_reset(). No more resets will be
2098 * scheduled since efx_stop_all() has been called, and we
2099		 * have never been registered with either
2100 * the rtnetlink or driverlink layers. */
2101 cancel_work_sync(&efx->reset_work);
2102
2103		/* Retry if a recoverable reset event has been scheduled */
2104 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
2105 (efx->reset_pending != RESET_TYPE_ALL))
2106 goto fail3;
2107
2108 efx->reset_pending = RESET_TYPE_NONE;
2109 }
2110
2111 if (rc) {
2112 EFX_ERR(efx, "Could not reset NIC\n");
2113 goto fail4;
2114 }
2115
2116 /* Switch to the running state before we expose the device to
2117 * the OS. This is to ensure that the initial gathering of
2118 * MAC stats succeeds. */
2119 rtnl_lock();
2120 efx->state = STATE_RUNNING;
2121 rtnl_unlock();
2122
2123 rc = efx_register_netdev(efx);
2124 if (rc)
2125 goto fail5;
2126
2127 EFX_LOG(efx, "initialisation successful\n");
2128
2129 return 0;
2130
2131 fail5:
2132 efx_pci_remove_main(efx);
2133 fail4:
2134 fail3:
2135 efx_fini_io(efx);
2136 fail2:
2137 efx_fini_struct(efx);
2138 fail1:
2139 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2140 free_netdev(net_dev);
2141 return rc;
2142}
2143
2144static struct pci_driver efx_pci_driver = {
2145 .name = EFX_DRIVER_NAME,
2146 .id_table = efx_pci_table,
2147 .probe = efx_pci_probe,
2148 .remove = efx_pci_remove,
2149};
2150
2151/**************************************************************************
2152 *
2153 * Kernel module interface
2154 *
2155 *************************************************************************/
2156
2157module_param(interrupt_mode, uint, 0444);
2158MODULE_PARM_DESC(interrupt_mode,
2159 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
2160
2161static int __init efx_init_module(void)
2162{
2163 int rc;
2164
2165 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
2166
2167 rc = register_netdevice_notifier(&efx_netdev_notifier);
2168 if (rc)
2169 goto err_notifier;
2170
2171 refill_workqueue = create_workqueue("sfc_refill");
2172 if (!refill_workqueue) {
2173 rc = -ENOMEM;
2174 goto err_refill;
2175 }
2176
2177 rc = pci_register_driver(&efx_pci_driver);
2178 if (rc < 0)
2179 goto err_pci;
2180
2181 return 0;
2182
2183 err_pci:
2184 destroy_workqueue(refill_workqueue);
2185 err_refill:
2186 unregister_netdevice_notifier(&efx_netdev_notifier);
2187 err_notifier:
2188 return rc;
2189}
2190
2191static void __exit efx_exit_module(void)
2192{
2193 printk(KERN_INFO "Solarflare NET driver unloading\n");
2194
2195 pci_unregister_driver(&efx_pci_driver);
2196 destroy_workqueue(refill_workqueue);
2197 unregister_netdevice_notifier(&efx_netdev_notifier);
2198
2199}
2200
2201module_init(efx_init_module);
2202module_exit(efx_exit_module);
2203
2204MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
2205 "Solarflare Communications");
2206MODULE_DESCRIPTION("Solarflare Communications network driver");
2207MODULE_LICENSE("GPL");
2208MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
new file mode 100644
index 000000000000..3b2f69f4a9ab
--- /dev/null
+++ b/drivers/net/sfc/efx.h
@@ -0,0 +1,67 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_EFX_H
12#define EFX_EFX_H
13
14#include "net_driver.h"
15
16/* PCI IDs */
17#define EFX_VENDID_SFC 0x1924
18#define FALCON_A_P_DEVID 0x0703
19#define FALCON_A_S_DEVID 0x6703
20#define FALCON_B_P_DEVID 0x0710
21
22/* TX */
23extern int efx_xmit(struct efx_nic *efx,
24 struct efx_tx_queue *tx_queue, struct sk_buff *skb);
25extern void efx_stop_queue(struct efx_nic *efx);
26extern void efx_wake_queue(struct efx_nic *efx);
27
28/* RX */
29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
30extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
31 unsigned int len, int checksummed, int discard);
32extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
33
34/* Channels */
35extern void efx_process_channel_now(struct efx_channel *channel);
36extern int efx_flush_queues(struct efx_nic *efx);
37
38/* Ports */
39extern void efx_reconfigure_port(struct efx_nic *efx);
40
41/* Global */
42extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
43extern void efx_suspend(struct efx_nic *efx);
44extern void efx_resume(struct efx_nic *efx);
45extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
46 int rx_usecs);
47extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
48extern void efx_hex_dump(const u8 *, unsigned int, const char *);
49
50/* Dummy PHY ops for PHY drivers */
51extern int efx_port_dummy_op_int(struct efx_nic *efx);
52extern void efx_port_dummy_op_void(struct efx_nic *efx);
53extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink);
54
55
56extern unsigned int efx_monitor_interval;
57
58static inline void efx_schedule_channel(struct efx_channel *channel)
59{
60 EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
61 channel->channel, raw_smp_processor_id());
62 channel->work_pending = 1;
63
64 netif_rx_schedule(channel->napi_dev, &channel->napi_str);
65}
66
67#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
new file mode 100644
index 000000000000..43663a4619da
--- /dev/null
+++ b/drivers/net/sfc/enum.h
@@ -0,0 +1,50 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_ENUM_H
11#define EFX_ENUM_H
12
13/*****************************************************************************/
14
15/**
16 * enum reset_type - reset types
17 *
18 * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
19 * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
20 * other values specify reasons, which efx_schedule_reset() will choose
21 * a method for.
22 *
23 * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
24 * @RESET_TYPE_ALL: reset everything but PCI core blocks
25 * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
26 * @RESET_TYPE_DISABLE: disable NIC
27 * @RESET_TYPE_MONITOR: reset due to hardware monitor
28 * @RESET_TYPE_INT_ERROR: reset due to internal error
29 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
30 * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
31 * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
32 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
33 */
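/* The values from RESET_TYPE_INVISIBLE up to RESET_TYPE_MAX_METHOD are
 * methods; the values after RESET_TYPE_MAX_METHOD are reasons, which
 * efx_schedule_reset() maps onto one of the methods above. */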
34enum reset_type {
35 RESET_TYPE_NONE = -1,
36 RESET_TYPE_INVISIBLE = 0,
37 RESET_TYPE_ALL = 1,
38 RESET_TYPE_WORLD = 2,
39 RESET_TYPE_DISABLE = 3,
40 RESET_TYPE_MAX_METHOD,
41 RESET_TYPE_MONITOR,
42 RESET_TYPE_INT_ERROR,
43 RESET_TYPE_RX_RECOVERY,
44 RESET_TYPE_RX_DESC_FETCH,
45 RESET_TYPE_TX_DESC_FETCH,
46 RESET_TYPE_TX_SKIP,
47 RESET_TYPE_MAX,
48};
49
50#endif /* EFX_ENUM_H */
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
new file mode 100644
index 000000000000..ad541badbd98
--- /dev/null
+++ b/drivers/net/sfc/ethtool.c
@@ -0,0 +1,460 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/netdevice.h>
12#include <linux/ethtool.h>
13#include <linux/rtnetlink.h>
14#include "net_driver.h"
15#include "efx.h"
16#include "ethtool.h"
17#include "falcon.h"
18#include "gmii.h"
19#include "mac.h"
20
21static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
22
23struct ethtool_string {
24 char name[ETH_GSTRING_LEN];
25};
26
27struct efx_ethtool_stat {
28 const char *name;
29 enum {
30 EFX_ETHTOOL_STAT_SOURCE_mac_stats,
31 EFX_ETHTOOL_STAT_SOURCE_nic,
32 EFX_ETHTOOL_STAT_SOURCE_channel
33 } source;
34 unsigned offset;
35 u64(*get_stat) (void *field); /* Reader function */
36};
37
38/* Initialiser for a struct #efx_ethtool_stat with type-checking */
39#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
40 get_stat_function) { \
41 .name = #stat_name, \
42 .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
43 .offset = ((((field_type *) 0) == \
44 &((struct efx_##source_name *)0)->field) ? \
45 offsetof(struct efx_##source_name, field) : \
46 offsetof(struct efx_##source_name, field)), \
47 .get_stat = get_stat_function, \
48}
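/* Both branches of the conditional in EFX_ETHTOOL_STAT's .offset evaluate to
 * the same offsetof(); the pointer comparison exists purely so the compiler
 * warns at build time if the named field is not of the stated type. */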
49
50static u64 efx_get_uint_stat(void *field)
51{
52 return *(unsigned int *)field;
53}
54
55static u64 efx_get_ulong_stat(void *field)
56{
57 return *(unsigned long *)field;
58}
59
60static u64 efx_get_u64_stat(void *field)
61{
62 return *(u64 *) field;
63}
64
65static u64 efx_get_atomic_stat(void *field)
66{
67 return atomic_read((atomic_t *) field);
68}
69
70#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
71 EFX_ETHTOOL_STAT(field, mac_stats, field, \
72 unsigned long, efx_get_ulong_stat)
73
74#define EFX_ETHTOOL_U64_MAC_STAT(field) \
75 EFX_ETHTOOL_STAT(field, mac_stats, field, \
76 u64, efx_get_u64_stat)
77
78#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
79 EFX_ETHTOOL_STAT(name, nic, n_##name, \
80 unsigned int, efx_get_uint_stat)
81
82#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
83 EFX_ETHTOOL_STAT(field, nic, field, \
84 atomic_t, efx_get_atomic_stat)
85
86#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
87 EFX_ETHTOOL_STAT(field, channel, n_##field, \
88 unsigned int, efx_get_uint_stat)
89
90static struct efx_ethtool_stat efx_ethtool_stats[] = {
91 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
92 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
93 EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
94 EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
95 EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
96 EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
97 EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
98 EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
99 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
100 EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
101 EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
102 EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
103 EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
104 EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
105 EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
106 EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
107 EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
108 EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
109 EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
110 EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
111 EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
112 EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
113 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
114 EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
115 EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
116 EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
117 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
118 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
119 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
120 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
121 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
122 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
123 EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
124 EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
125 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
126 EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
127 EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
128 EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
129 EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
130 EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
131 EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
132 EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
133 EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
134 EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
135 EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
136 EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
137 EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
138 EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
139 EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
140 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
141 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
142 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
143 EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
144 EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
145 EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
146 EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
147 EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
148 EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
149 EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
150 EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
151 EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
152 EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
153 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
154 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
157};
158
159/* Number of ethtool statistics */
160#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
161
162/**************************************************************************
163 *
164 * Ethtool operations
165 *
166 **************************************************************************
167 */
168
169/* Identify device by flashing LEDs */
170static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
171{
172 struct efx_nic *efx = net_dev->priv;
173
174 efx->board_info.blink(efx, 1);
175 schedule_timeout_interruptible(seconds * HZ);
176 efx->board_info.blink(efx, 0);
177 return 0;
178}
179
180/* This must be called with rtnl_lock held. */
181int efx_ethtool_get_settings(struct net_device *net_dev,
182 struct ethtool_cmd *ecmd)
183{
184 struct efx_nic *efx = net_dev->priv;
185 int rc;
186
187 mutex_lock(&efx->mac_lock);
188 rc = falcon_xmac_get_settings(efx, ecmd);
189 mutex_unlock(&efx->mac_lock);
190
191 return rc;
192}
193
194/* This must be called with rtnl_lock held. */
195int efx_ethtool_set_settings(struct net_device *net_dev,
196 struct ethtool_cmd *ecmd)
197{
198 struct efx_nic *efx = net_dev->priv;
199 int rc;
200
201 mutex_lock(&efx->mac_lock);
202 rc = falcon_xmac_set_settings(efx, ecmd);
203 mutex_unlock(&efx->mac_lock);
204 if (!rc)
205 efx_reconfigure_port(efx);
206
207 return rc;
208}
209
210static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
211 struct ethtool_drvinfo *info)
212{
213 struct efx_nic *efx = net_dev->priv;
214
215 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
216 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
217 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
218}
219
220static int efx_ethtool_get_stats_count(struct net_device *net_dev)
221{
222 return EFX_ETHTOOL_NUM_STATS;
223}
224
225static void efx_ethtool_get_strings(struct net_device *net_dev,
226 u32 string_set, u8 *strings)
227{
228 struct ethtool_string *ethtool_strings =
229 (struct ethtool_string *)strings;
230 int i;
231
232 if (string_set == ETH_SS_STATS)
233 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
234 strncpy(ethtool_strings[i].name,
235 efx_ethtool_stats[i].name,
236 sizeof(ethtool_strings[i].name));
237}
238
239static void efx_ethtool_get_stats(struct net_device *net_dev,
240 struct ethtool_stats *stats,
241 u64 *data)
242{
243 struct efx_nic *efx = net_dev->priv;
244 struct efx_mac_stats *mac_stats = &efx->mac_stats;
245 struct efx_ethtool_stat *stat;
246 struct efx_channel *channel;
247 int i;
248
249 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
250
251 /* Update MAC and NIC statistics */
252 net_dev->get_stats(net_dev);
253
254 /* Fill detailed statistics buffer */
255 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
256 stat = &efx_ethtool_stats[i];
257 switch (stat->source) {
258 case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
259 data[i] = stat->get_stat((void *)mac_stats +
260 stat->offset);
261 break;
262 case EFX_ETHTOOL_STAT_SOURCE_nic:
263 data[i] = stat->get_stat((void *)efx + stat->offset);
264 break;
265 case EFX_ETHTOOL_STAT_SOURCE_channel:
266 data[i] = 0;
267 efx_for_each_channel(channel, efx)
268 data[i] += stat->get_stat((void *)channel +
269 stat->offset);
270 break;
271 }
272 }
273}
274
275static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
276{
277 struct efx_nic *efx = net_dev->priv;
278 int rc;
279
280 rc = ethtool_op_set_tx_csum(net_dev, enable);
281 if (rc)
282 return rc;
283
284 efx_flush_queues(efx);
285
286 return 0;
287}
288
289static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
290{
291 struct efx_nic *efx = net_dev->priv;
292
293 /* No way to stop the hardware doing the checks; we just
294 * ignore the result.
295 */
296 efx->rx_checksum_enabled = (enable ? 1 : 0);
297
298 return 0;
299}
300
301static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
302{
303 struct efx_nic *efx = net_dev->priv;
304
305 return efx->rx_checksum_enabled;
306}
307
308/* Restart autonegotiation */
309static int efx_ethtool_nway_reset(struct net_device *net_dev)
310{
311 struct efx_nic *efx = net_dev->priv;
312
313 return mii_nway_restart(&efx->mii);
314}
315
316static u32 efx_ethtool_get_link(struct net_device *net_dev)
317{
318 struct efx_nic *efx = net_dev->priv;
319
320 return efx->link_up;
321}
322
323static int efx_ethtool_get_coalesce(struct net_device *net_dev,
324 struct ethtool_coalesce *coalesce)
325{
326 struct efx_nic *efx = net_dev->priv;
327 struct efx_tx_queue *tx_queue;
328 struct efx_rx_queue *rx_queue;
329 struct efx_channel *channel;
330
331 memset(coalesce, 0, sizeof(*coalesce));
332
333 /* Find lowest IRQ moderation across all used TX queues */
334 coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
335 efx_for_each_tx_queue(tx_queue, efx) {
336 channel = tx_queue->channel;
337 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
338 if (channel->used_flags != EFX_USED_BY_RX_TX)
339 coalesce->tx_coalesce_usecs_irq =
340 channel->irq_moderation;
341 else
342 coalesce->tx_coalesce_usecs_irq = 0;
343 }
344 }
345
346 /* Find lowest IRQ moderation across all used RX queues */
347 coalesce->rx_coalesce_usecs_irq = ~((u32) 0);
348 efx_for_each_rx_queue(rx_queue, efx) {
349 channel = rx_queue->channel;
350 if (channel->irq_moderation < coalesce->rx_coalesce_usecs_irq)
351 coalesce->rx_coalesce_usecs_irq =
352 channel->irq_moderation;
353 }
354
355 return 0;
356}
357
358/* Set coalescing parameters.
359 * The difficult case is a channel shared between RX and TX.
360 */
361static int efx_ethtool_set_coalesce(struct net_device *net_dev,
362 struct ethtool_coalesce *coalesce)
363{
364 struct efx_nic *efx = net_dev->priv;
365 struct efx_channel *channel;
366 struct efx_tx_queue *tx_queue;
367 unsigned tx_usecs, rx_usecs;
368
369 if (coalesce->use_adaptive_rx_coalesce ||
370 coalesce->use_adaptive_tx_coalesce)
371 return -EOPNOTSUPP;
372
373 if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
374 EFX_ERR(efx, "invalid coalescing setting. "
375 "Only rx/tx_coalesce_usecs_irq are supported\n");
376 return -EOPNOTSUPP;
377 }
378
379 rx_usecs = coalesce->rx_coalesce_usecs_irq;
380 tx_usecs = coalesce->tx_coalesce_usecs_irq;
381
382 /* If the channel is shared only allow RX parameters to be set */
383 efx_for_each_tx_queue(tx_queue, efx) {
384 if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
385 tx_usecs) {
386 EFX_ERR(efx, "Channel is shared. "
387 "Only RX coalescing may be set\n");
388 return -EOPNOTSUPP;
389 }
390 }
391
392 efx_init_irq_moderation(efx, tx_usecs, rx_usecs);
393
394 /* Reset channel to pick up new moderation value. Note that
395 * this may change the value of the irq_moderation field
396 * (e.g. to allow for hardware timer granularity).
397 */
398 efx_for_each_channel(channel, efx)
399 falcon_set_int_moderation(channel);
400
401 return 0;
402}
403
404static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
405 struct ethtool_pauseparam *pause)
406{
407 struct efx_nic *efx = net_dev->priv;
408 enum efx_fc_type flow_control = efx->flow_control;
409 int rc;
410
411 flow_control &= ~(EFX_FC_RX | EFX_FC_TX | EFX_FC_AUTO);
412 flow_control |= pause->rx_pause ? EFX_FC_RX : 0;
413 flow_control |= pause->tx_pause ? EFX_FC_TX : 0;
414 flow_control |= pause->autoneg ? EFX_FC_AUTO : 0;
415
416 /* Try to push the pause parameters */
417 mutex_lock(&efx->mac_lock);
418 rc = falcon_xmac_set_pause(efx, flow_control);
419 mutex_unlock(&efx->mac_lock);
420
421 if (!rc)
422 efx_reconfigure_port(efx);
423
424 return rc;
425}
426
427static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
428 struct ethtool_pauseparam *pause)
429{
430 struct efx_nic *efx = net_dev->priv;
431
432 pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
433 pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
434 pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0;
435}
436
437
438struct ethtool_ops efx_ethtool_ops = {
439 .get_settings = efx_ethtool_get_settings,
440 .set_settings = efx_ethtool_set_settings,
441 .get_drvinfo = efx_ethtool_get_drvinfo,
442 .nway_reset = efx_ethtool_nway_reset,
443 .get_link = efx_ethtool_get_link,
444 .get_coalesce = efx_ethtool_get_coalesce,
445 .set_coalesce = efx_ethtool_set_coalesce,
446 .get_pauseparam = efx_ethtool_get_pauseparam,
447 .set_pauseparam = efx_ethtool_set_pauseparam,
448 .get_rx_csum = efx_ethtool_get_rx_csum,
449 .set_rx_csum = efx_ethtool_set_rx_csum,
450 .get_tx_csum = ethtool_op_get_tx_csum,
451 .set_tx_csum = efx_ethtool_set_tx_csum,
452 .get_sg = ethtool_op_get_sg,
453 .set_sg = ethtool_op_set_sg,
454 .get_flags = ethtool_op_get_flags,
455 .set_flags = ethtool_op_set_flags,
456 .get_strings = efx_ethtool_get_strings,
457 .phys_id = efx_ethtool_phys_id,
458 .get_stats_count = efx_ethtool_get_stats_count,
459 .get_ethtool_stats = efx_ethtool_get_stats,
460};
diff --git a/drivers/net/sfc/ethtool.h b/drivers/net/sfc/ethtool.h
new file mode 100644
index 000000000000..3628e43df14d
--- /dev/null
+++ b/drivers/net/sfc/ethtool.h
@@ -0,0 +1,27 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_ETHTOOL_H
12#define EFX_ETHTOOL_H
13
14#include "net_driver.h"
15
16/*
17 * Ethtool support
18 */
19
20extern int efx_ethtool_get_settings(struct net_device *net_dev,
21 struct ethtool_cmd *ecmd);
22extern int efx_ethtool_set_settings(struct net_device *net_dev,
23 struct ethtool_cmd *ecmd);
24
25extern struct ethtool_ops efx_ethtool_ops;
26
27#endif /* EFX_ETHTOOL_H */
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
new file mode 100644
index 000000000000..46db549ce580
--- /dev/null
+++ b/drivers/net/sfc/falcon.c
@@ -0,0 +1,2722 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/bitops.h>
12#include <linux/delay.h>
13#include <linux/pci.h>
14#include <linux/module.h>
15#include <linux/seq_file.h>
16#include "net_driver.h"
17#include "bitfield.h"
18#include "efx.h"
19#include "mac.h"
20#include "gmii.h"
21#include "spi.h"
22#include "falcon.h"
23#include "falcon_hwdefs.h"
24#include "falcon_io.h"
25#include "mdio_10g.h"
26#include "phy.h"
27#include "boards.h"
28#include "workarounds.h"
29
30/* Falcon hardware control.
31 * Falcon is the internal codename for the SFC4000 controller that is
32 * present in SFE400X evaluation boards
33 */
34
35/**
36 * struct falcon_nic_data - Falcon NIC state
37 * @next_buffer_table: First available buffer table id
38 * @pci_dev2: The secondary PCI device if present
39 */
40struct falcon_nic_data {
41 unsigned next_buffer_table;
42 struct pci_dev *pci_dev2;
43};
44
45/**************************************************************************
46 *
47 * Configurable values
48 *
49 **************************************************************************
50 */
51
52static int disable_dma_stats;
53
54/* This is set to 16 for a good reason. In summary, if larger than
55 * 16, the descriptor cache holds more than a default socket
56 * buffer's worth of packets (for UDP we can only have at most one
57 * socket buffer's worth outstanding). This combined with the fact
58 * that we only get 1 TX event per descriptor cache means the NIC
59 * goes idle.
60 */
61#define TX_DC_ENTRIES 16
62#define TX_DC_ENTRIES_ORDER 0
63#define TX_DC_BASE 0x130000
64
65#define RX_DC_ENTRIES 64
66#define RX_DC_ENTRIES_ORDER 2
67#define RX_DC_BASE 0x100000
68
69/* RX FIFO XOFF watermark
70 *
71 * When the amount of the RX FIFO used increases past this
72 * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A)
73 * This also has an effect on RX/TX arbitration
74 */
75static int rx_xoff_thresh_bytes = -1;
76module_param(rx_xoff_thresh_bytes, int, 0644);
77MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
78
79/* RX FIFO XON watermark
80 *
81 * When the amount of the RX FIFO used decreases below this
82 * watermark, send XON. Only used if TX flow control is enabled (ethtool -A)
83 * This also has an effect on RX/TX arbitration
84 */
85static int rx_xon_thresh_bytes = -1;
86module_param(rx_xon_thresh_bytes, int, 0644);
87MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
88
89/* TX descriptor ring size - min 512 max 4k */
90#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
91#define FALCON_TXD_RING_SIZE 1024
92#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
93
94/* RX descriptor ring size - min 512 max 4k */
95#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
96#define FALCON_RXD_RING_SIZE 1024
97#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
98
99/* Event queue size - max 32k */
100#define FALCON_EVQ_ORDER EVQ_SIZE_4K
101#define FALCON_EVQ_SIZE 4096
102#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
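/* The ring and event queue sizes above are powers of two, so the SIZE - 1
 * masks let ring pointers wrap with a single bitwise AND, e.g.
 * read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK. */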
103
104/* Max number of internal errors. After this resets will not be performed */
105#define FALCON_MAX_INT_ERRORS 4
106
107/* Maximum period that we wait for flush events. If the flush event
108 * doesn't arrive in this period of time then we check if the queue
109 * was disabled anyway. */
110#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
111
112/**************************************************************************
113 *
114 * Falcon constants
115 *
116 **************************************************************************
117 */
118
119/* DMA address mask (up to 46-bit, avoiding compiler warnings)
120 *
121 * Note that it is possible to have a platform with 64-bit longs and
122 * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
123 * platform DMA mask.
124 */
125#if BITS_PER_LONG == 64
126#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
127#else
128#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
129#endif
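/* In either case 0x00003fffffffffff is 2^46 - 1, i.e. the low 46 address
 * bits set. */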
130
131/* TX DMA length mask (13-bit) */
132#define FALCON_TX_DMA_MASK (4096 - 1)
133
134/* Size and alignment of special buffers (4KB) */
135#define FALCON_BUF_SIZE 4096
136
137/* Dummy SRAM size code */
138#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
139
140/* It would be nice if these (or equiv.) were in linux/pci_regs.h, but they're not. */
141#define PCI_EXP_DEVCAP_PWR_VAL_LBN 18
142#define PCI_EXP_DEVCAP_PWR_SCL_LBN 26
143#define PCI_EXP_DEVCTL_PAYLOAD_LBN 5
144#define PCI_EXP_LNKSTA_LNK_WID 0x3f0
145#define PCI_EXP_LNKSTA_LNK_WID_LBN 4
146
147#define FALCON_IS_DUAL_FUNC(efx) \
148 (FALCON_REV(efx) < FALCON_REV_B0)
149
150/**************************************************************************
151 *
152 * Falcon hardware access
153 *
154 **************************************************************************/
155
156/* Read the current event from the event queue */
157static inline efx_qword_t *falcon_event(struct efx_channel *channel,
158 unsigned int index)
159{
160 return (((efx_qword_t *) (channel->eventq.addr)) + index);
161}
162
163/* See if an event is present
164 *
165 * We check both the high and low dword of the event for all ones. We
166 * wrote all ones when we cleared the event, and no valid event can
167 * have all ones in either its high or low dwords. This approach is
168 * robust against reordering.
169 *
170 * Note that using a single 64-bit comparison is incorrect; even
171 * though the CPU read will be atomic, the DMA write may not be.
172 */
173static inline int falcon_event_present(efx_qword_t *event)
174{
175 return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
176 EFX_DWORD_IS_ALL_ONES(event->dword[1])));
177}
178
179/**************************************************************************
180 *
181 * I2C bus - this is a bit-bashing interface using GPIO pins
182 * Note that it uses the output enables to tristate the outputs
183 * SDA is the data pin and SCL is the clock
184 *
185 **************************************************************************
186 */
187static void falcon_setsdascl(struct efx_i2c_interface *i2c)
188{
189 efx_oword_t reg;
190
191 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
192 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, (i2c->scl ? 0 : 1));
193 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, (i2c->sda ? 0 : 1));
194 falcon_write(i2c->efx, &reg, GPIO_CTL_REG_KER);
195}
196
197static int falcon_getsda(struct efx_i2c_interface *i2c)
198{
199 efx_oword_t reg;
200
201 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
202 return EFX_OWORD_FIELD(reg, GPIO3_IN);
203}
204
205static int falcon_getscl(struct efx_i2c_interface *i2c)
206{
207 efx_oword_t reg;
208
209 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
210 return EFX_DWORD_FIELD(reg, GPIO0_IN);
211}
212
213static struct efx_i2c_bit_operations falcon_i2c_bit_operations = {
214 .setsda = falcon_setsdascl,
215 .setscl = falcon_setsdascl,
216 .getsda = falcon_getsda,
217 .getscl = falcon_getscl,
218 .udelay = 100,
219 .mdelay = 10,
220};
221
222/**************************************************************************
223 *
224 * Falcon special buffer handling
225 * Special buffers are used for event queues and the TX and RX
226 * descriptor rings.
227 *
228 *************************************************************************/
229
230/*
231 * Initialise a Falcon special buffer
232 *
233 * This will define a buffer (previously allocated via
234 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
235 * it to be used for event queues, descriptor rings etc.
236 */
237static int
238falcon_init_special_buffer(struct efx_nic *efx,
239 struct efx_special_buffer *buffer)
240{
241 efx_qword_t buf_desc;
242 int index;
243 dma_addr_t dma_addr;
244 int i;
245
246 EFX_BUG_ON_PARANOID(!buffer->addr);
247
248 /* Write buffer descriptors to NIC */
249 for (i = 0; i < buffer->entries; i++) {
250 index = buffer->index + i;
251 dma_addr = buffer->dma_addr + (i * 4096);
252 EFX_LOG(efx, "mapping special buffer %d at %llx\n",
253 index, (unsigned long long)dma_addr);
254 EFX_POPULATE_QWORD_4(buf_desc,
255 IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
256 BUF_ADR_REGION, 0,
257 BUF_ADR_FBUF, (dma_addr >> 12),
258 BUF_OWNER_ID_FBUF, 0);
259 falcon_write_sram(efx, &buf_desc, index);
260 }
261
262 return 0;
263}
264
265/* Unmaps a buffer from Falcon and clears the buffer table entries */
266static void
267falcon_fini_special_buffer(struct efx_nic *efx,
268 struct efx_special_buffer *buffer)
269{
270 efx_oword_t buf_tbl_upd;
271 unsigned int start = buffer->index;
272 unsigned int end = (buffer->index + buffer->entries - 1);
273
274 if (!buffer->entries)
275 return;
276
277 EFX_LOG(efx, "unmapping special buffers %d-%d\n",
278 buffer->index, buffer->index + buffer->entries - 1);
279
280 EFX_POPULATE_OWORD_4(buf_tbl_upd,
281 BUF_UPD_CMD, 0,
282 BUF_CLR_CMD, 1,
283 BUF_CLR_END_ID, end,
284 BUF_CLR_START_ID, start);
285 falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
286}
287
288/*
289 * Allocate a new Falcon special buffer
290 *
291 * This allocates memory for a new buffer, clears it and allocates a
292 * new buffer ID range. It does not write into Falcon's buffer table.
293 *
294 * This call will allocate 4KB buffers, since Falcon can't use 8KB
295 * buffers for event queues and descriptor rings.
296 */
297static int falcon_alloc_special_buffer(struct efx_nic *efx,
298 struct efx_special_buffer *buffer,
299 unsigned int len)
300{
301 struct falcon_nic_data *nic_data = efx->nic_data;
302
303 len = ALIGN(len, FALCON_BUF_SIZE);
304
305 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
306 &buffer->dma_addr);
307 if (!buffer->addr)
308 return -ENOMEM;
309 buffer->len = len;
310 buffer->entries = len / FALCON_BUF_SIZE;
311 BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
312
313 /* All zeros is a potentially valid event so memset to 0xff */
314 memset(buffer->addr, 0xff, len);
315
316 /* Select new buffer ID */
317 buffer->index = nic_data->next_buffer_table;
318 nic_data->next_buffer_table += buffer->entries;
319
320 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
321 "(virt %p phys %lx)\n", buffer->index,
322 buffer->index + buffer->entries - 1,
323 (unsigned long long)buffer->dma_addr, len,
324 buffer->addr, virt_to_phys(buffer->addr));
325
326 return 0;
327}
328
329static void falcon_free_special_buffer(struct efx_nic *efx,
330 struct efx_special_buffer *buffer)
331{
332 if (!buffer->addr)
333 return;
334
335 EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
336 "(virt %p phys %lx)\n", buffer->index,
337 buffer->index + buffer->entries - 1,
338 (unsigned long long)buffer->dma_addr, buffer->len,
339 buffer->addr, virt_to_phys(buffer->addr));
340
341 pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
342 buffer->dma_addr);
343 buffer->addr = NULL;
344 buffer->entries = 0;
345}
346
347/**************************************************************************
348 *
349 * Falcon generic buffer handling
350 * These buffers are used for interrupt status and MAC stats
351 *
352 **************************************************************************/
353
354static int falcon_alloc_buffer(struct efx_nic *efx,
355 struct efx_buffer *buffer, unsigned int len)
356{
357 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
358 &buffer->dma_addr);
359 if (!buffer->addr)
360 return -ENOMEM;
361 buffer->len = len;
362 memset(buffer->addr, 0, len);
363 return 0;
364}
365
366static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
367{
368 if (buffer->addr) {
369 pci_free_consistent(efx->pci_dev, buffer->len,
370 buffer->addr, buffer->dma_addr);
371 buffer->addr = NULL;
372 }
373}
374
375/**************************************************************************
376 *
377 * Falcon TX path
378 *
379 **************************************************************************/
380
381/* Returns a pointer to the specified transmit descriptor in the TX
382 * descriptor queue belonging to the specified channel.
383 */
384static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
385 unsigned int index)
386{
387 return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
388}
389
390/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
391static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
392{
393 unsigned write_ptr;
394 efx_dword_t reg;
395
396 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
397 EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
398 falcon_writel_page(tx_queue->efx, &reg,
399 TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
400}
401
402
403/* For each entry inserted into the software descriptor ring, create a
404 * descriptor in the hardware TX descriptor ring (in host memory), and
405 * write a doorbell.
406 */
407void falcon_push_buffers(struct efx_tx_queue *tx_queue)
408{
409
410 struct efx_tx_buffer *buffer;
411 efx_qword_t *txd;
412 unsigned write_ptr;
413
414 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
415
416 do {
417 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
418 buffer = &tx_queue->buffer[write_ptr];
419 txd = falcon_tx_desc(tx_queue, write_ptr);
420 ++tx_queue->write_count;
421
422 /* Create TX descriptor ring entry */
423 EFX_POPULATE_QWORD_5(*txd,
424 TX_KER_PORT, 0,
425 TX_KER_CONT, buffer->continuation,
426 TX_KER_BYTE_CNT, buffer->len,
427 TX_KER_BUF_REGION, 0,
428 TX_KER_BUF_ADR, buffer->dma_addr);
429 } while (tx_queue->write_count != tx_queue->insert_count);
430
431 wmb(); /* Ensure descriptors are written before they are fetched */
432 falcon_notify_tx_desc(tx_queue);
433}
434
435/* Allocate hardware resources for a TX queue */
436int falcon_probe_tx(struct efx_tx_queue *tx_queue)
437{
438 struct efx_nic *efx = tx_queue->efx;
439 return falcon_alloc_special_buffer(efx, &tx_queue->txd,
440 FALCON_TXD_RING_SIZE *
441 sizeof(efx_qword_t));
442}
443
444int falcon_init_tx(struct efx_tx_queue *tx_queue)
445{
446 efx_oword_t tx_desc_ptr;
447 struct efx_nic *efx = tx_queue->efx;
448 int rc;
449
450 /* Pin TX descriptor ring */
451 rc = falcon_init_special_buffer(efx, &tx_queue->txd);
452 if (rc)
453 return rc;
454
455 /* Push TX descriptor ring to card */
456 EFX_POPULATE_OWORD_10(tx_desc_ptr,
457 TX_DESCQ_EN, 1,
458 TX_ISCSI_DDIG_EN, 0,
459 TX_ISCSI_HDIG_EN, 0,
460 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
461 TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum,
462 TX_DESCQ_OWNER_ID, 0,
463 TX_DESCQ_LABEL, tx_queue->queue,
464 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
465 TX_DESCQ_TYPE, 0,
466 TX_NON_IP_DROP_DIS_B0, 1);
467
468 if (FALCON_REV(efx) >= FALCON_REV_B0) {
469 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
470 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
471 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
472 }
473
474 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
475 tx_queue->queue);
476
477 if (FALCON_REV(efx) < FALCON_REV_B0) {
478 efx_oword_t reg;
479
480 BUG_ON(tx_queue->queue >= 128); /* HW limit */
481
482 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
483 if (efx->net_dev->features & NETIF_F_IP_CSUM)
484 clear_bit_le(tx_queue->queue, (void *)&reg);
485 else
486 set_bit_le(tx_queue->queue, (void *)&reg);
487 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
488 }
489
490 return 0;
491}
492
493static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
494{
495 struct efx_nic *efx = tx_queue->efx;
496 struct efx_channel *channel = &efx->channel[0];
497 efx_oword_t tx_flush_descq;
498 unsigned int read_ptr, i;
499
500 /* Post a flush command */
501 EFX_POPULATE_OWORD_2(tx_flush_descq,
502 TX_FLUSH_DESCQ_CMD, 1,
503 TX_FLUSH_DESCQ, tx_queue->queue);
504 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
505 msleep(FALCON_FLUSH_TIMEOUT);
506
507 if (EFX_WORKAROUND_7803(efx))
508 return 0;
509
510 /* Look for a flush completed event */
511 read_ptr = channel->eventq_read_ptr;
512 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
513 efx_qword_t *event = falcon_event(channel, read_ptr);
514 int ev_code, ev_sub_code, ev_queue;
515 if (!falcon_event_present(event))
516 break;
517
518 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
519 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
520 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
521 if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
522 (ev_queue == tx_queue->queue)) {
523			EFX_LOG(efx, "tx queue %d flush command successful\n",
524 tx_queue->queue);
525 return 0;
526 }
527
528 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
529 }
530
531 if (EFX_WORKAROUND_11557(efx)) {
532 efx_oword_t reg;
533 int enabled;
534
535 falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
536 tx_queue->queue);
537 enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
538 if (!enabled) {
539 EFX_LOG(efx, "tx queue %d disabled without a "
540 "flush event seen\n", tx_queue->queue);
541 return 0;
542 }
543 }
544
545 EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
546 return -ETIMEDOUT;
547}
548
549void falcon_fini_tx(struct efx_tx_queue *tx_queue)
550{
551 struct efx_nic *efx = tx_queue->efx;
552 efx_oword_t tx_desc_ptr;
553
554 /* Stop the hardware using the queue */
555 if (falcon_flush_tx_queue(tx_queue))
556 EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
557
558 /* Remove TX descriptor ring from card */
559 EFX_ZERO_OWORD(tx_desc_ptr);
560 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
561 tx_queue->queue);
562
563 /* Unpin TX descriptor ring */
564 falcon_fini_special_buffer(efx, &tx_queue->txd);
565}
566
567/* Free buffers backing TX queue */
568void falcon_remove_tx(struct efx_tx_queue *tx_queue)
569{
570 falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
571}
572
573/**************************************************************************
574 *
575 * Falcon RX path
576 *
577 **************************************************************************/
578
579/* Returns a pointer to the specified descriptor in the RX descriptor queue */
580static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
581 unsigned int index)
582{
583 return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
584}
585
586/* This creates an entry in the RX descriptor queue */
587static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
588 unsigned index)
589{
590 struct efx_rx_buffer *rx_buf;
591 efx_qword_t *rxd;
592
593 rxd = falcon_rx_desc(rx_queue, index);
594 rx_buf = efx_rx_buffer(rx_queue, index);
595 EFX_POPULATE_QWORD_3(*rxd,
596 RX_KER_BUF_SIZE,
597 rx_buf->len -
598 rx_queue->efx->type->rx_buffer_padding,
599 RX_KER_BUF_REGION, 0,
600 RX_KER_BUF_ADR, rx_buf->dma_addr);
601}
602
603/* This writes to the RX_DESC_WPTR register for the specified receive
604 * descriptor ring.
605 */
606void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
607{
608 efx_dword_t reg;
609 unsigned write_ptr;
610
611 while (rx_queue->notified_count != rx_queue->added_count) {
612 falcon_build_rx_desc(rx_queue,
613 rx_queue->notified_count &
614 FALCON_RXD_RING_MASK);
615 ++rx_queue->notified_count;
616 }
617
618 wmb();
619 write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
620 EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
621 falcon_writel_page(rx_queue->efx, &reg,
622 RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
623}
624
625int falcon_probe_rx(struct efx_rx_queue *rx_queue)
626{
627 struct efx_nic *efx = rx_queue->efx;
628 return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
629 FALCON_RXD_RING_SIZE *
630 sizeof(efx_qword_t));
631}
632
633int falcon_init_rx(struct efx_rx_queue *rx_queue)
634{
635 efx_oword_t rx_desc_ptr;
636 struct efx_nic *efx = rx_queue->efx;
637 int rc;
638 int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
639 int iscsi_digest_en = is_b0;
640
641 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
642 rx_queue->queue, rx_queue->rxd.index,
643 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
644
645 /* Pin RX descriptor ring */
646 rc = falcon_init_special_buffer(efx, &rx_queue->rxd);
647 if (rc)
648 return rc;
649
650 /* Push RX descriptor ring to card */
651 EFX_POPULATE_OWORD_10(rx_desc_ptr,
652 RX_ISCSI_DDIG_EN, iscsi_digest_en,
653 RX_ISCSI_HDIG_EN, iscsi_digest_en,
654 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
655 RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum,
656 RX_DESCQ_OWNER_ID, 0,
657 RX_DESCQ_LABEL, rx_queue->queue,
658 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
659 RX_DESCQ_TYPE, 0 /* kernel queue */ ,
660 /* For >=B0 this is scatter so disable */
661 RX_DESCQ_JUMBO, !is_b0,
662 RX_DESCQ_EN, 1);
663 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
664 rx_queue->queue);
665 return 0;
666}
667
668static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
669{
670 struct efx_nic *efx = rx_queue->efx;
671 struct efx_channel *channel = &efx->channel[0];
672 unsigned int read_ptr, i;
673 efx_oword_t rx_flush_descq;
674
675 /* Post a flush command */
676 EFX_POPULATE_OWORD_2(rx_flush_descq,
677 RX_FLUSH_DESCQ_CMD, 1,
678 RX_FLUSH_DESCQ, rx_queue->queue);
679 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
680 msleep(FALCON_FLUSH_TIMEOUT);
681
682 if (EFX_WORKAROUND_7803(efx))
683 return 0;
684
685 /* Look for a flush completed event */
686 read_ptr = channel->eventq_read_ptr;
687 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
688 efx_qword_t *event = falcon_event(channel, read_ptr);
689 int ev_code, ev_sub_code, ev_queue, ev_failed;
690 if (!falcon_event_present(event))
691 break;
692
693 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
694 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
695 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
696 ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
697
698 if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
699 (ev_queue == rx_queue->queue)) {
700 if (ev_failed) {
701 EFX_INFO(efx, "rx queue %d flush command "
702 "failed\n", rx_queue->queue);
703 return -EAGAIN;
704 } else {
705 EFX_LOG(efx, "rx queue %d flush command "
706					 "successful\n", rx_queue->queue);
707 return 0;
708 }
709 }
710
711 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
712 }
713
714 if (EFX_WORKAROUND_11557(efx)) {
715 efx_oword_t reg;
716 int enabled;
717
718 falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
719 rx_queue->queue);
720 enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
721 if (!enabled) {
722 EFX_LOG(efx, "rx queue %d disabled without a "
723 "flush event seen\n", rx_queue->queue);
724 return 0;
725 }
726 }
727
728 EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
729 return -ETIMEDOUT;
730}
731
732void falcon_fini_rx(struct efx_rx_queue *rx_queue)
733{
734 efx_oword_t rx_desc_ptr;
735 struct efx_nic *efx = rx_queue->efx;
736 int i, rc;
737
738	/* Try to flush the RX queue; this may need to be repeated */
739 for (i = 0; i < 5; i++) {
740 rc = falcon_flush_rx_queue(rx_queue);
741 if (rc == -EAGAIN)
742 continue;
743 break;
744 }
745 if (rc)
746 EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
747
748 /* Remove RX descriptor ring from card */
749 EFX_ZERO_OWORD(rx_desc_ptr);
750 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
751 rx_queue->queue);
752
753 /* Unpin RX descriptor ring */
754 falcon_fini_special_buffer(efx, &rx_queue->rxd);
755}
756
757/* Free buffers backing RX queue */
758void falcon_remove_rx(struct efx_rx_queue *rx_queue)
759{
760 falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
761}
762
763/**************************************************************************
764 *
765 * Falcon event queue processing
766 * Event queues are processed by per-channel tasklets.
767 *
768 **************************************************************************/
769
770/* Update a channel's event queue's read pointer (RPTR) register
771 *
772 * This writes the EVQ_RPTR_REG register for the specified channel's
773 * event queue.
774 *
775 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
776 * whereas channel->eventq_read_ptr contains the index of the "next to
777 * read" event.
778 */
779void falcon_eventq_read_ack(struct efx_channel *channel)
780{
781 efx_dword_t reg;
782 struct efx_nic *efx = channel->efx;
783
784 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
785 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
786 channel->evqnum);
787}
788
789/* Use HW to insert a SW defined event */
790void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
791{
792 efx_oword_t drv_ev_reg;
793
794 EFX_POPULATE_OWORD_2(drv_ev_reg,
795 DRV_EV_QID, channel->evqnum,
796 DRV_EV_DATA,
797 EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
798 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
799}
800
801/* Handle a transmit completion event
802 *
803 * Falcon batches TX completion events; the message we receive is of
804 * the form "complete all TX events up to this index".
805 */
806static inline void falcon_handle_tx_event(struct efx_channel *channel,
807 efx_qword_t *event)
808{
809 unsigned int tx_ev_desc_ptr;
810 unsigned int tx_ev_q_label;
811 struct efx_tx_queue *tx_queue;
812 struct efx_nic *efx = channel->efx;
813
814 if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
815 /* Transmit completion */
816 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
817 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
818 tx_queue = &efx->tx_queue[tx_ev_q_label];
819 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
820 } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
821 /* Rewrite the FIFO write pointer */
822 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
823 tx_queue = &efx->tx_queue[tx_ev_q_label];
824
825 if (NET_DEV_REGISTERED(efx))
826 netif_tx_lock(efx->net_dev);
827 falcon_notify_tx_desc(tx_queue);
828 if (NET_DEV_REGISTERED(efx))
829 netif_tx_unlock(efx->net_dev);
830 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
831 EFX_WORKAROUND_10727(efx)) {
832 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
833 } else {
834 EFX_ERR(efx, "channel %d unexpected TX event "
835 EFX_QWORD_FMT"\n", channel->channel,
836 EFX_QWORD_VAL(*event));
837 }
838}
839
840/* Check received packet's destination MAC address. */
841static int check_dest_mac(struct efx_rx_queue *rx_queue,
842 const efx_qword_t *event)
843{
844 struct efx_rx_buffer *rx_buf;
845 struct efx_nic *efx = rx_queue->efx;
846 int rx_ev_desc_ptr;
847 struct ethhdr *eh;
848
849 if (efx->promiscuous)
850 return 1;
851
852 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
853 rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
854 eh = (struct ethhdr *)rx_buf->data;
855 if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
856 return 0;
857 return 1;
858}
859
860/* Detect errors included in the rx_evt_pkt_ok bit. */
861static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
862 const efx_qword_t *event,
863 unsigned *rx_ev_pkt_ok,
864 int *discard, int byte_count)
865{
866 struct efx_nic *efx = rx_queue->efx;
867 unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
868 unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
869 unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
870 unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm;
871 unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
872 int snap, non_ip;
873
874 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
875 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
876 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
877 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
878 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
879 RX_EV_BUF_OWNER_ID_ERR);
880 rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
881 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
882 RX_EV_IP_HDR_CHKSUM_ERR);
883 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
884 RX_EV_TCP_UDP_CHKSUM_ERR);
885 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
886 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
887 rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
888 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
889 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
890
891 /* Every error apart from tobe_disc and pause_frm */
892 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
893 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
894 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
895
896 snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
897 (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
898 non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);
899
900 /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
901 * length field of an LLC frame, which sets TOBE_DISC. We could set
902 * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
903 * protect the RX block).
904 *
905 * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
906 * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
907 * LLC can't encapsulate IP, so by definition
908 * these packets are NON_IP.
909 *
910 * Unicast mismatch will also cause TOBE_DISC, so the driver needs
911 * to check this.
912 */
913 if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
914 /* If all the other flags are zero then we can state the
915 * entire packet is ok, which will flag to the kernel not
916 * to recalculate checksums.
917 */
918 if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
919 *rx_ev_pkt_ok = 1;
920
921 rx_ev_tobe_disc = 0;
922
923 /* TOBE_DISC is set for unicast mismatch. But given that
924 * we can't trust TOBE_DISC here, we must validate the dest
925 * MAC address ourselves.
926 */
927 if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
928 rx_ev_tobe_disc = 1;
929 }
930
931 /* Count errors that are not in MAC stats. */
932 if (rx_ev_frm_trunc)
933 ++rx_queue->channel->n_rx_frm_trunc;
934 else if (rx_ev_tobe_disc)
935 ++rx_queue->channel->n_rx_tobe_disc;
936 else if (rx_ev_ip_hdr_chksum_err)
937 ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
938 else if (rx_ev_tcp_udp_chksum_err)
939 ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
940 if (rx_ev_ip_frag_err)
941 ++rx_queue->channel->n_rx_ip_frag_err;
942
943 /* The frame must be discarded if any of these are true. */
944 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
945 rx_ev_tobe_disc | rx_ev_pause_frm);
946
947 /* TOBE_DISC is expected on unicast mismatches; don't print out an
948 * error message. FRM_TRUNC indicates RXDP dropped the packet due
949 * to a FIFO overflow.
950 */
951#ifdef EFX_ENABLE_DEBUG
952 if (rx_ev_other_err) {
953 EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
954 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n",
955 rx_queue->queue, EFX_QWORD_VAL(*event),
956 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
957 rx_ev_ip_hdr_chksum_err ?
958 " [IP_HDR_CHKSUM_ERR]" : "",
959 rx_ev_tcp_udp_chksum_err ?
960 " [TCP_UDP_CHKSUM_ERR]" : "",
961 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
962 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
963 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
964 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
965 rx_ev_pause_frm ? " [PAUSE]" : "",
966 snap ? " [SNAP/LLC]" : "");
967 }
968#endif
969
970 if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
971 efx->phy_type == PHY_TYPE_10XPRESS))
972 tenxpress_crc_err(efx);
973}
974
975/* Handle receive events that are not in-order. */
976static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
977 unsigned index)
978{
979 struct efx_nic *efx = rx_queue->efx;
980 unsigned expected, dropped;
981
982 expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
983 dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
984 FALCON_RXD_RING_MASK);
985 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
986 dropped, index, expected);
987
988 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
989 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
990}
991
992/* Handle a packet received event
993 *
994 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
995 * wrong destination address.
996 * The "is multicast" and "matches multicast filter" flags can also be
997 * used to discard non-matching multicast packets.
998 */
999static inline int falcon_handle_rx_event(struct efx_channel *channel,
1000 const efx_qword_t *event)
1001{
1002 unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
1003 unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt;
1004 unsigned expected_ptr;
1005 int discard = 0, checksummed;
1006 struct efx_rx_queue *rx_queue;
1007 struct efx_nic *efx = channel->efx;
1008
1009 /* Basic packet information */
1010 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
1011 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
1012 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
1013 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
1014 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
1015
1016 rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL);
1017 rx_queue = &efx->rx_queue[rx_ev_q_label];
1018
1019 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
1020 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
1021 if (unlikely(rx_ev_desc_ptr != expected_ptr)) {
1022 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
1023 return rx_ev_q_label;
1024 }
1025
1026 if (likely(rx_ev_pkt_ok)) {
1027 /* If packet is marked as OK and packet type is TCP/IPv4 or
1028 * UDP/IPv4, then we can rely on the hardware checksum.
1029 */
1030 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
1031 } else {
1032 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
1033 &discard, rx_ev_byte_cnt);
1034 checksummed = 0;
1035 }
1036
1037 /* Detect multicast packets that didn't match the filter */
1038 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
1039 if (rx_ev_mcast_pkt) {
1040 unsigned int rx_ev_mcast_hash_match =
1041 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
1042
1043 if (unlikely(!rx_ev_mcast_hash_match))
1044 discard = 1;
1045 }
1046
1047 /* Handle received packet */
1048 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
1049 checksummed, discard);
1050
1051 return rx_ev_q_label;
1052}
1053
1054/* Global events are basically PHY events */
1055static void falcon_handle_global_event(struct efx_channel *channel,
1056 efx_qword_t *event)
1057{
1058 struct efx_nic *efx = channel->efx;
1059 int is_phy_event = 0, handled = 0;
1060
1061 /* Check for interrupt on either port. Some boards have a
1062 * single PHY wired to the interrupt line for port 1. */
1063 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
1064 EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
1065 EFX_QWORD_FIELD(*event, XG_PHY_INTR))
1066 is_phy_event = 1;
1067
1068 if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
1069 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
1070 is_phy_event = 1;
1071
1072 if (is_phy_event) {
1073 efx->phy_op->clear_interrupt(efx);
1074 queue_work(efx->workqueue, &efx->reconfigure_work);
1075 handled = 1;
1076 }
1077
1078 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
1079 EFX_ERR(efx, "channel %d seen global RX_RESET "
1080 "event. Resetting.\n", channel->channel);
1081
1082 atomic_inc(&efx->rx_reset);
1083 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
1084 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1085 handled = 1;
1086 }
1087
1088 if (!handled)
1089 EFX_ERR(efx, "channel %d unknown global event "
1090 EFX_QWORD_FMT "\n", channel->channel,
1091 EFX_QWORD_VAL(*event));
1092}
1093
1094static void falcon_handle_driver_event(struct efx_channel *channel,
1095 efx_qword_t *event)
1096{
1097 struct efx_nic *efx = channel->efx;
1098 unsigned int ev_sub_code;
1099 unsigned int ev_sub_data;
1100
1101 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
1102 ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);
1103
1104 switch (ev_sub_code) {
1105 case TX_DESCQ_FLS_DONE_EV_DECODE:
1106 EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
1107 channel->channel, ev_sub_data);
1108 break;
1109 case RX_DESCQ_FLS_DONE_EV_DECODE:
1110 EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
1111 channel->channel, ev_sub_data);
1112 break;
1113 case EVQ_INIT_DONE_EV_DECODE:
1114 EFX_LOG(efx, "channel %d EVQ %d initialised\n",
1115 channel->channel, ev_sub_data);
1116 break;
1117 case SRM_UPD_DONE_EV_DECODE:
1118 EFX_TRACE(efx, "channel %d SRAM update done\n",
1119 channel->channel);
1120 break;
1121 case WAKE_UP_EV_DECODE:
1122 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
1123 channel->channel, ev_sub_data);
1124 break;
1125 case TIMER_EV_DECODE:
1126 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
1127 channel->channel, ev_sub_data);
1128 break;
1129 case RX_RECOVERY_EV_DECODE:
1130 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
1131 "Resetting.\n", channel->channel);
1132 efx_schedule_reset(efx,
1133 EFX_WORKAROUND_6555(efx) ?
1134 RESET_TYPE_RX_RECOVERY :
1135 RESET_TYPE_DISABLE);
1136 break;
1137 case RX_DSC_ERROR_EV_DECODE:
1138 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
1139 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1140 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1141 break;
1142 case TX_DSC_ERROR_EV_DECODE:
1143 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
1144 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1145 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1146 break;
1147 default:
1148 EFX_TRACE(efx, "channel %d unknown driver event code %d "
1149 "data %04x\n", channel->channel, ev_sub_code,
1150 ev_sub_data);
1151 break;
1152 }
1153}
1154
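/* Process events on this channel, handling at most *rx_quota RX packet
 * events.  Returns a bitmask with bit N set for each RX DMA queue N that
 * had at least one packet event; *rx_quota is decremented per RX event.
 */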
1155int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
1156{
1157 unsigned int read_ptr;
1158 efx_qword_t event, *p_event;
1159 int ev_code;
1160 int rxq;
1161 int rxdmaqs = 0;
1162
1163 read_ptr = channel->eventq_read_ptr;
1164
1165 do {
1166 p_event = falcon_event(channel, read_ptr);
1167 event = *p_event;
1168
1169 if (!falcon_event_present(&event))
1170 /* End of events */
1171 break;
1172
1173 EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
1174 channel->channel, EFX_QWORD_VAL(event));
1175
1176 /* Clear this event by marking it all ones */
1177 EFX_SET_QWORD(*p_event);
1178
1179 ev_code = EFX_QWORD_FIELD(event, EV_CODE);
1180
1181 switch (ev_code) {
1182 case RX_IP_EV_DECODE:
1183 rxq = falcon_handle_rx_event(channel, &event);
1184 rxdmaqs |= (1 << rxq);
1185 (*rx_quota)--;
1186 break;
1187 case TX_IP_EV_DECODE:
1188 falcon_handle_tx_event(channel, &event);
1189 break;
1190 case DRV_GEN_EV_DECODE:
1191 channel->eventq_magic
1192 = EFX_QWORD_FIELD(event, EVQ_MAGIC);
1193 EFX_LOG(channel->efx, "channel %d received generated "
1194 "event "EFX_QWORD_FMT"\n", channel->channel,
1195 EFX_QWORD_VAL(event));
1196 break;
1197 case GLOBAL_EV_DECODE:
1198 falcon_handle_global_event(channel, &event);
1199 break;
1200 case DRIVER_EV_DECODE:
1201 falcon_handle_driver_event(channel, &event);
1202 break;
1203 default:
1204 EFX_ERR(channel->efx, "channel %d unknown event type %d"
1205 " (data " EFX_QWORD_FMT ")\n", channel->channel,
1206 ev_code, EFX_QWORD_VAL(event));
1207 }
1208
1209 /* Increment read pointer */
1210 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
1211
1212 } while (*rx_quota);
1213
1214 channel->eventq_read_ptr = read_ptr;
1215 return rxdmaqs;
1216}
1217
1218void falcon_set_int_moderation(struct efx_channel *channel)
1219{
1220 efx_dword_t timer_cmd;
1221 struct efx_nic *efx = channel->efx;
1222
1223 /* Set timer register */
1224 if (channel->irq_moderation) {
1225		/* Round to the resolution supported by the hardware.  The
1226		 * value we program is zero-based, so the actual interrupt
1227		 * moderation achieved is ((x + 1) * res).
1228		 */
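		/* For example, a requested value of 17 rounds down to 15,
		 * so TIMER_VAL = 15/5 - 1 = 2 and the achieved moderation
		 * is (2 + 1) * 5 = 15.
		 */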
1229 unsigned int res = 5;
1230 channel->irq_moderation -= (channel->irq_moderation % res);
1231 if (channel->irq_moderation < res)
1232 channel->irq_moderation = res;
1233 EFX_POPULATE_DWORD_2(timer_cmd,
1234 TIMER_MODE, TIMER_MODE_INT_HLDOFF,
1235 TIMER_VAL,
1236 (channel->irq_moderation / res) - 1);
1237 } else {
1238 EFX_POPULATE_DWORD_2(timer_cmd,
1239 TIMER_MODE, TIMER_MODE_DIS,
1240 TIMER_VAL, 0);
1241 }
1242 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
1243 channel->evqnum);
1244
1245}
1246
1247/* Allocate buffer table entries for event queue */
1248int falcon_probe_eventq(struct efx_channel *channel)
1249{
1250 struct efx_nic *efx = channel->efx;
1251 unsigned int evq_size;
1252
1253 evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
1254 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
1255}
1256
1257int falcon_init_eventq(struct efx_channel *channel)
1258{
1259 efx_oword_t evq_ptr;
1260 struct efx_nic *efx = channel->efx;
1261 int rc;
1262
1263 EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
1264 channel->channel, channel->eventq.index,
1265 channel->eventq.index + channel->eventq.entries - 1);
1266
1267 /* Pin event queue buffer */
1268 rc = falcon_init_special_buffer(efx, &channel->eventq);
1269 if (rc)
1270 return rc;
1271
1272 /* Fill event queue with all ones (i.e. empty events) */
1273 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1274
1275 /* Push event queue to card */
1276 EFX_POPULATE_OWORD_3(evq_ptr,
1277 EVQ_EN, 1,
1278 EVQ_SIZE, FALCON_EVQ_ORDER,
1279 EVQ_BUF_BASE_ID, channel->eventq.index);
1280 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
1281 channel->evqnum);
1282
1283 falcon_set_int_moderation(channel);
1284
1285 return 0;
1286}
1287
1288void falcon_fini_eventq(struct efx_channel *channel)
1289{
1290 efx_oword_t eventq_ptr;
1291 struct efx_nic *efx = channel->efx;
1292
1293 /* Remove event queue from card */
1294 EFX_ZERO_OWORD(eventq_ptr);
1295 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
1296 channel->evqnum);
1297
1298 /* Unpin event queue */
1299 falcon_fini_special_buffer(efx, &channel->eventq);
1300}
1301
1302/* Free buffers backing event queue */
1303void falcon_remove_eventq(struct efx_channel *channel)
1304{
1305 falcon_free_special_buffer(channel->efx, &channel->eventq);
1306}
1307
1308
1309/* Generates a test event on the event queue. A subsequent call to
1310 * process_eventq() should pick up the event and place the value of
1311 * "magic" into channel->eventq_magic.
1312 */
1313void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1314{
1315 efx_qword_t test_event;
1316
1317 EFX_POPULATE_QWORD_2(test_event,
1318 EV_CODE, DRV_GEN_EV_DECODE,
1319 EVQ_MAGIC, magic);
1320 falcon_generate_event(channel, &test_event);
1321}
1322
1323
1324/**************************************************************************
1325 *
1326 * Falcon hardware interrupts
1327 * The hardware interrupt handler does very little work; all the event
1328 * queue processing is carried out by per-channel tasklets.
1329 *
1330 **************************************************************************/
1331
1332/* Enable/disable/generate Falcon interrupts */
1333static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
1334 int force)
1335{
1336 efx_oword_t int_en_reg_ker;
1337
1338 EFX_POPULATE_OWORD_2(int_en_reg_ker,
1339 KER_INT_KER, force,
1340 DRV_INT_EN_KER, enabled);
1341 falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
1342}
1343
1344void falcon_enable_interrupts(struct efx_nic *efx)
1345{
1346 efx_oword_t int_adr_reg_ker;
1347 struct efx_channel *channel;
1348
1349 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1350 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1351
1352 /* Program address */
1353 EFX_POPULATE_OWORD_2(int_adr_reg_ker,
1354 NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
1355 INT_ADR_KER, efx->irq_status.dma_addr);
1356 falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);
1357
1358 /* Enable interrupts */
1359 falcon_interrupts(efx, 1, 0);
1360
1361 /* Force processing of all the channels to get the EVQ RPTRs up to
1362 date */
1363 efx_for_each_channel_with_interrupt(channel, efx)
1364 efx_schedule_channel(channel);
1365}
1366
1367void falcon_disable_interrupts(struct efx_nic *efx)
1368{
1369 /* Disable interrupts */
1370 falcon_interrupts(efx, 0, 0);
1371}
1372
1373/* Generate a Falcon test interrupt
1374 * Interrupts must already have been enabled, otherwise nasty things
1375 * may happen.
1376 */
1377void falcon_generate_interrupt(struct efx_nic *efx)
1378{
1379 falcon_interrupts(efx, 1, 1);
1380}
1381
1382/* Acknowledge a legacy interrupt from Falcon
1383 *
1384 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
1385 *
1386 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
1387 * BIU.  The interrupt acknowledge is read-sensitive, so we must write
1388 * instead (and then read back to ensure the BIU collector is flushed).
1389 *
1390 * NB most hardware supports MSI interrupts
1391 */
1392static inline void falcon_irq_ack_a1(struct efx_nic *efx)
1393{
1394 efx_dword_t reg;
1395
1396 EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
1397 falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
1398 falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
1399}
1400
1401/* Process a fatal interrupt
1402 * Disable bus mastering ASAP and schedule a reset
1403 */
1404static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1405{
1406 struct falcon_nic_data *nic_data = efx->nic_data;
1407 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1408 efx_oword_t fatal_intr;
1409 int error, mem_perr;
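	/* Note: static, so this count of fatal errors is shared across
	 * all Falcon NICs driven by this module.
	 */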
1410 static int n_int_errors;
1411
1412 falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
1413 error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
1414
1415 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
1416 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1417 EFX_OWORD_VAL(fatal_intr),
1418 error ? "disabling bus mastering" : "no recognised error");
1419 if (error == 0)
1420 goto out;
1421
1422 /* If this is a memory parity error dump which blocks are offending */
1423 mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
1424 if (mem_perr) {
1425 efx_oword_t reg;
1426 falcon_read(efx, &reg, MEM_STAT_REG_KER);
1427 EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
1428 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1429 }
1430
1431 /* Disable DMA bus mastering on both devices */
1432 pci_disable_device(efx->pci_dev);
1433 if (FALCON_IS_DUAL_FUNC(efx))
1434 pci_disable_device(nic_data->pci_dev2);
1435
1436 if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
1437 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
1438 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1439 } else {
1440 EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
1441			" NIC will be disabled\n");
1442 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1443 }
1444out:
1445 return IRQ_HANDLED;
1446}
1447
1448/* Handle a legacy interrupt from Falcon
1449 * Acknowledges the interrupt and schedules event queue processing.
1450 */
1451static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1452{
1453 struct efx_nic *efx = (struct efx_nic *)dev_id;
1454 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1455 struct efx_channel *channel;
1456 efx_dword_t reg;
1457 u32 queues;
1458 int syserr;
1459
1460 /* Read the ISR which also ACKs the interrupts */
1461 falcon_readl(efx, &reg, INT_ISR0_B0);
1462 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1463
1464 /* Check to see if we have a serious error condition */
1465 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1466 if (unlikely(syserr))
1467 return falcon_fatal_interrupt(efx);
1468
1469 if (queues == 0)
1470 return IRQ_NONE;
1471
1472 efx->last_irq_cpu = raw_smp_processor_id();
1473 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1474 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1475
1476 /* Schedule processing of any interrupting queues */
1477 channel = &efx->channel[0];
1478 while (queues) {
1479 if (queues & 0x01)
1480 efx_schedule_channel(channel);
1481 channel++;
1482 queues >>= 1;
1483 }
1484
1485 return IRQ_HANDLED;
1486}
1487
1488
1489static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1490{
1491 struct efx_nic *efx = (struct efx_nic *)dev_id;
1492 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1493 struct efx_channel *channel;
1494 int syserr;
1495 int queues;
1496
1497 /* Check to see if this is our interrupt. If it isn't, we
1498 * exit without having touched the hardware.
1499 */
1500 if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
1501 EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
1502 raw_smp_processor_id());
1503 return IRQ_NONE;
1504 }
1505 efx->last_irq_cpu = raw_smp_processor_id();
1506 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1507 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1508
1509 /* Check to see if we have a serious error condition */
1510 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1511 if (unlikely(syserr))
1512 return falcon_fatal_interrupt(efx);
1513
1514 /* Determine interrupting queues, clear interrupt status
1515 * register and acknowledge the device interrupt.
1516 */
1517 BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
1518 queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
1519 EFX_ZERO_OWORD(*int_ker);
1520 wmb(); /* Ensure the vector is cleared before interrupt ack */
1521 falcon_irq_ack_a1(efx);
1522
1523 /* Schedule processing of any interrupting queues */
1524 channel = &efx->channel[0];
1525 while (queues) {
1526 if (queues & 0x01)
1527 efx_schedule_channel(channel);
1528 channel++;
1529 queues >>= 1;
1530 }
1531
1532 return IRQ_HANDLED;
1533}
1534
1535/* Handle an MSI interrupt from Falcon
1536 *
1537 * Handle an MSI hardware interrupt. This routine schedules event
1538 * queue processing. No interrupt acknowledgement cycle is necessary.
1539 * Also, we never need to check that the interrupt is for us, since
1540 * MSI interrupts cannot be shared.
1541 */
1542static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
1543{
1544 struct efx_channel *channel = (struct efx_channel *)dev_id;
1545 struct efx_nic *efx = channel->efx;
1546 efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
1547 int syserr;
1548
1549 efx->last_irq_cpu = raw_smp_processor_id();
1550 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1551 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1552
1553 /* Check to see if we have a serious error condition */
1554 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
1555 if (unlikely(syserr))
1556 return falcon_fatal_interrupt(efx);
1557
1558 /* Schedule processing of the channel */
1559 efx_schedule_channel(channel);
1560
1561 return IRQ_HANDLED;
1562}
1563
1564
1565/* Set up the RSS indirection table.
1566 * This maps the hash value of a received packet to an RX queue.
1567 */
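/* The table has 0x800 / 0x10 = 128 entries; filling entry i with
 * i % rss_queues spreads the hash buckets round-robin across the RX queues.
 */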
1568static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1569{
1570 int i = 0;
1571 unsigned long offset;
1572 efx_dword_t dword;
1573
1574 if (FALCON_REV(efx) < FALCON_REV_B0)
1575 return;
1576
1577 for (offset = RX_RSS_INDIR_TBL_B0;
1578 offset < RX_RSS_INDIR_TBL_B0 + 0x800;
1579 offset += 0x10) {
1580 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
1581 i % efx->rss_queues);
1582 falcon_writel(efx, &dword, offset);
1583 i++;
1584 }
1585}
1586
1587/* Hook interrupt handler(s)
1588 * Try MSI and then legacy interrupts.
1589 */
1590int falcon_init_interrupt(struct efx_nic *efx)
1591{
1592 struct efx_channel *channel;
1593 int rc;
1594
1595 if (!EFX_INT_MODE_USE_MSI(efx)) {
1596 irq_handler_t handler;
1597 if (FALCON_REV(efx) >= FALCON_REV_B0)
1598 handler = falcon_legacy_interrupt_b0;
1599 else
1600 handler = falcon_legacy_interrupt_a1;
1601
1602 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1603 efx->name, efx);
1604 if (rc) {
1605 EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
1606 efx->pci_dev->irq);
1607 goto fail1;
1608 }
1609 return 0;
1610 }
1611
1612 /* Hook MSI or MSI-X interrupt */
1613 efx_for_each_channel_with_interrupt(channel, efx) {
1614 rc = request_irq(channel->irq, falcon_msi_interrupt,
1615 IRQF_PROBE_SHARED, /* Not shared */
1616 efx->name, channel);
1617 if (rc) {
1618 EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
1619 goto fail2;
1620 }
1621 }
1622
1623 return 0;
1624
1625 fail2:
1626 efx_for_each_channel_with_interrupt(channel, efx)
1627 free_irq(channel->irq, channel);
1628 fail1:
1629 return rc;
1630}
1631
1632void falcon_fini_interrupt(struct efx_nic *efx)
1633{
1634 struct efx_channel *channel;
1635 efx_oword_t reg;
1636
1637 /* Disable MSI/MSI-X interrupts */
1638 efx_for_each_channel_with_interrupt(channel, efx)
1639 if (channel->irq)
1640 free_irq(channel->irq, channel);
1641
1642 /* ACK legacy interrupt */
1643 if (FALCON_REV(efx) >= FALCON_REV_B0)
1644 falcon_read(efx, &reg, INT_ISR0_B0);
1645 else
1646 falcon_irq_ack_a1(efx);
1647
1648 /* Disable legacy interrupt */
1649 if (efx->legacy_irq)
1650 free_irq(efx->legacy_irq, efx);
1651}
1652
1653/**************************************************************************
1654 *
1655 * EEPROM/flash
1656 *
1657 **************************************************************************
1658 */
1659
1660#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
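/* i.e. 16 bytes: the EE_SPI_HDATA_REG_KER data register is one oword wide,
 * so a single SPI command transfers at most that much.
 */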
1661
1662/* Wait for SPI command completion */
1663static int falcon_spi_wait(struct efx_nic *efx)
1664{
1665 efx_oword_t reg;
1666 int cmd_en, timer_active;
1667 int count;
1668
1669 count = 0;
1670 do {
1671 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
1672 cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
1673 timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
1674 if (!cmd_en && !timer_active)
1675 return 0;
1676 udelay(10);
1677	} while (++count < 10000);	/* wait up to 100 ms */
1678 EFX_ERR(efx, "timed out waiting for SPI\n");
1679 return -ETIMEDOUT;
1680}
1681
1682static int
1683falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command,
1684 unsigned int address, unsigned int addr_len,
1685 void *data, unsigned int len)
1686{
1687 efx_oword_t reg;
1688 int rc;
1689
1690 BUG_ON(len > FALCON_SPI_MAX_LEN);
1691
1692 /* Check SPI not currently being accessed */
1693 rc = falcon_spi_wait(efx);
1694 if (rc)
1695 return rc;
1696
1697 /* Program address register */
1698 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
1699 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
1700
1701 /* Issue read command */
1702 EFX_POPULATE_OWORD_7(reg,
1703 EE_SPI_HCMD_CMD_EN, 1,
1704 EE_SPI_HCMD_SF_SEL, device_id,
1705 EE_SPI_HCMD_DABCNT, len,
1706 EE_SPI_HCMD_READ, EE_SPI_READ,
1707 EE_SPI_HCMD_DUBCNT, 0,
1708 EE_SPI_HCMD_ADBCNT, addr_len,
1709 EE_SPI_HCMD_ENC, command);
1710 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
1711
1712 /* Wait for read to complete */
1713 rc = falcon_spi_wait(efx);
1714 if (rc)
1715 return rc;
1716
1717 /* Read data */
1718 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
1719 memcpy(data, &reg, len);
1720 return 0;
1721}
1722
1723/**************************************************************************
1724 *
1725 * MAC wrapper
1726 *
1727 **************************************************************************
1728 */
1729void falcon_drain_tx_fifo(struct efx_nic *efx)
1730{
1731 efx_oword_t temp;
1732 int count;
1733
1734 if (FALCON_REV(efx) < FALCON_REV_B0)
1735 return;
1736
1737 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
1738 /* There is no point in draining more than once */
1739 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
1740 return;
1741
1742 /* MAC stats will fail whilst the TX fifo is draining. Serialise
1743 * the drain sequence with the statistics fetch */
1744 spin_lock(&efx->stats_lock);
1745
1746 EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1);
1747 falcon_write(efx, &temp, MAC0_CTRL_REG_KER);
1748
1749 /* Reset the MAC and EM block. */
1750 falcon_read(efx, &temp, GLB_CTL_REG_KER);
1751 EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1);
1752 EFX_SET_OWORD_FIELD(temp, RST_XGRX, 1);
1753 EFX_SET_OWORD_FIELD(temp, RST_EM, 1);
1754 falcon_write(efx, &temp, GLB_CTL_REG_KER);
1755
1756 count = 0;
1757 while (1) {
1758 falcon_read(efx, &temp, GLB_CTL_REG_KER);
1759 if (!EFX_OWORD_FIELD(temp, RST_XGTX) &&
1760 !EFX_OWORD_FIELD(temp, RST_XGRX) &&
1761 !EFX_OWORD_FIELD(temp, RST_EM)) {
1762 EFX_LOG(efx, "Completed MAC reset after %d loops\n",
1763 count);
1764 break;
1765 }
1766 if (count > 20) {
1767 EFX_ERR(efx, "MAC reset failed\n");
1768 break;
1769 }
1770 count++;
1771 udelay(10);
1772 }
1773
1774 spin_unlock(&efx->stats_lock);
1775
1776 /* If we've reset the EM block and the link is up, then
1777 * we'll have to kick the XAUI link so the PHY can recover */
1778 if (efx->link_up && EFX_WORKAROUND_5147(efx))
1779 falcon_reset_xaui(efx);
1780}
1781
1782void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1783{
1784 efx_oword_t temp;
1785
1786 if (FALCON_REV(efx) < FALCON_REV_B0)
1787 return;
1788
1789 /* Isolate the MAC -> RX */
1790 falcon_read(efx, &temp, RX_CFG_REG_KER);
1791 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 0);
1792 falcon_write(efx, &temp, RX_CFG_REG_KER);
1793
1794 if (!efx->link_up)
1795 falcon_drain_tx_fifo(efx);
1796}
1797
1798void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1799{
1800 efx_oword_t reg;
1801 int link_speed;
1802 unsigned int tx_fc;
1803
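	/* Encode the MAC_SPEED field: 0x3 = 10G, 0x2 = 1G, 0x1 = 100M,
	 * and 0x0 otherwise (e.g. 10M or link down).
	 */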
1804 if (efx->link_options & GM_LPA_10000)
1805 link_speed = 0x3;
1806 else if (efx->link_options & GM_LPA_1000)
1807 link_speed = 0x2;
1808 else if (efx->link_options & GM_LPA_100)
1809 link_speed = 0x1;
1810 else
1811 link_speed = 0x0;
1812	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work as
1813	 * advertised.  Keep it set so that packets are not held
1814	 * indefinitely and the TX queue can be flushed at any point
1815	 * while the link is down. */
1816 EFX_POPULATE_OWORD_5(reg,
1817 MAC_XOFF_VAL, 0xffff /* max pause time */,
1818 MAC_BCAD_ACPT, 1,
1819 MAC_UC_PROM, efx->promiscuous,
1820 MAC_LINK_STATUS, 1, /* always set */
1821 MAC_SPEED, link_speed);
1822 /* On B0, MAC backpressure can be disabled and packets get
1823 * discarded. */
1824 if (FALCON_REV(efx) >= FALCON_REV_B0) {
1825 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
1826 !efx->link_up);
1827 }
1828
1829 falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
1830
1831 /* Restore the multicast hash registers. */
1832 falcon_set_multicast_hash(efx);
1833
1834 /* Transmission of pause frames when RX crosses the threshold is
1835 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
1836	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
1837 tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
1838 falcon_read(efx, &reg, RX_CFG_REG_KER);
1839 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
1840
1841 /* Unisolate the MAC -> RX */
1842 if (FALCON_REV(efx) >= FALCON_REV_B0)
1843 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
1844 falcon_write(efx, &reg, RX_CFG_REG_KER);
1845}
1846
1847int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
1848{
1849 efx_oword_t reg;
1850 u32 *dma_done;
1851 int i;
1852
1853 if (disable_dma_stats)
1854 return 0;
1855
1856 /* Statistics fetch will fail if the MAC is in TX drain */
1857 if (FALCON_REV(efx) >= FALCON_REV_B0) {
1858 efx_oword_t temp;
1859 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
1860 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
1861 return 0;
1862 }
1863
1864 dma_done = (efx->stats_buffer.addr + done_offset);
1865 *dma_done = FALCON_STATS_NOT_DONE;
1866 wmb(); /* ensure done flag is clear */
1867
1868 /* Initiate DMA transfer of stats */
1869 EFX_POPULATE_OWORD_2(reg,
1870 MAC_STAT_DMA_CMD, 1,
1871 MAC_STAT_DMA_ADR,
1872 efx->stats_buffer.dma_addr);
1873 falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);
1874
1875	/* Wait for the transfer to complete (up to 400 * 10 us = 4 ms) */
1876 for (i = 0; i < 400; i++) {
1877 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE)
1878 return 0;
1879 udelay(10);
1880 }
1881
1882 EFX_ERR(efx, "timed out waiting for statistics\n");
1883 return -ETIMEDOUT;
1884}
1885
1886/**************************************************************************
1887 *
1888 * PHY access via GMII
1889 *
1890 **************************************************************************
1891 */
1892
1893/* Use the top bit of the MII PHY id to indicate the PHY type
1894 * (1G/10G), with the remaining bits as the actual PHY id.
1895 *
1896 * This allows us to avoid leaking information from the mii_if_info
1897 * structure into other data structures.
1898 */
1899#define FALCON_PHY_ID_ID_WIDTH EFX_WIDTH(MD_PRT_DEV_ADR)
1900#define FALCON_PHY_ID_ID_MASK ((1 << FALCON_PHY_ID_ID_WIDTH) - 1)
1901#define FALCON_PHY_ID_WIDTH (FALCON_PHY_ID_ID_WIDTH + 1)
1902#define FALCON_PHY_ID_MASK ((1 << FALCON_PHY_ID_WIDTH) - 1)
1903#define FALCON_PHY_ID_10G (1 << (FALCON_PHY_ID_WIDTH - 1))
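/* For example, if MD_PRT_DEV_ADR is 10 bits wide, bits 0-9 of the PHY id
 * carry the packed port/device address and bit 10 (FALCON_PHY_ID_10G)
 * flags a 10G PHY, so a 10G PHY at packed address 5 would be encoded as
 * (FALCON_PHY_ID_10G | 5).
 */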
1904
1905
1906/* Packing the clause 45 port and device fields into a single value */
1907#define MD_PRT_ADR_COMP_LBN (MD_PRT_ADR_LBN - MD_DEV_ADR_LBN)
1908#define MD_PRT_ADR_COMP_WIDTH MD_PRT_ADR_WIDTH
1909#define MD_DEV_ADR_COMP_LBN 0
1910#define MD_DEV_ADR_COMP_WIDTH MD_DEV_ADR_WIDTH
1911
1912
1913/* Wait for GMII access to complete */
1914static int falcon_gmii_wait(struct efx_nic *efx)
1915{
1916 efx_dword_t md_stat;
1917 int count;
1918
1919	for (count = 0; count < 1000; count++) {	/* wait up to 10 ms */
1920 falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
1921 if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
1922 if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
1923 EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
1924 EFX_ERR(efx, "error from GMII access "
1925 EFX_DWORD_FMT"\n",
1926 EFX_DWORD_VAL(md_stat));
1927 return -EIO;
1928 }
1929 return 0;
1930 }
1931 udelay(10);
1932 }
1933 EFX_ERR(efx, "timed out waiting for GMII\n");
1934 return -ETIMEDOUT;
1935}
1936
1937/* Writes a GMII register of a PHY connected to Falcon using MDIO. */
1938static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
1939 int addr, int value)
1940{
1941 struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
1942 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
1943 efx_oword_t reg;
1944
1945 /* The 'generic' prt/dev packing in mdio_10g.h is conveniently
1946 * chosen so that the only current user, Falcon, can take the
1947	 * packed value and use it directly.
1948 * Fail to build if this assumption is broken.
1949 */
1950 BUILD_BUG_ON(FALCON_PHY_ID_10G != MDIO45_XPRT_ID_IS10G);
1951 BUILD_BUG_ON(FALCON_PHY_ID_ID_WIDTH != MDIO45_PRT_DEV_WIDTH);
1952 BUILD_BUG_ON(MD_PRT_ADR_COMP_LBN != MDIO45_PRT_ID_COMP_LBN);
1953 BUILD_BUG_ON(MD_DEV_ADR_COMP_LBN != MDIO45_DEV_ID_COMP_LBN);
1954
1955 if (phy_id2 == PHY_ADDR_INVALID)
1956 return;
1957
1958 /* See falcon_mdio_read for an explanation. */
1959 if (!(phy_id & FALCON_PHY_ID_10G)) {
1960 int mmd = ffs(efx->phy_op->mmds) - 1;
1961 EFX_TRACE(efx, "Fixing erroneous clause22 write\n");
1962 phy_id2 = mdio_clause45_pack(phy_id2, mmd)
1963 & FALCON_PHY_ID_ID_MASK;
1964 }
1965
1966 EFX_REGDUMP(efx, "writing GMII %d register %02x with %04x\n", phy_id,
1967 addr, value);
1968
1969 spin_lock_bh(&efx->phy_lock);
1970
1971 /* Check MII not currently being accessed */
1972 if (falcon_gmii_wait(efx) != 0)
1973 goto out;
1974
1975 /* Write the address/ID register */
1976 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
1977 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
1978
1979 EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_id2);
1980 falcon_write(efx, &reg, MD_ID_REG_KER);
1981
1982 /* Write data */
1983 EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
1984 falcon_write(efx, &reg, MD_TXD_REG_KER);
1985
1986 EFX_POPULATE_OWORD_2(reg,
1987 MD_WRC, 1,
1988 MD_GC, 0);
1989 falcon_write(efx, &reg, MD_CS_REG_KER);
1990
1991 /* Wait for data to be written */
1992 if (falcon_gmii_wait(efx) != 0) {
1993 /* Abort the write operation */
1994 EFX_POPULATE_OWORD_2(reg,
1995 MD_WRC, 0,
1996 MD_GC, 1);
1997 falcon_write(efx, &reg, MD_CS_REG_KER);
1998 udelay(10);
1999 }
2000
2001 out:
2002 spin_unlock_bh(&efx->phy_lock);
2003}
2004
2005/* Reads a GMII register from a PHY connected to Falcon. If no value
2006 * could be read, -1 will be returned. */
2007static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
2008{
2009 struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
2010 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
2011 efx_oword_t reg;
2012 int value = -1;
2013
2014 if (phy_addr == PHY_ADDR_INVALID)
2015 return -1;
2016
2017 /* Our PHY code knows whether it needs to talk clause 22(1G) or 45(10G)
2018 * but the generic Linux code does not make any distinction or have
2019 * any state for this.
2020 * We spot the case where someone tried to talk 22 to a 45 PHY and
2021 * redirect the request to the lowest numbered MMD as a clause45
2022 * request. This is enough to allow simple queries like id and link
2023 * state to succeed. TODO: We may need to do more in future.
2024 */
2025 if (!(phy_id & FALCON_PHY_ID_10G)) {
2026 int mmd = ffs(efx->phy_op->mmds) - 1;
2027 EFX_TRACE(efx, "Fixing erroneous clause22 read\n");
2028 phy_addr = mdio_clause45_pack(phy_addr, mmd)
2029 & FALCON_PHY_ID_ID_MASK;
2030 }
2031
2032 spin_lock_bh(&efx->phy_lock);
2033
2034 /* Check MII not currently being accessed */
2035 if (falcon_gmii_wait(efx) != 0)
2036 goto out;
2037
2038 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
2039 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
2040
2041 EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_addr);
2042 falcon_write(efx, &reg, MD_ID_REG_KER);
2043
2044 /* Request data to be read */
2045 EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
2046 falcon_write(efx, &reg, MD_CS_REG_KER);
2047
2048 /* Wait for data to become available */
2049 value = falcon_gmii_wait(efx);
2050 if (value == 0) {
2051 falcon_read(efx, &reg, MD_RXD_REG_KER);
2052 value = EFX_OWORD_FIELD(reg, MD_RXD);
2053 EFX_REGDUMP(efx, "read from GMII %d register %02x, got %04x\n",
2054 phy_id, addr, value);
2055 } else {
2056 /* Abort the read operation */
2057 EFX_POPULATE_OWORD_2(reg,
2058 MD_RIC, 0,
2059 MD_GC, 1);
2060 falcon_write(efx, &reg, MD_CS_REG_KER);
2061
2062 EFX_LOG(efx, "read from GMII 0x%x register %02x, got "
2063 "error %d\n", phy_id, addr, value);
2064 }
2065
2066 out:
2067 spin_unlock_bh(&efx->phy_lock);
2068
2069 return value;
2070}
2071
2072static void falcon_init_mdio(struct mii_if_info *gmii)
2073{
2074 gmii->mdio_read = falcon_mdio_read;
2075 gmii->mdio_write = falcon_mdio_write;
2076 gmii->phy_id_mask = FALCON_PHY_ID_MASK;
2077 gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1);
2078}
2079
2080static int falcon_probe_phy(struct efx_nic *efx)
2081{
2082 switch (efx->phy_type) {
2083 case PHY_TYPE_10XPRESS:
2084 efx->phy_op = &falcon_tenxpress_phy_ops;
2085 break;
2086 case PHY_TYPE_XFP:
2087 efx->phy_op = &falcon_xfp_phy_ops;
2088 break;
2089 default:
2090 EFX_ERR(efx, "Unknown PHY type %d\n",
2091 efx->phy_type);
2092 return -1;
2093 }
2094 return 0;
2095}
2096
2097/* This call is responsible for hooking in the MAC and PHY operations */
2098int falcon_probe_port(struct efx_nic *efx)
2099{
2100 int rc;
2101
2102 /* Hook in PHY operations table */
2103 rc = falcon_probe_phy(efx);
2104 if (rc)
2105 return rc;
2106
2107 /* Set up GMII structure for PHY */
2108 efx->mii.supports_gmii = 1;
2109 falcon_init_mdio(&efx->mii);
2110
2111 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2112 if (FALCON_REV(efx) >= FALCON_REV_B0)
2113 efx->flow_control = EFX_FC_RX | EFX_FC_TX;
2114 else
2115 efx->flow_control = EFX_FC_RX;
2116
2117 /* Allocate buffer for stats */
2118 rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
2119 FALCON_MAC_STATS_SIZE);
2120 if (rc)
2121 return rc;
2122 EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
2123 (unsigned long long)efx->stats_buffer.dma_addr,
2124 efx->stats_buffer.addr,
2125 virt_to_phys(efx->stats_buffer.addr));
2126
2127 return 0;
2128}
2129
2130void falcon_remove_port(struct efx_nic *efx)
2131{
2132 falcon_free_buffer(efx, &efx->stats_buffer);
2133}
2134
2135/**************************************************************************
2136 *
2137 * Multicast filtering
2138 *
2139 **************************************************************************
2140 */
2141
2142void falcon_set_multicast_hash(struct efx_nic *efx)
2143{
2144 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2145
2146 /* Broadcast packets go through the multicast hash filter.
2147 * ether_crc_le() of the broadcast address is 0xbe2612ff
2148 * so we always add bit 0xff to the mask.
2149 */
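	/* The hash register is 256 bits wide (two owords); the filter bit
	 * for an address is selected by the low 8 bits of its ether_crc_le()
	 * value, which is why 0xbe2612ff maps to bit 0xff.
	 */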
2150 set_bit_le(0xff, mc_hash->byte);
2151
2152 falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
2153 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
2154}
2155
2156/**************************************************************************
2157 *
2158 * Device reset
2159 *
2160 **************************************************************************
2161 */
2162
2163/* Resets NIC to known state. This routine must be called in process
2164 * context and is allowed to sleep. */
2165int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2166{
2167 struct falcon_nic_data *nic_data = efx->nic_data;
2168 efx_oword_t glb_ctl_reg_ker;
2169 int rc;
2170
2171 EFX_LOG(efx, "performing hardware reset (%d)\n", method);
2172
2173 /* Initiate device reset */
2174 if (method == RESET_TYPE_WORLD) {
2175 rc = pci_save_state(efx->pci_dev);
2176 if (rc) {
2177 EFX_ERR(efx, "failed to backup PCI state of primary "
2178 "function prior to hardware reset\n");
2179 goto fail1;
2180 }
2181 if (FALCON_IS_DUAL_FUNC(efx)) {
2182 rc = pci_save_state(nic_data->pci_dev2);
2183 if (rc) {
2184 EFX_ERR(efx, "failed to backup PCI state of "
2185 "secondary function prior to "
2186 "hardware reset\n");
2187 goto fail2;
2188 }
2189 }
2190
2191 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2192 EXT_PHY_RST_DUR, 0x7,
2193 SWRST, 1);
2194 } else {
2195 int reset_phy = (method == RESET_TYPE_INVISIBLE ?
2196 EXCLUDE_FROM_RESET : 0);
2197
2198 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2199 EXT_PHY_RST_CTL, reset_phy,
2200 PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
2201 PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
2202 PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
2203 EE_RST_CTL, EXCLUDE_FROM_RESET,
2204 EXT_PHY_RST_DUR, 0x7 /* 10ms */,
2205 SWRST, 1);
2206 }
2207 falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
2208
2209 EFX_LOG(efx, "waiting for hardware reset\n");
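	/* HZ / 20 jiffies = 50 ms, comfortably longer than the 10 ms
	 * EXT_PHY_RST_DUR programmed above.
	 */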
2210 schedule_timeout_uninterruptible(HZ / 20);
2211
2212 /* Restore PCI configuration if needed */
2213 if (method == RESET_TYPE_WORLD) {
2214 if (FALCON_IS_DUAL_FUNC(efx)) {
2215 rc = pci_restore_state(nic_data->pci_dev2);
2216 if (rc) {
2217 EFX_ERR(efx, "failed to restore PCI config for "
2218 "the secondary function\n");
2219 goto fail3;
2220 }
2221 }
2222 rc = pci_restore_state(efx->pci_dev);
2223 if (rc) {
2224 EFX_ERR(efx, "failed to restore PCI config for the "
2225 "primary function\n");
2226 goto fail4;
2227 }
2228 EFX_LOG(efx, "successfully restored PCI config\n");
2229 }
2230
2231 /* Assert that reset complete */
2232 falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
2233 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
2234 rc = -ETIMEDOUT;
2235 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2236 goto fail5;
2237 }
2238 EFX_LOG(efx, "hardware reset complete\n");
2239
2240 return 0;
2241
2242 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
2243fail2:
2244fail3:
2245 pci_restore_state(efx->pci_dev);
2246fail1:
2247fail4:
2248fail5:
2249 return rc;
2250}
2251
2252/* Zeroes out the SRAM contents. This routine must be called in
2253 * process context and is allowed to sleep.
2254 */
2255static int falcon_reset_sram(struct efx_nic *efx)
2256{
2257 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2258 int count;
2259
2260 /* Set the SRAM wake/sleep GPIO appropriately. */
2261 falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
2262 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
2263 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
2264 falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
2265
2266 /* Initiate SRAM reset */
2267 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2268 SRAM_OOB_BT_INIT_EN, 1,
2269 SRM_NUM_BANKS_AND_BANK_SIZE, 0);
2270 falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
2271
2272 /* Wait for SRAM reset to complete */
2273 count = 0;
2274 do {
2275 EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
2276
2277 /* SRAM reset is slow; expect around 16ms */
2278 schedule_timeout_uninterruptible(HZ / 50);
2279
2280 /* Check for reset complete */
2281 falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
2282 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
2283 EFX_LOG(efx, "SRAM reset complete\n");
2284
2285 return 0;
2286 }
2287	} while (++count < 20);	/* wait up to 0.4 s */
2288
2289 EFX_ERR(efx, "timed out waiting for SRAM reset\n");
2290 return -ETIMEDOUT;
2291}
2292
2293/* Extract non-volatile configuration */
2294static int falcon_probe_nvconfig(struct efx_nic *efx)
2295{
2296 struct falcon_nvconfig *nvconfig;
2297 efx_oword_t nic_stat;
2298 int device_id;
2299 unsigned addr_len;
2300 size_t offset, len;
2301 int magic_num, struct_ver, board_rev;
2302 int rc;
2303
2304 /* Find the boot device. */
2305 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2306 if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) {
2307 device_id = EE_SPI_FLASH;
2308 addr_len = 3;
2309 } else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) {
2310 device_id = EE_SPI_EEPROM;
2311 addr_len = 2;
2312 } else {
2313 return -ENODEV;
2314 }
2315
2316	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;
2317
2318 /* Read the whole configuration structure into memory. */
2319 for (offset = 0; offset < sizeof(*nvconfig); offset += len) {
2320 len = min(sizeof(*nvconfig) - offset,
2321 (size_t) FALCON_SPI_MAX_LEN);
2322 rc = falcon_spi_read(efx, device_id, SPI_READ,
2323 NVCONFIG_BASE + offset, addr_len,
2324 (char *)nvconfig + offset, len);
2325 if (rc)
2326 goto out;
2327 }
2328
2329	/* Read the MAC address */
2330 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2331
2332 /* Read the board configuration. */
2333 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2334 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2335
2336 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) {
2337 EFX_ERR(efx, "Non volatile memory bad magic=%x ver=%x "
2338 "therefore using defaults\n", magic_num, struct_ver);
2339 efx->phy_type = PHY_TYPE_NONE;
2340 efx->mii.phy_id = PHY_ADDR_INVALID;
2341 board_rev = 0;
2342 } else {
2343 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2344
2345 efx->phy_type = v2->port0_phy_type;
2346 efx->mii.phy_id = v2->port0_phy_addr;
2347 board_rev = le16_to_cpu(v2->board_revision);
2348 }
2349
2350 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);
2351
2352 efx_set_board_info(efx, board_rev);
2353
2354 out:
2355 kfree(nvconfig);
2356 return rc;
2357}
2358
2359/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2360 * count, port speed). Set workaround and feature flags accordingly.
2361 */
2362static int falcon_probe_nic_variant(struct efx_nic *efx)
2363{
2364 efx_oword_t altera_build;
2365
2366 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
2367 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
2368 EFX_ERR(efx, "Falcon FPGA not supported\n");
2369 return -ENODEV;
2370 }
2371
2372 switch (FALCON_REV(efx)) {
2373 case FALCON_REV_A0:
2374 case 0xff:
2375 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2376 return -ENODEV;
2377
2378 case FALCON_REV_A1:{
2379 efx_oword_t nic_stat;
2380
2381 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2382
2383 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
2384 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2385 return -ENODEV;
2386 }
2387 if (!EFX_OWORD_FIELD(nic_stat, STRAP_10G)) {
2388 EFX_ERR(efx, "1G mode not supported\n");
2389 return -ENODEV;
2390 }
2391 break;
2392 }
2393
2394 case FALCON_REV_B0:
2395 break;
2396
2397 default:
2398 EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
2399 return -ENODEV;
2400 }
2401
2402 return 0;
2403}
2404
2405int falcon_probe_nic(struct efx_nic *efx)
2406{
2407 struct falcon_nic_data *nic_data;
2408 int rc;
2409
2410 /* Initialise I2C interface state */
2411 efx->i2c.efx = efx;
2412 efx->i2c.op = &falcon_i2c_bit_operations;
2413 efx->i2c.sda = 1;
2414 efx->i2c.scl = 1;
2415
2416 /* Allocate storage for hardware specific data */
2417 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2418 efx->nic_data = (void *) nic_data;
2419
2420 /* Determine number of ports etc. */
2421 rc = falcon_probe_nic_variant(efx);
2422 if (rc)
2423 goto fail1;
2424
2425 /* Probe secondary function if expected */
2426 if (FALCON_IS_DUAL_FUNC(efx)) {
2427 struct pci_dev *dev = pci_dev_get(efx->pci_dev);
2428
2429 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
2430 dev))) {
2431 if (dev->bus == efx->pci_dev->bus &&
2432 dev->devfn == efx->pci_dev->devfn + 1) {
2433 nic_data->pci_dev2 = dev;
2434 break;
2435 }
2436 }
2437 if (!nic_data->pci_dev2) {
2438 EFX_ERR(efx, "failed to find secondary function\n");
2439 rc = -ENODEV;
2440 goto fail2;
2441 }
2442 }
2443
2444 /* Now we can reset the NIC */
2445 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
2446 if (rc) {
2447 EFX_ERR(efx, "failed to reset NIC\n");
2448 goto fail3;
2449 }
2450
2451 /* Allocate memory for INT_KER */
2452 rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2453 if (rc)
2454 goto fail4;
2455 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2456
2457 EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
2458 (unsigned long long)efx->irq_status.dma_addr,
2459 efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
2460
2461 /* Read in the non-volatile configuration */
2462 rc = falcon_probe_nvconfig(efx);
2463 if (rc)
2464 goto fail5;
2465
2466 return 0;
2467
2468 fail5:
2469 falcon_free_buffer(efx, &efx->irq_status);
2470 fail4:
2471 /* fall-thru */
2472 fail3:
2473 if (nic_data->pci_dev2) {
2474 pci_dev_put(nic_data->pci_dev2);
2475 nic_data->pci_dev2 = NULL;
2476 }
2477 fail2:
2478 /* fall-thru */
2479 fail1:
2480 kfree(efx->nic_data);
2481 return rc;
2482}
2483
2484/* This call performs hardware-specific global initialisation, such as
2485 * defining the descriptor cache sizes and number of RSS channels.
2486 * It does not set up any buffers, descriptor rings or event queues.
2487 */
2488int falcon_init_nic(struct efx_nic *efx)
2489{
2490 struct falcon_nic_data *data;
2491 efx_oword_t temp;
2492 unsigned thresh;
2493 int rc;
2494
2495 data = (struct falcon_nic_data *)efx->nic_data;
2496
2497 /* Set up the address region register. This is only needed
2498 * for the B0 FPGA, but since we are just pushing in the
2499 * reset defaults this may as well be unconditional. */
2500 EFX_POPULATE_OWORD_4(temp, ADR_REGION0, 0,
2501 ADR_REGION1, (1 << 16),
2502 ADR_REGION2, (2 << 16),
2503 ADR_REGION3, (3 << 16));
2504 falcon_write(efx, &temp, ADR_REGION_REG_KER);
2505
2506 /* Use on-chip SRAM */
2507 falcon_read(efx, &temp, NIC_STAT_REG);
2508 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
2509 falcon_write(efx, &temp, NIC_STAT_REG);
2510
2511 /* Set buffer table mode */
2512 EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
2513 falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
2514
2515 rc = falcon_reset_sram(efx);
2516 if (rc)
2517 return rc;
2518
2519 /* Set positions of descriptor caches in SRAM. */
2520 EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
2521 falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
2522 EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
2523 falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);
2524
2525 /* Set TX descriptor cache size. */
2526 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
2527 EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
2528 falcon_write(efx, &temp, TX_DC_CFG_REG_KER);
2529
2530 /* Set RX descriptor cache size. Set low watermark to size-8, as
2531 * this allows most efficient prefetching.
2532 */
2533 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
2534 EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
2535 falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
2536 EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
2537 falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
2538
2539 /* Clear the parity enables on the TX data fifos as
2540 * they produce false parity errors because of timing issues
2541 */
2542 if (EFX_WORKAROUND_5129(efx)) {
2543 falcon_read(efx, &temp, SPARE_REG_KER);
2544 EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
2545 falcon_write(efx, &temp, SPARE_REG_KER);
2546 }
2547
2548 /* Enable all the genuinely fatal interrupts. (They are still
2549 * masked by the overall interrupt mask, controlled by
2550 * falcon_interrupts()).
2551 *
2552 * Note: All other fatal interrupts are enabled
2553 */
2554 EFX_POPULATE_OWORD_3(temp,
2555 ILL_ADR_INT_KER_EN, 1,
2556 RBUF_OWN_INT_KER_EN, 1,
2557 TBUF_OWN_INT_KER_EN, 1);
2558 EFX_INVERT_OWORD(temp);
2559 falcon_write(efx, &temp, FATAL_INTR_REG_KER);
2560
2561 /* Set number of RSS queues for receive path. */
2562 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2563 if (FALCON_REV(efx) >= FALCON_REV_B0)
2564 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
2565 else
2566 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
2567 if (EFX_WORKAROUND_7244(efx)) {
2568 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
2569 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
2570 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
2571 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
2572 }
2573 falcon_write(efx, &temp, RX_FILTER_CTL_REG);
2574
2575 falcon_setup_rss_indir_table(efx);
2576
2577	/* Set up RX.  Waiting when no RX descriptors are available is broken
2578	 * and must be disabled.  RXDP recovery shouldn't be needed, but is.
2579 */
2580 falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
2581 EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
2582 EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
2583 if (EFX_WORKAROUND_5583(efx))
2584 EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
2585 falcon_write(efx, &temp, RX_SELF_RST_REG_KER);
2586
2587 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
2588 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
2589 */
2590 falcon_read(efx, &temp, TX_CFG2_REG_KER);
2591 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
2592 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
2593 EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
2594 EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
2595 EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
2596 /* Enable SW_EV to inherit in char driver - assume harmless here */
2597 EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
2598 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
2599 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
2600 /* Squash TX of packets of 16 bytes or less */
2601 if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
2602 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
2603 falcon_write(efx, &temp, TX_CFG2_REG_KER);
2604
2605 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
2606 * descriptors (which is bad).
2607 */
2608 falcon_read(efx, &temp, TX_CFG_REG_KER);
2609 EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
2610 falcon_write(efx, &temp, TX_CFG_REG_KER);
2611
2612 /* RX config */
2613 falcon_read(efx, &temp, RX_CFG_REG_KER);
2614 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
2615 if (EFX_WORKAROUND_7575(efx))
2616 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
2617 (3 * 4096) / 32);
2618 if (FALCON_REV(efx) >= FALCON_REV_B0)
2619 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
2620
2621 /* RX FIFO flow control thresholds */
2622 thresh = ((rx_xon_thresh_bytes >= 0) ?
2623 rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
2624 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
2625 thresh = ((rx_xoff_thresh_bytes >= 0) ?
2626 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
2627 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
2628 /* RX control FIFO thresholds [32 entries] */
2629 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25);
2630 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20);
2631 falcon_write(efx, &temp, RX_CFG_REG_KER);
2632
2633 /* Set destination of both TX and RX Flush events */
2634 if (FALCON_REV(efx) >= FALCON_REV_B0) {
2635 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
2636 falcon_write(efx, &temp, DP_CTRL_REG);
2637 }
2638
2639 return 0;
2640}
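/* Editor's note (illustrative, not part of the patch): the BUILD_BUG_ON
 * checks above encode the rule "entries = 16 << order" for the descriptor
 * cache size fields, and the RX prefetch low watermark is simply
 * "entries - 8".  The constants below are hypothetical and only show the
 * arithmetic; the driver's real values come from TX/RX_DC_ENTRIES_ORDER.
 */
enum {
	EXAMPLE_DC_ENTRIES_ORDER = 2,				/* hypothetical */
	EXAMPLE_DC_ENTRIES = 16 << EXAMPLE_DC_ENTRIES_ORDER,	/* 64 */
	EXAMPLE_DC_PF_LWM = EXAMPLE_DC_ENTRIES - 8,		/* 56 */
};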
2641
2642void falcon_remove_nic(struct efx_nic *efx)
2643{
2644 struct falcon_nic_data *nic_data = efx->nic_data;
2645
2646 falcon_free_buffer(efx, &efx->irq_status);
2647
2648 (void) falcon_reset_hw(efx, RESET_TYPE_ALL);
2649
2650 /* Release the second function after the reset */
2651 if (nic_data->pci_dev2) {
2652 pci_dev_put(nic_data->pci_dev2);
2653 nic_data->pci_dev2 = NULL;
2654 }
2655
2656 /* Tear down the private nic state */
2657 kfree(efx->nic_data);
2658 efx->nic_data = NULL;
2659}
2660
2661void falcon_update_nic_stats(struct efx_nic *efx)
2662{
2663 efx_oword_t cnt;
2664
2665 falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
2666 efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
2667}
2668
2669/**************************************************************************
2670 *
2671 * Revision-dependent attributes used by efx.c
2672 *
2673 **************************************************************************
2674 */
2675
2676struct efx_nic_type falcon_a_nic_type = {
2677 .mem_bar = 2,
2678 .mem_map_size = 0x20000,
2679 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
2680 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
2681 .buf_tbl_base = BUF_TBL_KER_A1,
2682 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
2683 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
2684 .txd_ring_mask = FALCON_TXD_RING_MASK,
2685 .rxd_ring_mask = FALCON_RXD_RING_MASK,
2686 .evq_size = FALCON_EVQ_SIZE,
2687 .max_dma_mask = FALCON_DMA_MASK,
2688 .tx_dma_mask = FALCON_TX_DMA_MASK,
2689 .bug5391_mask = 0xf,
2690 .rx_xoff_thresh = 2048,
2691 .rx_xon_thresh = 512,
2692 .rx_buffer_padding = 0x24,
2693 .max_interrupt_mode = EFX_INT_MODE_MSI,
2694 .phys_addr_channels = 4,
2695};
2696
2697struct efx_nic_type falcon_b_nic_type = {
2698 .mem_bar = 2,
2699 /* Map everything up to and including the RSS indirection
2700	 * table.  Don't map the MSI-X table or the MSI-X PBA, since Linux
2701	 * requires that they not be mapped. */
2702 .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
2703 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
2704 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
2705 .buf_tbl_base = BUF_TBL_KER_B0,
2706 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
2707 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
2708 .txd_ring_mask = FALCON_TXD_RING_MASK,
2709 .rxd_ring_mask = FALCON_RXD_RING_MASK,
2710 .evq_size = FALCON_EVQ_SIZE,
2711 .max_dma_mask = FALCON_DMA_MASK,
2712 .tx_dma_mask = FALCON_TX_DMA_MASK,
2713 .bug5391_mask = 0,
2714 .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
2715 .rx_xon_thresh = 27648, /* ~3*max MTU */
2716 .rx_buffer_padding = 0,
2717 .max_interrupt_mode = EFX_INT_MODE_MSIX,
2718 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
2719 * interrupt handler only supports 32
2720 * channels */
2721};
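/* Editor's sketch (not part of the patch): how the two tables above might
 * be selected from the probed revision.  The actual selection is done via
 * the PCI device table in efx.c and may differ in detail; FALCON_REV() is
 * defined in falcon.h below.
 */
static inline struct efx_nic_type *example_falcon_nic_type(struct efx_nic *efx)
{
	return (FALCON_REV(efx) >= FALCON_REV_B0) ?
		&falcon_b_nic_type : &falcon_a_nic_type;
}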
2722
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
new file mode 100644
index 000000000000..6117403b0c03
--- /dev/null
+++ b/drivers/net/sfc/falcon.h
@@ -0,0 +1,130 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_H
12#define EFX_FALCON_H
13
14#include "net_driver.h"
15
16/*
17 * Falcon hardware control
18 */
19
20enum falcon_revision {
21 FALCON_REV_A0 = 0,
22 FALCON_REV_A1 = 1,
23 FALCON_REV_B0 = 2,
24};
25
26#define FALCON_REV(efx) ((efx)->pci_dev->revision)
27
28extern struct efx_nic_type falcon_a_nic_type;
29extern struct efx_nic_type falcon_b_nic_type;
30
31/**************************************************************************
32 *
33 * Externs
34 *
35 **************************************************************************
36 */
37
38/* TX data path */
39extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
40extern int falcon_init_tx(struct efx_tx_queue *tx_queue);
41extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
42extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
43extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
44
45/* RX data path */
46extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
47extern int falcon_init_rx(struct efx_rx_queue *rx_queue);
48extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
49extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
50extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
51
52/* Event data path */
53extern int falcon_probe_eventq(struct efx_channel *channel);
54extern int falcon_init_eventq(struct efx_channel *channel);
55extern void falcon_fini_eventq(struct efx_channel *channel);
56extern void falcon_remove_eventq(struct efx_channel *channel);
57extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
58extern void falcon_eventq_read_ack(struct efx_channel *channel);
59
60/* Ports */
61extern int falcon_probe_port(struct efx_nic *efx);
62extern void falcon_remove_port(struct efx_nic *efx);
63
64/* MAC/PHY */
65extern int falcon_xaui_link_ok(struct efx_nic *efx);
66extern int falcon_dma_stats(struct efx_nic *efx,
67 unsigned int done_offset);
68extern void falcon_drain_tx_fifo(struct efx_nic *efx);
69extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
70extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
71
72/* Interrupts and test events */
73extern int falcon_init_interrupt(struct efx_nic *efx);
74extern void falcon_enable_interrupts(struct efx_nic *efx);
75extern void falcon_generate_test_event(struct efx_channel *channel,
76 unsigned int magic);
77extern void falcon_generate_interrupt(struct efx_nic *efx);
78extern void falcon_set_int_moderation(struct efx_channel *channel);
79extern void falcon_disable_interrupts(struct efx_nic *efx);
80extern void falcon_fini_interrupt(struct efx_nic *efx);
81
82/* Global Resources */
83extern int falcon_probe_nic(struct efx_nic *efx);
84extern int falcon_probe_resources(struct efx_nic *efx);
85extern int falcon_init_nic(struct efx_nic *efx);
86extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
87extern void falcon_remove_resources(struct efx_nic *efx);
88extern void falcon_remove_nic(struct efx_nic *efx);
89extern void falcon_update_nic_stats(struct efx_nic *efx);
90extern void falcon_set_multicast_hash(struct efx_nic *efx);
91extern int falcon_reset_xaui(struct efx_nic *efx);
92
93/**************************************************************************
94 *
95 * Falcon MAC stats
96 *
97 **************************************************************************
98 */
99
100#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
101#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
102
103/* Retrieve statistic from statistics block */
104#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
105 if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
106 (efx)->mac_stats.efx_stat += le16_to_cpu( \
107 *((__force __le16 *) \
108 (efx->stats_buffer.addr + \
109 FALCON_STAT_OFFSET(falcon_stat)))); \
110 else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
111 (efx)->mac_stats.efx_stat += le32_to_cpu( \
112 *((__force __le32 *) \
113 (efx->stats_buffer.addr + \
114 FALCON_STAT_OFFSET(falcon_stat)))); \
115 else \
116 (efx)->mac_stats.efx_stat += le64_to_cpu( \
117 *((__force __le64 *) \
118 (efx->stats_buffer.addr + \
119 FALCON_STAT_OFFSET(falcon_stat)))); \
120 } while (0)
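/* Editor's note: a hedged usage sketch of the macro above.  It assumes the
 * stats DMA into stats_buffer has completed and that struct efx_mac_stats
 * has an rx_bytes member (the member name here is illustrative only):
 *
 *	FALCON_STAT(efx, XgRxOctets, rx_bytes);
 *
 * XgRxOctets_offset/XgRxOctets_WIDTH are defined in falcon_hwdefs.h; the
 * FALCON_STAT_OFFSET()/FALCON_STAT_WIDTH() helpers are assumed to resolve
 * to those names via the EFX_VAL() token-pasting macro.
 */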
121
122#define FALCON_MAC_STATS_SIZE 0x100
123
124#define MAC_DATA_LBN 0
125#define MAC_DATA_WIDTH 32
126
127extern void falcon_generate_event(struct efx_channel *channel,
128 efx_qword_t *event);
129
130#endif /* EFX_FALCON_H */
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
new file mode 100644
index 000000000000..0485a63eaff6
--- /dev/null
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -0,0 +1,1135 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_HWDEFS_H
12#define EFX_FALCON_HWDEFS_H
13
14/*
15 * Falcon hardware value definitions.
16 * Falcon is the internal codename for the SFC4000 controller that is
17 * present in SFE400X evaluation boards
18 */
19
20/**************************************************************************
21 *
22 * Falcon registers
23 *
24 **************************************************************************
25 */
26
27/* Address region register */
28#define ADR_REGION_REG_KER 0x00
29#define ADR_REGION0_LBN 0
30#define ADR_REGION0_WIDTH 18
31#define ADR_REGION1_LBN 32
32#define ADR_REGION1_WIDTH 18
33#define ADR_REGION2_LBN 64
34#define ADR_REGION2_WIDTH 18
35#define ADR_REGION3_LBN 96
36#define ADR_REGION3_WIDTH 18
37
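/* Editor's note on the convention used throughout this file: each register
 * field is described by its least-significant bit number (_LBN) and its
 * width in bits (_WIDTH); the EFX_*_FIELD macros in the driver combine the
 * two.  As rough, illustrative arithmetic only (the driver never open-codes
 * this):
 *
 *	mask  = (1ULL << ADR_REGION1_WIDTH) - 1;
 *	value = (low_qword >> ADR_REGION1_LBN) & mask;
 */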
38/* Interrupt enable register */
39#define INT_EN_REG_KER 0x0010
40#define KER_INT_KER_LBN 3
41#define KER_INT_KER_WIDTH 1
42#define DRV_INT_EN_KER_LBN 0
43#define DRV_INT_EN_KER_WIDTH 1
44
45/* Interrupt status address register */
46#define INT_ADR_REG_KER 0x0030
47#define NORM_INT_VEC_DIS_KER_LBN 64
48#define NORM_INT_VEC_DIS_KER_WIDTH 1
49#define INT_ADR_KER_LBN 0
50#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
51
52/* Interrupt status register (B0 only) */
53#define INT_ISR0_B0 0x90
54#define INT_ISR1_B0 0xA0
55
56/* Interrupt acknowledge register (A0/A1 only) */
57#define INT_ACK_REG_KER_A1 0x0050
58#define INT_ACK_DUMMY_DATA_LBN 0
59#define INT_ACK_DUMMY_DATA_WIDTH 32
60
61/* Interrupt acknowledge work-around register (A0/A1 only) */
62#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
63
64/* SPI host command register */
65#define EE_SPI_HCMD_REG_KER 0x0100
66#define EE_SPI_HCMD_CMD_EN_LBN 31
67#define EE_SPI_HCMD_CMD_EN_WIDTH 1
68#define EE_WR_TIMER_ACTIVE_LBN 28
69#define EE_WR_TIMER_ACTIVE_WIDTH 1
70#define EE_SPI_HCMD_SF_SEL_LBN 24
71#define EE_SPI_HCMD_SF_SEL_WIDTH 1
72#define EE_SPI_EEPROM 0
73#define EE_SPI_FLASH 1
74#define EE_SPI_HCMD_DABCNT_LBN 16
75#define EE_SPI_HCMD_DABCNT_WIDTH 5
76#define EE_SPI_HCMD_READ_LBN 15
77#define EE_SPI_HCMD_READ_WIDTH 1
78#define EE_SPI_READ 1
79#define EE_SPI_WRITE 0
80#define EE_SPI_HCMD_DUBCNT_LBN 12
81#define EE_SPI_HCMD_DUBCNT_WIDTH 2
82#define EE_SPI_HCMD_ADBCNT_LBN 8
83#define EE_SPI_HCMD_ADBCNT_WIDTH 2
84#define EE_SPI_HCMD_ENC_LBN 0
85#define EE_SPI_HCMD_ENC_WIDTH 8
86
87/* SPI host address register */
88#define EE_SPI_HADR_REG_KER 0x0110
89#define EE_SPI_HADR_ADR_LBN 0
90#define EE_SPI_HADR_ADR_WIDTH 24
91
92/* SPI host data register */
93#define EE_SPI_HDATA_REG_KER 0x0120
94
95/* PCIE CORE ACCESS REG */
96#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
97#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
98#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
99#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
100
101/* NIC status register */
102#define NIC_STAT_REG 0x0200
103#define ONCHIP_SRAM_LBN 16
104#define ONCHIP_SRAM_WIDTH 1
105#define SF_PRST_LBN 9
106#define SF_PRST_WIDTH 1
107#define EE_PRST_LBN 8
108#define EE_PRST_WIDTH 1
109/* See pic_mode_t for decoding of this field */
110/* These bit definitions are extrapolated from the list of numerical
111 * values for STRAP_PINS.
112 */
113#define STRAP_10G_LBN 2
114#define STRAP_10G_WIDTH 1
115#define STRAP_PCIE_LBN 0
116#define STRAP_PCIE_WIDTH 1
117
118/* GPIO control register */
119#define GPIO_CTL_REG_KER 0x0210
120#define GPIO_OUTPUTS_LBN (16)
121#define GPIO_OUTPUTS_WIDTH (4)
122#define GPIO_INPUTS_LBN (8)
123#define GPIO_DIRECTION_LBN (24)
124#define GPIO_DIRECTION_WIDTH (4)
125#define GPIO_DIRECTION_OUT (1)
126#define GPIO_SRAM_SLEEP (1 << 1)
127
128#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
129#define GPIO3_OEN_WIDTH 1
130#define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
131#define GPIO2_OEN_WIDTH 1
132#define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
133#define GPIO1_OEN_WIDTH 1
134#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
135#define GPIO0_OEN_WIDTH 1
136
137#define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
138#define GPIO3_OUT_WIDTH 1
139#define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
140#define GPIO2_OUT_WIDTH 1
141#define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
142#define GPIO1_OUT_WIDTH 1
143#define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
144#define GPIO0_OUT_WIDTH 1
145
146#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
147#define GPIO3_IN_WIDTH 1
148#define GPIO2_IN_WIDTH 1
149#define GPIO1_IN_WIDTH 1
150#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
151#define GPIO0_IN_WIDTH 1
152
153/* Global control register */
154#define GLB_CTL_REG_KER 0x0220
155#define EXT_PHY_RST_CTL_LBN 63
156#define EXT_PHY_RST_CTL_WIDTH 1
157#define PCIE_SD_RST_CTL_LBN 61
158#define PCIE_SD_RST_CTL_WIDTH 1
159
160#define PCIE_NSTCK_RST_CTL_LBN 58
161#define PCIE_NSTCK_RST_CTL_WIDTH 1
162#define PCIE_CORE_RST_CTL_LBN 57
163#define PCIE_CORE_RST_CTL_WIDTH 1
164#define EE_RST_CTL_LBN 49
165#define EE_RST_CTL_WIDTH 1
166#define RST_XGRX_LBN 24
167#define RST_XGRX_WIDTH 1
168#define RST_XGTX_LBN 23
169#define RST_XGTX_WIDTH 1
170#define RST_EM_LBN 22
171#define RST_EM_WIDTH 1
172#define EXT_PHY_RST_DUR_LBN 1
173#define EXT_PHY_RST_DUR_WIDTH 3
174#define SWRST_LBN 0
175#define SWRST_WIDTH 1
176#define INCLUDE_IN_RESET 0
177#define EXCLUDE_FROM_RESET 1
178
179/* Fatal interrupt register */
180#define FATAL_INTR_REG_KER 0x0230
181#define RBUF_OWN_INT_KER_EN_LBN 39
182#define RBUF_OWN_INT_KER_EN_WIDTH 1
183#define TBUF_OWN_INT_KER_EN_LBN 38
184#define TBUF_OWN_INT_KER_EN_WIDTH 1
185#define ILL_ADR_INT_KER_EN_LBN 33
186#define ILL_ADR_INT_KER_EN_WIDTH 1
187#define MEM_PERR_INT_KER_LBN 8
188#define MEM_PERR_INT_KER_WIDTH 1
189#define INT_KER_ERROR_LBN 0
190#define INT_KER_ERROR_WIDTH 12
191
192#define DP_CTRL_REG 0x250
193#define FLS_EVQ_ID_LBN 0
194#define FLS_EVQ_ID_WIDTH 11
195
196#define MEM_STAT_REG_KER 0x260
197
198/* Debug probe register */
199#define DEBUG_BLK_SEL_MISC 7
200#define DEBUG_BLK_SEL_SERDES 6
201#define DEBUG_BLK_SEL_EM 5
202#define DEBUG_BLK_SEL_SR 4
203#define DEBUG_BLK_SEL_EV 3
204#define DEBUG_BLK_SEL_RX 2
205#define DEBUG_BLK_SEL_TX 1
206#define DEBUG_BLK_SEL_BIU 0
207
208/* FPGA build version */
209#define ALTERA_BUILD_REG_KER 0x0300
210#define VER_ALL_LBN 0
211#define VER_ALL_WIDTH 32
212
213/* Spare EEPROM bits register (flash 0x390) */
214#define SPARE_REG_KER 0x310
215#define MEM_PERR_EN_TX_DATA_LBN 72
216#define MEM_PERR_EN_TX_DATA_WIDTH 2
217
218/* Timer table for kernel access */
219#define TIMER_CMD_REG_KER 0x420
220#define TIMER_MODE_LBN 12
221#define TIMER_MODE_WIDTH 2
222#define TIMER_MODE_DIS 0
223#define TIMER_MODE_INT_HLDOFF 2
224#define TIMER_VAL_LBN 0
225#define TIMER_VAL_WIDTH 12
226
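/* Editor's sketch: interrupt moderation is programmed by writing a mode and
 * a tick count into this register, e.g. (assuming the EFX_POPULATE_DWORD_2
 * helper from bitfield.h, with the per-queue page mapping handled by the
 * caller):
 *
 *	EFX_POPULATE_DWORD_2(timer_cmd,
 *			     TIMER_MODE, TIMER_MODE_INT_HLDOFF,
 *			     TIMER_VAL, ticks);
 */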
227/* Driver generated event register */
228#define DRV_EV_REG_KER 0x440
229#define DRV_EV_QID_LBN 64
230#define DRV_EV_QID_WIDTH 12
231#define DRV_EV_DATA_LBN 0
232#define DRV_EV_DATA_WIDTH 64
233
234/* Buffer table configuration register */
235#define BUF_TBL_CFG_REG_KER 0x600
236#define BUF_TBL_MODE_LBN 3
237#define BUF_TBL_MODE_WIDTH 1
238#define BUF_TBL_MODE_HALF 0
239#define BUF_TBL_MODE_FULL 1
240
241/* SRAM receive descriptor cache configuration register */
242#define SRM_RX_DC_CFG_REG_KER 0x610
243#define SRM_RX_DC_BASE_ADR_LBN 0
244#define SRM_RX_DC_BASE_ADR_WIDTH 21
245
246/* SRAM transmit descriptor cache configuration register */
247#define SRM_TX_DC_CFG_REG_KER 0x620
248#define SRM_TX_DC_BASE_ADR_LBN 0
249#define SRM_TX_DC_BASE_ADR_WIDTH 21
250
251/* SRAM configuration register */
252#define SRM_CFG_REG_KER 0x630
253#define SRAM_OOB_BT_INIT_EN_LBN 3
254#define SRAM_OOB_BT_INIT_EN_WIDTH 1
255#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
256#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
257#define SRM_NB_BSZ_1BANKS_2M 0
258#define SRM_NB_BSZ_1BANKS_4M 1
259#define SRM_NB_BSZ_1BANKS_8M 2
260#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
261#define SRM_NB_BSZ_2BANKS_4M 4
262#define SRM_NB_BSZ_2BANKS_8M 5
263#define SRM_NB_BSZ_2BANKS_16M 6
264#define SRM_NB_BSZ_RESERVED 7
265
266/* Special buffer table update register */
267#define BUF_TBL_UPD_REG_KER 0x0650
268#define BUF_UPD_CMD_LBN 63
269#define BUF_UPD_CMD_WIDTH 1
270#define BUF_CLR_CMD_LBN 62
271#define BUF_CLR_CMD_WIDTH 1
272#define BUF_CLR_END_ID_LBN 32
273#define BUF_CLR_END_ID_WIDTH 20
274#define BUF_CLR_START_ID_LBN 0
275#define BUF_CLR_START_ID_WIDTH 20
276
277/* Receive configuration register */
278#define RX_CFG_REG_KER 0x800
279
280/* B0 */
281#define RX_INGR_EN_B0_LBN 47
282#define RX_INGR_EN_B0_WIDTH 1
283#define RX_DESC_PUSH_EN_B0_LBN 43
284#define RX_DESC_PUSH_EN_B0_WIDTH 1
285#define RX_XON_TX_TH_B0_LBN 33
286#define RX_XON_TX_TH_B0_WIDTH 5
287#define RX_XOFF_TX_TH_B0_LBN 28
288#define RX_XOFF_TX_TH_B0_WIDTH 5
289#define RX_USR_BUF_SIZE_B0_LBN 19
290#define RX_USR_BUF_SIZE_B0_WIDTH 9
291#define RX_XON_MAC_TH_B0_LBN 10
292#define RX_XON_MAC_TH_B0_WIDTH 9
293#define RX_XOFF_MAC_TH_B0_LBN 1
294#define RX_XOFF_MAC_TH_B0_WIDTH 9
295#define RX_XOFF_MAC_EN_B0_LBN 0
296#define RX_XOFF_MAC_EN_B0_WIDTH 1
297
298/* A1 */
299#define RX_DESC_PUSH_EN_A1_LBN 35
300#define RX_DESC_PUSH_EN_A1_WIDTH 1
301#define RX_XON_TX_TH_A1_LBN 25
302#define RX_XON_TX_TH_A1_WIDTH 5
303#define RX_XOFF_TX_TH_A1_LBN 20
304#define RX_XOFF_TX_TH_A1_WIDTH 5
305#define RX_USR_BUF_SIZE_A1_LBN 11
306#define RX_USR_BUF_SIZE_A1_WIDTH 9
307#define RX_XON_MAC_TH_A1_LBN 6
308#define RX_XON_MAC_TH_A1_WIDTH 5
309#define RX_XOFF_MAC_TH_A1_LBN 1
310#define RX_XOFF_MAC_TH_A1_WIDTH 5
311#define RX_XOFF_MAC_EN_A1_LBN 0
312#define RX_XOFF_MAC_EN_A1_WIDTH 1
313
314/* Receive filter control register */
315#define RX_FILTER_CTL_REG 0x810
316#define UDP_FULL_SRCH_LIMIT_LBN 32
317#define UDP_FULL_SRCH_LIMIT_WIDTH 8
318#define NUM_KER_LBN 24
319#define NUM_KER_WIDTH 2
320#define UDP_WILD_SRCH_LIMIT_LBN 16
321#define UDP_WILD_SRCH_LIMIT_WIDTH 8
322#define TCP_WILD_SRCH_LIMIT_LBN 8
323#define TCP_WILD_SRCH_LIMIT_WIDTH 8
324#define TCP_FULL_SRCH_LIMIT_LBN 0
325#define TCP_FULL_SRCH_LIMIT_WIDTH 8
326
327/* RX queue flush register */
328#define RX_FLUSH_DESCQ_REG_KER 0x0820
329#define RX_FLUSH_DESCQ_CMD_LBN 24
330#define RX_FLUSH_DESCQ_CMD_WIDTH 1
331#define RX_FLUSH_DESCQ_LBN 0
332#define RX_FLUSH_DESCQ_WIDTH 12
333
334/* Receive descriptor update register */
335#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
336#define RX_DESC_WPTR_DWORD_LBN 0
337#define RX_DESC_WPTR_DWORD_WIDTH 12
338
339/* Receive descriptor cache configuration register */
340#define RX_DC_CFG_REG_KER 0x840
341#define RX_DC_SIZE_LBN 0
342#define RX_DC_SIZE_WIDTH 2
343
344#define RX_DC_PF_WM_REG_KER 0x850
345#define RX_DC_PF_LWM_LBN 0
346#define RX_DC_PF_LWM_WIDTH 6
347
348/* RX no descriptor drop counter */
349#define RX_NODESC_DROP_REG_KER 0x880
350#define RX_NODESC_DROP_CNT_LBN 0
351#define RX_NODESC_DROP_CNT_WIDTH 16
352
353/* RX black magic register */
354#define RX_SELF_RST_REG_KER 0x890
355#define RX_ISCSI_DIS_LBN 17
356#define RX_ISCSI_DIS_WIDTH 1
357#define RX_NODESC_WAIT_DIS_LBN 9
358#define RX_NODESC_WAIT_DIS_WIDTH 1
359#define RX_RECOVERY_EN_LBN 8
360#define RX_RECOVERY_EN_WIDTH 1
361
362/* TX queue flush register */
363#define TX_FLUSH_DESCQ_REG_KER 0x0a00
364#define TX_FLUSH_DESCQ_CMD_LBN 12
365#define TX_FLUSH_DESCQ_CMD_WIDTH 1
366#define TX_FLUSH_DESCQ_LBN 0
367#define TX_FLUSH_DESCQ_WIDTH 12
368
369/* Transmit descriptor update register */
370#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
371#define TX_DESC_WPTR_DWORD_LBN 0
372#define TX_DESC_WPTR_DWORD_WIDTH 12
373
374/* Transmit descriptor cache configuration register */
375#define TX_DC_CFG_REG_KER 0xa20
376#define TX_DC_SIZE_LBN 0
377#define TX_DC_SIZE_WIDTH 2
378
379/* Transmit checksum configuration register (A0/A1 only) */
380#define TX_CHKSM_CFG_REG_KER_A1 0xa30
381
382/* Transmit configuration register */
383#define TX_CFG_REG_KER 0xa50
384#define TX_NO_EOP_DISC_EN_LBN 5
385#define TX_NO_EOP_DISC_EN_WIDTH 1
386
387/* Transmit configuration register 2 */
388#define TX_CFG2_REG_KER 0xa80
389#define TX_CSR_PUSH_EN_LBN 89
390#define TX_CSR_PUSH_EN_WIDTH 1
391#define TX_RX_SPACER_LBN 64
392#define TX_RX_SPACER_WIDTH 8
393#define TX_SW_EV_EN_LBN 59
394#define TX_SW_EV_EN_WIDTH 1
395#define TX_RX_SPACER_EN_LBN 57
396#define TX_RX_SPACER_EN_WIDTH 1
397#define TX_PREF_THRESHOLD_LBN 19
398#define TX_PREF_THRESHOLD_WIDTH 2
399#define TX_ONE_PKT_PER_Q_LBN 18
400#define TX_ONE_PKT_PER_Q_WIDTH 1
401#define TX_DIS_NON_IP_EV_LBN 17
402#define TX_DIS_NON_IP_EV_WIDTH 1
403#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
404#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
405
406/* PHY management transmit data register */
407#define MD_TXD_REG_KER 0xc00
408#define MD_TXD_LBN 0
409#define MD_TXD_WIDTH 16
410
411/* PHY management receive data register */
412#define MD_RXD_REG_KER 0xc10
413#define MD_RXD_LBN 0
414#define MD_RXD_WIDTH 16
415
416/* PHY management configuration & status register */
417#define MD_CS_REG_KER 0xc20
418#define MD_GC_LBN 4
419#define MD_GC_WIDTH 1
420#define MD_RIC_LBN 2
421#define MD_RIC_WIDTH 1
422#define MD_RDC_LBN 1
423#define MD_RDC_WIDTH 1
424#define MD_WRC_LBN 0
425#define MD_WRC_WIDTH 1
426
427/* PHY management PHY address register */
428#define MD_PHY_ADR_REG_KER 0xc30
429#define MD_PHY_ADR_LBN 0
430#define MD_PHY_ADR_WIDTH 16
431
432/* PHY management ID register */
433#define MD_ID_REG_KER 0xc40
434#define MD_PRT_ADR_LBN 11
435#define MD_PRT_ADR_WIDTH 5
436#define MD_DEV_ADR_LBN 6
437#define MD_DEV_ADR_WIDTH 5
438/* Used for writing both at once */
439#define MD_PRT_DEV_ADR_LBN 6
440#define MD_PRT_DEV_ADR_WIDTH 10
441
442/* PHY management status & mask register (DWORD read only) */
443#define MD_STAT_REG_KER 0xc50
444#define MD_BSERR_LBN 2
445#define MD_BSERR_WIDTH 1
446#define MD_LNFL_LBN 1
447#define MD_LNFL_WIDTH 1
448#define MD_BSY_LBN 0
449#define MD_BSY_WIDTH 1
450
451/* Port 0 and 1 MAC stats registers */
452#define MAC0_STAT_DMA_REG_KER 0xc60
453#define MAC_STAT_DMA_CMD_LBN 48
454#define MAC_STAT_DMA_CMD_WIDTH 1
455#define MAC_STAT_DMA_ADR_LBN 0
456#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
457
458/* Port 0 and 1 MAC control registers */
459#define MAC0_CTRL_REG_KER 0xc80
460#define MAC_XOFF_VAL_LBN 16
461#define MAC_XOFF_VAL_WIDTH 16
462#define TXFIFO_DRAIN_EN_B0_LBN 7
463#define TXFIFO_DRAIN_EN_B0_WIDTH 1
464#define MAC_BCAD_ACPT_LBN 4
465#define MAC_BCAD_ACPT_WIDTH 1
466#define MAC_UC_PROM_LBN 3
467#define MAC_UC_PROM_WIDTH 1
468#define MAC_LINK_STATUS_LBN 2
469#define MAC_LINK_STATUS_WIDTH 1
470#define MAC_SPEED_LBN 0
471#define MAC_SPEED_WIDTH 2
472
473/* 10G XAUI XGXS default values */
474#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
475#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
476#define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */
477
478/* Multicast address hash table */
479#define MAC_MCAST_HASH_REG0_KER 0xca0
480#define MAC_MCAST_HASH_REG1_KER 0xcb0
481
482/* GMAC registers */
483#define FALCON_GMAC_REGBANK 0xe00
484#define FALCON_GMAC_REGBANK_SIZE 0x200
485#define FALCON_GMAC_REG_SIZE 0x10
486
487/* XMAC registers */
488#define FALCON_XMAC_REGBANK 0x1200
489#define FALCON_XMAC_REGBANK_SIZE 0x200
490#define FALCON_XMAC_REG_SIZE 0x10
491
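/* Editor's note (inferred from the names above, illustrative only): the
 * GMAC/XMAC register numbers below are indices into these banks, i.e.
 * register n sits at REGBANK + n * REG_SIZE.  For example XM_GLB_CFG_REG_MAC
 * (0x02) would be at 0x1200 + 2 * 0x10 = 0x1220 within the BAR.
 */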
492/* XGMAC address register low */
493#define XM_ADR_LO_REG_MAC 0x00
494#define XM_ADR_3_LBN 24
495#define XM_ADR_3_WIDTH 8
496#define XM_ADR_2_LBN 16
497#define XM_ADR_2_WIDTH 8
498#define XM_ADR_1_LBN 8
499#define XM_ADR_1_WIDTH 8
500#define XM_ADR_0_LBN 0
501#define XM_ADR_0_WIDTH 8
502
503/* XGMAC address register high */
504#define XM_ADR_HI_REG_MAC 0x01
505#define XM_ADR_5_LBN 8
506#define XM_ADR_5_WIDTH 8
507#define XM_ADR_4_LBN 0
508#define XM_ADR_4_WIDTH 8
509
510/* XGMAC global configuration */
511#define XM_GLB_CFG_REG_MAC 0x02
512#define XM_RX_STAT_EN_LBN 11
513#define XM_RX_STAT_EN_WIDTH 1
514#define XM_TX_STAT_EN_LBN 10
515#define XM_TX_STAT_EN_WIDTH 1
516#define XM_RX_JUMBO_MODE_LBN 6
517#define XM_RX_JUMBO_MODE_WIDTH 1
518#define XM_INTCLR_MODE_LBN 3
519#define XM_INTCLR_MODE_WIDTH 1
520#define XM_CORE_RST_LBN 0
521#define XM_CORE_RST_WIDTH 1
522
523/* XGMAC transmit configuration */
524#define XM_TX_CFG_REG_MAC 0x03
525#define XM_IPG_LBN 16
526#define XM_IPG_WIDTH 4
527#define XM_FCNTL_LBN 10
528#define XM_FCNTL_WIDTH 1
529#define XM_TXCRC_LBN 8
530#define XM_TXCRC_WIDTH 1
531#define XM_AUTO_PAD_LBN 5
532#define XM_AUTO_PAD_WIDTH 1
533#define XM_TX_PRMBL_LBN 2
534#define XM_TX_PRMBL_WIDTH 1
535#define XM_TXEN_LBN 1
536#define XM_TXEN_WIDTH 1
537
538/* XGMAC receive configuration */
539#define XM_RX_CFG_REG_MAC 0x04
540#define XM_PASS_CRC_ERR_LBN 25
541#define XM_PASS_CRC_ERR_WIDTH 1
542#define XM_ACPT_ALL_MCAST_LBN 11
543#define XM_ACPT_ALL_MCAST_WIDTH 1
544#define XM_ACPT_ALL_UCAST_LBN 9
545#define XM_ACPT_ALL_UCAST_WIDTH 1
546#define XM_AUTO_DEPAD_LBN 8
547#define XM_AUTO_DEPAD_WIDTH 1
548#define XM_RXEN_LBN 1
549#define XM_RXEN_WIDTH 1
550
551/* XGMAC management interrupt mask register */
552#define XM_MGT_INT_MSK_REG_MAC_B0 0x5
553#define XM_MSK_PRMBLE_ERR_LBN 2
554#define XM_MSK_PRMBLE_ERR_WIDTH 1
555#define XM_MSK_RMTFLT_LBN 1
556#define XM_MSK_RMTFLT_WIDTH 1
557#define XM_MSK_LCLFLT_LBN 0
558#define XM_MSK_LCLFLT_WIDTH 1
559
560/* XGMAC flow control register */
561#define XM_FC_REG_MAC 0x7
562#define XM_PAUSE_TIME_LBN 16
563#define XM_PAUSE_TIME_WIDTH 16
564#define XM_DIS_FCNTL_LBN 0
565#define XM_DIS_FCNTL_WIDTH 1
566
567/* XGMAC pause time count register */
568#define XM_PAUSE_TIME_REG_MAC 0x9
569
570/* XGMAC transmit parameter register */
571#define XM_TX_PARAM_REG_MAC 0x0d
572#define XM_TX_JUMBO_MODE_LBN 31
573#define XM_TX_JUMBO_MODE_WIDTH 1
574#define XM_MAX_TX_FRM_SIZE_LBN 16
575#define XM_MAX_TX_FRM_SIZE_WIDTH 14
576
577/* XGMAC receive parameter register */
578#define XM_RX_PARAM_REG_MAC 0x0e
579#define XM_MAX_RX_FRM_SIZE_LBN 0
580#define XM_MAX_RX_FRM_SIZE_WIDTH 14
581
582/* XGMAC management interrupt status register */
583#define XM_MGT_INT_REG_MAC_B0 0x0f
584#define XM_PRMBLE_ERR 2
585#define XM_PRMBLE_WIDTH 1
586#define XM_RMTFLT_LBN 1
587#define XM_RMTFLT_WIDTH 1
588#define XM_LCLFLT_LBN 0
589#define XM_LCLFLT_WIDTH 1
590
591/* XGXS/XAUI powerdown/reset register */
592#define XX_PWR_RST_REG_MAC 0x10
593
594#define XX_PWRDND_EN_LBN 15
595#define XX_PWRDND_EN_WIDTH 1
596#define XX_PWRDNC_EN_LBN 14
597#define XX_PWRDNC_EN_WIDTH 1
598#define XX_PWRDNB_EN_LBN 13
599#define XX_PWRDNB_EN_WIDTH 1
600#define XX_PWRDNA_EN_LBN 12
601#define XX_PWRDNA_EN_WIDTH 1
602#define XX_RSTPLLCD_EN_LBN 9
603#define XX_RSTPLLCD_EN_WIDTH 1
604#define XX_RSTPLLAB_EN_LBN 8
605#define XX_RSTPLLAB_EN_WIDTH 1
606#define XX_RESETD_EN_LBN 7
607#define XX_RESETD_EN_WIDTH 1
608#define XX_RESETC_EN_LBN 6
609#define XX_RESETC_EN_WIDTH 1
610#define XX_RESETB_EN_LBN 5
611#define XX_RESETB_EN_WIDTH 1
612#define XX_RESETA_EN_LBN 4
613#define XX_RESETA_EN_WIDTH 1
614#define XX_RSTXGXSRX_EN_LBN 2
615#define XX_RSTXGXSRX_EN_WIDTH 1
616#define XX_RSTXGXSTX_EN_LBN 1
617#define XX_RSTXGXSTX_EN_WIDTH 1
618#define XX_RST_XX_EN_LBN 0
619#define XX_RST_XX_EN_WIDTH 1
620
621/* XGXS/XAUI powerdown/reset control register */
622#define XX_SD_CTL_REG_MAC 0x11
623#define XX_HIDRVD_LBN 15
624#define XX_HIDRVD_WIDTH 1
625#define XX_LODRVD_LBN 14
626#define XX_LODRVD_WIDTH 1
627#define XX_HIDRVC_LBN 13
628#define XX_HIDRVC_WIDTH 1
629#define XX_LODRVC_LBN 12
630#define XX_LODRVC_WIDTH 1
631#define XX_HIDRVB_LBN 11
632#define XX_HIDRVB_WIDTH 1
633#define XX_LODRVB_LBN 10
634#define XX_LODRVB_WIDTH 1
635#define XX_HIDRVA_LBN 9
636#define XX_HIDRVA_WIDTH 1
637#define XX_LODRVA_LBN 8
638#define XX_LODRVA_WIDTH 1
639
640#define XX_TXDRV_CTL_REG_MAC 0x12
641#define XX_DEQD_LBN 28
642#define XX_DEQD_WIDTH 4
643#define XX_DEQC_LBN 24
644#define XX_DEQC_WIDTH 4
645#define XX_DEQB_LBN 20
646#define XX_DEQB_WIDTH 4
647#define XX_DEQA_LBN 16
648#define XX_DEQA_WIDTH 4
649#define XX_DTXD_LBN 12
650#define XX_DTXD_WIDTH 4
651#define XX_DTXC_LBN 8
652#define XX_DTXC_WIDTH 4
653#define XX_DTXB_LBN 4
654#define XX_DTXB_WIDTH 4
655#define XX_DTXA_LBN 0
656#define XX_DTXA_WIDTH 4
657
658/* XAUI XGXS core status register */
659#define XX_FORCE_SIG_DECODE_FORCED 0xff
660#define XX_CORE_STAT_REG_MAC 0x16
661#define XX_ALIGN_DONE_LBN 20
662#define XX_ALIGN_DONE_WIDTH 1
663#define XX_SYNC_STAT_LBN 16
664#define XX_SYNC_STAT_WIDTH 4
665#define XX_SYNC_STAT_DECODE_SYNCED 0xf
666#define XX_COMMA_DET_LBN 12
667#define XX_COMMA_DET_WIDTH 4
668#define XX_COMMA_DET_DECODE_DETECTED 0xf
669#define XX_COMMA_DET_RESET 0xf
670#define XX_CHARERR_LBN 4
671#define XX_CHARERR_WIDTH 4
672#define XX_CHARERR_RESET 0xf
673#define XX_DISPERR_LBN 0
674#define XX_DISPERR_WIDTH 4
675#define XX_DISPERR_RESET 0xf
676
677/* Receive filter table */
678#define RX_FILTER_TBL0 0xF00000
679
680/* Receive descriptor pointer table */
681#define RX_DESC_PTR_TBL_KER_A1 0x11800
682#define RX_DESC_PTR_TBL_KER_B0 0xF40000
683#define RX_DESC_PTR_TBL_KER_P0 0x900
684#define RX_ISCSI_DDIG_EN_LBN 88
685#define RX_ISCSI_DDIG_EN_WIDTH 1
686#define RX_ISCSI_HDIG_EN_LBN 87
687#define RX_ISCSI_HDIG_EN_WIDTH 1
688#define RX_DESCQ_BUF_BASE_ID_LBN 36
689#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
690#define RX_DESCQ_EVQ_ID_LBN 24
691#define RX_DESCQ_EVQ_ID_WIDTH 12
692#define RX_DESCQ_OWNER_ID_LBN 10
693#define RX_DESCQ_OWNER_ID_WIDTH 14
694#define RX_DESCQ_LABEL_LBN 5
695#define RX_DESCQ_LABEL_WIDTH 5
696#define RX_DESCQ_SIZE_LBN 3
697#define RX_DESCQ_SIZE_WIDTH 2
698#define RX_DESCQ_SIZE_4K 3
699#define RX_DESCQ_SIZE_2K 2
700#define RX_DESCQ_SIZE_1K 1
701#define RX_DESCQ_SIZE_512 0
702#define RX_DESCQ_TYPE_LBN 2
703#define RX_DESCQ_TYPE_WIDTH 1
704#define RX_DESCQ_JUMBO_LBN 1
705#define RX_DESCQ_JUMBO_WIDTH 1
706#define RX_DESCQ_EN_LBN 0
707#define RX_DESCQ_EN_WIDTH 1
708
709/* Transmit descriptor pointer table */
710#define TX_DESC_PTR_TBL_KER_A1 0x11900
711#define TX_DESC_PTR_TBL_KER_B0 0xF50000
712#define TX_DESC_PTR_TBL_KER_P0 0xa40
713#define TX_NON_IP_DROP_DIS_B0_LBN 91
714#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
715#define TX_IP_CHKSM_DIS_B0_LBN 90
716#define TX_IP_CHKSM_DIS_B0_WIDTH 1
717#define TX_TCP_CHKSM_DIS_B0_LBN 89
718#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
719#define TX_DESCQ_EN_LBN 88
720#define TX_DESCQ_EN_WIDTH 1
721#define TX_ISCSI_DDIG_EN_LBN 87
722#define TX_ISCSI_DDIG_EN_WIDTH 1
723#define TX_ISCSI_HDIG_EN_LBN 86
724#define TX_ISCSI_HDIG_EN_WIDTH 1
725#define TX_DESCQ_BUF_BASE_ID_LBN 36
726#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
727#define TX_DESCQ_EVQ_ID_LBN 24
728#define TX_DESCQ_EVQ_ID_WIDTH 12
729#define TX_DESCQ_OWNER_ID_LBN 10
730#define TX_DESCQ_OWNER_ID_WIDTH 14
731#define TX_DESCQ_LABEL_LBN 5
732#define TX_DESCQ_LABEL_WIDTH 5
733#define TX_DESCQ_SIZE_LBN 3
734#define TX_DESCQ_SIZE_WIDTH 2
735#define TX_DESCQ_SIZE_4K 3
736#define TX_DESCQ_SIZE_2K 2
737#define TX_DESCQ_SIZE_1K 1
738#define TX_DESCQ_SIZE_512 0
739#define TX_DESCQ_TYPE_LBN 1
740#define TX_DESCQ_TYPE_WIDTH 2
741
742/* Event queue pointer */
743#define EVQ_PTR_TBL_KER_A1 0x11a00
744#define EVQ_PTR_TBL_KER_B0 0xf60000
745#define EVQ_PTR_TBL_KER_P0 0x500
746#define EVQ_EN_LBN 23
747#define EVQ_EN_WIDTH 1
748#define EVQ_SIZE_LBN 20
749#define EVQ_SIZE_WIDTH 3
750#define EVQ_SIZE_32K 6
751#define EVQ_SIZE_16K 5
752#define EVQ_SIZE_8K 4
753#define EVQ_SIZE_4K 3
754#define EVQ_SIZE_2K 2
755#define EVQ_SIZE_1K 1
756#define EVQ_SIZE_512 0
757#define EVQ_BUF_BASE_ID_LBN 0
758#define EVQ_BUF_BASE_ID_WIDTH 20
759
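/* Editor's note: the EVQ_SIZE decode values above follow the pattern
 * "entries = 512 << code", so for example a 4096-entry event queue is
 * programmed with EVQ_SIZE_4K (3).  Inferred from the decodes listed;
 * illustrative only.
 */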
760/* Event queue read pointer */
761#define EVQ_RPTR_REG_KER_A1 0x11b00
762#define EVQ_RPTR_REG_KER_B0 0xfa0000
763#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
764#define EVQ_RPTR_DWORD_LBN 0
765#define EVQ_RPTR_DWORD_WIDTH 14
766
767/* RSS indirection table */
768#define RX_RSS_INDIR_TBL_B0 0xFB0000
769#define RX_RSS_INDIR_ENT_B0_LBN 0
770#define RX_RSS_INDIR_ENT_B0_WIDTH 6
771
772/* Special buffer descriptors (full-mode) */
773#define BUF_FULL_TBL_KER_A1 0x8000
774#define BUF_FULL_TBL_KER_B0 0x800000
775#define IP_DAT_BUF_SIZE_LBN 50
776#define IP_DAT_BUF_SIZE_WIDTH 1
777#define IP_DAT_BUF_SIZE_8K 1
778#define IP_DAT_BUF_SIZE_4K 0
779#define BUF_ADR_REGION_LBN 48
780#define BUF_ADR_REGION_WIDTH 2
781#define BUF_ADR_FBUF_LBN 14
782#define BUF_ADR_FBUF_WIDTH 34
783#define BUF_OWNER_ID_FBUF_LBN 0
784#define BUF_OWNER_ID_FBUF_WIDTH 14
785
786/* Transmit descriptor */
787#define TX_KER_PORT_LBN 63
788#define TX_KER_PORT_WIDTH 1
789#define TX_KER_CONT_LBN 62
790#define TX_KER_CONT_WIDTH 1
791#define TX_KER_BYTE_CNT_LBN 48
792#define TX_KER_BYTE_CNT_WIDTH 14
793#define TX_KER_BUF_REGION_LBN 46
794#define TX_KER_BUF_REGION_WIDTH 2
795#define TX_KER_BUF_REGION0_DECODE 0
796#define TX_KER_BUF_REGION1_DECODE 1
797#define TX_KER_BUF_REGION2_DECODE 2
798#define TX_KER_BUF_REGION3_DECODE 3
799#define TX_KER_BUF_ADR_LBN 0
800#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
801
802/* Receive descriptor */
803#define RX_KER_BUF_SIZE_LBN 48
804#define RX_KER_BUF_SIZE_WIDTH 14
805#define RX_KER_BUF_REGION_LBN 46
806#define RX_KER_BUF_REGION_WIDTH 2
807#define RX_KER_BUF_REGION0_DECODE 0
808#define RX_KER_BUF_REGION1_DECODE 1
809#define RX_KER_BUF_REGION2_DECODE 2
810#define RX_KER_BUF_REGION3_DECODE 3
811#define RX_KER_BUF_ADR_LBN 0
812#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
813
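/* Editor's sketch: filling in a physical-mode receive descriptor from the
 * fields above.  EFX_POPULATE_QWORD_3 is assumed to be provided by
 * bitfield.h (the oword variants appear in falcon.c); rxd, buf_len and
 * dma_addr are hypothetical:
 *
 *	EFX_POPULATE_QWORD_3(*rxd,
 *			     RX_KER_BUF_SIZE, buf_len,
 *			     RX_KER_BUF_REGION, RX_KER_BUF_REGION0_DECODE,
 *			     RX_KER_BUF_ADR, dma_addr);
 */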
814/**************************************************************************
815 *
816 * Falcon events
817 *
818 **************************************************************************
819 */
820
821/* Event queue entries */
822#define EV_CODE_LBN 60
823#define EV_CODE_WIDTH 4
824#define RX_IP_EV_DECODE 0
825#define TX_IP_EV_DECODE 2
826#define DRIVER_EV_DECODE 5
827#define GLOBAL_EV_DECODE 6
828#define DRV_GEN_EV_DECODE 7
829#define WHOLE_EVENT_LBN 0
830#define WHOLE_EVENT_WIDTH 64
831
832/* Receive events */
833#define RX_EV_PKT_OK_LBN 56
834#define RX_EV_PKT_OK_WIDTH 1
835#define RX_EV_PAUSE_FRM_ERR_LBN 55
836#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
837#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
838#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
839#define RX_EV_IF_FRAG_ERR_LBN 53
840#define RX_EV_IF_FRAG_ERR_WIDTH 1
841#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
842#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
843#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
844#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
845#define RX_EV_ETH_CRC_ERR_LBN 50
846#define RX_EV_ETH_CRC_ERR_WIDTH 1
847#define RX_EV_FRM_TRUNC_LBN 49
848#define RX_EV_FRM_TRUNC_WIDTH 1
849#define RX_EV_DRIB_NIB_LBN 48
850#define RX_EV_DRIB_NIB_WIDTH 1
851#define RX_EV_TOBE_DISC_LBN 47
852#define RX_EV_TOBE_DISC_WIDTH 1
853#define RX_EV_PKT_TYPE_LBN 44
854#define RX_EV_PKT_TYPE_WIDTH 3
855#define RX_EV_PKT_TYPE_ETH_DECODE 0
856#define RX_EV_PKT_TYPE_LLC_DECODE 1
857#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
858#define RX_EV_PKT_TYPE_VLAN_DECODE 3
859#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
860#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
861#define RX_EV_HDR_TYPE_LBN 42
862#define RX_EV_HDR_TYPE_WIDTH 2
863#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
864#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
865#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
866#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
867#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
868 ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
869#define RX_EV_MCAST_HASH_MATCH_LBN 40
870#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
871#define RX_EV_MCAST_PKT_LBN 39
872#define RX_EV_MCAST_PKT_WIDTH 1
873#define RX_EV_Q_LABEL_LBN 32
874#define RX_EV_Q_LABEL_WIDTH 5
875#define RX_EV_JUMBO_CONT_LBN 31
876#define RX_EV_JUMBO_CONT_WIDTH 1
877#define RX_EV_BYTE_CNT_LBN 16
878#define RX_EV_BYTE_CNT_WIDTH 14
879#define RX_EV_SOP_LBN 15
880#define RX_EV_SOP_WIDTH 1
881#define RX_EV_DESC_PTR_LBN 0
882#define RX_EV_DESC_PTR_WIDTH 12
883
884/* Transmit events */
885#define TX_EV_PKT_ERR_LBN 38
886#define TX_EV_PKT_ERR_WIDTH 1
887#define TX_EV_Q_LABEL_LBN 32
888#define TX_EV_Q_LABEL_WIDTH 5
889#define TX_EV_WQ_FF_FULL_LBN 15
890#define TX_EV_WQ_FF_FULL_WIDTH 1
891#define TX_EV_COMP_LBN 12
892#define TX_EV_COMP_WIDTH 1
893#define TX_EV_DESC_PTR_LBN 0
894#define TX_EV_DESC_PTR_WIDTH 12
895
896/* Driver events */
897#define DRIVER_EV_SUB_CODE_LBN 56
898#define DRIVER_EV_SUB_CODE_WIDTH 4
899#define DRIVER_EV_SUB_DATA_LBN 0
900#define DRIVER_EV_SUB_DATA_WIDTH 14
901#define TX_DESCQ_FLS_DONE_EV_DECODE 0
902#define RX_DESCQ_FLS_DONE_EV_DECODE 1
903#define EVQ_INIT_DONE_EV_DECODE 2
904#define EVQ_NOT_EN_EV_DECODE 3
905#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
906#define SRM_UPD_DONE_EV_DECODE 5
907#define WAKE_UP_EV_DECODE 6
908#define TX_PKT_NON_TCP_UDP_DECODE 9
909#define TIMER_EV_DECODE 10
910#define RX_RECOVERY_EV_DECODE 11
911#define RX_DSC_ERROR_EV_DECODE 14
912#define TX_DSC_ERROR_EV_DECODE 15
913#define DRIVER_EV_TX_DESCQ_ID_LBN 0
914#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
915#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
916#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
917#define DRIVER_EV_RX_DESCQ_ID_LBN 0
918#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
919#define SRM_CLR_EV_DECODE 0
920#define SRM_UPD_EV_DECODE 1
921#define SRM_ILLCLR_EV_DECODE 2
922
923/* Global events */
924#define RX_RECOVERY_B0_LBN 12
925#define RX_RECOVERY_B0_WIDTH 1
926#define XG_MNT_INTR_B0_LBN 11
927#define XG_MNT_INTR_B0_WIDTH 1
928#define RX_RECOVERY_A1_LBN 11
929#define RX_RECOVERY_A1_WIDTH 1
930#define XG_PHY_INTR_LBN 9
931#define XG_PHY_INTR_WIDTH 1
932#define G_PHY1_INTR_LBN 8
933#define G_PHY1_INTR_WIDTH 1
934#define G_PHY0_INTR_LBN 7
935#define G_PHY0_INTR_WIDTH 1
936
937/* Driver-generated test events */
938#define EVQ_MAGIC_LBN 0
939#define EVQ_MAGIC_WIDTH 32
940
941/**************************************************************************
942 *
943 * Falcon MAC stats
944 *
945 **************************************************************************
946 *
947 */
948#define GRxGoodOct_offset 0x0
949#define GRxBadOct_offset 0x8
950#define GRxMissPkt_offset 0x10
951#define GRxFalseCRS_offset 0x14
952#define GRxPausePkt_offset 0x18
953#define GRxBadPkt_offset 0x1C
954#define GRxUcastPkt_offset 0x20
955#define GRxMcastPkt_offset 0x24
956#define GRxBcastPkt_offset 0x28
957#define GRxGoodLt64Pkt_offset 0x2C
958#define GRxBadLt64Pkt_offset 0x30
959#define GRx64Pkt_offset 0x34
960#define GRx65to127Pkt_offset 0x38
961#define GRx128to255Pkt_offset 0x3C
962#define GRx256to511Pkt_offset 0x40
963#define GRx512to1023Pkt_offset 0x44
964#define GRx1024to15xxPkt_offset 0x48
965#define GRx15xxtoJumboPkt_offset 0x4C
966#define GRxGtJumboPkt_offset 0x50
967#define GRxFcsErr64to15xxPkt_offset 0x54
968#define GRxFcsErr15xxtoJumboPkt_offset 0x58
969#define GRxFcsErrGtJumboPkt_offset 0x5C
970#define GTxGoodBadOct_offset 0x80
971#define GTxGoodOct_offset 0x88
972#define GTxSglColPkt_offset 0x90
973#define GTxMultColPkt_offset 0x94
974#define GTxExColPkt_offset 0x98
975#define GTxDefPkt_offset 0x9C
976#define GTxLateCol_offset 0xA0
977#define GTxExDefPkt_offset 0xA4
978#define GTxPausePkt_offset 0xA8
979#define GTxBadPkt_offset 0xAC
980#define GTxUcastPkt_offset 0xB0
981#define GTxMcastPkt_offset 0xB4
982#define GTxBcastPkt_offset 0xB8
983#define GTxLt64Pkt_offset 0xBC
984#define GTx64Pkt_offset 0xC0
985#define GTx65to127Pkt_offset 0xC4
986#define GTx128to255Pkt_offset 0xC8
987#define GTx256to511Pkt_offset 0xCC
988#define GTx512to1023Pkt_offset 0xD0
989#define GTx1024to15xxPkt_offset 0xD4
990#define GTx15xxtoJumboPkt_offset 0xD8
991#define GTxGtJumboPkt_offset 0xDC
992#define GTxNonTcpUdpPkt_offset 0xE0
993#define GTxMacSrcErrPkt_offset 0xE4
994#define GTxIpSrcErrPkt_offset 0xE8
995#define GDmaDone_offset 0xEC
996
997#define XgRxOctets_offset 0x0
998#define XgRxOctets_WIDTH 48
999#define XgRxOctetsOK_offset 0x8
1000#define XgRxOctetsOK_WIDTH 48
1001#define XgRxPkts_offset 0x10
1002#define XgRxPkts_WIDTH 32
1003#define XgRxPktsOK_offset 0x14
1004#define XgRxPktsOK_WIDTH 32
1005#define XgRxBroadcastPkts_offset 0x18
1006#define XgRxBroadcastPkts_WIDTH 32
1007#define XgRxMulticastPkts_offset 0x1C
1008#define XgRxMulticastPkts_WIDTH 32
1009#define XgRxUnicastPkts_offset 0x20
1010#define XgRxUnicastPkts_WIDTH 32
1011#define XgRxUndersizePkts_offset 0x24
1012#define XgRxUndersizePkts_WIDTH 32
1013#define XgRxOversizePkts_offset 0x28
1014#define XgRxOversizePkts_WIDTH 32
1015#define XgRxJabberPkts_offset 0x2C
1016#define XgRxJabberPkts_WIDTH 32
1017#define XgRxUndersizeFCSerrorPkts_offset 0x30
1018#define XgRxUndersizeFCSerrorPkts_WIDTH 32
1019#define XgRxDropEvents_offset 0x34
1020#define XgRxDropEvents_WIDTH 32
1021#define XgRxFCSerrorPkts_offset 0x38
1022#define XgRxFCSerrorPkts_WIDTH 32
1023#define XgRxAlignError_offset 0x3C
1024#define XgRxAlignError_WIDTH 32
1025#define XgRxSymbolError_offset 0x40
1026#define XgRxSymbolError_WIDTH 32
1027#define XgRxInternalMACError_offset 0x44
1028#define XgRxInternalMACError_WIDTH 32
1029#define XgRxControlPkts_offset 0x48
1030#define XgRxControlPkts_WIDTH 32
1031#define XgRxPausePkts_offset 0x4C
1032#define XgRxPausePkts_WIDTH 32
1033#define XgRxPkts64Octets_offset 0x50
1034#define XgRxPkts64Octets_WIDTH 32
1035#define XgRxPkts65to127Octets_offset 0x54
1036#define XgRxPkts65to127Octets_WIDTH 32
1037#define XgRxPkts128to255Octets_offset 0x58
1038#define XgRxPkts128to255Octets_WIDTH 32
1039#define XgRxPkts256to511Octets_offset 0x5C
1040#define XgRxPkts256to511Octets_WIDTH 32
1041#define XgRxPkts512to1023Octets_offset 0x60
1042#define XgRxPkts512to1023Octets_WIDTH 32
1043#define XgRxPkts1024to15xxOctets_offset 0x64
1044#define XgRxPkts1024to15xxOctets_WIDTH 32
1045#define XgRxPkts15xxtoMaxOctets_offset 0x68
1046#define XgRxPkts15xxtoMaxOctets_WIDTH 32
1047#define XgRxLengthError_offset 0x6C
1048#define XgRxLengthError_WIDTH 32
1049#define XgTxPkts_offset 0x80
1050#define XgTxPkts_WIDTH 32
1051#define XgTxOctets_offset 0x88
1052#define XgTxOctets_WIDTH 48
1053#define XgTxMulticastPkts_offset 0x90
1054#define XgTxMulticastPkts_WIDTH 32
1055#define XgTxBroadcastPkts_offset 0x94
1056#define XgTxBroadcastPkts_WIDTH 32
1057#define XgTxUnicastPkts_offset 0x98
1058#define XgTxUnicastPkts_WIDTH 32
1059#define XgTxControlPkts_offset 0x9C
1060#define XgTxControlPkts_WIDTH 32
1061#define XgTxPausePkts_offset 0xA0
1062#define XgTxPausePkts_WIDTH 32
1063#define XgTxPkts64Octets_offset 0xA4
1064#define XgTxPkts64Octets_WIDTH 32
1065#define XgTxPkts65to127Octets_offset 0xA8
1066#define XgTxPkts65to127Octets_WIDTH 32
1067#define XgTxPkts128to255Octets_offset 0xAC
1068#define XgTxPkts128to255Octets_WIDTH 32
1069#define XgTxPkts256to511Octets_offset 0xB0
1070#define XgTxPkts256to511Octets_WIDTH 32
1071#define XgTxPkts512to1023Octets_offset 0xB4
1072#define XgTxPkts512to1023Octets_WIDTH 32
1073#define XgTxPkts1024to15xxOctets_offset 0xB8
1074#define XgTxPkts1024to15xxOctets_WIDTH 32
1075#define XgTxPkts1519toMaxOctets_offset 0xBC
1076#define XgTxPkts1519toMaxOctets_WIDTH 32
1077#define XgTxUndersizePkts_offset 0xC0
1078#define XgTxUndersizePkts_WIDTH 32
1079#define XgTxOversizePkts_offset 0xC4
1080#define XgTxOversizePkts_WIDTH 32
1081#define XgTxNonTcpUdpPkt_offset 0xC8
1082#define XgTxNonTcpUdpPkt_WIDTH 16
1083#define XgTxMacSrcErrPkt_offset 0xCC
1084#define XgTxMacSrcErrPkt_WIDTH 16
1085#define XgTxIpSrcErrPkt_offset 0xD0
1086#define XgTxIpSrcErrPkt_WIDTH 16
1087#define XgDmaDone_offset 0xD4
1088
1089#define FALCON_STATS_NOT_DONE 0x00000000
1090#define FALCON_STATS_DONE 0xffffffff
1091
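/* Editor's note: these two values imply a simple completion handshake.  The
 * driver presumably writes FALCON_STATS_NOT_DONE into the "DMA done" word
 * of its stats buffer (e.g. at XgDmaDone_offset), starts the MAC stats DMA,
 * and polls until the hardware overwrites that word; falcon_dma_stats()
 * takes the done word's offset as its done_offset argument.  Sketch only.
 */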
1092/* Interrupt status register bits */
1093#define FATAL_INT_LBN 64
1094#define FATAL_INT_WIDTH 1
1095#define INT_EVQS_LBN 40
1096#define INT_EVQS_WIDTH 4
1097
1098/**************************************************************************
1099 *
1100 * Falcon non-volatile configuration
1101 *
1102 **************************************************************************
1103 */
1104
1105/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
1106struct falcon_nvconfig_board_v2 {
1107 __le16 nports;
1108 u8 port0_phy_addr;
1109 u8 port0_phy_type;
1110 u8 port1_phy_addr;
1111 u8 port1_phy_type;
1112 __le16 asic_sub_revision;
1113 __le16 board_revision;
1114} __attribute__ ((packed));
1115
1116#define NVCONFIG_BASE 0x300
1117#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
1118struct falcon_nvconfig {
1119 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
1120 u8 mac_address[2][8]; /* 0x310 */
1121 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
1122 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
1123 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
1124 efx_oword_t hw_init_reg; /* 0x350 */
1125 efx_oword_t nic_stat_reg; /* 0x360 */
1126 efx_oword_t glb_ctl_reg; /* 0x370 */
1127 efx_oword_t srm_cfg_reg; /* 0x380 */
1128 efx_oword_t spare_reg; /* 0x390 */
1129 __le16 board_magic_num; /* 0x3A0 */
1130 __le16 board_struct_ver;
1131 __le16 board_checksum;
1132 struct falcon_nvconfig_board_v2 board_v2;
1133} __attribute__ ((packed));
1134
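/* Editor's sketch (not part of the patch): a minimal sanity check a reader
 * of this structure might perform.  The real validation, including the
 * checksum over the region, is done in falcon_probe_nvconfig() and is more
 * thorough; the "version >= 2" test simply reflects the comment above that
 * v1 is obsolete and later board-structure versions remain compatible.
 */
static inline bool example_nvconfig_plausible(const struct falcon_nvconfig *nv)
{
	return le16_to_cpu(nv->board_magic_num) == NVCONFIG_BOARD_MAGIC_NUM &&
	       le16_to_cpu(nv->board_struct_ver) >= 2;
}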
1135#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
new file mode 100644
index 000000000000..ea08184ddfa9
--- /dev/null
+++ b/drivers/net/sfc/falcon_io.h
@@ -0,0 +1,243 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_IO_H
12#define EFX_FALCON_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16#include "net_driver.h"
17
18/**************************************************************************
19 *
20 * Falcon hardware access
21 *
22 **************************************************************************
23 *
24 * Notes on locking strategy:
25 *
26 * Most Falcon registers require 16-byte (or 8-byte, for SRAM
27 * registers) atomic writes, which necessitates locking.
28 * Under normal operation few writes to the Falcon BAR are made and these
29 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
30 * cased to allow 4-byte (hence lockless) accesses.
31 *
32 * It *is* safe to write to these 4-byte registers in the middle of an
33 * access to an 8-byte or 16-byte register. We therefore use a
34 * spinlock to protect accesses to the larger registers, but no locks
35 * for the 4-byte registers.
36 *
37 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
38 * due to the way the 16-byte registers are "collected" in the Falcon BIU.
39 *
40 * We also lock when carrying out reads, to ensure consistency of the
41 * data (made possible since the BIU reads all 128 bits into a cache).
42 * Reads are very rare, so this isn't a significant performance
43 * impact. (Most data transferred from NIC to host is DMAed directly
44 * into host memory).
45 *
46 * I/O BAR access uses locks for both reads and writes (but is only provided
47 * for testing purposes).
48 */
49
50/* Special buffer descriptors (Falcon SRAM) */
51#define BUF_TBL_KER_A1 0x18000
52#define BUF_TBL_KER_B0 0x800000
53
54
55#if BITS_PER_LONG == 64
56#define FALCON_USE_QWORD_IO 1
57#endif
58
59#define _falcon_writeq(efx, value, reg) \
60 __raw_writeq((__force u64) (value), (efx)->membase + (reg))
61#define _falcon_writel(efx, value, reg) \
62 __raw_writel((__force u32) (value), (efx)->membase + (reg))
63#define _falcon_readq(efx, reg) \
64 ((__force __le64) __raw_readq((efx)->membase + (reg)))
65#define _falcon_readl(efx, reg) \
66 ((__force __le32) __raw_readl((efx)->membase + (reg)))
67
68/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
69static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
70 unsigned int reg)
71{
72 unsigned long flags;
73
74 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
75 EFX_OWORD_VAL(*value));
76
77 spin_lock_irqsave(&efx->biu_lock, flags);
78#ifdef FALCON_USE_QWORD_IO
79 _falcon_writeq(efx, value->u64[0], reg + 0);
80 wmb();
81 _falcon_writeq(efx, value->u64[1], reg + 8);
82#else
83 _falcon_writel(efx, value->u32[0], reg + 0);
84 _falcon_writel(efx, value->u32[1], reg + 4);
85 _falcon_writel(efx, value->u32[2], reg + 8);
86 wmb();
87 _falcon_writel(efx, value->u32[3], reg + 12);
88#endif
89 mmiowb();
90 spin_unlock_irqrestore(&efx->biu_lock, flags);
91}
92
93/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
94static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
95 unsigned int index)
96{
97 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
98 unsigned long flags;
99
100 EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
101 reg, EFX_QWORD_VAL(*value));
102
103 spin_lock_irqsave(&efx->biu_lock, flags);
104#ifdef FALCON_USE_QWORD_IO
105 _falcon_writeq(efx, value->u64[0], reg + 0);
106#else
107 _falcon_writel(efx, value->u32[0], reg + 0);
108 wmb();
109 _falcon_writel(efx, value->u32[1], reg + 4);
110#endif
111 mmiowb();
112 spin_unlock_irqrestore(&efx->biu_lock, flags);
113}
114
115/* Write dword to Falcon register that allows partial writes
116 *
117 * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
118 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
119 * for lockless writes.
120 */
121static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
122 unsigned int reg)
123{
124 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
125 reg, EFX_DWORD_VAL(*value));
126
127 /* No lock required */
128 _falcon_writel(efx, value->u32[0], reg);
129}
130
131/* Read from a Falcon register
132 *
133 * This reads an entire 16-byte Falcon register in one go, locking as
134 * appropriate. It is essential to read the first dword first, as this
135 * prompts Falcon to load the current value into the shadow register.
136 */
137static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
138 unsigned int reg)
139{
140 unsigned long flags;
141
142 spin_lock_irqsave(&efx->biu_lock, flags);
143 value->u32[0] = _falcon_readl(efx, reg + 0);
144 rmb();
145 value->u32[1] = _falcon_readl(efx, reg + 4);
146 value->u32[2] = _falcon_readl(efx, reg + 8);
147 value->u32[3] = _falcon_readl(efx, reg + 12);
148 spin_unlock_irqrestore(&efx->biu_lock, flags);
149
150 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
151 EFX_OWORD_VAL(*value));
152}
153
154/* This reads an 8-byte Falcon SRAM entry in one go. */
155static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
156 unsigned int index)
157{
158 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
159 unsigned long flags;
160
161 spin_lock_irqsave(&efx->biu_lock, flags);
162#ifdef FALCON_USE_QWORD_IO
163 value->u64[0] = _falcon_readq(efx, reg + 0);
164#else
165 value->u32[0] = _falcon_readl(efx, reg + 0);
166 rmb();
167 value->u32[1] = _falcon_readl(efx, reg + 4);
168#endif
169 spin_unlock_irqrestore(&efx->biu_lock, flags);
170
171 EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
172 reg, EFX_QWORD_VAL(*value));
173}
174
175/* Read dword from Falcon register that allows partial writes (sic) */
176static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
177 unsigned int reg)
178{
179 value->u32[0] = _falcon_readl(efx, reg);
180 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
181 reg, EFX_DWORD_VAL(*value));
182}
183
184/* Write to a register forming part of a table */
185static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
186 unsigned int reg, unsigned int index)
187{
188 falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
189}
190
191/* Read from a register forming part of a table */
192static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
193 unsigned int reg, unsigned int index)
194{
195 falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
196}
197
198/* Write to a dword register forming part of a table */
199static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
200 unsigned int reg, unsigned int index)
201{
202 falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
203}
204
205/* Page-mapped register block size */
206#define FALCON_PAGE_BLOCK_SIZE 0x2000
207
208/* Calculate offset to page-mapped register block */
209#define FALCON_PAGED_REG(page, reg) \
210 ((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
211
212/* As for falcon_write(), but for a page-mapped register. */
213static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
214 unsigned int reg, unsigned int page)
215{
216 falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
217}
218
219/* As for falcon_writel(), but for a page-mapped register. */
220static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
221 unsigned int reg, unsigned int page)
222{
223 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
224}
225
226/* Write dword to Falcon page-mapped register with an extra lock.
227 *
228 * As for falcon_writel_page(), but for a register that suffers from
229 * SFC bug 3181. Take out a lock so the BIU collector cannot be
230 * confused. */
231static inline void falcon_writel_page_locked(struct efx_nic *efx,
232 efx_dword_t *value,
233 unsigned int reg,
234 unsigned int page)
235{
236 unsigned long flags;
237
238 spin_lock_irqsave(&efx->biu_lock, flags);
239 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
240 spin_unlock_irqrestore(&efx->biu_lock, flags);
241}
242
243#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
new file mode 100644
index 000000000000..aa7521b24a5d
--- /dev/null
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -0,0 +1,585 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "efx.h"
14#include "falcon.h"
15#include "falcon_hwdefs.h"
16#include "falcon_io.h"
17#include "mac.h"
18#include "gmii.h"
19#include "mdio_10g.h"
20#include "phy.h"
21#include "boards.h"
22#include "workarounds.h"
23
24/**************************************************************************
25 *
26 * MAC register access
27 *
28 **************************************************************************/
29
30/* Offset of an XMAC register within Falcon */
31#define FALCON_XMAC_REG(mac_reg) \
32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
33
34void falcon_xmac_writel(struct efx_nic *efx,
35 efx_dword_t *value, unsigned int mac_reg)
36{
37 efx_oword_t temp;
38
39 EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
40 falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg));
41}
42
43void falcon_xmac_readl(struct efx_nic *efx,
44 efx_dword_t *value, unsigned int mac_reg)
45{
46 efx_oword_t temp;
47
48 falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg));
49 EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
50}
51
52/**************************************************************************
53 *
54 * MAC operations
55 *
56 *************************************************************************/
57static int falcon_reset_xmac(struct efx_nic *efx)
58{
59 efx_dword_t reg;
60 int count;
61
62 EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1);
63 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
64
65	for (count = 0; count < 10000; count++) { /* wait up to 100ms */
66 falcon_xmac_readl(efx, &reg, XM_GLB_CFG_REG_MAC);
67 if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0)
68 return 0;
69 udelay(10);
70 }
71
72 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
73 return -ETIMEDOUT;
74}
75
76/* Configure the XAUI driver that is an output from Falcon */
77static void falcon_setup_xaui(struct efx_nic *efx)
78{
79 efx_dword_t sdctl, txdrv;
80
81 /* Move the XAUI into low power, unless there is no PHY, in
82 * which case the XAUI will have to drive a cable. */
83 if (efx->phy_type == PHY_TYPE_NONE)
84 return;
85
86 falcon_xmac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC);
87 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
88 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
89 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
90 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
91 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
92 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
93 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
94 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
95 falcon_xmac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC);
96
97 EFX_POPULATE_DWORD_8(txdrv,
98 XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
99 XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
100 XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
101 XX_DEQA, XX_TXDRV_DEQ_DEFAULT,
102 XX_DTXD, XX_TXDRV_DTX_DEFAULT,
103 XX_DTXC, XX_TXDRV_DTX_DEFAULT,
104 XX_DTXB, XX_TXDRV_DTX_DEFAULT,
105 XX_DTXA, XX_TXDRV_DTX_DEFAULT);
106 falcon_xmac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC);
107}
108
109static void falcon_hold_xaui_in_rst(struct efx_nic *efx)
110{
111 efx_dword_t reg;
112
113 EFX_ZERO_DWORD(reg);
114 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1);
115 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1);
116 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1);
117 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1);
118 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
119 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
120 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1);
121 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1);
122 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1);
123 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1);
124 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
125 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
126 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
127 udelay(10);
128}
129
130static int _falcon_reset_xaui_a(struct efx_nic *efx)
131{
132 efx_dword_t reg;
133
134 falcon_hold_xaui_in_rst(efx);
135 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
136
137 /* Follow the RAMBUS XAUI data reset sequencing
138 * Channels A and B first: power down, reset PLL, reset, clear
139 */
140 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0);
141 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0);
142 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
143 udelay(10);
144
145 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
146 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
147 udelay(10);
148
149 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0);
150 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0);
151 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
152 udelay(10);
153
154 /* Channels C and D: power down, reset PLL, reset, clear */
155 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0);
156 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0);
157 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
158 udelay(10);
159
160 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
161 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
162 udelay(10);
163
164 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0);
165 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0);
166 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
167 udelay(10);
168
169 /* Setup XAUI */
170 falcon_setup_xaui(efx);
171 udelay(10);
172
173 /* Take XGXS out of reset */
174 EFX_ZERO_DWORD(reg);
175 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
176 udelay(10);
177
178 return 0;
179}
180
181static int _falcon_reset_xaui_b(struct efx_nic *efx)
182{
183 efx_dword_t reg;
184 int count;
185
186 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
187 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
188
189 /* Give some time for the link to establish */
190	for (count = 0; count < 1000; count++) { /* wait up to 10ms */
191 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
192 if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
193 falcon_setup_xaui(efx);
194 return 0;
195 }
196 udelay(10);
197 }
198 EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
199 return -ETIMEDOUT;
200}
201
202int falcon_reset_xaui(struct efx_nic *efx)
203{
204 int rc;
205
206 if (EFX_WORKAROUND_9388(efx)) {
207 falcon_hold_xaui_in_rst(efx);
208 efx->phy_op->reset_xaui(efx);
209 rc = _falcon_reset_xaui_a(efx);
210 } else {
211 rc = _falcon_reset_xaui_b(efx);
212 }
213 return rc;
214}
215
216static int falcon_xgmii_status(struct efx_nic *efx)
217{
218 efx_dword_t reg;
219
220 if (FALCON_REV(efx) < FALCON_REV_B0)
221 return 1;
222
223 /* The ISR latches, so clear it and re-read */
224 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
225 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
226
227 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
228 EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
229 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
230 return 0;
231 }
232
233 return 1;
234}
235
236static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
237{
238 efx_dword_t reg;
239
240 if (FALCON_REV(efx) < FALCON_REV_B0)
241 return;
242
243 /* Flush the ISR */
244 if (enable)
245 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
246
247 EFX_POPULATE_DWORD_2(reg,
248 XM_MSK_RMTFLT, !enable,
249 XM_MSK_LCLFLT, !enable);
250 falcon_xmac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0);
251}
252
253int falcon_init_xmac(struct efx_nic *efx)
254{
255 int rc;
256
257 /* Initialize the PHY first so the clock is around */
258 rc = efx->phy_op->init(efx);
259 if (rc)
260 goto fail1;
261
262 rc = falcon_reset_xaui(efx);
263 if (rc)
264 goto fail2;
265
266 /* Wait again. Give the PHY and MAC time to come back */
267 schedule_timeout_uninterruptible(HZ / 10);
268
269 rc = falcon_reset_xmac(efx);
270 if (rc)
271 goto fail2;
272
273 falcon_mask_status_intr(efx, 1);
274 return 0;
275
276 fail2:
277 efx->phy_op->fini(efx);
278 fail1:
279 return rc;
280}
281
282int falcon_xaui_link_ok(struct efx_nic *efx)
283{
284 efx_dword_t reg;
285 int align_done, sync_status, link_ok = 0;
286
287 /* Read link status */
288 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
289
290 align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE);
291 sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT);
292 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
293 link_ok = 1;
294
295 /* Clear link status ready for next read */
296 EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
297 EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
298 EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
299 falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
300
301 /* If the link is up, then check the phy side of the xaui link
302	 * (error conditions from the wire side propagate back through
303 * the phy to the xaui side). */
304 if (efx->link_up && link_ok) {
305 int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
306 if (has_phyxs)
307 link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
308 }
309
310 /* If the PHY and XAUI links are up, then check the mac's xgmii
311 * fault state */
312 if (efx->link_up && link_ok)
313 link_ok = falcon_xgmii_status(efx);
314
315 return link_ok;
316}
317
318static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
319{
320 unsigned int max_frame_len;
321 efx_dword_t reg;
322 int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
323
324 /* Configure MAC - cut-thru mode is hard wired on */
325 EFX_POPULATE_DWORD_3(reg,
326 XM_RX_JUMBO_MODE, 1,
327 XM_TX_STAT_EN, 1,
328 XM_RX_STAT_EN, 1);
329 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
330
331 /* Configure TX */
332 EFX_POPULATE_DWORD_6(reg,
333 XM_TXEN, 1,
334 XM_TX_PRMBL, 1,
335 XM_AUTO_PAD, 1,
336 XM_TXCRC, 1,
337 XM_FCNTL, 1,
338 XM_IPG, 0x3);
339 falcon_xmac_writel(efx, &reg, XM_TX_CFG_REG_MAC);
340
341 /* Configure RX */
342 EFX_POPULATE_DWORD_5(reg,
343 XM_RXEN, 1,
344 XM_AUTO_DEPAD, 0,
345 XM_ACPT_ALL_MCAST, 1,
346 XM_ACPT_ALL_UCAST, efx->promiscuous,
347 XM_PASS_CRC_ERR, 1);
348 falcon_xmac_writel(efx, &reg, XM_RX_CFG_REG_MAC);
349
350 /* Set frame length */
351 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
352 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
353 falcon_xmac_writel(efx, &reg, XM_RX_PARAM_REG_MAC);
354 EFX_POPULATE_DWORD_2(reg,
355 XM_MAX_TX_FRM_SIZE, max_frame_len,
356 XM_TX_JUMBO_MODE, 1);
357 falcon_xmac_writel(efx, &reg, XM_TX_PARAM_REG_MAC);
358
359 EFX_POPULATE_DWORD_2(reg,
360 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
361 XM_DIS_FCNTL, rx_fc ? 0 : 1);
362 falcon_xmac_writel(efx, &reg, XM_FC_REG_MAC);
363
364 /* Set MAC address */
365 EFX_POPULATE_DWORD_4(reg,
366 XM_ADR_0, efx->net_dev->dev_addr[0],
367 XM_ADR_1, efx->net_dev->dev_addr[1],
368 XM_ADR_2, efx->net_dev->dev_addr[2],
369 XM_ADR_3, efx->net_dev->dev_addr[3]);
370 falcon_xmac_writel(efx, &reg, XM_ADR_LO_REG_MAC);
371 EFX_POPULATE_DWORD_2(reg,
372 XM_ADR_4, efx->net_dev->dev_addr[4],
373 XM_ADR_5, efx->net_dev->dev_addr[5]);
374 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
375}
376
377/* Try to bring the Falcon side of the Falcon-PHY XAUI link back up if it
378 * fails to come up. Bash it until it does. */
379static int falcon_check_xaui_link_up(struct efx_nic *efx)
380{
381 int max_tries, tries;
382 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
383 max_tries = tries;
384
385 if (efx->phy_type == PHY_TYPE_NONE)
386 return 0;
387
388 while (tries) {
389 if (falcon_xaui_link_ok(efx))
390 return 1;
391
392 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
393 __func__, tries);
394 (void) falcon_reset_xaui(efx);
395 udelay(200);
396 tries--;
397 }
398
399 EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
400 max_tries);
401 return 0;
402}
403
404void falcon_reconfigure_xmac(struct efx_nic *efx)
405{
406 int xaui_link_ok;
407
408 falcon_mask_status_intr(efx, 0);
409
410 falcon_deconfigure_mac_wrapper(efx);
411 efx->phy_op->reconfigure(efx);
412 falcon_reconfigure_xmac_core(efx);
413 falcon_reconfigure_mac_wrapper(efx);
414
415 /* Ensure XAUI link is up */
416 xaui_link_ok = falcon_check_xaui_link_up(efx);
417
418 if (xaui_link_ok && efx->link_up)
419 falcon_mask_status_intr(efx, 1);
420}
421
422void falcon_fini_xmac(struct efx_nic *efx)
423{
424	/* Isolate the MAC from the PHY */
425 falcon_deconfigure_mac_wrapper(efx);
426
427 /* Potentially power down the PHY */
428 efx->phy_op->fini(efx);
429}
430
431void falcon_update_stats_xmac(struct efx_nic *efx)
432{
433 struct efx_mac_stats *mac_stats = &efx->mac_stats;
434 int rc;
435
436 rc = falcon_dma_stats(efx, XgDmaDone_offset);
437 if (rc)
438 return;
439
440 /* Update MAC stats from DMAed values */
441 FALCON_STAT(efx, XgRxOctets, rx_bytes);
442 FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
443 FALCON_STAT(efx, XgRxPkts, rx_packets);
444 FALCON_STAT(efx, XgRxPktsOK, rx_good);
445 FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
446 FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
447 FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
448 FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
449 FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
450 FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
451 FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
452 FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
453 FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
454 FALCON_STAT(efx, XgRxAlignError, rx_align_error);
455 FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
456 FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
457 FALCON_STAT(efx, XgRxControlPkts, rx_control);
458 FALCON_STAT(efx, XgRxPausePkts, rx_pause);
459 FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
460 FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
461 FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
462 FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
463 FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
464 FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
465 FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
466 FALCON_STAT(efx, XgRxLengthError, rx_length_error);
467 FALCON_STAT(efx, XgTxPkts, tx_packets);
468 FALCON_STAT(efx, XgTxOctets, tx_bytes);
469 FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
470 FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
471 FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
472 FALCON_STAT(efx, XgTxControlPkts, tx_control);
473 FALCON_STAT(efx, XgTxPausePkts, tx_pause);
474 FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
475 FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
476 FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
477 FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
478 FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
479 FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
480 FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
481 FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
482 FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
483 FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
484 FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
485 FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
486
487 /* Update derived statistics */
488 mac_stats->tx_good_bytes =
489 (mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
490 mac_stats->rx_bad_bytes =
491 (mac_stats->rx_bytes - mac_stats->rx_good_bytes);
492}
493
494#define EFX_XAUI_RETRAIN_MAX 8
495
496int falcon_check_xmac(struct efx_nic *efx)
497{
498 unsigned xaui_link_ok;
499 int rc;
500
501 falcon_mask_status_intr(efx, 0);
502 xaui_link_ok = falcon_xaui_link_ok(efx);
503
504 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
505 (void) falcon_reset_xaui(efx);
506
507 /* Call the PHY check_hw routine */
508 rc = efx->phy_op->check_hw(efx);
509
510 /* Unmask interrupt if everything was (and still is) ok */
511 if (xaui_link_ok && efx->link_up)
512 falcon_mask_status_intr(efx, 1);
513
514 return rc;
515}
516
517/* Simulate a PHY event */
518void falcon_xmac_sim_phy_event(struct efx_nic *efx)
519{
520 efx_qword_t phy_event;
521
522 EFX_POPULATE_QWORD_2(phy_event,
523 EV_CODE, GLOBAL_EV_DECODE,
524 XG_PHY_INTR, 1);
525 falcon_generate_event(&efx->channel[0], &phy_event);
526}
527
528int falcon_xmac_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
529{
530 mdio_clause45_get_settings(efx, ecmd);
531 ecmd->transceiver = XCVR_INTERNAL;
532 ecmd->phy_address = efx->mii.phy_id;
533 ecmd->autoneg = AUTONEG_DISABLE;
534 ecmd->duplex = DUPLEX_FULL;
535 return 0;
536}
537
538int falcon_xmac_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
539{
540 if (ecmd->transceiver != XCVR_INTERNAL)
541 return -EINVAL;
542 if (ecmd->autoneg != AUTONEG_DISABLE)
543 return -EINVAL;
544 if (ecmd->duplex != DUPLEX_FULL)
545 return -EINVAL;
546
547 return mdio_clause45_set_settings(efx, ecmd);
548}
549
550
551int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
552{
553 int reset;
554
555 if (flow_control & EFX_FC_AUTO) {
556 EFX_LOG(efx, "10G does not support flow control "
557 "autonegotiation\n");
558 return -EINVAL;
559 }
560
561 if ((flow_control & EFX_FC_TX) && !(flow_control & EFX_FC_RX))
562 return -EINVAL;
563
564 /* TX flow control may automatically turn itself off if the
565 * link partner (intermittently) stops responding to pause
566 * frames. There isn't any indication that this has happened,
567	 * so the best we can do is leave it up to the user to spot this
568	 * and fix it by cycling transmit flow control on this end. */
569 reset = ((flow_control & EFX_FC_TX) &&
570 !(efx->flow_control & EFX_FC_TX));
571 if (EFX_WORKAROUND_11482(efx) && reset) {
572 if (FALCON_REV(efx) >= FALCON_REV_B0) {
573 /* Recover by resetting the EM block */
574 if (efx->link_up)
575 falcon_drain_tx_fifo(efx);
576 } else {
577 /* Schedule a reset to recover */
578 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
579 }
580 }
581
582 efx->flow_control = flow_control;
583
584 return 0;
585}
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
new file mode 100644
index 000000000000..d25bbd1297f4
--- /dev/null
+++ b/drivers/net/sfc/gmii.h
@@ -0,0 +1,195 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_GMII_H
12#define EFX_GMII_H
13
14/*
15 * GMII interface
16 */
17
18#include <linux/mii.h>
19
20/* GMII registers, excluding registers already defined as MII
21 * registers in mii.h
22 */
23#define GMII_IER 0x12 /* Interrupt enable register */
24#define GMII_ISR 0x13 /* Interrupt status register */
25
26/* Interrupt enable register */
27#define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
28#define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
29#define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
30#define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */
31#define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
32#define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */
33#define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */
34#define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
35#define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
36#define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
37#define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
38#define IER_ENERGY 0x0010 /* Bit 4 - energy detect */
39#define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
40#define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
41#define IER_JABBER 0x0001 /* Bit 0 - jabber */
42
43/* Interrupt status register */
44#define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
45#define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
46#define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
47#define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */
48#define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
49#define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */
50#define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */
51#define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
52#define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
53#define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
54#define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
55#define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */
56#define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
57#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
58#define ISR_JABBER 0x0001 /* Bit 0 - jabber */
59
60/* Logically extended advertisement register */
61#define GM_ADVERTISE_SLCT ADVERTISE_SLCT
62#define GM_ADVERTISE_CSMA ADVERTISE_CSMA
63#define GM_ADVERTISE_10HALF ADVERTISE_10HALF
64#define GM_ADVERTISE_1000XFULL ADVERTISE_1000XFULL
65#define GM_ADVERTISE_10FULL ADVERTISE_10FULL
66#define GM_ADVERTISE_1000XHALF ADVERTISE_1000XHALF
67#define GM_ADVERTISE_100HALF ADVERTISE_100HALF
68#define GM_ADVERTISE_1000XPAUSE ADVERTISE_1000XPAUSE
69#define GM_ADVERTISE_100FULL ADVERTISE_100FULL
70#define GM_ADVERTISE_1000XPSE_ASYM ADVERTISE_1000XPSE_ASYM
71#define GM_ADVERTISE_100BASE4 ADVERTISE_100BASE4
72#define GM_ADVERTISE_PAUSE_CAP ADVERTISE_PAUSE_CAP
73#define GM_ADVERTISE_PAUSE_ASYM ADVERTISE_PAUSE_ASYM
74#define GM_ADVERTISE_RESV ADVERTISE_RESV
75#define GM_ADVERTISE_RFAULT ADVERTISE_RFAULT
76#define GM_ADVERTISE_LPACK ADVERTISE_LPACK
77#define GM_ADVERTISE_NPAGE ADVERTISE_NPAGE
78#define GM_ADVERTISE_1000FULL (ADVERTISE_1000FULL << 8)
79#define GM_ADVERTISE_1000HALF (ADVERTISE_1000HALF << 8)
80#define GM_ADVERTISE_1000 (GM_ADVERTISE_1000FULL | \
81 GM_ADVERTISE_1000HALF)
82#define GM_ADVERTISE_FULL (GM_ADVERTISE_1000FULL | \
83 ADVERTISE_FULL)
84#define GM_ADVERTISE_ALL (GM_ADVERTISE_1000FULL | \
85 GM_ADVERTISE_1000HALF | \
86 ADVERTISE_ALL)
87
88/* Logically extended link partner ability register */
89#define GM_LPA_SLCT LPA_SLCT
90#define GM_LPA_10HALF LPA_10HALF
91#define GM_LPA_1000XFULL LPA_1000XFULL
92#define GM_LPA_10FULL LPA_10FULL
93#define GM_LPA_1000XHALF LPA_1000XHALF
94#define GM_LPA_100HALF LPA_100HALF
95#define GM_LPA_1000XPAUSE LPA_1000XPAUSE
96#define GM_LPA_100FULL LPA_100FULL
97#define GM_LPA_1000XPAUSE_ASYM LPA_1000XPAUSE_ASYM
98#define GM_LPA_100BASE4 LPA_100BASE4
99#define GM_LPA_PAUSE_CAP LPA_PAUSE_CAP
100#define GM_LPA_PAUSE_ASYM LPA_PAUSE_ASYM
101#define GM_LPA_RESV LPA_RESV
102#define GM_LPA_RFAULT LPA_RFAULT
103#define GM_LPA_LPACK LPA_LPACK
104#define GM_LPA_NPAGE LPA_NPAGE
105#define GM_LPA_1000FULL (LPA_1000FULL << 6)
106#define GM_LPA_1000HALF (LPA_1000HALF << 6)
107#define GM_LPA_10000FULL 0x00040000
108#define GM_LPA_10000HALF 0x00080000
109#define GM_LPA_DUPLEX (GM_LPA_1000FULL | GM_LPA_10000FULL \
110 | LPA_DUPLEX)
111#define GM_LPA_10 (LPA_10FULL | LPA_10HALF)
112#define GM_LPA_100 LPA_100
113#define GM_LPA_1000 (GM_LPA_1000FULL | GM_LPA_1000HALF)
114#define GM_LPA_10000 (GM_LPA_10000FULL | GM_LPA_10000HALF)
115
116/* Retrieve GMII autonegotiation advertised abilities
117 *
118 * The MII advertisement register (MII_ADVERTISE) is logically extended
119 * to include advertisement bits ADVERTISE_1000FULL and
120 * ADVERTISE_1000HALF from MII_CTRL1000. The result can be tested
121 * against the GM_ADVERTISE_xxx constants.
122 */
123static inline unsigned int gmii_advertised(struct mii_if_info *gmii)
124{
125 unsigned int advertise;
126 unsigned int ctrl1000;
127
128 advertise = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_ADVERTISE);
129 ctrl1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_CTRL1000);
130 return (((ctrl1000 << 8) & GM_ADVERTISE_1000) | advertise);
131}
132
133/* Retrieve GMII autonegotiation link partner abilities
134 *
135 * The MII link partner ability register (MII_LPA) is logically
136 * extended by adding bits LPA_1000HALF and LPA_1000FULL from
137 * MII_STAT1000. The result can be tested against the GM_LPA_xxx
138 * constants.
139 */
140static inline unsigned int gmii_lpa(struct mii_if_info *gmii)
141{
142 unsigned int lpa;
143 unsigned int stat1000;
144
145 lpa = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_LPA);
146 stat1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_STAT1000);
147 return (((stat1000 << 6) & GM_LPA_1000) | lpa);
148}
149
150/* Calculate GMII autonegotiated link technology
151 *
152 * "negotiated" should be the result of gmii_advertised() logically
153 * ANDed with the result of gmii_lpa().
154 *
155 * The return value is "negotiated" with the unused bits masked out. For
156 * example, if both ends of the link are capable of both
157 * GM_LPA_1000FULL and GM_LPA_100FULL, GM_LPA_100FULL will be masked
158 * out.
159 */
160static inline unsigned int gmii_nway_result(unsigned int negotiated)
161{
162 unsigned int other_bits;
163
164	/* Mask out the speed and duplex bits */
165 other_bits = negotiated & ~(GM_LPA_10 | GM_LPA_100 | GM_LPA_1000);
166
167 if (negotiated & GM_LPA_1000FULL)
168 return (other_bits | GM_LPA_1000FULL);
169 else if (negotiated & GM_LPA_1000HALF)
170 return (other_bits | GM_LPA_1000HALF);
171 else
172 return (other_bits | mii_nway_result(negotiated));
173}
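/* Usage sketch (not part of the original header): combining the helpers
 * above, the negotiated link technology for a hypothetical
 * struct mii_if_info pointer "gmii" would be computed as
 *
 *	unsigned int negotiated = gmii_advertised(gmii) & gmii_lpa(gmii);
 *	unsigned int tech = gmii_nway_result(negotiated);
 *
 * following the "logically ANDed" convention documented above.
 */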
174
175/* Calculate GMII non-autonegotiated link technology
176 *
177 * This provides an equivalent to gmii_nway_result for the case when
178 * autonegotiation is disabled.
179 */
180static inline unsigned int gmii_forced_result(unsigned int bmcr)
181{
182 unsigned int result;
183 int full_duplex;
184
185 full_duplex = bmcr & BMCR_FULLDPLX;
186 if (bmcr & BMCR_SPEED1000)
187 result = full_duplex ? GM_LPA_1000FULL : GM_LPA_1000HALF;
188 else if (bmcr & BMCR_SPEED100)
189 result = full_duplex ? GM_LPA_100FULL : GM_LPA_100HALF;
190 else
191 result = full_duplex ? GM_LPA_10FULL : GM_LPA_10HALF;
192 return result;
193}
194
195#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/i2c-direct.c b/drivers/net/sfc/i2c-direct.c
new file mode 100644
index 000000000000..b6c62d0ed9c2
--- /dev/null
+++ b/drivers/net/sfc/i2c-direct.c
@@ -0,0 +1,381 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "i2c-direct.h"
14
15/*
16 * I2C data (SDA) and clock (SCL) line read/writes with appropriate
17 * delays.
18 */
19
20static inline void setsda(struct efx_i2c_interface *i2c, int state)
21{
22 udelay(i2c->op->udelay);
23 i2c->sda = state;
24 i2c->op->setsda(i2c);
25 udelay(i2c->op->udelay);
26}
27
28static inline void setscl(struct efx_i2c_interface *i2c, int state)
29{
30 udelay(i2c->op->udelay);
31 i2c->scl = state;
32 i2c->op->setscl(i2c);
33 udelay(i2c->op->udelay);
34}
35
36static inline int getsda(struct efx_i2c_interface *i2c)
37{
38 int sda;
39
40 udelay(i2c->op->udelay);
41 sda = i2c->op->getsda(i2c);
42 udelay(i2c->op->udelay);
43 return sda;
44}
45
46static inline int getscl(struct efx_i2c_interface *i2c)
47{
48 int scl;
49
50 udelay(i2c->op->udelay);
51 scl = i2c->op->getscl(i2c);
52 udelay(i2c->op->udelay);
53 return scl;
54}
55
56/*
57 * I2C low-level protocol operations
58 *
59 */
60
61static inline void i2c_release(struct efx_i2c_interface *i2c)
62{
63 EFX_WARN_ON_PARANOID(!i2c->scl);
64 EFX_WARN_ON_PARANOID(!i2c->sda);
65 /* Devices may time out if operations do not end */
66 setscl(i2c, 1);
67 setsda(i2c, 1);
68 EFX_BUG_ON_PARANOID(getsda(i2c) != 1);
69 EFX_BUG_ON_PARANOID(getscl(i2c) != 1);
70}
71
72static inline void i2c_start(struct efx_i2c_interface *i2c)
73{
74 /* We may be restarting immediately after a {send,recv}_bit,
75 * so SCL will not necessarily already be high.
76 */
77 EFX_WARN_ON_PARANOID(!i2c->sda);
78 setscl(i2c, 1);
79 setsda(i2c, 0);
80 setscl(i2c, 0);
81 setsda(i2c, 1);
82}
83
84static inline void i2c_send_bit(struct efx_i2c_interface *i2c, int bit)
85{
86 EFX_WARN_ON_PARANOID(i2c->scl != 0);
87 setsda(i2c, bit);
88 setscl(i2c, 1);
89 setscl(i2c, 0);
90 setsda(i2c, 1);
91}
92
93static inline int i2c_recv_bit(struct efx_i2c_interface *i2c)
94{
95 int bit;
96
97 EFX_WARN_ON_PARANOID(i2c->scl != 0);
98 EFX_WARN_ON_PARANOID(!i2c->sda);
99 setscl(i2c, 1);
100 bit = getsda(i2c);
101 setscl(i2c, 0);
102 return bit;
103}
104
105static inline void i2c_stop(struct efx_i2c_interface *i2c)
106{
107 EFX_WARN_ON_PARANOID(i2c->scl != 0);
108 setsda(i2c, 0);
109 setscl(i2c, 1);
110 setsda(i2c, 1);
111}
112
113/*
114 * I2C mid-level protocol operations
115 *
116 */
117
118/* Sends a byte via the I2C bus and checks for an acknowledgement from
119 * the slave device.
120 */
121static int i2c_send_byte(struct efx_i2c_interface *i2c, u8 byte)
122{
123 int i;
124
125 /* Send byte */
126 for (i = 0; i < 8; i++) {
127 i2c_send_bit(i2c, !!(byte & 0x80));
128 byte <<= 1;
129 }
130
131 /* Check for acknowledgement from slave */
132 return (i2c_recv_bit(i2c) == 0 ? 0 : -EIO);
133}
134
135/* Receives a byte via the I2C bus and sends ACK/NACK to the slave device. */
136static u8 i2c_recv_byte(struct efx_i2c_interface *i2c, int ack)
137{
138 u8 value = 0;
139 int i;
140
141 /* Receive byte */
142 for (i = 0; i < 8; i++)
143 value = (value << 1) | i2c_recv_bit(i2c);
144
145 /* Send ACK/NACK */
146 i2c_send_bit(i2c, (ack ? 0 : 1));
147
148 return value;
149}
150
151/* Calculate command byte for a read operation */
152static inline u8 i2c_read_cmd(u8 device_id)
153{
154 return ((device_id << 1) | 1);
155}
156
157/* Calculate command byte for a write operation */
158static inline u8 i2c_write_cmd(u8 device_id)
159{
160 return ((device_id << 1) | 0);
161}
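/* For example (hypothetical slave address, not from this driver): a device
 * with 7-bit device_id 0x50 gives i2c_write_cmd(0x50) == 0xa0 and
 * i2c_read_cmd(0x50) == 0xa1, i.e. the address shifted left by one with the
 * read/write flag in bit 0, matching the standard I2C address byte layout.
 */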
162
163int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id)
164{
165 int rc;
166
167 /* If someone is driving the bus low we just give up. */
168 if (getsda(i2c) == 0 || getscl(i2c) == 0) {
169 EFX_ERR(i2c->efx, "%s someone is holding the I2C bus low."
170 " Giving up.\n", __func__);
171 return -EFAULT;
172 }
173
174 /* Pretend to initiate a device write */
175 i2c_start(i2c);
176 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
177 if (rc)
178 goto out;
179
180 out:
181 i2c_stop(i2c);
182 i2c_release(i2c);
183
184 return rc;
185}
186
187/* This performs a fast read of one or more consecutive bytes from an
188 * I2C device. Not all devices support consecutive reads of more than
189 * one byte; for these devices use efx_i2c_read() instead.
190 */
191int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
192 u8 device_id, u8 offset, u8 *data, unsigned int len)
193{
194 int i;
195 int rc;
196
197 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
198 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
199 EFX_WARN_ON_PARANOID(data == NULL);
200 EFX_WARN_ON_PARANOID(len < 1);
201
202 /* Select device and starting offset */
203 i2c_start(i2c);
204 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
205 if (rc)
206 goto out;
207 rc = i2c_send_byte(i2c, offset);
208 if (rc)
209 goto out;
210
211 /* Read data from device */
212 i2c_start(i2c);
213 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
214 if (rc)
215 goto out;
216 for (i = 0; i < (len - 1); i++)
217 /* Read and acknowledge all but the last byte */
218 data[i] = i2c_recv_byte(i2c, 1);
219 /* Read last byte with no acknowledgement */
220 data[i] = i2c_recv_byte(i2c, 0);
221
222 out:
223 i2c_stop(i2c);
224 i2c_release(i2c);
225
226 return rc;
227}
228
229/* This performs a fast write of one or more consecutive bytes to an
230 * I2C device. Not all devices support consecutive writes of more
231 * than one byte; for these devices use efx_i2c_write() instead.
232 */
233int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
234 u8 device_id, u8 offset,
235 const u8 *data, unsigned int len)
236{
237 int i;
238 int rc;
239
240 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
241 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
242 EFX_WARN_ON_PARANOID(len < 1);
243
244 /* Select device and starting offset */
245 i2c_start(i2c);
246 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
247 if (rc)
248 goto out;
249 rc = i2c_send_byte(i2c, offset);
250 if (rc)
251 goto out;
252
253 /* Write data to device */
254 for (i = 0; i < len; i++) {
255 rc = i2c_send_byte(i2c, data[i]);
256 if (rc)
257 goto out;
258 }
259
260 out:
261 i2c_stop(i2c);
262 i2c_release(i2c);
263
264 return rc;
265}
266
267/* I2C byte-by-byte read */
268int efx_i2c_read(struct efx_i2c_interface *i2c,
269 u8 device_id, u8 offset, u8 *data, unsigned int len)
270{
271 int rc;
272
273 /* i2c_fast_read with length 1 is a single byte read */
274 for (; len > 0; offset++, data++, len--) {
275 rc = efx_i2c_fast_read(i2c, device_id, offset, data, 1);
276 if (rc)
277 return rc;
278 }
279
280 return 0;
281}
282
283/* I2C byte-by-byte write */
284int efx_i2c_write(struct efx_i2c_interface *i2c,
285 u8 device_id, u8 offset, const u8 *data, unsigned int len)
286{
287 int rc;
288
289 /* i2c_fast_write with length 1 is a single byte write */
290 for (; len > 0; offset++, data++, len--) {
291 rc = efx_i2c_fast_write(i2c, device_id, offset, data, 1);
292 if (rc)
293 return rc;
294 mdelay(i2c->op->mdelay);
295 }
296
297 return 0;
298}
299
300
301/* This is just a slightly neater wrapper round efx_i2c_fast_write
302 * in the case where the target doesn't take an offset
303 */
304int efx_i2c_send_bytes(struct efx_i2c_interface *i2c,
305 u8 device_id, const u8 *data, unsigned int len)
306{
307 return efx_i2c_fast_write(i2c, device_id, data[0], data + 1, len - 1);
308}
309
310/* I2C receiving of bytes - does not send an offset byte */
311int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
312 u8 *bytes, unsigned int len)
313{
314 int i;
315 int rc;
316
317 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
318 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
319 EFX_WARN_ON_PARANOID(len < 1);
320
321 /* Select device */
322 i2c_start(i2c);
323
324 /* Read data from device */
325 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
326 if (rc)
327 goto out;
328
329 for (i = 0; i < (len - 1); i++)
330 /* Read and acknowledge all but the last byte */
331 bytes[i] = i2c_recv_byte(i2c, 1);
332 /* Read last byte with no acknowledgement */
333 bytes[i] = i2c_recv_byte(i2c, 0);
334
335 out:
336 i2c_stop(i2c);
337 i2c_release(i2c);
338
339 return rc;
340}
341
342/* SMBus and some I2C devices will time out if the I2C clock is
343 * held low for too long. This is most likely to happen in virtualised
344 * systems (when the entire domain is descheduled) but could in
345 * principle happen due to preemption on any busy system (and given the
346 * potential length of an I2C operation turning preemption off is not
347 * a sensible option). The following functions deal with the failure by
348 * retrying up to a fixed number of times.
349 */
350
351#define I2C_MAX_RETRIES (10)
352
353/* The timeout problem will result in -EIO. If the wrapped function
354 * returns any other error, pass this up and do not retry. */
355#define RETRY_WRAPPER(_f) \
356 int retries = I2C_MAX_RETRIES; \
357 int rc; \
358 while (retries) { \
359 rc = _f; \
360 if (rc != -EIO) \
361 return rc; \
362 retries--; \
363 } \
364 return rc; \
365
366int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c, u8 device_id)
367{
368 RETRY_WRAPPER(efx_i2c_check_presence(i2c, device_id))
369}
370
371int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
372 u8 device_id, u8 offset, u8 *data, unsigned int len)
373{
374 RETRY_WRAPPER(efx_i2c_read(i2c, device_id, offset, data, len))
375}
376
377int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
378 u8 device_id, u8 offset, const u8 *data, unsigned int len)
379{
380 RETRY_WRAPPER(efx_i2c_write(i2c, device_id, offset, data, len))
381}
diff --git a/drivers/net/sfc/i2c-direct.h b/drivers/net/sfc/i2c-direct.h
new file mode 100644
index 000000000000..291e561071f5
--- /dev/null
+++ b/drivers/net/sfc/i2c-direct.h
@@ -0,0 +1,91 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_I2C_DIRECT_H
12#define EFX_I2C_DIRECT_H
13
14#include "net_driver.h"
15
16/*
17 * Direct control of an I2C bus
18 */
19
20struct efx_i2c_interface;
21
22/**
23 * struct efx_i2c_bit_operations - I2C bus direct control methods
24 *
25 * I2C bus direct control methods.
26 *
27 * @setsda: Set state of SDA line
28 * @setscl: Set state of SCL line
29 * @getsda: Get state of SDA line
30 * @getscl: Get state of SCL line
31 * @udelay: Delay between each bit operation
32 * @mdelay: Delay between each byte write
33 */
34struct efx_i2c_bit_operations {
35 void (*setsda) (struct efx_i2c_interface *i2c);
36 void (*setscl) (struct efx_i2c_interface *i2c);
37 int (*getsda) (struct efx_i2c_interface *i2c);
38 int (*getscl) (struct efx_i2c_interface *i2c);
39 unsigned int udelay;
40 unsigned int mdelay;
41};
42
43/**
44 * struct efx_i2c_interface - an I2C interface
45 *
46 * An I2C interface.
47 *
48 * @efx: Attached Efx NIC
49 * @op: I2C bus control methods
50 * @sda: Current output state of SDA line
51 * @scl: Current output state of SCL line
52 */
53struct efx_i2c_interface {
54 struct efx_nic *efx;
55 struct efx_i2c_bit_operations *op;
56 unsigned int sda:1;
57 unsigned int scl:1;
58};
59
60extern int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id);
61extern int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
62 u8 device_id, u8 offset,
63 u8 *data, unsigned int len);
64extern int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
65 u8 device_id, u8 offset,
66 const u8 *data, unsigned int len);
67extern int efx_i2c_read(struct efx_i2c_interface *i2c,
68 u8 device_id, u8 offset, u8 *data, unsigned int len);
69extern int efx_i2c_write(struct efx_i2c_interface *i2c,
70 u8 device_id, u8 offset,
71 const u8 *data, unsigned int len);
72
73extern int efx_i2c_send_bytes(struct efx_i2c_interface *i2c, u8 device_id,
74 const u8 *bytes, unsigned int len);
75
76extern int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
77 u8 *bytes, unsigned int len);
78
79
80/* Versions of the API that retry on failure. */
81extern int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c,
82 u8 device_id);
83
84extern int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
85 u8 device_id, u8 offset, u8 *data, unsigned int len);
86
87extern int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
88 u8 device_id, u8 offset,
89 const u8 *data, unsigned int len);
90
91#endif /* EFX_I2C_DIRECT_H */
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
new file mode 100644
index 000000000000..edd07d4dee18
--- /dev/null
+++ b/drivers/net/sfc/mac.h
@@ -0,0 +1,33 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2007 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_MAC_H
12#define EFX_MAC_H
13
14#include "net_driver.h"
15
16extern void falcon_xmac_writel(struct efx_nic *efx,
17 efx_dword_t *value, unsigned int mac_reg);
18extern void falcon_xmac_readl(struct efx_nic *efx,
19 efx_dword_t *value, unsigned int mac_reg);
20extern int falcon_init_xmac(struct efx_nic *efx);
21extern void falcon_reconfigure_xmac(struct efx_nic *efx);
22extern void falcon_update_stats_xmac(struct efx_nic *efx);
23extern void falcon_fini_xmac(struct efx_nic *efx);
24extern int falcon_check_xmac(struct efx_nic *efx);
25extern void falcon_xmac_sim_phy_event(struct efx_nic *efx);
26extern int falcon_xmac_get_settings(struct efx_nic *efx,
27 struct ethtool_cmd *ecmd);
28extern int falcon_xmac_set_settings(struct efx_nic *efx,
29 struct ethtool_cmd *ecmd);
30extern int falcon_xmac_set_pause(struct efx_nic *efx,
31 enum efx_fc_type pause_params);
32
33#endif
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
new file mode 100644
index 000000000000..dc06bb0aa575
--- /dev/null
+++ b/drivers/net/sfc/mdio_10g.c
@@ -0,0 +1,282 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9/*
10 * Useful functions for working with MDIO clause 45 PHYs
11 */
12#include <linux/types.h>
13#include <linux/ethtool.h>
14#include <linux/delay.h>
15#include "net_driver.h"
16#include "mdio_10g.h"
17#include "boards.h"
18
19int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
20 int spins, int spintime)
21{
22 u32 ctrl;
23 int phy_id = port->mii.phy_id;
24
25 /* Catch callers passing values in the wrong units (or just silly) */
26 EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
27
28 mdio_clause45_write(port, phy_id, mmd, MDIO_MMDREG_CTRL1,
29 (1 << MDIO_MMDREG_CTRL1_RESET_LBN));
30 /* Wait for the reset bit to clear. */
31 do {
32 msleep(spintime);
33 ctrl = mdio_clause45_read(port, phy_id, mmd, MDIO_MMDREG_CTRL1);
34 spins--;
35
36 } while (spins && (ctrl & (1 << MDIO_MMDREG_CTRL1_RESET_LBN)));
37
38 return spins ? spins : -ETIMEDOUT;
39}
40
41static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
42 int fault_fatal)
43{
44 int status;
45 int phy_id = efx->mii.phy_id;
46
47 /* Read MMD STATUS2 to check it is responding. */
48 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
49 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
50 ((1 << MDIO_MMDREG_STAT2_PRESENT_WIDTH) - 1)) !=
51 MDIO_MMDREG_STAT2_PRESENT_VAL) {
52 EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd);
53 return -EIO;
54 }
55
56 /* Read MMD STATUS 1 to check for fault. */
57 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT1);
58 if ((status & (1 << MDIO_MMDREG_STAT1_FAULT_LBN)) != 0) {
59 if (fault_fatal) {
60 EFX_ERR(efx, "PHY MMD %d reporting fatal"
61 " fault: status %x\n", mmd, status);
62 return -EIO;
63 } else {
64 EFX_LOG(efx, "PHY MMD %d reporting status"
65 " %x (expected)\n", mmd, status);
66 }
67 }
68 return 0;
69}
70
71/* This ought to be ridiculous overkill. We expect it to fail rarely */
72#define MDIO45_RESET_TIME 1000 /* ms */
73#define MDIO45_RESET_ITERS 100
74
75int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
76 unsigned int mmd_mask)
77{
78 const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
79 int tries = MDIO45_RESET_ITERS;
80 int rc = 0;
81 int in_reset;
82
83 while (tries) {
84 int mask = mmd_mask;
85 int mmd = 0;
86 int stat;
87 in_reset = 0;
88 while (mask) {
89 if (mask & 1) {
90 stat = mdio_clause45_read(efx,
91 efx->mii.phy_id,
92 mmd,
93 MDIO_MMDREG_CTRL1);
94 if (stat < 0) {
95 EFX_ERR(efx, "failed to read status of"
96 " MMD %d\n", mmd);
97 return -EIO;
98 }
99 if (stat & (1 << MDIO_MMDREG_CTRL1_RESET_LBN))
100 in_reset |= (1 << mmd);
101 }
102 mask = mask >> 1;
103 mmd++;
104 }
105 if (!in_reset)
106 break;
107 tries--;
108 msleep(spintime);
109 }
110 if (in_reset != 0) {
111 EFX_ERR(efx, "not all MMDs came out of reset in time."
112 " MMDs still in reset: %x\n", in_reset);
113 rc = -ETIMEDOUT;
114 }
115 return rc;
116}
117
118int mdio_clause45_check_mmds(struct efx_nic *efx,
119 unsigned int mmd_mask, unsigned int fatal_mask)
120{
121 int devices, mmd = 0;
122 int probe_mmd;
123
124 /* Historically we have probed the PHYXS to find out what devices are
125	 * present, but that doesn't work so well if the PHYXS isn't expected
126	 * to exist; in that case just probe the first MMD in the mask supplied. */
127 probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS0_PHYXS) ? MDIO_MMD_PHYXS :
128 __ffs(mmd_mask);
129 devices = mdio_clause45_read(efx, efx->mii.phy_id,
130 probe_mmd, MDIO_MMDREG_DEVS0);
131
132 /* Check all the expected MMDs are present */
133 if (devices < 0) {
134 EFX_ERR(efx, "failed to read devices present\n");
135 return -EIO;
136 }
137 if ((devices & mmd_mask) != mmd_mask) {
138 EFX_ERR(efx, "required MMDs not present: got %x, "
139 "wanted %x\n", devices, mmd_mask);
140 return -ENODEV;
141 }
142 EFX_TRACE(efx, "Devices present: %x\n", devices);
143
144 /* Check all required MMDs are responding and happy. */
145 while (mmd_mask) {
146 if (mmd_mask & 1) {
147 int fault_fatal = fatal_mask & 1;
148 if (mdio_clause45_check_mmd(efx, mmd, fault_fatal))
149 return -EIO;
150 }
151 mmd_mask = mmd_mask >> 1;
152 fatal_mask = fatal_mask >> 1;
153 mmd++;
154 }
155
156 return 0;
157}
158
159int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
160{
161 int phy_id = efx->mii.phy_id;
162 int status;
163 int ok = 1;
164 int mmd = 0;
165 int good;
166
167 while (mmd_mask) {
168 if (mmd_mask & 1) {
169 /* Double reads because link state is latched, and a
170 * read moves the current state into the register */
171 status = mdio_clause45_read(efx, phy_id,
172 mmd, MDIO_MMDREG_STAT1);
173 status = mdio_clause45_read(efx, phy_id,
174 mmd, MDIO_MMDREG_STAT1);
175
176 good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN);
177 ok = ok && good;
178 }
179 mmd_mask = (mmd_mask >> 1);
180 mmd++;
181 }
182 return ok;
183}
184
185/**
186 * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
187 * @efx: Efx NIC
188 * @ecmd: Buffer for settings
189 *
190 * On return the 'port', 'speed', 'supported' and 'advertising' fields of
191 * ecmd have been filled out based on the PMA type.
192 */
193void mdio_clause45_get_settings(struct efx_nic *efx,
194 struct ethtool_cmd *ecmd)
195{
196 int pma_type;
197
198 /* If no PMA is present we are presumably talking something XAUI-ish
199	 * like CX4, which we report as FIBRE (see below). */
200 if ((efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)) == 0) {
201 ecmd->speed = SPEED_10000;
202 ecmd->port = PORT_FIBRE;
203 ecmd->supported = SUPPORTED_FIBRE;
204 ecmd->advertising = ADVERTISED_FIBRE;
205 return;
206 }
207
208 pma_type = mdio_clause45_read(efx, efx->mii.phy_id,
209 MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL2);
210 pma_type &= MDIO_PMAPMD_CTRL2_TYPE_MASK;
211
212 switch (pma_type) {
213	/* We represent CX4 as fibre in the absence of anything
214	 * better. */
215 case MDIO_PMAPMD_CTRL2_10G_CX4:
216 ecmd->speed = SPEED_10000;
217 ecmd->port = PORT_FIBRE;
218 ecmd->supported = SUPPORTED_FIBRE;
219 ecmd->advertising = ADVERTISED_FIBRE;
220 break;
221 /* 10G Base-T */
222 case MDIO_PMAPMD_CTRL2_10G_BT:
223 ecmd->speed = SPEED_10000;
224 ecmd->port = PORT_TP;
225 ecmd->supported = SUPPORTED_TP | SUPPORTED_10000baseT_Full;
226 ecmd->advertising = (ADVERTISED_FIBRE
227 | ADVERTISED_10000baseT_Full);
228 break;
229 case MDIO_PMAPMD_CTRL2_1G_BT:
230 ecmd->speed = SPEED_1000;
231 ecmd->port = PORT_TP;
232 ecmd->supported = SUPPORTED_TP | SUPPORTED_1000baseT_Full;
233 ecmd->advertising = (ADVERTISED_FIBRE
234 | ADVERTISED_1000baseT_Full);
235 break;
236 case MDIO_PMAPMD_CTRL2_100_BT:
237 ecmd->speed = SPEED_100;
238 ecmd->port = PORT_TP;
239 ecmd->supported = SUPPORTED_TP | SUPPORTED_100baseT_Full;
240 ecmd->advertising = (ADVERTISED_FIBRE
241 | ADVERTISED_100baseT_Full);
242 break;
243 case MDIO_PMAPMD_CTRL2_10_BT:
244 ecmd->speed = SPEED_10;
245 ecmd->port = PORT_TP;
246 ecmd->supported = SUPPORTED_TP | SUPPORTED_10baseT_Full;
247 ecmd->advertising = ADVERTISED_FIBRE | ADVERTISED_10baseT_Full;
248 break;
249 /* All the other defined modes are flavours of
250 * 10G optical */
251 default:
252 ecmd->speed = SPEED_10000;
253 ecmd->port = PORT_FIBRE;
254 ecmd->supported = SUPPORTED_FIBRE;
255 ecmd->advertising = ADVERTISED_FIBRE;
256 break;
257 }
258}
259
260/**
261 * mdio_clause45_set_settings - Set (some of) the PHY settings over MDIO.
262 * @efx: Efx NIC
263 * @ecmd: New settings
264 *
265 * Currently this just enforces that we are _not_ changing the
266 * 'port', 'speed', 'supported' or 'advertising' settings as these
267 * cannot be changed on any currently supported PHY.
268 */
269int mdio_clause45_set_settings(struct efx_nic *efx,
270 struct ethtool_cmd *ecmd)
271{
272 struct ethtool_cmd tmpcmd;
273 mdio_clause45_get_settings(efx, &tmpcmd);
274 /* None of the current PHYs support more than one mode
275 * of operation (and only 10GBT ever will), so keep things
276 * simple for now */
277 if ((ecmd->speed == tmpcmd.speed) && (ecmd->port == tmpcmd.port) &&
278 (ecmd->supported == tmpcmd.supported) &&
279 (ecmd->advertising == tmpcmd.advertising))
280 return 0;
281 return -EOPNOTSUPP;
282}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
new file mode 100644
index 000000000000..2214b6d820a7
--- /dev/null
+++ b/drivers/net/sfc/mdio_10g.h
@@ -0,0 +1,232 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_MDIO_10G_H
11#define EFX_MDIO_10G_H
12
13/*
14 * Definitions needed for doing 10G MDIO as specified in clause 45
15 * MDIO, which do not appear in Linux yet. Also some helper functions.
16 */
17
18#include "efx.h"
19#include "boards.h"
20
21/* Numbering of the MDIO Manageable Devices (MMDs) */
22/* Physical Medium Attachment/ Physical Medium Dependent sublayer */
23#define MDIO_MMD_PMAPMD (1)
24/* WAN Interface Sublayer */
25#define MDIO_MMD_WIS (2)
26/* Physical Coding Sublayer */
27#define MDIO_MMD_PCS (3)
28/* PHY Extender Sublayer */
29#define MDIO_MMD_PHYXS (4)
30/* Extender Sublayer */
31#define MDIO_MMD_DTEXS (5)
32/* Transmission convergence */
33#define MDIO_MMD_TC (6)
34/* Auto negotiation */
35#define MDIO_MMD_AN (7)
36
37/* Generic register locations */
38#define MDIO_MMDREG_CTRL1 (0)
39#define MDIO_MMDREG_STAT1 (1)
40#define MDIO_MMDREG_IDHI (2)
41#define MDIO_MMDREG_IDLOW (3)
42#define MDIO_MMDREG_SPEED (4)
43#define MDIO_MMDREG_DEVS0 (5)
44#define MDIO_MMDREG_DEVS1 (6)
45#define MDIO_MMDREG_CTRL2 (7)
46#define MDIO_MMDREG_STAT2 (8)
47
48/* Bits in MMDREG_CTRL1 */
49/* Reset */
50#define MDIO_MMDREG_CTRL1_RESET_LBN (15)
51#define MDIO_MMDREG_CTRL1_RESET_WIDTH (1)
52
53/* Bits in MMDREG_STAT1 */
54#define MDIO_MMDREG_STAT1_FAULT_LBN (7)
55#define MDIO_MMDREG_STAT1_FAULT_WIDTH (1)
56/* Link state */
57#define MDIO_MMDREG_STAT1_LINK_LBN (2)
58#define MDIO_MMDREG_STAT1_LINK_WIDTH (1)
59
60/* Bits in ID reg */
61#define MDIO_ID_REV(_id32) (_id32 & 0xf)
62#define MDIO_ID_MODEL(_id32) ((_id32 >> 4) & 0x3f)
63#define MDIO_ID_OUI(_id32) (_id32 >> 10)
64
65/* Bits in MMDREG_DEVS0. Someone thoughtfully laid things out
66 * so the 'bit present' bit number of an MMD is the number of
67 * that MMD */
68#define DEV_PRESENT_BIT(_b) (1 << _b)
69
70#define MDIO_MMDREG_DEVS0_PHYXS DEV_PRESENT_BIT(MDIO_MMD_PHYXS)
71#define MDIO_MMDREG_DEVS0_PCS DEV_PRESENT_BIT(MDIO_MMD_PCS)
72#define MDIO_MMDREG_DEVS0_PMAPMD DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)
73
74/* Bits in MMDREG_STAT2 */
75#define MDIO_MMDREG_STAT2_PRESENT_VAL (2)
76#define MDIO_MMDREG_STAT2_PRESENT_LBN (14)
77#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
78
79/* PMA type (4 bits) */
80#define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0)
81#define MDIO_PMAPMD_CTRL2_10G_EW (0x1)
82#define MDIO_PMAPMD_CTRL2_10G_LW (0x2)
83#define MDIO_PMAPMD_CTRL2_10G_SW (0x3)
84#define MDIO_PMAPMD_CTRL2_10G_LX4 (0x4)
85#define MDIO_PMAPMD_CTRL2_10G_ER (0x5)
86#define MDIO_PMAPMD_CTRL2_10G_LR (0x6)
87#define MDIO_PMAPMD_CTRL2_10G_SR (0x7)
88/* Reserved */
89#define MDIO_PMAPMD_CTRL2_10G_BT (0x9)
90/* Reserved */
91/* Reserved */
92#define MDIO_PMAPMD_CTRL2_1G_BT (0xc)
93/* Reserved */
94#define MDIO_PMAPMD_CTRL2_100_BT (0xe)
95#define MDIO_PMAPMD_CTRL2_10_BT (0xf)
96#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf)
97
98/* PHY XGXS lane state */
99#define MDIO_PHYXS_LANE_STATE (0x18)
100#define MDIO_PHYXS_LANE_ALIGNED_LBN (12)
101
102/* AN registers */
103#define MDIO_AN_STATUS (1)
104#define MDIO_AN_STATUS_XNP_LBN (7)
105#define MDIO_AN_STATUS_PAGE_LBN (6)
106#define MDIO_AN_STATUS_AN_DONE_LBN (5)
107#define MDIO_AN_STATUS_LP_AN_CAP_LBN (0)
108
109#define MDIO_AN_10GBT_STATUS (33)
110#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
111#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */
112#define MDIO_AN_10GBT_STATUS_LOC_OK_LBN (13) /* Local OK */
113#define MDIO_AN_10GBT_STATUS_REM_OK_LBN (12) /* Remote OK */
114#define MDIO_AN_10GBT_STATUS_LP_10G_LBN (11) /* Link partner is 10GBT capable */
115#define MDIO_AN_10GBT_STATUS_LP_LTA_LBN (10) /* LP loop timing ability */
116#define MDIO_AN_10GBT_STATUS_LP_TRR_LBN (9) /* LP Training Reset Request */
117
118
119/* Packing of the prt and dev arguments of clause 45 style MDIO into a
120 * single int so they can be passed into the mdio_read/write functions
121 * that currently exist. Note that as Falcon is the only current user,
122 * the packed form is chosen to match what Falcon needs to write into
123 * a register. This is checked at compile-time so do not change it. If
124 * your target chip needs things laid out differently you will need
125 * to unpack the arguments in your chip-specific mdio functions.
126 */
127 /* These are defined by the standard. */
128#define MDIO45_PRT_ID_WIDTH (5)
129#define MDIO45_DEV_ID_WIDTH (5)
130
131/* The prt ID is just packed in immediately to the left of the dev ID */
132#define MDIO45_PRT_DEV_WIDTH (MDIO45_PRT_ID_WIDTH + MDIO45_DEV_ID_WIDTH)
133
134#define MDIO45_PRT_ID_MASK ((1 << MDIO45_PRT_DEV_WIDTH) - 1)
135/* This is the prt + dev extended by 1 bit to hold the 'is clause 45' flag. */
136#define MDIO45_XPRT_ID_WIDTH (MDIO45_PRT_DEV_WIDTH + 1)
137#define MDIO45_XPRT_ID_MASK ((1 << MDIO45_XPRT_ID_WIDTH) - 1)
138#define MDIO45_XPRT_ID_IS10G (1 << (MDIO45_XPRT_ID_WIDTH - 1))
139
140
141#define MDIO45_PRT_ID_COMP_LBN MDIO45_DEV_ID_WIDTH
142#define MDIO45_PRT_ID_COMP_WIDTH MDIO45_PRT_ID_WIDTH
143#define MDIO45_DEV_ID_COMP_LBN 0
144#define MDIO45_DEV_ID_COMP_WIDTH MDIO45_DEV_ID_WIDTH
145
146/* Compose port and device into a phy_id */
147static inline int mdio_clause45_pack(u8 prt, u8 dev)
148{
149 efx_dword_t phy_id;
150 EFX_POPULATE_DWORD_2(phy_id, MDIO45_PRT_ID_COMP, prt,
151 MDIO45_DEV_ID_COMP, dev);
152 return MDIO45_XPRT_ID_IS10G | EFX_DWORD_VAL(phy_id);
153}
154
155static inline void mdio_clause45_unpack(u32 val, u8 *prt, u8 *dev)
156{
157 efx_dword_t phy_id;
158 EFX_POPULATE_DWORD_1(phy_id, EFX_DWORD_0, val);
159 *prt = EFX_DWORD_FIELD(phy_id, MDIO45_PRT_ID_COMP);
160 *dev = EFX_DWORD_FIELD(phy_id, MDIO45_DEV_ID_COMP);
161}
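/* Illustrative sketch only (not part of the driver): packing and then
 * unpacking recovers the original port/device IDs; the IS10G flag sits
 * above both component fields and is simply ignored by the unpack. */
static inline void mdio_clause45_pack_example(void)
{
	u8 prt, dev;

	mdio_clause45_unpack(mdio_clause45_pack(3, MDIO_MMD_PMAPMD),
			     &prt, &dev);
	/* At this point prt == 3 and dev == MDIO_MMD_PMAPMD */
}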
162
163static inline int mdio_clause45_read(struct efx_nic *efx,
164 u8 prt, u8 dev, u16 addr)
165{
166 return efx->mii.mdio_read(efx->net_dev,
167 mdio_clause45_pack(prt, dev), addr);
168}
169
170static inline void mdio_clause45_write(struct efx_nic *efx,
171 u8 prt, u8 dev, u16 addr, int value)
172{
173 efx->mii.mdio_write(efx->net_dev,
174 mdio_clause45_pack(prt, dev), addr, value);
175}
176
177
178static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd)
179{
180 int phy_id = efx->mii.phy_id;
181 u16 id_low = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDLOW);
182 u16 id_hi = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDHI);
183 return (id_hi << 16) | (id_low);
184}
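/* Illustrative only (not part of the driver): splitting the value
 * returned by mdio_clause45_read_id() back into the fields defined by
 * the MDIO_ID_* macros above. */
static inline void mdio_clause45_id_example(struct efx_nic *efx, int mmd)
{
	u32 id = mdio_clause45_read_id(efx, mmd);

	EFX_LOG(efx, "MMD %d: OUI %#x model %#x revision %#x\n", mmd,
		MDIO_ID_OUI(id), MDIO_ID_MODEL(id), MDIO_ID_REV(id));
}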
185
186static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
187{
188 int i, sync, lane_status;
189
190 for (i = 0; i < 2; ++i)
191 lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
192 MDIO_MMD_PHYXS,
193 MDIO_PHYXS_LANE_STATE);
194
195 sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0;
196 if (!sync)
197 EFX_INFO(efx, "XGXS lane status: %x\n", lane_status);
198 return sync;
199}
200
201extern const char *mdio_clause45_mmd_name(int mmd);
202
203/*
204 * Reset a specific MMD and wait for reset to clear.
205 * Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
206 *
207 * This function will sleep
208 */
209extern int mdio_clause45_reset_mmd(struct efx_nic *efx, int mmd,
210 int spins, int spintime);
211
212/* As mdio_clause45_check_mmd but for multiple MMDs */
213extern int mdio_clause45_check_mmds(struct efx_nic *efx,
214 unsigned int mmd_mask, unsigned int fatal_mask);
215
216/* Check the link status of specified mmds in bit mask */
217extern int mdio_clause45_links_ok(struct efx_nic *efx,
218 unsigned int mmd_mask);
219
220/* Read (some of) the PHY settings over MDIO */
221extern void mdio_clause45_get_settings(struct efx_nic *efx,
222 struct ethtool_cmd *ecmd);
223
224/* Set (some of) the PHY settings over MDIO */
225extern int mdio_clause45_set_settings(struct efx_nic *efx,
226 struct ethtool_cmd *ecmd);
227
228/* Wait for specified MMDs to exit reset within a timeout */
229extern int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
230 unsigned int mmd_mask);
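/* Illustrative only (not from the original source): a PHY driver would
 * typically describe the MMDs it implements with a mask built from the
 * DEV_PRESENT_BIT()-style values above and feed that mask to the helpers
 * declared here, e.g.:
 *
 *	unsigned int mmd_mask = MDIO_MMDREG_DEVS0_PMAPMD |
 *				MDIO_MMDREG_DEVS0_PCS |
 *				MDIO_MMDREG_DEVS0_PHYXS;
 *	int link_up = mdio_clause45_links_ok(efx, mmd_mask);
 */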
231
232#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
new file mode 100644
index 000000000000..c505482c2520
--- /dev/null
+++ b/drivers/net/sfc/net_driver.h
@@ -0,0 +1,883 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11/* Common definitions for all Efx net driver code */
12
13#ifndef EFX_NET_DRIVER_H
14#define EFX_NET_DRIVER_H
15
16#include <linux/version.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/ethtool.h>
20#include <linux/if_vlan.h>
21#include <linux/timer.h>
22#include <linux/mii.h>
23#include <linux/list.h>
24#include <linux/pci.h>
25#include <linux/device.h>
26#include <linux/highmem.h>
27#include <linux/workqueue.h>
28#include <linux/inet_lro.h>
29
30#include "enum.h"
31#include "bitfield.h"
32#include "i2c-direct.h"
33
34#define EFX_MAX_LRO_DESCRIPTORS 8
35#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
36
37/**************************************************************************
38 *
39 * Build definitions
40 *
41 **************************************************************************/
42#ifndef EFX_DRIVER_NAME
43#define EFX_DRIVER_NAME "sfc"
44#endif
45#define EFX_DRIVER_VERSION "2.2.0136"
46
47#ifdef EFX_ENABLE_DEBUG
48#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
49#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
50#else
51#define EFX_BUG_ON_PARANOID(x) do {} while (0)
52#define EFX_WARN_ON_PARANOID(x) do {} while (0)
53#endif
54
55#define NET_DEV_REGISTERED(efx) \
56 ((efx)->net_dev->reg_state == NETREG_REGISTERED)
57
58/* Include net device name in log messages if it has been registered.
59 * Use efx->name not efx->net_dev->name so that races with (un)registration
60 * are harmless.
61 */
62#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
63
64/* Un-rate-limited logging */
65#define EFX_ERR(efx, fmt, args...) \
66dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
67
68#define EFX_INFO(efx, fmt, args...) \
69dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
70
71#ifdef EFX_ENABLE_DEBUG
72#define EFX_LOG(efx, fmt, args...) \
73dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
74#else
75#define EFX_LOG(efx, fmt, args...) \
76dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
77#endif
78
79#define EFX_TRACE(efx, fmt, args...) do {} while (0)
80
81#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)
82
83/* Rate-limited logging */
84#define EFX_ERR_RL(efx, fmt, args...) \
85do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)
86
87#define EFX_INFO_RL(efx, fmt, args...) \
88do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
89
90#define EFX_LOG_RL(efx, fmt, args...) \
91do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
92
93/* Kernel headers may redefine inline anyway */
94#ifndef inline
95#define inline inline __attribute__ ((always_inline))
96#endif
97
98/**************************************************************************
99 *
100 * Efx data structures
101 *
102 **************************************************************************/
103
104#define EFX_MAX_CHANNELS 32
105#define EFX_MAX_TX_QUEUES 1
106#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
107
108/**
109 * struct efx_special_buffer - An Efx special buffer
110 * @addr: CPU base address of the buffer
111 * @dma_addr: DMA base address of the buffer
112 * @len: Buffer length, in bytes
113 * @index: Buffer index within controller's buffer table
114 * @entries: Number of buffer table entries
115 *
116 * Special buffers are used for the event queues and the TX and RX
117 * descriptor queues for each channel. They are *not* used for the
118 * actual transmit and receive buffers.
119 *
120 * Note that for Falcon, TX and RX descriptor queues live in host memory.
121 * Allocation and freeing procedures must take this into account.
122 */
123struct efx_special_buffer {
124 void *addr;
125 dma_addr_t dma_addr;
126 unsigned int len;
127 int index;
128 int entries;
129};
130
131/**
132 * struct efx_tx_buffer - An Efx TX buffer
133 * @skb: The associated socket buffer.
134 * Set only on the final fragment of a packet; %NULL for all other
135 * fragments. When this fragment completes, then we can free this
136 * skb.
137 * @dma_addr: DMA address of the fragment.
138 * @len: Length of this fragment.
139 * This field is zero when the queue slot is empty.
140 * @continuation: True if this fragment is not the end of a packet.
141 * @unmap_single: True if pci_unmap_single should be used.
142 * @unmap_addr: DMA address to unmap
143 * @unmap_len: Length of this fragment to unmap
144 */
145struct efx_tx_buffer {
146 const struct sk_buff *skb;
147 dma_addr_t dma_addr;
148 unsigned short len;
149 unsigned char continuation;
150 unsigned char unmap_single;
151 dma_addr_t unmap_addr;
152 unsigned short unmap_len;
153};
154
155/**
156 * struct efx_tx_queue - An Efx TX queue
157 *
158 * This is a ring buffer of TX fragments.
159 * Since the TX completion path always executes on the same
160 * CPU and the xmit path can operate on different CPUs,
161 * performance is increased by ensuring that the completion
162 * path and the xmit path operate on different cache lines.
163 * This is particularly important if the xmit path is always
164 * executing on one CPU which is different from the completion
165 * path. There is also a cache line for members which are
166 * read but not written on the fast path.
167 *
168 * @efx: The associated Efx NIC
169 * @queue: DMA queue number
170 * @used: Queue is used by net driver
171 * @channel: The associated channel
172 * @buffer: The software buffer ring
173 * @txd: The hardware descriptor ring
174 * @read_count: Current read pointer.
175 * This is the number of buffers that have been removed from both rings.
176 * @stopped: Stopped flag.
177 * Set if this TX queue is currently stopping its port.
178 * @insert_count: Current insert pointer
179 * This is the number of buffers that have been added to the
180 * software ring.
181 * @write_count: Current write pointer
182 * This is the number of buffers that have been added to the
183 * hardware ring.
184 * @old_read_count: The value of read_count when last checked.
185 * This is here for performance reasons. The xmit path will
186 * only get the up-to-date value of read_count if this
187 * variable indicates that the queue is full. This is to
188 * avoid cache-line ping-pong between the xmit path and the
189 * completion path.
190 */
191struct efx_tx_queue {
192 /* Members which don't change on the fast path */
193 struct efx_nic *efx ____cacheline_aligned_in_smp;
194 int queue;
195 int used;
196 struct efx_channel *channel;
197 struct efx_nic *nic;
198 struct efx_tx_buffer *buffer;
199 struct efx_special_buffer txd;
200
201 /* Members used mainly on the completion path */
202 unsigned int read_count ____cacheline_aligned_in_smp;
203 int stopped;
204
205 /* Members used only on the xmit path */
206 unsigned int insert_count ____cacheline_aligned_in_smp;
207 unsigned int write_count;
208 unsigned int old_read_count;
209};
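/* Illustrative sketch only (not part of the driver): with the pointer
 * scheme documented above, the number of descriptors currently in flight
 * is simply the difference of the free-running counters; the ring is
 * full when this reaches the ring size (efx_nic_type::txd_ring_mask + 1). */
static inline unsigned int efx_example_tx_fill_level(struct efx_tx_queue *tx_queue)
{
	return tx_queue->insert_count - tx_queue->read_count;
}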
210
211/**
212 * struct efx_rx_buffer - An Efx RX data buffer
213 * @dma_addr: DMA base address of the buffer
214 * @skb: The associated socket buffer, if any.
215 * If both this and page are %NULL, the buffer slot is currently free.
216 * @page: The associated page buffer, if any.
217 * If both this and skb are %NULL, the buffer slot is currently free.
218 * @data: Pointer to ethernet header
219 * @len: Buffer length, in bytes.
220 * @unmap_addr: DMA address to unmap
221 */
222struct efx_rx_buffer {
223 dma_addr_t dma_addr;
224 struct sk_buff *skb;
225 struct page *page;
226 char *data;
227 unsigned int len;
228 dma_addr_t unmap_addr;
229};
230
231/**
232 * struct efx_rx_queue - An Efx RX queue
233 * @efx: The associated Efx NIC
234 * @queue: DMA queue number
235 * @used: Queue is used by net driver
236 * @channel: The associated channel
237 * @buffer: The software buffer ring
238 * @rxd: The hardware descriptor ring
239 * @added_count: Number of buffers added to the receive queue.
240 * @notified_count: Number of buffers given to NIC (<= @added_count).
241 * @removed_count: Number of buffers removed from the receive queue.
242 * @add_lock: Receive queue descriptor add spin lock.
243 * This lock must be held in order to add buffers to the RX
244 * descriptor ring (rxd and buffer) and to update added_count (but
245 * not removed_count).
246 * @max_fill: RX descriptor maximum fill level (<= ring size)
247 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
248 * (<= @max_fill)
249 * @fast_fill_limit: The level to which a fast fill will fill
250 * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
251 * @min_fill: RX descriptor minimum non-zero fill level.
252 * This records the minimum fill level observed when a ring
253 * refill was triggered.
254 * @min_overfill: RX descriptor minimum overflow fill level.
255 * This records the minimum fill level at which RX queue
256 * overflow was observed. It should never be set.
257 * @alloc_page_count: RX allocation strategy counter.
258 * @alloc_skb_count: RX allocation strategy counter.
259 * @work: Descriptor push work thread
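 * @slow_fill_count: Number of times the slow-fill work item has run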
260 * @buf_page: Page for next RX buffer.
261 * We can use a single page for multiple RX buffers. This tracks
262 * the remaining space in the allocation.
263 * @buf_dma_addr: Page's DMA address.
264 * @buf_data: Page's host address.
265 */
266struct efx_rx_queue {
267 struct efx_nic *efx;
268 int queue;
269 int used;
270 struct efx_channel *channel;
271 struct efx_rx_buffer *buffer;
272 struct efx_special_buffer rxd;
273
274 int added_count;
275 int notified_count;
276 int removed_count;
277 spinlock_t add_lock;
278 unsigned int max_fill;
279 unsigned int fast_fill_trigger;
280 unsigned int fast_fill_limit;
281 unsigned int min_fill;
282 unsigned int min_overfill;
283 unsigned int alloc_page_count;
284 unsigned int alloc_skb_count;
285 struct delayed_work work;
286 unsigned int slow_fill_count;
287
288 struct page *buf_page;
289 dma_addr_t buf_dma_addr;
290 char *buf_data;
291};
292
293/**
294 * struct efx_buffer - An Efx general-purpose buffer
295 * @addr: host base address of the buffer
296 * @dma_addr: DMA base address of the buffer
297 * @len: Buffer length, in bytes
298 *
299 * Falcon uses these buffers for its interrupt status registers and
300 * MAC stats dumps.
301 */
302struct efx_buffer {
303 void *addr;
304 dma_addr_t dma_addr;
305 unsigned int len;
306};
307
308
309/* Flags for channel->used_flags */
310#define EFX_USED_BY_RX 1
311#define EFX_USED_BY_TX 2
312#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
313
314enum efx_rx_alloc_method {
315 RX_ALLOC_METHOD_AUTO = 0,
316 RX_ALLOC_METHOD_SKB = 1,
317 RX_ALLOC_METHOD_PAGE = 2,
318};
319
320/**
321 * struct efx_channel - An Efx channel
322 *
323 * A channel comprises an event queue, at least one TX queue, at least
324 * one RX queue, and an associated tasklet for processing the event
325 * queue.
326 *
327 * @efx: Associated Efx NIC
328 * @evqnum: Event queue number
329 * @channel: Channel instance number
330 * @used_flags: Channel is used by net driver
331 * @enabled: Channel enabled indicator
332 * @irq: IRQ number (MSI and MSI-X only)
333 * @has_interrupt: Channel has an interrupt
334 * @irq_moderation: IRQ moderation value (in us)
335 * @napi_dev: Net device used with NAPI
336 * @napi_str: NAPI control structure
337 * @reset_work: Scheduled reset work thread
338 * @work_pending: Is work pending via NAPI?
339 * @eventq: Event queue buffer
340 * @eventq_read_ptr: Event queue read pointer
341 * @last_eventq_read_ptr: Last event queue read pointer value.
342 * @eventq_magic: Event queue magic value for driver-generated test events
343 * @lro_mgr: LRO state
344 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
345 * and diagnostic counters
346 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
347 * descriptors
348 * @rx_alloc_pop_pages: RX allocation method currently in use for popping
349 * descriptors
350 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
351 * @n_rx_ip_frag_err: Count of RX IP fragment errors
352 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
353 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
354 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
355 * @n_rx_overlength: Count of RX_OVERLENGTH errors
356 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
357 */
358struct efx_channel {
359 struct efx_nic *efx;
360 int evqnum;
361 int channel;
362 int used_flags;
363 int enabled;
364 int irq;
365 unsigned int has_interrupt;
366 unsigned int irq_moderation;
367 struct net_device *napi_dev;
368 struct napi_struct napi_str;
369 struct work_struct reset_work;
370 int work_pending;
371 struct efx_special_buffer eventq;
372 unsigned int eventq_read_ptr;
373 unsigned int last_eventq_read_ptr;
374 unsigned int eventq_magic;
375
376 struct net_lro_mgr lro_mgr;
377 int rx_alloc_level;
378 int rx_alloc_push_pages;
379 int rx_alloc_pop_pages;
380
381 unsigned n_rx_tobe_disc;
382 unsigned n_rx_ip_frag_err;
383 unsigned n_rx_ip_hdr_chksum_err;
384 unsigned n_rx_tcp_udp_chksum_err;
385 unsigned n_rx_frm_trunc;
386 unsigned n_rx_overlength;
387 unsigned n_skbuff_leaks;
388
389 /* Used to pipeline received packets in order to optimise memory
390 * access with prefetches.
391 */
392 struct efx_rx_buffer *rx_pkt;
393 int rx_pkt_csummed;
394
395};
396
397/**
398 * struct efx_blinker - S/W LED blinking context
399 * @led_num: LED ID (board-specific meaning)
400 * @state: Current state - on or off
401 * @resubmit: Timer resubmission flag
402 * @timer: Control timer for blinking
403 */
404struct efx_blinker {
405 int led_num;
406 int state;
407 int resubmit;
408 struct timer_list timer;
409};
410
411
412/**
413 * struct efx_board - board information
414 * @type: Board model type
415 * @major: Major rev. ('A', 'B' ...)
416 * @minor: Minor rev. (0, 1, ...)
417 * @init: Initialisation function
418 * @init_leds: Sets up board LEDs
419 * @set_fault_led: Turns the fault LED on or off
420 * @blink: Starts/stops blinking
421 * @blinker: used to blink LEDs in software
422 */
423struct efx_board {
424 int type;
425 int major;
426 int minor;
427 int (*init) (struct efx_nic *nic);
428 /* As the LEDs are typically attached to the PHY, LEDs
429 * have a separate init callback that happens later than
430 * board init. */
431 int (*init_leds)(struct efx_nic *efx);
432 void (*set_fault_led) (struct efx_nic *efx, int state);
433 void (*blink) (struct efx_nic *efx, int start);
434 struct efx_blinker blinker;
435};
436
437enum efx_int_mode {
438 /* Be careful when altering: keep consistent with the macro below */
439 EFX_INT_MODE_MSIX = 0,
440 EFX_INT_MODE_MSI = 1,
441 EFX_INT_MODE_LEGACY = 2,
442 EFX_INT_MODE_MAX /* Insert any new items before this */
443};
444#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
445
446enum phy_type {
447 PHY_TYPE_NONE = 0,
448 PHY_TYPE_CX4_RTMR = 1,
449 PHY_TYPE_1G_ALASKA = 2,
450 PHY_TYPE_10XPRESS = 3,
451 PHY_TYPE_XFP = 4,
452 PHY_TYPE_PM8358 = 6,
453 PHY_TYPE_MAX /* Insert any new items before this */
454};
455
456#define PHY_ADDR_INVALID 0xff
457
458enum nic_state {
459 STATE_INIT = 0,
460 STATE_RUNNING = 1,
461 STATE_FINI = 2,
462 STATE_RESETTING = 3, /* rtnl_lock always held */
463 STATE_DISABLED = 4,
464 STATE_MAX,
465};
466
467/*
468 * Alignment of page-allocated RX buffers
469 *
470 * Controls the number of bytes inserted at the start of an RX buffer.
471 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
472 * of the skb->head for hardware DMA].
473 */
474#if defined(__i386__) || defined(__x86_64__)
475#define EFX_PAGE_IP_ALIGN 0
476#else
477#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
478#endif
479
480/*
481 * Alignment of the skb->head which wraps a page-allocated RX buffer
482 *
483 * The skb allocated to wrap an rx_buffer can have this alignment. Since
484 * the data is memcpy'd from the rx_buf, it does not need to be equal to
485 * EFX_PAGE_IP_ALIGN.
486 */
487#define EFX_PAGE_SKB_ALIGN 2
488
489/* Forward declaration */
490struct efx_nic;
491
492/* Pseudo bit-mask flow control field */
493enum efx_fc_type {
494 EFX_FC_RX = 1,
495 EFX_FC_TX = 2,
496 EFX_FC_AUTO = 4,
497};
498
499/**
500 * struct efx_phy_operations - Efx PHY operations table
501 * @init: Initialise PHY
502 * @fini: Shut down PHY
503 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
504 * @clear_interrupt: Clear down interrupt
505 * @blink: Blink LEDs
506 * @check_hw: Check hardware
507 * @reset_xaui: Reset XAUI side of PHY (for software-sequenced reset)
508 * @mmds: MMD presence mask
509 */
510struct efx_phy_operations {
511 int (*init) (struct efx_nic *efx);
512 void (*fini) (struct efx_nic *efx);
513 void (*reconfigure) (struct efx_nic *efx);
514 void (*clear_interrupt) (struct efx_nic *efx);
515 int (*check_hw) (struct efx_nic *efx);
516 void (*reset_xaui) (struct efx_nic *efx);
517 int mmds;
518};
519
520/*
521 * Efx extended statistics
522 *
523 * Not all statistics are provided by all supported MACs. The purpose
524 * of this structure is to contain the raw statistics provided by each
525 * MAC.
526 */
527struct efx_mac_stats {
528 u64 tx_bytes;
529 u64 tx_good_bytes;
530 u64 tx_bad_bytes;
531 unsigned long tx_packets;
532 unsigned long tx_bad;
533 unsigned long tx_pause;
534 unsigned long tx_control;
535 unsigned long tx_unicast;
536 unsigned long tx_multicast;
537 unsigned long tx_broadcast;
538 unsigned long tx_lt64;
539 unsigned long tx_64;
540 unsigned long tx_65_to_127;
541 unsigned long tx_128_to_255;
542 unsigned long tx_256_to_511;
543 unsigned long tx_512_to_1023;
544 unsigned long tx_1024_to_15xx;
545 unsigned long tx_15xx_to_jumbo;
546 unsigned long tx_gtjumbo;
547 unsigned long tx_collision;
548 unsigned long tx_single_collision;
549 unsigned long tx_multiple_collision;
550 unsigned long tx_excessive_collision;
551 unsigned long tx_deferred;
552 unsigned long tx_late_collision;
553 unsigned long tx_excessive_deferred;
554 unsigned long tx_non_tcpudp;
555 unsigned long tx_mac_src_error;
556 unsigned long tx_ip_src_error;
557 u64 rx_bytes;
558 u64 rx_good_bytes;
559 u64 rx_bad_bytes;
560 unsigned long rx_packets;
561 unsigned long rx_good;
562 unsigned long rx_bad;
563 unsigned long rx_pause;
564 unsigned long rx_control;
565 unsigned long rx_unicast;
566 unsigned long rx_multicast;
567 unsigned long rx_broadcast;
568 unsigned long rx_lt64;
569 unsigned long rx_64;
570 unsigned long rx_65_to_127;
571 unsigned long rx_128_to_255;
572 unsigned long rx_256_to_511;
573 unsigned long rx_512_to_1023;
574 unsigned long rx_1024_to_15xx;
575 unsigned long rx_15xx_to_jumbo;
576 unsigned long rx_gtjumbo;
577 unsigned long rx_bad_lt64;
578 unsigned long rx_bad_64_to_15xx;
579 unsigned long rx_bad_15xx_to_jumbo;
580 unsigned long rx_bad_gtjumbo;
581 unsigned long rx_overflow;
582 unsigned long rx_missed;
583 unsigned long rx_false_carrier;
584 unsigned long rx_symbol_error;
585 unsigned long rx_align_error;
586 unsigned long rx_length_error;
587 unsigned long rx_internal_error;
588 unsigned long rx_good_lt64;
589};
590
591/* Number of bits used in a multicast filter hash address */
592#define EFX_MCAST_HASH_BITS 8
593
594/* Number of (single-bit) entries in a multicast filter hash */
595#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
596
597/* An Efx multicast filter hash */
598union efx_multicast_hash {
599 u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
600 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
601};
602
603/**
604 * struct efx_nic - an Efx NIC
605 * @name: Device name (net device name or bus id before net device registered)
606 * @pci_dev: The PCI device
607 * @type: Controller type attributes
608 * @legacy_irq: IRQ number
609 * @workqueue: Workqueue for resets, port reconfigures and the HW monitor
610 * @reset_work: Scheduled reset workitem
611 * @monitor_work: Hardware monitor workitem
612 * @membase_phys: Memory BAR value as physical address
613 * @membase: Memory BAR value
614 * @biu_lock: BIU (bus interface unit) lock
615 * @interrupt_mode: Interrupt mode
616 * @i2c: I2C interface
617 * @board_info: Board-level information
618 * @state: Device state flag. Serialised by the rtnl_lock.
619 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
620 * @tx_queue: TX DMA queues
621 * @rx_queue: RX DMA queues
622 * @channel: Channels
623 * @rss_queues: Number of RSS queues
624 * @rx_buffer_len: RX buffer length
625 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
626 * @irq_status: Interrupt status buffer
627 * @last_irq_cpu: Last CPU to handle interrupt.
628 * This register is written with the SMP processor ID whenever an
629 * interrupt is handled. It is used by falcon_test_interrupt()
630 * to verify that an interrupt has occurred.
631 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
632 * @nic_data: Hardware-dependent state
633 * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and
634 * efx_reconfigure_port()
635 * @port_enabled: Port enabled indicator.
636 * Serialises efx_stop_all(), efx_start_all() and efx_monitor() and
637 * efx_reconfigure_work with kernel interfaces. Safe to read under any
638 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
639 * be held to modify it.
640 * @port_initialized: Port initialized?
641 * @net_dev: Operating system network device. Consider holding the rtnl lock
642 * @rx_checksum_enabled: RX checksumming enabled
643 * @netif_stop_count: Port stop count
644 * @netif_stop_lock: Port stop lock
645 * @mac_stats: MAC statistics. These include all statistics the MACs
646 * can provide. Generic code converts these into a standard
647 * &struct net_device_stats.
648 * @stats_buffer: DMA buffer for statistics
649 * @stats_lock: Statistics update lock
650 * @mac_address: Permanent MAC address
651 * @phy_type: PHY type
652 * @phy_lock: PHY access lock
653 * @phy_op: PHY interface
654 * @phy_data: PHY private data (including PHY-specific stats)
655 * @mii: PHY interface
656 * @phy_powered: PHY power state
657 * @tx_disabled: PHY transmitter turned off
658 * @link_up: Link status
659 * @link_options: Link options (MII/GMII format)
660 * @n_link_state_changes: Number of times the link has changed state
661 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
662 * @multicast_hash: Multicast hash table
663 * @flow_control: Flow control flags - separate RX/TX so can't use link_options
664 * @reconfigure_work: work item for dealing with PHY events
665 *
666 * The @priv field of the corresponding &struct net_device points to
667 * this.
668 */
669struct efx_nic {
670 char name[IFNAMSIZ];
671 struct pci_dev *pci_dev;
672 const struct efx_nic_type *type;
673 int legacy_irq;
674 struct workqueue_struct *workqueue;
675 struct work_struct reset_work;
676 struct delayed_work monitor_work;
677 unsigned long membase_phys;
678 void __iomem *membase;
679 spinlock_t biu_lock;
680 enum efx_int_mode interrupt_mode;
681
682 struct efx_i2c_interface i2c;
683 struct efx_board board_info;
684
685 enum nic_state state;
686 enum reset_type reset_pending;
687
688 struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
689 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
690 struct efx_channel channel[EFX_MAX_CHANNELS];
691
692 int rss_queues;
693 unsigned int rx_buffer_len;
694 unsigned int rx_buffer_order;
695
696 struct efx_buffer irq_status;
697 volatile signed int last_irq_cpu;
698
699 unsigned n_rx_nodesc_drop_cnt;
700
701 void *nic_data;
702
703 struct mutex mac_lock;
704 int port_enabled;
705
706 int port_initialized;
707 struct net_device *net_dev;
708 int rx_checksum_enabled;
709
710 atomic_t netif_stop_count;
711 spinlock_t netif_stop_lock;
712
713 struct efx_mac_stats mac_stats;
714 struct efx_buffer stats_buffer;
715 spinlock_t stats_lock;
716
717 unsigned char mac_address[ETH_ALEN];
718
719 enum phy_type phy_type;
720 spinlock_t phy_lock;
721 struct efx_phy_operations *phy_op;
722 void *phy_data;
723 struct mii_if_info mii;
724
725 int link_up;
726 unsigned int link_options;
727 unsigned int n_link_state_changes;
728
729 int promiscuous;
730 union efx_multicast_hash multicast_hash;
731 enum efx_fc_type flow_control;
732 struct work_struct reconfigure_work;
733
734 atomic_t rx_reset;
735};
736
737/**
738 * struct efx_nic_type - Efx device type definition
739 * @mem_bar: Memory BAR number
740 * @mem_map_size: Memory BAR mapped size
741 * @txd_ptr_tbl_base: TX descriptor ring base address
742 * @rxd_ptr_tbl_base: RX descriptor ring base address
743 * @buf_tbl_base: Buffer table base address
744 * @evq_ptr_tbl_base: Event queue pointer table base address
745 * @evq_rptr_tbl_base: Event queue read-pointer table base address
746 * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
747 * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
748 * @evq_size: Event queue size (must be a power of two)
749 * @max_dma_mask: Maximum possible DMA mask
750 * @tx_dma_mask: TX DMA mask
751 * @bug5391_mask: Address mask for bug 5391 workaround
752 * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
753 * @rx_xon_thresh: RX FIFO XON watermark (bytes)
754 * @rx_buffer_padding: Padding added to each RX buffer
755 * @max_interrupt_mode: Highest capability interrupt mode supported
756 * from &enum efx_int_mode.
757 * @phys_addr_channels: Number of channels with physically addressed
758 * descriptors
759 */
760struct efx_nic_type {
761 unsigned int mem_bar;
762 unsigned int mem_map_size;
763 unsigned int txd_ptr_tbl_base;
764 unsigned int rxd_ptr_tbl_base;
765 unsigned int buf_tbl_base;
766 unsigned int evq_ptr_tbl_base;
767 unsigned int evq_rptr_tbl_base;
768
769 unsigned int txd_ring_mask;
770 unsigned int rxd_ring_mask;
771 unsigned int evq_size;
772 dma_addr_t max_dma_mask;
773 unsigned int tx_dma_mask;
774 unsigned bug5391_mask;
775
776 int rx_xoff_thresh;
777 int rx_xon_thresh;
778 unsigned int rx_buffer_padding;
779 unsigned int max_interrupt_mode;
780 unsigned int phys_addr_channels;
781};
782
783/**************************************************************************
784 *
785 * Prototypes and inline functions
786 *
787 *************************************************************************/
788
789/* Iterate over all used channels */
790#define efx_for_each_channel(_channel, _efx) \
791 for (_channel = &_efx->channel[0]; \
792 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
793 _channel++) \
794 if (!_channel->used_flags) \
795 continue; \
796 else
797
798/* Iterate over all used channels with interrupts */
799#define efx_for_each_channel_with_interrupt(_channel, _efx) \
800 for (_channel = &_efx->channel[0]; \
801 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
802 _channel++) \
803 if (!(_channel->used_flags && _channel->has_interrupt)) \
804 continue; \
805 else
806
807/* Iterate over all used TX queues */
808#define efx_for_each_tx_queue(_tx_queue, _efx) \
809 for (_tx_queue = &_efx->tx_queue[0]; \
810 _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES]; \
811 _tx_queue++) \
812 if (!_tx_queue->used) \
813 continue; \
814 else
815
816/* Iterate over all TX queues belonging to a channel */
817#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
818 for (_tx_queue = &_channel->efx->tx_queue[0]; \
819 _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES]; \
820 _tx_queue++) \
821 if ((!_tx_queue->used) || \
822 (_tx_queue->channel != _channel)) \
823 continue; \
824 else
825
826/* Iterate over all used RX queues */
827#define efx_for_each_rx_queue(_rx_queue, _efx) \
828 for (_rx_queue = &_efx->rx_queue[0]; \
829 _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES]; \
830 _rx_queue++) \
831 if (!_rx_queue->used) \
832 continue; \
833 else
834
835/* Iterate over all RX queues belonging to a channel */
836#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
837 for (_rx_queue = &_channel->efx->rx_queue[0]; \
838 _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES]; \
839 _rx_queue++) \
840 if ((!_rx_queue->used) || \
841 (_rx_queue->channel != _channel)) \
842 continue; \
843 else
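/* Illustrative sketch only (not part of the driver): the iterators above
 * are used like ordinary for loops, e.g. to reset a per-channel counter. */
static inline void efx_example_clear_rx_alloc_levels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->rx_alloc_level = 0;
}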
844
845/* Returns a pointer to the specified receive buffer in the RX
846 * descriptor queue.
847 */
848static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
849 unsigned int index)
850{
851 return (&rx_queue->buffer[index]);
852}
853
854/* Set bit in a little-endian bitfield */
855static inline void set_bit_le(int nr, unsigned char *addr)
856{
857 addr[nr / 8] |= (1 << (nr % 8));
858}
859
860/* Clear bit in a little-endian bitfield */
861static inline void clear_bit_le(int nr, unsigned char *addr)
862{
863 addr[nr / 8] &= ~(1 << (nr % 8));
864}
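/* Illustrative sketch only (not part of the driver): marking an entry in
 * the multicast filter declared in struct efx_nic, assuming the hash
 * index has already been derived from the destination MAC address by
 * hardware-specific code not shown here. */
static inline void efx_example_set_mcast_bit(struct efx_nic *efx,
					     unsigned int hash_index)
{
	set_bit_le(hash_index & (EFX_MCAST_HASH_ENTRIES - 1),
		   efx->multicast_hash.byte);
}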
865
866
867/**
868 * EFX_MAX_FRAME_LEN - calculate maximum frame length
869 *
870 * This calculates the maximum frame length that will be used for a
871 * given MTU. The frame length will be equal to the MTU plus a
872 * constant amount of header space and padding. This is the quantity
873 * that the net driver will program into the MAC as the maximum frame
874 * length.
875 *
876 * The 10G MAC used in Falcon requires 8-byte alignment on the frame
877 * length, so we round up to the nearest 8.
878 */
879#define EFX_MAX_FRAME_LEN(mtu) \
880 ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */) + 7) & ~7)
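/* Worked example (illustrative): for the default MTU of 1500 this gives
 * 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (FCS) = 1522 bytes, which is
 * then rounded up to the next multiple of 8, i.e. 1528 bytes. */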
881
882
883#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
new file mode 100644
index 000000000000..9d02c84e6b2d
--- /dev/null
+++ b/drivers/net/sfc/phy.h
@@ -0,0 +1,48 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_PHY_H
11#define EFX_PHY_H
12
13/****************************************************************************
14 * 10Xpress (SFX7101) PHY
15 */
16extern struct efx_phy_operations falcon_tenxpress_phy_ops;
17
18enum tenxpress_state {
19 TENXPRESS_STATUS_OFF = 0,
20 TENXPRESS_STATUS_OTEMP = 1,
21 TENXPRESS_STATUS_NORMAL = 2,
22};
23
24extern void tenxpress_set_state(struct efx_nic *efx,
25 enum tenxpress_state state);
26extern void tenxpress_phy_blink(struct efx_nic *efx, int blink);
27extern void tenxpress_crc_err(struct efx_nic *efx);
28
29/****************************************************************************
30 * Exported functions from the driver for XFP optical PHYs
31 */
32extern struct efx_phy_operations falcon_xfp_phy_ops;
33
34/* The QUAKE XFP PHY provides various H/W control states for LEDs */
35#define QUAKE_LED_LINK_INVAL (0)
36#define QUAKE_LED_LINK_STAT (1)
37#define QUAKE_LED_LINK_ACT (2)
38#define QUAKE_LED_LINK_ACTSTAT (3)
39#define QUAKE_LED_OFF (4)
40#define QUAKE_LED_ON (5)
41#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */
42/* What link the LED tracks */
43#define QUAKE_LED_TXLINK (0)
44#define QUAKE_LED_RXLINK (8)
45
46extern void xfp_set_led(struct efx_nic *p, int led, int state);
47
48#endif
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
new file mode 100644
index 000000000000..551299b462ae
--- /dev/null
+++ b/drivers/net/sfc/rx.c
@@ -0,0 +1,875 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/socket.h>
12#include <linux/in.h>
13#include <linux/ip.h>
14#include <linux/tcp.h>
15#include <linux/udp.h>
16#include <net/ip.h>
17#include <net/checksum.h>
18#include "net_driver.h"
19#include "rx.h"
20#include "efx.h"
21#include "falcon.h"
22#include "workarounds.h"
23
24/* Number of RX descriptors pushed at once. */
25#define EFX_RX_BATCH 8
26
27/* Size of buffer allocated for skb header area. */
28#define EFX_SKB_HEADERS 64u
29
30/*
31 * rx_alloc_method - RX buffer allocation method
32 *
33 * This driver supports two methods for allocating and using RX buffers:
34 * each RX buffer may be backed by an skb or by an order-n page.
35 *
36 * When LRO is in use then the second method has a lower overhead,
37 * since we don't have to allocate then free skbs on reassembled frames.
38 *
39 * Values:
40 * - RX_ALLOC_METHOD_AUTO = 0
41 * - RX_ALLOC_METHOD_SKB = 1
42 * - RX_ALLOC_METHOD_PAGE = 2
43 *
44 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
45 * controlled by the parameters below.
46 *
47 * - Pushing and popping descriptors are separated by the rx_queue size,
48 *   so the watermarks should be ~rxd_size.
49 * - The performance win from using page-based allocation for LRO is less
50 *   than the performance hit of using page-based allocation for non-LRO,
51 *   so the watermarks should reflect this.
52 *
53 * Per channel we maintain a single variable, updated by each channel:
54 *
55 * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
56 * RX_ALLOC_FACTOR_SKB)
57 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
58 * limits the hysteresis), and update the allocation strategy:
59 *
60 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
61 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
62 */
63static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;
64
65#define RX_ALLOC_LEVEL_LRO 0x2000
66#define RX_ALLOC_LEVEL_MAX 0x3000
67#define RX_ALLOC_FACTOR_LRO 1
68#define RX_ALLOC_FACTOR_SKB (-2)
69
70/* This is the percentage fill level below which new RX descriptors
71 * will be added to the RX descriptor ring.
72 */
73static unsigned int rx_refill_threshold = 90;
74
75/* This is the percentage fill level to which an RX queue will be refilled
76 * when the "RX refill threshold" is reached.
77 */
78static unsigned int rx_refill_limit = 95;
79
80/*
81 * RX maximum head room required.
82 *
83 * This must be at least 1 to prevent overflow and at least 2 to allow
84 * pipelined receives.
85 */
86#define EFX_RXD_HEAD_ROOM 2
87
88/* Macros for zero-order pages (potentially) containing multiple RX buffers */
89#define RX_DATA_OFFSET(_data) \
90 (((unsigned long) (_data)) & (PAGE_SIZE-1))
91#define RX_BUF_OFFSET(_rx_buf) \
92 RX_DATA_OFFSET((_rx_buf)->data)
93
94#define RX_PAGE_SIZE(_efx) \
95 (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
96
97
98/**************************************************************************
99 *
100 * Linux generic LRO handling
101 *
102 **************************************************************************
103 */
104
105static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
106 void **tcpudp_hdr, u64 *hdr_flags, void *priv)
107{
108 struct efx_channel *channel = (struct efx_channel *)priv;
109 struct iphdr *iph;
110 struct tcphdr *th;
111
112 iph = (struct iphdr *)skb->data;
113 if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
114 goto fail;
115
116 th = (struct tcphdr *)(skb->data + iph->ihl * 4);
117
118 *tcpudp_hdr = th;
119 *ip_hdr = iph;
120 *hdr_flags = LRO_IPV4 | LRO_TCP;
121
122 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
123 return 0;
124fail:
125 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
126 return -1;
127}
128
129static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
130 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
131 void *priv)
132{
133 struct efx_channel *channel = (struct efx_channel *)priv;
134 struct ethhdr *eh;
135 struct iphdr *iph;
136
137 /* We support EtherII and VLAN encapsulated IPv4 */
138 eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
139 *mac_hdr = eh;
140
141 if (eh->h_proto == htons(ETH_P_IP)) {
142 iph = (struct iphdr *)(eh + 1);
143 } else {
144 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
145 if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
146 goto fail;
147
148 iph = (struct iphdr *)(veh + 1);
149 }
150 *ip_hdr = iph;
151
152 /* We can only do LRO over TCP */
153 if (iph->protocol != IPPROTO_TCP)
154 goto fail;
155
156 *hdr_flags = LRO_IPV4 | LRO_TCP;
157 *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
158
159 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
160 return 0;
161 fail:
162 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
163 return -1;
164}
165
166int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
167{
168 size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
169 struct net_lro_desc *lro_arr;
170
171 /* Allocate the LRO descriptors structure */
172 lro_arr = kzalloc(s, GFP_KERNEL);
173 if (lro_arr == NULL)
174 return -ENOMEM;
175
176 lro_mgr->lro_arr = lro_arr;
177 lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
178 lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
179 lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
180
181 lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
182 lro_mgr->get_frag_header = efx_get_frag_hdr;
183 lro_mgr->dev = efx->net_dev;
184
185 lro_mgr->features = LRO_F_NAPI;
186
187 /* We can pass packets up with the checksum intact */
188 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
189
190 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
191
192 return 0;
193}
194
195void efx_lro_fini(struct net_lro_mgr *lro_mgr)
196{
197 kfree(lro_mgr->lro_arr);
198 lro_mgr->lro_arr = NULL;
199}
200
201/**
202 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
203 *
204 * @rx_queue: Efx RX queue
205 * @rx_buf: RX buffer structure to populate
206 *
207 * This allocates memory for a new receive buffer, maps it for DMA,
208 * and populates a struct efx_rx_buffer with the relevant
209 * information. Return a negative error code or 0 on success.
210 */
211static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
212 struct efx_rx_buffer *rx_buf)
213{
214 struct efx_nic *efx = rx_queue->efx;
215 struct net_device *net_dev = efx->net_dev;
216 int skb_len = efx->rx_buffer_len;
217
218 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
219 if (unlikely(!rx_buf->skb))
220 return -ENOMEM;
221
222 /* Adjust the SKB for padding and checksum */
223 skb_reserve(rx_buf->skb, NET_IP_ALIGN);
224 rx_buf->len = skb_len - NET_IP_ALIGN;
225 rx_buf->data = (char *)rx_buf->skb->data;
226 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
227
228 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
229 rx_buf->data, rx_buf->len,
230 PCI_DMA_FROMDEVICE);
231
232 if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
233 dev_kfree_skb_any(rx_buf->skb);
234 rx_buf->skb = NULL;
235 return -EIO;
236 }
237
238 return 0;
239}
240
241/**
242 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
243 *
244 * @rx_queue: Efx RX queue
245 * @rx_buf: RX buffer structure to populate
246 *
247 * This allocates memory for a new receive buffer, maps it for DMA,
248 * and populates a struct efx_rx_buffer with the relevant
249 * information. Return a negative error code or 0 on success.
250 */
251static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
252 struct efx_rx_buffer *rx_buf)
253{
254 struct efx_nic *efx = rx_queue->efx;
255 int bytes, space, offset;
256
257 bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
258
259 /* If there is space left in the previously allocated page,
260 * then use it. Otherwise allocate a new one */
261 rx_buf->page = rx_queue->buf_page;
262 if (rx_buf->page == NULL) {
263 dma_addr_t dma_addr;
264
265 rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
266 efx->rx_buffer_order);
267 if (unlikely(rx_buf->page == NULL))
268 return -ENOMEM;
269
270 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
271 0, RX_PAGE_SIZE(efx),
272 PCI_DMA_FROMDEVICE);
273
274 if (unlikely(pci_dma_mapping_error(dma_addr))) {
275 __free_pages(rx_buf->page, efx->rx_buffer_order);
276 rx_buf->page = NULL;
277 return -EIO;
278 }
279
280 rx_queue->buf_page = rx_buf->page;
281 rx_queue->buf_dma_addr = dma_addr;
282 rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
283 EFX_PAGE_IP_ALIGN);
284 }
285
286 offset = RX_DATA_OFFSET(rx_queue->buf_data);
287 rx_buf->len = bytes;
288 rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
289 rx_buf->data = rx_queue->buf_data;
290
291 /* Try to pack multiple buffers per page */
292 if (efx->rx_buffer_order == 0) {
293 /* The next buffer starts on the next 512 byte boundary */
294 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
295 offset += ((bytes + 0x1ff) & ~0x1ff);
296
297 space = RX_PAGE_SIZE(efx) - offset;
298 if (space >= bytes) {
299 /* Refs dropped on kernel releasing each skb */
300 get_page(rx_queue->buf_page);
301 goto out;
302 }
303 }
304
305 /* This is the final RX buffer for this page, so mark it for
306 * unmapping */
307 rx_queue->buf_page = NULL;
308 rx_buf->unmap_addr = rx_queue->buf_dma_addr;
309
310 out:
311 return 0;
312}
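/* Illustrative note (not from the original source): with 4096-byte pages,
 * order-0 allocation and a buffer length of roughly 1.5KB, the 512-byte
 * rounding above places buffers at 1536-byte strides, so two RX buffers
 * share each page before the page is handed over for unmapping. */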
313
314/* This allocates memory for a new receive buffer, maps it for DMA,
315 * and populates a struct efx_rx_buffer with the relevant
316 * information.
317 */
318static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
319 struct efx_rx_buffer *new_rx_buf)
320{
321 int rc = 0;
322
323 if (rx_queue->channel->rx_alloc_push_pages) {
324 new_rx_buf->skb = NULL;
325 rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
326 rx_queue->alloc_page_count++;
327 } else {
328 new_rx_buf->page = NULL;
329 rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
330 rx_queue->alloc_skb_count++;
331 }
332
333 if (unlikely(rc < 0))
334 EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
335 rx_queue->queue, rc);
336 return rc;
337}
338
339static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
340 struct efx_rx_buffer *rx_buf)
341{
342 if (rx_buf->page) {
343 EFX_BUG_ON_PARANOID(rx_buf->skb);
344 if (rx_buf->unmap_addr) {
345 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
346 RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
347 rx_buf->unmap_addr = 0;
348 }
349 } else if (likely(rx_buf->skb)) {
350 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
351 rx_buf->len, PCI_DMA_FROMDEVICE);
352 }
353}
354
355static inline void efx_free_rx_buffer(struct efx_nic *efx,
356 struct efx_rx_buffer *rx_buf)
357{
358 if (rx_buf->page) {
359 __free_pages(rx_buf->page, efx->rx_buffer_order);
360 rx_buf->page = NULL;
361 } else if (likely(rx_buf->skb)) {
362 dev_kfree_skb_any(rx_buf->skb);
363 rx_buf->skb = NULL;
364 }
365}
366
367static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
368 struct efx_rx_buffer *rx_buf)
369{
370 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
371 efx_free_rx_buffer(rx_queue->efx, rx_buf);
372}
373
374/**
375 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
376 * @rx_queue: RX descriptor queue
377 * @retry: Recheck the fill level
378 * This will aim to fill the RX descriptor queue up to
379 * @rx_queue->fast_fill_limit. If there is insufficient atomic
380 * memory to do so, the caller should retry.
381 */
382static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
383 int retry)
384{
385 struct efx_rx_buffer *rx_buf;
386 unsigned fill_level, index;
387 int i, space, rc = 0;
388
389 /* Calculate current fill level. Do this outside the lock,
390 * because most of the time we'll end up not wanting to do the
391 * fill anyway.
392 */
393 fill_level = (rx_queue->added_count - rx_queue->removed_count);
394 EFX_BUG_ON_PARANOID(fill_level >
395 rx_queue->efx->type->rxd_ring_mask + 1);
396
397 /* Don't fill if we don't need to */
398 if (fill_level >= rx_queue->fast_fill_trigger)
399 return 0;
400
401 /* Record minimum fill level */
402 if (unlikely(fill_level < rx_queue->min_fill))
403 if (fill_level)
404 rx_queue->min_fill = fill_level;
405
406 /* Acquire RX add lock. If this lock is contended, then a fast
407 * fill must already be in progress (e.g. in the refill
408 * work item), so we don't need to do anything
409 */
410 if (!spin_trylock_bh(&rx_queue->add_lock))
411 return -1;
412
413 retry:
414 /* Recalculate current fill level now that we have the lock */
415 fill_level = (rx_queue->added_count - rx_queue->removed_count);
416 EFX_BUG_ON_PARANOID(fill_level >
417 rx_queue->efx->type->rxd_ring_mask + 1);
418 space = rx_queue->fast_fill_limit - fill_level;
419 if (space < EFX_RX_BATCH)
420 goto out_unlock;
421
422 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
423 " level %d to level %d using %s allocation\n",
424 rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
425 rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
426
427 do {
428 for (i = 0; i < EFX_RX_BATCH; ++i) {
429 index = (rx_queue->added_count &
430 rx_queue->efx->type->rxd_ring_mask);
431 rx_buf = efx_rx_buffer(rx_queue, index);
432 rc = efx_init_rx_buffer(rx_queue, rx_buf);
433 if (unlikely(rc))
434 goto out;
435 ++rx_queue->added_count;
436 }
437 } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
438
439 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
440 "to level %d\n", rx_queue->queue,
441 rx_queue->added_count - rx_queue->removed_count);
442
443 out:
444 /* Send write pointer to card. */
445 falcon_notify_rx_desc(rx_queue);
446
447 /* If the fast fill is running from inside the refill work item, then
448 * on SMP systems it may be running on a different CPU from
449 * RX event processing, which means that the fill level may now be
450 * out of date. */
451 if (unlikely(retry && (rc == 0)))
452 goto retry;
453
454 out_unlock:
455 spin_unlock_bh(&rx_queue->add_lock);
456
457 return rc;
458}
459
460/**
461 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
462 * @rx_queue: RX descriptor queue
463 *
464 * This will aim to fill the RX descriptor queue up to
465 * @rx_queue->fast_fill_limit. If there is insufficient memory to do so,
466 * it will schedule a work item to immediately continue the fast fill.
467 */
468void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
469{
470 int rc;
471
472 rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
473 if (unlikely(rc)) {
474 /* Schedule the work item to run immediately. The hope is
475 * that work is immediately pending to free some memory
476 * (e.g. an RX event or TX completion)
477 */
478 efx_schedule_slow_fill(rx_queue, 0);
479 }
480}
481
482void efx_rx_work(struct work_struct *data)
483{
484 struct efx_rx_queue *rx_queue;
485 int rc;
486
487 rx_queue = container_of(data, struct efx_rx_queue, work.work);
488
489 if (unlikely(!rx_queue->channel->enabled))
490 return;
491
492 EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
493 "%d\n", rx_queue->queue, raw_smp_processor_id());
494
495 ++rx_queue->slow_fill_count;
496 /* Push new RX descriptors, allowing at least 1 jiffy for
497 * the kernel to free some more memory. */
498 rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
499 if (rc)
500 efx_schedule_slow_fill(rx_queue, 1);
501}
502
503static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
504 struct efx_rx_buffer *rx_buf,
505 int len, int *discard,
506 int *leak_packet)
507{
508 struct efx_nic *efx = rx_queue->efx;
509 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
510
511 if (likely(len <= max_len))
512 return;
513
514 /* The packet must be discarded, but this is only a fatal error
515 * if the caller indicated it was
516 */
517 *discard = 1;
518
519 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
520 EFX_ERR_RL(efx, " RX queue %d seriously overlength "
521 "RX event (0x%x > 0x%x+0x%x). Leaking\n",
522 rx_queue->queue, len, max_len,
523 efx->type->rx_buffer_padding);
524 /* If this buffer was skb-allocated, then the meta
525 * data at the end of the skb will be trashed. So
526 * we have no choice but to leak the fragment.
527 */
528 *leak_packet = (rx_buf->skb != NULL);
529 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
530 } else {
531 EFX_ERR_RL(efx, " RX queue %d overlength RX event "
532 "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
533 }
534
535 rx_queue->channel->n_rx_overlength++;
536}
537
538/* Pass a received packet up through the generic LRO stack
539 *
540 * Handles driverlink veto, and passes the fragment up via
541 * the appropriate LRO method
542 */
543static inline void efx_rx_packet_lro(struct efx_channel *channel,
544 struct efx_rx_buffer *rx_buf)
545{
546 struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
547 void *priv = channel;
548
549 /* Pass the skb/page into the LRO engine */
550 if (rx_buf->page) {
551 struct skb_frag_struct frags;
552
553 frags.page = rx_buf->page;
554 frags.page_offset = RX_BUF_OFFSET(rx_buf);
555 frags.size = rx_buf->len;
556
557 lro_receive_frags(lro_mgr, &frags, rx_buf->len,
558 rx_buf->len, priv, 0);
559
560 EFX_BUG_ON_PARANOID(rx_buf->skb);
561 rx_buf->page = NULL;
562 } else {
563 EFX_BUG_ON_PARANOID(!rx_buf->skb);
564
565 lro_receive_skb(lro_mgr, rx_buf->skb, priv);
566 rx_buf->skb = NULL;
567 }
568}
569
570/* Allocate and construct an SKB around a struct page.*/
571static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
572 struct efx_nic *efx,
573 int hdr_len)
574{
575 struct sk_buff *skb;
576
577 /* Allocate an SKB to store the headers */
578 skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
579 if (unlikely(skb == NULL)) {
580 EFX_ERR_RL(efx, "RX out of memory for skb\n");
581 return NULL;
582 }
583
584 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
585 EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
586
587 skb->ip_summed = CHECKSUM_UNNECESSARY;
588 skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
589
590 skb->len = rx_buf->len;
591 skb->truesize = rx_buf->len + sizeof(struct sk_buff);
592 memcpy(skb->data, rx_buf->data, hdr_len);
593 skb->tail += hdr_len;
594
595 /* Append the remaining page onto the frag list */
596 if (unlikely(rx_buf->len > hdr_len)) {
597 struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
598 frag->page = rx_buf->page;
599 frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
600 frag->size = skb->len - hdr_len;
601 skb_shinfo(skb)->nr_frags = 1;
602 skb->data_len = frag->size;
603 } else {
604 __free_pages(rx_buf->page, efx->rx_buffer_order);
605 skb->data_len = 0;
606 }
607
608 /* Ownership has transferred from the rx_buf to skb */
609 rx_buf->page = NULL;
610
611 /* Move past the ethernet header */
612 skb->protocol = eth_type_trans(skb, efx->net_dev);
613
614 return skb;
615}
616
617void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
618 unsigned int len, int checksummed, int discard)
619{
620 struct efx_nic *efx = rx_queue->efx;
621 struct efx_rx_buffer *rx_buf;
622 int leak_packet = 0;
623
624 rx_buf = efx_rx_buffer(rx_queue, index);
625 EFX_BUG_ON_PARANOID(!rx_buf->data);
626 EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
627 EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
628
629 /* This allows the refill path to post another buffer.
630 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
631 * isn't overwritten yet.
632 */
633 rx_queue->removed_count++;
634
635 /* Validate the length encoded in the event vs the descriptor pushed */
636 efx_rx_packet__check_len(rx_queue, rx_buf, len,
637 &discard, &leak_packet);
638
639 EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
640 rx_queue->queue, index,
641 (unsigned long long)rx_buf->dma_addr, len,
642 (checksummed ? " [SUMMED]" : ""),
643 (discard ? " [DISCARD]" : ""));
644
645 /* Discard packet, if instructed to do so */
646 if (unlikely(discard)) {
647 if (unlikely(leak_packet))
648 rx_queue->channel->n_skbuff_leaks++;
649 else
650 /* We haven't called efx_unmap_rx_buffer yet,
651 * so fini the entire rx_buffer here */
652 efx_fini_rx_buffer(rx_queue, rx_buf);
653 return;
654 }
655
656 /* Release card resources - assumes all RX buffers consumed in-order
657 * per RX queue
658 */
659 efx_unmap_rx_buffer(efx, rx_buf);
660
661 /* Prefetch nice and early so data will (hopefully) be in cache by
662 * the time we look at it.
663 */
664 prefetch(rx_buf->data);
665
666 /* Pipeline receives so that we give time for packet headers to be
667 * prefetched into cache.
668 */
669 rx_buf->len = len;
670 if (rx_queue->channel->rx_pkt)
671 __efx_rx_packet(rx_queue->channel,
672 rx_queue->channel->rx_pkt,
673 rx_queue->channel->rx_pkt_csummed);
674 rx_queue->channel->rx_pkt = rx_buf;
675 rx_queue->channel->rx_pkt_csummed = checksummed;
676}
677
678/* Handle a received packet. Second half: Touches packet payload. */
679void __efx_rx_packet(struct efx_channel *channel,
680 struct efx_rx_buffer *rx_buf, int checksummed)
681{
682 struct efx_nic *efx = channel->efx;
683 struct sk_buff *skb;
684 int lro = efx->net_dev->features & NETIF_F_LRO;
685
686 if (rx_buf->skb) {
687 prefetch(skb_shinfo(rx_buf->skb));
688
689 skb_put(rx_buf->skb, rx_buf->len);
690
691 /* Move past the ethernet header. rx_buf->data still points
692 * at the ethernet header */
693 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
694 efx->net_dev);
695 }
696
697 /* Both our generic-LRO and SFC-SSR support skb and page based
698 * allocation, but neither supports switching from one to the
699 * other on the fly. If we spot that the allocation mode has
700 * changed, then flush the LRO state.
701 */
702 if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
703 efx_flush_lro(channel);
704 channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
705 }
706 if (likely(checksummed && lro)) {
707 efx_rx_packet_lro(channel, rx_buf);
708 goto done;
709 }
710
711 /* Form an skb if required */
712 if (rx_buf->page) {
713 int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
714 skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
715 if (unlikely(skb == NULL)) {
716 efx_free_rx_buffer(efx, rx_buf);
717 goto done;
718 }
719 } else {
720 /* We now own the SKB */
721 skb = rx_buf->skb;
722 rx_buf->skb = NULL;
723 }
724
725 EFX_BUG_ON_PARANOID(rx_buf->page);
726 EFX_BUG_ON_PARANOID(rx_buf->skb);
727 EFX_BUG_ON_PARANOID(!skb);
728
729 /* Set the SKB flags */
730 if (unlikely(!checksummed || !efx->rx_checksum_enabled))
731 skb->ip_summed = CHECKSUM_NONE;
732
733 /* Pass the packet up */
734 netif_receive_skb(skb);
735
736 /* Update allocation strategy method */
737 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
738
739 /* fall-thru */
740done:
741 efx->net_dev->last_rx = jiffies;
742}
743
744void efx_rx_strategy(struct efx_channel *channel)
745{
746 enum efx_rx_alloc_method method = rx_alloc_method;
747
748 /* Only makes sense to use page based allocation if LRO is enabled */
749 if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
750 method = RX_ALLOC_METHOD_SKB;
751 } else if (method == RX_ALLOC_METHOD_AUTO) {
752 /* Constrain the rx_alloc_level */
753 if (channel->rx_alloc_level < 0)
754 channel->rx_alloc_level = 0;
755 else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
756 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
757
758 /* Decide on the allocation method */
759 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
760 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
761 }
762
763 /* Push the option */
764 channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
765}
766
767int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
768{
769 struct efx_nic *efx = rx_queue->efx;
770 unsigned int rxq_size;
771 int rc;
772
773 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
774
775 /* Allocate RX buffers */
776 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
777 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
778 if (!rx_queue->buffer) {
779 rc = -ENOMEM;
780 goto fail1;
781 }
782
783 rc = falcon_probe_rx(rx_queue);
784 if (rc)
785 goto fail2;
786
787 return 0;
788
789 fail2:
790 kfree(rx_queue->buffer);
791 rx_queue->buffer = NULL;
792 fail1:
793 rx_queue->used = 0;
794
795 return rc;
796}
797
798int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
799{
800 struct efx_nic *efx = rx_queue->efx;
801 unsigned int max_fill, trigger, limit;
802
803 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
804
805 /* Initialise ptr fields */
806 rx_queue->added_count = 0;
807 rx_queue->notified_count = 0;
808 rx_queue->removed_count = 0;
809 rx_queue->min_fill = -1U;
810 rx_queue->min_overfill = -1U;
811
812 /* Initialise limit fields */
813 max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
814 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
815 limit = max_fill * min(rx_refill_limit, 100U) / 100U;
816
817 rx_queue->max_fill = max_fill;
818 rx_queue->fast_fill_trigger = trigger;
819 rx_queue->fast_fill_limit = limit;
820
821 /* Set up RX descriptor ring */
822 return falcon_init_rx(rx_queue);
823}
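
The fast-fill trigger and limit set up in efx_init_rx_queue() just above are plain percentages of the usable ring. A minimal standalone sketch of that arithmetic follows; the 4096-entry ring, the head room of 8 and the 90%/95% percentages are illustrative assumptions, not values taken from this patch.

	/* Standalone sketch of the fill-level arithmetic in efx_init_rx_queue().
	 * Ring size, head room and percentages are assumed example values. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int ring_size = 4096;      /* rxd_ring_mask + 1 (assumed) */
		unsigned int head_room = 8;         /* EFX_RXD_HEAD_ROOM (assumed) */
		unsigned int threshold_pct = 90;    /* rx_refill_threshold (assumed) */
		unsigned int limit_pct = 95;        /* rx_refill_limit (assumed) */

		unsigned int max_fill = ring_size - head_room;
		unsigned int trigger = max_fill *
			(threshold_pct < 100 ? threshold_pct : 100) / 100;
		unsigned int limit = max_fill *
			(limit_pct < 100 ? limit_pct : 100) / 100;

		/* Prints: max_fill=4088 trigger=3679 limit=3883 */
		printf("max_fill=%u trigger=%u limit=%u\n", max_fill, trigger, limit);
		return 0;
	}
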
824
825void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
826{
827 int i;
828 struct efx_rx_buffer *rx_buf;
829
830 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
831
832 falcon_fini_rx(rx_queue);
833
834	/* Release RX buffers. NB: start at index 0, not the current HW ptr */
835 if (rx_queue->buffer) {
836 for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
837 rx_buf = efx_rx_buffer(rx_queue, i);
838 efx_fini_rx_buffer(rx_queue, rx_buf);
839 }
840 }
841
842 /* For a page that is part-way through splitting into RX buffers */
843 if (rx_queue->buf_page != NULL) {
844 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
845 RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
846 __free_pages(rx_queue->buf_page,
847 rx_queue->efx->rx_buffer_order);
848 rx_queue->buf_page = NULL;
849 }
850}
851
852void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
853{
854 EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
855
856 falcon_remove_rx(rx_queue);
857
858 kfree(rx_queue->buffer);
859 rx_queue->buffer = NULL;
860 rx_queue->used = 0;
861}
862
863void efx_flush_lro(struct efx_channel *channel)
864{
865 lro_flush_all(&channel->lro_mgr);
866}
867
868
869module_param(rx_alloc_method, int, 0644);
870MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
871
872module_param(rx_refill_threshold, uint, 0444);
873MODULE_PARM_DESC(rx_refill_threshold,
874 "RX descriptor ring fast/slow fill threshold (%)");
875
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
new file mode 100644
index 000000000000..f35e377bfc5f
--- /dev/null
+++ b/drivers/net/sfc/rx.h
@@ -0,0 +1,29 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_RX_H
11#define EFX_RX_H
12
13#include "net_driver.h"
14
15int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
16void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
17int efx_init_rx_queue(struct efx_rx_queue *rx_queue);
18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
19
20int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
21void efx_lro_fini(struct net_lro_mgr *lro_mgr);
22void efx_flush_lro(struct efx_channel *channel);
23void efx_rx_strategy(struct efx_channel *channel);
24void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
25void efx_rx_work(struct work_struct *data);
26void __efx_rx_packet(struct efx_channel *channel,
27 struct efx_rx_buffer *rx_buf, int checksummed);
28
29#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
new file mode 100644
index 000000000000..11fa9fb8f48b
--- /dev/null
+++ b/drivers/net/sfc/sfe4001.c
@@ -0,0 +1,252 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/*****************************************************************************
11 * Support for the SFE4001 NIC: driver code for the PCA9539 I/O expander that
12 * controls the PHY power rails, and for the MAX6647 temp. sensor used to check
13 * the PHY
14 */
15#include <linux/delay.h>
16#include "efx.h"
17#include "phy.h"
18#include "boards.h"
19#include "falcon.h"
20#include "falcon_hwdefs.h"
21#include "mac.h"
22
23/**************************************************************************
24 *
25 * I2C IO Expander device
26 *
27 **************************************************************************/
28#define PCA9539 0x74
29
30#define P0_IN 0x00
31#define P0_OUT 0x02
32#define P0_INVERT 0x04
33#define P0_CONFIG 0x06
34
35#define P0_EN_1V0X_LBN 0
36#define P0_EN_1V0X_WIDTH 1
37#define P0_EN_1V2_LBN 1
38#define P0_EN_1V2_WIDTH 1
39#define P0_EN_2V5_LBN 2
40#define P0_EN_2V5_WIDTH 1
41#define P0_EN_3V3X_LBN 3
42#define P0_EN_3V3X_WIDTH 1
43#define P0_EN_5V_LBN 4
44#define P0_EN_5V_WIDTH 1
45#define P0_SHORTEN_JTAG_LBN 5
46#define P0_SHORTEN_JTAG_WIDTH 1
47#define P0_X_TRST_LBN 6
48#define P0_X_TRST_WIDTH 1
49#define P0_DSP_RESET_LBN 7
50#define P0_DSP_RESET_WIDTH 1
51
52#define P1_IN 0x01
53#define P1_OUT 0x03
54#define P1_INVERT 0x05
55#define P1_CONFIG 0x07
56
57#define P1_AFE_PWD_LBN 0
58#define P1_AFE_PWD_WIDTH 1
59#define P1_DSP_PWD25_LBN 1
60#define P1_DSP_PWD25_WIDTH 1
61#define P1_RESERVED_LBN 2
62#define P1_RESERVED_WIDTH 2
63#define P1_SPARE_LBN 4
64#define P1_SPARE_WIDTH 4
65
66
67/**************************************************************************
68 *
69 * Temperature Sensor
70 *
71 **************************************************************************/
72#define MAX6647 0x4e
73
74#define RLTS 0x00
75#define RLTE 0x01
76#define RSL 0x02
77#define RCL 0x03
78#define RCRA 0x04
79#define RLHN 0x05
80#define RLLI 0x06
81#define RRHI 0x07
82#define RRLS 0x08
83#define WCRW 0x0a
84#define WLHO 0x0b
85#define WRHA 0x0c
86#define WRLN 0x0e
87#define OSHT 0x0f
88#define REET 0x10
89#define RIET 0x11
90#define RWOE 0x19
91#define RWOI 0x20
92#define HYS 0x21
93#define QUEUE 0x22
94#define MFID 0xfe
95#define REVID 0xff
96
97/* Status bits */
98#define MAX6647_BUSY (1 << 7) /* ADC is converting */
99#define MAX6647_LHIGH (1 << 6) /* Local high temp. alarm */
100#define MAX6647_LLOW (1 << 5) /* Local low temp. alarm */
101#define MAX6647_RHIGH (1 << 4) /* Remote high temp. alarm */
102#define MAX6647_RLOW (1 << 3) /* Remote low temp. alarm */
103#define MAX6647_FAULT (1 << 2) /* DXN/DXP short/open circuit */
104#define MAX6647_EOT (1 << 1) /* Remote junction overtemp. */
105#define MAX6647_IOT (1 << 0) /* Local junction overtemp. */
106
107static const u8 xgphy_max_temperature = 90;
108
109void sfe4001_poweroff(struct efx_nic *efx)
110{
111 struct efx_i2c_interface *i2c = &efx->i2c;
112
113 u8 cfg, out, in;
114
115 EFX_INFO(efx, "%s\n", __func__);
116
117 /* Turn off all power rails */
118 out = 0xff;
119 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
120
121 /* Disable port 1 outputs on IO expander */
122 cfg = 0xff;
123 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
124
125 /* Disable port 0 outputs on IO expander */
126 cfg = 0xff;
127 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
128
129 /* Clear any over-temperature alert */
130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
131}
132
133/* This board uses an I2C expander to provide power to the PHY, which needs to
134 * be turned on before the PHY can be used.
135 * Context: Process context, rtnl lock held
136 */
137int sfe4001_poweron(struct efx_nic *efx)
138{
139 struct efx_i2c_interface *i2c = &efx->i2c;
140 unsigned int count;
141 int rc;
142 u8 out, in, cfg;
143 efx_dword_t reg;
144
145 /* 10Xpress has fixed-function LED pins, so there is no board-specific
146 * blink code. */
147 efx->board_info.blink = tenxpress_phy_blink;
148
149 /* Ensure that XGXS and XAUI SerDes are held in reset */
150 EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1,
151 XX_PWRDNB_EN, 1,
152 XX_RSTPLLAB_EN, 1,
153 XX_RESETA_EN, 1,
154 XX_RESETB_EN, 1,
155 XX_RSTXGXSRX_EN, 1,
156 XX_RSTXGXSTX_EN, 1);
157 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
158 udelay(10);
159
160 /* Set DSP over-temperature alert threshold */
161 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
162 rc = efx_i2c_write(i2c, MAX6647, WLHO,
163 &xgphy_max_temperature, 1);
164 if (rc)
165 goto fail1;
166
167 /* Read it back and verify */
168 rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, 1);
169 if (rc)
170 goto fail1;
171 if (in != xgphy_max_temperature) {
172 rc = -EFAULT;
173 goto fail1;
174 }
175
176 /* Clear any previous over-temperature alert */
177 rc = efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
178 if (rc)
179 goto fail1;
180
181 /* Enable port 0 and port 1 outputs on IO expander */
182 cfg = 0x00;
183 rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
184 if (rc)
185 goto fail1;
186 cfg = 0xff & ~(1 << P1_SPARE_LBN);
187 rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
188 if (rc)
189 goto fail2;
190
191 /* Turn all power off then wait 1 sec. This ensures PHY is reset */
192 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
193 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
194 (0 << P0_EN_1V0X_LBN));
195 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
196 if (rc)
197 goto fail3;
198
199 schedule_timeout_uninterruptible(HZ);
200 count = 0;
201 do {
202 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
203 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
204 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
205 (1 << P0_X_TRST_LBN));
206
207 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
208 if (rc)
209 goto fail3;
210 msleep(10);
211
212 /* Turn on 1V power rail */
213 out &= ~(1 << P0_EN_1V0X_LBN);
214 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
215 if (rc)
216 goto fail3;
217
218 EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
219
220 schedule_timeout_uninterruptible(HZ);
221
222 /* Check DSP is powered */
223 rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, 1);
224 if (rc)
225 goto fail3;
226 if (in & (1 << P1_AFE_PWD_LBN))
227 goto done;
228
229 } while (++count < 20);
230
231 EFX_INFO(efx, "timed out waiting for power\n");
232 rc = -ETIMEDOUT;
233 goto fail3;
234
235done:
236 EFX_INFO(efx, "PHY is powered on\n");
237 return 0;
238
239fail3:
240 /* Turn off all power rails */
241 out = 0xff;
242 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
243 /* Disable port 1 outputs on IO expander */
244 out = 0xff;
245 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
246fail2:
247 /* Disable port 0 outputs on IO expander */
248 out = 0xff;
249 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
250fail1:
251 return rc;
252}
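
As a quick illustration of the PCA9539 writes in sfe4001_poweron(): the port-0 output byte treats the rail enables as active-low (writing all-ones powers everything off, clearing a bit turns a rail on), so the power-up loop composes the two values shown below. This is a standalone sketch that does no I2C and only reuses the bit positions defined above.

	/* Sketch of the PCA9539 port-0 output byte composed in sfe4001_poweron().
	 * Pure bit arithmetic, no I2C access. */
	#include <stdio.h>

	#define P0_EN_1V0X_LBN 0
	#define P0_EN_1V2_LBN  1
	#define P0_EN_2V5_LBN  2
	#define P0_EN_3V3X_LBN 3
	#define P0_EN_5V_LBN   4
	#define P0_X_TRST_LBN  6

	int main(void)
	{
		unsigned char out;

		/* Step 1: 1.2V, 2.5V, 3.3V and 5V rails on, XAUI TRST released */
		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
			       (1 << P0_X_TRST_LBN));
		printf("main rails on: 0x%02x\n", out);  /* 0xa1 */

		/* Step 2: 1V rail on as well */
		out &= ~(1 << P0_EN_1V0X_LBN);
		printf("all rails on:  0x%02x\n", out);  /* 0xa0 */
		return 0;
	}
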
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
new file mode 100644
index 000000000000..34412f3d41c9
--- /dev/null
+++ b/drivers/net/sfc/spi.h
@@ -0,0 +1,71 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_SPI_H
12#define EFX_SPI_H
13
14#include "net_driver.h"
15
16/**************************************************************************
17 *
18 * Basic SPI command set and bit definitions
19 *
20 *************************************************************************/
21
22/*
23 * Commands common to all known devices.
24 *
25 */
26
27/* Write status register */
28#define SPI_WRSR 0x01
29
30/* Write data to memory array */
31#define SPI_WRITE 0x02
32
33/* Read data from memory array */
34#define SPI_READ 0x03
35
36/* Reset write enable latch */
37#define SPI_WRDI 0x04
38
39/* Read status register */
40#define SPI_RDSR 0x05
41
42/* Set write enable latch */
43#define SPI_WREN 0x06
44
45/* SST: Enable write to status register */
46#define SPI_SST_EWSR 0x50
47
48/*
49 * Status register bits. Not all bits are supported on all devices.
50 *
51 */
52
53/* Write-protect pin enabled */
54#define SPI_STATUS_WPEN 0x80
55
56/* Block protection bit 2 */
57#define SPI_STATUS_BP2 0x10
58
59/* Block protection bit 1 */
60#define SPI_STATUS_BP1 0x08
61
62/* Block protection bit 0 */
63#define SPI_STATUS_BP0 0x04
64
65/* State of the write enable latch */
66#define SPI_STATUS_WEN 0x02
67
68/* Device busy flag */
69#define SPI_STATUS_NRDY 0x01
70
71#endif /* EFX_SPI_H */
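
spi.h only defines opcode and status values; how they are framed on the wire is device-specific. As a loose illustration (not code from this driver), a typical small SPI EEPROM write is a WREN command on its own, followed by the WRITE opcode, a big-endian address and the payload; the 2-byte address width assumed below varies by device.

	/* Sketch of typical SPI EEPROM command framing using the opcodes above.
	 * Only builds and prints the byte frames; no transport is involved. */
	#include <stdio.h>
	#include <stdint.h>

	#define SPI_WREN  0x06
	#define SPI_WRITE 0x02

	int main(void)
	{
		uint16_t addr = 0x0123;                 /* example address (assumed) */
		uint8_t wren_frame[1] = { SPI_WREN };
		uint8_t write_hdr[3] = { SPI_WRITE, addr >> 8, addr & 0xff };

		printf("frame 1: %02x\n", wren_frame[0]);
		printf("frame 2: %02x %02x %02x <data...>\n",
		       write_hdr[0], write_hdr[1], write_hdr[2]);
		return 0;
	}
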
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
new file mode 100644
index 000000000000..a2e9f79e47b1
--- /dev/null
+++ b/drivers/net/sfc/tenxpress.c
@@ -0,0 +1,434 @@
1/****************************************************************************
2 * Driver for Solarflare 802.3an compliant PHY
3 * Copyright 2007 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/delay.h>
11#include <linux/seq_file.h>
12#include "efx.h"
13#include "gmii.h"
14#include "mdio_10g.h"
15#include "falcon.h"
16#include "phy.h"
17#include "falcon_hwdefs.h"
18#include "boards.h"
19#include "mac.h"
20
21/* We expect these MMDs to be in the package */
22/* AN not here as mdio_check_mmds() requires STAT2 support */
23#define TENXPRESS_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PMAPMD | \
24 MDIO_MMDREG_DEVS0_PCS | \
25 MDIO_MMDREG_DEVS0_PHYXS)
26
27/* We complain if we fail to see the link partner as 10G capable this many
28 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
29 */
30#define MAX_BAD_LP_TRIES (5)
31
32/* Extended control register */
33#define PMA_PMD_XCONTROL_REG 0xc000
34#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
35#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
36
37/* extended status register */
38#define PMA_PMD_XSTATUS_REG 0xc001
39#define PMA_PMD_XSTAT_FLP_LBN (12)
40
41/* LED control register */
42#define PMA_PMD_LED_CTRL_REG (0xc007)
43#define PMA_PMA_LED_ACTIVITY_LBN (3)
44
45/* LED function override register */
46#define PMA_PMD_LED_OVERR_REG (0xc009)
47/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
48#define PMA_PMD_LED_LINK_LBN (0)
49#define PMA_PMD_LED_SPEED_LBN (2)
50#define PMA_PMD_LED_TX_LBN (4)
51#define PMA_PMD_LED_RX_LBN (6)
52/* Override settings */
53#define PMA_PMD_LED_AUTO (0) /* H/W control */
54#define PMA_PMD_LED_ON (1)
55#define PMA_PMD_LED_OFF (2)
56#define PMA_PMD_LED_FLASH (3)
57/* All LEDs under hardware control */
58#define PMA_PMD_LED_FULL_AUTO (0)
59/* Green and Amber under hardware control, Red off */
60#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
61
62
63/* Self test (BIST) control register */
64#define PMA_PMD_BIST_CTRL_REG (0xc014)
65#define PMA_PMD_BIST_BER_LBN (2) /* Run BER test */
66#define PMA_PMD_BIST_CONT_LBN (1) /* Run continuous BIST until cleared */
67#define PMA_PMD_BIST_SINGLE_LBN (0) /* Run 1 BIST iteration (self clears) */
68/* Self test status register */
69#define PMA_PMD_BIST_STAT_REG (0xc015)
70#define PMA_PMD_BIST_ENX_LBN (3)
71#define PMA_PMD_BIST_PMA_LBN (2)
72#define PMA_PMD_BIST_RXD_LBN (1)
73#define PMA_PMD_BIST_AFE_LBN (0)
74
75#define BIST_MAX_DELAY (1000)
76#define BIST_POLL_DELAY (10)
77
78/* Misc register defines */
79#define PCS_CLOCK_CTRL_REG 0xd801
80#define PLL312_RST_N_LBN 2
81
82#define PCS_SOFT_RST2_REG 0xd806
83#define SERDES_RST_N_LBN 13
84#define XGXS_RST_N_LBN 12
85
86#define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */
87#define CLK312_EN_LBN 3
88
89/* Boot status register */
90#define PCS_BOOT_STATUS_REG (0xd000)
91#define PCS_BOOT_FATAL_ERR_LBN (0)
92#define PCS_BOOT_PROGRESS_LBN (1)
93#define PCS_BOOT_PROGRESS_WIDTH (2)
94#define PCS_BOOT_COMPLETE_LBN (3)
95#define PCS_BOOT_MAX_DELAY (100)
96#define PCS_BOOT_POLL_DELAY (10)
97
98/* Time to wait between powering down the LNPGA and turning off the power
99 * rails */
100#define LNPGA_PDOWN_WAIT (HZ / 5)
101
102static int crc_error_reset_threshold = 100;
103module_param(crc_error_reset_threshold, int, 0644);
104MODULE_PARM_DESC(crc_error_reset_threshold,
105 "Max number of CRC errors before XAUI reset");
106
107struct tenxpress_phy_data {
108 enum tenxpress_state state;
109 atomic_t bad_crc_count;
110 int bad_lp_tries;
111};
112
113static int tenxpress_state_is(struct efx_nic *efx, int state)
114{
115 struct tenxpress_phy_data *phy_data = efx->phy_data;
116 return (phy_data != NULL) && (state == phy_data->state);
117}
118
119void tenxpress_set_state(struct efx_nic *efx,
120 enum tenxpress_state state)
121{
122 struct tenxpress_phy_data *phy_data = efx->phy_data;
123 if (phy_data != NULL)
124 phy_data->state = state;
125}
126
127void tenxpress_crc_err(struct efx_nic *efx)
128{
129 struct tenxpress_phy_data *phy_data = efx->phy_data;
130 if (phy_data != NULL)
131 atomic_inc(&phy_data->bad_crc_count);
132}
133
134/* Check that the C166 has booted successfully */
135static int tenxpress_phy_check(struct efx_nic *efx)
136{
137 int phy_id = efx->mii.phy_id;
138 int count = PCS_BOOT_MAX_DELAY / PCS_BOOT_POLL_DELAY;
139 int boot_stat;
140
141 /* Wait for the boot to complete (or not) */
142 while (count) {
143 boot_stat = mdio_clause45_read(efx, phy_id,
144 MDIO_MMD_PCS,
145 PCS_BOOT_STATUS_REG);
146 if (boot_stat & (1 << PCS_BOOT_COMPLETE_LBN))
147 break;
148 count--;
149 udelay(PCS_BOOT_POLL_DELAY);
150 }
151
152 if (!count) {
153 EFX_ERR(efx, "%s: PHY boot timed out. Last status "
154 "%x\n", __func__,
155 (boot_stat >> PCS_BOOT_PROGRESS_LBN) &
156 ((1 << PCS_BOOT_PROGRESS_WIDTH) - 1));
157 return -ETIMEDOUT;
158 }
159
160 return 0;
161}
162
163static void tenxpress_reset_xaui(struct efx_nic *efx);
164
165static int tenxpress_init(struct efx_nic *efx)
166{
167 int rc, reg;
168
169 /* Turn on the clock */
170 reg = (1 << CLK312_EN_LBN);
171 mdio_clause45_write(efx, efx->mii.phy_id,
172 MDIO_MMD_PCS, PCS_TEST_SELECT_REG, reg);
173
174 rc = tenxpress_phy_check(efx);
175 if (rc < 0)
176 return rc;
177
178 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
179 reg = mdio_clause45_read(efx, efx->mii.phy_id,
180 MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG);
181 reg |= (1 << PMA_PMA_LED_ACTIVITY_LBN);
182 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
183 PMA_PMD_LED_CTRL_REG, reg);
184
185 reg = PMA_PMD_LED_DEFAULT;
186 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
187 PMA_PMD_LED_OVERR_REG, reg);
188
189 return rc;
190}
191
192static int tenxpress_phy_init(struct efx_nic *efx)
193{
194 struct tenxpress_phy_data *phy_data;
195 int rc = 0;
196
197 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
198 efx->phy_data = phy_data;
199
200 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
201
202 rc = mdio_clause45_wait_reset_mmds(efx,
203 TENXPRESS_REQUIRED_DEVS);
204 if (rc < 0)
205 goto fail;
206
207 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
208 if (rc < 0)
209 goto fail;
210
211 rc = tenxpress_init(efx);
212 if (rc < 0)
213 goto fail;
214
215 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
216
217	/* Let the XGXS and SerDes out of reset and reset the 10Xpress */
218 falcon_reset_xaui(efx);
219
220 return 0;
221
222 fail:
223 kfree(efx->phy_data);
224 efx->phy_data = NULL;
225 return rc;
226}
227
228static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
229{
230 struct tenxpress_phy_data *pd = efx->phy_data;
231 int reg;
232
233 /* Nothing to do if all is well and was previously so. */
234 if (!(bad_lp || pd->bad_lp_tries))
235 return;
236
237 reg = mdio_clause45_read(efx, efx->mii.phy_id,
238 MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG);
239
240 if (bad_lp)
241 pd->bad_lp_tries++;
242 else
243 pd->bad_lp_tries = 0;
244
245 if (pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
246 pd->bad_lp_tries = 0; /* Restart count */
247 reg &= ~(PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
248 reg |= (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
249 EFX_ERR(efx, "This NIC appears to be plugged into"
250 " a port that is not 10GBASE-T capable.\n"
251 " This PHY is 10GBASE-T ONLY, so no link can"
252 " be established.\n");
253 } else {
254 reg |= (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN);
255 }
256 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
257 PMA_PMD_LED_OVERR_REG, reg);
258}
259
260/* Check link status and return a boolean OK value. If the link is NOT
261 * OK we have a quick rummage around to see if we appear to be plugged
262 * into a non-10GBASE-T port and, if so, warn the user that they won't
263 * get a link any time soon as we are 10GBASE-T only, unless the caller
264 * specified not to do this check (it isn't useful in loopback). */
265static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
266{
267 int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
268
269 if (ok) {
270 tenxpress_set_bad_lp(efx, 0);
271 } else if (check_lp) {
272 /* Are we plugged into the wrong sort of link? */
273 int bad_lp = 0;
274 int phy_id = efx->mii.phy_id;
275 int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
276 MDIO_AN_STATUS);
277 int xphy_stat = mdio_clause45_read(efx, phy_id,
278 MDIO_MMD_PMAPMD,
279 PMA_PMD_XSTATUS_REG);
280 /* Are we plugged into anything that sends FLPs? If
281 * not we can't distinguish between not being plugged
282 * in and being plugged into a non-AN antique. The FLP
283 * bit has the advantage of not clearing when autoneg
284 * restarts. */
285 if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
286 tenxpress_set_bad_lp(efx, 0);
287 return ok;
288 }
289
290 /* If it can do 10GBT it must be XNP capable */
291 bad_lp = !(an_stat & (1 << MDIO_AN_STATUS_XNP_LBN));
292 if (!bad_lp && (an_stat & (1 << MDIO_AN_STATUS_PAGE_LBN))) {
293 bad_lp = !(mdio_clause45_read(efx, phy_id,
294 MDIO_MMD_AN, MDIO_AN_10GBT_STATUS) &
295 (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN));
296 }
297 tenxpress_set_bad_lp(efx, bad_lp);
298 }
299 return ok;
300}
301
302static void tenxpress_phy_reconfigure(struct efx_nic *efx)
303{
304 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
305 return;
306
307 efx->link_up = tenxpress_link_ok(efx, 0);
308 efx->link_options = GM_LPA_10000FULL;
309}
310
311static void tenxpress_phy_clear_interrupt(struct efx_nic *efx)
312{
313 /* Nothing done here - LASI interrupts aren't reliable so poll */
314}
315
316
317/* Poll PHY for interrupt */
318static int tenxpress_phy_check_hw(struct efx_nic *efx)
319{
320 struct tenxpress_phy_data *phy_data = efx->phy_data;
321 int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL);
322 int link_ok;
323
324 link_ok = phy_up && tenxpress_link_ok(efx, 1);
325
326 if (link_ok != efx->link_up)
327 falcon_xmac_sim_phy_event(efx);
328
329 /* Nothing to check if we've already shut down the PHY */
330 if (!phy_up)
331 return 0;
332
333 if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
334 EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
335 falcon_reset_xaui(efx);
336 atomic_set(&phy_data->bad_crc_count, 0);
337 }
338
339 return 0;
340}
341
342static void tenxpress_phy_fini(struct efx_nic *efx)
343{
344 int reg;
345
346 /* Power down the LNPGA */
347 reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
348 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
349 PMA_PMD_XCONTROL_REG, reg);
350
351 /* Waiting here ensures that the board fini, which can turn off the
352 * power to the PHY, won't get run until the LNPGA powerdown has been
353 * given long enough to complete. */
354 schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
355
356 kfree(efx->phy_data);
357 efx->phy_data = NULL;
358}
359
360
361/* Set the RX and TX LEDs and Link LED flashing. The other LEDs
362 * (which probably aren't wired anyway) are left in AUTO mode */
363void tenxpress_phy_blink(struct efx_nic *efx, int blink)
364{
365 int reg;
366
367 if (blink)
368 reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) |
369 (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) |
370 (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN);
371 else
372 reg = PMA_PMD_LED_DEFAULT;
373
374 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
375 PMA_PMD_LED_OVERR_REG, reg);
376}
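
The LED override register used by tenxpress_init() and tenxpress_phy_blink() packs one 2-bit field per LED. A standalone sketch of that packing, reusing only the constants defined above:

	/* Sketch of the 2-bit-per-LED packing for PMA_PMD_LED_OVERR_REG.
	 * Pure arithmetic; no MDIO access. */
	#include <stdio.h>

	#define PMA_PMD_LED_LINK_LBN  0
	#define PMA_PMD_LED_SPEED_LBN 2
	#define PMA_PMD_LED_TX_LBN    4
	#define PMA_PMD_LED_RX_LBN    6

	#define PMA_PMD_LED_AUTO  0
	#define PMA_PMD_LED_ON    1
	#define PMA_PMD_LED_OFF   2
	#define PMA_PMD_LED_FLASH 3

	int main(void)
	{
		/* Default: RX LED forced off, everything else left to hardware */
		int def = PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;

		/* Blink mode as set up by tenxpress_phy_blink() */
		int blink = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) |
			    (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) |
			    (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN);

		printf("default=0x%02x blink=0x%02x\n", def, blink);  /* 0x80, 0xf3 */
		return 0;
	}
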
377
378static void tenxpress_reset_xaui(struct efx_nic *efx)
379{
380 int phy = efx->mii.phy_id;
381 int clk_ctrl, test_select, soft_rst2;
382
383	/* The real work is done on clock_ctrl; the other resets are thought
384	 * to be optional, but they make the reset more reliable
385 */
386
387 /* Read */
388 clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
389 PCS_CLOCK_CTRL_REG);
390 test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
391 PCS_TEST_SELECT_REG);
392 soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
393 PCS_SOFT_RST2_REG);
394
395 /* Put in reset */
396 test_select &= ~(1 << CLK312_EN_LBN);
397 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
398 PCS_TEST_SELECT_REG, test_select);
399
400 soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
401 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
402 PCS_SOFT_RST2_REG, soft_rst2);
403
404 clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
405 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
406 PCS_CLOCK_CTRL_REG, clk_ctrl);
407 udelay(10);
408
409 /* Remove reset */
410 clk_ctrl |= (1 << PLL312_RST_N_LBN);
411 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
412 PCS_CLOCK_CTRL_REG, clk_ctrl);
413 udelay(10);
414
415 soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
416 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
417 PCS_SOFT_RST2_REG, soft_rst2);
418 udelay(10);
419
420 test_select |= (1 << CLK312_EN_LBN);
421 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
422 PCS_TEST_SELECT_REG, test_select);
423 udelay(10);
424}
425
426struct efx_phy_operations falcon_tenxpress_phy_ops = {
427 .init = tenxpress_phy_init,
428 .reconfigure = tenxpress_phy_reconfigure,
429 .check_hw = tenxpress_phy_check_hw,
430 .fini = tenxpress_phy_fini,
431 .clear_interrupt = tenxpress_phy_clear_interrupt,
432 .reset_xaui = tenxpress_reset_xaui,
433 .mmds = TENXPRESS_REQUIRED_DEVS,
434};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
new file mode 100644
index 000000000000..fbb866b2185e
--- /dev/null
+++ b/drivers/net/sfc/tx.c
@@ -0,0 +1,452 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/pci.h>
12#include <linux/tcp.h>
13#include <linux/ip.h>
14#include <linux/in.h>
15#include <linux/if_ether.h>
16#include <linux/highmem.h>
17#include "net_driver.h"
18#include "tx.h"
19#include "efx.h"
20#include "falcon.h"
21#include "workarounds.h"
22
23/*
24 * TX descriptor ring full threshold
25 *
26 * The tx_queue descriptor ring fill-level must fall below this value
27 * before we restart the netif queue
28 */
29#define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \
30 (_tx_queue->efx->type->txd_ring_mask / 2u)
31
32/* We want to be able to nest calls to netif_stop_queue(), since each
33 * channel can have an individual stop on the queue.
34 */
35void efx_stop_queue(struct efx_nic *efx)
36{
37 spin_lock_bh(&efx->netif_stop_lock);
38 EFX_TRACE(efx, "stop TX queue\n");
39
40 atomic_inc(&efx->netif_stop_count);
41 netif_stop_queue(efx->net_dev);
42
43 spin_unlock_bh(&efx->netif_stop_lock);
44}
45
46/* Wake netif's TX queue
47 * We want to be able to nest calls to netif_stop_queue(), since each
48 * channel can have an individual stop on the queue.
49 */
50inline void efx_wake_queue(struct efx_nic *efx)
51{
52 local_bh_disable();
53 if (atomic_dec_and_lock(&efx->netif_stop_count,
54 &efx->netif_stop_lock)) {
55 EFX_TRACE(efx, "waking TX queue\n");
56 netif_wake_queue(efx->net_dev);
57 spin_unlock(&efx->netif_stop_lock);
58 }
59 local_bh_enable();
60}
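
The stop/wake pairing above is just reference counting: the queue is woken again only once every outstanding stop has been matched by a wake. A single-threaded userspace sketch of that bookkeeping (the real code uses an atomic counter protected by netif_stop_lock):

	/* Sketch of the nested stop/wake counting in efx_stop_queue()/efx_wake_queue(). */
	#include <stdio.h>

	static int stop_count;

	static void stop_queue(void)
	{
		++stop_count;
		printf("stop (count=%d)\n", stop_count);
	}

	static void wake_queue(void)
	{
		if (--stop_count == 0)
			printf("wake (count=0) -> queue actually restarts\n");
		else
			printf("wake deferred (count=%d)\n", stop_count);
	}

	int main(void)
	{
		stop_queue();   /* channel A stops the port */
		stop_queue();   /* channel B stops it too */
		wake_queue();   /* A done; still stopped for B */
		wake_queue();   /* B done; queue restarts */
		return 0;
	}
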
61
62static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
63 struct efx_tx_buffer *buffer)
64{
65 if (buffer->unmap_len) {
66 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
67 if (buffer->unmap_single)
68 pci_unmap_single(pci_dev, buffer->unmap_addr,
69 buffer->unmap_len, PCI_DMA_TODEVICE);
70 else
71 pci_unmap_page(pci_dev, buffer->unmap_addr,
72 buffer->unmap_len, PCI_DMA_TODEVICE);
73 buffer->unmap_len = 0;
74 buffer->unmap_single = 0;
75 }
76
77 if (buffer->skb) {
78 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
79 buffer->skb = NULL;
80 EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
81			  "complete\n", tx_queue->queue, tx_queue->read_count);
82 }
83}
84
85
86/*
87 * Add a socket buffer to a TX queue
88 *
89 * This maps all fragments of a socket buffer for DMA and adds them to
90 * the TX queue. The queue's insert pointer will be incremented by
91 * the number of fragments in the socket buffer.
92 *
93 * If any DMA mapping fails, any mapped fragments will be unmapped and
94 * the queue's insert pointer will be restored to its original value.
95 *
96 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
97 * You must hold netif_tx_lock() to call this function.
98 */
99static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
100 const struct sk_buff *skb)
101{
102 struct efx_nic *efx = tx_queue->efx;
103 struct pci_dev *pci_dev = efx->pci_dev;
104 struct efx_tx_buffer *buffer;
105 skb_frag_t *fragment;
106 struct page *page;
107 int page_offset;
108 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
109 dma_addr_t dma_addr, unmap_addr = 0;
110 unsigned int dma_len;
111 unsigned unmap_single;
112 int q_space, i = 0;
113 int rc = NETDEV_TX_OK;
114
115 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
116
117 /* Get size of the initial fragment */
118 len = skb_headlen(skb);
119
120 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
121 q_space = efx->type->txd_ring_mask - 1 - fill_level;
122
123 /* Map for DMA. Use pci_map_single rather than pci_map_page
124 * since this is more efficient on machines with sparse
125 * memory.
126 */
127 unmap_single = 1;
128 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
129
130 /* Process all fragments */
131 while (1) {
132 if (unlikely(pci_dma_mapping_error(dma_addr)))
133 goto pci_err;
134
135 /* Store fields for marking in the per-fragment final
136 * descriptor */
137 unmap_len = len;
138 unmap_addr = dma_addr;
139
140 /* Add to TX queue, splitting across DMA boundaries */
141 do {
142 if (unlikely(q_space-- <= 0)) {
143 /* It might be that completions have
144 * happened since the xmit path last
145 * checked. Update the xmit path's
146 * copy of read_count.
147 */
148 ++tx_queue->stopped;
149 /* This memory barrier protects the
150 * change of stopped from the access
151 * of read_count. */
152 smp_mb();
153 tx_queue->old_read_count =
154 *(volatile unsigned *)
155 &tx_queue->read_count;
156 fill_level = (tx_queue->insert_count
157 - tx_queue->old_read_count);
158 q_space = (efx->type->txd_ring_mask - 1 -
159 fill_level);
160 if (unlikely(q_space-- <= 0))
161 goto stop;
162 smp_mb();
163 --tx_queue->stopped;
164 }
165
166 insert_ptr = (tx_queue->insert_count &
167 efx->type->txd_ring_mask);
168 buffer = &tx_queue->buffer[insert_ptr];
169 EFX_BUG_ON_PARANOID(buffer->skb);
170 EFX_BUG_ON_PARANOID(buffer->len);
171 EFX_BUG_ON_PARANOID(buffer->continuation != 1);
172 EFX_BUG_ON_PARANOID(buffer->unmap_len);
173
174 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
175 if (likely(dma_len > len))
176 dma_len = len;
177
178 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
179 if (misalign && dma_len + misalign > 512)
180 dma_len = 512 - misalign;
181
182 /* Fill out per descriptor fields */
183 buffer->len = dma_len;
184 buffer->dma_addr = dma_addr;
185 len -= dma_len;
186 dma_addr += dma_len;
187 ++tx_queue->insert_count;
188 } while (len);
189
190 /* Transfer ownership of the unmapping to the final buffer */
191 buffer->unmap_addr = unmap_addr;
192 buffer->unmap_single = unmap_single;
193 buffer->unmap_len = unmap_len;
194 unmap_len = 0;
195
196 /* Get address and size of next fragment */
197 if (i >= skb_shinfo(skb)->nr_frags)
198 break;
199 fragment = &skb_shinfo(skb)->frags[i];
200 len = fragment->size;
201 page = fragment->page;
202 page_offset = fragment->page_offset;
203 i++;
204 /* Map for DMA */
205 unmap_single = 0;
206 dma_addr = pci_map_page(pci_dev, page, page_offset, len,
207 PCI_DMA_TODEVICE);
208 }
209
210 /* Transfer ownership of the skb to the final buffer */
211 buffer->skb = skb;
212 buffer->continuation = 0;
213
214 /* Pass off to hardware */
215 falcon_push_buffers(tx_queue);
216
217 return NETDEV_TX_OK;
218
219 pci_err:
220 EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
221 "fragments for DMA\n", tx_queue->queue, skb->len,
222 skb_shinfo(skb)->nr_frags + 1);
223
224 /* Mark the packet as transmitted, and free the SKB ourselves */
225 dev_kfree_skb_any((struct sk_buff *)skb);
226 goto unwind;
227
228 stop:
229 rc = NETDEV_TX_BUSY;
230
231 if (tx_queue->stopped == 1)
232 efx_stop_queue(efx);
233
234 unwind:
235 /* Work backwards until we hit the original insert pointer value */
236 while (tx_queue->insert_count != tx_queue->write_count) {
237 --tx_queue->insert_count;
238 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
239 buffer = &tx_queue->buffer[insert_ptr];
240 efx_dequeue_buffer(tx_queue, buffer);
241 buffer->len = 0;
242 }
243
244 /* Free the fragment we were mid-way through pushing */
245 if (unmap_len)
246 pci_unmap_page(pci_dev, unmap_addr, unmap_len,
247 PCI_DMA_TODEVICE);
248
249 return rc;
250}
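
One detail in efx_enqueue_skb() worth a worked example is the boundary split: dma_len = ((~dma_addr) & tx_dma_mask) + 1 is the number of bytes left before the next descriptor boundary, so a fragment that straddles a boundary is emitted as two descriptors. The 4KB mask and the address below are illustrative assumptions (the bug5391 misalignment clamp is omitted):

	/* Standalone sketch of the descriptor-boundary split in efx_enqueue_skb(). */
	#include <stdio.h>

	int main(void)
	{
		unsigned long tx_dma_mask = 0xfff;      /* assumed 4KB boundary */
		unsigned long dma_addr = 0x12345f40UL;  /* example address (assumed) */
		unsigned int len = 500;                 /* bytes left in this fragment */

		while (len) {
			unsigned int dma_len = ((~dma_addr) & tx_dma_mask) + 1;
			if (dma_len > len)
				dma_len = len;
			/* Prints: 192 bytes @ 0x12345f40, then 308 bytes @ 0x12346000 */
			printf("descriptor: %u bytes @ 0x%lx\n", dma_len, dma_addr);
			dma_addr += dma_len;
			len -= dma_len;
		}
		return 0;
	}
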
251
252/* Remove packets from the TX queue
253 *
254 * This removes packets from the TX queue, up to and including the
255 * specified index.
256 */
257static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
258 unsigned int index)
259{
260 struct efx_nic *efx = tx_queue->efx;
261 unsigned int stop_index, read_ptr;
262 unsigned int mask = tx_queue->efx->type->txd_ring_mask;
263
264 stop_index = (index + 1) & mask;
265 read_ptr = tx_queue->read_count & mask;
266
267 while (read_ptr != stop_index) {
268 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
269 if (unlikely(buffer->len == 0)) {
270 EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
271 "completion id %x\n", tx_queue->queue,
272 read_ptr);
273 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
274 return;
275 }
276
277 efx_dequeue_buffer(tx_queue, buffer);
278 buffer->continuation = 1;
279 buffer->len = 0;
280
281 ++tx_queue->read_count;
282 read_ptr = tx_queue->read_count & mask;
283 }
284}
285
286/* Initiate a packet transmission on the specified TX queue.
287 * Note that returning anything other than NETDEV_TX_OK will cause the
288 * OS to free the skb.
289 *
290 * This function is split out from efx_hard_start_xmit to allow the
291 * loopback test to direct packets via specific TX queues. It is
292 * therefore a non-static inline, so as not to penalise performance
293 * for non-loopback transmissions.
294 *
295 * Context: netif_tx_lock held
296 */
297inline int efx_xmit(struct efx_nic *efx,
298 struct efx_tx_queue *tx_queue, struct sk_buff *skb)
299{
300 int rc;
301
302 /* Map fragments for DMA and add to TX queue */
303 rc = efx_enqueue_skb(tx_queue, skb);
304 if (unlikely(rc != NETDEV_TX_OK))
305 goto out;
306
307 /* Update last TX timer */
308 efx->net_dev->trans_start = jiffies;
309
310 out:
311 return rc;
312}
313
314/* Initiate a packet transmission. We use one channel per CPU
315 * (sharing when we have more CPUs than channels). On Falcon, the TX
316 * completion events will be directed back to the CPU that transmitted
317 * the packet, which should be cache-efficient.
318 *
319 * Context: non-blocking.
320 * Note that returning anything other than NETDEV_TX_OK will cause the
321 * OS to free the skb.
322 */
323int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
324{
325 struct efx_nic *efx = net_dev->priv;
326 return efx_xmit(efx, &efx->tx_queue[0], skb);
327}
328
329void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
330{
331 unsigned fill_level;
332 struct efx_nic *efx = tx_queue->efx;
333
334 EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
335
336 efx_dequeue_buffers(tx_queue, index);
337
338 /* See if we need to restart the netif queue. This barrier
339 * separates the update of read_count from the test of
340 * stopped. */
341 smp_mb();
342 if (unlikely(tx_queue->stopped)) {
343 fill_level = tx_queue->insert_count - tx_queue->read_count;
344 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
345 EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
346
347 /* Do this under netif_tx_lock(), to avoid racing
348 * with efx_xmit(). */
349 netif_tx_lock(efx->net_dev);
350 if (tx_queue->stopped) {
351 tx_queue->stopped = 0;
352 efx_wake_queue(efx);
353 }
354 netif_tx_unlock(efx->net_dev);
355 }
356 }
357}
358
359int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
360{
361 struct efx_nic *efx = tx_queue->efx;
362 unsigned int txq_size;
363 int i, rc;
364
365 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
366
367 /* Allocate software ring */
368 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
369 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
370 if (!tx_queue->buffer) {
371 rc = -ENOMEM;
372 goto fail1;
373 }
374 for (i = 0; i <= efx->type->txd_ring_mask; ++i)
375 tx_queue->buffer[i].continuation = 1;
376
377 /* Allocate hardware ring */
378 rc = falcon_probe_tx(tx_queue);
379 if (rc)
380 goto fail2;
381
382 return 0;
383
384 fail2:
385 kfree(tx_queue->buffer);
386 tx_queue->buffer = NULL;
387 fail1:
388 tx_queue->used = 0;
389
390 return rc;
391}
392
393int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
394{
395 EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
396
397 tx_queue->insert_count = 0;
398 tx_queue->write_count = 0;
399 tx_queue->read_count = 0;
400 tx_queue->old_read_count = 0;
401 BUG_ON(tx_queue->stopped);
402
403 /* Set up TX descriptor ring */
404 return falcon_init_tx(tx_queue);
405}
406
407void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
408{
409 struct efx_tx_buffer *buffer;
410
411 if (!tx_queue->buffer)
412 return;
413
414 /* Free any buffers left in the ring */
415 while (tx_queue->read_count != tx_queue->write_count) {
416 buffer = &tx_queue->buffer[tx_queue->read_count &
417 tx_queue->efx->type->txd_ring_mask];
418 efx_dequeue_buffer(tx_queue, buffer);
419 buffer->continuation = 1;
420 buffer->len = 0;
421
422 ++tx_queue->read_count;
423 }
424}
425
426void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
427{
428 EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
429
430 /* Flush TX queue, remove descriptor ring */
431 falcon_fini_tx(tx_queue);
432
433 efx_release_tx_buffers(tx_queue);
434
435 /* Release queue's stop on port, if any */
436 if (tx_queue->stopped) {
437 tx_queue->stopped = 0;
438 efx_wake_queue(tx_queue->efx);
439 }
440}
441
442void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
443{
444 EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
445 falcon_remove_tx(tx_queue);
446
447 kfree(tx_queue->buffer);
448 tx_queue->buffer = NULL;
449 tx_queue->used = 0;
450}
451
452
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
new file mode 100644
index 000000000000..1526a73b4b51
--- /dev/null
+++ b/drivers/net/sfc/tx.h
@@ -0,0 +1,24 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_TX_H
12#define EFX_TX_H
13
14#include "net_driver.h"
15
16int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
17void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
18int efx_init_tx_queue(struct efx_tx_queue *tx_queue);
19void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
20
21int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
22void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
23
24#endif /* EFX_TX_H */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
new file mode 100644
index 000000000000..dca62f190198
--- /dev/null
+++ b/drivers/net/sfc/workarounds.h
@@ -0,0 +1,56 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_WORKAROUNDS_H
11#define EFX_WORKAROUNDS_H
12
13/*
14 * Hardware workarounds.
15 * Bug numbers are from Solarflare's Bugzilla.
16 */
17
18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
20
21/* XAUI resets if link not detected */
22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
23/* SNAP frames have TOBE_DISC set */
24#define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS
25/* RX PCIe double split performance issue */
26#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
27/* TX pkt parser problem with <= 16 byte TXes */
28#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
29/* XGXS and XAUI reset sequencing in SW */
30#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
31/* Low rate CRC errors require XAUI reset */
32#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
33/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
34 * or a PCIe error (bug 11028) */
35#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
36/* Transmit flow control may get disabled */
37#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS
38/* Flush events can take a very long time to appear */
39#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
40
41/* Spurious parity errors in TSORT buffers */
42#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
43/* iSCSI parsing errors */
44#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
45/* RX events go missing */
46#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
47/* RX_RESET on A1 */
48#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
49/* Increase filter depth to avoid RX_RESET */
50#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
51/* Flushes may never complete */
52#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A
53/* Leak overlength packets rather than free */
54#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
55
56#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/sfc/xenpack.h b/drivers/net/sfc/xenpack.h
new file mode 100644
index 000000000000..b0d1f225b70a
--- /dev/null
+++ b/drivers/net/sfc/xenpack.h
@@ -0,0 +1,62 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_XENPACK_H
11#define EFX_XENPACK_H
12
13/* Exported functions from Xenpack standard PHY control */
14
15#include "mdio_10g.h"
16
17/****************************************************************************/
18/* XENPACK MDIO register extensions */
19#define MDIO_XP_LASI_RX_CTRL (0x9000)
20#define MDIO_XP_LASI_TX_CTRL (0x9001)
21#define MDIO_XP_LASI_CTRL (0x9002)
22#define MDIO_XP_LASI_RX_STAT (0x9003)
23#define MDIO_XP_LASI_TX_STAT (0x9004)
24#define MDIO_XP_LASI_STAT (0x9005)
25
26/* Control/Status bits */
27#define XP_LASI_LS_ALARM (1 << 0)
28#define XP_LASI_TX_ALARM (1 << 1)
29#define XP_LASI_RX_ALARM (1 << 2)
30/* These two are Quake vendor extensions to the standard XENPACK defines */
31#define XP_LASI_LS_INTB (1 << 3)
32#define XP_LASI_TEST (1 << 7)
33
34/* Enable LASI interrupts for PHY */
35static inline void xenpack_enable_lasi_irqs(struct efx_nic *efx)
36{
37 int reg;
38 int phy_id = efx->mii.phy_id;
39 /* Read to clear LASI status register */
40 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
41 MDIO_XP_LASI_STAT);
42
43 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
44 MDIO_XP_LASI_CTRL, XP_LASI_LS_ALARM);
45}
46
47/* Read the LASI interrupt status to clear the interrupt. */
48static inline int xenpack_clear_lasi_irqs(struct efx_nic *efx)
49{
50 /* Read to clear link status alarm */
51 return mdio_clause45_read(efx, efx->mii.phy_id,
52 MDIO_MMD_PMAPMD, MDIO_XP_LASI_STAT);
53}
54
55/* Turn off LASI interrupts */
56static inline void xenpack_disable_lasi_irqs(struct efx_nic *efx)
57{
58 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
59 MDIO_XP_LASI_CTRL, 0);
60}
61
62#endif /* EFX_XENPACK_H */
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
new file mode 100644
index 000000000000..66dd5bf1eaa9
--- /dev/null
+++ b/drivers/net/sfc/xfp_phy.c
@@ -0,0 +1,132 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9/*
10 * Driver for XFP optical PHYs (plus some support specific to the Quake 2032)
11 * See www.amcc.com for details (search for qt2032)
12 */
13
14#include <linux/timer.h>
15#include <linux/delay.h>
16#include "efx.h"
17#include "gmii.h"
18#include "mdio_10g.h"
19#include "xenpack.h"
20#include "phy.h"
21#include "mac.h"
22
23#define XFP_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PCS | \
24 MDIO_MMDREG_DEVS0_PMAPMD | \
25 MDIO_MMDREG_DEVS0_PHYXS)
26
27/****************************************************************************/
28/* Quake-specific MDIO registers */
29#define MDIO_QUAKE_LED0_REG (0xD006)
30
31void xfp_set_led(struct efx_nic *p, int led, int mode)
32{
33 int addr = MDIO_QUAKE_LED0_REG + led;
34 mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr,
35 mode);
36}
37
38#define XFP_MAX_RESET_TIME 500
39#define XFP_RESET_WAIT 10
40
41/* Reset the PHYXS MMD. This is documented (for the Quake PHY) as doing
42 * a complete soft reset.
43 */
44static int xfp_reset_phy(struct efx_nic *efx)
45{
46 int rc;
47
48 rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS,
49 XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
50 XFP_RESET_WAIT);
51 if (rc < 0)
52 goto fail;
53
54 /* Wait 250ms for the PHY to complete bootup */
55 msleep(250);
56
57 /* Check that all the MMDs we expect are present and responding. We
58 * expect faults on some if the link is down, but not on the PHY XS */
59 rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS,
60 MDIO_MMDREG_DEVS0_PHYXS);
61 if (rc < 0)
62 goto fail;
63
64 efx->board_info.init_leds(efx);
65
66 return rc;
67
68 fail:
69 EFX_ERR(efx, "XFP: reset timed out!\n");
70 return rc;
71}
72
73static int xfp_phy_init(struct efx_nic *efx)
74{
75 u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
76 int rc;
77
78 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
79 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
80 MDIO_ID_REV(devid));
81
82 rc = xfp_reset_phy(efx);
83
84 EFX_INFO(efx, "XFP: PHY init %s.\n",
85 rc ? "failed" : "successful");
86
87 return rc;
88}
89
90static void xfp_phy_clear_interrupt(struct efx_nic *efx)
91{
92 xenpack_clear_lasi_irqs(efx);
93}
94
95static int xfp_link_ok(struct efx_nic *efx)
96{
97 return mdio_clause45_links_ok(efx, XFP_REQUIRED_DEVS);
98}
99
100static int xfp_phy_check_hw(struct efx_nic *efx)
101{
102 int rc = 0;
103 int link_up = xfp_link_ok(efx);
104 /* Simulate a PHY event if link state has changed */
105 if (link_up != efx->link_up)
106 falcon_xmac_sim_phy_event(efx);
107
108 return rc;
109}
110
111static void xfp_phy_reconfigure(struct efx_nic *efx)
112{
113 efx->link_up = xfp_link_ok(efx);
114 efx->link_options = GM_LPA_10000FULL;
115}
116
117
118static void xfp_phy_fini(struct efx_nic *efx)
119{
120 /* Clobber the LED if it was blinking */
121 efx->board_info.blink(efx, 0);
122}
123
124struct efx_phy_operations falcon_xfp_phy_ops = {
125 .init = xfp_phy_init,
126 .reconfigure = xfp_phy_reconfigure,
127 .check_hw = xfp_phy_check_hw,
128 .fini = xfp_phy_fini,
129 .clear_interrupt = xfp_phy_clear_interrupt,
130 .reset_xaui = efx_port_dummy_op_void,
131 .mmds = XFP_REQUIRED_DEVS,
132};
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 20745fd4e973..abc63b0663be 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -212,6 +212,12 @@ enum _DescStatusBit {
212 THOL2 = 0x20000000, 212 THOL2 = 0x20000000,
213 THOL1 = 0x10000000, 213 THOL1 = 0x10000000,
214 THOL0 = 0x00000000, 214 THOL0 = 0x00000000,
215
216 WND = 0x00080000,
217 TABRT = 0x00040000,
218 FIFO = 0x00020000,
219 LINK = 0x00010000,
220 ColCountMask = 0x0000ffff,
215 /* RxDesc.status */ 221 /* RxDesc.status */
216 IPON = 0x20000000, 222 IPON = 0x20000000,
217 TCPON = 0x10000000, 223 TCPON = 0x10000000,
@@ -480,30 +486,23 @@ static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
480 desc->status = 0x0; 486 desc->status = 0x0;
481} 487}
482 488
483static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff, 489static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
484 struct RxDesc *desc, u32 rx_buf_sz) 490 struct RxDesc *desc)
485{ 491{
492 u32 rx_buf_sz = tp->rx_buf_sz;
486 struct sk_buff *skb; 493 struct sk_buff *skb;
487 dma_addr_t mapping;
488 int ret = 0;
489
490 skb = dev_alloc_skb(rx_buf_sz);
491 if (!skb)
492 goto err_out;
493
494 *sk_buff = skb;
495 494
496 mapping = pci_map_single(pdev, skb->data, rx_buf_sz, 495 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
497 PCI_DMA_FROMDEVICE); 496 if (likely(skb)) {
497 dma_addr_t mapping;
498 498
499 sis190_map_to_asic(desc, mapping, rx_buf_sz); 499 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
500out: 500 PCI_DMA_FROMDEVICE);
501 return ret; 501 sis190_map_to_asic(desc, mapping, rx_buf_sz);
502 } else
503 sis190_make_unusable_by_asic(desc);
502 504
503err_out: 505 return skb;
504 ret = -ENOMEM;
505 sis190_make_unusable_by_asic(desc);
506 goto out;
507} 506}
508 507
509static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, 508static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
@@ -512,37 +511,41 @@ static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
512 u32 cur; 511 u32 cur;
513 512
514 for (cur = start; cur < end; cur++) { 513 for (cur = start; cur < end; cur++) {
515 int ret, i = cur % NUM_RX_DESC; 514 unsigned int i = cur % NUM_RX_DESC;
516 515
517 if (tp->Rx_skbuff[i]) 516 if (tp->Rx_skbuff[i])
518 continue; 517 continue;
519 518
520 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i, 519 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
521 tp->RxDescRing + i, tp->rx_buf_sz); 520
522 if (ret < 0) 521 if (!tp->Rx_skbuff[i])
523 break; 522 break;
524 } 523 }
525 return cur - start; 524 return cur - start;
526} 525}
527 526
528static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size, 527static bool sis190_try_rx_copy(struct sis190_private *tp,
529 struct RxDesc *desc, int rx_buf_sz) 528 struct sk_buff **sk_buff, int pkt_size,
529 dma_addr_t addr)
530{ 530{
531 int ret = -1; 531 struct sk_buff *skb;
532 bool done = false;
532 533
533 if (pkt_size < rx_copybreak) { 534 if (pkt_size >= rx_copybreak)
534 struct sk_buff *skb; 535 goto out;
535 536
536 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); 537 skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
537 if (skb) { 538 if (!skb)
538 skb_reserve(skb, NET_IP_ALIGN); 539 goto out;
539 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); 540
540 *sk_buff = skb; 541 pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size,
541 sis190_give_to_asic(desc, rx_buf_sz); 542 PCI_DMA_FROMDEVICE);
542 ret = 0; 543 skb_reserve(skb, 2);
543 } 544 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
544 } 545 *sk_buff = skb;
545 return ret; 546 done = true;
547out:
548 return done;
546} 549}
547 550
548static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) 551static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
@@ -592,9 +595,9 @@ static int sis190_rx_interrupt(struct net_device *dev,
592 sis190_give_to_asic(desc, tp->rx_buf_sz); 595 sis190_give_to_asic(desc, tp->rx_buf_sz);
593 else { 596 else {
594 struct sk_buff *skb = tp->Rx_skbuff[entry]; 597 struct sk_buff *skb = tp->Rx_skbuff[entry];
598 dma_addr_t addr = le32_to_cpu(desc->addr);
595 int pkt_size = (status & RxSizeMask) - 4; 599 int pkt_size = (status & RxSizeMask) - 4;
596 void (*pci_action)(struct pci_dev *, dma_addr_t, 600 struct pci_dev *pdev = tp->pci_dev;
597 size_t, int) = pci_dma_sync_single_for_device;
598 601
599 if (unlikely(pkt_size > tp->rx_buf_sz)) { 602 if (unlikely(pkt_size > tp->rx_buf_sz)) {
600 net_intr(tp, KERN_INFO 603 net_intr(tp, KERN_INFO
@@ -606,20 +609,18 @@ static int sis190_rx_interrupt(struct net_device *dev,
606 continue; 609 continue;
607 } 610 }
608 611
609 pci_dma_sync_single_for_cpu(tp->pci_dev,
610 le32_to_cpu(desc->addr), tp->rx_buf_sz,
611 PCI_DMA_FROMDEVICE);
612 612
613 if (sis190_try_rx_copy(&skb, pkt_size, desc, 613 if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
614 tp->rx_buf_sz)) { 614 pci_dma_sync_single_for_device(pdev, addr,
615 pci_action = pci_unmap_single; 615 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
616 sis190_give_to_asic(desc, tp->rx_buf_sz);
617 } else {
618 pci_unmap_single(pdev, addr, tp->rx_buf_sz,
619 PCI_DMA_FROMDEVICE);
616 tp->Rx_skbuff[entry] = NULL; 620 tp->Rx_skbuff[entry] = NULL;
617 sis190_make_unusable_by_asic(desc); 621 sis190_make_unusable_by_asic(desc);
618 } 622 }
619 623
620 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
621 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
622
623 skb_put(skb, pkt_size); 624 skb_put(skb, pkt_size);
624 skb->protocol = eth_type_trans(skb, dev); 625 skb->protocol = eth_type_trans(skb, dev);
625 626
@@ -658,9 +659,31 @@ static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
658 memset(desc, 0x00, sizeof(*desc)); 659 memset(desc, 0x00, sizeof(*desc));
659} 660}
660 661
662static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
663{
664#define TxErrMask (WND | TABRT | FIFO | LINK)
665
666 if (!unlikely(status & TxErrMask))
667 return 0;
668
669 if (status & WND)
670 stats->tx_window_errors++;
671 if (status & TABRT)
672 stats->tx_aborted_errors++;
673 if (status & FIFO)
674 stats->tx_fifo_errors++;
675 if (status & LINK)
676 stats->tx_carrier_errors++;
677
678 stats->tx_errors++;
679
680 return -1;
681}
682
661static void sis190_tx_interrupt(struct net_device *dev, 683static void sis190_tx_interrupt(struct net_device *dev,
662 struct sis190_private *tp, void __iomem *ioaddr) 684 struct sis190_private *tp, void __iomem *ioaddr)
663{ 685{
686 struct net_device_stats *stats = &dev->stats;
664 u32 pending, dirty_tx = tp->dirty_tx; 687 u32 pending, dirty_tx = tp->dirty_tx;
665 /* 688 /*
666 * It would not be needed if queueing was allowed to be enabled 689 * It would not be needed if queueing was allowed to be enabled
@@ -675,15 +698,19 @@ static void sis190_tx_interrupt(struct net_device *dev,
675 for (; pending; pending--, dirty_tx++) { 698 for (; pending; pending--, dirty_tx++) {
676 unsigned int entry = dirty_tx % NUM_TX_DESC; 699 unsigned int entry = dirty_tx % NUM_TX_DESC;
677 struct TxDesc *txd = tp->TxDescRing + entry; 700 struct TxDesc *txd = tp->TxDescRing + entry;
701 u32 status = le32_to_cpu(txd->status);
678 struct sk_buff *skb; 702 struct sk_buff *skb;
679 703
680 if (le32_to_cpu(txd->status) & OWNbit) 704 if (status & OWNbit)
681 break; 705 break;
682 706
683 skb = tp->Tx_skbuff[entry]; 707 skb = tp->Tx_skbuff[entry];
684 708
685 dev->stats.tx_packets++; 709 if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
686 dev->stats.tx_bytes += skb->len; 710 stats->tx_packets++;
711 stats->tx_bytes += skb->len;
712 stats->collisions += ((status & ColCountMask) - 1);
713 }
687 714
688 sis190_unmap_tx_skb(tp->pci_dev, skb, txd); 715 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
689 tp->Tx_skbuff[entry] = NULL; 716 tp->Tx_skbuff[entry] = NULL;
@@ -904,10 +931,9 @@ static void sis190_phy_task(struct work_struct *work)
904 mod_timer(&tp->timer, jiffies + HZ/10); 931 mod_timer(&tp->timer, jiffies + HZ/10);
905 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) & 932 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
906 BMSR_ANEGCOMPLETE)) { 933 BMSR_ANEGCOMPLETE)) {
907 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
908 dev->name);
909 netif_carrier_off(dev); 934 netif_carrier_off(dev);
910 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET); 935 net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
936 dev->name);
911 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); 937 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
912 } else { 938 } else {
913 /* Rejoice ! */ 939 /* Rejoice ! */
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 5a55ede352f4..84af68fdb6c2 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -396,14 +396,14 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
396 396
397 /* Order of next two lines is *very* important. 397 /* Order of next two lines is *very* important.
398 * When we are sending a little amount of data, 398 * When we are sending a little amount of data,
399 * the transfer may be completed inside driver.write() 399 * the transfer may be completed inside the ops->write()
400 * routine, because it's running with interrupts enabled. 400 * routine, because it's running with interrupts enabled.
401 * In this case we *never* got WRITE_WAKEUP event, 401 * In this case we *never* got WRITE_WAKEUP event,
402 * if we did not request it before write operation. 402 * if we did not request it before write operation.
403 * 14 Oct 1994 Dmitry Gorodchanin. 403 * 14 Oct 1994 Dmitry Gorodchanin.
404 */ 404 */
405 sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 405 sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
406 actual = sl->tty->driver->write(sl->tty, sl->xbuff, count); 406 actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
407#ifdef SL_CHECK_TRANSMIT 407#ifdef SL_CHECK_TRANSMIT
408 sl->dev->trans_start = jiffies; 408 sl->dev->trans_start = jiffies;
409#endif 409#endif
@@ -437,7 +437,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
437 return; 437 return;
438 } 438 }
439 439
440 actual = tty->driver->write(tty, sl->xhead, sl->xleft); 440 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
441 sl->xleft -= actual; 441 sl->xleft -= actual;
442 sl->xhead += actual; 442 sl->xhead += actual;
443} 443}
@@ -462,7 +462,7 @@ static void sl_tx_timeout(struct net_device *dev)
462 } 462 }
463 printk(KERN_WARNING "%s: transmit timed out, %s?\n", 463 printk(KERN_WARNING "%s: transmit timed out, %s?\n",
464 dev->name, 464 dev->name,
465 (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ? 465 (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
466 "bad line quality" : "driver error"); 466 "bad line quality" : "driver error");
467 sl->xleft = 0; 467 sl->xleft = 0;
468 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 468 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
@@ -830,6 +830,9 @@ static int slip_open(struct tty_struct *tty)
830 if (!capable(CAP_NET_ADMIN)) 830 if (!capable(CAP_NET_ADMIN))
831 return -EPERM; 831 return -EPERM;
832 832
833 if (tty->ops->write == NULL)
834 return -EOPNOTSUPP;
835
833 /* RTnetlink lock is misused here to serialize concurrent 836 /* RTnetlink lock is misused here to serialize concurrent
834 opens of slip channels. There are better ways, but it is 837 opens of slip channels. There are better ways, but it is
835 the simplest one. 838 the simplest one.
@@ -1432,7 +1435,7 @@ static void sl_outfill(unsigned long sls)
1432 /* put END into tty queue. Is it right ??? */ 1435 /* put END into tty queue. Is it right ??? */
1433 if (!netif_queue_stopped(sl->dev)) { 1436 if (!netif_queue_stopped(sl->dev)) {
1434 /* if device busy no outfill */ 1437 /* if device busy no outfill */
1435 sl->tty->driver->write(sl->tty, &s, 1); 1438 sl->tty->ops->write(sl->tty, &s, 1);
1436 } 1439 }
1437 } else 1440 } else
1438 set_bit(SLF_OUTWAIT, &sl->flags); 1441 set_bit(SLF_OUTWAIT, &sl->flags);
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index e83b166aa6b9..432e837a1760 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -649,7 +649,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
649 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); 649 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
650 } 650 }
651 651
652 if (!capable(CAP_NET_ADMIN)) 652 if (!capable(CAP_SYS_RAWIO))
653 return -EPERM; 653 return -EPERM;
654 654
655 switch (data[0]) { 655 switch (data[0]) {
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e3f74c9f78bd..07b3f77e7626 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
64 64
65#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": " 66#define PFX DRV_MODULE_NAME ": "
67#define DRV_MODULE_VERSION "3.91" 67#define DRV_MODULE_VERSION "3.92"
68#define DRV_MODULE_RELDATE "April 18, 2008" 68#define DRV_MODULE_RELDATE "May 2, 2008"
69 69
70#define TG3_DEF_MAC_MODE 0 70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0 71#define TG3_DEF_RX_MODE 0
@@ -1656,12 +1656,76 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1656 return 0; 1656 return 0;
1657} 1657}
1658 1658
1659/* tp->lock is held. */
1660static void tg3_wait_for_event_ack(struct tg3 *tp)
1661{
1662 int i;
1663
1664 /* Wait for up to 2.5 seconds (250000 iterations of udelay(10)) */
1665 for (i = 0; i < 250000; i++) {
1666 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1667 break;
1668 udelay(10);
1669 }
1670}
1671
1672/* tp->lock is held. */
1673static void tg3_ump_link_report(struct tg3 *tp)
1674{
1675 u32 reg;
1676 u32 val;
1677
1678 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1679 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1680 return;
1681
1682 tg3_wait_for_event_ack(tp);
1683
1684 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1685
1686 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1687
1688 val = 0;
1689 if (!tg3_readphy(tp, MII_BMCR, &reg))
1690 val = reg << 16;
1691 if (!tg3_readphy(tp, MII_BMSR, &reg))
1692 val |= (reg & 0xffff);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1694
1695 val = 0;
1696 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1697 val = reg << 16;
1698 if (!tg3_readphy(tp, MII_LPA, &reg))
1699 val |= (reg & 0xffff);
1700 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1701
1702 val = 0;
1703 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1704 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1705 val = reg << 16;
1706 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1707 val |= (reg & 0xffff);
1708 }
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1710
1711 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1712 val = reg << 16;
1713 else
1714 val = 0;
1715 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1716
1717 val = tr32(GRC_RX_CPU_EVENT);
1718 val |= GRC_RX_CPU_DRIVER_EVENT;
1719 tw32_f(GRC_RX_CPU_EVENT, val);
1720}
1721
1659static void tg3_link_report(struct tg3 *tp) 1722static void tg3_link_report(struct tg3 *tp)
1660{ 1723{
1661 if (!netif_carrier_ok(tp->dev)) { 1724 if (!netif_carrier_ok(tp->dev)) {
1662 if (netif_msg_link(tp)) 1725 if (netif_msg_link(tp))
1663 printk(KERN_INFO PFX "%s: Link is down.\n", 1726 printk(KERN_INFO PFX "%s: Link is down.\n",
1664 tp->dev->name); 1727 tp->dev->name);
1728 tg3_ump_link_report(tp);
1665 } else if (netif_msg_link(tp)) { 1729 } else if (netif_msg_link(tp)) {
1666 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", 1730 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1667 tp->dev->name, 1731 tp->dev->name,
@@ -1679,6 +1743,7 @@ static void tg3_link_report(struct tg3 *tp)
1679 "on" : "off", 1743 "on" : "off",
1680 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ? 1744 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1681 "on" : "off"); 1745 "on" : "off");
1746 tg3_ump_link_report(tp);
1682 } 1747 }
1683} 1748}
1684 1749
@@ -2097,9 +2162,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2097 MAC_STATUS_LNKSTATE_CHANGED)); 2162 MAC_STATUS_LNKSTATE_CHANGED));
2098 udelay(40); 2163 udelay(40);
2099 2164
2100 tp->mi_mode = MAC_MI_MODE_BASE; 2165 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2101 tw32_f(MAC_MI_MODE, tp->mi_mode); 2166 tw32_f(MAC_MI_MODE,
2102 udelay(80); 2167 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2168 udelay(80);
2169 }
2103 2170
2104 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); 2171 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2105 2172
@@ -4361,7 +4428,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4361 } 4428 }
4362 4429
4363 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); 4430 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4364 if (unlikely(IS_ERR(segs))) 4431 if (IS_ERR(segs))
4365 goto tg3_tso_bug_end; 4432 goto tg3_tso_bug_end;
4366 4433
4367 do { 4434 do {
@@ -5498,19 +5565,17 @@ static void tg3_stop_fw(struct tg3 *tp)
5498 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && 5565 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5499 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { 5566 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5500 u32 val; 5567 u32 val;
5501 int i; 5568
5569 /* Wait for RX cpu to ACK the previous event. */
5570 tg3_wait_for_event_ack(tp);
5502 5571
5503 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); 5572 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5504 val = tr32(GRC_RX_CPU_EVENT); 5573 val = tr32(GRC_RX_CPU_EVENT);
5505 val |= (1 << 14); 5574 val |= GRC_RX_CPU_DRIVER_EVENT;
5506 tw32(GRC_RX_CPU_EVENT, val); 5575 tw32(GRC_RX_CPU_EVENT, val);
5507 5576
5508 /* Wait for RX cpu to ACK the event. */ 5577 /* Wait for RX cpu to ACK this event. */
5509 for (i = 0; i < 100; i++) { 5578 tg3_wait_for_event_ack(tp);
5510 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5511 break;
5512 udelay(1);
5513 }
5514 } 5579 }
5515} 5580}
5516 5581
@@ -7102,7 +7167,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7102 tp->link_config.autoneg = tp->link_config.orig_autoneg; 7167 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7103 } 7168 }
7104 7169
7105 tp->mi_mode = MAC_MI_MODE_BASE; 7170 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
7106 tw32_f(MAC_MI_MODE, tp->mi_mode); 7171 tw32_f(MAC_MI_MODE, tp->mi_mode);
7107 udelay(80); 7172 udelay(80);
7108 7173
@@ -7400,14 +7465,16 @@ static void tg3_timer(unsigned long __opaque)
7400 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 7465 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7401 u32 val; 7466 u32 val;
7402 7467
7468 tg3_wait_for_event_ack(tp);
7469
7403 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 7470 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7404 FWCMD_NICDRV_ALIVE3); 7471 FWCMD_NICDRV_ALIVE3);
7405 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 7472 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7406 /* 5 seconds timeout */ 7473 /* 5 seconds timeout */
7407 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); 7474 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7408 val = tr32(GRC_RX_CPU_EVENT); 7475 val = tr32(GRC_RX_CPU_EVENT);
7409 val |= (1 << 14); 7476 val |= GRC_RX_CPU_DRIVER_EVENT;
7410 tw32(GRC_RX_CPU_EVENT, val); 7477 tw32_f(GRC_RX_CPU_EVENT, val);
7411 } 7478 }
7412 tp->asf_counter = tp->asf_multiplier; 7479 tp->asf_counter = tp->asf_multiplier;
7413 } 7480 }
@@ -9568,14 +9635,9 @@ static int tg3_test_loopback(struct tg3 *tp)
9568 9635
9569 /* Turn off link-based power management. */ 9636 /* Turn off link-based power management. */
9570 cpmuctrl = tr32(TG3_CPMU_CTRL); 9637 cpmuctrl = tr32(TG3_CPMU_CTRL);
9571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 9638 tw32(TG3_CPMU_CTRL,
9572 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) 9639 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9573 tw32(TG3_CPMU_CTRL, 9640 CPMU_CTRL_LINK_AWARE_MODE));
9574 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9575 CPMU_CTRL_LINK_AWARE_MODE));
9576 else
9577 tw32(TG3_CPMU_CTRL,
9578 cpmuctrl & ~CPMU_CTRL_LINK_AWARE_MODE);
9579 } 9641 }
9580 9642
9581 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 9643 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
@@ -9892,7 +9954,7 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9892 return; 9954 return;
9893 } 9955 }
9894 } 9956 }
9895 tp->nvram_size = 0x80000; 9957 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
9896} 9958}
9897 9959
9898static void __devinit tg3_get_nvram_info(struct tg3 *tp) 9960static void __devinit tg3_get_nvram_info(struct tg3 *tp)
@@ -10033,11 +10095,14 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10033 tp->nvram_pagesize = 264; 10095 tp->nvram_pagesize = 264;
10034 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 10096 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10035 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 10097 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10036 tp->nvram_size = (protect ? 0x3e200 : 0x80000); 10098 tp->nvram_size = (protect ? 0x3e200 :
10099 TG3_NVRAM_SIZE_512KB);
10037 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) 10100 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10038 tp->nvram_size = (protect ? 0x1f200 : 0x40000); 10101 tp->nvram_size = (protect ? 0x1f200 :
10102 TG3_NVRAM_SIZE_256KB);
10039 else 10103 else
10040 tp->nvram_size = (protect ? 0x1f200 : 0x20000); 10104 tp->nvram_size = (protect ? 0x1f200 :
10105 TG3_NVRAM_SIZE_128KB);
10041 break; 10106 break;
10042 case FLASH_5752VENDOR_ST_M45PE10: 10107 case FLASH_5752VENDOR_ST_M45PE10:
10043 case FLASH_5752VENDOR_ST_M45PE20: 10108 case FLASH_5752VENDOR_ST_M45PE20:
@@ -10047,11 +10112,17 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10047 tp->tg3_flags2 |= TG3_FLG2_FLASH; 10112 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10048 tp->nvram_pagesize = 256; 10113 tp->nvram_pagesize = 256;
10049 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 10114 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10050 tp->nvram_size = (protect ? 0x10000 : 0x20000); 10115 tp->nvram_size = (protect ?
10116 TG3_NVRAM_SIZE_64KB :
10117 TG3_NVRAM_SIZE_128KB);
10051 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) 10118 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10052 tp->nvram_size = (protect ? 0x10000 : 0x40000); 10119 tp->nvram_size = (protect ?
10120 TG3_NVRAM_SIZE_64KB :
10121 TG3_NVRAM_SIZE_256KB);
10053 else 10122 else
10054 tp->nvram_size = (protect ? 0x20000 : 0x80000); 10123 tp->nvram_size = (protect ?
10124 TG3_NVRAM_SIZE_128KB :
10125 TG3_NVRAM_SIZE_512KB);
10055 break; 10126 break;
10056 } 10127 }
10057} 10128}
@@ -10145,25 +10216,25 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10145 case FLASH_5761VENDOR_ATMEL_MDB161D: 10216 case FLASH_5761VENDOR_ATMEL_MDB161D:
10146 case FLASH_5761VENDOR_ST_A_M45PE16: 10217 case FLASH_5761VENDOR_ST_A_M45PE16:
10147 case FLASH_5761VENDOR_ST_M_M45PE16: 10218 case FLASH_5761VENDOR_ST_M_M45PE16:
10148 tp->nvram_size = 0x100000; 10219 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10149 break; 10220 break;
10150 case FLASH_5761VENDOR_ATMEL_ADB081D: 10221 case FLASH_5761VENDOR_ATMEL_ADB081D:
10151 case FLASH_5761VENDOR_ATMEL_MDB081D: 10222 case FLASH_5761VENDOR_ATMEL_MDB081D:
10152 case FLASH_5761VENDOR_ST_A_M45PE80: 10223 case FLASH_5761VENDOR_ST_A_M45PE80:
10153 case FLASH_5761VENDOR_ST_M_M45PE80: 10224 case FLASH_5761VENDOR_ST_M_M45PE80:
10154 tp->nvram_size = 0x80000; 10225 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10155 break; 10226 break;
10156 case FLASH_5761VENDOR_ATMEL_ADB041D: 10227 case FLASH_5761VENDOR_ATMEL_ADB041D:
10157 case FLASH_5761VENDOR_ATMEL_MDB041D: 10228 case FLASH_5761VENDOR_ATMEL_MDB041D:
10158 case FLASH_5761VENDOR_ST_A_M45PE40: 10229 case FLASH_5761VENDOR_ST_A_M45PE40:
10159 case FLASH_5761VENDOR_ST_M_M45PE40: 10230 case FLASH_5761VENDOR_ST_M_M45PE40:
10160 tp->nvram_size = 0x40000; 10231 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10161 break; 10232 break;
10162 case FLASH_5761VENDOR_ATMEL_ADB021D: 10233 case FLASH_5761VENDOR_ATMEL_ADB021D:
10163 case FLASH_5761VENDOR_ATMEL_MDB021D: 10234 case FLASH_5761VENDOR_ATMEL_MDB021D:
10164 case FLASH_5761VENDOR_ST_A_M45PE20: 10235 case FLASH_5761VENDOR_ST_A_M45PE20:
10165 case FLASH_5761VENDOR_ST_M_M45PE20: 10236 case FLASH_5761VENDOR_ST_M_M45PE20:
10166 tp->nvram_size = 0x20000; 10237 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10167 break; 10238 break;
10168 } 10239 }
10169 } 10240 }
@@ -11764,6 +11835,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11764 tp->phy_otp = TG3_OTP_DEFAULT; 11835 tp->phy_otp = TG3_OTP_DEFAULT;
11765 } 11836 }
11766 11837
11838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11840 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11841 else
11842 tp->mi_mode = MAC_MI_MODE_BASE;
11843
11767 tp->coalesce_mode = 0; 11844 tp->coalesce_mode = 0;
11768 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && 11845 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11769 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 11846 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
@@ -12692,7 +12769,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
12692 tp->mac_mode = TG3_DEF_MAC_MODE; 12769 tp->mac_mode = TG3_DEF_MAC_MODE;
12693 tp->rx_mode = TG3_DEF_RX_MODE; 12770 tp->rx_mode = TG3_DEF_RX_MODE;
12694 tp->tx_mode = TG3_DEF_TX_MODE; 12771 tp->tx_mode = TG3_DEF_TX_MODE;
12695 tp->mi_mode = MAC_MI_MODE_BASE; 12772
12696 if (tg3_debug > 0) 12773 if (tg3_debug > 0)
12697 tp->msg_enable = tg3_debug; 12774 tp->msg_enable = tg3_debug;
12698 else 12775 else
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index c688c3ac5035..0404f93baa29 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -415,7 +415,7 @@
415#define MAC_MI_MODE_CLK_10MHZ 0x00000001 415#define MAC_MI_MODE_CLK_10MHZ 0x00000001
416#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002 416#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002
417#define MAC_MI_MODE_AUTO_POLL 0x00000010 417#define MAC_MI_MODE_AUTO_POLL 0x00000010
418#define MAC_MI_MODE_CORE_CLK_62MHZ 0x00008000 418#define MAC_MI_MODE_500KHZ_CONST 0x00008000
419#define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */ 419#define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */
420#define MAC_AUTO_POLL_STATUS 0x00000458 420#define MAC_AUTO_POLL_STATUS 0x00000458
421#define MAC_AUTO_POLL_ERROR 0x00000001 421#define MAC_AUTO_POLL_ERROR 0x00000001
@@ -1429,6 +1429,7 @@
1429#define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000 1429#define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000
1430#define GRC_TIMER 0x0000680c 1430#define GRC_TIMER 0x0000680c
1431#define GRC_RX_CPU_EVENT 0x00006810 1431#define GRC_RX_CPU_EVENT 0x00006810
1432#define GRC_RX_CPU_DRIVER_EVENT 0x00004000
1432#define GRC_RX_TIMER_REF 0x00006814 1433#define GRC_RX_TIMER_REF 0x00006814
1433#define GRC_RX_CPU_SEM 0x00006818 1434#define GRC_RX_CPU_SEM 0x00006818
1434#define GRC_REMOTE_RX_CPU_ATTN 0x0000681c 1435#define GRC_REMOTE_RX_CPU_ATTN 0x0000681c
@@ -1676,6 +1677,7 @@
1676#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004 1677#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004
1677#define FWCMD_NICDRV_FIX_DMAR 0x00000005 1678#define FWCMD_NICDRV_FIX_DMAR 0x00000005
1678#define FWCMD_NICDRV_FIX_DMAW 0x00000006 1679#define FWCMD_NICDRV_FIX_DMAW 0x00000006
1680#define FWCMD_NICDRV_LINK_UPDATE 0x0000000c
1679#define FWCMD_NICDRV_ALIVE2 0x0000000d 1681#define FWCMD_NICDRV_ALIVE2 0x0000000d
1680#define FWCMD_NICDRV_ALIVE3 0x0000000e 1682#define FWCMD_NICDRV_ALIVE3 0x0000000e
1681#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c 1683#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c
@@ -2576,6 +2578,13 @@ struct tg3 {
2576 2578
2577 int nvram_lock_cnt; 2579 int nvram_lock_cnt;
2578 u32 nvram_size; 2580 u32 nvram_size;
2581#define TG3_NVRAM_SIZE_64KB 0x00010000
2582#define TG3_NVRAM_SIZE_128KB 0x00020000
2583#define TG3_NVRAM_SIZE_256KB 0x00040000
2584#define TG3_NVRAM_SIZE_512KB 0x00080000
2585#define TG3_NVRAM_SIZE_1MB 0x00100000
2586#define TG3_NVRAM_SIZE_2MB 0x00200000
2587
2579 u32 nvram_pagesize; 2588 u32 nvram_pagesize;
2580 u32 nvram_jedecnum; 2589 u32 nvram_jedecnum;
2581 2590
@@ -2584,10 +2593,10 @@ struct tg3 {
2584#define JEDEC_SAIFUN 0x4f 2593#define JEDEC_SAIFUN 0x4f
2585#define JEDEC_SST 0xbf 2594#define JEDEC_SST 0xbf
2586 2595
2587#define ATMEL_AT24C64_CHIP_SIZE (64 * 1024) 2596#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB
2588#define ATMEL_AT24C64_PAGE_SIZE (32) 2597#define ATMEL_AT24C64_PAGE_SIZE (32)
2589 2598
2590#define ATMEL_AT24C512_CHIP_SIZE (512 * 1024) 2599#define ATMEL_AT24C512_CHIP_SIZE TG3_NVRAM_SIZE_512KB
2591#define ATMEL_AT24C512_PAGE_SIZE (128) 2600#define ATMEL_AT24C512_PAGE_SIZE (128)
2592 2601
2593#define ATMEL_AT45DB0X1B_PAGE_POS 9 2602#define ATMEL_AT45DB0X1B_PAGE_POS 9
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 6c6fc325c8f9..bc30c6e8fea2 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -482,7 +482,6 @@
482static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; 482static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
483 483
484#define c_char const char 484#define c_char const char
485#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((__le16 *)(a)))
486 485
487/* 486/*
488** MII Information 487** MII Information
@@ -4405,7 +4404,7 @@ srom_infoleaf_info(struct net_device *dev)
4405 } 4404 }
4406 } 4405 }
4407 4406
4408 lp->infoleaf_offset = TWIDDLE(p+1); 4407 lp->infoleaf_offset = get_unaligned_le16(p + 1);
4409 4408
4410 return 0; 4409 return 0;
4411} 4410}
@@ -4476,7 +4475,7 @@ srom_exec(struct net_device *dev, u_char *p)
4476 4475
4477 while (count--) { 4476 while (count--) {
4478 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? 4477 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
4479 *p++ : TWIDDLE(w++)), dev); 4478 *p++ : get_unaligned_le16(w++)), dev);
4480 mdelay(2); /* 2ms per action */ 4479 mdelay(2); /* 2ms per action */
4481 } 4480 }
4482 4481
@@ -4711,10 +4710,10 @@ type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4711 lp->active = *p++; 4710 lp->active = *p++;
4712 lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1); 4711 lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
4713 lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1); 4712 lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
4714 lp->phy[lp->active].mc = TWIDDLE(p); p += 2; 4713 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4715 lp->phy[lp->active].ana = TWIDDLE(p); p += 2; 4714 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4716 lp->phy[lp->active].fdx = TWIDDLE(p); p += 2; 4715 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4717 lp->phy[lp->active].ttm = TWIDDLE(p); 4716 lp->phy[lp->active].ttm = get_unaligned_le16(p);
4718 return 0; 4717 return 0;
4719 } else if ((lp->media == INIT) && (lp->timeout < 0)) { 4718 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4720 lp->ibn = 1; 4719 lp->ibn = 1;
@@ -4751,16 +4750,16 @@ type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4751 lp->infoblock_media = (*p) & MEDIA_CODE; 4750 lp->infoblock_media = (*p) & MEDIA_CODE;
4752 4751
4753 if ((*p++) & EXT_FIELD) { 4752 if ((*p++) & EXT_FIELD) {
4754 lp->cache.csr13 = TWIDDLE(p); p += 2; 4753 lp->cache.csr13 = get_unaligned_le16(p); p += 2;
4755 lp->cache.csr14 = TWIDDLE(p); p += 2; 4754 lp->cache.csr14 = get_unaligned_le16(p); p += 2;
4756 lp->cache.csr15 = TWIDDLE(p); p += 2; 4755 lp->cache.csr15 = get_unaligned_le16(p); p += 2;
4757 } else { 4756 } else {
4758 lp->cache.csr13 = CSR13; 4757 lp->cache.csr13 = CSR13;
4759 lp->cache.csr14 = CSR14; 4758 lp->cache.csr14 = CSR14;
4760 lp->cache.csr15 = CSR15; 4759 lp->cache.csr15 = CSR15;
4761 } 4760 }
4762 lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2; 4761 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4763 lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); 4762 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
4764 lp->infoblock_csr6 = OMR_SIA; 4763 lp->infoblock_csr6 = OMR_SIA;
4765 lp->useMII = false; 4764 lp->useMII = false;
4766 4765
@@ -4792,10 +4791,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4792 if (MOTO_SROM_BUG) lp->active = 0; 4791 if (MOTO_SROM_BUG) lp->active = 0;
4793 lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); 4792 lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
4794 lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); 4793 lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
4795 lp->phy[lp->active].mc = TWIDDLE(p); p += 2; 4794 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4796 lp->phy[lp->active].ana = TWIDDLE(p); p += 2; 4795 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4797 lp->phy[lp->active].fdx = TWIDDLE(p); p += 2; 4796 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4798 lp->phy[lp->active].ttm = TWIDDLE(p); p += 2; 4797 lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
4799 lp->phy[lp->active].mci = *p; 4798 lp->phy[lp->active].mci = *p;
4800 return 0; 4799 return 0;
4801 } else if ((lp->media == INIT) && (lp->timeout < 0)) { 4800 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
@@ -4835,8 +4834,8 @@ type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4835 lp->cache.csr13 = CSR13; /* Hard coded defaults */ 4834 lp->cache.csr13 = CSR13; /* Hard coded defaults */
4836 lp->cache.csr14 = CSR14; 4835 lp->cache.csr14 = CSR14;
4837 lp->cache.csr15 = CSR15; 4836 lp->cache.csr15 = CSR15;
4838 lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2; 4837 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4839 lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); p += 2; 4838 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4840 csr6 = *p++; 4839 csr6 = *p++;
4841 flags = *p++; 4840 flags = *p++;
4842 4841
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
index 9fb8d7f07994..f5f33b3eb067 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/tulip/de4x5.h
@@ -1017,4 +1017,4 @@ struct de4x5_ioctl {
1017#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */ 1017#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
1018#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */ 1018#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
1019 1019
1020#define MOTO_SROM_BUG ((lp->active == 8) && (((le32_to_cpu(get_unaligned(((__le32 *)dev->dev_addr))))&0x00ffffff)==0x3e0008)) 1020#define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008)
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 908422f2f320..92c68a22f16b 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -25,6 +25,7 @@
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/irq.h> 27#include <asm/irq.h>
28#include <asm/unaligned.h>
28 29
29 30
30 31
@@ -304,11 +305,7 @@ enum t21143_csr6_bits {
304 305
305#define RUN_AT(x) (jiffies + (x)) 306#define RUN_AT(x) (jiffies + (x))
306 307
307#if defined(__i386__) /* AKA get_unaligned() */ 308#define get_u16(ptr) get_unaligned_le16((ptr))
308#define get_u16(ptr) (*(u16 *)(ptr))
309#else
310#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
311#endif
312 309
313struct medialeaf { 310struct medialeaf {
314 u8 type; 311 u8 type;
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index fa1c1c329a2d..f9d13fa05d64 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -327,8 +327,8 @@ static void tulip_up(struct net_device *dev)
327 tp->dirty_rx = tp->dirty_tx = 0; 327 tp->dirty_rx = tp->dirty_tx = 0;
328 328
329 if (tp->flags & MC_HASH_ONLY) { 329 if (tp->flags & MC_HASH_ONLY) {
330 u32 addr_low = le32_to_cpu(get_unaligned((__le32 *)dev->dev_addr)); 330 u32 addr_low = get_unaligned_le32(dev->dev_addr);
331 u32 addr_high = le16_to_cpu(get_unaligned((__le16 *)(dev->dev_addr+4))); 331 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
332 if (tp->chip_id == AX88140) { 332 if (tp->chip_id == AX88140) {
333 iowrite32(0, ioaddr + CSR13); 333 iowrite32(0, ioaddr + CSR13);
334 iowrite32(addr_low, ioaddr + CSR14); 334 iowrite32(addr_low, ioaddr + CSR14);
@@ -1437,13 +1437,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1437 do 1437 do
1438 value = ioread32(ioaddr + CSR9); 1438 value = ioread32(ioaddr + CSR9);
1439 while (value < 0 && --boguscnt > 0); 1439 while (value < 0 && --boguscnt > 0);
1440 put_unaligned(cpu_to_le16(value), ((__le16*)dev->dev_addr) + i); 1440 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1441 sum += value & 0xffff; 1441 sum += value & 0xffff;
1442 } 1442 }
1443 } else if (chip_idx == COMET) { 1443 } else if (chip_idx == COMET) {
1444 /* No need to read the EEPROM. */ 1444 /* No need to read the EEPROM. */
1445 put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (__le32 *)dev->dev_addr); 1445 put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1446 put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (__le16 *)(dev->dev_addr + 4)); 1446 put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1447 for (i = 0; i < 6; i ++) 1447 for (i = 0; i < 6; i ++)
1448 sum += dev->dev_addr[i]; 1448 sum += dev->dev_addr[i];
1449 } else { 1449 } else {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 555b70c8b863..f926b5ab3d09 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -41,6 +41,9 @@ struct virtnet_info
41 struct net_device *dev; 41 struct net_device *dev;
42 struct napi_struct napi; 42 struct napi_struct napi;
43 43
44 /* The skb we couldn't send because buffers were full. */
45 struct sk_buff *last_xmit_skb;
46
44 /* Number of input buffers, and max we've ever had. */ 47 /* Number of input buffers, and max we've ever had. */
45 unsigned int num, max; 48 unsigned int num, max;
46 49
@@ -142,10 +145,10 @@ drop:
142static void try_fill_recv(struct virtnet_info *vi) 145static void try_fill_recv(struct virtnet_info *vi)
143{ 146{
144 struct sk_buff *skb; 147 struct sk_buff *skb;
145 struct scatterlist sg[1+MAX_SKB_FRAGS]; 148 struct scatterlist sg[2+MAX_SKB_FRAGS];
146 int num, err; 149 int num, err;
147 150
148 sg_init_table(sg, 1+MAX_SKB_FRAGS); 151 sg_init_table(sg, 2+MAX_SKB_FRAGS);
149 for (;;) { 152 for (;;) {
150 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN); 153 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
151 if (unlikely(!skb)) 154 if (unlikely(!skb))
@@ -221,23 +224,22 @@ static void free_old_xmit_skbs(struct virtnet_info *vi)
221 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { 224 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
222 pr_debug("Sent skb %p\n", skb); 225 pr_debug("Sent skb %p\n", skb);
223 __skb_unlink(skb, &vi->send); 226 __skb_unlink(skb, &vi->send);
224 vi->dev->stats.tx_bytes += len; 227 vi->dev->stats.tx_bytes += skb->len;
225 vi->dev->stats.tx_packets++; 228 vi->dev->stats.tx_packets++;
226 kfree_skb(skb); 229 kfree_skb(skb);
227 } 230 }
228} 231}
229 232
230static int start_xmit(struct sk_buff *skb, struct net_device *dev) 233static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
231{ 234{
232 struct virtnet_info *vi = netdev_priv(dev); 235 int num;
233 int num, err; 236 struct scatterlist sg[2+MAX_SKB_FRAGS];
234 struct scatterlist sg[1+MAX_SKB_FRAGS];
235 struct virtio_net_hdr *hdr; 237 struct virtio_net_hdr *hdr;
236 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 238 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
237 239
238 sg_init_table(sg, 1+MAX_SKB_FRAGS); 240 sg_init_table(sg, 2+MAX_SKB_FRAGS);
239 241
240 pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb, 242 pr_debug("%s: xmit %p " MAC_FMT "\n", vi->dev->name, skb,
241 dest[0], dest[1], dest[2], 243 dest[0], dest[1], dest[2],
242 dest[3], dest[4], dest[5]); 244 dest[3], dest[4], dest[5]);
243 245
@@ -272,30 +274,51 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
272 274
273 vnet_hdr_to_sg(sg, skb); 275 vnet_hdr_to_sg(sg, skb);
274 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 276 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
275 __skb_queue_head(&vi->send, skb); 277
278 return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
279}
280
281static int start_xmit(struct sk_buff *skb, struct net_device *dev)
282{
283 struct virtnet_info *vi = netdev_priv(dev);
276 284
277again: 285again:
278 /* Free up any pending old buffers before queueing new ones. */ 286 /* Free up any pending old buffers before queueing new ones. */
279 free_old_xmit_skbs(vi); 287 free_old_xmit_skbs(vi);
280 err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); 288
281 if (err) { 289 /* If we have a buffer left over from last time, send it now. */
282 pr_debug("%s: virtio not prepared to send\n", dev->name); 290 if (vi->last_xmit_skb) {
283 netif_stop_queue(dev); 291 if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
284 292 /* Drop this skb: we only queue one. */
285 /* Activate callback for using skbs: if this returns false it 293 vi->dev->stats.tx_dropped++;
286 * means some were used in the meantime. */ 294 kfree_skb(skb);
287 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { 295 goto stop_queue;
288 vi->svq->vq_ops->disable_cb(vi->svq);
289 netif_start_queue(dev);
290 goto again;
291 } 296 }
292 __skb_unlink(skb, &vi->send); 297 vi->last_xmit_skb = NULL;
298 }
293 299
294 return NETDEV_TX_BUSY; 300 /* Put new one in send queue and do transmit */
301 __skb_queue_head(&vi->send, skb);
302 if (xmit_skb(vi, skb) != 0) {
303 vi->last_xmit_skb = skb;
304 goto stop_queue;
295 } 305 }
306done:
296 vi->svq->vq_ops->kick(vi->svq); 307 vi->svq->vq_ops->kick(vi->svq);
297 308 return NETDEV_TX_OK;
298 return 0; 309
310stop_queue:
311 pr_debug("%s: virtio not prepared to send\n", dev->name);
312 netif_stop_queue(dev);
313
314 /* Activate callback for using skbs: if this returns false it
315 * means some were used in the meantime. */
316 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
317 vi->svq->vq_ops->disable_cb(vi->svq);
318 netif_start_queue(dev);
319 goto again;
320 }
321 goto done;
299} 322}
300 323
301#ifdef CONFIG_NET_POLL_CONTROLLER 324#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -355,17 +378,26 @@ static int virtnet_probe(struct virtio_device *vdev)
355 SET_NETDEV_DEV(dev, &vdev->dev); 378 SET_NETDEV_DEV(dev, &vdev->dev);
356 379
357 /* Do we support "hardware" checksums? */ 380 /* Do we support "hardware" checksums? */
358 if (csum && vdev->config->feature(vdev, VIRTIO_NET_F_CSUM)) { 381 if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
359 /* This opens up the world of extra features. */ 382 /* This opens up the world of extra features. */
360 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 383 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
361 if (gso && vdev->config->feature(vdev, VIRTIO_NET_F_GSO)) { 384 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
362 dev->features |= NETIF_F_TSO | NETIF_F_UFO 385 dev->features |= NETIF_F_TSO | NETIF_F_UFO
363 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 386 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
364 } 387 }
388 /* Individual feature bits: what can host handle? */
389 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
390 dev->features |= NETIF_F_TSO;
391 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
392 dev->features |= NETIF_F_TSO6;
393 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
394 dev->features |= NETIF_F_TSO_ECN;
395 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
396 dev->features |= NETIF_F_UFO;
365 } 397 }
366 398
367 /* Configuration may specify what MAC to use. Otherwise random. */ 399 /* Configuration may specify what MAC to use. Otherwise random. */
368 if (vdev->config->feature(vdev, VIRTIO_NET_F_MAC)) { 400 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
369 vdev->config->get(vdev, 401 vdev->config->get(vdev,
370 offsetof(struct virtio_net_config, mac), 402 offsetof(struct virtio_net_config, mac),
371 dev->dev_addr, dev->addr_len); 403 dev->dev_addr, dev->addr_len);
@@ -454,7 +486,15 @@ static struct virtio_device_id id_table[] = {
454 { 0 }, 486 { 0 },
455}; 487};
456 488
489static unsigned int features[] = {
490 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
491 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
492 VIRTIO_NET_F_HOST_ECN,
493};
494
457static struct virtio_driver virtio_net = { 495static struct virtio_driver virtio_net = {
496 .feature_table = features,
497 .feature_table_size = ARRAY_SIZE(features),
458 .driver.name = KBUILD_MODNAME, 498 .driver.name = KBUILD_MODNAME,
459 .driver.owner = THIS_MODULE, 499 .driver.owner = THIS_MODULE,
460 .id_table = id_table, 500 .id_table = id_table,
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 63abfd72542d..e03eef2f2282 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -178,6 +178,20 @@ static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char signal)
178 CPC_TTY_UNLOCK(card,flags); 178 CPC_TTY_UNLOCK(card,flags);
179} 179}
180 180
181
182static const struct tty_operations pc300_ops = {
183 .open = cpc_tty_open,
184 .close = cpc_tty_close,
185 .write = cpc_tty_write,
186 .write_room = cpc_tty_write_room,
187 .chars_in_buffer = cpc_tty_chars_in_buffer,
188 .tiocmset = pc300_tiocmset,
189 .tiocmget = pc300_tiocmget,
190 .flush_buffer = cpc_tty_flush_buffer,
191 .hangup = cpc_tty_hangup,
192};
193
194
181/* 195/*
182 * PC300 TTY initialization routine 196 * PC300 TTY initialization routine
183 * 197 *
@@ -225,15 +239,7 @@ void cpc_tty_init(pc300dev_t *pc300dev)
225 serial_drv.flags = TTY_DRIVER_REAL_RAW; 239 serial_drv.flags = TTY_DRIVER_REAL_RAW;
226 240
227 /* interface routines from the upper tty layer to the tty driver */ 241 /* interface routines from the upper tty layer to the tty driver */
228 serial_drv.open = cpc_tty_open; 242 tty_set_operations(&serial_drv, &pc300_ops);
229 serial_drv.close = cpc_tty_close;
230 serial_drv.write = cpc_tty_write;
231 serial_drv.write_room = cpc_tty_write_room;
232 serial_drv.chars_in_buffer = cpc_tty_chars_in_buffer;
233 serial_drv.tiocmset = pc300_tiocmset;
234 serial_drv.tiocmget = pc300_tiocmget;
235 serial_drv.flush_buffer = cpc_tty_flush_buffer;
236 serial_drv.hangup = cpc_tty_hangup;
237 243
238 /* register the TTY driver */ 244 /* register the TTY driver */
239 if (tty_register_driver(&serial_drv)) { 245 if (tty_register_driver(&serial_drv)) {
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 0f8aca8a4d43..249e18053d5f 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -17,7 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18 18
19#include <asm/system.h> 19#include <asm/system.h>
20#include <asm/uaccess.h> 20#include <linux/uaccess.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
@@ -95,7 +95,7 @@ static struct x25_asy *x25_asy_alloc(void)
95 x25_asy_devs[i] = dev; 95 x25_asy_devs[i] = dev;
96 return sl; 96 return sl;
97 } else { 97 } else {
98 printk("x25_asy_alloc() - register_netdev() failure.\n"); 98 printk(KERN_WARNING "x25_asy_alloc() - register_netdev() failure.\n");
99 free_netdev(dev); 99 free_netdev(dev);
100 } 100 }
101 } 101 }
@@ -112,23 +112,22 @@ static void x25_asy_free(struct x25_asy *sl)
112 kfree(sl->xbuff); 112 kfree(sl->xbuff);
113 sl->xbuff = NULL; 113 sl->xbuff = NULL;
114 114
115 if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) { 115 if (!test_and_clear_bit(SLF_INUSE, &sl->flags))
116 printk("%s: x25_asy_free for already free unit.\n", sl->dev->name); 116 printk(KERN_ERR "%s: x25_asy_free for already free unit.\n",
117 } 117 sl->dev->name);
118} 118}
119 119
120static int x25_asy_change_mtu(struct net_device *dev, int newmtu) 120static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
121{ 121{
122 struct x25_asy *sl = dev->priv; 122 struct x25_asy *sl = dev->priv;
123 unsigned char *xbuff, *rbuff; 123 unsigned char *xbuff, *rbuff;
124 int len = 2* newmtu; 124 int len = 2 * newmtu;
125 125
126 xbuff = kmalloc(len + 4, GFP_ATOMIC); 126 xbuff = kmalloc(len + 4, GFP_ATOMIC);
127 rbuff = kmalloc(len + 4, GFP_ATOMIC); 127 rbuff = kmalloc(len + 4, GFP_ATOMIC);
128 128
129 if (xbuff == NULL || rbuff == NULL) 129 if (xbuff == NULL || rbuff == NULL) {
130 { 130 printk(KERN_WARNING "%s: unable to grow X.25 buffers, MTU change cancelled.\n",
131 printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n",
132 dev->name); 131 dev->name);
133 kfree(xbuff); 132 kfree(xbuff);
134 kfree(rbuff); 133 kfree(rbuff);
@@ -193,25 +192,23 @@ static void x25_asy_bump(struct x25_asy *sl)
193 int err; 192 int err;
194 193
195 count = sl->rcount; 194 count = sl->rcount;
196 sl->stats.rx_bytes+=count; 195 sl->stats.rx_bytes += count;
197 196
198 skb = dev_alloc_skb(count+1); 197 skb = dev_alloc_skb(count+1);
199 if (skb == NULL) 198 if (skb == NULL) {
200 { 199 printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n",
201 printk("%s: memory squeeze, dropping packet.\n", sl->dev->name); 200 sl->dev->name);
202 sl->stats.rx_dropped++; 201 sl->stats.rx_dropped++;
203 return; 202 return;
204 } 203 }
205 skb_push(skb,1); /* LAPB internal control */ 204 skb_push(skb, 1); /* LAPB internal control */
206 memcpy(skb_put(skb,count), sl->rbuff, count); 205 memcpy(skb_put(skb, count), sl->rbuff, count);
207 skb->protocol = x25_type_trans(skb, sl->dev); 206 skb->protocol = x25_type_trans(skb, sl->dev);
208 if((err=lapb_data_received(skb->dev, skb))!=LAPB_OK) 207 err = lapb_data_received(skb->dev, skb);
209 { 208 if (err != LAPB_OK) {
210 kfree_skb(skb); 209 kfree_skb(skb);
211 printk(KERN_DEBUG "x25_asy: data received err - %d\n",err); 210 printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
212 } 211 } else {
213 else
214 {
215 netif_rx(skb); 212 netif_rx(skb);
216 sl->dev->last_rx = jiffies; 213 sl->dev->last_rx = jiffies;
217 sl->stats.rx_packets++; 214 sl->stats.rx_packets++;
@@ -224,10 +221,11 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
224 unsigned char *p; 221 unsigned char *p;
225 int actual, count, mtu = sl->dev->mtu; 222 int actual, count, mtu = sl->dev->mtu;
226 223
227 if (len > mtu) 224 if (len > mtu) {
228 { /* Sigh, shouldn't occur BUT ... */ 225 /* Sigh, shouldn't occur BUT ... */
229 len = mtu; 226 len = mtu;
230 printk ("%s: truncating oversized transmit packet!\n", sl->dev->name); 227 printk(KERN_DEBUG "%s: truncating oversized transmit packet!\n",
228 sl->dev->name);
231 sl->stats.tx_dropped++; 229 sl->stats.tx_dropped++;
232 x25_asy_unlock(sl); 230 x25_asy_unlock(sl);
233 return; 231 return;
@@ -245,7 +243,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
245 * 14 Oct 1994 Dmitry Gorodchanin. 243 * 14 Oct 1994 Dmitry Gorodchanin.
246 */ 244 */
247 sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 245 sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
248 actual = sl->tty->driver->write(sl->tty, sl->xbuff, count); 246 actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
249 sl->xleft = count - actual; 247 sl->xleft = count - actual;
250 sl->xhead = sl->xbuff + actual; 248 sl->xhead = sl->xbuff + actual;
251 /* VSV */ 249 /* VSV */
@@ -265,8 +263,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
265 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) 263 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
266 return; 264 return;
267 265
268 if (sl->xleft <= 0) 266 if (sl->xleft <= 0) {
269 {
270 /* Now serial buffer is almost free & we can start 267 /* Now serial buffer is almost free & we can start
271 * transmission of another packet */ 268 * transmission of another packet */
272 sl->stats.tx_packets++; 269 sl->stats.tx_packets++;
@@ -275,14 +272,14 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
275 return; 272 return;
276 } 273 }
277 274
278 actual = tty->driver->write(tty, sl->xhead, sl->xleft); 275 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
279 sl->xleft -= actual; 276 sl->xleft -= actual;
280 sl->xhead += actual; 277 sl->xhead += actual;
281} 278}
282 279
283static void x25_asy_timeout(struct net_device *dev) 280static void x25_asy_timeout(struct net_device *dev)
284{ 281{
285 struct x25_asy *sl = (struct x25_asy*)(dev->priv); 282 struct x25_asy *sl = dev->priv;
286 283
287 spin_lock(&sl->lock); 284 spin_lock(&sl->lock);
288 if (netif_queue_stopped(dev)) { 285 if (netif_queue_stopped(dev)) {
@@ -290,7 +287,7 @@ static void x25_asy_timeout(struct net_device *dev)
290 * 14 Oct 1994 Dmitry Gorodchanin. 287 * 14 Oct 1994 Dmitry Gorodchanin.
291 */ 288 */
292 printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, 289 printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
293 (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ? 290 (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
294 "bad line quality" : "driver error"); 291 "bad line quality" : "driver error");
295 sl->xleft = 0; 292 sl->xleft = 0;
296 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 293 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
@@ -303,31 +300,34 @@ static void x25_asy_timeout(struct net_device *dev)
303 300
304static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev) 301static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
305{ 302{
306 struct x25_asy *sl = (struct x25_asy*)(dev->priv); 303 struct x25_asy *sl = dev->priv;
307 int err; 304 int err;
308 305
309 if (!netif_running(sl->dev)) { 306 if (!netif_running(sl->dev)) {
310 printk("%s: xmit call when iface is down\n", dev->name); 307 printk(KERN_ERR "%s: xmit call when iface is down\n",
308 dev->name);
311 kfree_skb(skb); 309 kfree_skb(skb);
312 return 0; 310 return 0;
313 } 311 }
314 312
315 switch(skb->data[0]) 313 switch (skb->data[0]) {
316 { 314 case 0x00:
317 case 0x00:break; 315 break;
318 case 0x01: /* Connection request .. do nothing */ 316 case 0x01: /* Connection request .. do nothing */
319 if((err=lapb_connect_request(dev))!=LAPB_OK) 317 err = lapb_connect_request(dev);
320 printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err); 318 if (err != LAPB_OK)
321 kfree_skb(skb); 319 printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
322 return 0; 320 kfree_skb(skb);
323 case 0x02: /* Disconnect request .. do nothing - hang up ?? */ 321 return 0;
324 if((err=lapb_disconnect_request(dev))!=LAPB_OK) 322 case 0x02: /* Disconnect request .. do nothing - hang up ?? */
325 printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err); 323 err = lapb_disconnect_request(dev);
326 default: 324 if (err != LAPB_OK)
327 kfree_skb(skb); 325 printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
328 return 0; 326 default:
327 kfree_skb(skb);
328 return 0;
329 } 329 }
330 skb_pull(skb,1); /* Remove control byte */ 330 skb_pull(skb, 1); /* Remove control byte */
331 /* 331 /*
332 * If we are busy already- too bad. We ought to be able 332 * If we are busy already- too bad. We ought to be able
333 * to queue things at this point, to allow for a little 333 * to queue things at this point, to allow for a little
@@ -338,10 +338,10 @@ static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
338 * So, no queues ! 338 * So, no queues !
339 * 14 Oct 1994 Dmitry Gorodchanin. 339 * 14 Oct 1994 Dmitry Gorodchanin.
340 */ 340 */
341 341
342 if((err=lapb_data_request(dev,skb))!=LAPB_OK) 342 err = lapb_data_request(dev, skb);
343 { 343 if (err != LAPB_OK) {
344 printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err); 344 printk(KERN_ERR "x25_asy: lapb_data_request error - %d\n", err);
345 kfree_skb(skb); 345 kfree_skb(skb);
346 return 0; 346 return 0;
347 } 347 }
@@ -357,7 +357,7 @@ static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
357 * Called when I frame data arrives. We did the work above - throw it 357 * Called when I frame data arrives. We did the work above - throw it
358 * at the net layer. 358 * at the net layer.
359 */ 359 */
360 360
361static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) 361static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
362{ 362{
363 skb->dev->last_rx = jiffies; 363 skb->dev->last_rx = jiffies;
@@ -369,24 +369,22 @@ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
369 * busy cases too well. Its tricky to see how to do this nicely - 369 * busy cases too well. Its tricky to see how to do this nicely -
370 * perhaps lapb should allow us to bounce this ? 370 * perhaps lapb should allow us to bounce this ?
371 */ 371 */
372 372
373static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb) 373static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
374{ 374{
375 struct x25_asy *sl=dev->priv; 375 struct x25_asy *sl = dev->priv;
376 376
377 spin_lock(&sl->lock); 377 spin_lock(&sl->lock);
378 if (netif_queue_stopped(sl->dev) || sl->tty == NULL) 378 if (netif_queue_stopped(sl->dev) || sl->tty == NULL) {
379 {
380 spin_unlock(&sl->lock); 379 spin_unlock(&sl->lock);
381 printk(KERN_ERR "x25_asy: tbusy drop\n"); 380 printk(KERN_ERR "x25_asy: tbusy drop\n");
382 kfree_skb(skb); 381 kfree_skb(skb);
383 return; 382 return;
384 } 383 }
385 /* We were not busy, so we are now... :-) */ 384 /* We were not busy, so we are now... :-) */
386 if (skb != NULL) 385 if (skb != NULL) {
387 {
388 x25_asy_lock(sl); 386 x25_asy_lock(sl);
389 sl->stats.tx_bytes+=skb->len; 387 sl->stats.tx_bytes += skb->len;
390 x25_asy_encaps(sl, skb->data, skb->len); 388 x25_asy_encaps(sl, skb->data, skb->len);
391 dev_kfree_skb(skb); 389 dev_kfree_skb(skb);
392 } 390 }
@@ -396,15 +394,16 @@ static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
396/* 394/*
397 * LAPB connection establish/down information. 395 * LAPB connection establish/down information.
398 */ 396 */
399 397
400static void x25_asy_connected(struct net_device *dev, int reason) 398static void x25_asy_connected(struct net_device *dev, int reason)
401{ 399{
402 struct x25_asy *sl = dev->priv; 400 struct x25_asy *sl = dev->priv;
403 struct sk_buff *skb; 401 struct sk_buff *skb;
404 unsigned char *ptr; 402 unsigned char *ptr;
405 403
406 if ((skb = dev_alloc_skb(1)) == NULL) { 404 skb = dev_alloc_skb(1);
407 printk(KERN_ERR "lapbeth: out of memory\n"); 405 if (skb == NULL) {
406 printk(KERN_ERR "x25_asy: out of memory\n");
408 return; 407 return;
409 } 408 }
410 409
@@ -422,7 +421,8 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
422 struct sk_buff *skb; 421 struct sk_buff *skb;
423 unsigned char *ptr; 422 unsigned char *ptr;
424 423
425 if ((skb = dev_alloc_skb(1)) == NULL) { 424 skb = dev_alloc_skb(1);
425 if (skb == NULL) {
426 printk(KERN_ERR "x25_asy: out of memory\n"); 426 printk(KERN_ERR "x25_asy: out of memory\n");
427 return; 427 return;
428 } 428 }
@@ -449,7 +449,7 @@ static struct lapb_register_struct x25_asy_callbacks = {
449/* Open the low-level part of the X.25 channel. Easy! */ 449/* Open the low-level part of the X.25 channel. Easy! */
450static int x25_asy_open(struct net_device *dev) 450static int x25_asy_open(struct net_device *dev)
451{ 451{
452 struct x25_asy *sl = (struct x25_asy*)(dev->priv); 452 struct x25_asy *sl = dev->priv;
453 unsigned long len; 453 unsigned long len;
454 int err; 454 int err;
455 455
@@ -466,13 +466,11 @@ static int x25_asy_open(struct net_device *dev)
466 len = dev->mtu * 2; 466 len = dev->mtu * 2;
467 467
468 sl->rbuff = kmalloc(len + 4, GFP_KERNEL); 468 sl->rbuff = kmalloc(len + 4, GFP_KERNEL);
469 if (sl->rbuff == NULL) { 469 if (sl->rbuff == NULL)
470 goto norbuff; 470 goto norbuff;
471 }
472 sl->xbuff = kmalloc(len + 4, GFP_KERNEL); 471 sl->xbuff = kmalloc(len + 4, GFP_KERNEL);
473 if (sl->xbuff == NULL) { 472 if (sl->xbuff == NULL)
474 goto noxbuff; 473 goto noxbuff;
475 }
476 474
477 sl->buffsize = len; 475 sl->buffsize = len;
478 sl->rcount = 0; 476 sl->rcount = 0;
@@ -480,11 +478,12 @@ static int x25_asy_open(struct net_device *dev)
480 sl->flags &= (1 << SLF_INUSE); /* Clear ESCAPE & ERROR flags */ 478 sl->flags &= (1 << SLF_INUSE); /* Clear ESCAPE & ERROR flags */
481 479
482 netif_start_queue(dev); 480 netif_start_queue(dev);
483 481
484 /* 482 /*
485 * Now attach LAPB 483 * Now attach LAPB
486 */ 484 */
487 if((err=lapb_register(dev, &x25_asy_callbacks))==LAPB_OK) 485 err = lapb_register(dev, &x25_asy_callbacks);
486 if (err == LAPB_OK)
488 return 0; 487 return 0;
489 488
490 /* Cleanup */ 489 /* Cleanup */
@@ -499,18 +498,20 @@ norbuff:
499/* Close the low-level part of the X.25 channel. Easy! */ 498/* Close the low-level part of the X.25 channel. Easy! */
500static int x25_asy_close(struct net_device *dev) 499static int x25_asy_close(struct net_device *dev)
501{ 500{
502 struct x25_asy *sl = (struct x25_asy*)(dev->priv); 501 struct x25_asy *sl = dev->priv;
503 int err; 502 int err;
504 503
505 spin_lock(&sl->lock); 504 spin_lock(&sl->lock);
506 if (sl->tty) 505 if (sl->tty)
507 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 506 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
508 507
509 netif_stop_queue(dev); 508 netif_stop_queue(dev);
510 sl->rcount = 0; 509 sl->rcount = 0;
511 sl->xleft = 0; 510 sl->xleft = 0;
512 if((err=lapb_unregister(dev))!=LAPB_OK) 511 err = lapb_unregister(dev);
513 printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",err); 512 if (err != LAPB_OK)
513 printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
514 err);
514 spin_unlock(&sl->lock); 515 spin_unlock(&sl->lock);
515 return 0; 516 return 0;
516} 517}
@@ -521,8 +522,9 @@ static int x25_asy_close(struct net_device *dev)
521 * a block of X.25 data has been received, which can now be decapsulated 522 * a block of X.25 data has been received, which can now be decapsulated
522 * and sent on to some IP layer for further processing. 523 * and sent on to some IP layer for further processing.
523 */ 524 */
524 525
525static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) 526static void x25_asy_receive_buf(struct tty_struct *tty,
527 const unsigned char *cp, char *fp, int count)
526{ 528{
527 struct x25_asy *sl = (struct x25_asy *) tty->disc_data; 529 struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
528 530
@@ -533,9 +535,8 @@ static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp,
533 /* Read the characters out of the buffer */ 535 /* Read the characters out of the buffer */
534 while (count--) { 536 while (count--) {
535 if (fp && *fp++) { 537 if (fp && *fp++) {
536 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) { 538 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
537 sl->stats.rx_errors++; 539 sl->stats.rx_errors++;
538 }
539 cp++; 540 cp++;
540 continue; 541 continue;
541 } 542 }
@@ -556,31 +557,31 @@ static int x25_asy_open_tty(struct tty_struct *tty)
556 struct x25_asy *sl = (struct x25_asy *) tty->disc_data; 557 struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
557 int err; 558 int err;
558 559
560 if (tty->ops->write == NULL)
561 return -EOPNOTSUPP;
562
559 /* First make sure we're not already connected. */ 563 /* First make sure we're not already connected. */
560 if (sl && sl->magic == X25_ASY_MAGIC) { 564 if (sl && sl->magic == X25_ASY_MAGIC)
561 return -EEXIST; 565 return -EEXIST;
562 }
563 566
564 /* OK. Find a free X.25 channel to use. */ 567 /* OK. Find a free X.25 channel to use. */
565 if ((sl = x25_asy_alloc()) == NULL) { 568 sl = x25_asy_alloc();
569 if (sl == NULL)
566 return -ENFILE; 570 return -ENFILE;
567 }
568 571
569 sl->tty = tty; 572 sl->tty = tty;
570 tty->disc_data = sl; 573 tty->disc_data = sl;
571 tty->receive_room = 65536; 574 tty->receive_room = 65536;
572 if (tty->driver->flush_buffer) { 575 tty_driver_flush_buffer(tty);
573 tty->driver->flush_buffer(tty);
574 }
575 tty_ldisc_flush(tty); 576 tty_ldisc_flush(tty);
576 577
577 /* Restore default settings */ 578 /* Restore default settings */
578 sl->dev->type = ARPHRD_X25; 579 sl->dev->type = ARPHRD_X25;
579 580
580 /* Perform the low-level X.25 async init */ 581 /* Perform the low-level X.25 async init */
581 if ((err = x25_asy_open(sl->dev))) 582 err = x25_asy_open(sl->dev);
583 if (err)
582 return err; 584 return err;
583
584 /* Done. We have linked the TTY line to a channel. */ 585 /* Done. We have linked the TTY line to a channel. */
585 return sl->dev->base_addr; 586 return sl->dev->base_addr;
586} 587}
@@ -601,9 +602,7 @@ static void x25_asy_close_tty(struct tty_struct *tty)
601 return; 602 return;
602 603
603 if (sl->dev->flags & IFF_UP) 604 if (sl->dev->flags & IFF_UP)
604 { 605 dev_close(sl->dev);
605 (void) dev_close(sl->dev);
606 }
607 606
608 tty->disc_data = NULL; 607 tty->disc_data = NULL;
609 sl->tty = NULL; 608 sl->tty = NULL;
@@ -613,8 +612,7 @@ static void x25_asy_close_tty(struct tty_struct *tty)
613 612
614static struct net_device_stats *x25_asy_get_stats(struct net_device *dev) 613static struct net_device_stats *x25_asy_get_stats(struct net_device *dev)
615{ 614{
616 struct x25_asy *sl = (struct x25_asy*)(dev->priv); 615 struct x25_asy *sl = dev->priv;
617
618 return &sl->stats; 616 return &sl->stats;
619} 617}
620 618
@@ -641,21 +639,19 @@ int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
641 * character sequence, according to the X.25 protocol. 639 * character sequence, according to the X.25 protocol.
642 */ 640 */
643 641
644 while (len-- > 0) 642 while (len-- > 0) {
645 { 643 switch (c = *s++) {
646 switch(c = *s++) 644 case X25_END:
647 { 645 *ptr++ = X25_ESC;
648 case X25_END: 646 *ptr++ = X25_ESCAPE(X25_END);
649 *ptr++ = X25_ESC; 647 break;
650 *ptr++ = X25_ESCAPE(X25_END); 648 case X25_ESC:
651 break; 649 *ptr++ = X25_ESC;
652 case X25_ESC: 650 *ptr++ = X25_ESCAPE(X25_ESC);
653 *ptr++ = X25_ESC; 651 break;
654 *ptr++ = X25_ESCAPE(X25_ESC); 652 default:
655 break; 653 *ptr++ = c;
656 default: 654 break;
657 *ptr++ = c;
658 break;
659 } 655 }
660 } 656 }
661 *ptr++ = X25_END; 657 *ptr++ = X25_END;
@@ -665,31 +661,25 @@ int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
665static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) 661static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
666{ 662{
667 663
668 switch(s) 664 switch (s) {
669 { 665 case X25_END:
670 case X25_END: 666 if (!test_and_clear_bit(SLF_ERROR, &sl->flags)
671 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2)) 667 && sl->rcount > 2)
672 { 668 x25_asy_bump(sl);
673 x25_asy_bump(sl); 669 clear_bit(SLF_ESCAPE, &sl->flags);
674 } 670 sl->rcount = 0;
675 clear_bit(SLF_ESCAPE, &sl->flags); 671 return;
676 sl->rcount = 0; 672 case X25_ESC:
677 return; 673 set_bit(SLF_ESCAPE, &sl->flags);
678 674 return;
679 case X25_ESC: 675 case X25_ESCAPE(X25_ESC):
680 set_bit(SLF_ESCAPE, &sl->flags); 676 case X25_ESCAPE(X25_END):
681 return; 677 if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
682 678 s = X25_UNESCAPE(s);
683 case X25_ESCAPE(X25_ESC): 679 break;
684 case X25_ESCAPE(X25_END): 680 }
685 if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) 681 if (!test_bit(SLF_ERROR, &sl->flags)) {
686 s = X25_UNESCAPE(s); 682 if (sl->rcount < sl->buffsize) {
687 break;
688 }
689 if (!test_bit(SLF_ERROR, &sl->flags))
690 {
691 if (sl->rcount < sl->buffsize)
692 {
693 sl->rbuff[sl->rcount++] = s; 683 sl->rbuff[sl->rcount++] = s;
694 return; 684 return;
695 } 685 }
@@ -709,7 +699,7 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
709 if (!sl || sl->magic != X25_ASY_MAGIC) 699 if (!sl || sl->magic != X25_ASY_MAGIC)
710 return -EINVAL; 700 return -EINVAL;
711 701
712 switch(cmd) { 702 switch (cmd) {
713 case SIOCGIFNAME: 703 case SIOCGIFNAME:
714 if (copy_to_user((void __user *)arg, sl->dev->name, 704 if (copy_to_user((void __user *)arg, sl->dev->name,
715 strlen(sl->dev->name) + 1)) 705 strlen(sl->dev->name) + 1))
@@ -724,8 +714,8 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
724 714
725static int x25_asy_open_dev(struct net_device *dev) 715static int x25_asy_open_dev(struct net_device *dev)
726{ 716{
727 struct x25_asy *sl = (struct x25_asy*)(dev->priv); 717 struct x25_asy *sl = dev->priv;
728 if(sl->tty==NULL) 718 if (sl->tty == NULL)
729 return -ENODEV; 719 return -ENODEV;
730 return 0; 720 return 0;
731} 721}
@@ -741,9 +731,9 @@ static void x25_asy_setup(struct net_device *dev)
741 set_bit(SLF_INUSE, &sl->flags); 731 set_bit(SLF_INUSE, &sl->flags);
742 732
743 /* 733 /*
744 * Finish setting up the DEVICE info. 734 * Finish setting up the DEVICE info.
745 */ 735 */
746 736
747 dev->mtu = SL_MTU; 737 dev->mtu = SL_MTU;
748 dev->hard_start_xmit = x25_asy_xmit; 738 dev->hard_start_xmit = x25_asy_xmit;
749 dev->tx_timeout = x25_asy_timeout; 739 dev->tx_timeout = x25_asy_timeout;
@@ -778,9 +768,10 @@ static int __init init_x25_asy(void)
778 x25_asy_maxdev = 4; /* Sanity */ 768 x25_asy_maxdev = 4; /* Sanity */
779 769
780 printk(KERN_INFO "X.25 async: version 0.00 ALPHA " 770 printk(KERN_INFO "X.25 async: version 0.00 ALPHA "
781 "(dynamic channels, max=%d).\n", x25_asy_maxdev ); 771 "(dynamic channels, max=%d).\n", x25_asy_maxdev);
782 772
783 x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device*), GFP_KERNEL); 773 x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *),
774 GFP_KERNEL);
784 if (!x25_asy_devs) { 775 if (!x25_asy_devs) {
785 printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] " 776 printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] "
786 "array! Uaargh! (-> No X.25 available)\n"); 777 "array! Uaargh! (-> No X.25 available)\n");
@@ -802,7 +793,7 @@ static void __exit exit_x25_asy(void)
802 struct x25_asy *sl = dev->priv; 793 struct x25_asy *sl = dev->priv;
803 794
804 spin_lock_bh(&sl->lock); 795 spin_lock_bh(&sl->lock);
805 if (sl->tty) 796 if (sl->tty)
806 tty_hangup(sl->tty); 797 tty_hangup(sl->tty);
807 798
808 spin_unlock_bh(&sl->lock); 799 spin_unlock_bh(&sl->lock);
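The x25_asy hunks above are a coding-style cleanup: assignments move out of if () conditions, redundant braces and casts go away, and allocation failures unwind through goto labels as in x25_asy_open(). A minimal sketch of that allocation/unwind shape, assuming hypothetical names (chan_bufs, chan_alloc_bufs) rather than the driver's own:

/*
 * Sketch only: two buffer allocations that unwind through goto labels
 * in reverse order of allocation, as x25_asy_open() does above.
 */
#include <linux/errno.h>
#include <linux/slab.h>

struct chan_bufs {
        unsigned char *rbuff;
        unsigned char *xbuff;
};

int chan_alloc_bufs(struct chan_bufs *c, size_t len)
{
        c->rbuff = kmalloc(len + 4, GFP_KERNEL);
        if (c->rbuff == NULL)
                goto norbuff;

        c->xbuff = kmalloc(len + 4, GFP_KERNEL);
        if (c->xbuff == NULL)
                goto noxbuff;

        return 0;

noxbuff:
        kfree(c->rbuff);
        c->rbuff = NULL;
norbuff:
        return -ENOMEM;
}

checkpatch.pl warns about assignments inside an if () condition, which is why patches like this split them into a plain assignment followed by a NULL test.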
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 932d6b1c9d0b..45f47c1c0a35 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -3657,7 +3657,7 @@ void mpi_receive_802_11 (struct airo_info *ai)
3657 ptr += hdrlen; 3657 ptr += hdrlen;
3658 if (hdrlen == 24) 3658 if (hdrlen == 24)
3659 ptr += 6; 3659 ptr += 6;
3660 gap = le16_to_cpu(get_unaligned((__le16 *)ptr)); 3660 gap = get_unaligned_le16(ptr);
3661 ptr += sizeof(__le16); 3661 ptr += sizeof(__le16);
3662 if (gap) { 3662 if (gap) {
3663 if (gap <= 8) 3663 if (gap <= 8)
@@ -4347,24 +4347,28 @@ static int proc_config_open( struct inode *inode, struct file *file );
4347static int proc_wepkey_open( struct inode *inode, struct file *file ); 4347static int proc_wepkey_open( struct inode *inode, struct file *file );
4348 4348
4349static const struct file_operations proc_statsdelta_ops = { 4349static const struct file_operations proc_statsdelta_ops = {
4350 .owner = THIS_MODULE,
4350 .read = proc_read, 4351 .read = proc_read,
4351 .open = proc_statsdelta_open, 4352 .open = proc_statsdelta_open,
4352 .release = proc_close 4353 .release = proc_close
4353}; 4354};
4354 4355
4355static const struct file_operations proc_stats_ops = { 4356static const struct file_operations proc_stats_ops = {
4357 .owner = THIS_MODULE,
4356 .read = proc_read, 4358 .read = proc_read,
4357 .open = proc_stats_open, 4359 .open = proc_stats_open,
4358 .release = proc_close 4360 .release = proc_close
4359}; 4361};
4360 4362
4361static const struct file_operations proc_status_ops = { 4363static const struct file_operations proc_status_ops = {
4364 .owner = THIS_MODULE,
4362 .read = proc_read, 4365 .read = proc_read,
4363 .open = proc_status_open, 4366 .open = proc_status_open,
4364 .release = proc_close 4367 .release = proc_close
4365}; 4368};
4366 4369
4367static const struct file_operations proc_SSID_ops = { 4370static const struct file_operations proc_SSID_ops = {
4371 .owner = THIS_MODULE,
4368 .read = proc_read, 4372 .read = proc_read,
4369 .write = proc_write, 4373 .write = proc_write,
4370 .open = proc_SSID_open, 4374 .open = proc_SSID_open,
@@ -4372,6 +4376,7 @@ static const struct file_operations proc_SSID_ops = {
4372}; 4376};
4373 4377
4374static const struct file_operations proc_BSSList_ops = { 4378static const struct file_operations proc_BSSList_ops = {
4379 .owner = THIS_MODULE,
4375 .read = proc_read, 4380 .read = proc_read,
4376 .write = proc_write, 4381 .write = proc_write,
4377 .open = proc_BSSList_open, 4382 .open = proc_BSSList_open,
@@ -4379,6 +4384,7 @@ static const struct file_operations proc_BSSList_ops = {
4379}; 4384};
4380 4385
4381static const struct file_operations proc_APList_ops = { 4386static const struct file_operations proc_APList_ops = {
4387 .owner = THIS_MODULE,
4382 .read = proc_read, 4388 .read = proc_read,
4383 .write = proc_write, 4389 .write = proc_write,
4384 .open = proc_APList_open, 4390 .open = proc_APList_open,
@@ -4386,6 +4392,7 @@ static const struct file_operations proc_APList_ops = {
4386}; 4392};
4387 4393
4388static const struct file_operations proc_config_ops = { 4394static const struct file_operations proc_config_ops = {
4395 .owner = THIS_MODULE,
4389 .read = proc_read, 4396 .read = proc_read,
4390 .write = proc_write, 4397 .write = proc_write,
4391 .open = proc_config_open, 4398 .open = proc_config_open,
@@ -4393,6 +4400,7 @@ static const struct file_operations proc_config_ops = {
4393}; 4400};
4394 4401
4395static const struct file_operations proc_wepkey_ops = { 4402static const struct file_operations proc_wepkey_ops = {
4403 .owner = THIS_MODULE,
4396 .read = proc_read, 4404 .read = proc_read,
4397 .write = proc_write, 4405 .write = proc_write,
4398 .open = proc_wepkey_open, 4406 .open = proc_wepkey_open,
@@ -4411,10 +4419,6 @@ struct proc_data {
4411 void (*on_close) (struct inode *, struct file *); 4419 void (*on_close) (struct inode *, struct file *);
4412}; 4420};
4413 4421
4414#ifndef SETPROC_OPS
4415#define SETPROC_OPS(entry, ops) (entry)->proc_fops = &(ops)
4416#endif
4417
4418static int setup_proc_entry( struct net_device *dev, 4422static int setup_proc_entry( struct net_device *dev,
4419 struct airo_info *apriv ) { 4423 struct airo_info *apriv ) {
4420 struct proc_dir_entry *entry; 4424 struct proc_dir_entry *entry;
@@ -4430,100 +4434,76 @@ static int setup_proc_entry( struct net_device *dev,
4430 apriv->proc_entry->owner = THIS_MODULE; 4434 apriv->proc_entry->owner = THIS_MODULE;
4431 4435
4432 /* Setup the StatsDelta */ 4436 /* Setup the StatsDelta */
4433 entry = create_proc_entry("StatsDelta", 4437 entry = proc_create_data("StatsDelta",
4434 S_IFREG | (S_IRUGO&proc_perm), 4438 S_IFREG | (S_IRUGO&proc_perm),
4435 apriv->proc_entry); 4439 apriv->proc_entry, &proc_statsdelta_ops, dev);
4436 if (!entry) 4440 if (!entry)
4437 goto fail_stats_delta; 4441 goto fail_stats_delta;
4438 entry->uid = proc_uid; 4442 entry->uid = proc_uid;
4439 entry->gid = proc_gid; 4443 entry->gid = proc_gid;
4440 entry->data = dev;
4441 entry->owner = THIS_MODULE;
4442 SETPROC_OPS(entry, proc_statsdelta_ops);
4443 4444
4444 /* Setup the Stats */ 4445 /* Setup the Stats */
4445 entry = create_proc_entry("Stats", 4446 entry = proc_create_data("Stats",
4446 S_IFREG | (S_IRUGO&proc_perm), 4447 S_IFREG | (S_IRUGO&proc_perm),
4447 apriv->proc_entry); 4448 apriv->proc_entry, &proc_stats_ops, dev);
4448 if (!entry) 4449 if (!entry)
4449 goto fail_stats; 4450 goto fail_stats;
4450 entry->uid = proc_uid; 4451 entry->uid = proc_uid;
4451 entry->gid = proc_gid; 4452 entry->gid = proc_gid;
4452 entry->data = dev;
4453 entry->owner = THIS_MODULE;
4454 SETPROC_OPS(entry, proc_stats_ops);
4455 4453
4456 /* Setup the Status */ 4454 /* Setup the Status */
4457 entry = create_proc_entry("Status", 4455 entry = proc_create_data("Status",
4458 S_IFREG | (S_IRUGO&proc_perm), 4456 S_IFREG | (S_IRUGO&proc_perm),
4459 apriv->proc_entry); 4457 apriv->proc_entry, &proc_status_ops, dev);
4460 if (!entry) 4458 if (!entry)
4461 goto fail_status; 4459 goto fail_status;
4462 entry->uid = proc_uid; 4460 entry->uid = proc_uid;
4463 entry->gid = proc_gid; 4461 entry->gid = proc_gid;
4464 entry->data = dev;
4465 entry->owner = THIS_MODULE;
4466 SETPROC_OPS(entry, proc_status_ops);
4467 4462
4468 /* Setup the Config */ 4463 /* Setup the Config */
4469 entry = create_proc_entry("Config", 4464 entry = proc_create_data("Config",
4470 S_IFREG | proc_perm, 4465 S_IFREG | proc_perm,
4471 apriv->proc_entry); 4466 apriv->proc_entry, &proc_config_ops, dev);
4472 if (!entry) 4467 if (!entry)
4473 goto fail_config; 4468 goto fail_config;
4474 entry->uid = proc_uid; 4469 entry->uid = proc_uid;
4475 entry->gid = proc_gid; 4470 entry->gid = proc_gid;
4476 entry->data = dev;
4477 entry->owner = THIS_MODULE;
4478 SETPROC_OPS(entry, proc_config_ops);
4479 4471
4480 /* Setup the SSID */ 4472 /* Setup the SSID */
4481 entry = create_proc_entry("SSID", 4473 entry = proc_create_data("SSID",
4482 S_IFREG | proc_perm, 4474 S_IFREG | proc_perm,
4483 apriv->proc_entry); 4475 apriv->proc_entry, &proc_SSID_ops, dev);
4484 if (!entry) 4476 if (!entry)
4485 goto fail_ssid; 4477 goto fail_ssid;
4486 entry->uid = proc_uid; 4478 entry->uid = proc_uid;
4487 entry->gid = proc_gid; 4479 entry->gid = proc_gid;
4488 entry->data = dev;
4489 entry->owner = THIS_MODULE;
4490 SETPROC_OPS(entry, proc_SSID_ops);
4491 4480
4492 /* Setup the APList */ 4481 /* Setup the APList */
4493 entry = create_proc_entry("APList", 4482 entry = proc_create_data("APList",
4494 S_IFREG | proc_perm, 4483 S_IFREG | proc_perm,
4495 apriv->proc_entry); 4484 apriv->proc_entry, &proc_APList_ops, dev);
4496 if (!entry) 4485 if (!entry)
4497 goto fail_aplist; 4486 goto fail_aplist;
4498 entry->uid = proc_uid; 4487 entry->uid = proc_uid;
4499 entry->gid = proc_gid; 4488 entry->gid = proc_gid;
4500 entry->data = dev;
4501 entry->owner = THIS_MODULE;
4502 SETPROC_OPS(entry, proc_APList_ops);
4503 4489
4504 /* Setup the BSSList */ 4490 /* Setup the BSSList */
4505 entry = create_proc_entry("BSSList", 4491 entry = proc_create_data("BSSList",
4506 S_IFREG | proc_perm, 4492 S_IFREG | proc_perm,
4507 apriv->proc_entry); 4493 apriv->proc_entry, &proc_BSSList_ops, dev);
4508 if (!entry) 4494 if (!entry)
4509 goto fail_bsslist; 4495 goto fail_bsslist;
4510 entry->uid = proc_uid; 4496 entry->uid = proc_uid;
4511 entry->gid = proc_gid; 4497 entry->gid = proc_gid;
4512 entry->data = dev;
4513 entry->owner = THIS_MODULE;
4514 SETPROC_OPS(entry, proc_BSSList_ops);
4515 4498
4516 /* Setup the WepKey */ 4499 /* Setup the WepKey */
4517 entry = create_proc_entry("WepKey", 4500 entry = proc_create_data("WepKey",
4518 S_IFREG | proc_perm, 4501 S_IFREG | proc_perm,
4519 apriv->proc_entry); 4502 apriv->proc_entry, &proc_wepkey_ops, dev);
4520 if (!entry) 4503 if (!entry)
4521 goto fail_wepkey; 4504 goto fail_wepkey;
4522 entry->uid = proc_uid; 4505 entry->uid = proc_uid;
4523 entry->gid = proc_gid; 4506 entry->gid = proc_gid;
4524 entry->data = dev;
4525 entry->owner = THIS_MODULE;
4526 SETPROC_OPS(entry, proc_wepkey_ops);
4527 4507
4528 return 0; 4508 return 0;
4529 4509
@@ -5625,9 +5605,9 @@ static int __init airo_init_module( void )
5625 int have_isa_dev = 0; 5605 int have_isa_dev = 0;
5626#endif 5606#endif
5627 5607
5628 airo_entry = create_proc_entry("aironet", 5608 airo_entry = create_proc_entry("driver/aironet",
5629 S_IFDIR | airo_perm, 5609 S_IFDIR | airo_perm,
5630 proc_root_driver); 5610 NULL);
5631 5611
5632 if (airo_entry) { 5612 if (airo_entry) {
5633 airo_entry->uid = proc_uid; 5613 airo_entry->uid = proc_uid;
@@ -5651,7 +5631,7 @@ static int __init airo_init_module( void )
5651 airo_print_info("", "Finished probing for PCI adapters"); 5631 airo_print_info("", "Finished probing for PCI adapters");
5652 5632
5653 if (i) { 5633 if (i) {
5654 remove_proc_entry("aironet", proc_root_driver); 5634 remove_proc_entry("driver/aironet", NULL);
5655 return i; 5635 return i;
5656 } 5636 }
5657#endif 5637#endif
@@ -5673,7 +5653,7 @@ static void __exit airo_cleanup_module( void )
5673#ifdef CONFIG_PCI 5653#ifdef CONFIG_PCI
5674 pci_unregister_driver(&airo_driver); 5654 pci_unregister_driver(&airo_driver);
5675#endif 5655#endif
5676 remove_proc_entry("aironet", proc_root_driver); 5656 remove_proc_entry("driver/aironet", NULL);
5677} 5657}
5678 5658
5679/* 5659/*
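The airo.c conversion above replaces create_proc_entry() plus the manual entry->data, entry->owner and SETPROC_OPS() assignments with a single proc_create_data() call, so the entry is only published once its file_operations and private data are in place. A hedged sketch of the same registration style for a 2.6.26-era kernel; the example_* names, the entry name and the seq_file plumbing are illustrative, not the driver's:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_printf(m, "private data: %p\n", m->private);
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        /* PDE(inode)->data is the pointer passed to proc_create_data(). */
        return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_fops = {
        .owner   = THIS_MODULE,
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

int example_register(struct proc_dir_entry *parent, void *data)
{
        struct proc_dir_entry *entry;

        /* One call replaces create_proc_entry() plus the manual
         * entry->data / entry->owner / proc_fops assignments. */
        entry = proc_create_data("example", S_IFREG | S_IRUGO, parent,
                                 &example_fops, data);
        return entry ? 0 : -ENOMEM;
}

The driver still sets entry->uid and entry->gid afterwards; proc_create_data() only closes the window in which the entry was visible without its proc_fops.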
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index e18305b781c9..4e5c8fc35200 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -58,10 +58,6 @@
58#include "reg.h" 58#include "reg.h"
59#include "debug.h" 59#include "debug.h"
60 60
61/* unaligned little endian access */
62#define LE_READ_2(_p) (le16_to_cpu(get_unaligned((__le16 *)(_p))))
63#define LE_READ_4(_p) (le32_to_cpu(get_unaligned((__le32 *)(_p))))
64
65enum { 61enum {
66 ATH_LED_TX, 62 ATH_LED_TX,
67 ATH_LED_RX, 63 ATH_LED_RX,
@@ -2909,9 +2905,9 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2909 if (!mclist) 2905 if (!mclist)
2910 break; 2906 break;
2911 /* calculate XOR of eight 6-bit values */ 2907 /* calculate XOR of eight 6-bit values */
2912 val = LE_READ_4(mclist->dmi_addr + 0); 2908 val = get_unaligned_le32(mclist->dmi_addr + 0);
2913 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2909 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2914 val = LE_READ_4(mclist->dmi_addr + 3); 2910 val = get_unaligned_le32(mclist->dmi_addr + 3);
2915 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2911 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2916 pos &= 0x3f; 2912 pos &= 0x3f;
2917 mfilt[pos / 32] |= (1 << (pos % 32)); 2913 mfilt[pos / 32] |= (1 << (pos % 32));
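Several hunks in this series (airo, ath5k, b43, b43legacy, iwlwifi, libertas) switch open-coded le16_to_cpu(get_unaligned((__le16 *)p)) style accesses to the get_unaligned_le16()/get_unaligned_le32() and put_unaligned_le16() helpers from asm/unaligned.h. A small illustrative sketch with made-up buffer contents:

#include <linux/kernel.h>
#include <asm/unaligned.h>

void unaligned_helpers_demo(void)
{
        u8 buf[8] = { 0x34, 0x12, 0x78, 0x56, 0x00, 0x00, 0x00, 0x00 };
        u16 v16;
        u32 v32;

        v16 = get_unaligned_le16(buf);          /* 0x1234, any alignment */
        v32 = get_unaligned_le32(buf);          /* 0x56781234 */

        put_unaligned_le16(0xabcd, buf + 1);    /* unaligned-safe store */
        put_unaligned_le32(v32 + v16, buf + 3);
}

The helpers combine the byte-order conversion and the unaligned access in one call, which is all these conversions change; the generated accesses are equivalent to the old open-coded form.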
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index eff2a158a411..37783cdd301a 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -691,6 +691,10 @@ struct b43_wl {
691 691
692 struct mutex mutex; 692 struct mutex mutex;
693 spinlock_t irq_lock; 693 spinlock_t irq_lock;
694 /* R/W lock for data transmission.
695 * Transmissions on 2+ queues can run concurrently, but somebody else
696 * might sync with TX by write_lock_irqsave()'ing. */
697 rwlock_t tx_lock;
694 /* Lock for LEDs access. */ 698 /* Lock for LEDs access. */
695 spinlock_t leds_lock; 699 spinlock_t leds_lock;
696 /* Lock for SHM access. */ 700 /* Lock for SHM access. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4bf8a99099fe..8fdba9415c04 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -729,6 +729,7 @@ static void b43_synchronize_irq(struct b43_wldev *dev)
729 */ 729 */
730void b43_dummy_transmission(struct b43_wldev *dev) 730void b43_dummy_transmission(struct b43_wldev *dev)
731{ 731{
732 struct b43_wl *wl = dev->wl;
732 struct b43_phy *phy = &dev->phy; 733 struct b43_phy *phy = &dev->phy;
733 unsigned int i, max_loop; 734 unsigned int i, max_loop;
734 u16 value; 735 u16 value;
@@ -755,6 +756,9 @@ void b43_dummy_transmission(struct b43_wldev *dev)
755 return; 756 return;
756 } 757 }
757 758
759 spin_lock_irq(&wl->irq_lock);
760 write_lock(&wl->tx_lock);
761
758 for (i = 0; i < 5; i++) 762 for (i = 0; i < 5; i++)
759 b43_ram_write(dev, i * 4, buffer[i]); 763 b43_ram_write(dev, i * 4, buffer[i]);
760 764
@@ -795,6 +799,9 @@ void b43_dummy_transmission(struct b43_wldev *dev)
795 } 799 }
796 if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) 800 if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5)
797 b43_radio_write16(dev, 0x0051, 0x0037); 801 b43_radio_write16(dev, 0x0051, 0x0037);
802
803 write_unlock(&wl->tx_lock);
804 spin_unlock_irq(&wl->irq_lock);
798} 805}
799 806
800static void key_write(struct b43_wldev *dev, 807static void key_write(struct b43_wldev *dev,
@@ -2171,7 +2178,7 @@ static int b43_write_initvals(struct b43_wldev *dev,
2171 goto err_format; 2178 goto err_format;
2172 array_size -= sizeof(iv->data.d32); 2179 array_size -= sizeof(iv->data.d32);
2173 2180
2174 value = be32_to_cpu(get_unaligned(&iv->data.d32)); 2181 value = get_unaligned_be32(&iv->data.d32);
2175 b43_write32(dev, offset, value); 2182 b43_write32(dev, offset, value);
2176 2183
2177 iv = (const struct b43_iv *)((const uint8_t *)iv + 2184 iv = (const struct b43_iv *)((const uint8_t *)iv +
@@ -2840,24 +2847,31 @@ static int b43_op_tx(struct ieee80211_hw *hw,
2840{ 2847{
2841 struct b43_wl *wl = hw_to_b43_wl(hw); 2848 struct b43_wl *wl = hw_to_b43_wl(hw);
2842 struct b43_wldev *dev = wl->current_dev; 2849 struct b43_wldev *dev = wl->current_dev;
2843 int err = -ENODEV; 2850 unsigned long flags;
2851 int err;
2844 2852
2845 if (unlikely(skb->len < 2 + 2 + 6)) { 2853 if (unlikely(skb->len < 2 + 2 + 6)) {
2846 /* Too short, this can't be a valid frame. */ 2854 /* Too short, this can't be a valid frame. */
2847 return -EINVAL; 2855 dev_kfree_skb_any(skb);
2856 return NETDEV_TX_OK;
2848 } 2857 }
2849 B43_WARN_ON(skb_shinfo(skb)->nr_frags); 2858 B43_WARN_ON(skb_shinfo(skb)->nr_frags);
2850
2851 if (unlikely(!dev)) 2859 if (unlikely(!dev))
2852 goto out; 2860 return NETDEV_TX_BUSY;
2853 if (unlikely(b43_status(dev) < B43_STAT_STARTED)) 2861
2854 goto out; 2862 /* Transmissions on separate queues can run concurrently. */
2855 /* TX is done without a global lock. */ 2863 read_lock_irqsave(&wl->tx_lock, flags);
2856 if (b43_using_pio_transfers(dev)) 2864
2857 err = b43_pio_tx(dev, skb, ctl); 2865 err = -ENODEV;
2858 else 2866 if (likely(b43_status(dev) >= B43_STAT_STARTED)) {
2859 err = b43_dma_tx(dev, skb, ctl); 2867 if (b43_using_pio_transfers(dev))
2860out: 2868 err = b43_pio_tx(dev, skb, ctl);
2869 else
2870 err = b43_dma_tx(dev, skb, ctl);
2871 }
2872
2873 read_unlock_irqrestore(&wl->tx_lock, flags);
2874
2861 if (unlikely(err)) 2875 if (unlikely(err))
2862 return NETDEV_TX_BUSY; 2876 return NETDEV_TX_BUSY;
2863 return NETDEV_TX_OK; 2877 return NETDEV_TX_OK;
@@ -3476,7 +3490,9 @@ static void b43_wireless_core_stop(struct b43_wldev *dev)
3476 spin_unlock_irqrestore(&wl->irq_lock, flags); 3490 spin_unlock_irqrestore(&wl->irq_lock, flags);
3477 b43_synchronize_irq(dev); 3491 b43_synchronize_irq(dev);
3478 3492
3493 write_lock_irqsave(&wl->tx_lock, flags);
3479 b43_set_status(dev, B43_STAT_INITIALIZED); 3494 b43_set_status(dev, B43_STAT_INITIALIZED);
3495 write_unlock_irqrestore(&wl->tx_lock, flags);
3480 3496
3481 b43_pio_stop(dev); 3497 b43_pio_stop(dev);
3482 mutex_unlock(&wl->mutex); 3498 mutex_unlock(&wl->mutex);
@@ -3485,8 +3501,6 @@ static void b43_wireless_core_stop(struct b43_wldev *dev)
3485 cancel_delayed_work_sync(&dev->periodic_work); 3501 cancel_delayed_work_sync(&dev->periodic_work);
3486 mutex_lock(&wl->mutex); 3502 mutex_lock(&wl->mutex);
3487 3503
3488 ieee80211_stop_queues(wl->hw); //FIXME this could cause a deadlock, as mac80211 seems buggy.
3489
3490 b43_mac_suspend(dev); 3504 b43_mac_suspend(dev);
3491 free_irq(dev->dev->irq, dev); 3505 free_irq(dev->dev->irq, dev);
3492 b43dbg(wl, "Wireless interface stopped\n"); 3506 b43dbg(wl, "Wireless interface stopped\n");
@@ -4326,6 +4340,14 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4326 err = -EOPNOTSUPP; 4340 err = -EOPNOTSUPP;
4327 goto err_powerdown; 4341 goto err_powerdown;
4328 } 4342 }
4343 if (1 /* disable A-PHY */) {
4344 /* FIXME: For now we disable the A-PHY on multi-PHY devices. */
4345 if (dev->phy.type != B43_PHYTYPE_N) {
4346 have_2ghz_phy = 1;
4347 have_5ghz_phy = 0;
4348 }
4349 }
4350
4329 dev->phy.gmode = have_2ghz_phy; 4351 dev->phy.gmode = have_2ghz_phy;
4330 tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; 4352 tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
4331 b43_wireless_core_reset(dev, tmp); 4353 b43_wireless_core_reset(dev, tmp);
@@ -4490,6 +4512,7 @@ static int b43_wireless_init(struct ssb_device *dev)
4490 memset(wl, 0, sizeof(*wl)); 4512 memset(wl, 0, sizeof(*wl));
4491 wl->hw = hw; 4513 wl->hw = hw;
4492 spin_lock_init(&wl->irq_lock); 4514 spin_lock_init(&wl->irq_lock);
4515 rwlock_init(&wl->tx_lock);
4493 spin_lock_init(&wl->leds_lock); 4516 spin_lock_init(&wl->leds_lock);
4494 spin_lock_init(&wl->shm_lock); 4517 spin_lock_init(&wl->shm_lock);
4495 mutex_init(&wl->mutex); 4518 mutex_init(&wl->mutex);
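The b43 changes above add wl->tx_lock, an rwlock: each transmit path takes it for reading so several queues can transmit concurrently, while b43_wireless_core_stop() and b43_dummy_transmission() take it for writing (the latter together with irq_lock) to exclude all transmitters while the device state changes. A rough sketch of the locking shape only; the demo_* names and the int status field are stand-ins:

#include <linux/errno.h>
#include <linux/spinlock.h>

struct demo_wl {
        rwlock_t tx_lock;
        int status;
};

void demo_init(struct demo_wl *wl)
{
        rwlock_init(&wl->tx_lock);
        wl->status = 1;                 /* "started" */
}

int demo_tx(struct demo_wl *wl)
{
        unsigned long flags;
        int err = -ENODEV;

        /* Readers (per-queue TX paths) may hold this concurrently. */
        read_lock_irqsave(&wl->tx_lock, flags);
        if (wl->status)
                err = 0;                /* hand the frame to DMA/PIO here */
        read_unlock_irqrestore(&wl->tx_lock, flags);

        return err;
}

void demo_stop(struct demo_wl *wl)
{
        unsigned long flags;

        /* The writer excludes all TX paths while the status changes. */
        write_lock_irqsave(&wl->tx_lock, flags);
        wl->status = 0;
        write_unlock_irqrestore(&wl->tx_lock, flags);
}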
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index ef829ee8ffd4..14a5eea2573e 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1720,7 +1720,7 @@ static int b43legacy_write_initvals(struct b43legacy_wldev *dev,
1720 goto err_format; 1720 goto err_format;
1721 array_size -= sizeof(iv->data.d32); 1721 array_size -= sizeof(iv->data.d32);
1722 1722
1723 value = be32_to_cpu(get_unaligned(&iv->data.d32)); 1723 value = get_unaligned_be32(&iv->data.d32);
1724 b43legacy_write32(dev, offset, value); 1724 b43legacy_write32(dev, offset, value);
1725 1725
1726 iv = (const struct b43legacy_iv *)((const uint8_t *)iv + 1726 iv = (const struct b43legacy_iv *)((const uint8_t *)iv +
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 9a25f550fd16..d5b7a76fcaad 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -6,6 +6,10 @@ config IWLCORE
6 tristate "Intel Wireless Wifi Core" 6 tristate "Intel Wireless Wifi Core"
7 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 7 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
8 select IWLWIFI 8 select IWLWIFI
9 select MAC80211_LEDS if IWLWIFI_LEDS
10 select LEDS_CLASS if IWLWIFI_LEDS
11 select RFKILL if IWLWIFI_RFKILL
12 select RFKILL_INPUT if IWLWIFI_RFKILL
9 13
10config IWLWIFI_LEDS 14config IWLWIFI_LEDS
11 bool 15 bool
@@ -14,8 +18,6 @@ config IWLWIFI_LEDS
14config IWLWIFI_RFKILL 18config IWLWIFI_RFKILL
15 boolean "IWLWIFI RF kill support" 19 boolean "IWLWIFI RF kill support"
16 depends on IWLCORE 20 depends on IWLCORE
17 select RFKILL
18 select RFKILL_INPUT
19 21
20config IWL4965 22config IWL4965
21 tristate "Intel Wireless WiFi 4965AGN" 23 tristate "Intel Wireless WiFi 4965AGN"
@@ -55,8 +57,6 @@ config IWL4965_HT
55config IWL4965_LEDS 57config IWL4965_LEDS
56 bool "Enable LEDS features in iwl4965 driver" 58 bool "Enable LEDS features in iwl4965 driver"
57 depends on IWL4965 59 depends on IWL4965
58 select MAC80211_LEDS
59 select LEDS_CLASS
60 select IWLWIFI_LEDS 60 select IWLWIFI_LEDS
61 ---help--- 61 ---help---
62 This option enables LEDS for the iwlwifi drivers 62 This option enables LEDS for the iwlwifi drivers
@@ -112,6 +112,8 @@ config IWL3945
112 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 112 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
113 select FW_LOADER 113 select FW_LOADER
114 select IWLWIFI 114 select IWLWIFI
115 select MAC80211_LEDS if IWL3945_LEDS
116 select LEDS_CLASS if IWL3945_LEDS
115 ---help--- 117 ---help---
116 Select to build the driver supporting the: 118 Select to build the driver supporting the:
117 119
@@ -143,8 +145,6 @@ config IWL3945_SPECTRUM_MEASUREMENT
143config IWL3945_LEDS 145config IWL3945_LEDS
144 bool "Enable LEDS features in iwl3945 driver" 146 bool "Enable LEDS features in iwl3945 driver"
145 depends on IWL3945 147 depends on IWL3945
146 select MAC80211_LEDS
147 select LEDS_CLASS
148 ---help--- 148 ---help---
149 This option enables LEDS for the iwl3945 driver. 149 This option enables LEDS for the iwl3945 driver.
150 150
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 598e4eef4f40..d3406830c8e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -554,40 +554,36 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
554 iwl3945_rt->rt_hdr.it_pad = 0; 554 iwl3945_rt->rt_hdr.it_pad = 0;
555 555
556 /* total header + data */ 556 /* total header + data */
557 put_unaligned(cpu_to_le16(sizeof(*iwl3945_rt)), 557 put_unaligned_le16(sizeof(*iwl3945_rt), &iwl3945_rt->rt_hdr.it_len);
558 &iwl3945_rt->rt_hdr.it_len);
559 558
560 /* Indicate all the fields we add to the radiotap header */ 559 /* Indicate all the fields we add to the radiotap header */
561 put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | 560 put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
562 (1 << IEEE80211_RADIOTAP_FLAGS) | 561 (1 << IEEE80211_RADIOTAP_FLAGS) |
563 (1 << IEEE80211_RADIOTAP_RATE) | 562 (1 << IEEE80211_RADIOTAP_RATE) |
564 (1 << IEEE80211_RADIOTAP_CHANNEL) | 563 (1 << IEEE80211_RADIOTAP_CHANNEL) |
565 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 564 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
566 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | 565 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
567 (1 << IEEE80211_RADIOTAP_ANTENNA)), 566 (1 << IEEE80211_RADIOTAP_ANTENNA),
568 &iwl3945_rt->rt_hdr.it_present); 567 &iwl3945_rt->rt_hdr.it_present);
569 568
570 /* Zero the flags, we'll add to them as we go */ 569 /* Zero the flags, we'll add to them as we go */
571 iwl3945_rt->rt_flags = 0; 570 iwl3945_rt->rt_flags = 0;
572 571
573 put_unaligned(cpu_to_le64(tsf), &iwl3945_rt->rt_tsf); 572 put_unaligned_le64(tsf, &iwl3945_rt->rt_tsf);
574 573
575 iwl3945_rt->rt_dbmsignal = signal; 574 iwl3945_rt->rt_dbmsignal = signal;
576 iwl3945_rt->rt_dbmnoise = noise; 575 iwl3945_rt->rt_dbmnoise = noise;
577 576
578 /* Convert the channel frequency and set the flags */ 577 /* Convert the channel frequency and set the flags */
579 put_unaligned(cpu_to_le16(stats->freq), &iwl3945_rt->rt_channelMHz); 578 put_unaligned_le16(stats->freq, &iwl3945_rt->rt_channelMHz);
580 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) 579 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
581 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM | 580 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
582 IEEE80211_CHAN_5GHZ),
583 &iwl3945_rt->rt_chbitmask); 581 &iwl3945_rt->rt_chbitmask);
584 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) 582 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
585 put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK | 583 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
586 IEEE80211_CHAN_2GHZ),
587 &iwl3945_rt->rt_chbitmask); 584 &iwl3945_rt->rt_chbitmask);
588 else /* 802.11g */ 585 else /* 802.11g */
589 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM | 586 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
590 IEEE80211_CHAN_2GHZ),
591 &iwl3945_rt->rt_chbitmask); 587 &iwl3945_rt->rt_chbitmask);
592 588
593 if (rate == -1) 589 if (rate == -1)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 45c1c5533bf0..c7695a215a39 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -742,7 +742,6 @@ struct iwl3945_priv {
742 u8 direct_ssid_len; 742 u8 direct_ssid_len;
743 u8 direct_ssid[IW_ESSID_MAX_SIZE]; 743 u8 direct_ssid[IW_ESSID_MAX_SIZE];
744 struct iwl3945_scan_cmd *scan; 744 struct iwl3945_scan_cmd *scan;
745 u8 only_active_channel;
746 745
747 /* spinlock */ 746 /* spinlock */
748 spinlock_t lock; /* protect general shared data */ 747 spinlock_t lock; /* protect general shared data */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-4965.h
index 9ed13cb0a2a9..581b98556c86 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.h
@@ -996,7 +996,6 @@ struct iwl_priv {
996 u8 direct_ssid_len; 996 u8 direct_ssid_len;
997 u8 direct_ssid[IW_ESSID_MAX_SIZE]; 997 u8 direct_ssid[IW_ESSID_MAX_SIZE];
998 struct iwl4965_scan_cmd *scan; 998 struct iwl4965_scan_cmd *scan;
999 u8 only_active_channel;
1000 999
1001 /* spinlock */ 1000 /* spinlock */
1002 spinlock_t lock; /* protect general shared data */ 1001 spinlock_t lock; /* protect general shared data */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index a1a0b3c581f1..13925b627e3b 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4968,17 +4968,6 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4968 if (channels[i].flags & IEEE80211_CHAN_DISABLED) 4968 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4969 continue; 4969 continue;
4970 4970
4971 if (channels[i].hw_value ==
4972 le16_to_cpu(priv->active_rxon.channel)) {
4973 if (iwl3945_is_associated(priv)) {
4974 IWL_DEBUG_SCAN
4975 ("Skipping current channel %d\n",
4976 le16_to_cpu(priv->active_rxon.channel));
4977 continue;
4978 }
4979 } else if (priv->only_active_channel)
4980 continue;
4981
4982 scan_ch->channel = channels[i].hw_value; 4971 scan_ch->channel = channels[i].hw_value;
4983 4972
4984 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel); 4973 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
@@ -6303,12 +6292,17 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6303 priv->direct_ssid, priv->direct_ssid_len); 6292 priv->direct_ssid, priv->direct_ssid_len);
6304 direct_mask = 1; 6293 direct_mask = 1;
6305 } else if (!iwl3945_is_associated(priv) && priv->essid_len) { 6294 } else if (!iwl3945_is_associated(priv) && priv->essid_len) {
6295 IWL_DEBUG_SCAN
6296 ("Kicking off one direct scan for '%s' when not associated\n",
6297 iwl3945_escape_essid(priv->essid, priv->essid_len));
6306 scan->direct_scan[0].id = WLAN_EID_SSID; 6298 scan->direct_scan[0].id = WLAN_EID_SSID;
6307 scan->direct_scan[0].len = priv->essid_len; 6299 scan->direct_scan[0].len = priv->essid_len;
6308 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); 6300 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
6309 direct_mask = 1; 6301 direct_mask = 1;
6310 } else 6302 } else {
6303 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
6311 direct_mask = 0; 6304 direct_mask = 0;
6305 }
6312 6306
6313 /* We don't build a direct scan probe request; the uCode will do 6307 /* We don't build a direct scan probe request; the uCode will do
6314 * that based on the direct_mask added to each channel entry */ 6308 * that based on the direct_mask added to each channel entry */
@@ -6346,23 +6340,18 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6346 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) 6340 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
6347 scan->filter_flags = RXON_FILTER_PROMISC_MSK; 6341 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
6348 6342
6349 if (direct_mask) { 6343 if (direct_mask)
6350 IWL_DEBUG_SCAN
6351 ("Initiating direct scan for %s.\n",
6352 iwl3945_escape_essid(priv->essid, priv->essid_len));
6353 scan->channel_count = 6344 scan->channel_count =
6354 iwl3945_get_channels_for_scan( 6345 iwl3945_get_channels_for_scan(
6355 priv, band, 1, /* active */ 6346 priv, band, 1, /* active */
6356 direct_mask, 6347 direct_mask,
6357 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 6348 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6358 } else { 6349 else
6359 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
6360 scan->channel_count = 6350 scan->channel_count =
6361 iwl3945_get_channels_for_scan( 6351 iwl3945_get_channels_for_scan(
6362 priv, band, 0, /* passive */ 6352 priv, band, 0, /* passive */
6363 direct_mask, 6353 direct_mask,
6364 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 6354 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6365 }
6366 6355
6367 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 6356 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
6368 scan->channel_count * sizeof(struct iwl3945_scan_channel); 6357 scan->channel_count * sizeof(struct iwl3945_scan_channel);
@@ -7314,8 +7303,6 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7314 return; 7303 return;
7315 } 7304 }
7316 7305
7317 priv->only_active_channel = 0;
7318
7319 iwl3945_set_rate(priv); 7306 iwl3945_set_rate(priv);
7320 7307
7321 mutex_unlock(&priv->mutex); 7308 mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index d0bbcaaeb94c..883b42f7e998 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -4633,17 +4633,6 @@ static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
4633 if (channels[i].flags & IEEE80211_CHAN_DISABLED) 4633 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4634 continue; 4634 continue;
4635 4635
4636 if (ieee80211_frequency_to_channel(channels[i].center_freq) ==
4637 le16_to_cpu(priv->active_rxon.channel)) {
4638 if (iwl_is_associated(priv)) {
4639 IWL_DEBUG_SCAN
4640 ("Skipping current channel %d\n",
4641 le16_to_cpu(priv->active_rxon.channel));
4642 continue;
4643 }
4644 } else if (priv->only_active_channel)
4645 continue;
4646
4647 scan_ch->channel = ieee80211_frequency_to_channel(channels[i].center_freq); 4636 scan_ch->channel = ieee80211_frequency_to_channel(channels[i].center_freq);
4648 4637
4649 ch_info = iwl_get_channel_info(priv, band, 4638 ch_info = iwl_get_channel_info(priv, band,
@@ -5824,11 +5813,15 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
5824 priv->direct_ssid, priv->direct_ssid_len); 5813 priv->direct_ssid, priv->direct_ssid_len);
5825 direct_mask = 1; 5814 direct_mask = 1;
5826 } else if (!iwl_is_associated(priv) && priv->essid_len) { 5815 } else if (!iwl_is_associated(priv) && priv->essid_len) {
5816 IWL_DEBUG_SCAN
5817 ("Kicking off one direct scan for '%s' when not associated\n",
5818 iwl4965_escape_essid(priv->essid, priv->essid_len));
5827 scan->direct_scan[0].id = WLAN_EID_SSID; 5819 scan->direct_scan[0].id = WLAN_EID_SSID;
5828 scan->direct_scan[0].len = priv->essid_len; 5820 scan->direct_scan[0].len = priv->essid_len;
5829 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); 5821 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
5830 direct_mask = 1; 5822 direct_mask = 1;
5831 } else { 5823 } else {
5824 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
5832 direct_mask = 0; 5825 direct_mask = 0;
5833 } 5826 }
5834 5827
@@ -5881,23 +5874,18 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
5881 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) 5874 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
5882 scan->filter_flags = RXON_FILTER_PROMISC_MSK; 5875 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
5883 5876
5884 if (direct_mask) { 5877 if (direct_mask)
5885 IWL_DEBUG_SCAN
5886 ("Initiating direct scan for %s.\n",
5887 iwl4965_escape_essid(priv->essid, priv->essid_len));
5888 scan->channel_count = 5878 scan->channel_count =
5889 iwl4965_get_channels_for_scan( 5879 iwl4965_get_channels_for_scan(
5890 priv, band, 1, /* active */ 5880 priv, band, 1, /* active */
5891 direct_mask, 5881 direct_mask,
5892 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 5882 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
5893 } else { 5883 else
5894 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
5895 scan->channel_count = 5884 scan->channel_count =
5896 iwl4965_get_channels_for_scan( 5885 iwl4965_get_channels_for_scan(
5897 priv, band, 0, /* passive */ 5886 priv, band, 0, /* passive */
5898 direct_mask, 5887 direct_mask,
5899 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 5888 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
5900 }
5901 5889
5902 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 5890 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
5903 scan->channel_count * sizeof(struct iwl4965_scan_channel); 5891 scan->channel_count * sizeof(struct iwl4965_scan_channel);
@@ -7061,8 +7049,6 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7061 return; 7049 return;
7062 } 7050 }
7063 7051
7064 priv->only_active_channel = 0;
7065
7066 iwl4965_set_rate(priv); 7052 iwl4965_set_rate(priv);
7067 7053
7068 mutex_unlock(&priv->mutex); 7054 mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index e72c97a0d6c1..d448c9702a0f 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -298,7 +298,8 @@ static int lbs_do_scan(struct lbs_private *priv, uint8_t bsstype,
298 uint8_t *tlv; /* pointer into our current, growing TLV storage area */ 298 uint8_t *tlv; /* pointer into our current, growing TLV storage area */
299 299
300 lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, chan_count %d", 300 lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, chan_count %d",
301 bsstype, chan_list[0].channumber, chan_count); 301 bsstype, chan_list ? chan_list[0].channumber : -1,
302 chan_count);
302 303
303 /* create the fixed part for scan command */ 304 /* create the fixed part for scan command */
304 scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL); 305 scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL);
@@ -522,7 +523,7 @@ static int lbs_process_bss(struct bss_descriptor *bss,
522 523
523 if (*bytesleft >= sizeof(beaconsize)) { 524 if (*bytesleft >= sizeof(beaconsize)) {
524 /* Extract & convert beacon size from the command buffer */ 525 /* Extract & convert beacon size from the command buffer */
525 beaconsize = le16_to_cpu(get_unaligned((__le16 *)*pbeaconinfo)); 526 beaconsize = get_unaligned_le16(*pbeaconinfo);
526 *bytesleft -= sizeof(beaconsize); 527 *bytesleft -= sizeof(beaconsize);
527 *pbeaconinfo += sizeof(beaconsize); 528 *pbeaconinfo += sizeof(beaconsize);
528 } 529 }
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index b41187af1306..560b9c73c0b9 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -363,7 +363,7 @@ static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
363 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 363 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
364 364
365 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 365 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
366 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00 | preamble_mask); 366 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
367 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 367 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
368 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); 368 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10));
369 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 369 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg);
@@ -1308,7 +1308,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1308 1308
1309 if (value == LED_MODE_TXRX_ACTIVITY) { 1309 if (value == LED_MODE_TXRX_ACTIVITY) {
1310 rt2x00dev->led_qual.rt2x00dev = rt2x00dev; 1310 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
1311 rt2x00dev->led_radio.type = LED_TYPE_ACTIVITY; 1311 rt2x00dev->led_qual.type = LED_TYPE_ACTIVITY;
1312 rt2x00dev->led_qual.led_dev.brightness_set = 1312 rt2x00dev->led_qual.led_dev.brightness_set =
1313 rt2400pci_brightness_set; 1313 rt2400pci_brightness_set;
1314 rt2x00dev->led_qual.led_dev.blink_set = 1314 rt2x00dev->led_qual.led_dev.blink_set =
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 5ade097ed45e..a5ed54b69262 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -370,7 +370,7 @@ static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
370 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 370 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
371 371
372 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 372 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
373 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00 | preamble_mask); 373 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
374 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 374 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
375 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); 375 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10));
376 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 376 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg);
@@ -1485,7 +1485,7 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1485 1485
1486 if (value == LED_MODE_TXRX_ACTIVITY) { 1486 if (value == LED_MODE_TXRX_ACTIVITY) {
1487 rt2x00dev->led_qual.rt2x00dev = rt2x00dev; 1487 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
1488 rt2x00dev->led_radio.type = LED_TYPE_ACTIVITY; 1488 rt2x00dev->led_qual.type = LED_TYPE_ACTIVITY;
1489 rt2x00dev->led_qual.led_dev.brightness_set = 1489 rt2x00dev->led_qual.led_dev.brightness_set =
1490 rt2500pci_brightness_set; 1490 rt2500pci_brightness_set;
1491 rt2x00dev->led_qual.led_dev.blink_set = 1491 rt2x00dev->led_qual.led_dev.blink_set =
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 6bb07b339325..fdbd0ef2be4b 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1394,7 +1394,7 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1394 1394
1395 if (value == LED_MODE_TXRX_ACTIVITY) { 1395 if (value == LED_MODE_TXRX_ACTIVITY) {
1396 rt2x00dev->led_qual.rt2x00dev = rt2x00dev; 1396 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
1397 rt2x00dev->led_radio.type = LED_TYPE_ACTIVITY; 1397 rt2x00dev->led_qual.type = LED_TYPE_ACTIVITY;
1398 rt2x00dev->led_qual.led_dev.brightness_set = 1398 rt2x00dev->led_qual.led_dev.brightness_set =
1399 rt2500usb_brightness_set; 1399 rt2500usb_brightness_set;
1400 rt2x00dev->led_qual.led_dev.blink_set = 1400 rt2x00dev->led_qual.led_dev.blink_set =
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index f8fe7a139a8a..8d8657fb64dd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -114,6 +114,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
114 return status; 114 return status;
115 115
116 rt2x00leds_led_radio(rt2x00dev, true); 116 rt2x00leds_led_radio(rt2x00dev, true);
117 rt2x00led_led_activity(rt2x00dev, true);
117 118
118 __set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags); 119 __set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags);
119 120
@@ -157,6 +158,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
157 * Disable radio. 158 * Disable radio.
158 */ 159 */
159 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF); 160 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF);
161 rt2x00led_led_activity(rt2x00dev, false);
160 rt2x00leds_led_radio(rt2x00dev, false); 162 rt2x00leds_led_radio(rt2x00dev, false);
161} 163}
162 164
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index 40c1f5c1b805..b362a1cf3f8d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -72,6 +72,21 @@ void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi)
72 } 72 }
73} 73}
74 74
75void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled)
76{
77 struct rt2x00_led *led = &rt2x00dev->led_qual;
78 unsigned int brightness;
79
80 if ((led->type != LED_TYPE_ACTIVITY) || !(led->flags & LED_REGISTERED))
81 return;
82
83 brightness = enabled ? LED_FULL : LED_OFF;
84 if (brightness != led->led_dev.brightness) {
85 led->led_dev.brightness_set(&led->led_dev, brightness);
86 led->led_dev.brightness = brightness;
87 }
88}
89
75void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled) 90void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled)
76{ 91{
77 struct rt2x00_led *led = &rt2x00dev->led_assoc; 92 struct rt2x00_led *led = &rt2x00dev->led_assoc;
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 5be32fffc74c..41ee02cd2825 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -185,6 +185,7 @@ static inline void rt2x00rfkill_resume(struct rt2x00_dev *rt2x00dev)
185 */ 185 */
186#ifdef CONFIG_RT2X00_LIB_LEDS 186#ifdef CONFIG_RT2X00_LIB_LEDS
187void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi); 187void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi);
188void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled);
188void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled); 189void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled);
189void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled); 190void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled);
190void rt2x00leds_register(struct rt2x00_dev *rt2x00dev); 191void rt2x00leds_register(struct rt2x00_dev *rt2x00dev);
@@ -197,6 +198,11 @@ static inline void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev,
197{ 198{
198} 199}
199 200
201static inline void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev,
202 bool enabled)
203{
204}
205
200static inline void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, 206static inline void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev,
201 bool enabled) 207 bool enabled)
202{ 208{
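The rt2x00lib.h hunk above follows the usual kernel pattern for optional features: the real rt2x00led_led_activity() prototype is visible when CONFIG_RT2X00_LIB_LEDS is set, and an empty static inline stub takes its place otherwise, so the callers added in rt2x00dev.c need no #ifdefs. A generic sketch of that pattern with a hypothetical CONFIG_DEMO_LEDS option and demo_* names:

#include <linux/types.h>

struct demo_dev;

#ifdef CONFIG_DEMO_LEDS
void demo_led_activity(struct demo_dev *dev, bool enabled);
#else
static inline void demo_led_activity(struct demo_dev *dev, bool enabled)
{
        /* LED support compiled out: callers build and the call vanishes. */
}
#endif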
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 468a31c8c113..ae12dcdd3c24 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2087,7 +2087,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2087 2087
2088 if (value == LED_MODE_SIGNAL_STRENGTH) { 2088 if (value == LED_MODE_SIGNAL_STRENGTH) {
2089 rt2x00dev->led_qual.rt2x00dev = rt2x00dev; 2089 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
2090 rt2x00dev->led_radio.type = LED_TYPE_QUALITY; 2090 rt2x00dev->led_qual.type = LED_TYPE_QUALITY;
2091 rt2x00dev->led_qual.led_dev.brightness_set = 2091 rt2x00dev->led_qual.led_dev.brightness_set =
2092 rt61pci_brightness_set; 2092 rt61pci_brightness_set;
2093 rt2x00dev->led_qual.led_dev.blink_set = 2093 rt2x00dev->led_qual.led_dev.blink_set =
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a9efe25f1ea7..da19a3a91f4d 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1647,7 +1647,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1647 1647
1648 if (value == LED_MODE_SIGNAL_STRENGTH) { 1648 if (value == LED_MODE_SIGNAL_STRENGTH) {
1649 rt2x00dev->led_qual.rt2x00dev = rt2x00dev; 1649 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
1650 rt2x00dev->led_radio.type = LED_TYPE_QUALITY; 1650 rt2x00dev->led_qual.type = LED_TYPE_QUALITY;
1651 rt2x00dev->led_qual.led_dev.brightness_set = 1651 rt2x00dev->led_qual.led_dev.brightness_set =
1652 rt73usb_brightness_set; 1652 rt73usb_brightness_set;
1653 rt2x00dev->led_qual.led_dev.blink_set = 1653 rt2x00dev->led_qual.led_dev.blink_set =
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index bced3fe1cf8a..5dd23c93497d 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -768,41 +768,17 @@ static __u8 *UnStuffData(__u8 * src, __u8 * end, __u8 * dst,
768/* General routines for STRIP */ 768/* General routines for STRIP */
769 769
770/* 770/*
771 * get_baud returns the current baud rate, as one of the constants defined in
772 * termbits.h
773 * If the user has issued a baud rate override using the 'setserial' command
774 * and the logical current rate is set to 38.4, then the true baud rate
775 * currently in effect (57.6 or 115.2) is returned.
776 */
777static unsigned int get_baud(struct tty_struct *tty)
778{
779 if (!tty || !tty->termios)
780 return (0);
781 if ((tty->termios->c_cflag & CBAUD) == B38400 && tty->driver_data) {
782 struct async_struct *info =
783 (struct async_struct *) tty->driver_data;
784 if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
785 return (B57600);
786 if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
787 return (B115200);
788 }
789 return (tty->termios->c_cflag & CBAUD);
790}
791
792/*
793 * set_baud sets the baud rate to the rate defined by baudcode 771 * set_baud sets the baud rate to the rate defined by baudcode
794 * Note: The rate B38400 should be avoided, because the user may have
795 * issued a 'setserial' speed override to map that to a different speed.
796 * We could achieve a true rate of 38400 if we needed to by cancelling
797 * any user speed override that is in place, but that might annoy the
798 * user, so it is simplest to just avoid using 38400.
799 */ 772 */
800static void set_baud(struct tty_struct *tty, unsigned int baudcode) 773static void set_baud(struct tty_struct *tty, speed_t baudrate)
801{ 774{
802 struct ktermios old_termios = *(tty->termios); 775 struct ktermios old_termios;
803 tty->termios->c_cflag &= ~CBAUD; /* Clear the old baud setting */ 776
804 tty->termios->c_cflag |= baudcode; /* Set the new baud setting */ 777 mutex_lock(&tty->termios_mutex);
805 tty->driver->set_termios(tty, &old_termios); 778 old_termios = *(tty->termios);
779 tty_encode_baud_rate(tty, baudrate, baudrate);
780 tty->ops->set_termios(tty, &old_termios);
781 mutex_unlock(&tty->termios_mutex);
806} 782}
807 783
808/* 784/*
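The strip.c set_baud() rewrite above drops the private get_baud() helper and the Bxxxx CBAUD constants in favour of numeric speeds: tty_get_baud_rate() reads the effective rate and tty_encode_baud_rate() requests a new one, with the termios update serialised by tty->termios_mutex. A hedged restatement of that shape outside the diff, with a hypothetical function name:

#include <linux/tty.h>

void demo_set_baud(struct tty_struct *tty, speed_t baudrate)
{
        struct ktermios old_termios;

        mutex_lock(&tty->termios_mutex);
        old_termios = *(tty->termios);
        tty_encode_baud_rate(tty, baudrate, baudrate);  /* input + output rate */
        tty->ops->set_termios(tty, &old_termios);
        mutex_unlock(&tty->termios_mutex);
}

Because rates are now plain integers, strip_info->user_baud comparisons in the later hunks change from constants like B38400 and B57600 to 38400 and 57600.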
@@ -1217,7 +1193,7 @@ static void ResetRadio(struct strip *strip_info)
1217 strip_info->watchdog_doreset = jiffies + 1 * HZ; 1193 strip_info->watchdog_doreset = jiffies + 1 * HZ;
1218 1194
1219 /* If the user has selected a baud rate above 38.4 see what magic we have to do */ 1195 /* If the user has selected a baud rate above 38.4 see what magic we have to do */
1220 if (strip_info->user_baud > B38400) { 1196 if (strip_info->user_baud > 38400) {
1221 /* 1197 /*
1222 * Subtle stuff: Pay attention :-) 1198 * Subtle stuff: Pay attention :-)
1223 * If the serial port is currently at the user's selected (>38.4) rate, 1199 * If the serial port is currently at the user's selected (>38.4) rate,
@@ -1227,17 +1203,17 @@ static void ResetRadio(struct strip *strip_info)
1227 * issued the ATS304 command last time through, so this time we restore 1203 * issued the ATS304 command last time through, so this time we restore
1228 * the user's selected rate and issue the normal starmode reset string. 1204 * the user's selected rate and issue the normal starmode reset string.
1229 */ 1205 */
1230 if (strip_info->user_baud == get_baud(tty)) { 1206 if (strip_info->user_baud == tty_get_baud_rate(tty)) {
1231 static const char b0[] = "ate0q1s304=57600\r"; 1207 static const char b0[] = "ate0q1s304=57600\r";
1232 static const char b1[] = "ate0q1s304=115200\r"; 1208 static const char b1[] = "ate0q1s304=115200\r";
1233 static const StringDescriptor baudstring[2] = 1209 static const StringDescriptor baudstring[2] =
1234 { {b0, sizeof(b0) - 1} 1210 { {b0, sizeof(b0) - 1}
1235 , {b1, sizeof(b1) - 1} 1211 , {b1, sizeof(b1) - 1}
1236 }; 1212 };
1237 set_baud(tty, B19200); 1213 set_baud(tty, 19200);
1238 if (strip_info->user_baud == B57600) 1214 if (strip_info->user_baud == 57600)
1239 s = baudstring[0]; 1215 s = baudstring[0];
1240 else if (strip_info->user_baud == B115200) 1216 else if (strip_info->user_baud == 115200)
1241 s = baudstring[1]; 1217 s = baudstring[1];
1242 else 1218 else
1243 s = baudstring[1]; /* For now */ 1219 s = baudstring[1]; /* For now */
@@ -1245,7 +1221,7 @@ static void ResetRadio(struct strip *strip_info)
1245 set_baud(tty, strip_info->user_baud); 1221 set_baud(tty, strip_info->user_baud);
1246 } 1222 }
1247 1223
1248 tty->driver->write(tty, s.string, s.length); 1224 tty->ops->write(tty, s.string, s.length);
1249#ifdef EXT_COUNTERS 1225#ifdef EXT_COUNTERS
1250 strip_info->tx_ebytes += s.length; 1226 strip_info->tx_ebytes += s.length;
1251#endif 1227#endif
@@ -1267,7 +1243,7 @@ static void strip_write_some_more(struct tty_struct *tty)
1267 1243
1268 if (strip_info->tx_left > 0) { 1244 if (strip_info->tx_left > 0) {
1269 int num_written = 1245 int num_written =
1270 tty->driver->write(tty, strip_info->tx_head, 1246 tty->ops->write(tty, strip_info->tx_head,
1271 strip_info->tx_left); 1247 strip_info->tx_left);
1272 strip_info->tx_left -= num_written; 1248 strip_info->tx_left -= num_written;
1273 strip_info->tx_head += num_written; 1249 strip_info->tx_head += num_written;
@@ -2457,7 +2433,7 @@ static int strip_open_low(struct net_device *dev)
2457 strip_info->working = FALSE; 2433 strip_info->working = FALSE;
2458 strip_info->firmware_level = NoStructure; 2434 strip_info->firmware_level = NoStructure;
2459 strip_info->next_command = CompatibilityCommand; 2435 strip_info->next_command = CompatibilityCommand;
2460 strip_info->user_baud = get_baud(strip_info->tty); 2436 strip_info->user_baud = tty_get_baud_rate(strip_info->tty);
2461 2437
2462 printk(KERN_INFO "%s: Initializing Radio.\n", 2438 printk(KERN_INFO "%s: Initializing Radio.\n",
2463 strip_info->dev->name); 2439 strip_info->dev->name);
@@ -2632,6 +2608,13 @@ static int strip_open(struct tty_struct *tty)
2632 return -EEXIST; 2608 return -EEXIST;
2633 2609
2634 /* 2610 /*
2611 * We need a write method.
2612 */
2613
2614 if (tty->ops->write == NULL)
2615 return -EOPNOTSUPP;
2616
2617 /*
2635 * OK. Find a free STRIP channel to use. 2618 * OK. Find a free STRIP channel to use.
2636 */ 2619 */
2637 if ((strip_info = strip_alloc()) == NULL) 2620 if ((strip_info = strip_alloc()) == NULL)
@@ -2652,8 +2635,7 @@ static int strip_open(struct tty_struct *tty)
2652 tty->disc_data = strip_info; 2635 tty->disc_data = strip_info;
2653 tty->receive_room = 65536; 2636 tty->receive_room = 65536;
2654 2637
2655 if (tty->driver->flush_buffer) 2638 tty_driver_flush_buffer(tty);
2656 tty->driver->flush_buffer(tty);
2657 2639
2658 /* 2640 /*
2659 * Restore default settings 2641 * Restore default settings
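
The strip.c hunks above replace open-coded CBAUD twiddling and direct tty->driver calls with the tty-layer helpers tty_encode_baud_rate(), tty_get_baud_rate(), tty_driver_flush_buffer() and the new tty->ops vector. The following is a minimal, self-contained sketch of that calling pattern against the same ~2.6.26-era tty API; the example_* helpers are illustrative only and are not part of the driver.

#include <linux/tty.h>

static void example_set_line_speed(struct tty_struct *tty, speed_t baud)
{
        struct ktermios old_termios;

        mutex_lock(&tty->termios_mutex);
        old_termios = *(tty->termios);
        tty_encode_baud_rate(tty, baud, baud);  /* fills in tty->termios */
        if (tty->ops->set_termios)
                tty->ops->set_termios(tty, &old_termios);
        mutex_unlock(&tty->termios_mutex);
}

static speed_t example_current_speed(struct tty_struct *tty)
{
        return tty_get_baud_rate(tty);          /* rate as the tty layer sees it */
}

static int example_send(struct tty_struct *tty, const unsigned char *buf,
                        int len)
{
        tty_driver_flush_buffer(tty);           /* drop any stale output */
        return tty->ops->write(tty, buf, len);
}

Going through tty_encode_baud_rate() and tty_get_baud_rate() leaves any setserial speed override to the tty core, which appears to be why the old get_baud() special-casing in this file could be dropped.
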
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index e34675c2f8fc..5316074f39f0 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -545,11 +545,11 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
545 * be padded. Unaligned access might also happen if the length_info 545 * be padded. Unaligned access might also happen if the length_info
546 * structure is not present. 546 * structure is not present.
547 */ 547 */
548 if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) 548 if (get_unaligned_le16(&length_info->tag) == RX_LENGTH_INFO_TAG)
549 { 549 {
550 unsigned int l, k, n; 550 unsigned int l, k, n;
551 for (i = 0, l = 0;; i++) { 551 for (i = 0, l = 0;; i++) {
552 k = le16_to_cpu(get_unaligned(&length_info->length[i])); 552 k = get_unaligned_le16(&length_info->length[i]);
553 if (k == 0) 553 if (k == 0)
554 return; 554 return;
555 n = l+k; 555 n = l+k;
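
This hunk (and the yellowfin.c one a little further down) folds le16_to_cpu(get_unaligned(...)) into the combined get_unaligned_le16() helper. A short sketch of the equivalence; example_read_le16() is a made-up wrapper, not code from either driver.

#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

static u16 example_read_le16(const u8 *buf)
{
        /* old style: raw unaligned fetch, then an explicit byte-swap */
        u16 old_way = le16_to_cpu(get_unaligned((const __le16 *)buf));
        /* new helper: one call does the unaligned load and the conversion */
        u16 new_way = get_unaligned_le16(buf);

        WARN_ON(old_way != new_way);            /* the two forms are equivalent */
        return new_way;
}
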
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e62018a36133..8bddff150c70 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1803,7 +1803,7 @@ static void __exit netif_exit(void)
1803 if (is_initial_xendomain()) 1803 if (is_initial_xendomain())
1804 return; 1804 return;
1805 1805
1806 return xenbus_unregister_driver(&netfront); 1806 xenbus_unregister_driver(&netfront);
1807} 1807}
1808module_exit(netif_exit); 1808module_exit(netif_exit);
1809 1809
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 24640726f8bb..57e1f495b9fc 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1062,7 +1062,7 @@ static int yellowfin_rx(struct net_device *dev)
1062 buf_addr = rx_skb->data; 1062 buf_addr = rx_skb->data;
1063 data_size = (le32_to_cpu(desc->dbdma_cmd) - 1063 data_size = (le32_to_cpu(desc->dbdma_cmd) -
1064 le32_to_cpu(desc->result_status)) & 0xffff; 1064 le32_to_cpu(desc->result_status)) & 0xffff;
1065 frame_status = le16_to_cpu(get_unaligned((__le16*)&(buf_addr[data_size - 2]))); 1065 frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1066 if (yellowfin_debug > 4) 1066 if (yellowfin_debug > 4)
1067 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n", 1067 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
1068 frame_status); 1068 frame_status);
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index e07492be1f4a..208dd12825bc 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/nubus.h> 22#include <linux/nubus.h>
23#include <linux/proc_fs.h> 23#include <linux/proc_fs.h>
24#include <linux/seq_file.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/module.h> 26#include <linux/module.h>
26 27
@@ -28,38 +29,36 @@
28#include <asm/byteorder.h> 29#include <asm/byteorder.h>
29 30
30static int 31static int
31get_nubus_dev_info(char *buf, char **start, off_t pos, int count) 32nubus_devices_proc_show(struct seq_file *m, void *v)
32{ 33{
33 struct nubus_dev *dev = nubus_devices; 34 struct nubus_dev *dev = nubus_devices;
34 off_t at = 0;
35 int len, cnt;
36 35
37 cnt = 0; 36 while (dev) {
38 while (dev && count > cnt) { 37 seq_printf(m, "%x\t%04x %04x %04x %04x",
39 len = sprintf(buf, "%x\t%04x %04x %04x %04x",
40 dev->board->slot, 38 dev->board->slot,
41 dev->category, 39 dev->category,
42 dev->type, 40 dev->type,
43 dev->dr_sw, 41 dev->dr_sw,
44 dev->dr_hw); 42 dev->dr_hw);
45 len += sprintf(buf+len, 43 seq_printf(m, "\t%08lx\n", dev->board->slot_addr);
46 "\t%08lx",
47 dev->board->slot_addr);
48 buf[len++] = '\n';
49 at += len;
50 if (at >= pos) {
51 if (!*start) {
52 *start = buf + (pos - (at - len));
53 cnt = at - pos;
54 } else
55 cnt += len;
56 buf += len;
57 }
58 dev = dev->next; 44 dev = dev->next;
59 } 45 }
60 return (count > cnt) ? cnt : count; 46 return 0;
47}
48
49static int nubus_devices_proc_open(struct inode *inode, struct file *file)
50{
51 return single_open(file, nubus_devices_proc_show, NULL);
61} 52}
62 53
54static const struct file_operations nubus_devices_proc_fops = {
55 .owner = THIS_MODULE,
56 .open = nubus_devices_proc_open,
57 .read = seq_read,
58 .llseek = seq_lseek,
59 .release = single_release,
60};
61
63static struct proc_dir_entry *proc_bus_nubus_dir; 62static struct proc_dir_entry *proc_bus_nubus_dir;
64 63
65static void nubus_proc_subdir(struct nubus_dev* dev, 64static void nubus_proc_subdir(struct nubus_dev* dev,
@@ -171,8 +170,7 @@ void __init nubus_proc_init(void)
171{ 170{
172 if (!MACH_IS_MAC) 171 if (!MACH_IS_MAC)
173 return; 172 return;
174 proc_bus_nubus_dir = proc_mkdir("nubus", proc_bus); 173 proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL);
175 create_proc_info_entry("devices", 0, proc_bus_nubus_dir, 174 proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops);
176 get_nubus_dev_info);
177 proc_bus_nubus_add_devices(); 175 proc_bus_nubus_add_devices();
178} 176}
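
The nubus conversion above is an instance of the generic get_info/read_proc-to-seq_file migration: the show function simply seq_printf()s everything, single_open() handles offsets and buffering, and proc_create() replaces create_proc_entry() plus a manual ->proc_fops assignment. A stand-alone sketch of that pattern against the same-era procfs API; every "example" name here is hypothetical.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
        /* emit the whole file in one go; no offset bookkeeping needed */
        seq_printf(m, "hello from example\n");
        return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = example_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init example_init(void)
{
        /* proc_create() registers the entry with its fops in one step */
        if (!proc_create("example", 0, NULL, &example_proc_fops))
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
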
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 62db3c3fe4dc..07d2a8d4498f 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1551,8 +1551,7 @@ static int __init ccio_probe(struct parisc_device *dev)
1551{ 1551{
1552 int i; 1552 int i;
1553 struct ioc *ioc, **ioc_p = &ioc_list; 1553 struct ioc *ioc, **ioc_p = &ioc_list;
1554 struct proc_dir_entry *info_entry, *bitmap_entry; 1554
1555
1556 ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); 1555 ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
1557 if (ioc == NULL) { 1556 if (ioc == NULL) {
1558 printk(KERN_ERR MODULE_NAME ": memory allocation failure\n"); 1557 printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
@@ -1580,13 +1579,10 @@ static int __init ccio_probe(struct parisc_device *dev)
1580 HBA_DATA(dev->dev.platform_data)->iommu = ioc; 1579 HBA_DATA(dev->dev.platform_data)->iommu = ioc;
1581 1580
1582 if (ioc_count == 0) { 1581 if (ioc_count == 0) {
1583 info_entry = create_proc_entry(MODULE_NAME, 0, proc_runway_root); 1582 proc_create(MODULE_NAME, 0, proc_runway_root,
1584 if (info_entry) 1583 &ccio_proc_info_fops);
1585 info_entry->proc_fops = &ccio_proc_info_fops; 1584 proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
1586 1585 &ccio_proc_bitmap_fops);
1587 bitmap_entry = create_proc_entry(MODULE_NAME"-bitmap", 0, proc_runway_root);
1588 if (bitmap_entry)
1589 bitmap_entry->proc_fops = &ccio_proc_bitmap_fops;
1590 } 1586 }
1591 1587
1592 ioc_count++; 1588 ioc_count++;
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8c4d2c13d5f2..afc849bd3f58 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1895,7 +1895,9 @@ sba_driver_callback(struct parisc_device *dev)
1895 int i; 1895 int i;
1896 char *version; 1896 char *version;
1897 void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); 1897 void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
1898 struct proc_dir_entry *info_entry, *bitmap_entry, *root; 1898#ifdef CONFIG_PROC_FS
1899 struct proc_dir_entry *root;
1900#endif
1899 1901
1900 sba_dump_ranges(sba_addr); 1902 sba_dump_ranges(sba_addr);
1901 1903
@@ -1973,14 +1975,8 @@ sba_driver_callback(struct parisc_device *dev)
1973 break; 1975 break;
1974 } 1976 }
1975 1977
1976 info_entry = create_proc_entry("sba_iommu", 0, root); 1978 proc_create("sba_iommu", 0, root, &sba_proc_fops);
1977 bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root); 1979 proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
1978
1979 if (info_entry)
1980 info_entry->proc_fops = &sba_proc_fops;
1981
1982 if (bitmap_entry)
1983 bitmap_entry->proc_fops = &sba_proc_bitmap_fops;
1984#endif 1980#endif
1985 1981
1986 parisc_vmerge_boundary = IOVP_SIZE; 1982 parisc_vmerge_boundary = IOVP_SIZE;
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 54a6ef72906e..0338b0912674 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -76,7 +76,7 @@ int parport_wait_event (struct parport *port, signed long timeout)
76 semaphore. */ 76 semaphore. */
77 return 1; 77 return 1;
78 78
79 init_timer (&timer); 79 init_timer_on_stack(&timer);
80 timer.expires = jiffies + timeout; 80 timer.expires = jiffies + timeout;
81 timer.function = timeout_waiting_on_port; 81 timer.function = timeout_waiting_on_port;
82 port_from_cookie[port->number % PARPORT_MAX] = port; 82 port_from_cookie[port->number % PARPORT_MAX] = port;
@@ -88,6 +88,8 @@ int parport_wait_event (struct parport *port, signed long timeout)
88 /* Timed out. */ 88 /* Timed out. */
89 ret = 1; 89 ret = 1;
90 90
91 destroy_timer_on_stack(&timer);
92
91 return ret; 93 return ret;
92} 94}
93 95
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 0e77ae2b71a0..e6a7e847ee80 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -365,11 +365,11 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
365 if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) { 365 if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) {
366 366
367 /* Initialize bidirectional-mode (0x10) & data-transfer-mode #1 (0x20) */ 367 /* Initialize bidirectional-mode (0x10) & data-transfer-mode #1 (0x20) */
368 printk("%s: initialize bidirectional-mode.\n", __FUNCTION__); 368 printk("%s: initialize bidirectional-mode.\n", __func__);
369 parport_writeb ( (0x10 + 0x20), port + 4); 369 parport_writeb ( (0x10 + 0x20), port + 4);
370 370
371 } else { 371 } else {
372 printk("%s: enhanced parport-modes not supported.\n", __FUNCTION__); 372 printk("%s: enhanced parport-modes not supported.\n", __func__);
373 } 373 }
374 374
375 p = parport_gsc_probe_port(port, 0, dev->irq, 375 p = parport_gsc_probe_port(port, 0, dev->irq,
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index a85808938205..e0c2a4584ec6 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1415,7 +1415,7 @@ static void __devinit winbond_check(int io, int key)
1415{ 1415{
1416 int devid,devrev,oldid,x_devid,x_devrev,x_oldid; 1416 int devid,devrev,oldid,x_devid,x_devrev,x_oldid;
1417 1417
1418 if (!request_region(io, 3, __FUNCTION__)) 1418 if (!request_region(io, 3, __func__))
1419 return; 1419 return;
1420 1420
1421 /* First probe without key */ 1421 /* First probe without key */
@@ -1449,7 +1449,7 @@ static void __devinit winbond_check2(int io,int key)
1449{ 1449{
1450 int devid,devrev,oldid,x_devid,x_devrev,x_oldid; 1450 int devid,devrev,oldid,x_devid,x_devrev,x_oldid;
1451 1451
1452 if (!request_region(io, 3, __FUNCTION__)) 1452 if (!request_region(io, 3, __func__))
1453 return; 1453 return;
1454 1454
1455 /* First probe without the key */ 1455 /* First probe without the key */
@@ -1482,7 +1482,7 @@ static void __devinit smsc_check(int io, int key)
1482{ 1482{
1483 int id,rev,oldid,oldrev,x_id,x_rev,x_oldid,x_oldrev; 1483 int id,rev,oldid,oldrev,x_id,x_rev,x_oldid,x_oldrev;
1484 1484
1485 if (!request_region(io, 3, __FUNCTION__)) 1485 if (!request_region(io, 3, __func__))
1486 return; 1486 return;
1487 1487
1488 /* First probe without the key */ 1488 /* First probe without the key */
@@ -1547,7 +1547,7 @@ static void __devinit detect_and_report_it87(void)
1547 u8 r; 1547 u8 r;
1548 if (verbose_probing) 1548 if (verbose_probing)
1549 printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n"); 1549 printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
1550 if (!request_region(0x2e, 1, __FUNCTION__)) 1550 if (!request_region(0x2e, 1, __func__))
1551 return; 1551 return;
1552 outb(0x87, 0x2e); 1552 outb(0x87, 0x2e);
1553 outb(0x01, 0x2e); 1553 outb(0x01, 0x2e);
@@ -3082,6 +3082,7 @@ static struct pci_driver parport_pc_pci_driver;
3082static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;} 3082static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;}
3083#endif /* CONFIG_PCI */ 3083#endif /* CONFIG_PCI */
3084 3084
3085#ifdef CONFIG_PNP
3085 3086
3086static const struct pnp_device_id parport_pc_pnp_tbl[] = { 3087static const struct pnp_device_id parport_pc_pnp_tbl[] = {
3087 /* Standard LPT Printer Port */ 3088 /* Standard LPT Printer Port */
@@ -3148,6 +3149,9 @@ static struct pnp_driver parport_pc_pnp_driver = {
3148 .remove = parport_pc_pnp_remove, 3149 .remove = parport_pc_pnp_remove,
3149}; 3150};
3150 3151
3152#else
3153static struct pnp_driver parport_pc_pnp_driver;
3154#endif /* CONFIG_PNP */
3151 3155
3152static int __devinit parport_pc_platform_probe(struct platform_device *pdev) 3156static int __devinit parport_pc_platform_probe(struct platform_device *pdev)
3153{ 3157{
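
The parport_pc.c hunks bracket the PnP id table and driver with #ifdef CONFIG_PNP and fall back to an empty struct pnp_driver, so later references to parport_pc_pnp_driver still compile when PnP support is configured out. A sketch of that conditional-compilation idiom with hypothetical names; the "PNP0400" id is given only as an example of a standard LPT port id.

#include <linux/module.h>
#include <linux/pnp.h>

#ifdef CONFIG_PNP
static const struct pnp_device_id example_pnp_tbl[] = {
        { .id = "PNP0400" },                    /* standard LPT port, for example */
        { }
};
MODULE_DEVICE_TABLE(pnp, example_pnp_tbl);

static struct pnp_driver example_pnp_driver = {
        .name     = "example",
        .id_table = example_pnp_tbl,
};
#else
/* Empty stub: lets the rest of the file refer to &example_pnp_driver
 * unconditionally when PnP support is configured out. */
static struct pnp_driver example_pnp_driver;
#endif /* CONFIG_PNP */
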
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index f14267e197dd..8264a7680435 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -93,11 +93,10 @@ struct controller {
93 u8 slot_device_offset; 93 u8 slot_device_offset;
94 u32 first_slot; /* First physical slot number */ /* PCIE only has 1 slot */ 94 u32 first_slot; /* First physical slot number */ /* PCIE only has 1 slot */
95 u8 slot_bus; /* Bus where the slots handled by this controller sit */ 95 u8 slot_bus; /* Bus where the slots handled by this controller sit */
96 u8 ctrlcap; 96 u32 slot_cap;
97 u8 cap_base; 97 u8 cap_base;
98 struct timer_list poll_timer; 98 struct timer_list poll_timer;
99 volatile int cmd_busy; 99 volatile int cmd_busy;
100 spinlock_t lock;
101}; 100};
102 101
103#define INT_BUTTON_IGNORE 0 102#define INT_BUTTON_IGNORE 0
@@ -137,13 +136,13 @@ struct controller {
137#define HP_SUPR_RM_SUP 0x00000020 136#define HP_SUPR_RM_SUP 0x00000020
138#define EMI_PRSN 0x00020000 137#define EMI_PRSN 0x00020000
139 138
140#define ATTN_BUTTN(cap) (cap & ATTN_BUTTN_PRSN) 139#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & ATTN_BUTTN_PRSN)
141#define POWER_CTRL(cap) (cap & PWR_CTRL_PRSN) 140#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PWR_CTRL_PRSN)
142#define MRL_SENS(cap) (cap & MRL_SENS_PRSN) 141#define MRL_SENS(ctrl) ((ctrl)->slot_cap & MRL_SENS_PRSN)
143#define ATTN_LED(cap) (cap & ATTN_LED_PRSN) 142#define ATTN_LED(ctrl) ((ctrl)->slot_cap & ATTN_LED_PRSN)
144#define PWR_LED(cap) (cap & PWR_LED_PRSN) 143#define PWR_LED(ctrl) ((ctrl)->slot_cap & PWR_LED_PRSN)
145#define HP_SUPR_RM(cap) (cap & HP_SUPR_RM_SUP) 144#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & HP_SUPR_RM_SUP)
146#define EMI(cap) (cap & EMI_PRSN) 145#define EMI(ctrl) ((ctrl)->slot_cap & EMI_PRSN)
147 146
148extern int pciehp_sysfs_enable_slot(struct slot *slot); 147extern int pciehp_sysfs_enable_slot(struct slot *slot);
149extern int pciehp_sysfs_disable_slot(struct slot *slot); 148extern int pciehp_sysfs_disable_slot(struct slot *slot);
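
The header change stores the full 32-bit Slot Capabilities value in ctrl->slot_cap and makes the feature macros take the controller pointer. Judging from the old initialization later in this diff (ctrl->ctrlcap = slot_cap & 0x0000007f), the previous u8 field could never hold a high bit such as EMI_PRSN (0x00020000), so EMI(ctrl->ctrlcap) always evaluated to zero. A small, self-contained user-space demonstration of that truncation; the slot_cap value is made up.

#include <stdio.h>
#include <stdint.h>

#define EMI_PRSN        0x00020000              /* bit 17 of Slot Capabilities */

int main(void)
{
        uint32_t slot_cap = 0x00020141;         /* made-up value with EMI_PRSN set */
        uint8_t  ctrlcap  = slot_cap & 0x7f;    /* what the old u8 field kept */

        printf("EMI via u8 ctrlcap  : %d\n", !!(ctrlcap & EMI_PRSN));   /* 0 */
        printf("EMI via u32 slot_cap: %d\n", !!(slot_cap & EMI_PRSN));  /* 1 */
        return 0;
}
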
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index aee19f013d84..43d8ddb2d679 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -41,6 +41,7 @@ int pciehp_debug;
41int pciehp_poll_mode; 41int pciehp_poll_mode;
42int pciehp_poll_time; 42int pciehp_poll_time;
43int pciehp_force; 43int pciehp_force;
44int pciehp_slot_with_bus;
44struct workqueue_struct *pciehp_wq; 45struct workqueue_struct *pciehp_wq;
45 46
46#define DRIVER_VERSION "0.4" 47#define DRIVER_VERSION "0.4"
@@ -55,10 +56,12 @@ module_param(pciehp_debug, bool, 0644);
55module_param(pciehp_poll_mode, bool, 0644); 56module_param(pciehp_poll_mode, bool, 0644);
56module_param(pciehp_poll_time, int, 0644); 57module_param(pciehp_poll_time, int, 0644);
57module_param(pciehp_force, bool, 0644); 58module_param(pciehp_force, bool, 0644);
59module_param(pciehp_slot_with_bus, bool, 0644);
58MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); 60MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
59MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); 61MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
60MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); 62MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
61MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); 63MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
64MODULE_PARM_DESC(pciehp_slot_with_bus, "Use bus number in the slot name");
62 65
63#define PCIE_MODULE_NAME "pciehp" 66#define PCIE_MODULE_NAME "pciehp"
64 67
@@ -193,8 +196,12 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
193 196
194static void make_slot_name(struct slot *slot) 197static void make_slot_name(struct slot *slot)
195{ 198{
196 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d", 199 if (pciehp_slot_with_bus)
197 slot->bus, slot->number); 200 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
201 slot->bus, slot->number);
202 else
203 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
204 slot->number);
198} 205}
199 206
200static int init_slots(struct controller *ctrl) 207static int init_slots(struct controller *ctrl)
@@ -251,7 +258,7 @@ static int init_slots(struct controller *ctrl)
251 goto error_info; 258 goto error_info;
252 } 259 }
253 /* create additional sysfs entries */ 260 /* create additional sysfs entries */
254 if (EMI(ctrl->ctrlcap)) { 261 if (EMI(ctrl)) {
255 retval = sysfs_create_file(&hotplug_slot->kobj, 262 retval = sysfs_create_file(&hotplug_slot->kobj,
256 &hotplug_slot_attr_lock.attr); 263 &hotplug_slot_attr_lock.attr);
257 if (retval) { 264 if (retval) {
@@ -284,7 +291,7 @@ static void cleanup_slots(struct controller *ctrl)
284 list_for_each_safe(tmp, next, &ctrl->slot_list) { 291 list_for_each_safe(tmp, next, &ctrl->slot_list) {
285 slot = list_entry(tmp, struct slot, slot_list); 292 slot = list_entry(tmp, struct slot, slot_list);
286 list_del(&slot->slot_list); 293 list_del(&slot->slot_list);
287 if (EMI(ctrl->ctrlcap)) 294 if (EMI(ctrl))
288 sysfs_remove_file(&slot->hotplug_slot->kobj, 295 sysfs_remove_file(&slot->hotplug_slot->kobj,
289 &hotplug_slot_attr_lock.attr); 296 &hotplug_slot_attr_lock.attr);
290 cancel_delayed_work(&slot->work); 297 cancel_delayed_work(&slot->work);
@@ -305,7 +312,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
305 312
306 hotplug_slot->info->attention_status = status; 313 hotplug_slot->info->attention_status = status;
307 314
308 if (ATTN_LED(slot->ctrl->ctrlcap)) 315 if (ATTN_LED(slot->ctrl))
309 slot->hpc_ops->set_attention_status(slot, status); 316 slot->hpc_ops->set_attention_status(slot, status);
310 317
311 return 0; 318 return 0;
@@ -472,7 +479,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
472 if (rc) /* -ENODEV: shouldn't happen, but deal with it */ 479 if (rc) /* -ENODEV: shouldn't happen, but deal with it */
473 value = 0; 480 value = 0;
474 } 481 }
475 if ((POWER_CTRL(ctrl->ctrlcap)) && !value) { 482 if ((POWER_CTRL(ctrl)) && !value) {
476 rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/ 483 rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/
477 if (rc) 484 if (rc)
478 goto err_out_free_ctrl_slot; 485 goto err_out_free_ctrl_slot;
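
The new pciehp_slot_with_bus module parameter only changes the string built by make_slot_name(): the default is now the bare physical slot number, while the old bus_slot form stays available for setups that depend on it. A tiny user-space illustration of the two formats; bus 3 and slot 5 are made-up values.

#include <stdio.h>

int main(void)
{
        char name[32];

        /* pciehp_slot_with_bus=1: legacy "<bus>_<slot>" form */
        snprintf(name, sizeof(name), "%04d_%04d", 3, 5);
        printf("%s\n", name);                   /* prints 0003_0005 */

        /* default: just the physical slot number */
        snprintf(name, sizeof(name), "%d", 5);
        printf("%s\n", name);                   /* prints 5 */
        return 0;
}
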
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 0c481f7d2ab3..0a7aa628e955 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -178,7 +178,7 @@ u8 pciehp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
178static void set_slot_off(struct controller *ctrl, struct slot * pslot) 178static void set_slot_off(struct controller *ctrl, struct slot * pslot)
179{ 179{
180 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 180 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
181 if (POWER_CTRL(ctrl->ctrlcap)) { 181 if (POWER_CTRL(ctrl)) {
182 if (pslot->hpc_ops->power_off_slot(pslot)) { 182 if (pslot->hpc_ops->power_off_slot(pslot)) {
183 err("%s: Issue of Slot Power Off command failed\n", 183 err("%s: Issue of Slot Power Off command failed\n",
184 __func__); 184 __func__);
@@ -186,10 +186,10 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
186 } 186 }
187 } 187 }
188 188
189 if (PWR_LED(ctrl->ctrlcap)) 189 if (PWR_LED(ctrl))
190 pslot->hpc_ops->green_led_off(pslot); 190 pslot->hpc_ops->green_led_off(pslot);
191 191
192 if (ATTN_LED(ctrl->ctrlcap)) { 192 if (ATTN_LED(ctrl)) {
193 if (pslot->hpc_ops->set_attention_status(pslot, 1)) { 193 if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
194 err("%s: Issue of Set Attention Led command failed\n", 194 err("%s: Issue of Set Attention Led command failed\n",
195 __func__); 195 __func__);
@@ -214,14 +214,14 @@ static int board_added(struct slot *p_slot)
214 __func__, p_slot->device, 214 __func__, p_slot->device,
215 ctrl->slot_device_offset, p_slot->hp_slot); 215 ctrl->slot_device_offset, p_slot->hp_slot);
216 216
217 if (POWER_CTRL(ctrl->ctrlcap)) { 217 if (POWER_CTRL(ctrl)) {
218 /* Power on slot */ 218 /* Power on slot */
219 retval = p_slot->hpc_ops->power_on_slot(p_slot); 219 retval = p_slot->hpc_ops->power_on_slot(p_slot);
220 if (retval) 220 if (retval)
221 return retval; 221 return retval;
222 } 222 }
223 223
224 if (PWR_LED(ctrl->ctrlcap)) 224 if (PWR_LED(ctrl))
225 p_slot->hpc_ops->green_led_blink(p_slot); 225 p_slot->hpc_ops->green_led_blink(p_slot);
226 226
227 /* Wait for ~1 second */ 227 /* Wait for ~1 second */
@@ -254,7 +254,7 @@ static int board_added(struct slot *p_slot)
254 */ 254 */
255 if (pcie_mch_quirk) 255 if (pcie_mch_quirk)
256 pci_fixup_device(pci_fixup_final, ctrl->pci_dev); 256 pci_fixup_device(pci_fixup_final, ctrl->pci_dev);
257 if (PWR_LED(ctrl->ctrlcap)) 257 if (PWR_LED(ctrl))
258 p_slot->hpc_ops->green_led_on(p_slot); 258 p_slot->hpc_ops->green_led_on(p_slot);
259 259
260 return 0; 260 return 0;
@@ -279,7 +279,7 @@ static int remove_board(struct slot *p_slot)
279 279
280 dbg("In %s, hp_slot = %d\n", __func__, p_slot->hp_slot); 280 dbg("In %s, hp_slot = %d\n", __func__, p_slot->hp_slot);
281 281
282 if (POWER_CTRL(ctrl->ctrlcap)) { 282 if (POWER_CTRL(ctrl)) {
283 /* power off slot */ 283 /* power off slot */
284 retval = p_slot->hpc_ops->power_off_slot(p_slot); 284 retval = p_slot->hpc_ops->power_off_slot(p_slot);
285 if (retval) { 285 if (retval) {
@@ -289,7 +289,7 @@ static int remove_board(struct slot *p_slot)
289 } 289 }
290 } 290 }
291 291
292 if (PWR_LED(ctrl->ctrlcap)) 292 if (PWR_LED(ctrl))
293 /* turn off Green LED */ 293 /* turn off Green LED */
294 p_slot->hpc_ops->green_led_off(p_slot); 294 p_slot->hpc_ops->green_led_off(p_slot);
295 295
@@ -327,7 +327,7 @@ static void pciehp_power_thread(struct work_struct *work)
327 case POWERON_STATE: 327 case POWERON_STATE:
328 mutex_unlock(&p_slot->lock); 328 mutex_unlock(&p_slot->lock);
329 if (pciehp_enable_slot(p_slot) && 329 if (pciehp_enable_slot(p_slot) &&
330 PWR_LED(p_slot->ctrl->ctrlcap)) 330 PWR_LED(p_slot->ctrl))
331 p_slot->hpc_ops->green_led_off(p_slot); 331 p_slot->hpc_ops->green_led_off(p_slot);
332 mutex_lock(&p_slot->lock); 332 mutex_lock(&p_slot->lock);
333 p_slot->state = STATIC_STATE; 333 p_slot->state = STATIC_STATE;
@@ -409,9 +409,9 @@ static void handle_button_press_event(struct slot *p_slot)
409 "press.\n", p_slot->name); 409 "press.\n", p_slot->name);
410 } 410 }
411 /* blink green LED and turn off amber */ 411 /* blink green LED and turn off amber */
412 if (PWR_LED(ctrl->ctrlcap)) 412 if (PWR_LED(ctrl))
413 p_slot->hpc_ops->green_led_blink(p_slot); 413 p_slot->hpc_ops->green_led_blink(p_slot);
414 if (ATTN_LED(ctrl->ctrlcap)) 414 if (ATTN_LED(ctrl))
415 p_slot->hpc_ops->set_attention_status(p_slot, 0); 415 p_slot->hpc_ops->set_attention_status(p_slot, 0);
416 416
417 schedule_delayed_work(&p_slot->work, 5*HZ); 417 schedule_delayed_work(&p_slot->work, 5*HZ);
@@ -427,13 +427,13 @@ static void handle_button_press_event(struct slot *p_slot)
427 dbg("%s: button cancel\n", __func__); 427 dbg("%s: button cancel\n", __func__);
428 cancel_delayed_work(&p_slot->work); 428 cancel_delayed_work(&p_slot->work);
429 if (p_slot->state == BLINKINGOFF_STATE) { 429 if (p_slot->state == BLINKINGOFF_STATE) {
430 if (PWR_LED(ctrl->ctrlcap)) 430 if (PWR_LED(ctrl))
431 p_slot->hpc_ops->green_led_on(p_slot); 431 p_slot->hpc_ops->green_led_on(p_slot);
432 } else { 432 } else {
433 if (PWR_LED(ctrl->ctrlcap)) 433 if (PWR_LED(ctrl))
434 p_slot->hpc_ops->green_led_off(p_slot); 434 p_slot->hpc_ops->green_led_off(p_slot);
435 } 435 }
436 if (ATTN_LED(ctrl->ctrlcap)) 436 if (ATTN_LED(ctrl))
437 p_slot->hpc_ops->set_attention_status(p_slot, 0); 437 p_slot->hpc_ops->set_attention_status(p_slot, 0);
438 info("PCI slot #%s - action canceled due to button press\n", 438 info("PCI slot #%s - action canceled due to button press\n",
439 p_slot->name); 439 p_slot->name);
@@ -492,16 +492,16 @@ static void interrupt_event_handler(struct work_struct *work)
492 handle_button_press_event(p_slot); 492 handle_button_press_event(p_slot);
493 break; 493 break;
494 case INT_POWER_FAULT: 494 case INT_POWER_FAULT:
495 if (!POWER_CTRL(ctrl->ctrlcap)) 495 if (!POWER_CTRL(ctrl))
496 break; 496 break;
497 if (ATTN_LED(ctrl->ctrlcap)) 497 if (ATTN_LED(ctrl))
498 p_slot->hpc_ops->set_attention_status(p_slot, 1); 498 p_slot->hpc_ops->set_attention_status(p_slot, 1);
499 if (PWR_LED(ctrl->ctrlcap)) 499 if (PWR_LED(ctrl))
500 p_slot->hpc_ops->green_led_off(p_slot); 500 p_slot->hpc_ops->green_led_off(p_slot);
501 break; 501 break;
502 case INT_PRESENCE_ON: 502 case INT_PRESENCE_ON:
503 case INT_PRESENCE_OFF: 503 case INT_PRESENCE_OFF:
504 if (!HP_SUPR_RM(ctrl->ctrlcap)) 504 if (!HP_SUPR_RM(ctrl))
505 break; 505 break;
506 dbg("Surprise Removal\n"); 506 dbg("Surprise Removal\n");
507 update_slot_info(p_slot); 507 update_slot_info(p_slot);
@@ -531,7 +531,7 @@ int pciehp_enable_slot(struct slot *p_slot)
531 mutex_unlock(&p_slot->ctrl->crit_sect); 531 mutex_unlock(&p_slot->ctrl->crit_sect);
532 return -ENODEV; 532 return -ENODEV;
533 } 533 }
534 if (MRL_SENS(p_slot->ctrl->ctrlcap)) { 534 if (MRL_SENS(p_slot->ctrl)) {
535 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 535 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
536 if (rc || getstatus) { 536 if (rc || getstatus) {
537 info("%s: latch open on slot(%s)\n", __func__, 537 info("%s: latch open on slot(%s)\n", __func__,
@@ -541,7 +541,7 @@ int pciehp_enable_slot(struct slot *p_slot)
541 } 541 }
542 } 542 }
543 543
544 if (POWER_CTRL(p_slot->ctrl->ctrlcap)) { 544 if (POWER_CTRL(p_slot->ctrl)) {
545 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 545 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
546 if (rc || getstatus) { 546 if (rc || getstatus) {
547 info("%s: already enabled on slot(%s)\n", __func__, 547 info("%s: already enabled on slot(%s)\n", __func__,
@@ -576,7 +576,7 @@ int pciehp_disable_slot(struct slot *p_slot)
576 /* Check to see if (latch closed, card present, power on) */ 576 /* Check to see if (latch closed, card present, power on) */
577 mutex_lock(&p_slot->ctrl->crit_sect); 577 mutex_lock(&p_slot->ctrl->crit_sect);
578 578
579 if (!HP_SUPR_RM(p_slot->ctrl->ctrlcap)) { 579 if (!HP_SUPR_RM(p_slot->ctrl)) {
580 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 580 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
581 if (ret || !getstatus) { 581 if (ret || !getstatus) {
582 info("%s: no adapter on slot(%s)\n", __func__, 582 info("%s: no adapter on slot(%s)\n", __func__,
@@ -586,7 +586,7 @@ int pciehp_disable_slot(struct slot *p_slot)
586 } 586 }
587 } 587 }
588 588
589 if (MRL_SENS(p_slot->ctrl->ctrlcap)) { 589 if (MRL_SENS(p_slot->ctrl)) {
590 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 590 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
591 if (ret || getstatus) { 591 if (ret || getstatus) {
592 info("%s: latch open on slot(%s)\n", __func__, 592 info("%s: latch open on slot(%s)\n", __func__,
@@ -596,7 +596,7 @@ int pciehp_disable_slot(struct slot *p_slot)
596 } 596 }
597 } 597 }
598 598
599 if (POWER_CTRL(p_slot->ctrl->ctrlcap)) { 599 if (POWER_CTRL(p_slot->ctrl)) {
600 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 600 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
601 if (ret || !getstatus) { 601 if (ret || !getstatus) {
602 info("%s: already disabled slot(%s)\n", __func__, 602 info("%s: already disabled slot(%s)\n", __func__,
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b4bbd07d1e39..891f81a0400c 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -221,6 +221,32 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
221 add_timer(&ctrl->poll_timer); 221 add_timer(&ctrl->poll_timer);
222} 222}
223 223
224static inline int pciehp_request_irq(struct controller *ctrl)
225{
226 int retval, irq = ctrl->pci_dev->irq;
227
228 /* Install interrupt polling timer. Start with 10 sec delay */
229 if (pciehp_poll_mode) {
230 init_timer(&ctrl->poll_timer);
231 start_int_poll_timer(ctrl, 10);
232 return 0;
233 }
234
235 /* Installs the interrupt handler */
236 retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
237 if (retval)
238 err("Cannot get irq %d for the hotplug controller\n", irq);
239 return retval;
240}
241
242static inline void pciehp_free_irq(struct controller *ctrl)
243{
244 if (pciehp_poll_mode)
245 del_timer_sync(&ctrl->poll_timer);
246 else
247 free_irq(ctrl->pci_dev->irq, ctrl);
248}
249
224static inline int pcie_wait_cmd(struct controller *ctrl) 250static inline int pcie_wait_cmd(struct controller *ctrl)
225{ 251{
226 int retval = 0; 252 int retval = 0;
@@ -242,17 +268,15 @@ static inline int pcie_wait_cmd(struct controller *ctrl)
242 268
243/** 269/**
244 * pcie_write_cmd - Issue controller command 270 * pcie_write_cmd - Issue controller command
245 * @slot: slot to which the command is issued 271 * @ctrl: controller to which the command is issued
246 * @cmd: command value written to slot control register 272 * @cmd: command value written to slot control register
247 * @mask: bitmask of slot control register to be modified 273 * @mask: bitmask of slot control register to be modified
248 */ 274 */
249static int pcie_write_cmd(struct slot *slot, u16 cmd, u16 mask) 275static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
250{ 276{
251 struct controller *ctrl = slot->ctrl;
252 int retval = 0; 277 int retval = 0;
253 u16 slot_status; 278 u16 slot_status;
254 u16 slot_ctrl; 279 u16 slot_ctrl;
255 unsigned long flags;
256 280
257 mutex_lock(&ctrl->ctrl_lock); 281 mutex_lock(&ctrl->ctrl_lock);
258 282
@@ -270,24 +294,24 @@ static int pcie_write_cmd(struct slot *slot, u16 cmd, u16 mask)
270 __func__); 294 __func__);
271 } 295 }
272 296
273 spin_lock_irqsave(&ctrl->lock, flags);
274 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 297 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
275 if (retval) { 298 if (retval) {
276 err("%s: Cannot read SLOTCTRL register\n", __func__); 299 err("%s: Cannot read SLOTCTRL register\n", __func__);
277 goto out_spin_unlock; 300 goto out;
278 } 301 }
279 302
280 slot_ctrl &= ~mask; 303 slot_ctrl &= ~mask;
281 slot_ctrl |= ((cmd & mask) | CMD_CMPL_INTR_ENABLE); 304 slot_ctrl |= (cmd & mask);
305 /* Don't enable command completed if caller is changing it. */
306 if (!(mask & CMD_CMPL_INTR_ENABLE))
307 slot_ctrl |= CMD_CMPL_INTR_ENABLE;
282 308
283 ctrl->cmd_busy = 1; 309 ctrl->cmd_busy = 1;
310 smp_mb();
284 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); 311 retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
285 if (retval) 312 if (retval)
286 err("%s: Cannot write to SLOTCTRL register\n", __func__); 313 err("%s: Cannot write to SLOTCTRL register\n", __func__);
287 314
288 out_spin_unlock:
289 spin_unlock_irqrestore(&ctrl->lock, flags);
290
291 /* 315 /*
292 * Wait for command completion. 316 * Wait for command completion.
293 */ 317 */
@@ -467,12 +491,7 @@ static int hpc_toggle_emi(struct slot *slot)
467 491
468 slot_cmd = EMI_CTRL; 492 slot_cmd = EMI_CTRL;
469 cmd_mask = EMI_CTRL; 493 cmd_mask = EMI_CTRL;
470 if (!pciehp_poll_mode) { 494 rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
471 slot_cmd = slot_cmd | HP_INTR_ENABLE;
472 cmd_mask = cmd_mask | HP_INTR_ENABLE;
473 }
474
475 rc = pcie_write_cmd(slot, slot_cmd, cmd_mask);
476 slot->last_emi_toggle = get_seconds(); 495 slot->last_emi_toggle = get_seconds();
477 496
478 return rc; 497 return rc;
@@ -499,12 +518,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
499 default: 518 default:
500 return -1; 519 return -1;
501 } 520 }
502 if (!pciehp_poll_mode) { 521 rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
503 slot_cmd = slot_cmd | HP_INTR_ENABLE;
504 cmd_mask = cmd_mask | HP_INTR_ENABLE;
505 }
506
507 rc = pcie_write_cmd(slot, slot_cmd, cmd_mask);
508 dbg("%s: SLOTCTRL %x write cmd %x\n", 522 dbg("%s: SLOTCTRL %x write cmd %x\n",
509 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 523 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
510 524
@@ -519,13 +533,7 @@ static void hpc_set_green_led_on(struct slot *slot)
519 533
520 slot_cmd = 0x0100; 534 slot_cmd = 0x0100;
521 cmd_mask = PWR_LED_CTRL; 535 cmd_mask = PWR_LED_CTRL;
522 if (!pciehp_poll_mode) { 536 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
523 slot_cmd = slot_cmd | HP_INTR_ENABLE;
524 cmd_mask = cmd_mask | HP_INTR_ENABLE;
525 }
526
527 pcie_write_cmd(slot, slot_cmd, cmd_mask);
528
529 dbg("%s: SLOTCTRL %x write cmd %x\n", 537 dbg("%s: SLOTCTRL %x write cmd %x\n",
530 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 538 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
531} 539}
@@ -538,12 +546,7 @@ static void hpc_set_green_led_off(struct slot *slot)
538 546
539 slot_cmd = 0x0300; 547 slot_cmd = 0x0300;
540 cmd_mask = PWR_LED_CTRL; 548 cmd_mask = PWR_LED_CTRL;
541 if (!pciehp_poll_mode) { 549 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
542 slot_cmd = slot_cmd | HP_INTR_ENABLE;
543 cmd_mask = cmd_mask | HP_INTR_ENABLE;
544 }
545
546 pcie_write_cmd(slot, slot_cmd, cmd_mask);
547 dbg("%s: SLOTCTRL %x write cmd %x\n", 550 dbg("%s: SLOTCTRL %x write cmd %x\n",
548 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 551 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
549} 552}
@@ -556,23 +559,19 @@ static void hpc_set_green_led_blink(struct slot *slot)
556 559
557 slot_cmd = 0x0200; 560 slot_cmd = 0x0200;
558 cmd_mask = PWR_LED_CTRL; 561 cmd_mask = PWR_LED_CTRL;
559 if (!pciehp_poll_mode) { 562 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
560 slot_cmd = slot_cmd | HP_INTR_ENABLE;
561 cmd_mask = cmd_mask | HP_INTR_ENABLE;
562 }
563
564 pcie_write_cmd(slot, slot_cmd, cmd_mask);
565
566 dbg("%s: SLOTCTRL %x write cmd %x\n", 563 dbg("%s: SLOTCTRL %x write cmd %x\n",
567 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 564 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
568} 565}
569 566
570static void hpc_release_ctlr(struct controller *ctrl) 567static void hpc_release_ctlr(struct controller *ctrl)
571{ 568{
572 if (pciehp_poll_mode) 569 /* Mask Hot-plug Interrupt Enable */
573 del_timer(&ctrl->poll_timer); 570 if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE))
574 else 571 err("%s: Cannot mask hotplug interrupt enable\n", __func__);
575 free_irq(ctrl->pci_dev->irq, ctrl); 572
573 /* Free interrupt handler or interrupt polling timer */
574 pciehp_free_irq(ctrl);
576 575
577 /* 576 /*
578 * If this is the last controller to be released, destroy the 577 * If this is the last controller to be released, destroy the
@@ -612,19 +611,13 @@ static int hpc_power_on_slot(struct slot * slot)
612 cmd_mask = PWR_CTRL; 611 cmd_mask = PWR_CTRL;
613 /* Enable detection that we turned off at slot power-off time */ 612 /* Enable detection that we turned off at slot power-off time */
614 if (!pciehp_poll_mode) { 613 if (!pciehp_poll_mode) {
615 slot_cmd = slot_cmd | 614 slot_cmd |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
616 PWR_FAULT_DETECT_ENABLE | 615 PRSN_DETECT_ENABLE);
617 MRL_DETECT_ENABLE | 616 cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
618 PRSN_DETECT_ENABLE | 617 PRSN_DETECT_ENABLE);
619 HP_INTR_ENABLE;
620 cmd_mask = cmd_mask |
621 PWR_FAULT_DETECT_ENABLE |
622 MRL_DETECT_ENABLE |
623 PRSN_DETECT_ENABLE |
624 HP_INTR_ENABLE;
625 } 618 }
626 619
627 retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); 620 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
628 621
629 if (retval) { 622 if (retval) {
630 err("%s: Write %x command failed!\n", __func__, slot_cmd); 623 err("%s: Write %x command failed!\n", __func__, slot_cmd);
@@ -697,18 +690,13 @@ static int hpc_power_off_slot(struct slot * slot)
697 * till the slot is powered on again. 690 * till the slot is powered on again.
698 */ 691 */
699 if (!pciehp_poll_mode) { 692 if (!pciehp_poll_mode) {
700 slot_cmd = (slot_cmd & 693 slot_cmd &= ~(PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
701 ~PWR_FAULT_DETECT_ENABLE & 694 PRSN_DETECT_ENABLE);
702 ~MRL_DETECT_ENABLE & 695 cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
703 ~PRSN_DETECT_ENABLE) | HP_INTR_ENABLE; 696 PRSN_DETECT_ENABLE);
704 cmd_mask = cmd_mask |
705 PWR_FAULT_DETECT_ENABLE |
706 MRL_DETECT_ENABLE |
707 PRSN_DETECT_ENABLE |
708 HP_INTR_ENABLE;
709 } 697 }
710 698
711 retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); 699 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
712 if (retval) { 700 if (retval) {
713 err("%s: Write command failed!\n", __func__); 701 err("%s: Write command failed!\n", __func__);
714 retval = -1; 702 retval = -1;
@@ -733,139 +721,56 @@ static int hpc_power_off_slot(struct slot * slot)
733static irqreturn_t pcie_isr(int irq, void *dev_id) 721static irqreturn_t pcie_isr(int irq, void *dev_id)
734{ 722{
735 struct controller *ctrl = (struct controller *)dev_id; 723 struct controller *ctrl = (struct controller *)dev_id;
736 u16 slot_status, intr_detect, intr_loc; 724 u16 detected, intr_loc;
737 u16 temp_word;
738 int hp_slot = 0; /* only 1 slot per PCI Express port */
739 int rc = 0;
740 unsigned long flags;
741
742 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
743 if (rc) {
744 err("%s: Cannot read SLOTSTATUS register\n", __func__);
745 return IRQ_NONE;
746 }
747
748 intr_detect = (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED |
749 MRL_SENS_CHANGED | PRSN_DETECT_CHANGED | CMD_COMPLETED);
750
751 intr_loc = slot_status & intr_detect;
752
753 /* Check to see if it was our interrupt */
754 if ( !intr_loc )
755 return IRQ_NONE;
756 725
757 dbg("%s: intr_loc %x\n", __func__, intr_loc); 726 /*
758 /* Mask Hot-plug Interrupt Enable */ 727 * In order to guarantee that all interrupt events are
759 if (!pciehp_poll_mode) { 728 * serviced, we need to re-inspect Slot Status register after
760 spin_lock_irqsave(&ctrl->lock, flags); 729 * clearing what is presumed to be the last pending interrupt.
761 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word); 730 */
762 if (rc) { 731 intr_loc = 0;
763 err("%s: Cannot read SLOT_CTRL register\n", 732 do {
764 __func__); 733 if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) {
765 spin_unlock_irqrestore(&ctrl->lock, flags); 734 err("%s: Cannot read SLOTSTATUS\n", __func__);
766 return IRQ_NONE; 735 return IRQ_NONE;
767 } 736 }
768 737
769 dbg("%s: pciehp_readw(SLOTCTRL) with value %x\n", 738 detected &= (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED |
770 __func__, temp_word); 739 MRL_SENS_CHANGED | PRSN_DETECT_CHANGED |
771 temp_word = (temp_word & ~HP_INTR_ENABLE & 740 CMD_COMPLETED);
772 ~CMD_CMPL_INTR_ENABLE) | 0x00; 741 intr_loc |= detected;
773 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word); 742 if (!intr_loc)
774 if (rc) {
775 err("%s: Cannot write to SLOTCTRL register\n",
776 __func__);
777 spin_unlock_irqrestore(&ctrl->lock, flags);
778 return IRQ_NONE; 743 return IRQ_NONE;
779 } 744 if (pciehp_writew(ctrl, SLOTSTATUS, detected)) {
780 spin_unlock_irqrestore(&ctrl->lock, flags); 745 err("%s: Cannot write to SLOTSTATUS\n", __func__);
781
782 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
783 if (rc) {
784 err("%s: Cannot read SLOT_STATUS register\n",
785 __func__);
786 return IRQ_NONE; 746 return IRQ_NONE;
787 } 747 }
788 dbg("%s: pciehp_readw(SLOTSTATUS) with value %x\n", 748 } while (detected);
789 __func__, slot_status);
790 749
791 /* Clear command complete interrupt caused by this write */ 750 dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc);
792 temp_word = 0x1f;
793 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
794 if (rc) {
795 err("%s: Cannot write to SLOTSTATUS register\n",
796 __func__);
797 return IRQ_NONE;
798 }
799 }
800 751
752 /* Check Command Complete Interrupt Pending */
801 if (intr_loc & CMD_COMPLETED) { 753 if (intr_loc & CMD_COMPLETED) {
802 /*
803 * Command Complete Interrupt Pending
804 */
805 ctrl->cmd_busy = 0; 754 ctrl->cmd_busy = 0;
755 smp_mb();
806 wake_up_interruptible(&ctrl->queue); 756 wake_up_interruptible(&ctrl->queue);
807 } 757 }
808 758
759 /* Check MRL Sensor Changed */
809 if (intr_loc & MRL_SENS_CHANGED) 760 if (intr_loc & MRL_SENS_CHANGED)
810 pciehp_handle_switch_change(hp_slot, ctrl); 761 pciehp_handle_switch_change(0, ctrl);
811 762
763 /* Check Attention Button Pressed */
812 if (intr_loc & ATTN_BUTTN_PRESSED) 764 if (intr_loc & ATTN_BUTTN_PRESSED)
813 pciehp_handle_attention_button(hp_slot, ctrl); 765 pciehp_handle_attention_button(0, ctrl);
814 766
767 /* Check Presence Detect Changed */
815 if (intr_loc & PRSN_DETECT_CHANGED) 768 if (intr_loc & PRSN_DETECT_CHANGED)
816 pciehp_handle_presence_change(hp_slot, ctrl); 769 pciehp_handle_presence_change(0, ctrl);
817 770
771 /* Check Power Fault Detected */
818 if (intr_loc & PWR_FAULT_DETECTED) 772 if (intr_loc & PWR_FAULT_DETECTED)
819 pciehp_handle_power_fault(hp_slot, ctrl); 773 pciehp_handle_power_fault(0, ctrl);
820
821 /* Clear all events after serving them */
822 temp_word = 0x1F;
823 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
824 if (rc) {
825 err("%s: Cannot write to SLOTSTATUS register\n", __func__);
826 return IRQ_NONE;
827 }
828 /* Unmask Hot-plug Interrupt Enable */
829 if (!pciehp_poll_mode) {
830 spin_lock_irqsave(&ctrl->lock, flags);
831 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
832 if (rc) {
833 err("%s: Cannot read SLOTCTRL register\n",
834 __func__);
835 spin_unlock_irqrestore(&ctrl->lock, flags);
836 return IRQ_NONE;
837 }
838
839 dbg("%s: Unmask Hot-plug Interrupt Enable\n", __func__);
840 temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE;
841
842 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
843 if (rc) {
844 err("%s: Cannot write to SLOTCTRL register\n",
845 __func__);
846 spin_unlock_irqrestore(&ctrl->lock, flags);
847 return IRQ_NONE;
848 }
849 spin_unlock_irqrestore(&ctrl->lock, flags);
850
851 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
852 if (rc) {
853 err("%s: Cannot read SLOT_STATUS register\n",
854 __func__);
855 return IRQ_NONE;
856 }
857
858 /* Clear command complete interrupt caused by this write */
859 temp_word = 0x1F;
860 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
861 if (rc) {
862 err("%s: Cannot write to SLOTSTATUS failed\n",
863 __func__);
864 return IRQ_NONE;
865 }
866 dbg("%s: pciehp_writew(SLOTSTATUS) with value %x\n",
867 __func__, temp_word);
868 }
869 774
870 return IRQ_HANDLED; 775 return IRQ_HANDLED;
871} 776}
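
The rewritten pcie_isr() above drops the mask/unmask-and-reread dance in favour of a loop that keeps re-reading Slot Status after each write-one-to-clear until no event bits remain latched, as its in-code comment explains. Below is a generic sketch of that interrupt-service pattern; the example_* device, register mask and accessors are placeholders, not pciehp code.

#include <linux/interrupt.h>
#include <linux/types.h>

#define EXAMPLE_EVENT_MASK      0x001f          /* latched, write-1-to-clear bits */

struct example_dev {
        u16 status;                             /* stands in for Slot Status */
};

static u16 example_read_status(struct example_dev *dev)
{
        return dev->status;
}

static void example_clear_status(struct example_dev *dev, u16 bits)
{
        dev->status &= ~bits;                   /* write-1-to-clear, modelled in memory */
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct example_dev *dev = dev_id;
        u16 pending, events = 0;

        for (;;) {
                pending = example_read_status(dev) & EXAMPLE_EVENT_MASK;
                if (!pending)
                        break;
                /* Clearing may race with a new event; the next pass of the
                 * loop picks it up, so nothing is left unserviced. */
                example_clear_status(dev, pending);
                events |= pending;
        }

        if (!events)
                return IRQ_NONE;                /* interrupt was not ours */

        /* ... dispatch on the accumulated bits in 'events' ... */
        return IRQ_HANDLED;
}
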
@@ -1052,7 +957,7 @@ static struct hpc_ops pciehp_hpc_ops = {
1052}; 957};
1053 958
1054#ifdef CONFIG_ACPI 959#ifdef CONFIG_ACPI
1055int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev) 960static int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
1056{ 961{
1057 acpi_status status; 962 acpi_status status;
1058 acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); 963 acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
@@ -1112,7 +1017,7 @@ int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
1112 break; 1017 break;
1113 } 1018 }
1114 1019
1115 err("Cannot get control of hotplug hardware for pci %s\n", 1020 dbg("Cannot get control of hotplug hardware for pci %s\n",
1116 pci_name(dev)); 1021 pci_name(dev));
1117 1022
1118 kfree(string.pointer); 1023 kfree(string.pointer);
@@ -1123,45 +1028,9 @@ int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
1123static int pcie_init_hardware_part1(struct controller *ctrl, 1028static int pcie_init_hardware_part1(struct controller *ctrl,
1124 struct pcie_device *dev) 1029 struct pcie_device *dev)
1125{ 1030{
1126 int rc;
1127 u16 temp_word;
1128 u32 slot_cap;
1129 u16 slot_status;
1130
1131 rc = pciehp_readl(ctrl, SLOTCAP, &slot_cap);
1132 if (rc) {
1133 err("%s: Cannot read SLOTCAP register\n", __func__);
1134 return -1;
1135 }
1136
1137 /* Mask Hot-plug Interrupt Enable */ 1031 /* Mask Hot-plug Interrupt Enable */
1138 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word); 1032 if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) {
1139 if (rc) { 1033 err("%s: Cannot mask hotplug interrupt enable\n", __func__);
1140 err("%s: Cannot read SLOTCTRL register\n", __func__);
1141 return -1;
1142 }
1143
1144 dbg("%s: SLOTCTRL %x value read %x\n",
1145 __func__, ctrl->cap_base + SLOTCTRL, temp_word);
1146 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) |
1147 0x00;
1148
1149 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1150 if (rc) {
1151 err("%s: Cannot write to SLOTCTRL register\n", __func__);
1152 return -1;
1153 }
1154
1155 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
1156 if (rc) {
1157 err("%s: Cannot read SLOTSTATUS register\n", __func__);
1158 return -1;
1159 }
1160
1161 temp_word = 0x1F; /* Clear all events */
1162 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
1163 if (rc) {
1164 err("%s: Cannot write to SLOTSTATUS register\n", __func__);
1165 return -1; 1034 return -1;
1166 } 1035 }
1167 return 0; 1036 return 0;
@@ -1169,205 +1038,125 @@ static int pcie_init_hardware_part1(struct controller *ctrl,
1169 1038
1170int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev) 1039int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
1171{ 1040{
1172 int rc; 1041 u16 cmd, mask;
1173 u16 temp_word;
1174 u16 intr_enable = 0;
1175 u32 slot_cap;
1176 u16 slot_status;
1177 1042
1178 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word); 1043 /*
1179 if (rc) { 1044 * We need to clear all events before enabling hotplug interrupt
1180 err("%s: Cannot read SLOTCTRL register\n", __func__); 1045 * notification mechanism in order for hotplug controller to
1181 goto abort; 1046 * generate interrupts.
1182 } 1047 */
1183 1048 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) {
1184 intr_enable = intr_enable | PRSN_DETECT_ENABLE; 1049 err("%s: Cannot write to SLOTSTATUS register\n", __FUNCTION__);
1185 1050 return -1;
1186 rc = pciehp_readl(ctrl, SLOTCAP, &slot_cap);
1187 if (rc) {
1188 err("%s: Cannot read SLOTCAP register\n", __func__);
1189 goto abort;
1190 } 1051 }
1191 1052
1192 if (ATTN_BUTTN(slot_cap)) 1053 cmd = PRSN_DETECT_ENABLE;
1193 intr_enable = intr_enable | ATTN_BUTTN_ENABLE; 1054 if (ATTN_BUTTN(ctrl))
1194 1055 cmd |= ATTN_BUTTN_ENABLE;
1195 if (POWER_CTRL(slot_cap)) 1056 if (POWER_CTRL(ctrl))
1196 intr_enable = intr_enable | PWR_FAULT_DETECT_ENABLE; 1057 cmd |= PWR_FAULT_DETECT_ENABLE;
1197 1058 if (MRL_SENS(ctrl))
1198 if (MRL_SENS(slot_cap)) 1059 cmd |= MRL_DETECT_ENABLE;
1199 intr_enable = intr_enable | MRL_DETECT_ENABLE; 1060 if (!pciehp_poll_mode)
1061 cmd |= HP_INTR_ENABLE;
1200 1062
1201 temp_word = (temp_word & ~intr_enable) | intr_enable; 1063 mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE |
1064 PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | HP_INTR_ENABLE;
1202 1065
1203 if (pciehp_poll_mode) { 1066 if (pcie_write_cmd(ctrl, cmd, mask)) {
1204 temp_word = (temp_word & ~HP_INTR_ENABLE) | 0x0; 1067 err("%s: Cannot enable software notification\n", __func__);
1205 } else {
1206 temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE;
1207 }
1208
1209 /*
1210 * Unmask Hot-plug Interrupt Enable for the interrupt
1211 * notification mechanism case.
1212 */
1213 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1214 if (rc) {
1215 err("%s: Cannot write to SLOTCTRL register\n", __func__);
1216 goto abort; 1068 goto abort;
1217 } 1069 }
1218 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
1219 if (rc) {
1220 err("%s: Cannot read SLOTSTATUS register\n", __func__);
1221 goto abort_disable_intr;
1222 }
1223
1224 temp_word = 0x1F; /* Clear all events */
1225 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
1226 if (rc) {
1227 err("%s: Cannot write to SLOTSTATUS register\n", __func__);
1228 goto abort_disable_intr;
1229 }
1230 1070
1231 if (pciehp_force) { 1071 if (pciehp_force)
1232 dbg("Bypassing BIOS check for pciehp use on %s\n", 1072 dbg("Bypassing BIOS check for pciehp use on %s\n",
1233 pci_name(ctrl->pci_dev)); 1073 pci_name(ctrl->pci_dev));
1234 } else { 1074 else if (pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev))
1235 rc = pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev); 1075 goto abort_disable_intr;
1236 if (rc)
1237 goto abort_disable_intr;
1238 }
1239 1076
1240 return 0; 1077 return 0;
1241 1078
1242 /* We end up here for the many possible ways to fail this API. */ 1079 /* We end up here for the many possible ways to fail this API. */
1243abort_disable_intr: 1080abort_disable_intr:
1244 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word); 1081 if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE))
1245 if (!rc) {
1246 temp_word &= ~(intr_enable | HP_INTR_ENABLE);
1247 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1248 }
1249 if (rc)
1250 err("%s : disabling interrupts failed\n", __func__); 1082 err("%s : disabling interrupts failed\n", __func__);
1251abort: 1083abort:
1252 return -1; 1084 return -1;
1253} 1085}
1254 1086
1255int pcie_init(struct controller *ctrl, struct pcie_device *dev) 1087static inline void dbg_ctrl(struct controller *ctrl)
1256{ 1088{
1257 int rc; 1089 int i;
1258 u16 cap_reg; 1090 u16 reg16;
1259 u32 slot_cap; 1091 struct pci_dev *pdev = ctrl->pci_dev;
1260 int cap_base;
1261 u16 slot_status, slot_ctrl;
1262 struct pci_dev *pdev;
1263
1264 pdev = dev->port;
1265 ctrl->pci_dev = pdev; /* save pci_dev in context */
1266
1267 dbg("%s: hotplug controller vendor id 0x%x device id 0x%x\n",
1268 __func__, pdev->vendor, pdev->device);
1269 1092
1270 cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1093 if (!pciehp_debug)
1271 if (cap_base == 0) { 1094 return;
1272 dbg("%s: Can't find PCI_CAP_ID_EXP (0x10)\n", __func__);
1273 goto abort;
1274 }
1275 1095
1276 ctrl->cap_base = cap_base; 1096 dbg("Hotplug Controller:\n");
1097 dbg(" Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", pci_name(pdev), pdev->irq);
1098 dbg(" Vendor ID : 0x%04x\n", pdev->vendor);
1099 dbg(" Device ID : 0x%04x\n", pdev->device);
1100 dbg(" Subsystem ID : 0x%04x\n", pdev->subsystem_device);
1101 dbg(" Subsystem Vendor ID : 0x%04x\n", pdev->subsystem_vendor);
1102 dbg(" PCIe Cap offset : 0x%02x\n", ctrl->cap_base);
1103 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1104 if (!pci_resource_len(pdev, i))
1105 continue;
1106 dbg(" PCI resource [%d] : 0x%llx@0x%llx\n", i,
1107 (unsigned long long)pci_resource_len(pdev, i),
1108 (unsigned long long)pci_resource_start(pdev, i));
1109 }
1110 dbg("Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
1111 dbg(" Physical Slot Number : %d\n", ctrl->first_slot);
1112 dbg(" Attention Button : %3s\n", ATTN_BUTTN(ctrl) ? "yes" : "no");
1113 dbg(" Power Controller : %3s\n", POWER_CTRL(ctrl) ? "yes" : "no");
1114 dbg(" MRL Sensor : %3s\n", MRL_SENS(ctrl) ? "yes" : "no");
1115 dbg(" Attention Indicator : %3s\n", ATTN_LED(ctrl) ? "yes" : "no");
1116 dbg(" Power Indicator : %3s\n", PWR_LED(ctrl) ? "yes" : "no");
1117 dbg(" Hot-Plug Surprise : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no");
1118 dbg(" EMI Present : %3s\n", EMI(ctrl) ? "yes" : "no");
1119 pciehp_readw(ctrl, SLOTSTATUS, &reg16);
1120 dbg("Slot Status : 0x%04x\n", reg16);
1121 pciehp_readw(ctrl, SLOTSTATUS, &reg16);
1122 dbg("Slot Control : 0x%04x\n", reg16);
1123}
1277 1124
1278 dbg("%s: pcie_cap_base %x\n", __func__, cap_base); 1125int pcie_init(struct controller *ctrl, struct pcie_device *dev)
1126{
1127 u32 slot_cap;
1128 struct pci_dev *pdev = dev->port;
1279 1129
1280 rc = pciehp_readw(ctrl, CAPREG, &cap_reg); 1130 ctrl->pci_dev = pdev;
1281 if (rc) { 1131 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1282 err("%s: Cannot read CAPREG register\n", __func__); 1132 if (!ctrl->cap_base) {
1283 goto abort; 1133 err("%s: Cannot find PCI Express capability\n", __func__);
1284 }
1285 dbg("%s: CAPREG offset %x cap_reg %x\n",
1286 __func__, ctrl->cap_base + CAPREG, cap_reg);
1287
1288 if (((cap_reg & SLOT_IMPL) == 0) ||
1289 (((cap_reg & DEV_PORT_TYPE) != 0x0040)
1290 && ((cap_reg & DEV_PORT_TYPE) != 0x0060))) {
1291 dbg("%s : This is not a root port or the port is not "
1292 "connected to a slot\n", __func__);
1293 goto abort; 1134 goto abort;
1294 } 1135 }
1295 1136 if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
1296 rc = pciehp_readl(ctrl, SLOTCAP, &slot_cap);
1297 if (rc) {
1298 err("%s: Cannot read SLOTCAP register\n", __func__); 1137 err("%s: Cannot read SLOTCAP register\n", __func__);
1299 goto abort; 1138 goto abort;
1300 } 1139 }
1301 dbg("%s: SLOTCAP offset %x slot_cap %x\n",
1302 __func__, ctrl->cap_base + SLOTCAP, slot_cap);
1303
1304 if (!(slot_cap & HP_CAP)) {
1305 dbg("%s : This slot is not hot-plug capable\n", __func__);
1306 goto abort;
1307 }
1308 /* For debugging purpose */
1309 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
1310 if (rc) {
1311 err("%s: Cannot read SLOTSTATUS register\n", __func__);
1312 goto abort;
1313 }
1314 dbg("%s: SLOTSTATUS offset %x slot_status %x\n",
1315 __func__, ctrl->cap_base + SLOTSTATUS, slot_status);
1316
1317 rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
1318 if (rc) {
1319 err("%s: Cannot read SLOTCTRL register\n", __func__);
1320 goto abort;
1321 }
1322 dbg("%s: SLOTCTRL offset %x slot_ctrl %x\n",
1323 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
1324
1325 for (rc = 0; rc < DEVICE_COUNT_RESOURCE; rc++)
1326 if (pci_resource_len(pdev, rc) > 0)
1327 dbg("pci resource[%d] start=0x%llx(len=0x%llx)\n", rc,
1328 (unsigned long long)pci_resource_start(pdev, rc),
1329 (unsigned long long)pci_resource_len(pdev, rc));
1330
1331 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
1332 pdev->vendor, pdev->device,
1333 pdev->subsystem_vendor, pdev->subsystem_device);
1334 1140
1141 ctrl->slot_cap = slot_cap;
1142 ctrl->first_slot = slot_cap >> 19;
1143 ctrl->slot_device_offset = 0;
1144 ctrl->num_slots = 1;
1145 ctrl->hpc_ops = &pciehp_hpc_ops;
1335 mutex_init(&ctrl->crit_sect); 1146 mutex_init(&ctrl->crit_sect);
1336 mutex_init(&ctrl->ctrl_lock); 1147 mutex_init(&ctrl->ctrl_lock);
1337 spin_lock_init(&ctrl->lock);
1338
1339 /* setup wait queue */
1340 init_waitqueue_head(&ctrl->queue); 1148 init_waitqueue_head(&ctrl->queue);
1149 dbg_ctrl(ctrl);
1341 1150
1342 /* return PCI Controller Info */ 1151 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
1343 ctrl->slot_device_offset = 0; 1152 pdev->vendor, pdev->device,
1344 ctrl->num_slots = 1; 1153 pdev->subsystem_vendor, pdev->subsystem_device);
1345 ctrl->first_slot = slot_cap >> 19;
1346 ctrl->ctrlcap = slot_cap & 0x0000007f;
1347 1154
1348 rc = pcie_init_hardware_part1(ctrl, dev); 1155 if (pcie_init_hardware_part1(ctrl, dev))
1349 if (rc)
1350 goto abort; 1156 goto abort;
1351 1157
1352 if (pciehp_poll_mode) { 1158 if (pciehp_request_irq(ctrl))
1353 /* Install interrupt polling timer. Start with 10 sec delay */ 1159 goto abort;
1354 init_timer(&ctrl->poll_timer);
1355 start_int_poll_timer(ctrl, 10);
1356 } else {
1357 /* Installs the interrupt handler */
1358 rc = request_irq(ctrl->pci_dev->irq, pcie_isr, IRQF_SHARED,
1359 MY_NAME, (void *)ctrl);
1360 dbg("%s: request_irq %d for hpc%d (returns %d)\n",
1361 __func__, ctrl->pci_dev->irq,
1362 atomic_read(&pciehp_num_controllers), rc);
1363 if (rc) {
1364 err("Can't get irq %d for the hotplug controller\n",
1365 ctrl->pci_dev->irq);
1366 goto abort;
1367 }
1368 }
1369 dbg("pciehp ctrl b:d:f:irq=0x%x:%x:%x:%x\n", pdev->bus->number,
1370 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), dev->irq);
1371 1160
1372 /* 1161 /*
1373 * If this is the first controller to be initialized, 1162 * If this is the first controller to be initialized,
@@ -1376,21 +1165,17 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
1376 if (atomic_add_return(1, &pciehp_num_controllers) == 1) { 1165 if (atomic_add_return(1, &pciehp_num_controllers) == 1) {
1377 pciehp_wq = create_singlethread_workqueue("pciehpd"); 1166 pciehp_wq = create_singlethread_workqueue("pciehpd");
1378 if (!pciehp_wq) { 1167 if (!pciehp_wq) {
1379 rc = -ENOMEM;
1380 goto abort_free_irq; 1168 goto abort_free_irq;
1381 } 1169 }
1382 } 1170 }
1383 1171
1384 rc = pcie_init_hardware_part2(ctrl, dev); 1172 if (pcie_init_hardware_part2(ctrl, dev))
1385 if (rc == 0) { 1173 goto abort_free_irq;
1386 ctrl->hpc_ops = &pciehp_hpc_ops; 1174
1387 return 0; 1175 return 0;
1388 } 1176
1389abort_free_irq: 1177abort_free_irq:
1390 if (pciehp_poll_mode) 1178 pciehp_free_irq(ctrl);
1391 del_timer_sync(&ctrl->poll_timer);
1392 else
1393 free_irq(ctrl->pci_dev->irq, ctrl);
1394abort: 1179abort:
1395 return -1; 1180 return -1;
1396} 1181}
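
Note on the rewritten pcie_init() above: the intermediate rc variable is gone because every helper (pcie_write_cmd, pcie_init_hardware_part1/2, pciehp_request_irq) returns 0 on success and nonzero on failure, so each step collapses to "if (helper()) goto abort;" with a short chain of abort labels. A minimal user-space sketch of that error-path pattern, with placeholder step_* helpers that are not the real pciehp functions:

#include <stdio.h>

/* placeholder steps: 0 = success, nonzero = failure */
static int step_read_caps(void)   { return 0; }
static int step_request_irq(void) { return 0; }
static int step_init_hw(void)     { return 1; }   /* pretend the last step fails */

static int init_controller(void)
{
	if (step_read_caps())
		goto abort;
	if (step_request_irq())
		goto abort_free_irq;
	if (step_init_hw())
		goto abort_free_irq;
	return 0;

abort_free_irq:
	puts("releasing irq");   /* undo the one resource acquired so far */
abort:
	return -1;
}

int main(void)
{
	return init_controller() ? 1 : 0;
}
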
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 43816d4b3c43..1648076600fc 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -39,6 +39,7 @@
39int shpchp_debug; 39int shpchp_debug;
40int shpchp_poll_mode; 40int shpchp_poll_mode;
41int shpchp_poll_time; 41int shpchp_poll_time;
42int shpchp_slot_with_bus;
42struct workqueue_struct *shpchp_wq; 43struct workqueue_struct *shpchp_wq;
43 44
44#define DRIVER_VERSION "0.4" 45#define DRIVER_VERSION "0.4"
@@ -52,9 +53,11 @@ MODULE_LICENSE("GPL");
52module_param(shpchp_debug, bool, 0644); 53module_param(shpchp_debug, bool, 0644);
53module_param(shpchp_poll_mode, bool, 0644); 54module_param(shpchp_poll_mode, bool, 0644);
54module_param(shpchp_poll_time, int, 0644); 55module_param(shpchp_poll_time, int, 0644);
56module_param(shpchp_slot_with_bus, bool, 0644);
55MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not"); 57MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
56MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not"); 58MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
57MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds"); 59MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
60MODULE_PARM_DESC(shpchp_slot_with_bus, "Use bus number in the slot name");
58 61
59#define SHPC_MODULE_NAME "shpchp" 62#define SHPC_MODULE_NAME "shpchp"
60 63
@@ -100,8 +103,12 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
100 103
101static void make_slot_name(struct slot *slot) 104static void make_slot_name(struct slot *slot)
102{ 105{
103 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d", 106 if (shpchp_slot_with_bus)
104 slot->bus, slot->number); 107 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
108 slot->bus, slot->number);
109 else
110 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
111 slot->number);
105} 112}
106 113
107static int init_slots(struct controller *ctrl) 114static int init_slots(struct controller *ctrl)
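
For reference, the two name formats the new make_slot_name() can produce: with shpchp_slot_with_bus set the slot name encodes bus and slot number ("0001_0005"), otherwise only the slot number ("5"). A small user-space sketch, assuming SLOT_NAME_SIZE is 10 as in the driver (treat that size as an assumption here):

#include <stdio.h>

#define SLOT_NAME_SIZE 10   /* assumed to match the driver's definition */

static void make_name(char *buf, int with_bus, int bus, int number)
{
	if (with_bus)
		snprintf(buf, SLOT_NAME_SIZE, "%04d_%04d", bus, number);
	else
		snprintf(buf, SLOT_NAME_SIZE, "%d", number);
}

int main(void)
{
	char name[SLOT_NAME_SIZE];

	make_name(name, 1, 1, 5);   /* shpchp_slot_with_bus=1 -> "0001_0005" */
	printf("%s\n", name);
	make_name(name, 0, 1, 5);   /* default                -> "5"        */
	printf("%s\n", name);
	return 0;
}
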
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 26938da8f438..8c61304cbb37 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -123,7 +123,7 @@ static void msix_flush_writes(unsigned int irq)
123 } 123 }
124} 124}
125 125
126static void msi_set_mask_bit(unsigned int irq, int flag) 126static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
127{ 127{
128 struct msi_desc *entry; 128 struct msi_desc *entry;
129 129
@@ -137,8 +137,8 @@ static void msi_set_mask_bit(unsigned int irq, int flag)
137 137
138 pos = (long)entry->mask_base; 138 pos = (long)entry->mask_base;
139 pci_read_config_dword(entry->dev, pos, &mask_bits); 139 pci_read_config_dword(entry->dev, pos, &mask_bits);
140 mask_bits &= ~(1); 140 mask_bits &= ~(mask);
141 mask_bits |= flag; 141 mask_bits |= flag & mask;
142 pci_write_config_dword(entry->dev, pos, mask_bits); 142 pci_write_config_dword(entry->dev, pos, mask_bits);
143 } else { 143 } else {
144 msi_set_enable(entry->dev, !flag); 144 msi_set_enable(entry->dev, !flag);
@@ -241,13 +241,13 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
241 241
242void mask_msi_irq(unsigned int irq) 242void mask_msi_irq(unsigned int irq)
243{ 243{
244 msi_set_mask_bit(irq, 1); 244 msi_set_mask_bits(irq, 1, 1);
245 msix_flush_writes(irq); 245 msix_flush_writes(irq);
246} 246}
247 247
248void unmask_msi_irq(unsigned int irq) 248void unmask_msi_irq(unsigned int irq)
249{ 249{
250 msi_set_mask_bit(irq, 0); 250 msi_set_mask_bits(irq, 1, 0);
251 msix_flush_writes(irq); 251 msix_flush_writes(irq);
252} 252}
253 253
@@ -291,7 +291,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
291 msi_set_enable(dev, 0); 291 msi_set_enable(dev, 0);
292 write_msi_msg(dev->irq, &entry->msg); 292 write_msi_msg(dev->irq, &entry->msg);
293 if (entry->msi_attrib.maskbit) 293 if (entry->msi_attrib.maskbit)
294 msi_set_mask_bit(dev->irq, entry->msi_attrib.masked); 294 msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask,
295 entry->msi_attrib.masked);
295 296
296 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 297 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
297 control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE); 298 control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
@@ -315,7 +316,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
315 316
316 list_for_each_entry(entry, &dev->msi_list, list) { 317 list_for_each_entry(entry, &dev->msi_list, list) {
317 write_msi_msg(entry->irq, &entry->msg); 318 write_msi_msg(entry->irq, &entry->msg);
318 msi_set_mask_bit(entry->irq, entry->msi_attrib.masked); 319 msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked);
319 } 320 }
320 321
321 BUG_ON(list_empty(&dev->msi_list)); 322 BUG_ON(list_empty(&dev->msi_list));
@@ -382,6 +383,7 @@ static int msi_capability_init(struct pci_dev *dev)
382 pci_write_config_dword(dev, 383 pci_write_config_dword(dev,
383 msi_mask_bits_reg(pos, is_64bit_address(control)), 384 msi_mask_bits_reg(pos, is_64bit_address(control)),
384 maskbits); 385 maskbits);
386 entry->msi_attrib.maskbits_mask = temp;
385 } 387 }
386 list_add_tail(&entry->list, &dev->msi_list); 388 list_add_tail(&entry->list, &dev->msi_list);
387 389
@@ -569,10 +571,9 @@ int pci_enable_msi(struct pci_dev* dev)
569} 571}
570EXPORT_SYMBOL(pci_enable_msi); 572EXPORT_SYMBOL(pci_enable_msi);
571 573
572void pci_disable_msi(struct pci_dev* dev) 574void pci_msi_shutdown(struct pci_dev* dev)
573{ 575{
574 struct msi_desc *entry; 576 struct msi_desc *entry;
575 int default_irq;
576 577
577 if (!pci_msi_enable || !dev || !dev->msi_enabled) 578 if (!pci_msi_enable || !dev || !dev->msi_enabled)
578 return; 579 return;
@@ -583,15 +584,31 @@ void pci_disable_msi(struct pci_dev* dev)
583 584
584 BUG_ON(list_empty(&dev->msi_list)); 585 BUG_ON(list_empty(&dev->msi_list));
585 entry = list_entry(dev->msi_list.next, struct msi_desc, list); 586 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
 586 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { 587 /* Return the device to the PCI reset state with MSI irqs unmasked */
587 return; 588 if (entry->msi_attrib.maskbit) {
589 u32 mask = entry->msi_attrib.maskbits_mask;
590 msi_set_mask_bits(dev->irq, mask, ~mask);
588 } 591 }
589 592 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
590 default_irq = entry->msi_attrib.default_irq; 593 return;
591 msi_free_irqs(dev);
592 594
593 /* Restore dev->irq to its default pin-assertion irq */ 595 /* Restore dev->irq to its default pin-assertion irq */
594 dev->irq = default_irq; 596 dev->irq = entry->msi_attrib.default_irq;
597}
598void pci_disable_msi(struct pci_dev* dev)
599{
600 struct msi_desc *entry;
601
602 if (!pci_msi_enable || !dev || !dev->msi_enabled)
603 return;
604
605 pci_msi_shutdown(dev);
606
607 entry = list_entry(dev->msi_list.next, struct msi_desc, list);
608 if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
609 return;
610
611 msi_free_irqs(dev);
595} 612}
596EXPORT_SYMBOL(pci_disable_msi); 613EXPORT_SYMBOL(pci_disable_msi);
597 614
@@ -684,7 +701,7 @@ static void msix_free_all_irqs(struct pci_dev *dev)
684 msi_free_irqs(dev); 701 msi_free_irqs(dev);
685} 702}
686 703
687void pci_disable_msix(struct pci_dev* dev) 704void pci_msix_shutdown(struct pci_dev* dev)
688{ 705{
689 if (!pci_msi_enable || !dev || !dev->msix_enabled) 706 if (!pci_msi_enable || !dev || !dev->msix_enabled)
690 return; 707 return;
@@ -692,6 +709,13 @@ void pci_disable_msix(struct pci_dev* dev)
692 msix_set_enable(dev, 0); 709 msix_set_enable(dev, 0);
693 pci_intx_for_msi(dev, 1); 710 pci_intx_for_msi(dev, 1);
694 dev->msix_enabled = 0; 711 dev->msix_enabled = 0;
712}
713void pci_disable_msix(struct pci_dev* dev)
714{
715 if (!pci_msi_enable || !dev || !dev->msix_enabled)
716 return;
717
718 pci_msix_shutdown(dev);
695 719
696 msix_free_all_irqs(dev); 720 msix_free_all_irqs(dev);
697} 721}
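
The new msi_set_mask_bits() takes an explicit mask so a caller only updates the bits it owns: the register is read, the bits covered by the mask are cleared, and the requested flag bits are or-ed back in. pci_msi_shutdown() passes flag = ~mask, which leaves every maskable vector unmasked. A standalone sketch of that read-modify-write arithmetic (not the kernel code itself):

#include <stdio.h>
#include <stdint.h>

static uint32_t apply_mask_bits(uint32_t reg, uint32_t mask, uint32_t flag)
{
	reg &= ~mask;        /* clear the bits covered by the mask     */
	reg |= flag & mask;  /* set only the bits requested by flag    */
	return reg;
}

int main(void)
{
	uint32_t reg = 0x0000000c;

	/* mask_msi_irq(): mask=1, flag=1 -> bit 0 becomes set (masked) */
	printf("0x%08x\n", apply_mask_bits(reg, 0x1, 0x1));

	/* shutdown path: flag=~mask -> every maskable bit is cleared (unmasked) */
	printf("0x%08x\n", apply_mask_bits(reg | 0xf, 0xf, ~0xfu));
	return 0;
}
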
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index e8d94fafc280..72cf61ed8f96 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -360,6 +360,8 @@ static void pci_device_shutdown(struct device *dev)
360 360
361 if (drv && drv->shutdown) 361 if (drv && drv->shutdown)
362 drv->shutdown(pci_dev); 362 drv->shutdown(pci_dev);
363 pci_msi_shutdown(pci_dev);
364 pci_msix_shutdown(pci_dev);
363} 365}
364 366
365/** 367/**
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 25b04fb2517d..5a0c6ad53f8e 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -33,7 +33,7 @@ source "drivers/pci/pcie/aer/Kconfig"
33config PCIEASPM 33config PCIEASPM
34 bool "PCI Express ASPM support(Experimental)" 34 bool "PCI Express ASPM support(Experimental)"
35 depends on PCI && EXPERIMENTAL && PCIEPORTBUS 35 depends on PCI && EXPERIMENTAL && PCIEPORTBUS
36 default y 36 default n
37 help 37 help
38 This enables PCI Express ASPM (Active State Power Management) and 38 This enables PCI Express ASPM (Active State Power Management) and
39 Clock Power Management. ASPM supports state L0/L0s/L1. 39 Clock Power Management. ASPM supports state L0/L0s/L1.
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 96ac54072f6f..d39a78dbd026 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -31,7 +31,7 @@ int aer_osc_setup(struct pcie_device *pciedev)
31{ 31{
32 acpi_status status = AE_NOT_FOUND; 32 acpi_status status = AE_NOT_FOUND;
33 struct pci_dev *pdev = pciedev->port; 33 struct pci_dev *pdev = pciedev->port;
34 acpi_handle handle = 0; 34 acpi_handle handle = NULL;
35 35
36 if (acpi_pci_disabled) 36 if (acpi_pci_disabled)
37 return -1; 37 return -1;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f991359f0c36..3706ce7972dd 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -842,6 +842,21 @@ static void set_pcie_port_type(struct pci_dev *pdev)
842 * reading the dword at 0x100 which must either be 0 or a valid extended 842 * reading the dword at 0x100 which must either be 0 or a valid extended
843 * capability header. 843 * capability header.
844 */ 844 */
845int pci_cfg_space_size_ext(struct pci_dev *dev)
846{
847 u32 status;
848
849 if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL)
850 goto fail;
851 if (status == 0xffffffff)
852 goto fail;
853
854 return PCI_CFG_SPACE_EXP_SIZE;
855
856 fail:
857 return PCI_CFG_SPACE_SIZE;
858}
859
845int pci_cfg_space_size(struct pci_dev *dev) 860int pci_cfg_space_size(struct pci_dev *dev)
846{ 861{
847 int pos; 862 int pos;
@@ -858,12 +873,7 @@ int pci_cfg_space_size(struct pci_dev *dev)
858 goto fail; 873 goto fail;
859 } 874 }
860 875
861 if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL) 876 return pci_cfg_space_size_ext(dev);
862 goto fail;
863 if (status == 0xffffffff)
864 goto fail;
865
866 return PCI_CFG_SPACE_EXP_SIZE;
867 877
868 fail: 878 fail:
869 return PCI_CFG_SPACE_SIZE; 879 return PCI_CFG_SPACE_SIZE;
@@ -964,7 +974,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
964 dev->dev.release = pci_release_dev; 974 dev->dev.release = pci_release_dev;
965 pci_dev_get(dev); 975 pci_dev_get(dev);
966 976
967 set_dev_node(&dev->dev, pcibus_to_node(bus));
968 dev->dev.dma_mask = &dev->dma_mask; 977 dev->dev.dma_mask = &dev->dma_mask;
969 dev->dev.dma_parms = &dev->dma_parms; 978 dev->dev.dma_parms = &dev->dma_parms;
970 dev->dev.coherent_dma_mask = 0xffffffffull; 979 dev->dev.coherent_dma_mask = 0xffffffffull;
@@ -1080,6 +1089,10 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1080 return max; 1089 return max;
1081} 1090}
1082 1091
1092void __attribute__((weak)) set_pci_bus_resources_arch_default(struct pci_bus *b)
1093{
1094}
1095
1083struct pci_bus * pci_create_bus(struct device *parent, 1096struct pci_bus * pci_create_bus(struct device *parent,
1084 int bus, struct pci_ops *ops, void *sysdata) 1097 int bus, struct pci_ops *ops, void *sysdata)
1085{ 1098{
@@ -1119,6 +1132,9 @@ struct pci_bus * pci_create_bus(struct device *parent,
1119 goto dev_reg_err; 1132 goto dev_reg_err;
1120 b->bridge = get_device(dev); 1133 b->bridge = get_device(dev);
1121 1134
1135 if (!parent)
1136 set_dev_node(b->bridge, pcibus_to_node(b));
1137
1122 b->dev.class = &pcibus_class; 1138 b->dev.class = &pcibus_class;
1123 b->dev.parent = b->bridge; 1139 b->dev.parent = b->bridge;
1124 sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus); 1140 sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus);
@@ -1136,6 +1152,8 @@ struct pci_bus * pci_create_bus(struct device *parent,
1136 b->resource[0] = &ioport_resource; 1152 b->resource[0] = &ioport_resource;
1137 b->resource[1] = &iomem_resource; 1153 b->resource[1] = &iomem_resource;
1138 1154
1155 set_pci_bus_resources_arch_default(b);
1156
1139 return b; 1157 return b;
1140 1158
1141dev_create_file_err: 1159dev_create_file_err:
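
set_pci_bus_resources_arch_default() is added above as a weak symbol: the generic default does nothing, and an architecture that provides a non-weak definition with the same signature overrides it at link time. A minimal sketch of the weak-symbol technique (GCC-style attribute assumed, bodies illustrative only):

struct pci_bus;   /* opaque for this sketch */

/* generic weak default: a no-op unless arch code overrides it */
void __attribute__((weak)) set_pci_bus_resources_arch_default(struct pci_bus *b)
{
}

/*
 * In architecture code (a separate object file), a strong definition with
 * the same signature would silently replace the weak one at link time:
 *
 *   void set_pci_bus_resources_arch_default(struct pci_bus *b)
 *   {
 *           ... assign arch-specific window resources to b ...
 *   }
 */

int main(void)
{
	set_pci_bus_resources_arch_default((struct pci_bus *)0);
	return 0;
}
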
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index ef18fcd641e2..963a97642ae9 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -293,6 +293,7 @@ static int proc_bus_pci_release(struct inode *inode, struct file *file)
293#endif /* HAVE_PCI_MMAP */ 293#endif /* HAVE_PCI_MMAP */
294 294
295static const struct file_operations proc_bus_pci_operations = { 295static const struct file_operations proc_bus_pci_operations = {
296 .owner = THIS_MODULE,
296 .llseek = proc_bus_pci_lseek, 297 .llseek = proc_bus_pci_lseek,
297 .read = proc_bus_pci_read, 298 .read = proc_bus_pci_read,
298 .write = proc_bus_pci_write, 299 .write = proc_bus_pci_write,
@@ -406,11 +407,10 @@ int pci_proc_attach_device(struct pci_dev *dev)
406 } 407 }
407 408
408 sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); 409 sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
409 e = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir); 410 e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir,
411 &proc_bus_pci_operations, dev);
410 if (!e) 412 if (!e)
411 return -ENOMEM; 413 return -ENOMEM;
412 e->proc_fops = &proc_bus_pci_operations;
413 e->data = dev;
414 e->size = dev->cfg_size; 414 e->size = dev->cfg_size;
415 dev->procent = e; 415 dev->procent = e;
416 416
@@ -462,6 +462,7 @@ static int proc_bus_pci_dev_open(struct inode *inode, struct file *file)
462 return seq_open(file, &proc_bus_pci_devices_op); 462 return seq_open(file, &proc_bus_pci_devices_op);
463} 463}
464static const struct file_operations proc_bus_pci_dev_operations = { 464static const struct file_operations proc_bus_pci_dev_operations = {
465 .owner = THIS_MODULE,
465 .open = proc_bus_pci_dev_open, 466 .open = proc_bus_pci_dev_open,
466 .read = seq_read, 467 .read = seq_read,
467 .llseek = seq_lseek, 468 .llseek = seq_lseek,
@@ -470,12 +471,10 @@ static const struct file_operations proc_bus_pci_dev_operations = {
470 471
471static int __init pci_proc_init(void) 472static int __init pci_proc_init(void)
472{ 473{
473 struct proc_dir_entry *entry;
474 struct pci_dev *dev = NULL; 474 struct pci_dev *dev = NULL;
475 proc_bus_pci_dir = proc_mkdir("pci", proc_bus); 475 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
476 entry = create_proc_entry("devices", 0, proc_bus_pci_dir); 476 proc_create("devices", 0, proc_bus_pci_dir,
477 if (entry) 477 &proc_bus_pci_dev_operations);
478 entry->proc_fops = &proc_bus_pci_dev_operations;
479 proc_initialized = 1; 478 proc_initialized = 1;
480 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 479 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
481 pci_proc_attach_device(dev); 480 pci_proc_attach_device(dev);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 8d8852651fd2..1b0eb5aaf650 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -38,7 +38,6 @@ config PCMCIA_DEBUG
38config PCMCIA 38config PCMCIA
39 tristate "16-bit PCMCIA support" 39 tristate "16-bit PCMCIA support"
40 select CRC32 40 select CRC32
41 select HAVE_IDE
42 default y 41 default y
43 ---help--- 42 ---help---
44 This option enables support for 16-bit PCMCIA cards. Most older 43 This option enables support for 16-bit PCMCIA cards. Most older
diff --git a/drivers/pcmcia/au1000_db1x00.c b/drivers/pcmcia/au1000_db1x00.c
index 74e051535d6c..c78d77fd7e3b 100644
--- a/drivers/pcmcia/au1000_db1x00.c
+++ b/drivers/pcmcia/au1000_db1x00.c
@@ -194,7 +194,7 @@ db1x00_pcmcia_configure_socket(struct au1000_pcmcia_socket *skt, struct socket_s
194 default: 194 default:
195 pwr |= SET_VCC_VPP(0,0,sock); 195 pwr |= SET_VCC_VPP(0,0,sock);
196 printk("%s: bad Vcc/Vpp (%d:%d)\n", 196 printk("%s: bad Vcc/Vpp (%d:%d)\n",
197 __FUNCTION__, 197 __func__,
198 state->Vcc, 198 state->Vcc,
199 state->Vpp); 199 state->Vpp);
200 break; 200 break;
@@ -215,7 +215,7 @@ db1x00_pcmcia_configure_socket(struct au1000_pcmcia_socket *skt, struct socket_s
215 default: 215 default:
216 pwr |= SET_VCC_VPP(0,0,sock); 216 pwr |= SET_VCC_VPP(0,0,sock);
217 printk("%s: bad Vcc/Vpp (%d:%d)\n", 217 printk("%s: bad Vcc/Vpp (%d:%d)\n",
218 __FUNCTION__, 218 __func__,
219 state->Vcc, 219 state->Vcc,
220 state->Vpp); 220 state->Vpp);
221 break; 221 break;
@@ -224,7 +224,7 @@ db1x00_pcmcia_configure_socket(struct au1000_pcmcia_socket *skt, struct socket_s
224 default: /* what's this ? */ 224 default: /* what's this ? */
225 pwr |= SET_VCC_VPP(0,0,sock); 225 pwr |= SET_VCC_VPP(0,0,sock);
226 printk(KERN_ERR "%s: bad Vcc %d\n", 226 printk(KERN_ERR "%s: bad Vcc %d\n",
227 __FUNCTION__, state->Vcc); 227 __func__, state->Vcc);
228 break; 228 break;
229 } 229 }
230 230
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c
index b693367d38cd..75e8f8505e47 100644
--- a/drivers/pcmcia/au1000_generic.c
+++ b/drivers/pcmcia/au1000_generic.c
@@ -41,6 +41,7 @@
41#include <linux/notifier.h> 41#include <linux/notifier.h>
42#include <linux/interrupt.h> 42#include <linux/interrupt.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/mutex.h>
44#include <linux/platform_device.h> 45#include <linux/platform_device.h>
45 46
46#include <asm/io.h> 47#include <asm/io.h>
@@ -71,7 +72,7 @@ extern struct au1000_pcmcia_socket au1000_pcmcia_socket[];
71u32 *pcmcia_base_vaddrs[2]; 72u32 *pcmcia_base_vaddrs[2];
72extern const unsigned long mips_io_port_base; 73extern const unsigned long mips_io_port_base;
73 74
74DECLARE_MUTEX(pcmcia_sockets_lock); 75static DEFINE_MUTEX(pcmcia_sockets_lock);
75 76
76static int (*au1x00_pcmcia_hw_init[])(struct device *dev) = { 77static int (*au1x00_pcmcia_hw_init[])(struct device *dev) = {
77 au1x_board_init, 78 au1x_board_init,
@@ -472,7 +473,7 @@ int au1x00_drv_pcmcia_remove(struct device *dev)
472 struct skt_dev_info *sinfo = dev_get_drvdata(dev); 473 struct skt_dev_info *sinfo = dev_get_drvdata(dev);
473 int i; 474 int i;
474 475
475 down(&pcmcia_sockets_lock); 476 mutex_lock(&pcmcia_sockets_lock);
476 dev_set_drvdata(dev, NULL); 477 dev_set_drvdata(dev, NULL);
477 478
478 for (i = 0; i < sinfo->nskt; i++) { 479 for (i = 0; i < sinfo->nskt; i++) {
@@ -488,7 +489,7 @@ int au1x00_drv_pcmcia_remove(struct device *dev)
488 } 489 }
489 490
490 kfree(sinfo); 491 kfree(sinfo);
491 up(&pcmcia_sockets_lock); 492 mutex_unlock(&pcmcia_sockets_lock);
492 return 0; 493 return 0;
493} 494}
494 495
@@ -501,13 +502,13 @@ static int au1x00_drv_pcmcia_probe(struct device *dev)
501{ 502{
502 int i, ret = -ENODEV; 503 int i, ret = -ENODEV;
503 504
504 down(&pcmcia_sockets_lock); 505 mutex_lock(&pcmcia_sockets_lock);
505 for (i=0; i < ARRAY_SIZE(au1x00_pcmcia_hw_init); i++) { 506 for (i=0; i < ARRAY_SIZE(au1x00_pcmcia_hw_init); i++) {
506 ret = au1x00_pcmcia_hw_init[i](dev); 507 ret = au1x00_pcmcia_hw_init[i](dev);
507 if (ret == 0) 508 if (ret == 0)
508 break; 509 break;
509 } 510 }
510 up(&pcmcia_sockets_lock); 511 mutex_unlock(&pcmcia_sockets_lock);
511 return ret; 512 return ret;
512} 513}
513 514
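
This hunk is one instance of a conversion repeated through the series: a counting semaphore used purely as a lock (DECLARE_MUTEX plus down()/up()) becomes a real mutex (DEFINE_MUTEX plus mutex_lock()/mutex_unlock()), the idiomatic primitive when the resource only ever has one owner. A user-space analogue of the pattern using pthreads:

#include <pthread.h>
#include <stdio.h>

/* ~ DEFINE_MUTEX(pcmcia_sockets_lock) */
static pthread_mutex_t sockets_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_sockets;

static void add_socket(void)
{
	pthread_mutex_lock(&sockets_lock);    /* ~ mutex_lock()   */
	nr_sockets++;                         /* protected update */
	pthread_mutex_unlock(&sockets_lock);  /* ~ mutex_unlock() */
}

int main(void)
{
	add_socket();
	printf("%d socket(s)\n", nr_sockets);
	return 0;
}
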
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
index 86c0808d6a05..157e41423a0a 100644
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ b/drivers/pcmcia/au1000_pb1x00.c
@@ -244,7 +244,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
244 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ, 244 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
245 configure->sock); 245 configure->sock);
246 printk("%s: bad Vcc/Vpp (%d:%d)\n", 246 printk("%s: bad Vcc/Vpp (%d:%d)\n",
247 __FUNCTION__, 247 __func__,
248 configure->vcc, 248 configure->vcc,
249 configure->vpp); 249 configure->vpp);
250 break; 250 break;
@@ -272,7 +272,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
272 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ, 272 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
273 configure->sock); 273 configure->sock);
274 printk("%s: bad Vcc/Vpp (%d:%d)\n", 274 printk("%s: bad Vcc/Vpp (%d:%d)\n",
275 __FUNCTION__, 275 __func__,
276 configure->vcc, 276 configure->vcc,
277 configure->vpp); 277 configure->vpp);
278 break; 278 break;
@@ -300,7 +300,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
300 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ, 300 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
301 configure->sock); 301 configure->sock);
302 printk("%s: bad Vcc/Vpp (%d:%d)\n", 302 printk("%s: bad Vcc/Vpp (%d:%d)\n",
303 __FUNCTION__, 303 __func__,
304 configure->vcc, 304 configure->vcc,
305 configure->vpp); 305 configure->vpp);
306 break; 306 break;
@@ -309,7 +309,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
309 default: /* what's this ? */ 309 default: /* what's this ? */
310 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,configure->sock); 310 pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,configure->sock);
311 printk(KERN_ERR "%s: bad Vcc %d\n", 311 printk(KERN_ERR "%s: bad Vcc %d\n",
312 __FUNCTION__, configure->vcc); 312 __func__, configure->vcc);
313 break; 313 break;
314 } 314 }
315 315
@@ -353,7 +353,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
353 default: 353 default:
354 pcr |= SET_VCC_VPP(0,0); 354 pcr |= SET_VCC_VPP(0,0);
355 printk("%s: bad Vcc/Vpp (%d:%d)\n", 355 printk("%s: bad Vcc/Vpp (%d:%d)\n",
356 __FUNCTION__, 356 __func__,
357 configure->vcc, 357 configure->vcc,
358 configure->vpp); 358 configure->vpp);
359 break; 359 break;
@@ -374,7 +374,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
374 default: 374 default:
375 pcr |= SET_VCC_VPP(0,0); 375 pcr |= SET_VCC_VPP(0,0);
376 printk("%s: bad Vcc/Vpp (%d:%d)\n", 376 printk("%s: bad Vcc/Vpp (%d:%d)\n",
377 __FUNCTION__, 377 __func__,
378 configure->vcc, 378 configure->vcc,
379 configure->vpp); 379 configure->vpp);
380 break; 380 break;
@@ -383,7 +383,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
383 default: /* what's this ? */ 383 default: /* what's this ? */
384 pcr |= SET_VCC_VPP(0,0); 384 pcr |= SET_VCC_VPP(0,0);
385 printk(KERN_ERR "%s: bad Vcc %d\n", 385 printk(KERN_ERR "%s: bad Vcc %d\n",
386 __FUNCTION__, configure->vcc); 386 __func__, configure->vcc);
387 break; 387 break;
388 } 388 }
389 389
diff --git a/drivers/pcmcia/au1000_xxs1500.c b/drivers/pcmcia/au1000_xxs1500.c
index ce9d5c44a7b5..c78ed5347510 100644
--- a/drivers/pcmcia/au1000_xxs1500.c
+++ b/drivers/pcmcia/au1000_xxs1500.c
@@ -56,7 +56,7 @@
56#define PCMCIA_IRQ AU1000_GPIO_4 56#define PCMCIA_IRQ AU1000_GPIO_4
57 57
58#if 0 58#if 0
 59#define DEBUG(x,args...) printk(__FUNCTION__ ": " x,##args) 59#define DEBUG(x, args...) printk("%s: " x, __func__, ##args)
60#else 60#else
61#define DEBUG(x,args...) 61#define DEBUG(x,args...)
62#endif 62#endif
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 714baaeb6da1..fb2f38dc92c5 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -209,7 +209,7 @@ static void cardbus_assign_irqs(struct pci_bus *bus, int irq)
209 } 209 }
210} 210}
211 211
212int cb_alloc(struct pcmcia_socket * s) 212int __ref cb_alloc(struct pcmcia_socket * s)
213{ 213{
214 struct pci_bus *bus = s->cb_dev->subordinate; 214 struct pci_bus *bus = s->cb_dev->subordinate;
215 struct pci_dev *dev; 215 struct pci_dev *dev;
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 06a85d7d5aa2..36379535f9da 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -402,15 +402,6 @@ EXPORT_SYMBOL(pcmcia_replace_cis);
402 402
403======================================================================*/ 403======================================================================*/
404 404
405static inline u16 cis_get_u16(void *ptr)
406{
407 return le16_to_cpu(get_unaligned((__le16 *) ptr));
408}
409static inline u32 cis_get_u32(void *ptr)
410{
411 return le32_to_cpu(get_unaligned((__le32 *) ptr));
412}
413
414typedef struct tuple_flags { 405typedef struct tuple_flags {
415 u_int link_space:4; 406 u_int link_space:4;
416 u_int has_link:1; 407 u_int has_link:1;
@@ -471,7 +462,7 @@ static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
471 /* Get indirect link from the MFC tuple */ 462 /* Get indirect link from the MFC tuple */
472 read_cis_cache(s, LINK_SPACE(tuple->Flags), 463 read_cis_cache(s, LINK_SPACE(tuple->Flags),
473 tuple->LinkOffset, 5, link); 464 tuple->LinkOffset, 5, link);
474 ofs = cis_get_u32(link + 1); 465 ofs = get_unaligned_le32(link + 1);
475 SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); 466 SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
476 /* Move to the next indirect link */ 467 /* Move to the next indirect link */
477 tuple->LinkOffset += 5; 468 tuple->LinkOffset += 5;
@@ -679,8 +670,8 @@ static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum)
679 if (tuple->TupleDataLen < 5) 670 if (tuple->TupleDataLen < 5)
680 return CS_BAD_TUPLE; 671 return CS_BAD_TUPLE;
681 p = (u_char *) tuple->TupleData; 672 p = (u_char *) tuple->TupleData;
682 csum->addr = tuple->CISOffset + cis_get_u16(p) - 2; 673 csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
683 csum->len = cis_get_u16(p + 2); 674 csum->len = get_unaligned_le16(p + 2);
684 csum->sum = *(p + 4); 675 csum->sum = *(p + 4);
685 return CS_SUCCESS; 676 return CS_SUCCESS;
686} 677}
@@ -691,7 +682,7 @@ static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link)
691{ 682{
692 if (tuple->TupleDataLen < 4) 683 if (tuple->TupleDataLen < 4)
693 return CS_BAD_TUPLE; 684 return CS_BAD_TUPLE;
694 link->addr = cis_get_u32(tuple->TupleData); 685 link->addr = get_unaligned_le32(tuple->TupleData);
695 return CS_SUCCESS; 686 return CS_SUCCESS;
696} 687}
697 688
@@ -710,7 +701,7 @@ static int parse_longlink_mfc(tuple_t *tuple,
710 return CS_BAD_TUPLE; 701 return CS_BAD_TUPLE;
711 for (i = 0; i < link->nfn; i++) { 702 for (i = 0; i < link->nfn; i++) {
712 link->fn[i].space = *p; p++; 703 link->fn[i].space = *p; p++;
713 link->fn[i].addr = cis_get_u32(p); 704 link->fn[i].addr = get_unaligned_le32(p);
714 p += 4; 705 p += 4;
715 } 706 }
716 return CS_SUCCESS; 707 return CS_SUCCESS;
@@ -800,8 +791,8 @@ static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m)
800{ 791{
801 if (tuple->TupleDataLen < 4) 792 if (tuple->TupleDataLen < 4)
802 return CS_BAD_TUPLE; 793 return CS_BAD_TUPLE;
803 m->manf = cis_get_u16(tuple->TupleData); 794 m->manf = get_unaligned_le16(tuple->TupleData);
804 m->card = cis_get_u16(tuple->TupleData + 2); 795 m->card = get_unaligned_le16(tuple->TupleData + 2);
805 return CS_SUCCESS; 796 return CS_SUCCESS;
806} 797}
807 798
@@ -1100,7 +1091,7 @@ static int parse_cftable_entry(tuple_t *tuple,
1100 break; 1091 break;
1101 case 0x20: 1092 case 0x20:
1102 entry->mem.nwin = 1; 1093 entry->mem.nwin = 1;
1103 entry->mem.win[0].len = cis_get_u16(p) << 8; 1094 entry->mem.win[0].len = get_unaligned_le16(p) << 8;
1104 entry->mem.win[0].card_addr = 0; 1095 entry->mem.win[0].card_addr = 0;
1105 entry->mem.win[0].host_addr = 0; 1096 entry->mem.win[0].host_addr = 0;
1106 p += 2; 1097 p += 2;
@@ -1108,8 +1099,8 @@ static int parse_cftable_entry(tuple_t *tuple,
1108 break; 1099 break;
1109 case 0x40: 1100 case 0x40:
1110 entry->mem.nwin = 1; 1101 entry->mem.nwin = 1;
1111 entry->mem.win[0].len = cis_get_u16(p) << 8; 1102 entry->mem.win[0].len = get_unaligned_le16(p) << 8;
1112 entry->mem.win[0].card_addr = cis_get_u16(p + 2) << 8; 1103 entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
1113 entry->mem.win[0].host_addr = 0; 1104 entry->mem.win[0].host_addr = 0;
1114 p += 4; 1105 p += 4;
1115 if (p > q) return CS_BAD_TUPLE; 1106 if (p > q) return CS_BAD_TUPLE;
@@ -1146,7 +1137,7 @@ static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar)
1146 p = (u_char *)tuple->TupleData; 1137 p = (u_char *)tuple->TupleData;
1147 bar->attr = *p; 1138 bar->attr = *p;
1148 p += 2; 1139 p += 2;
1149 bar->size = cis_get_u32(p); 1140 bar->size = get_unaligned_le32(p);
1150 return CS_SUCCESS; 1141 return CS_SUCCESS;
1151} 1142}
1152 1143
@@ -1159,7 +1150,7 @@ static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config)
1159 return CS_BAD_TUPLE; 1150 return CS_BAD_TUPLE;
1160 config->last_idx = *(++p); 1151 config->last_idx = *(++p);
1161 p++; 1152 p++;
1162 config->base = cis_get_u32(p); 1153 config->base = get_unaligned_le32(p);
1163 config->subtuples = tuple->TupleDataLen - 6; 1154 config->subtuples = tuple->TupleDataLen - 6;
1164 return CS_SUCCESS; 1155 return CS_SUCCESS;
1165} 1156}
@@ -1275,7 +1266,7 @@ static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
1275 1266
1276 v2->vers = p[0]; 1267 v2->vers = p[0];
1277 v2->comply = p[1]; 1268 v2->comply = p[1];
 1278 v2->dindex = cis_get_u16(p + 2); 1269 v2->dindex = get_unaligned_le16(p + 2);
1279 v2->vspec8 = p[6]; 1270 v2->vspec8 = p[6];
1280 v2->vspec9 = p[7]; 1271 v2->vspec9 = p[7];
1281 v2->nhdr = p[8]; 1272 v2->nhdr = p[8];
@@ -1316,8 +1307,8 @@ static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
1316 1307
1317 fmt->type = p[0]; 1308 fmt->type = p[0];
1318 fmt->edc = p[1]; 1309 fmt->edc = p[1];
1319 fmt->offset = cis_get_u32(p + 2); 1310 fmt->offset = get_unaligned_le32(p + 2);
1320 fmt->length = cis_get_u32(p + 6); 1311 fmt->length = get_unaligned_le32(p + 6);
1321 1312
1322 return CS_SUCCESS; 1313 return CS_SUCCESS;
1323} 1314}
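
The cistpl.c changes drop the local cis_get_u16()/cis_get_u32() helpers in favour of the generic get_unaligned_le16()/get_unaligned_le32() accessors; both read a little-endian value from a possibly unaligned buffer. A portable user-space stand-in using byte-wise reads (not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

static uint16_t read_le16(const void *p)
{
	const uint8_t *b = p;
	return (uint16_t)(b[0] | (b[1] << 8));
}

static uint32_t read_le32(const void *p)
{
	const uint8_t *b = p;
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* odd offsets into the buffer -> unaligned accesses on most ABIs */
	uint8_t tuple[7] = { 0xff, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };

	printf("0x%04x\n", read_le16(tuple + 1));  /* 0x1234     */
	printf("0x%08x\n", read_le32(tuple + 3));  /* 0x12345678 */
	return 0;
}
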
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 56230dbd347a..29276bd28295 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -652,6 +652,9 @@ static int pccardd(void *__skt)
652 complete(&skt->thread_done); 652 complete(&skt->thread_done);
653 return 0; 653 return 0;
654 } 654 }
655 ret = pccard_sysfs_add_socket(&skt->dev);
656 if (ret)
657 dev_warn(&skt->dev, "err %d adding socket attributes\n", ret);
655 658
656 add_wait_queue(&skt->thread_wait, &wait); 659 add_wait_queue(&skt->thread_wait, &wait);
657 complete(&skt->thread_done); 660 complete(&skt->thread_done);
@@ -694,6 +697,7 @@ static int pccardd(void *__skt)
694 remove_wait_queue(&skt->thread_wait, &wait); 697 remove_wait_queue(&skt->thread_wait, &wait);
695 698
696 /* remove from the device core */ 699 /* remove from the device core */
700 pccard_sysfs_remove_socket(&skt->dev);
697 device_unregister(&skt->dev); 701 device_unregister(&skt->dev);
698 702
699 return 0; 703 return 0;
@@ -940,20 +944,13 @@ EXPORT_SYMBOL(pcmcia_socket_class);
940 944
941static int __init init_pcmcia_cs(void) 945static int __init init_pcmcia_cs(void)
942{ 946{
943 int ret;
944
945 init_completion(&pcmcia_unload); 947 init_completion(&pcmcia_unload);
946 ret = class_register(&pcmcia_socket_class); 948 return class_register(&pcmcia_socket_class);
947 if (ret)
948 return (ret);
949 return class_interface_register(&pccard_sysfs_interface);
950} 949}
951 950
952static void __exit exit_pcmcia_cs(void) 951static void __exit exit_pcmcia_cs(void)
953{ 952{
954 class_interface_unregister(&pccard_sysfs_interface);
955 class_unregister(&pcmcia_socket_class); 953 class_unregister(&pcmcia_socket_class);
956
957 wait_for_completion(&pcmcia_unload); 954 wait_for_completion(&pcmcia_unload);
958} 955}
959 956
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 9fa207e3c7b3..e7d5d141f24d 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -121,7 +121,8 @@ struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
121void release_resource_db(struct pcmcia_socket *s); 121void release_resource_db(struct pcmcia_socket *s);
122 122
123/* In socket_sysfs.c */ 123/* In socket_sysfs.c */
124extern struct class_interface pccard_sysfs_interface; 124extern int pccard_sysfs_add_socket(struct device *dev);
125extern void pccard_sysfs_remove_socket(struct device *dev);
125 126
126/* In cs.c */ 127/* In cs.c */
127extern struct rw_semaphore pcmcia_socket_list_rwsem; 128extern struct rw_semaphore pcmcia_socket_list_rwsem;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 5a85871f5ee9..e40775443d04 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1520,7 +1520,7 @@ static void pcmcia_bus_remove_socket(struct device *dev,
1520 1520
1521 1521
1522/* the pcmcia_bus_interface is used to handle pcmcia socket devices */ 1522/* the pcmcia_bus_interface is used to handle pcmcia socket devices */
1523static struct class_interface pcmcia_bus_interface = { 1523static struct class_interface pcmcia_bus_interface __refdata = {
1524 .class = &pcmcia_socket_class, 1524 .class = &pcmcia_socket_class,
1525 .add_dev = &pcmcia_bus_add_socket, 1525 .add_dev = &pcmcia_bus_add_socket,
1526 .remove_dev = &pcmcia_bus_remove_socket, 1526 .remove_dev = &pcmcia_bus_remove_socket,
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index e54ecc580d9e..e13618656ff7 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -53,7 +53,7 @@ static int i82092aa_socket_resume (struct pci_dev *dev)
53} 53}
54#endif 54#endif
55 55
56static struct pci_driver i82092aa_pci_drv = { 56static struct pci_driver i82092aa_pci_driver = {
57 .name = "i82092aa", 57 .name = "i82092aa",
58 .id_table = i82092aa_pci_ids, 58 .id_table = i82092aa_pci_ids,
59 .probe = i82092aa_pci_probe, 59 .probe = i82092aa_pci_probe,
@@ -714,13 +714,13 @@ static int i82092aa_set_mem_map(struct pcmcia_socket *socket, struct pccard_mem_
714 714
715static int i82092aa_module_init(void) 715static int i82092aa_module_init(void)
716{ 716{
717 return pci_register_driver(&i82092aa_pci_drv); 717 return pci_register_driver(&i82092aa_pci_driver);
718} 718}
719 719
720static void i82092aa_module_exit(void) 720static void i82092aa_module_exit(void)
721{ 721{
722 enter("i82092aa_module_exit"); 722 enter("i82092aa_module_exit");
723 pci_unregister_driver(&i82092aa_pci_drv); 723 pci_unregister_driver(&i82092aa_pci_driver);
724 if (sockets[0].io_base>0) 724 if (sockets[0].io_base>0)
725 release_region(sockets[0].io_base, 2); 725 release_region(sockets[0].io_base, 2);
726 leave("i82092aa_module_exit"); 726 leave("i82092aa_module_exit");
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index bb6db3a582b2..46314b420765 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -153,7 +153,7 @@ omap_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
153 153
154static int omap_cf_ss_suspend(struct pcmcia_socket *s) 154static int omap_cf_ss_suspend(struct pcmcia_socket *s)
155{ 155{
156 pr_debug("%s: %s\n", driver_name, __FUNCTION__); 156 pr_debug("%s: %s\n", driver_name, __func__);
157 return omap_cf_set_socket(s, &dead_socket); 157 return omap_cf_set_socket(s, &dead_socket);
158} 158}
159 159
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 27523c5f4dad..5f186abca108 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -787,7 +787,7 @@ void __init pcmcia_setup_ioctl(void) {
787 major_dev = i; 787 major_dev = i;
788 788
789#ifdef CONFIG_PROC_FS 789#ifdef CONFIG_PROC_FS
790 proc_pccard = proc_mkdir("pccard", proc_bus); 790 proc_pccard = proc_mkdir("bus/pccard", NULL);
791 if (proc_pccard) 791 if (proc_pccard)
792 create_proc_read_entry("drivers",0,proc_pccard,proc_read_drivers,NULL); 792 create_proc_read_entry("drivers",0,proc_pccard,proc_read_drivers,NULL);
793#endif 793#endif
@@ -798,7 +798,7 @@ void __exit pcmcia_cleanup_ioctl(void) {
798#ifdef CONFIG_PROC_FS 798#ifdef CONFIG_PROC_FS
799 if (proc_pccard) { 799 if (proc_pccard) {
800 remove_proc_entry("drivers", proc_pccard); 800 remove_proc_entry("drivers", proc_pccard);
801 remove_proc_entry("pccard", proc_bus); 801 remove_proc_entry("bus/pccard", NULL);
802 } 802 }
803#endif 803#endif
804 if (major_dev != -1) 804 if (major_dev != -1)
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index abc10fe49bd8..8bed1dab9039 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -778,7 +778,7 @@ static struct pci_device_id pd6729_pci_ids[] = {
778}; 778};
779MODULE_DEVICE_TABLE(pci, pd6729_pci_ids); 779MODULE_DEVICE_TABLE(pci, pd6729_pci_ids);
780 780
781static struct pci_driver pd6729_pci_drv = { 781static struct pci_driver pd6729_pci_driver = {
782 .name = "pd6729", 782 .name = "pd6729",
783 .id_table = pd6729_pci_ids, 783 .id_table = pd6729_pci_ids,
784 .probe = pd6729_pci_probe, 784 .probe = pd6729_pci_probe,
@@ -791,12 +791,12 @@ static struct pci_driver pd6729_pci_drv = {
791 791
792static int pd6729_module_init(void) 792static int pd6729_module_init(void)
793{ 793{
794 return pci_register_driver(&pd6729_pci_drv); 794 return pci_register_driver(&pd6729_pci_driver);
795} 795}
796 796
797static void pd6729_module_exit(void) 797static void pd6729_module_exit(void)
798{ 798{
799 pci_unregister_driver(&pd6729_pci_drv); 799 pci_unregister_driver(&pd6729_pci_driver);
800} 800}
801 801
802module_init(pd6729_module_init); 802module_init(pd6729_module_init);
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index 4a05802213c8..881ec8a8e389 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -87,7 +87,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
87 87
88 default: 88 default:
89 printk(KERN_ERR "%s(): unrecognized Vcc %u\n", 89 printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
90 __FUNCTION__, state->Vcc); 90 __func__, state->Vcc);
91 ret = -1; 91 ret = -1;
92 } 92 }
93 93
@@ -104,7 +104,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
104 pa_dwr_set |= GPIO_A0; 104 pa_dwr_set |= GPIO_A0;
105 else { 105 else {
106 printk(KERN_ERR "%s(): unrecognized Vpp %u\n", 106 printk(KERN_ERR "%s(): unrecognized Vpp %u\n",
107 __FUNCTION__, state->Vpp); 107 __func__, state->Vpp);
108 ret = -1; 108 ret = -1;
109 break; 109 break;
110 } 110 }
@@ -128,14 +128,14 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
128 128
129 default: 129 default:
130 printk(KERN_ERR "%s(): unrecognized Vcc %u\n", 130 printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
131 __FUNCTION__, state->Vcc); 131 __func__, state->Vcc);
132 ret = -1; 132 ret = -1;
133 break; 133 break;
134 } 134 }
135 135
136 if (state->Vpp != state->Vcc && state->Vpp != 0) { 136 if (state->Vpp != state->Vcc && state->Vpp != 0) {
137 printk(KERN_ERR "%s(): CF slot cannot support Vpp %u\n", 137 printk(KERN_ERR "%s(): CF slot cannot support Vpp %u\n",
138 __FUNCTION__, state->Vpp); 138 __func__, state->Vpp);
139 ret = -1; 139 ret = -1;
140 break; 140 break;
141 } 141 }
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 6fa5eaaab8af..145b85e0f02c 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -99,7 +99,7 @@ static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
99 case 50: power |= MST_PCMCIA_PWR_VCC_50; break; 99 case 50: power |= MST_PCMCIA_PWR_VCC_50; break;
100 default: 100 default:
101 printk(KERN_ERR "%s(): bad Vcc %u\n", 101 printk(KERN_ERR "%s(): bad Vcc %u\n",
102 __FUNCTION__, state->Vcc); 102 __func__, state->Vcc);
103 ret = -1; 103 ret = -1;
104 } 104 }
105 105
@@ -111,7 +111,7 @@ static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
111 power |= MST_PCMCIA_PWR_VPP_VCC; 111 power |= MST_PCMCIA_PWR_VPP_VCC;
112 } else { 112 } else {
113 printk(KERN_ERR "%s(): bad Vpp %u\n", 113 printk(KERN_ERR "%s(): bad Vpp %u\n",
114 __FUNCTION__, state->Vpp); 114 __func__, state->Vpp);
115 ret = -1; 115 ret = -1;
116 } 116 }
117 } 117 }
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index a8d100707721..0fcf763b9175 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -1045,7 +1045,7 @@ static void __devexit pccard_sysfs_remove_rsrc(struct device *dev,
1045 device_remove_file(dev, *attr); 1045 device_remove_file(dev, *attr);
1046} 1046}
1047 1047
1048static struct class_interface pccard_rsrc_interface = { 1048static struct class_interface pccard_rsrc_interface __refdata = {
1049 .class = &pcmcia_socket_class, 1049 .class = &pcmcia_socket_class,
1050 .add_dev = &pccard_sysfs_add_rsrc, 1050 .add_dev = &pccard_sysfs_add_rsrc,
1051 .remove_dev = __devexit_p(&pccard_sysfs_remove_rsrc), 1051 .remove_dev = __devexit_p(&pccard_sysfs_remove_rsrc),
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index 7c57fdd3c8d7..ce133ce81c10 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -66,14 +66,14 @@ assabet_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_stat
66 66
67 case 50: 67 case 50:
68 printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V...\n", 68 printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V...\n",
69 __FUNCTION__); 69 __func__);
70 70
71 case 33: /* Can only apply 3.3V to the CF slot. */ 71 case 33: /* Can only apply 3.3V to the CF slot. */
72 mask = ASSABET_BCR_CF_PWR; 72 mask = ASSABET_BCR_CF_PWR;
73 break; 73 break;
74 74
75 default: 75 default:
76 printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __FUNCTION__, 76 printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__,
77 state->Vcc); 77 state->Vcc);
78 return -1; 78 return -1;
79 } 79 }
diff --git a/drivers/pcmcia/sa1100_badge4.c b/drivers/pcmcia/sa1100_badge4.c
index 62bfc7566ec2..607c3f326eca 100644
--- a/drivers/pcmcia/sa1100_badge4.c
+++ b/drivers/pcmcia/sa1100_badge4.c
@@ -82,14 +82,14 @@ badge4_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state
82 case 0: 82 case 0:
83 if ((state->Vcc != 0) && 83 if ((state->Vcc != 0) &&
84 (state->Vcc != badge4_pcmvcc)) { 84 (state->Vcc != badge4_pcmvcc)) {
85 complain_about_jumpering(__FUNCTION__, "pcmvcc", 85 complain_about_jumpering(__func__, "pcmvcc",
86 badge4_pcmvcc, state->Vcc); 86 badge4_pcmvcc, state->Vcc);
87 // Apply power regardless of the jumpering. 87 // Apply power regardless of the jumpering.
88 // return -1; 88 // return -1;
89 } 89 }
90 if ((state->Vpp != 0) && 90 if ((state->Vpp != 0) &&
91 (state->Vpp != badge4_pcmvpp)) { 91 (state->Vpp != badge4_pcmvpp)) {
92 complain_about_jumpering(__FUNCTION__, "pcmvpp", 92 complain_about_jumpering(__func__, "pcmvpp",
93 badge4_pcmvpp, state->Vpp); 93 badge4_pcmvpp, state->Vpp);
94 return -1; 94 return -1;
95 } 95 }
@@ -98,7 +98,7 @@ badge4_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state
98 case 1: 98 case 1:
99 if ((state->Vcc != 0) && 99 if ((state->Vcc != 0) &&
100 (state->Vcc != badge4_cfvcc)) { 100 (state->Vcc != badge4_cfvcc)) {
101 complain_about_jumpering(__FUNCTION__, "cfvcc", 101 complain_about_jumpering(__func__, "cfvcc",
102 badge4_cfvcc, state->Vcc); 102 badge4_cfvcc, state->Vcc);
103 return -1; 103 return -1;
104 } 104 }
@@ -143,7 +143,7 @@ int pcmcia_badge4_init(struct device *dev)
143 if (machine_is_badge4()) { 143 if (machine_is_badge4()) {
144 printk(KERN_INFO 144 printk(KERN_INFO
145 "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n", 145 "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
146 __FUNCTION__, 146 __func__,
147 badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc); 147 badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
148 148
149 ret = sa11xx_drv_pcmcia_probe(dev, &badge4_pcmcia_ops, 0, 2); 149 ret = sa11xx_drv_pcmcia_probe(dev, &badge4_pcmcia_ops, 0, 2);
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index 549a1529fe35..7c3951a2675d 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -63,7 +63,7 @@ cerf_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
63 63
64 default: 64 default:
65 printk(KERN_ERR "%s(): unrecognized Vcc %u\n", 65 printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
66 __FUNCTION__, state->Vcc); 66 __func__, state->Vcc);
67 return -1; 67 return -1;
68 } 68 }
69 69
diff --git a/drivers/pcmcia/sa1100_jornada720.c b/drivers/pcmcia/sa1100_jornada720.c
index 6284c35dabc6..2167e6714d2d 100644
--- a/drivers/pcmcia/sa1100_jornada720.c
+++ b/drivers/pcmcia/sa1100_jornada720.c
@@ -42,7 +42,7 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
42 unsigned int pa_dwr_mask, pa_dwr_set; 42 unsigned int pa_dwr_mask, pa_dwr_set;
43 int ret; 43 int ret;
44 44
45printk("%s(): config socket %d vcc %d vpp %d\n", __FUNCTION__, 45printk("%s(): config socket %d vcc %d vpp %d\n", __func__,
46 skt->nr, state->Vcc, state->Vpp); 46 skt->nr, state->Vcc, state->Vpp);
47 47
48 switch (skt->nr) { 48 switch (skt->nr) {
@@ -74,7 +74,7 @@ printk("%s(): config socket %d vcc %d vpp %d\n", __FUNCTION__,
74 74
75 if (state->Vpp != state->Vcc && state->Vpp != 0) { 75 if (state->Vpp != state->Vcc && state->Vpp != 0) {
76 printk(KERN_ERR "%s(): slot cannot support VPP %u\n", 76 printk(KERN_ERR "%s(): slot cannot support VPP %u\n",
77 __FUNCTION__, state->Vpp); 77 __func__, state->Vpp);
78 return -1; 78 return -1;
79 } 79 }
80 80
diff --git a/drivers/pcmcia/sa1100_neponset.c b/drivers/pcmcia/sa1100_neponset.c
index 5bc9e9532b9d..687492fcd5b4 100644
--- a/drivers/pcmcia/sa1100_neponset.c
+++ b/drivers/pcmcia/sa1100_neponset.c
@@ -59,7 +59,7 @@ neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_sta
59 ncr_set = NCR_A0VPP; 59 ncr_set = NCR_A0VPP;
60 else { 60 else {
61 printk(KERN_ERR "%s(): unrecognized VPP %u\n", 61 printk(KERN_ERR "%s(): unrecognized VPP %u\n",
62 __FUNCTION__, state->Vpp); 62 __func__, state->Vpp);
63 return -1; 63 return -1;
64 } 64 }
65 break; 65 break;
@@ -71,7 +71,7 @@ neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_sta
71 71
72 if (state->Vpp != state->Vcc && state->Vpp != 0) { 72 if (state->Vpp != state->Vcc && state->Vpp != 0) {
73 printk(KERN_ERR "%s(): CF slot cannot support VPP %u\n", 73 printk(KERN_ERR "%s(): CF slot cannot support VPP %u\n",
74 __FUNCTION__, state->Vpp); 74 __func__, state->Vpp);
75 return -1; 75 return -1;
76 } 76 }
77 break; 77 break;
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c
index 9456f5478d09..494912fccc0d 100644
--- a/drivers/pcmcia/sa1100_shannon.c
+++ b/drivers/pcmcia/sa1100_shannon.c
@@ -73,19 +73,19 @@ shannon_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
73{ 73{
74 switch (state->Vcc) { 74 switch (state->Vcc) {
75 case 0: /* power off */ 75 case 0: /* power off */
76 printk(KERN_WARNING "%s(): CS asked for 0V, still applying 3.3V..\n", __FUNCTION__); 76 printk(KERN_WARNING "%s(): CS asked for 0V, still applying 3.3V..\n", __func__);
77 break; 77 break;
78 case 50: 78 case 50:
79 printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V..\n", __FUNCTION__); 79 printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V..\n", __func__);
80 case 33: 80 case 33:
81 break; 81 break;
82 default: 82 default:
83 printk(KERN_ERR "%s(): unrecognized Vcc %u\n", 83 printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
84 __FUNCTION__, state->Vcc); 84 __func__, state->Vcc);
85 return -1; 85 return -1;
86 } 86 }
87 87
88 printk(KERN_WARNING "%s(): Warning, Can't perform reset\n", __FUNCTION__); 88 printk(KERN_WARNING "%s(): Warning, Can't perform reset\n", __func__);
89 89
90 /* Silently ignore Vpp, output enable, speaker enable. */ 90 /* Silently ignore Vpp, output enable, speaker enable. */
91 91
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index 04d6f7f75f78..42567de894b9 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -90,7 +90,7 @@ simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
90 90
91 default: 91 default:
92 printk(KERN_ERR "%s(): unrecognized Vcc %u\n", 92 printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
93 __FUNCTION__, state->Vcc); 93 __func__, state->Vcc);
94 clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); 94 clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
95 local_irq_restore(flags); 95 local_irq_restore(flags);
96 return -1; 96 return -1;
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index aa7779d89752..420a77540f41 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -37,6 +37,7 @@
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/timer.h> 38#include <linux/timer.h>
39#include <linux/mm.h> 39#include <linux/mm.h>
40#include <linux/mutex.h>
40#include <linux/interrupt.h> 41#include <linux/interrupt.h>
41#include <linux/irq.h> 42#include <linux/irq.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
@@ -353,7 +354,7 @@ soc_common_pcmcia_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *m
353 (map->flags&MAP_PREFETCH)?"PREFETCH ":""); 354 (map->flags&MAP_PREFETCH)?"PREFETCH ":"");
354 355
355 if (map->map >= MAX_IO_WIN) { 356 if (map->map >= MAX_IO_WIN) {
356 printk(KERN_ERR "%s(): map (%d) out of range\n", __FUNCTION__, 357 printk(KERN_ERR "%s(): map (%d) out of range\n", __func__,
357 map->map); 358 map->map);
358 return -1; 359 return -1;
359 } 360 }
@@ -578,7 +579,7 @@ EXPORT_SYMBOL(soc_pcmcia_enable_irqs);
578 579
579 580
580LIST_HEAD(soc_pcmcia_sockets); 581LIST_HEAD(soc_pcmcia_sockets);
581DECLARE_MUTEX(soc_pcmcia_sockets_lock); 582static DEFINE_MUTEX(soc_pcmcia_sockets_lock);
582 583
583static const char *skt_names[] = { 584static const char *skt_names[] = {
584 "PCMCIA socket 0", 585 "PCMCIA socket 0",
@@ -601,11 +602,11 @@ soc_pcmcia_notifier(struct notifier_block *nb, unsigned long val, void *data)
601 struct cpufreq_freqs *freqs = data; 602 struct cpufreq_freqs *freqs = data;
602 int ret = 0; 603 int ret = 0;
603 604
604 down(&soc_pcmcia_sockets_lock); 605 mutex_lock(&soc_pcmcia_sockets_lock);
605 list_for_each_entry(skt, &soc_pcmcia_sockets, node) 606 list_for_each_entry(skt, &soc_pcmcia_sockets, node)
606 if ( skt->ops->frequency_change ) 607 if ( skt->ops->frequency_change )
607 ret += skt->ops->frequency_change(skt, val, freqs); 608 ret += skt->ops->frequency_change(skt, val, freqs);
608 up(&soc_pcmcia_sockets_lock); 609 mutex_unlock(&soc_pcmcia_sockets_lock);
609 610
610 return ret; 611 return ret;
611} 612}
@@ -642,7 +643,7 @@ int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops
642 struct soc_pcmcia_socket *skt; 643 struct soc_pcmcia_socket *skt;
643 int ret, i; 644 int ret, i;
644 645
645 down(&soc_pcmcia_sockets_lock); 646 mutex_lock(&soc_pcmcia_sockets_lock);
646 647
647 sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL); 648 sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
648 if (!sinfo) { 649 if (!sinfo) {
@@ -782,7 +783,7 @@ int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops
782 kfree(sinfo); 783 kfree(sinfo);
783 784
784 out: 785 out:
785 up(&soc_pcmcia_sockets_lock); 786 mutex_unlock(&soc_pcmcia_sockets_lock);
786 return ret; 787 return ret;
787} 788}
788 789
@@ -793,7 +794,7 @@ int soc_common_drv_pcmcia_remove(struct device *dev)
793 794
794 dev_set_drvdata(dev, NULL); 795 dev_set_drvdata(dev, NULL);
795 796
796 down(&soc_pcmcia_sockets_lock); 797 mutex_lock(&soc_pcmcia_sockets_lock);
797 for (i = 0; i < sinfo->nskt; i++) { 798 for (i = 0; i < sinfo->nskt; i++) {
798 struct soc_pcmcia_socket *skt = &sinfo->skt[i]; 799 struct soc_pcmcia_socket *skt = &sinfo->skt[i];
799 800
@@ -818,7 +819,7 @@ int soc_common_drv_pcmcia_remove(struct device *dev)
818 if (list_empty(&soc_pcmcia_sockets)) 819 if (list_empty(&soc_pcmcia_sockets))
819 soc_pcmcia_cpufreq_unregister(); 820 soc_pcmcia_cpufreq_unregister();
820 821
821 up(&soc_pcmcia_sockets_lock); 822 mutex_unlock(&soc_pcmcia_sockets_lock);
822 823
823 kfree(sinfo); 824 kfree(sinfo);
824 825
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 6f14126889b3..1edc1da9d353 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -133,7 +133,6 @@ extern void soc_common_pcmcia_get_timing(struct soc_pcmcia_socket *, struct soc_
133 133
134 134
135extern struct list_head soc_pcmcia_sockets; 135extern struct list_head soc_pcmcia_sockets;
136extern struct semaphore soc_pcmcia_sockets_lock;
137 136
138extern int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr); 137extern int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr);
139extern int soc_common_drv_pcmcia_remove(struct device *dev); 138extern int soc_common_drv_pcmcia_remove(struct device *dev);
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index b4409002b7f8..562384d6f321 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -356,19 +356,23 @@ static ssize_t pccard_store_cis(struct kobject *kobj,
356} 356}
357 357
358 358
359static struct device_attribute *pccard_socket_attributes[] = { 359static struct attribute *pccard_socket_attributes[] = {
360 &dev_attr_card_type, 360 &dev_attr_card_type.attr,
361 &dev_attr_card_voltage, 361 &dev_attr_card_voltage.attr,
362 &dev_attr_card_vpp, 362 &dev_attr_card_vpp.attr,
363 &dev_attr_card_vcc, 363 &dev_attr_card_vcc.attr,
364 &dev_attr_card_insert, 364 &dev_attr_card_insert.attr,
365 &dev_attr_card_pm_state, 365 &dev_attr_card_pm_state.attr,
366 &dev_attr_card_eject, 366 &dev_attr_card_eject.attr,
367 &dev_attr_card_irq_mask, 367 &dev_attr_card_irq_mask.attr,
368 &dev_attr_available_resources_setup_done, 368 &dev_attr_available_resources_setup_done.attr,
369 NULL, 369 NULL,
370}; 370};
371 371
372static const struct attribute_group socket_attrs = {
373 .attrs = pccard_socket_attributes,
374};
375
372static struct bin_attribute pccard_cis_attr = { 376static struct bin_attribute pccard_cis_attr = {
373 .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR }, 377 .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
374 .size = 0x200, 378 .size = 0x200,
@@ -376,35 +380,21 @@ static struct bin_attribute pccard_cis_attr = {
376 .write = pccard_store_cis, 380 .write = pccard_store_cis,
377}; 381};
378 382
379static int __devinit pccard_sysfs_add_socket(struct device *dev, 383int pccard_sysfs_add_socket(struct device *dev)
380 struct class_interface *class_intf)
381{ 384{
382 struct device_attribute **attr;
383 int ret = 0; 385 int ret = 0;
384 386
385 for (attr = pccard_socket_attributes; *attr; attr++) { 387 ret = sysfs_create_group(&dev->kobj, &socket_attrs);
386 ret = device_create_file(dev, *attr); 388 if (!ret) {
389 ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
387 if (ret) 390 if (ret)
388 break; 391 sysfs_remove_group(&dev->kobj, &socket_attrs);
389 } 392 }
390 if (!ret)
391 ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
392
393 return ret; 393 return ret;
394} 394}
395 395
396static void __devexit pccard_sysfs_remove_socket(struct device *dev, 396void pccard_sysfs_remove_socket(struct device *dev)
397 struct class_interface *class_intf)
398{ 397{
399 struct device_attribute **attr;
400
401 sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr); 398 sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr);
402 for (attr = pccard_socket_attributes; *attr; attr++) 399 sysfs_remove_group(&dev->kobj, &socket_attrs);
403 device_remove_file(dev, *attr);
404} 400}
405
406struct class_interface pccard_sysfs_interface = {
407 .class = &pcmcia_socket_class,
408 .add_dev = &pccard_sysfs_add_socket,
409 .remove_dev = __devexit_p(&pccard_sysfs_remove_socket),
410};
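The socket_sysfs.c change above replaces a hand-rolled loop over struct device_attribute pointers with a struct attribute_group, so the whole set of files is created and removed with a single sysfs call. A minimal sketch of the pattern, using a hypothetical "foo" attribute:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/stat.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,		/* note the .attr member, as in the hunk */
	NULL,
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

static int example_add(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &example_group);
}

static void example_remove(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &example_group);
}

The group also simplifies the error path: if creating the binary "cis" file fails, a single sysfs_remove_group() call undoes everything.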
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 31a633f65547..4fe7c58f57e9 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -1,12 +1,78 @@
1extern spinlock_t pnp_lock; 1extern spinlock_t pnp_lock;
2void *pnp_alloc(long size); 2void *pnp_alloc(long size);
3
4int pnp_register_protocol(struct pnp_protocol *protocol);
5void pnp_unregister_protocol(struct pnp_protocol *protocol);
6
7#define PNP_EISA_ID_MASK 0x7fffffff
8void pnp_eisa_id_to_string(u32 id, char *str);
9struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *, int id, char *pnpid);
10struct pnp_card *pnp_alloc_card(struct pnp_protocol *, int id, char *pnpid);
11
12int pnp_add_device(struct pnp_dev *dev);
13struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id);
3int pnp_interface_attach_device(struct pnp_dev *dev); 14int pnp_interface_attach_device(struct pnp_dev *dev);
15
16int pnp_add_card(struct pnp_card *card);
17struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id);
18void pnp_remove_card(struct pnp_card *card);
19int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev);
20void pnp_remove_card_device(struct pnp_dev *dev);
21
22struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev);
23struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev,
24 int priority);
25int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option,
26 struct pnp_irq *data);
27int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option,
28 struct pnp_dma *data);
29int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option,
30 struct pnp_port *data);
31int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option,
32 struct pnp_mem *data);
33void pnp_init_resources(struct pnp_dev *dev);
34
4void pnp_fixup_device(struct pnp_dev *dev); 35void pnp_fixup_device(struct pnp_dev *dev);
5void pnp_free_option(struct pnp_option *option); 36void pnp_free_option(struct pnp_option *option);
6int __pnp_add_device(struct pnp_dev *dev); 37int __pnp_add_device(struct pnp_dev *dev);
7void __pnp_remove_device(struct pnp_dev *dev); 38void __pnp_remove_device(struct pnp_dev *dev);
8 39
9int pnp_check_port(struct pnp_dev * dev, int idx); 40int pnp_check_port(struct pnp_dev *dev, struct resource *res);
10int pnp_check_mem(struct pnp_dev * dev, int idx); 41int pnp_check_mem(struct pnp_dev *dev, struct resource *res);
11int pnp_check_irq(struct pnp_dev * dev, int idx); 42int pnp_check_irq(struct pnp_dev *dev, struct resource *res);
12int pnp_check_dma(struct pnp_dev * dev, int idx); 43int pnp_check_dma(struct pnp_dev *dev, struct resource *res);
44
45void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc);
46
47void pnp_init_resource(struct resource *res);
48
49struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev,
50 unsigned int type, unsigned int num);
51
52#define PNP_MAX_PORT 40
53#define PNP_MAX_MEM 24
54#define PNP_MAX_IRQ 2
55#define PNP_MAX_DMA 2
56
57struct pnp_resource {
58 struct resource res;
59 unsigned int index; /* ISAPNP config register index */
60};
61
62struct pnp_resource_table {
63 struct pnp_resource port[PNP_MAX_PORT];
64 struct pnp_resource mem[PNP_MAX_MEM];
65 struct pnp_resource dma[PNP_MAX_DMA];
66 struct pnp_resource irq[PNP_MAX_IRQ];
67};
68
69struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
70 int flags);
71struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
72 int flags);
73struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
74 resource_size_t start,
75 resource_size_t end, int flags);
76struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
77 resource_size_t start,
78 resource_size_t end, int flags);
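base.h now carries a per-device struct pnp_resource_table (fixed-size arrays of struct pnp_resource, each pairing a struct resource with its ISAPNP config-register index) plus pnp_add_io/mem/irq/dma_resource() helpers. A hypothetical caller, assuming the helpers fill the next free slot of the matching array and return the entry they used (or NULL when the table is full), as the later interface.c and isapnp hunks suggest:

#include <linux/pnp.h>
#include <linux/ioport.h>
#include "base.h"			/* drivers/pnp internal header shown above */

static void example_fill_resources(struct pnp_dev *dev)
{
	struct pnp_resource *pnp_res;

	pnp_init_resources(dev);	/* start from an empty table */

	pnp_res = pnp_add_io_resource(dev, 0x220, 0x22f, 0);
	if (pnp_res)
		pnp_res->index = 0;	/* ISAPNP config register index */

	pnp_add_irq_resource(dev, 5, 0);
	pnp_add_dma_resource(dev, 1, 0);
}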
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index da1c9909eb44..a762a4176736 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/ctype.h>
8#include <linux/slab.h> 9#include <linux/slab.h>
9#include <linux/pnp.h> 10#include <linux/pnp.h>
10#include "base.h" 11#include "base.h"
@@ -100,19 +101,33 @@ static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv)
100 * @id: pointer to a pnp_id structure 101 * @id: pointer to a pnp_id structure
101 * @card: pointer to the desired card 102 * @card: pointer to the desired card
102 */ 103 */
103int pnp_add_card_id(struct pnp_id *id, struct pnp_card *card) 104struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id)
104{ 105{
105 struct pnp_id *ptr; 106 struct pnp_id *dev_id, *ptr;
106 107
107 id->next = NULL; 108 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
109 if (!dev_id)
110 return NULL;
111
112 dev_id->id[0] = id[0];
113 dev_id->id[1] = id[1];
114 dev_id->id[2] = id[2];
115 dev_id->id[3] = tolower(id[3]);
116 dev_id->id[4] = tolower(id[4]);
117 dev_id->id[5] = tolower(id[5]);
118 dev_id->id[6] = tolower(id[6]);
119 dev_id->id[7] = '\0';
120
121 dev_id->next = NULL;
108 ptr = card->id; 122 ptr = card->id;
109 while (ptr && ptr->next) 123 while (ptr && ptr->next)
110 ptr = ptr->next; 124 ptr = ptr->next;
111 if (ptr) 125 if (ptr)
112 ptr->next = id; 126 ptr->next = dev_id;
113 else 127 else
114 card->id = id; 128 card->id = dev_id;
115 return 0; 129
130 return dev_id;
116} 131}
117 132
118static void pnp_free_card_ids(struct pnp_card *card) 133static void pnp_free_card_ids(struct pnp_card *card)
@@ -136,6 +151,31 @@ static void pnp_release_card(struct device *dmdev)
136 kfree(card); 151 kfree(card);
137} 152}
138 153
154struct pnp_card *pnp_alloc_card(struct pnp_protocol *protocol, int id, char *pnpid)
155{
156 struct pnp_card *card;
157 struct pnp_id *dev_id;
158
159 card = kzalloc(sizeof(struct pnp_card), GFP_KERNEL);
160 if (!card)
161 return NULL;
162
163 card->protocol = protocol;
164 card->number = id;
165
166 card->dev.parent = &card->protocol->dev;
167 sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number,
168 card->number);
169
170 dev_id = pnp_add_card_id(card, pnpid);
171 if (!dev_id) {
172 kfree(card);
173 return NULL;
174 }
175
176 return card;
177}
178
139static ssize_t pnp_show_card_name(struct device *dmdev, 179static ssize_t pnp_show_card_name(struct device *dmdev,
140 struct device_attribute *attr, char *buf) 180 struct device_attribute *attr, char *buf)
141{ 181{
@@ -191,9 +231,6 @@ int pnp_add_card(struct pnp_card *card)
191 int error; 231 int error;
192 struct list_head *pos, *temp; 232 struct list_head *pos, *temp;
193 233
194 sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number,
195 card->number);
196 card->dev.parent = &card->protocol->dev;
197 card->dev.bus = NULL; 234 card->dev.bus = NULL;
198 card->dev.release = &pnp_release_card; 235 card->dev.release = &pnp_release_card;
199 error = device_register(&card->dev); 236 error = device_register(&card->dev);
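pnp_add_card_id() (like pnp_add_id() in driver.c below) now allocates the struct pnp_id itself from a plain 7-character EISA id string and lower-cases the four hex digits so ids compare consistently. A small illustration of that normalization, with an invented helper name:

#include <linux/ctype.h>

static void example_copy_pnp_id(char dst[8], const char *src)
{
	int i;

	for (i = 0; i < 3; i++)
		dst[i] = src[i];		/* vendor letters unchanged */
	for (i = 3; i < 7; i++)
		dst[i] = tolower(src[i]);	/* hex digits forced lower case */
	dst[7] = '\0';
}

pnp_alloc_card() then bundles the kzalloc, protocol/number assignment, bus_id formatting and first id into one constructor, which is why pnp_add_card() no longer sets bus_id or dev.parent itself.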
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 7d366ca672d3..20771b7d4482 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -106,18 +106,53 @@ static void pnp_release_device(struct device *dmdev)
106 pnp_free_option(dev->independent); 106 pnp_free_option(dev->independent);
107 pnp_free_option(dev->dependent); 107 pnp_free_option(dev->dependent);
108 pnp_free_ids(dev); 108 pnp_free_ids(dev);
109 kfree(dev->res);
109 kfree(dev); 110 kfree(dev);
110} 111}
111 112
112int __pnp_add_device(struct pnp_dev *dev) 113struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid)
113{ 114{
114 int ret; 115 struct pnp_dev *dev;
116 struct pnp_id *dev_id;
115 117
116 pnp_fixup_device(dev); 118 dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
119 if (!dev)
120 return NULL;
121
122 dev->res = kzalloc(sizeof(struct pnp_resource_table), GFP_KERNEL);
123 if (!dev->res) {
124 kfree(dev);
125 return NULL;
126 }
127
128 dev->protocol = protocol;
129 dev->number = id;
130 dev->dma_mask = DMA_24BIT_MASK;
131
132 dev->dev.parent = &dev->protocol->dev;
117 dev->dev.bus = &pnp_bus_type; 133 dev->dev.bus = &pnp_bus_type;
118 dev->dev.dma_mask = &dev->dma_mask; 134 dev->dev.dma_mask = &dev->dma_mask;
119 dev->dma_mask = dev->dev.coherent_dma_mask = DMA_24BIT_MASK; 135 dev->dev.coherent_dma_mask = dev->dma_mask;
120 dev->dev.release = &pnp_release_device; 136 dev->dev.release = &pnp_release_device;
137
138 sprintf(dev->dev.bus_id, "%02x:%02x", dev->protocol->number,
139 dev->number);
140
141 dev_id = pnp_add_id(dev, pnpid);
142 if (!dev_id) {
143 kfree(dev->res);
144 kfree(dev);
145 return NULL;
146 }
147
148 return dev;
149}
150
151int __pnp_add_device(struct pnp_dev *dev)
152{
153 int ret;
154
155 pnp_fixup_device(dev);
121 dev->status = PNP_READY; 156 dev->status = PNP_READY;
122 spin_lock(&pnp_lock); 157 spin_lock(&pnp_lock);
123 list_add_tail(&dev->global_list, &pnp_global); 158 list_add_tail(&dev->global_list, &pnp_global);
@@ -145,9 +180,6 @@ int pnp_add_device(struct pnp_dev *dev)
145 if (dev->card) 180 if (dev->card)
146 return -EINVAL; 181 return -EINVAL;
147 182
148 dev->dev.parent = &dev->protocol->dev;
149 sprintf(dev->dev.bus_id, "%02x:%02x", dev->protocol->number,
150 dev->number);
151 ret = __pnp_add_device(dev); 183 ret = __pnp_add_device(dev);
152 if (ret) 184 if (ret)
153 return ret; 185 return ret;
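pnp_alloc_dev() consolidates what callers previously did by hand: allocate the device and its (now separately kzalloc'd) resource table, set the protocol, number, DMA masks and bus_id, and attach the first EISA id. A sketch of the paired allocation/unwind shape it uses, with illustrative structure names:

#include <linux/slab.h>

struct example_res { int placeholder; };
struct example_dev { struct example_res *res; };

static struct example_dev *example_alloc(void)
{
	struct example_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->res = kzalloc(sizeof(*dev->res), GFP_KERNEL);
	if (!dev->res) {
		kfree(dev);		/* undo the first allocation on failure */
		return NULL;
	}
	return dev;
}

The matching kfree(dev->res) added to pnp_release_device() keeps the release path symmetric with the new constructor.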
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index e85cbf116db1..d3f869ee1d92 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -226,22 +226,36 @@ void pnp_unregister_driver(struct pnp_driver *drv)
226 226
227/** 227/**
228 * pnp_add_id - adds an EISA id to the specified device 228 * pnp_add_id - adds an EISA id to the specified device
229 * @id: pointer to a pnp_id structure
230 * @dev: pointer to the desired device 229 * @dev: pointer to the desired device
230 * @id: pointer to an EISA id string
231 */ 231 */
232int pnp_add_id(struct pnp_id *id, struct pnp_dev *dev) 232struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id)
233{ 233{
234 struct pnp_id *ptr; 234 struct pnp_id *dev_id, *ptr;
235 235
236 id->next = NULL; 236 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
237 if (!dev_id)
238 return NULL;
239
240 dev_id->id[0] = id[0];
241 dev_id->id[1] = id[1];
242 dev_id->id[2] = id[2];
243 dev_id->id[3] = tolower(id[3]);
244 dev_id->id[4] = tolower(id[4]);
245 dev_id->id[5] = tolower(id[5]);
246 dev_id->id[6] = tolower(id[6]);
247 dev_id->id[7] = '\0';
248
249 dev_id->next = NULL;
237 ptr = dev->id; 250 ptr = dev->id;
238 while (ptr && ptr->next) 251 while (ptr && ptr->next)
239 ptr = ptr->next; 252 ptr = ptr->next;
240 if (ptr) 253 if (ptr)
241 ptr->next = id; 254 ptr->next = dev_id;
242 else 255 else
243 dev->id = id; 256 dev->id = dev_id;
244 return 0; 257
258 return dev_id;
245} 259}
246 260
247EXPORT_SYMBOL(pnp_register_driver); 261EXPORT_SYMBOL(pnp_register_driver);
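Both id helpers append to a singly linked list by walking to the tail, so compatible ids keep the order in which they were parsed. A sketch of that append idiom with invented types:

#include <linux/kernel.h>

struct example_id {
	char id[8];
	struct example_id *next;
};

static void example_append_id(struct example_id **head, struct example_id *new_id)
{
	struct example_id *ptr = *head;

	new_id->next = NULL;
	if (!ptr) {
		*head = new_id;		/* first id on the device */
		return;
	}
	while (ptr->next)
		ptr = ptr->next;	/* find the tail */
	ptr->next = new_id;
}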
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 982658477a58..5d9301de1778 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -248,6 +248,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev,
248 char *buf) 248 char *buf)
249{ 249{
250 struct pnp_dev *dev = to_pnp_dev(dmdev); 250 struct pnp_dev *dev = to_pnp_dev(dmdev);
251 struct resource *res;
251 int i, ret; 252 int i, ret;
252 pnp_info_buffer_t *buffer; 253 pnp_info_buffer_t *buffer;
253 254
@@ -267,50 +268,46 @@ static ssize_t pnp_show_current_resources(struct device *dmdev,
267 else 268 else
268 pnp_printf(buffer, "disabled\n"); 269 pnp_printf(buffer, "disabled\n");
269 270
270 for (i = 0; i < PNP_MAX_PORT; i++) { 271 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
271 if (pnp_port_valid(dev, i)) { 272 if (pnp_resource_valid(res)) {
272 pnp_printf(buffer, "io"); 273 pnp_printf(buffer, "io");
273 if (pnp_port_flags(dev, i) & IORESOURCE_DISABLED) 274 if (res->flags & IORESOURCE_DISABLED)
274 pnp_printf(buffer, " disabled\n"); 275 pnp_printf(buffer, " disabled\n");
275 else 276 else
276 pnp_printf(buffer, " 0x%llx-0x%llx\n", 277 pnp_printf(buffer, " 0x%llx-0x%llx\n",
277 (unsigned long long) 278 (unsigned long long) res->start,
278 pnp_port_start(dev, i), 279 (unsigned long long) res->end);
279 (unsigned long long)pnp_port_end(dev,
280 i));
281 } 280 }
282 } 281 }
283 for (i = 0; i < PNP_MAX_MEM; i++) { 282 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
284 if (pnp_mem_valid(dev, i)) { 283 if (pnp_resource_valid(res)) {
285 pnp_printf(buffer, "mem"); 284 pnp_printf(buffer, "mem");
286 if (pnp_mem_flags(dev, i) & IORESOURCE_DISABLED) 285 if (res->flags & IORESOURCE_DISABLED)
287 pnp_printf(buffer, " disabled\n"); 286 pnp_printf(buffer, " disabled\n");
288 else 287 else
289 pnp_printf(buffer, " 0x%llx-0x%llx\n", 288 pnp_printf(buffer, " 0x%llx-0x%llx\n",
290 (unsigned long long) 289 (unsigned long long) res->start,
291 pnp_mem_start(dev, i), 290 (unsigned long long) res->end);
292 (unsigned long long)pnp_mem_end(dev,
293 i));
294 } 291 }
295 } 292 }
296 for (i = 0; i < PNP_MAX_IRQ; i++) { 293 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IRQ, i)); i++) {
297 if (pnp_irq_valid(dev, i)) { 294 if (pnp_resource_valid(res)) {
298 pnp_printf(buffer, "irq"); 295 pnp_printf(buffer, "irq");
299 if (pnp_irq_flags(dev, i) & IORESOURCE_DISABLED) 296 if (res->flags & IORESOURCE_DISABLED)
300 pnp_printf(buffer, " disabled\n"); 297 pnp_printf(buffer, " disabled\n");
301 else 298 else
302 pnp_printf(buffer, " %lld\n", 299 pnp_printf(buffer, " %lld\n",
303 (unsigned long long)pnp_irq(dev, i)); 300 (unsigned long long) res->start);
304 } 301 }
305 } 302 }
306 for (i = 0; i < PNP_MAX_DMA; i++) { 303 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_DMA, i)); i++) {
307 if (pnp_dma_valid(dev, i)) { 304 if (pnp_resource_valid(res)) {
308 pnp_printf(buffer, "dma"); 305 pnp_printf(buffer, "dma");
309 if (pnp_dma_flags(dev, i) & IORESOURCE_DISABLED) 306 if (res->flags & IORESOURCE_DISABLED)
310 pnp_printf(buffer, " disabled\n"); 307 pnp_printf(buffer, " disabled\n");
311 else 308 else
312 pnp_printf(buffer, " %lld\n", 309 pnp_printf(buffer, " %lld\n",
313 (unsigned long long)pnp_dma(dev, i)); 310 (unsigned long long) res->start);
314 } 311 }
315 } 312 }
316 ret = (buffer->curr - buf); 313 ret = (buffer->curr - buf);
@@ -323,8 +320,10 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
323 const char *ubuf, size_t count) 320 const char *ubuf, size_t count)
324{ 321{
325 struct pnp_dev *dev = to_pnp_dev(dmdev); 322 struct pnp_dev *dev = to_pnp_dev(dmdev);
323 struct pnp_resource *pnp_res;
326 char *buf = (void *)ubuf; 324 char *buf = (void *)ubuf;
327 int retval = 0; 325 int retval = 0;
326 resource_size_t start, end;
328 327
329 if (dev->status & PNP_ATTACHED) { 328 if (dev->status & PNP_ATTACHED) {
330 retval = -EBUSY; 329 retval = -EBUSY;
@@ -351,20 +350,20 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
351 if (!strnicmp(buf, "auto", 4)) { 350 if (!strnicmp(buf, "auto", 4)) {
352 if (dev->active) 351 if (dev->active)
353 goto done; 352 goto done;
354 pnp_init_resource_table(&dev->res); 353 pnp_init_resources(dev);
355 retval = pnp_auto_config_dev(dev); 354 retval = pnp_auto_config_dev(dev);
356 goto done; 355 goto done;
357 } 356 }
358 if (!strnicmp(buf, "clear", 5)) { 357 if (!strnicmp(buf, "clear", 5)) {
359 if (dev->active) 358 if (dev->active)
360 goto done; 359 goto done;
361 pnp_init_resource_table(&dev->res); 360 pnp_init_resources(dev);
362 goto done; 361 goto done;
363 } 362 }
364 if (!strnicmp(buf, "get", 3)) { 363 if (!strnicmp(buf, "get", 3)) {
365 mutex_lock(&pnp_res_mutex); 364 mutex_lock(&pnp_res_mutex);
366 if (pnp_can_read(dev)) 365 if (pnp_can_read(dev))
367 dev->protocol->get(dev, &dev->res); 366 dev->protocol->get(dev);
368 mutex_unlock(&pnp_res_mutex); 367 mutex_unlock(&pnp_res_mutex);
369 goto done; 368 goto done;
370 } 369 }
@@ -373,7 +372,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
373 if (dev->active) 372 if (dev->active)
374 goto done; 373 goto done;
375 buf += 3; 374 buf += 3;
376 pnp_init_resource_table(&dev->res); 375 pnp_init_resources(dev);
377 mutex_lock(&pnp_res_mutex); 376 mutex_lock(&pnp_res_mutex);
378 while (1) { 377 while (1) {
379 while (isspace(*buf)) 378 while (isspace(*buf))
@@ -382,76 +381,60 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
382 buf += 2; 381 buf += 2;
383 while (isspace(*buf)) 382 while (isspace(*buf))
384 ++buf; 383 ++buf;
385 dev->res.port_resource[nport].start = 384 start = simple_strtoul(buf, &buf, 0);
386 simple_strtoul(buf, &buf, 0);
387 while (isspace(*buf)) 385 while (isspace(*buf))
388 ++buf; 386 ++buf;
389 if (*buf == '-') { 387 if (*buf == '-') {
390 buf += 1; 388 buf += 1;
391 while (isspace(*buf)) 389 while (isspace(*buf))
392 ++buf; 390 ++buf;
393 dev->res.port_resource[nport].end = 391 end = simple_strtoul(buf, &buf, 0);
394 simple_strtoul(buf, &buf, 0);
395 } else 392 } else
396 dev->res.port_resource[nport].end = 393 end = start;
397 dev->res.port_resource[nport].start; 394 pnp_res = pnp_add_io_resource(dev, start, end,
398 dev->res.port_resource[nport].flags = 395 0);
399 IORESOURCE_IO; 396 if (pnp_res)
400 nport++; 397 pnp_res->index = nport++;
401 if (nport >= PNP_MAX_PORT)
402 break;
403 continue; 398 continue;
404 } 399 }
405 if (!strnicmp(buf, "mem", 3)) { 400 if (!strnicmp(buf, "mem", 3)) {
406 buf += 3; 401 buf += 3;
407 while (isspace(*buf)) 402 while (isspace(*buf))
408 ++buf; 403 ++buf;
409 dev->res.mem_resource[nmem].start = 404 start = simple_strtoul(buf, &buf, 0);
410 simple_strtoul(buf, &buf, 0);
411 while (isspace(*buf)) 405 while (isspace(*buf))
412 ++buf; 406 ++buf;
413 if (*buf == '-') { 407 if (*buf == '-') {
414 buf += 1; 408 buf += 1;
415 while (isspace(*buf)) 409 while (isspace(*buf))
416 ++buf; 410 ++buf;
417 dev->res.mem_resource[nmem].end = 411 end = simple_strtoul(buf, &buf, 0);
418 simple_strtoul(buf, &buf, 0);
419 } else 412 } else
420 dev->res.mem_resource[nmem].end = 413 end = start;
421 dev->res.mem_resource[nmem].start; 414 pnp_res = pnp_add_mem_resource(dev, start, end,
422 dev->res.mem_resource[nmem].flags = 415 0);
423 IORESOURCE_MEM; 416 if (pnp_res)
424 nmem++; 417 pnp_res->index = nmem++;
425 if (nmem >= PNP_MAX_MEM)
426 break;
427 continue; 418 continue;
428 } 419 }
429 if (!strnicmp(buf, "irq", 3)) { 420 if (!strnicmp(buf, "irq", 3)) {
430 buf += 3; 421 buf += 3;
431 while (isspace(*buf)) 422 while (isspace(*buf))
432 ++buf; 423 ++buf;
433 dev->res.irq_resource[nirq].start = 424 start = simple_strtoul(buf, &buf, 0);
434 dev->res.irq_resource[nirq].end = 425 pnp_res = pnp_add_irq_resource(dev, start, 0);
435 simple_strtoul(buf, &buf, 0); 426 if (pnp_res)
436 dev->res.irq_resource[nirq].flags = 427 nirq++;
437 IORESOURCE_IRQ;
438 nirq++;
439 if (nirq >= PNP_MAX_IRQ)
440 break;
441 continue; 428 continue;
442 } 429 }
443 if (!strnicmp(buf, "dma", 3)) { 430 if (!strnicmp(buf, "dma", 3)) {
444 buf += 3; 431 buf += 3;
445 while (isspace(*buf)) 432 while (isspace(*buf))
446 ++buf; 433 ++buf;
447 dev->res.dma_resource[ndma].start = 434 start = simple_strtoul(buf, &buf, 0);
448 dev->res.dma_resource[ndma].end = 435 pnp_res = pnp_add_dma_resource(dev, start, 0);
449 simple_strtoul(buf, &buf, 0); 436 if (pnp_res)
450 dev->res.dma_resource[ndma].flags = 437 pnp_res->index = ndma++;
451 IORESOURCE_DMA;
452 ndma++;
453 if (ndma >= PNP_MAX_DMA)
454 break;
455 continue; 438 continue;
456 } 439 }
457 break; 440 break;
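interface.c now reads resources back through pnp_get_resource() instead of indexing the old fixed arrays, and writes them with the pnp_add_*_resource() helpers. A minimal sketch of the read side, mirroring how the rewritten pnp_show_current_resources() walks one resource type:

#include <linux/pnp.h>
#include <linux/ioport.h>

static void example_dump_io(struct pnp_dev *dev)
{
	struct resource *res;
	int i;

	for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
		if (res->flags & IORESOURCE_DISABLED)
			dev_info(&dev->dev, "io %d disabled\n", i);
		else
			dev_info(&dev->dev, "io %d %#llx-%#llx\n", i,
				 (unsigned long long)res->start,
				 (unsigned long long)res->end);
	}
}

The loop simply stops when pnp_get_resource() returns NULL, which is what replaces the old PNP_MAX_* bound checks in the sysfs store path as well.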
diff --git a/drivers/pnp/isapnp/Makefile b/drivers/pnp/isapnp/Makefile
index cac18bbfb817..3e38f06f8d78 100644
--- a/drivers/pnp/isapnp/Makefile
+++ b/drivers/pnp/isapnp/Makefile
@@ -5,3 +5,7 @@
5isapnp-proc-$(CONFIG_PROC_FS) = proc.o 5isapnp-proc-$(CONFIG_PROC_FS) = proc.o
6 6
7obj-y := core.o compat.o $(isapnp-proc-y) 7obj-y := core.o compat.o $(isapnp-proc-y)
8
9ifeq ($(CONFIG_PNP_DEBUG),y)
10EXTRA_CFLAGS += -DDEBUG
11endif
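The Makefile hunk defines DEBUG for this directory when CONFIG_PNP_DEBUG is set, which is what turns the many dev_dbg() calls added elsewhere in this series into real output; without DEBUG they compile away. A trivial illustration:

#include <linux/device.h>

static void example_debug(struct device *dev)
{
	/* printed only when DEBUG was defined before device.h was included */
	dev_dbg(dev, "resource assignment trace\n");
}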
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 257f5d827d83..f1bccdbdeb08 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -44,6 +44,8 @@
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <asm/io.h> 45#include <asm/io.h>
46 46
47#include "../base.h"
48
47#if 0 49#if 0
48#define ISAPNP_REGION_OK 50#define ISAPNP_REGION_OK
49#endif 51#endif
@@ -88,6 +90,14 @@ MODULE_LICENSE("GPL");
88#define _LTAG_MEM32RANGE 0x85 90#define _LTAG_MEM32RANGE 0x85
89#define _LTAG_FIXEDMEM32RANGE 0x86 91#define _LTAG_FIXEDMEM32RANGE 0x86
90 92
93/* Logical device control and configuration registers */
94
95#define ISAPNP_CFG_ACTIVATE 0x30 /* byte */
96#define ISAPNP_CFG_MEM 0x40 /* 4 * dword */
97#define ISAPNP_CFG_PORT 0x60 /* 8 * word */
98#define ISAPNP_CFG_IRQ 0x70 /* 2 * word */
99#define ISAPNP_CFG_DMA 0x74 /* 2 * byte */
100
91/* 101/*
92 * Sizes of ISAPNP logical device configuration register sets. 102 * Sizes of ISAPNP logical device configuration register sets.
93 * See PNP-ISA-v1.0a.pdf, Appendix A. 103 * See PNP-ISA-v1.0a.pdf, Appendix A.
@@ -388,28 +398,6 @@ static void __init isapnp_skip_bytes(int count)
388} 398}
389 399
390/* 400/*
391 * Parse EISA id.
392 */
393static void isapnp_parse_id(struct pnp_dev *dev, unsigned short vendor,
394 unsigned short device)
395{
396 struct pnp_id *id;
397
398 if (!dev)
399 return;
400 id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
401 if (!id)
402 return;
403 sprintf(id->id, "%c%c%c%x%x%x%x",
404 'A' + ((vendor >> 2) & 0x3f) - 1,
405 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
406 'A' + ((vendor >> 8) & 0x1f) - 1,
407 (device >> 4) & 0x0f,
408 device & 0x0f, (device >> 12) & 0x0f, (device >> 8) & 0x0f);
409 pnp_add_id(id, dev);
410}
411
412/*
413 * Parse logical device tag. 401 * Parse logical device tag.
414 */ 402 */
415static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card, 403static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card,
@@ -417,30 +405,31 @@ static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card,
417{ 405{
418 unsigned char tmp[6]; 406 unsigned char tmp[6];
419 struct pnp_dev *dev; 407 struct pnp_dev *dev;
408 u32 eisa_id;
409 char id[8];
420 410
421 isapnp_peek(tmp, size); 411 isapnp_peek(tmp, size);
422 dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL); 412 eisa_id = tmp[0] | tmp[1] << 8 | tmp[2] << 16 | tmp[3] << 24;
413 pnp_eisa_id_to_string(eisa_id, id);
414
415 dev = pnp_alloc_dev(&isapnp_protocol, number, id);
423 if (!dev) 416 if (!dev)
424 return NULL; 417 return NULL;
425 dev->number = number; 418
426 isapnp_parse_id(dev, (tmp[1] << 8) | tmp[0], (tmp[3] << 8) | tmp[2]);
427 dev->regs = tmp[4];
428 dev->card = card; 419 dev->card = card;
429 if (size > 5)
430 dev->regs |= tmp[5] << 8;
431 dev->protocol = &isapnp_protocol;
432 dev->capabilities |= PNP_CONFIGURABLE; 420 dev->capabilities |= PNP_CONFIGURABLE;
433 dev->capabilities |= PNP_READ; 421 dev->capabilities |= PNP_READ;
434 dev->capabilities |= PNP_WRITE; 422 dev->capabilities |= PNP_WRITE;
435 dev->capabilities |= PNP_DISABLE; 423 dev->capabilities |= PNP_DISABLE;
436 pnp_init_resource_table(&dev->res); 424 pnp_init_resources(dev);
437 return dev; 425 return dev;
438} 426}
439 427
440/* 428/*
441 * Add IRQ resource to resources list. 429 * Add IRQ resource to resources list.
442 */ 430 */
443static void __init isapnp_parse_irq_resource(struct pnp_option *option, 431static void __init isapnp_parse_irq_resource(struct pnp_dev *dev,
432 struct pnp_option *option,
444 int size) 433 int size)
445{ 434{
446 unsigned char tmp[3]; 435 unsigned char tmp[3];
@@ -457,13 +446,14 @@ static void __init isapnp_parse_irq_resource(struct pnp_option *option,
457 irq->flags = tmp[2]; 446 irq->flags = tmp[2];
458 else 447 else
459 irq->flags = IORESOURCE_IRQ_HIGHEDGE; 448 irq->flags = IORESOURCE_IRQ_HIGHEDGE;
460 pnp_register_irq_resource(option, irq); 449 pnp_register_irq_resource(dev, option, irq);
461} 450}
462 451
463/* 452/*
464 * Add DMA resource to resources list. 453 * Add DMA resource to resources list.
465 */ 454 */
466static void __init isapnp_parse_dma_resource(struct pnp_option *option, 455static void __init isapnp_parse_dma_resource(struct pnp_dev *dev,
456 struct pnp_option *option,
467 int size) 457 int size)
468{ 458{
469 unsigned char tmp[2]; 459 unsigned char tmp[2];
@@ -475,13 +465,14 @@ static void __init isapnp_parse_dma_resource(struct pnp_option *option,
475 return; 465 return;
476 dma->map = tmp[0]; 466 dma->map = tmp[0];
477 dma->flags = tmp[1]; 467 dma->flags = tmp[1];
478 pnp_register_dma_resource(option, dma); 468 pnp_register_dma_resource(dev, option, dma);
479} 469}
480 470
481/* 471/*
482 * Add port resource to resources list. 472 * Add port resource to resources list.
483 */ 473 */
484static void __init isapnp_parse_port_resource(struct pnp_option *option, 474static void __init isapnp_parse_port_resource(struct pnp_dev *dev,
475 struct pnp_option *option,
485 int size) 476 int size)
486{ 477{
487 unsigned char tmp[7]; 478 unsigned char tmp[7];
@@ -496,13 +487,14 @@ static void __init isapnp_parse_port_resource(struct pnp_option *option,
496 port->align = tmp[5]; 487 port->align = tmp[5];
497 port->size = tmp[6]; 488 port->size = tmp[6];
498 port->flags = tmp[0] ? PNP_PORT_FLAG_16BITADDR : 0; 489 port->flags = tmp[0] ? PNP_PORT_FLAG_16BITADDR : 0;
499 pnp_register_port_resource(option, port); 490 pnp_register_port_resource(dev, option, port);
500} 491}
501 492
502/* 493/*
503 * Add fixed port resource to resources list. 494 * Add fixed port resource to resources list.
504 */ 495 */
505static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option, 496static void __init isapnp_parse_fixed_port_resource(struct pnp_dev *dev,
497 struct pnp_option *option,
506 int size) 498 int size)
507{ 499{
508 unsigned char tmp[3]; 500 unsigned char tmp[3];
@@ -516,13 +508,14 @@ static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option,
516 port->size = tmp[2]; 508 port->size = tmp[2];
517 port->align = 0; 509 port->align = 0;
518 port->flags = PNP_PORT_FLAG_FIXED; 510 port->flags = PNP_PORT_FLAG_FIXED;
519 pnp_register_port_resource(option, port); 511 pnp_register_port_resource(dev, option, port);
520} 512}
521 513
522/* 514/*
523 * Add memory resource to resources list. 515 * Add memory resource to resources list.
524 */ 516 */
525static void __init isapnp_parse_mem_resource(struct pnp_option *option, 517static void __init isapnp_parse_mem_resource(struct pnp_dev *dev,
518 struct pnp_option *option,
526 int size) 519 int size)
527{ 520{
528 unsigned char tmp[9]; 521 unsigned char tmp[9];
@@ -537,13 +530,14 @@ static void __init isapnp_parse_mem_resource(struct pnp_option *option,
537 mem->align = (tmp[6] << 8) | tmp[5]; 530 mem->align = (tmp[6] << 8) | tmp[5];
538 mem->size = ((tmp[8] << 8) | tmp[7]) << 8; 531 mem->size = ((tmp[8] << 8) | tmp[7]) << 8;
539 mem->flags = tmp[0]; 532 mem->flags = tmp[0];
540 pnp_register_mem_resource(option, mem); 533 pnp_register_mem_resource(dev, option, mem);
541} 534}
542 535
543/* 536/*
544 * Add 32-bit memory resource to resources list. 537 * Add 32-bit memory resource to resources list.
545 */ 538 */
546static void __init isapnp_parse_mem32_resource(struct pnp_option *option, 539static void __init isapnp_parse_mem32_resource(struct pnp_dev *dev,
540 struct pnp_option *option,
547 int size) 541 int size)
548{ 542{
549 unsigned char tmp[17]; 543 unsigned char tmp[17];
@@ -560,13 +554,14 @@ static void __init isapnp_parse_mem32_resource(struct pnp_option *option,
560 mem->size = 554 mem->size =
561 (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13]; 555 (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13];
562 mem->flags = tmp[0]; 556 mem->flags = tmp[0];
563 pnp_register_mem_resource(option, mem); 557 pnp_register_mem_resource(dev, option, mem);
564} 558}
565 559
566/* 560/*
567 * Add 32-bit fixed memory resource to resources list. 561 * Add 32-bit fixed memory resource to resources list.
568 */ 562 */
569static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option, 563static void __init isapnp_parse_fixed_mem32_resource(struct pnp_dev *dev,
564 struct pnp_option *option,
570 int size) 565 int size)
571{ 566{
572 unsigned char tmp[9]; 567 unsigned char tmp[9];
@@ -581,7 +576,7 @@ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option,
581 mem->size = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; 576 mem->size = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5];
582 mem->align = 0; 577 mem->align = 0;
583 mem->flags = tmp[0]; 578 mem->flags = tmp[0];
584 pnp_register_mem_resource(option, mem); 579 pnp_register_mem_resource(dev, option, mem);
585} 580}
586 581
587/* 582/*
@@ -613,6 +608,8 @@ static int __init isapnp_create_device(struct pnp_card *card,
613 unsigned char type, tmp[17]; 608 unsigned char type, tmp[17];
614 struct pnp_option *option; 609 struct pnp_option *option;
615 struct pnp_dev *dev; 610 struct pnp_dev *dev;
611 u32 eisa_id;
612 char id[8];
616 613
617 if ((dev = isapnp_parse_device(card, size, number++)) == NULL) 614 if ((dev = isapnp_parse_device(card, size, number++)) == NULL)
618 return 1; 615 return 1;
@@ -652,8 +649,10 @@ static int __init isapnp_create_device(struct pnp_card *card,
652 case _STAG_COMPATDEVID: 649 case _STAG_COMPATDEVID:
653 if (size == 4 && compat < DEVICE_COUNT_COMPATIBLE) { 650 if (size == 4 && compat < DEVICE_COUNT_COMPATIBLE) {
654 isapnp_peek(tmp, 4); 651 isapnp_peek(tmp, 4);
655 isapnp_parse_id(dev, (tmp[1] << 8) | tmp[0], 652 eisa_id = tmp[0] | tmp[1] << 8 |
656 (tmp[3] << 8) | tmp[2]); 653 tmp[2] << 16 | tmp[3] << 24;
654 pnp_eisa_id_to_string(eisa_id, id);
655 pnp_add_id(dev, id);
657 compat++; 656 compat++;
658 size = 0; 657 size = 0;
659 } 658 }
@@ -661,13 +660,13 @@ static int __init isapnp_create_device(struct pnp_card *card,
661 case _STAG_IRQ: 660 case _STAG_IRQ:
662 if (size < 2 || size > 3) 661 if (size < 2 || size > 3)
663 goto __skip; 662 goto __skip;
664 isapnp_parse_irq_resource(option, size); 663 isapnp_parse_irq_resource(dev, option, size);
665 size = 0; 664 size = 0;
666 break; 665 break;
667 case _STAG_DMA: 666 case _STAG_DMA:
668 if (size != 2) 667 if (size != 2)
669 goto __skip; 668 goto __skip;
670 isapnp_parse_dma_resource(option, size); 669 isapnp_parse_dma_resource(dev, option, size);
671 size = 0; 670 size = 0;
672 break; 671 break;
673 case _STAG_STARTDEP: 672 case _STAG_STARTDEP:
@@ -687,17 +686,18 @@ static int __init isapnp_create_device(struct pnp_card *card,
687 if (size != 0) 686 if (size != 0)
688 goto __skip; 687 goto __skip;
689 priority = 0; 688 priority = 0;
689 dev_dbg(&dev->dev, "end dependent options\n");
690 break; 690 break;
691 case _STAG_IOPORT: 691 case _STAG_IOPORT:
692 if (size != 7) 692 if (size != 7)
693 goto __skip; 693 goto __skip;
694 isapnp_parse_port_resource(option, size); 694 isapnp_parse_port_resource(dev, option, size);
695 size = 0; 695 size = 0;
696 break; 696 break;
697 case _STAG_FIXEDIO: 697 case _STAG_FIXEDIO:
698 if (size != 3) 698 if (size != 3)
699 goto __skip; 699 goto __skip;
700 isapnp_parse_fixed_port_resource(option, size); 700 isapnp_parse_fixed_port_resource(dev, option, size);
701 size = 0; 701 size = 0;
702 break; 702 break;
703 case _STAG_VENDOR: 703 case _STAG_VENDOR:
@@ -705,7 +705,7 @@ static int __init isapnp_create_device(struct pnp_card *card,
705 case _LTAG_MEMRANGE: 705 case _LTAG_MEMRANGE:
706 if (size != 9) 706 if (size != 9)
707 goto __skip; 707 goto __skip;
708 isapnp_parse_mem_resource(option, size); 708 isapnp_parse_mem_resource(dev, option, size);
709 size = 0; 709 size = 0;
710 break; 710 break;
711 case _LTAG_ANSISTR: 711 case _LTAG_ANSISTR:
@@ -720,13 +720,13 @@ static int __init isapnp_create_device(struct pnp_card *card,
720 case _LTAG_MEM32RANGE: 720 case _LTAG_MEM32RANGE:
721 if (size != 17) 721 if (size != 17)
722 goto __skip; 722 goto __skip;
723 isapnp_parse_mem32_resource(option, size); 723 isapnp_parse_mem32_resource(dev, option, size);
724 size = 0; 724 size = 0;
725 break; 725 break;
726 case _LTAG_FIXEDMEM32RANGE: 726 case _LTAG_FIXEDMEM32RANGE:
727 if (size != 9) 727 if (size != 9)
728 goto __skip; 728 goto __skip;
729 isapnp_parse_fixed_mem32_resource(option, size); 729 isapnp_parse_fixed_mem32_resource(dev, option, size);
730 size = 0; 730 size = 0;
731 break; 731 break;
732 case _STAG_END: 732 case _STAG_END:
@@ -734,9 +734,8 @@ static int __init isapnp_create_device(struct pnp_card *card,
734 isapnp_skip_bytes(size); 734 isapnp_skip_bytes(size);
735 return 1; 735 return 1;
736 default: 736 default:
737 printk(KERN_ERR 737 dev_err(&dev->dev, "unknown tag %#x (card %i), "
738 "isapnp: unexpected or unknown tag type 0x%x for logical device %i (device %i), ignored\n", 738 "ignored\n", type, card->number);
739 type, dev->number, card->number);
740 } 739 }
741__skip: 740__skip:
742 if (size > 0) 741 if (size > 0)
@@ -789,9 +788,8 @@ static void __init isapnp_parse_resource_map(struct pnp_card *card)
789 isapnp_skip_bytes(size); 788 isapnp_skip_bytes(size);
790 return; 789 return;
791 default: 790 default:
792 printk(KERN_ERR 791 dev_err(&card->dev, "unknown tag %#x, ignored\n",
793 "isapnp: unexpected or unknown tag type 0x%x for device %i, ignored\n", 792 type);
794 type, card->number);
795 } 793 }
796__skip: 794__skip:
797 if (size > 0) 795 if (size > 0)
@@ -822,25 +820,6 @@ static unsigned char __init isapnp_checksum(unsigned char *data)
822} 820}
823 821
824/* 822/*
825 * Parse EISA id for ISA PnP card.
826 */
827static void isapnp_parse_card_id(struct pnp_card *card, unsigned short vendor,
828 unsigned short device)
829{
830 struct pnp_id *id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
831
832 if (!id)
833 return;
834 sprintf(id->id, "%c%c%c%x%x%x%x",
835 'A' + ((vendor >> 2) & 0x3f) - 1,
836 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
837 'A' + ((vendor >> 8) & 0x1f) - 1,
838 (device >> 4) & 0x0f,
839 device & 0x0f, (device >> 12) & 0x0f, (device >> 8) & 0x0f);
840 pnp_add_card_id(id, card);
841}
842
843/*
844 * Build device list for all present ISA PnP devices. 823 * Build device list for all present ISA PnP devices.
845 */ 824 */
846static int __init isapnp_build_device_list(void) 825static int __init isapnp_build_device_list(void)
@@ -848,6 +827,8 @@ static int __init isapnp_build_device_list(void)
848 int csn; 827 int csn;
849 unsigned char header[9], checksum; 828 unsigned char header[9], checksum;
850 struct pnp_card *card; 829 struct pnp_card *card;
830 u32 eisa_id;
831 char id[8];
851 832
852 isapnp_wait(); 833 isapnp_wait();
853 isapnp_key(); 834 isapnp_key();
@@ -855,32 +836,30 @@ static int __init isapnp_build_device_list(void)
855 isapnp_wake(csn); 836 isapnp_wake(csn);
856 isapnp_peek(header, 9); 837 isapnp_peek(header, 9);
857 checksum = isapnp_checksum(header); 838 checksum = isapnp_checksum(header);
839 eisa_id = header[0] | header[1] << 8 |
840 header[2] << 16 | header[3] << 24;
841 pnp_eisa_id_to_string(eisa_id, id);
842 card = pnp_alloc_card(&isapnp_protocol, csn, id);
843 if (!card)
844 continue;
845
858#if 0 846#if 0
859 printk(KERN_DEBUG 847 dev_info(&card->dev,
860 "vendor: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 848 "vendor: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
861 header[0], header[1], header[2], header[3], header[4], 849 header[0], header[1], header[2], header[3], header[4],
862 header[5], header[6], header[7], header[8]); 850 header[5], header[6], header[7], header[8]);
863 printk(KERN_DEBUG "checksum = 0x%x\n", checksum); 851 dev_info(&card->dev, "checksum = %#x\n", checksum);
864#endif 852#endif
865 if ((card =
866 kzalloc(sizeof(struct pnp_card), GFP_KERNEL)) == NULL)
867 continue;
868
869 card->number = csn;
870 INIT_LIST_HEAD(&card->devices); 853 INIT_LIST_HEAD(&card->devices);
871 isapnp_parse_card_id(card, (header[1] << 8) | header[0],
872 (header[3] << 8) | header[2]);
873 card->serial = 854 card->serial =
874 (header[7] << 24) | (header[6] << 16) | (header[5] << 8) | 855 (header[7] << 24) | (header[6] << 16) | (header[5] << 8) |
875 header[4]; 856 header[4];
876 isapnp_checksum_value = 0x00; 857 isapnp_checksum_value = 0x00;
877 isapnp_parse_resource_map(card); 858 isapnp_parse_resource_map(card);
878 if (isapnp_checksum_value != 0x00) 859 if (isapnp_checksum_value != 0x00)
879 printk(KERN_ERR 860 dev_err(&card->dev, "invalid checksum %#x\n",
880 "isapnp: checksum for device %i is not valid (0x%x)\n", 861 isapnp_checksum_value);
881 csn, isapnp_checksum_value);
882 card->checksum = isapnp_checksum_value; 862 card->checksum = isapnp_checksum_value;
883 card->protocol = &isapnp_protocol;
884 863
885 pnp_add_card(card); 864 pnp_add_card(card);
886 } 865 }
@@ -947,100 +926,117 @@ EXPORT_SYMBOL(isapnp_cfg_begin);
947EXPORT_SYMBOL(isapnp_cfg_end); 926EXPORT_SYMBOL(isapnp_cfg_end);
948EXPORT_SYMBOL(isapnp_write_byte); 927EXPORT_SYMBOL(isapnp_write_byte);
949 928
950static int isapnp_read_resources(struct pnp_dev *dev, 929static int isapnp_get_resources(struct pnp_dev *dev)
951 struct pnp_resource_table *res)
952{ 930{
953 int tmp, ret; 931 struct pnp_resource *pnp_res;
932 int i, ret;
954 933
934 dev_dbg(&dev->dev, "get resources\n");
935 pnp_init_resources(dev);
936 isapnp_cfg_begin(dev->card->number, dev->number);
955 dev->active = isapnp_read_byte(ISAPNP_CFG_ACTIVATE); 937 dev->active = isapnp_read_byte(ISAPNP_CFG_ACTIVATE);
956 if (dev->active) { 938 if (!dev->active)
957 for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) { 939 goto __end;
958 ret = isapnp_read_word(ISAPNP_CFG_PORT + (tmp << 1)); 940
959 if (!ret) 941 for (i = 0; i < ISAPNP_MAX_PORT; i++) {
960 continue; 942 ret = isapnp_read_word(ISAPNP_CFG_PORT + (i << 1));
961 res->port_resource[tmp].start = ret; 943 if (ret) {
962 res->port_resource[tmp].flags = IORESOURCE_IO; 944 pnp_res = pnp_add_io_resource(dev, ret, ret, 0);
945 if (pnp_res)
946 pnp_res->index = i;
963 } 947 }
964 for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) { 948 }
965 ret = 949 for (i = 0; i < ISAPNP_MAX_MEM; i++) {
966 isapnp_read_word(ISAPNP_CFG_MEM + (tmp << 3)) << 8; 950 ret = isapnp_read_word(ISAPNP_CFG_MEM + (i << 3)) << 8;
967 if (!ret) 951 if (ret) {
968 continue; 952 pnp_res = pnp_add_mem_resource(dev, ret, ret, 0);
969 res->mem_resource[tmp].start = ret; 953 if (pnp_res)
970 res->mem_resource[tmp].flags = IORESOURCE_MEM; 954 pnp_res->index = i;
971 } 955 }
972 for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) { 956 }
973 ret = 957 for (i = 0; i < ISAPNP_MAX_IRQ; i++) {
974 (isapnp_read_word(ISAPNP_CFG_IRQ + (tmp << 1)) >> 958 ret = isapnp_read_word(ISAPNP_CFG_IRQ + (i << 1)) >> 8;
975 8); 959 if (ret) {
976 if (!ret) 960 pnp_res = pnp_add_irq_resource(dev, ret, 0);
977 continue; 961 if (pnp_res)
978 res->irq_resource[tmp].start = 962 pnp_res->index = i;
979 res->irq_resource[tmp].end = ret;
980 res->irq_resource[tmp].flags = IORESOURCE_IRQ;
981 } 963 }
982 for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) { 964 }
983 ret = isapnp_read_byte(ISAPNP_CFG_DMA + tmp); 965 for (i = 0; i < ISAPNP_MAX_DMA; i++) {
984 if (ret == 4) 966 ret = isapnp_read_byte(ISAPNP_CFG_DMA + i);
985 continue; 967 if (ret != 4) {
986 res->dma_resource[tmp].start = 968 pnp_res = pnp_add_dma_resource(dev, ret, 0);
987 res->dma_resource[tmp].end = ret; 969 if (pnp_res)
988 res->dma_resource[tmp].flags = IORESOURCE_DMA; 970 pnp_res->index = i;
989 } 971 }
990 } 972 }
991 return 0;
992}
993
994static int isapnp_get_resources(struct pnp_dev *dev,
995 struct pnp_resource_table *res)
996{
997 int ret;
998 973
999 pnp_init_resource_table(res); 974__end:
1000 isapnp_cfg_begin(dev->card->number, dev->number);
1001 ret = isapnp_read_resources(dev, res);
1002 isapnp_cfg_end(); 975 isapnp_cfg_end();
1003 return ret; 976 return 0;
1004} 977}
1005 978
1006static int isapnp_set_resources(struct pnp_dev *dev, 979static int isapnp_set_resources(struct pnp_dev *dev)
1007 struct pnp_resource_table *res)
1008{ 980{
1009 int tmp; 981 struct pnp_resource *pnp_res;
982 struct resource *res;
983 int tmp, index;
1010 984
985 dev_dbg(&dev->dev, "set resources\n");
1011 isapnp_cfg_begin(dev->card->number, dev->number); 986 isapnp_cfg_begin(dev->card->number, dev->number);
1012 dev->active = 1; 987 dev->active = 1;
1013 for (tmp = 0; 988 for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) {
1014 tmp < ISAPNP_MAX_PORT 989 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, tmp);
1015 && (res->port_resource[tmp]. 990 if (!pnp_res)
1016 flags & (IORESOURCE_IO | IORESOURCE_UNSET)) == IORESOURCE_IO; 991 continue;
1017 tmp++) 992 res = &pnp_res->res;
1018 isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1), 993 if (pnp_resource_valid(res)) {
1019 res->port_resource[tmp].start); 994 index = pnp_res->index;
1020 for (tmp = 0; 995 dev_dbg(&dev->dev, " set io %d to %#llx\n",
1021 tmp < ISAPNP_MAX_IRQ 996 index, (unsigned long long) res->start);
1022 && (res->irq_resource[tmp]. 997 isapnp_write_word(ISAPNP_CFG_PORT + (index << 1),
1023 flags & (IORESOURCE_IRQ | IORESOURCE_UNSET)) == IORESOURCE_IRQ; 998 res->start);
1024 tmp++) { 999 }
1025 int irq = res->irq_resource[tmp].start; 1000 }
1026 if (irq == 2) 1001 for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) {
1027 irq = 9; 1002 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, tmp);
1028 isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq); 1003 if (!pnp_res)
1004 continue;
1005 res = &pnp_res->res;
1006 if (pnp_resource_valid(res)) {
1007 int irq = res->start;
1008 if (irq == 2)
1009 irq = 9;
1010 index = pnp_res->index;
1011 dev_dbg(&dev->dev, " set irq %d to %d\n", index, irq);
1012 isapnp_write_byte(ISAPNP_CFG_IRQ + (index << 1), irq);
1013 }
1014 }
1015 for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) {
1016 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, tmp);
1017 if (!pnp_res)
1018 continue;
1019 res = &pnp_res->res;
1020 if (pnp_resource_valid(res)) {
1021 index = pnp_res->index;
1022 dev_dbg(&dev->dev, " set dma %d to %lld\n",
1023 index, (unsigned long long) res->start);
1024 isapnp_write_byte(ISAPNP_CFG_DMA + index, res->start);
1025 }
1026 }
1027 for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) {
1028 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, tmp);
1029 if (!pnp_res)
1030 continue;
1031 res = &pnp_res->res;
1032 if (pnp_resource_valid(res)) {
1033 index = pnp_res->index;
1034 dev_dbg(&dev->dev, " set mem %d to %#llx\n",
1035 index, (unsigned long long) res->start);
1036 isapnp_write_word(ISAPNP_CFG_MEM + (index << 3),
1037 (res->start >> 8) & 0xffff);
1038 }
1029 } 1039 }
1030 for (tmp = 0;
1031 tmp < ISAPNP_MAX_DMA
1032 && (res->dma_resource[tmp].
1033 flags & (IORESOURCE_DMA | IORESOURCE_UNSET)) == IORESOURCE_DMA;
1034 tmp++)
1035 isapnp_write_byte(ISAPNP_CFG_DMA + tmp,
1036 res->dma_resource[tmp].start);
1037 for (tmp = 0;
1038 tmp < ISAPNP_MAX_MEM
1039 && (res->mem_resource[tmp].
1040 flags & (IORESOURCE_MEM | IORESOURCE_UNSET)) == IORESOURCE_MEM;
1041 tmp++)
1042 isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3),
1043 (res->mem_resource[tmp].start >> 8) & 0xffff);
1044 /* FIXME: We aren't handling 32bit mems properly here */ 1040 /* FIXME: We aren't handling 32bit mems properly here */
1045 isapnp_activate(dev->number); 1041 isapnp_activate(dev->number);
1046 isapnp_cfg_end(); 1042 isapnp_cfg_end();
@@ -1138,13 +1134,13 @@ static int __init isapnp_init(void)
1138 protocol_for_each_card(&isapnp_protocol, card) { 1134 protocol_for_each_card(&isapnp_protocol, card) {
1139 cards++; 1135 cards++;
1140 if (isapnp_verbose) { 1136 if (isapnp_verbose) {
1141 printk(KERN_INFO "isapnp: Card '%s'\n", 1137 dev_info(&card->dev, "card '%s'\n",
1142 card->name[0] ? card->name : "Unknown"); 1138 card->name[0] ? card->name : "unknown");
1143 if (isapnp_verbose < 2) 1139 if (isapnp_verbose < 2)
1144 continue; 1140 continue;
1145 card_for_each_dev(card, dev) { 1141 card_for_each_dev(card, dev) {
1146 printk(KERN_INFO "isapnp: Device '%s'\n", 1142 dev_info(&card->dev, "device '%s'\n",
1147 dev->name[0] ? dev->name : "Unknown"); 1143 dev->name[0] ? dev->name : "unknown");
1148 } 1144 }
1149 } 1145 }
1150 } 1146 }
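isapnp/core.c no longer open-codes EISA id decoding; it assembles the 32-bit little-endian id from the raw bytes and hands it to pnp_eisa_id_to_string(). The arithmetic, taken from the removed isapnp_parse_id(), looks like this:

#include <linux/kernel.h>
#include <linux/types.h>

/* Decode a 32-bit EISA id (vendor in the low 16 bits, device in the high 16)
 * into the 7-character "ABC1234" form; str must hold at least 8 bytes. */
static void example_eisa_id_to_string(u32 id, char *str)
{
	unsigned short vendor = id & 0xffff;
	unsigned short device = id >> 16;

	sprintf(str, "%c%c%c%x%x%x%x",
		'A' + ((vendor >> 2) & 0x3f) - 1,
		'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
		'A' + ((vendor >> 8) & 0x1f) - 1,
		(device >> 4) & 0x0f, device & 0x0f,
		(device >> 12) & 0x0f, (device >> 8) & 0x0f);
}

Whether pnp_eisa_id_to_string() in the PNP core is implemented exactly this way is not shown in this diff; the sketch only reproduces the decoding the removed code performed.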
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 2b8266c3d40f..3f94edab25fa 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -85,6 +85,7 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
85} 85}
86 86
87static const struct file_operations isapnp_proc_bus_file_operations = { 87static const struct file_operations isapnp_proc_bus_file_operations = {
88 .owner = THIS_MODULE,
88 .llseek = isapnp_proc_bus_lseek, 89 .llseek = isapnp_proc_bus_lseek,
89 .read = isapnp_proc_bus_read, 90 .read = isapnp_proc_bus_read,
90}; 91};
@@ -102,12 +103,10 @@ static int isapnp_proc_attach_device(struct pnp_dev *dev)
102 return -ENOMEM; 103 return -ENOMEM;
103 } 104 }
104 sprintf(name, "%02x", dev->number); 105 sprintf(name, "%02x", dev->number);
105 e = dev->procent = create_proc_entry(name, S_IFREG | S_IRUGO, de); 106 e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de,
107 &isapnp_proc_bus_file_operations, dev);
106 if (!e) 108 if (!e)
107 return -ENOMEM; 109 return -ENOMEM;
108 e->proc_fops = &isapnp_proc_bus_file_operations;
109 e->owner = THIS_MODULE;
110 e->data = dev;
111 e->size = 256; 110 e->size = 256;
112 return 0; 111 return 0;
113} 112}
@@ -116,7 +115,7 @@ int __init isapnp_proc_init(void)
116{ 115{
117 struct pnp_dev *dev; 116 struct pnp_dev *dev;
118 117
119 isapnp_proc_bus_dir = proc_mkdir("isapnp", proc_bus); 118 isapnp_proc_bus_dir = proc_mkdir("bus/isapnp", NULL);
120 protocol_for_each_dev(&isapnp_protocol, dev) { 119 protocol_for_each_dev(&isapnp_protocol, dev) {
121 isapnp_proc_attach_device(dev); 120 isapnp_proc_attach_device(dev);
122 } 121 }
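isapnp/proc.c switches from create_proc_entry() followed by manual proc_fops/owner/data assignments to a single proc_create_data() call, closing the window where the entry was visible before its file_operations were set. A minimal sketch of the new registration, with placeholder names:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/errno.h>

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	return 0;	/* placeholder; real code copies data to user space */
}

static const struct file_operations example_proc_fops = {
	.owner	= THIS_MODULE,
	.read	= example_read,
};

static int example_proc_add(struct proc_dir_entry *parent, void *data)
{
	struct proc_dir_entry *e;

	e = proc_create_data("example", S_IFREG | S_IRUGO, parent,
			     &example_proc_fops, data);
	return e ? 0 : -ENOMEM;
}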
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index c28caf272c11..bea0914ff947 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -19,100 +19,118 @@ DEFINE_MUTEX(pnp_res_mutex);
19 19
20static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx) 20static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
21{ 21{
22 resource_size_t *start, *end; 22 struct pnp_resource *pnp_res;
23 unsigned long *flags; 23 struct resource *res;
24 24
25 if (idx >= PNP_MAX_PORT) { 25 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, idx);
26 if (!pnp_res) {
26 dev_err(&dev->dev, "too many I/O port resources\n"); 27 dev_err(&dev->dev, "too many I/O port resources\n");
27 /* pretend we were successful so at least the manager won't try again */ 28 /* pretend we were successful so at least the manager won't try again */
28 return 1; 29 return 1;
29 } 30 }
30 31
32 res = &pnp_res->res;
33
31 /* check if this resource has been manually set, if so skip */ 34 /* check if this resource has been manually set, if so skip */
32 if (!(dev->res.port_resource[idx].flags & IORESOURCE_AUTO)) 35 if (!(res->flags & IORESOURCE_AUTO)) {
36 dev_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
37 "flags %#lx\n", idx, (unsigned long long) res->start,
38 (unsigned long long) res->end, res->flags);
33 return 1; 39 return 1;
34 40 }
35 start = &dev->res.port_resource[idx].start;
36 end = &dev->res.port_resource[idx].end;
37 flags = &dev->res.port_resource[idx].flags;
38 41
39 /* set the initial values */ 42 /* set the initial values */
40 *flags |= rule->flags | IORESOURCE_IO; 43 pnp_res->index = idx;
41 *flags &= ~IORESOURCE_UNSET; 44 res->flags |= rule->flags | IORESOURCE_IO;
45 res->flags &= ~IORESOURCE_UNSET;
42 46
43 if (!rule->size) { 47 if (!rule->size) {
44 *flags |= IORESOURCE_DISABLED; 48 res->flags |= IORESOURCE_DISABLED;
49 dev_dbg(&dev->dev, " io %d disabled\n", idx);
45 return 1; /* skip disabled resource requests */ 50 return 1; /* skip disabled resource requests */
46 } 51 }
47 52
48 *start = rule->min; 53 res->start = rule->min;
49 *end = *start + rule->size - 1; 54 res->end = res->start + rule->size - 1;
50 55
51 /* run through until pnp_check_port is happy */ 56 /* run through until pnp_check_port is happy */
52 while (!pnp_check_port(dev, idx)) { 57 while (!pnp_check_port(dev, res)) {
53 *start += rule->align; 58 res->start += rule->align;
54 *end = *start + rule->size - 1; 59 res->end = res->start + rule->size - 1;
55 if (*start > rule->max || !rule->align) 60 if (res->start > rule->max || !rule->align) {
61 dev_dbg(&dev->dev, " couldn't assign io %d\n", idx);
56 return 0; 62 return 0;
63 }
57 } 64 }
65 dev_dbg(&dev->dev, " assign io %d %#llx-%#llx\n", idx,
66 (unsigned long long) res->start, (unsigned long long) res->end);
58 return 1; 67 return 1;
59} 68}
60 69
61static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx) 70static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
62{ 71{
63 resource_size_t *start, *end; 72 struct pnp_resource *pnp_res;
64 unsigned long *flags; 73 struct resource *res;
65 74
66 if (idx >= PNP_MAX_MEM) { 75 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, idx);
76 if (!pnp_res) {
67 dev_err(&dev->dev, "too many memory resources\n"); 77 dev_err(&dev->dev, "too many memory resources\n");
68 /* pretend we were successful so at least the manager won't try again */ 78 /* pretend we were successful so at least the manager won't try again */
69 return 1; 79 return 1;
70 } 80 }
71 81
82 res = &pnp_res->res;
83
72 /* check if this resource has been manually set, if so skip */ 84 /* check if this resource has been manually set, if so skip */
73 if (!(dev->res.mem_resource[idx].flags & IORESOURCE_AUTO)) 85 if (!(res->flags & IORESOURCE_AUTO)) {
86 dev_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
87 "flags %#lx\n", idx, (unsigned long long) res->start,
88 (unsigned long long) res->end, res->flags);
74 return 1; 89 return 1;
75 90 }
76 start = &dev->res.mem_resource[idx].start;
77 end = &dev->res.mem_resource[idx].end;
78 flags = &dev->res.mem_resource[idx].flags;
79 91
80 /* set the initial values */ 92 /* set the initial values */
81 *flags |= rule->flags | IORESOURCE_MEM; 93 pnp_res->index = idx;
82 *flags &= ~IORESOURCE_UNSET; 94 res->flags |= rule->flags | IORESOURCE_MEM;
95 res->flags &= ~IORESOURCE_UNSET;
83 96
84 /* convert pnp flags to standard Linux flags */ 97 /* convert pnp flags to standard Linux flags */
85 if (!(rule->flags & IORESOURCE_MEM_WRITEABLE)) 98 if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
86 *flags |= IORESOURCE_READONLY; 99 res->flags |= IORESOURCE_READONLY;
87 if (rule->flags & IORESOURCE_MEM_CACHEABLE) 100 if (rule->flags & IORESOURCE_MEM_CACHEABLE)
88 *flags |= IORESOURCE_CACHEABLE; 101 res->flags |= IORESOURCE_CACHEABLE;
89 if (rule->flags & IORESOURCE_MEM_RANGELENGTH) 102 if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
90 *flags |= IORESOURCE_RANGELENGTH; 103 res->flags |= IORESOURCE_RANGELENGTH;
91 if (rule->flags & IORESOURCE_MEM_SHADOWABLE) 104 if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
92 *flags |= IORESOURCE_SHADOWABLE; 105 res->flags |= IORESOURCE_SHADOWABLE;
93 106
94 if (!rule->size) { 107 if (!rule->size) {
95 *flags |= IORESOURCE_DISABLED; 108 res->flags |= IORESOURCE_DISABLED;
109 dev_dbg(&dev->dev, " mem %d disabled\n", idx);
96 return 1; /* skip disabled resource requests */ 110 return 1; /* skip disabled resource requests */
97 } 111 }
98 112
99 *start = rule->min; 113 res->start = rule->min;
100 *end = *start + rule->size - 1; 114 res->end = res->start + rule->size - 1;
101 115
102 /* run through until pnp_check_mem is happy */ 116 /* run through until pnp_check_mem is happy */
103 while (!pnp_check_mem(dev, idx)) { 117 while (!pnp_check_mem(dev, res)) {
104 *start += rule->align; 118 res->start += rule->align;
105 *end = *start + rule->size - 1; 119 res->end = res->start + rule->size - 1;
106 if (*start > rule->max || !rule->align) 120 if (res->start > rule->max || !rule->align) {
121 dev_dbg(&dev->dev, " couldn't assign mem %d\n", idx);
107 return 0; 122 return 0;
123 }
108 } 124 }
125 dev_dbg(&dev->dev, " assign mem %d %#llx-%#llx\n", idx,
126 (unsigned long long) res->start, (unsigned long long) res->end);
109 return 1; 127 return 1;
110} 128}
111 129
112static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx) 130static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
113{ 131{
114 resource_size_t *start, *end; 132 struct pnp_resource *pnp_res;
115 unsigned long *flags; 133 struct resource *res;
116 int i; 134 int i;
117 135
118 /* IRQ priority: this table is good for i386 */ 136 /* IRQ priority: this table is good for i386 */
@@ -120,49 +138,59 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
120 5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2 138 5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
121 }; 139 };
122 140
123 if (idx >= PNP_MAX_IRQ) { 141 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, idx);
142 if (!pnp_res) {
124 dev_err(&dev->dev, "too many IRQ resources\n"); 143 dev_err(&dev->dev, "too many IRQ resources\n");
125 /* pretend we were successful so at least the manager won't try again */ 144 /* pretend we were successful so at least the manager won't try again */
126 return 1; 145 return 1;
127 } 146 }
128 147
148 res = &pnp_res->res;
149
129 /* check if this resource has been manually set, if so skip */ 150 /* check if this resource has been manually set, if so skip */
130 if (!(dev->res.irq_resource[idx].flags & IORESOURCE_AUTO)) 151 if (!(res->flags & IORESOURCE_AUTO)) {
152 dev_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
153 idx, (int) res->start, res->flags);
131 return 1; 154 return 1;
132 155 }
133 start = &dev->res.irq_resource[idx].start;
134 end = &dev->res.irq_resource[idx].end;
135 flags = &dev->res.irq_resource[idx].flags;
136 156
137 /* set the initial values */ 157 /* set the initial values */
138 *flags |= rule->flags | IORESOURCE_IRQ; 158 pnp_res->index = idx;
139 *flags &= ~IORESOURCE_UNSET; 159 res->flags |= rule->flags | IORESOURCE_IRQ;
160 res->flags &= ~IORESOURCE_UNSET;
140 161
141 if (bitmap_empty(rule->map, PNP_IRQ_NR)) { 162 if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
142 *flags |= IORESOURCE_DISABLED; 163 res->flags |= IORESOURCE_DISABLED;
164 dev_dbg(&dev->dev, " irq %d disabled\n", idx);
143 return 1; /* skip disabled resource requests */ 165 return 1; /* skip disabled resource requests */
144 } 166 }
145 167
146 /* TBD: need check for >16 IRQ */ 168 /* TBD: need check for >16 IRQ */
147 *start = find_next_bit(rule->map, PNP_IRQ_NR, 16); 169 res->start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
148 if (*start < PNP_IRQ_NR) { 170 if (res->start < PNP_IRQ_NR) {
149 *end = *start; 171 res->end = res->start;
172 dev_dbg(&dev->dev, " assign irq %d %d\n", idx,
173 (int) res->start);
150 return 1; 174 return 1;
151 } 175 }
152 for (i = 0; i < 16; i++) { 176 for (i = 0; i < 16; i++) {
153 if (test_bit(xtab[i], rule->map)) { 177 if (test_bit(xtab[i], rule->map)) {
154 *start = *end = xtab[i]; 178 res->start = res->end = xtab[i];
155 if (pnp_check_irq(dev, idx)) 179 if (pnp_check_irq(dev, res)) {
180 dev_dbg(&dev->dev, " assign irq %d %d\n", idx,
181 (int) res->start);
156 return 1; 182 return 1;
183 }
157 } 184 }
158 } 185 }
186 dev_dbg(&dev->dev, " couldn't assign irq %d\n", idx);
159 return 0; 187 return 0;
160} 188}
161 189
162static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) 190static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
163{ 191{
164 resource_size_t *start, *end; 192 struct pnp_resource *pnp_res;
165 unsigned long *flags; 193 struct resource *res;
166 int i; 194 int i;
167 195
168 /* DMA priority: this table is good for i386 */ 196 /* DMA priority: this table is good for i386 */
@@ -170,71 +198,89 @@ static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
170 1, 3, 5, 6, 7, 0, 2, 4 198 1, 3, 5, 6, 7, 0, 2, 4
171 }; 199 };
172 200
173 if (idx >= PNP_MAX_DMA) { 201 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, idx);
202 if (!pnp_res) {
174 dev_err(&dev->dev, "too many DMA resources\n"); 203 dev_err(&dev->dev, "too many DMA resources\n");
175 return; 204 return;
176 } 205 }
177 206
207 res = &pnp_res->res;
208
178 /* check if this resource has been manually set, if so skip */ 209 /* check if this resource has been manually set, if so skip */
179 if (!(dev->res.dma_resource[idx].flags & IORESOURCE_AUTO)) 210 if (!(res->flags & IORESOURCE_AUTO)) {
211 dev_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
212 idx, (int) res->start, res->flags);
180 return; 213 return;
181 214 }
182 start = &dev->res.dma_resource[idx].start;
183 end = &dev->res.dma_resource[idx].end;
184 flags = &dev->res.dma_resource[idx].flags;
185 215
186 /* set the initial values */ 216 /* set the initial values */
187 *flags |= rule->flags | IORESOURCE_DMA; 217 pnp_res->index = idx;
188 *flags &= ~IORESOURCE_UNSET; 218 res->flags |= rule->flags | IORESOURCE_DMA;
219 res->flags &= ~IORESOURCE_UNSET;
189 220
190 for (i = 0; i < 8; i++) { 221 for (i = 0; i < 8; i++) {
191 if (rule->map & (1 << xtab[i])) { 222 if (rule->map & (1 << xtab[i])) {
192 *start = *end = xtab[i]; 223 res->start = res->end = xtab[i];
193 if (pnp_check_dma(dev, idx)) 224 if (pnp_check_dma(dev, res)) {
225 dev_dbg(&dev->dev, " assign dma %d %d\n", idx,
226 (int) res->start);
194 return; 227 return;
228 }
195 } 229 }
196 } 230 }
197#ifdef MAX_DMA_CHANNELS 231#ifdef MAX_DMA_CHANNELS
198 *start = *end = MAX_DMA_CHANNELS; 232 res->start = res->end = MAX_DMA_CHANNELS;
199#endif 233#endif
200 *flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; 234 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
235 dev_dbg(&dev->dev, " disable dma %d\n", idx);
236}
237
238void pnp_init_resource(struct resource *res)
239{
240 unsigned long type;
241
242 type = res->flags & (IORESOURCE_IO | IORESOURCE_MEM |
243 IORESOURCE_IRQ | IORESOURCE_DMA);
244
245 res->name = NULL;
246 res->flags = type | IORESOURCE_AUTO | IORESOURCE_UNSET;
247 if (type == IORESOURCE_IRQ || type == IORESOURCE_DMA) {
248 res->start = -1;
249 res->end = -1;
250 } else {
251 res->start = 0;
252 res->end = 0;
253 }
201} 254}
202 255
203/** 256/**
204 * pnp_init_resources - Resets a resource table to default values. 257 * pnp_init_resources - Resets a resource table to default values.
205 * @table: pointer to the desired resource table 258 * @table: pointer to the desired resource table
206 */ 259 */
207void pnp_init_resource_table(struct pnp_resource_table *table) 260void pnp_init_resources(struct pnp_dev *dev)
208{ 261{
262 struct resource *res;
209 int idx; 263 int idx;
210 264
211 for (idx = 0; idx < PNP_MAX_IRQ; idx++) { 265 for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
212 table->irq_resource[idx].name = NULL; 266 res = &dev->res->irq[idx].res;
213 table->irq_resource[idx].start = -1; 267 res->flags = IORESOURCE_IRQ;
214 table->irq_resource[idx].end = -1; 268 pnp_init_resource(res);
215 table->irq_resource[idx].flags =
216 IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
217 } 269 }
218 for (idx = 0; idx < PNP_MAX_DMA; idx++) { 270 for (idx = 0; idx < PNP_MAX_DMA; idx++) {
219 table->dma_resource[idx].name = NULL; 271 res = &dev->res->dma[idx].res;
220 table->dma_resource[idx].start = -1; 272 res->flags = IORESOURCE_DMA;
221 table->dma_resource[idx].end = -1; 273 pnp_init_resource(res);
222 table->dma_resource[idx].flags =
223 IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
224 } 274 }
225 for (idx = 0; idx < PNP_MAX_PORT; idx++) { 275 for (idx = 0; idx < PNP_MAX_PORT; idx++) {
226 table->port_resource[idx].name = NULL; 276 res = &dev->res->port[idx].res;
227 table->port_resource[idx].start = 0; 277 res->flags = IORESOURCE_IO;
228 table->port_resource[idx].end = 0; 278 pnp_init_resource(res);
229 table->port_resource[idx].flags =
230 IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
231 } 279 }
232 for (idx = 0; idx < PNP_MAX_MEM; idx++) { 280 for (idx = 0; idx < PNP_MAX_MEM; idx++) {
233 table->mem_resource[idx].name = NULL; 281 res = &dev->res->mem[idx].res;
234 table->mem_resource[idx].start = 0; 282 res->flags = IORESOURCE_MEM;
235 table->mem_resource[idx].end = 0; 283 pnp_init_resource(res);
236 table->mem_resource[idx].flags =
237 IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
238 } 284 }
239} 285}
240 286
@@ -242,41 +288,38 @@ void pnp_init_resource_table(struct pnp_resource_table *table)
242 * pnp_clean_resources - clears resources that were not manually set 288 * pnp_clean_resources - clears resources that were not manually set
243 * @res: the resources to clean 289 * @res: the resources to clean
244 */ 290 */
245static void pnp_clean_resource_table(struct pnp_resource_table *res) 291static void pnp_clean_resource_table(struct pnp_dev *dev)
246{ 292{
293 struct resource *res;
247 int idx; 294 int idx;
248 295
249 for (idx = 0; idx < PNP_MAX_IRQ; idx++) { 296 for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
250 if (!(res->irq_resource[idx].flags & IORESOURCE_AUTO)) 297 res = &dev->res->irq[idx].res;
251 continue; 298 if (res->flags & IORESOURCE_AUTO) {
252 res->irq_resource[idx].start = -1; 299 res->flags = IORESOURCE_IRQ;
253 res->irq_resource[idx].end = -1; 300 pnp_init_resource(res);
254 res->irq_resource[idx].flags = 301 }
255 IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
256 } 302 }
257 for (idx = 0; idx < PNP_MAX_DMA; idx++) { 303 for (idx = 0; idx < PNP_MAX_DMA; idx++) {
258 if (!(res->dma_resource[idx].flags & IORESOURCE_AUTO)) 304 res = &dev->res->dma[idx].res;
259 continue; 305 if (res->flags & IORESOURCE_AUTO) {
260 res->dma_resource[idx].start = -1; 306 res->flags = IORESOURCE_DMA;
261 res->dma_resource[idx].end = -1; 307 pnp_init_resource(res);
262 res->dma_resource[idx].flags = 308 }
263 IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
264 } 309 }
265 for (idx = 0; idx < PNP_MAX_PORT; idx++) { 310 for (idx = 0; idx < PNP_MAX_PORT; idx++) {
266 if (!(res->port_resource[idx].flags & IORESOURCE_AUTO)) 311 res = &dev->res->port[idx].res;
267 continue; 312 if (res->flags & IORESOURCE_AUTO) {
268 res->port_resource[idx].start = 0; 313 res->flags = IORESOURCE_IO;
269 res->port_resource[idx].end = 0; 314 pnp_init_resource(res);
270 res->port_resource[idx].flags = 315 }
271 IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
272 } 316 }
273 for (idx = 0; idx < PNP_MAX_MEM; idx++) { 317 for (idx = 0; idx < PNP_MAX_MEM; idx++) {
274 if (!(res->mem_resource[idx].flags & IORESOURCE_AUTO)) 318 res = &dev->res->mem[idx].res;
275 continue; 319 if (res->flags & IORESOURCE_AUTO) {
276 res->mem_resource[idx].start = 0; 320 res->flags = IORESOURCE_MEM;
277 res->mem_resource[idx].end = 0; 321 pnp_init_resource(res);
278 res->mem_resource[idx].flags = 322 }
279 IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
280 } 323 }
281} 324}
282 325
@@ -298,9 +341,11 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
298 if (!pnp_can_configure(dev)) 341 if (!pnp_can_configure(dev))
299 return -ENODEV; 342 return -ENODEV;
300 343
344 dbg_pnp_show_resources(dev, "before pnp_assign_resources");
301 mutex_lock(&pnp_res_mutex); 345 mutex_lock(&pnp_res_mutex);
302 pnp_clean_resource_table(&dev->res); /* start with a fresh slate */ 346 pnp_clean_resource_table(dev);
303 if (dev->independent) { 347 if (dev->independent) {
348 dev_dbg(&dev->dev, "assigning independent options\n");
304 port = dev->independent->port; 349 port = dev->independent->port;
305 mem = dev->independent->mem; 350 mem = dev->independent->mem;
306 irq = dev->independent->irq; 351 irq = dev->independent->irq;
@@ -333,6 +378,8 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
333 if (depnum) { 378 if (depnum) {
334 struct pnp_option *dep; 379 struct pnp_option *dep;
335 int i; 380 int i;
381
382 dev_dbg(&dev->dev, "assigning dependent option %d\n", depnum);
336 for (i = 1, dep = dev->dependent; i < depnum; 383 for (i = 1, dep = dev->dependent; i < depnum;
337 i++, dep = dep->next) 384 i++, dep = dep->next)
338 if (!dep) 385 if (!dep)
@@ -368,68 +415,17 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
368 goto fail; 415 goto fail;
369 416
370 mutex_unlock(&pnp_res_mutex); 417 mutex_unlock(&pnp_res_mutex);
418 dbg_pnp_show_resources(dev, "after pnp_assign_resources");
371 return 1; 419 return 1;
372 420
373fail: 421fail:
374 pnp_clean_resource_table(&dev->res); 422 pnp_clean_resource_table(dev);
375 mutex_unlock(&pnp_res_mutex); 423 mutex_unlock(&pnp_res_mutex);
424 dbg_pnp_show_resources(dev, "after pnp_assign_resources (failed)");
376 return 0; 425 return 0;
377} 426}
378 427
379/** 428/**
380 * pnp_manual_config_dev - Disables Auto Config and Manually sets the resource table
381 * @dev: pointer to the desired device
382 * @res: pointer to the new resource config
383 * @mode: 0 or PNP_CONFIG_FORCE
384 *
385 * This function can be used by drivers that want to manually set thier resources.
386 */
387int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
388 int mode)
389{
390 int i;
391 struct pnp_resource_table *bak;
392
393 if (!pnp_can_configure(dev))
394 return -ENODEV;
395 bak = pnp_alloc(sizeof(struct pnp_resource_table));
396 if (!bak)
397 return -ENOMEM;
398 *bak = dev->res;
399
400 mutex_lock(&pnp_res_mutex);
401 dev->res = *res;
402 if (!(mode & PNP_CONFIG_FORCE)) {
403 for (i = 0; i < PNP_MAX_PORT; i++) {
404 if (!pnp_check_port(dev, i))
405 goto fail;
406 }
407 for (i = 0; i < PNP_MAX_MEM; i++) {
408 if (!pnp_check_mem(dev, i))
409 goto fail;
410 }
411 for (i = 0; i < PNP_MAX_IRQ; i++) {
412 if (!pnp_check_irq(dev, i))
413 goto fail;
414 }
415 for (i = 0; i < PNP_MAX_DMA; i++) {
416 if (!pnp_check_dma(dev, i))
417 goto fail;
418 }
419 }
420 mutex_unlock(&pnp_res_mutex);
421
422 kfree(bak);
423 return 0;
424
425fail:
426 dev->res = *bak;
427 mutex_unlock(&pnp_res_mutex);
428 kfree(bak);
429 return -EINVAL;
430}
431
432/**
433 * pnp_auto_config_dev - automatically assigns resources to a device 429 * pnp_auto_config_dev - automatically assigns resources to a device
434 * @dev: pointer to the desired device 430 * @dev: pointer to the desired device
435 */ 431 */
@@ -473,7 +469,8 @@ int pnp_start_dev(struct pnp_dev *dev)
473 return -EINVAL; 469 return -EINVAL;
474 } 470 }
475 471
476 if (dev->protocol->set(dev, &dev->res) < 0) { 472 dbg_pnp_show_resources(dev, "pnp_start_dev");
473 if (dev->protocol->set(dev) < 0) {
477 dev_err(&dev->dev, "activation failed\n"); 474 dev_err(&dev->dev, "activation failed\n");
478 return -EIO; 475 return -EIO;
479 } 476 }
@@ -549,30 +546,13 @@ int pnp_disable_dev(struct pnp_dev *dev)
549 546
550 /* release the resources so that other devices can use them */ 547 /* release the resources so that other devices can use them */
551 mutex_lock(&pnp_res_mutex); 548 mutex_lock(&pnp_res_mutex);
552 pnp_clean_resource_table(&dev->res); 549 pnp_clean_resource_table(dev);
553 mutex_unlock(&pnp_res_mutex); 550 mutex_unlock(&pnp_res_mutex);
554 551
555 return 0; 552 return 0;
556} 553}
557 554
558/**
559 * pnp_resource_change - change one resource
560 * @resource: pointer to resource to be changed
561 * @start: start of region
562 * @size: size of region
563 */
564void pnp_resource_change(struct resource *resource, resource_size_t start,
565 resource_size_t size)
566{
567 resource->flags &= ~(IORESOURCE_AUTO | IORESOURCE_UNSET);
568 resource->start = start;
569 resource->end = start + size - 1;
570}
571
572EXPORT_SYMBOL(pnp_manual_config_dev);
573EXPORT_SYMBOL(pnp_start_dev); 555EXPORT_SYMBOL(pnp_start_dev);
574EXPORT_SYMBOL(pnp_stop_dev); 556EXPORT_SYMBOL(pnp_stop_dev);
575EXPORT_SYMBOL(pnp_activate_dev); 557EXPORT_SYMBOL(pnp_activate_dev);
576EXPORT_SYMBOL(pnp_disable_dev); 558EXPORT_SYMBOL(pnp_disable_dev);
577EXPORT_SYMBOL(pnp_resource_change);
578EXPORT_SYMBOL(pnp_init_resource_table);
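
The reworked assignment helpers above all follow the same retry-until-fit pattern: seed the resource from the option's minimum, then step forward by the option's alignment until the validator accepts the candidate or the maximum is passed. Below is a minimal user-space sketch of that loop for the memory case; struct mem_rule and region_is_free() are illustrative stand-ins for the kernel's struct pnp_mem and pnp_check_mem(), not real APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the option data carried by struct pnp_mem. */
struct mem_rule {
	uint64_t min;	/* lowest candidate base address */
	uint64_t max;	/* highest candidate base address */
	uint64_t align;	/* step between candidate bases */
	uint64_t size;	/* length of the region to place */
};

/* Pretend validator: the range 0xd0000-0xd7fff is already taken. */
static bool region_is_free(uint64_t start, uint64_t end)
{
	return end < 0xd0000 || start > 0xd7fff;
}

/* Mirrors the shape of pnp_assign_mem(): seed from rule->min, then step
 * by rule->align until the validator is happy or rule->max is exceeded. */
static bool assign_mem(const struct mem_rule *rule, uint64_t *start, uint64_t *end)
{
	if (!rule->size)
		return false;	/* nothing to place (the kernel marks such requests disabled and skips them) */

	*start = rule->min;
	*end = *start + rule->size - 1;
	while (!region_is_free(*start, *end)) {
		*start += rule->align;
		*end = *start + rule->size - 1;
		if (*start > rule->max || !rule->align)
			return false;	/* ran out of candidates */
	}
	return true;
}

int main(void)
{
	struct mem_rule rule = { 0xd0000, 0xdffff, 0x4000, 0x4000 };
	uint64_t start, end;

	if (assign_mem(&rule, &start, &end))
		printf("assigned mem %#llx-%#llx\n",
		       (unsigned long long) start, (unsigned long long) end);
	else
		printf("couldn't assign mem\n");
	return 0;
}

With the sample rule, the first two candidates (0xd0000 and 0xd4000) collide with the taken range, so the loop settles on 0xd8000-0xdbfff.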
diff --git a/drivers/pnp/pnpacpi/Makefile b/drivers/pnp/pnpacpi/Makefile
index 905326fcca85..2d7a1e6908be 100644
--- a/drivers/pnp/pnpacpi/Makefile
+++ b/drivers/pnp/pnpacpi/Makefile
@@ -3,3 +3,7 @@
3# 3#
4 4
5obj-y := core.o rsparser.o 5obj-y := core.o rsparser.o
6
7ifeq ($(CONFIG_PNP_DEBUG),y)
8EXTRA_CFLAGS += -DDEBUG
9endif
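
The Makefile hunk above only adds -DDEBUG to the pnpacpi objects when CONFIG_PNP_DEBUG is set, which is what turns the new dev_dbg() calls in this series into actual output. A stand-alone sketch of the same compile-time gating, using a hypothetical dbg() macro rather than the kernel's dev_dbg():

#include <stdio.h>

/*
 * Hypothetical dbg() macro: compiled out entirely unless this file is
 * built with -DDEBUG, similar in spirit to how dev_dbg() only produces
 * output when DEBUG (or dynamic debug) is enabled for the driver.
 */
#ifdef DEBUG
#define dbg(fmt, ...) fprintf(stderr, "pnp: " fmt, ##__VA_ARGS__)
#else
#define dbg(fmt, ...) do { } while (0)
#endif

int main(void)
{
	/* Prints only when compiled with: cc -DDEBUG example.c */
	dbg("assign io %#x-%#x\n", 0x3f8, 0x3ff);
	printf("done\n");
	return 0;
}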
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index c283a9a70d83..50902773beaf 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -25,6 +25,7 @@
25#include <acpi/acpi_bus.h> 25#include <acpi/acpi_bus.h>
26#include <acpi/actypes.h> 26#include <acpi/actypes.h>
27 27
28#include "../base.h"
28#include "pnpacpi.h" 29#include "pnpacpi.h"
29 30
30static int num = 0; 31static int num = 0;
@@ -44,7 +45,7 @@ static struct acpi_device_id excluded_id_list[] __initdata = {
44 {"", 0}, 45 {"", 0},
45}; 46};
46 47
47static inline int is_exclusive_device(struct acpi_device *dev) 48static inline int __init is_exclusive_device(struct acpi_device *dev)
48{ 49{
49 return (!acpi_match_device_ids(dev, excluded_id_list)); 50 return (!acpi_match_device_ids(dev, excluded_id_list));
50} 51}
@@ -72,40 +73,24 @@ static int __init ispnpidacpi(char *id)
72 return 1; 73 return 1;
73} 74}
74 75
75static void __init pnpidacpi_to_pnpid(char *id, char *str) 76static int pnpacpi_get_resources(struct pnp_dev *dev)
76{ 77{
77 str[0] = id[0]; 78 dev_dbg(&dev->dev, "get resources\n");
78 str[1] = id[1]; 79 return pnpacpi_parse_allocated_resource(dev);
79 str[2] = id[2];
80 str[3] = tolower(id[3]);
81 str[4] = tolower(id[4]);
82 str[5] = tolower(id[5]);
83 str[6] = tolower(id[6]);
84 str[7] = '\0';
85} 80}
86 81
87static int pnpacpi_get_resources(struct pnp_dev *dev, 82static int pnpacpi_set_resources(struct pnp_dev *dev)
88 struct pnp_resource_table *res)
89{
90 acpi_status status;
91
92 status = pnpacpi_parse_allocated_resource((acpi_handle) dev->data,
93 &dev->res);
94 return ACPI_FAILURE(status) ? -ENODEV : 0;
95}
96
97static int pnpacpi_set_resources(struct pnp_dev *dev,
98 struct pnp_resource_table *res)
99{ 83{
100 acpi_handle handle = dev->data; 84 acpi_handle handle = dev->data;
101 struct acpi_buffer buffer; 85 struct acpi_buffer buffer;
102 int ret = 0; 86 int ret;
103 acpi_status status; 87 acpi_status status;
104 88
105 ret = pnpacpi_build_resource_template(handle, &buffer); 89 dev_dbg(&dev->dev, "set resources\n");
90 ret = pnpacpi_build_resource_template(dev, &buffer);
106 if (ret) 91 if (ret)
107 return ret; 92 return ret;
108 ret = pnpacpi_encode_resources(res, &buffer); 93 ret = pnpacpi_encode_resources(dev, &buffer);
109 if (ret) { 94 if (ret) {
110 kfree(buffer.pointer); 95 kfree(buffer.pointer);
111 return ret; 96 return ret;
@@ -163,7 +148,6 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
163{ 148{
164 acpi_handle temp = NULL; 149 acpi_handle temp = NULL;
165 acpi_status status; 150 acpi_status status;
166 struct pnp_id *dev_id;
167 struct pnp_dev *dev; 151 struct pnp_dev *dev;
168 152
169 status = acpi_get_handle(device->handle, "_CRS", &temp); 153 status = acpi_get_handle(device->handle, "_CRS", &temp);
@@ -171,11 +155,10 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
171 is_exclusive_device(device)) 155 is_exclusive_device(device))
172 return 0; 156 return 0;
173 157
174 dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL); 158 dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
175 if (!dev) { 159 if (!dev)
176 pnp_err("Out of memory");
177 return -ENOMEM; 160 return -ENOMEM;
178 } 161
179 dev->data = device->handle; 162 dev->data = device->handle;
180 /* .enabled means the device can decode the resources */ 163 /* .enabled means the device can decode the resources */
181 dev->active = device->status.enabled; 164 dev->active = device->status.enabled;
@@ -191,44 +174,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
191 if (ACPI_SUCCESS(status)) 174 if (ACPI_SUCCESS(status))
192 dev->capabilities |= PNP_DISABLE; 175 dev->capabilities |= PNP_DISABLE;
193 176
194 dev->protocol = &pnpacpi_protocol;
195
196 if (strlen(acpi_device_name(device))) 177 if (strlen(acpi_device_name(device)))
197 strncpy(dev->name, acpi_device_name(device), sizeof(dev->name)); 178 strncpy(dev->name, acpi_device_name(device), sizeof(dev->name));
198 else 179 else
199 strncpy(dev->name, acpi_device_bid(device), sizeof(dev->name)); 180 strncpy(dev->name, acpi_device_bid(device), sizeof(dev->name));
200 181
201 dev->number = num; 182 if (dev->active)
202 183 pnpacpi_parse_allocated_resource(dev);
203 /* set the initial values for the PnP device */
204 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
205 if (!dev_id)
206 goto err;
207 pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id);
208 pnp_add_id(dev_id, dev);
209
210 if (dev->active) {
211 /* parse allocated resource */
212 status = pnpacpi_parse_allocated_resource(device->handle,
213 &dev->res);
214 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
215 pnp_err("PnPACPI: METHOD_NAME__CRS failure for %s",
216 dev_id->id);
217 goto err1;
218 }
219 }
220 184
221 if (dev->capabilities & PNP_CONFIGURABLE) { 185 if (dev->capabilities & PNP_CONFIGURABLE)
222 status = pnpacpi_parse_resource_option_data(device->handle, 186 pnpacpi_parse_resource_option_data(dev);
223 dev);
224 if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
225 pnp_err("PnPACPI: METHOD_NAME__PRS failure for %s",
226 dev_id->id);
227 goto err1;
228 }
229 }
230 187
231 /* parse compatible ids */
232 if (device->flags.compatible_ids) { 188 if (device->flags.compatible_ids) {
233 struct acpi_compatible_id_list *cid_list = device->pnp.cid_list; 189 struct acpi_compatible_id_list *cid_list = device->pnp.cid_list;
234 int i; 190 int i;
@@ -236,27 +192,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
236 for (i = 0; i < cid_list->count; i++) { 192 for (i = 0; i < cid_list->count; i++) {
237 if (!ispnpidacpi(cid_list->id[i].value)) 193 if (!ispnpidacpi(cid_list->id[i].value))
238 continue; 194 continue;
239 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL); 195 pnp_add_id(dev, cid_list->id[i].value);
240 if (!dev_id)
241 continue;
242
243 pnpidacpi_to_pnpid(cid_list->id[i].value, dev_id->id);
244 pnp_add_id(dev_id, dev);
245 } 196 }
246 } 197 }
247 198
248 /* clear out the damaged flags */ 199 /* clear out the damaged flags */
249 if (!dev->active) 200 if (!dev->active)
250 pnp_init_resource_table(&dev->res); 201 pnp_init_resources(dev);
251 pnp_add_device(dev); 202 pnp_add_device(dev);
252 num++; 203 num++;
253 204
254 return AE_OK; 205 return AE_OK;
255err1:
256 kfree(dev_id);
257err:
258 kfree(dev);
259 return -EINVAL;
260} 206}
261 207
262static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle, 208static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
diff --git a/drivers/pnp/pnpacpi/pnpacpi.h b/drivers/pnp/pnpacpi/pnpacpi.h
index f28e2ed66fa3..3e60225b0227 100644
--- a/drivers/pnp/pnpacpi/pnpacpi.h
+++ b/drivers/pnp/pnpacpi/pnpacpi.h
@@ -5,8 +5,8 @@
5#include <linux/acpi.h> 5#include <linux/acpi.h>
6#include <linux/pnp.h> 6#include <linux/pnp.h>
7 7
8acpi_status pnpacpi_parse_allocated_resource(acpi_handle, struct pnp_resource_table*); 8int pnpacpi_parse_allocated_resource(struct pnp_dev *);
9acpi_status pnpacpi_parse_resource_option_data(acpi_handle, struct pnp_dev*); 9int pnpacpi_parse_resource_option_data(struct pnp_dev *);
10int pnpacpi_encode_resources(struct pnp_resource_table *, struct acpi_buffer *); 10int pnpacpi_encode_resources(struct pnp_dev *, struct acpi_buffer *);
11int pnpacpi_build_resource_template(acpi_handle, struct acpi_buffer*); 11int pnpacpi_build_resource_template(struct pnp_dev *, struct acpi_buffer *);
12#endif 12#endif
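
The header change above is the interface shift for the whole series: the pnpacpi parse/encode entry points now take a struct pnp_dev * and return ordinary negative errno values instead of acpi_status. A small sketch of that status-to-errno translation, using fake status constants (FAKE_AE_*) rather than the real ACPICA definitions:

#include <errno.h>
#include <stdio.h>

/* Fake stand-ins for acpi_status values; the real ones live in ACPICA. */
typedef unsigned int fake_acpi_status;
#define FAKE_AE_OK		0x0000u
#define FAKE_AE_ERROR		0x0001u
#define FAKE_AE_NOT_FOUND	0x0005u
#define fake_failure(s)		((s) != FAKE_AE_OK)

/*
 * Mirrors the pattern used by the reworked pnpacpi_parse_allocated_resource():
 * success becomes 0, any failure becomes -EPERM, and only unexpected
 * failures (anything other than "method not found") are reported.
 */
static int status_to_errno(fake_acpi_status status)
{
	if (!fake_failure(status))
		return 0;
	if (status != FAKE_AE_NOT_FOUND)
		fprintf(stderr, "can't evaluate _CRS: %u\n", status);
	return -EPERM;
}

int main(void)
{
	printf("%d %d %d\n",
	       status_to_errno(FAKE_AE_OK),		/* 0 */
	       status_to_errno(FAKE_AE_NOT_FOUND),	/* -EPERM, silent */
	       status_to_errno(FAKE_AE_ERROR));		/* -EPERM, logged */
	return 0;
}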
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 98cbc9f18eed..0201c8adfda7 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -21,6 +21,8 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/acpi.h> 22#include <linux/acpi.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/pnp.h>
25#include "../base.h"
24#include "pnpacpi.h" 26#include "pnpacpi.h"
25 27
26#ifdef CONFIG_IA64 28#ifdef CONFIG_IA64
@@ -32,19 +34,26 @@
32/* 34/*
33 * Allocated Resources 35 * Allocated Resources
34 */ 36 */
35static int irq_flags(int triggering, int polarity) 37static int irq_flags(int triggering, int polarity, int shareable)
36{ 38{
39 int flags;
40
37 if (triggering == ACPI_LEVEL_SENSITIVE) { 41 if (triggering == ACPI_LEVEL_SENSITIVE) {
38 if (polarity == ACPI_ACTIVE_LOW) 42 if (polarity == ACPI_ACTIVE_LOW)
39 return IORESOURCE_IRQ_LOWLEVEL; 43 flags = IORESOURCE_IRQ_LOWLEVEL;
40 else 44 else
41 return IORESOURCE_IRQ_HIGHLEVEL; 45 flags = IORESOURCE_IRQ_HIGHLEVEL;
42 } else { 46 } else {
43 if (polarity == ACPI_ACTIVE_LOW) 47 if (polarity == ACPI_ACTIVE_LOW)
44 return IORESOURCE_IRQ_LOWEDGE; 48 flags = IORESOURCE_IRQ_LOWEDGE;
45 else 49 else
46 return IORESOURCE_IRQ_HIGHEDGE; 50 flags = IORESOURCE_IRQ_HIGHEDGE;
47 } 51 }
52
53 if (shareable)
54 flags |= IORESOURCE_IRQ_SHAREABLE;
55
56 return flags;
48} 57}
49 58
50static void decode_irq_flags(int flag, int *triggering, int *polarity) 59static void decode_irq_flags(int flag, int *triggering, int *polarity)
@@ -69,29 +78,16 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity)
69 } 78 }
70} 79}
71 80
72static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, 81static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
73 u32 gsi, int triggering, 82 u32 gsi, int triggering,
74 int polarity, int shareable) 83 int polarity, int shareable)
75{ 84{
76 int i = 0; 85 int irq, flags;
77 int irq;
78 int p, t; 86 int p, t;
79 static unsigned char warned;
80 87
81 if (!valid_IRQ(gsi)) 88 if (!valid_IRQ(gsi))
82 return; 89 return;
83 90
84 while (!(res->irq_resource[i].flags & IORESOURCE_UNSET) &&
85 i < PNP_MAX_IRQ)
86 i++;
87 if (i >= PNP_MAX_IRQ) {
88 if (!warned) {
89 printk(KERN_WARNING "pnpacpi: exceeded the max number"
90 " of IRQ resources: %d\n", PNP_MAX_IRQ);
91 warned = 1;
92 }
93 return;
94 }
95 /* 91 /*
 96 * in IO-APIC mode, use overrided attribute. Two reasons: 92 * in IO-APIC mode, use overrided attribute. Two reasons:
 97 * 1. BIOS bug in DSDT 93 * 1. BIOS bug in DSDT
@@ -102,27 +98,21 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
102 p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; 98 p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
103 99
104 if (triggering != t || polarity != p) { 100 if (triggering != t || polarity != p) {
105 pnp_warn("IRQ %d override to %s, %s", 101 dev_warn(&dev->dev, "IRQ %d override to %s, %s\n",
106 gsi, t ? "edge":"level", p ? "low":"high"); 102 gsi, t ? "edge":"level", p ? "low":"high");
107 triggering = t; 103 triggering = t;
108 polarity = p; 104 polarity = p;
109 } 105 }
110 } 106 }
111 107
112 res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag 108 flags = irq_flags(triggering, polarity, shareable);
113 res->irq_resource[i].flags |= irq_flags(triggering, polarity);
114 irq = acpi_register_gsi(gsi, triggering, polarity); 109 irq = acpi_register_gsi(gsi, triggering, polarity);
115 if (irq < 0) { 110 if (irq >= 0)
116 res->irq_resource[i].flags |= IORESOURCE_DISABLED; 111 pcibios_penalize_isa_irq(irq, 1);
117 return; 112 else
118 } 113 flags |= IORESOURCE_DISABLED;
119
120 if (shareable)
121 res->irq_resource[i].flags |= IORESOURCE_IRQ_SHAREABLE;
122 114
123 res->irq_resource[i].start = irq; 115 pnp_add_irq_resource(dev, irq, flags);
124 res->irq_resource[i].end = irq;
125 pcibios_penalize_isa_irq(irq, 1);
126} 116}
127 117
128static int dma_flags(int type, int bus_master, int transfer) 118static int dma_flags(int type, int bus_master, int transfer)
@@ -168,88 +158,36 @@ static int dma_flags(int type, int bus_master, int transfer)
168 return flags; 158 return flags;
169} 159}
170 160
171static void pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table *res, 161static void pnpacpi_parse_allocated_ioresource(struct pnp_dev *dev, u64 start,
172 u32 dma, int type, 162 u64 len, int io_decode)
173 int bus_master, int transfer)
174{ 163{
175 int i = 0; 164 int flags = 0;
176 static unsigned char warned; 165 u64 end = start + len - 1;
177
178 while (i < PNP_MAX_DMA &&
179 !(res->dma_resource[i].flags & IORESOURCE_UNSET))
180 i++;
181 if (i < PNP_MAX_DMA) {
182 res->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag
183 res->dma_resource[i].flags |=
184 dma_flags(type, bus_master, transfer);
185 if (dma == -1) {
186 res->dma_resource[i].flags |= IORESOURCE_DISABLED;
187 return;
188 }
189 res->dma_resource[i].start = dma;
190 res->dma_resource[i].end = dma;
191 } else if (!warned) {
192 printk(KERN_WARNING "pnpacpi: exceeded the max number of DMA "
193 "resources: %d \n", PNP_MAX_DMA);
194 warned = 1;
195 }
196}
197 166
198static void pnpacpi_parse_allocated_ioresource(struct pnp_resource_table *res, 167 if (io_decode == ACPI_DECODE_16)
199 u64 io, u64 len, int io_decode) 168 flags |= PNP_PORT_FLAG_16BITADDR;
200{ 169 if (len == 0 || end >= 0x10003)
201 int i = 0; 170 flags |= IORESOURCE_DISABLED;
202 static unsigned char warned;
203 171
204 while (!(res->port_resource[i].flags & IORESOURCE_UNSET) && 172 pnp_add_io_resource(dev, start, end, flags);
205 i < PNP_MAX_PORT)
206 i++;
207 if (i < PNP_MAX_PORT) {
208 res->port_resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag
209 if (io_decode == ACPI_DECODE_16)
210 res->port_resource[i].flags |= PNP_PORT_FLAG_16BITADDR;
211 if (len <= 0 || (io + len - 1) >= 0x10003) {
212 res->port_resource[i].flags |= IORESOURCE_DISABLED;
213 return;
214 }
215 res->port_resource[i].start = io;
216 res->port_resource[i].end = io + len - 1;
217 } else if (!warned) {
218 printk(KERN_WARNING "pnpacpi: exceeded the max number of IO "
219 "resources: %d \n", PNP_MAX_PORT);
220 warned = 1;
221 }
222} 173}
223 174
224static void pnpacpi_parse_allocated_memresource(struct pnp_resource_table *res, 175static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev,
225 u64 mem, u64 len, 176 u64 start, u64 len,
226 int write_protect) 177 int write_protect)
227{ 178{
228 int i = 0; 179 int flags = 0;
229 static unsigned char warned; 180 u64 end = start + len - 1;
230 181
231 while (!(res->mem_resource[i].flags & IORESOURCE_UNSET) && 182 if (len == 0)
232 (i < PNP_MAX_MEM)) 183 flags |= IORESOURCE_DISABLED;
233 i++; 184 if (write_protect == ACPI_READ_WRITE_MEMORY)
234 if (i < PNP_MAX_MEM) { 185 flags |= IORESOURCE_MEM_WRITEABLE;
235 res->mem_resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag 186
236 if (len <= 0) { 187 pnp_add_mem_resource(dev, start, end, flags);
237 res->mem_resource[i].flags |= IORESOURCE_DISABLED;
238 return;
239 }
240 if (write_protect == ACPI_READ_WRITE_MEMORY)
241 res->mem_resource[i].flags |= IORESOURCE_MEM_WRITEABLE;
242
243 res->mem_resource[i].start = mem;
244 res->mem_resource[i].end = mem + len - 1;
245 } else if (!warned) {
246 printk(KERN_WARNING "pnpacpi: exceeded the max number of mem "
247 "resources: %d\n", PNP_MAX_MEM);
248 warned = 1;
249 }
250} 188}
251 189
252static void pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table, 190static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev,
253 struct acpi_resource *res) 191 struct acpi_resource *res)
254{ 192{
255 struct acpi_resource_address64 addr, *p = &addr; 193 struct acpi_resource_address64 addr, *p = &addr;
@@ -257,7 +195,7 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res
257 195
258 status = acpi_resource_to_address64(res, p); 196 status = acpi_resource_to_address64(res, p);
259 if (!ACPI_SUCCESS(status)) { 197 if (!ACPI_SUCCESS(status)) {
260 pnp_warn("PnPACPI: failed to convert resource type %d", 198 dev_warn(&dev->dev, "failed to convert resource type %d\n",
261 res->type); 199 res->type);
262 return; 200 return;
263 } 201 }
@@ -266,11 +204,11 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res
266 return; 204 return;
267 205
268 if (p->resource_type == ACPI_MEMORY_RANGE) 206 if (p->resource_type == ACPI_MEMORY_RANGE)
269 pnpacpi_parse_allocated_memresource(res_table, 207 pnpacpi_parse_allocated_memresource(dev,
270 p->minimum, p->address_length, 208 p->minimum, p->address_length,
271 p->info.mem.write_protect); 209 p->info.mem.write_protect);
272 else if (p->resource_type == ACPI_IO_RANGE) 210 else if (p->resource_type == ACPI_IO_RANGE)
273 pnpacpi_parse_allocated_ioresource(res_table, 211 pnpacpi_parse_allocated_ioresource(dev,
274 p->minimum, p->address_length, 212 p->minimum, p->address_length,
275 p->granularity == 0xfff ? ACPI_DECODE_10 : 213 p->granularity == 0xfff ? ACPI_DECODE_10 :
276 ACPI_DECODE_16); 214 ACPI_DECODE_16);
@@ -279,8 +217,16 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res
279static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, 217static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
280 void *data) 218 void *data)
281{ 219{
282 struct pnp_resource_table *res_table = data; 220 struct pnp_dev *dev = data;
283 int i; 221 struct acpi_resource_irq *irq;
222 struct acpi_resource_dma *dma;
223 struct acpi_resource_io *io;
224 struct acpi_resource_fixed_io *fixed_io;
225 struct acpi_resource_memory24 *memory24;
226 struct acpi_resource_memory32 *memory32;
227 struct acpi_resource_fixed_memory32 *fixed_memory32;
228 struct acpi_resource_extended_irq *extended_irq;
229 int i, flags;
284 230
285 switch (res->type) { 231 switch (res->type) {
286 case ACPI_RESOURCE_TYPE_IRQ: 232 case ACPI_RESOURCE_TYPE_IRQ:
@@ -288,29 +234,33 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
288 * Per spec, only one interrupt per descriptor is allowed in 234 * Per spec, only one interrupt per descriptor is allowed in
289 * _CRS, but some firmware violates this, so parse them all. 235 * _CRS, but some firmware violates this, so parse them all.
290 */ 236 */
291 for (i = 0; i < res->data.irq.interrupt_count; i++) { 237 irq = &res->data.irq;
292 pnpacpi_parse_allocated_irqresource(res_table, 238 for (i = 0; i < irq->interrupt_count; i++) {
293 res->data.irq.interrupts[i], 239 pnpacpi_parse_allocated_irqresource(dev,
294 res->data.irq.triggering, 240 irq->interrupts[i],
295 res->data.irq.polarity, 241 irq->triggering,
296 res->data.irq.sharable); 242 irq->polarity,
243 irq->sharable);
297 } 244 }
298 break; 245 break;
299 246
300 case ACPI_RESOURCE_TYPE_DMA: 247 case ACPI_RESOURCE_TYPE_DMA:
301 if (res->data.dma.channel_count > 0) 248 dma = &res->data.dma;
302 pnpacpi_parse_allocated_dmaresource(res_table, 249 if (dma->channel_count > 0) {
303 res->data.dma.channels[0], 250 flags = dma_flags(dma->type, dma->bus_master,
304 res->data.dma.type, 251 dma->transfer);
305 res->data.dma.bus_master, 252 if (dma->channels[0] == (u8) -1)
306 res->data.dma.transfer); 253 flags |= IORESOURCE_DISABLED;
254 pnp_add_dma_resource(dev, dma->channels[0], flags);
255 }
307 break; 256 break;
308 257
309 case ACPI_RESOURCE_TYPE_IO: 258 case ACPI_RESOURCE_TYPE_IO:
310 pnpacpi_parse_allocated_ioresource(res_table, 259 io = &res->data.io;
311 res->data.io.minimum, 260 pnpacpi_parse_allocated_ioresource(dev,
312 res->data.io.address_length, 261 io->minimum,
313 res->data.io.io_decode); 262 io->address_length,
263 io->io_decode);
314 break; 264 break;
315 265
316 case ACPI_RESOURCE_TYPE_START_DEPENDENT: 266 case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -318,9 +268,10 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
318 break; 268 break;
319 269
320 case ACPI_RESOURCE_TYPE_FIXED_IO: 270 case ACPI_RESOURCE_TYPE_FIXED_IO:
321 pnpacpi_parse_allocated_ioresource(res_table, 271 fixed_io = &res->data.fixed_io;
322 res->data.fixed_io.address, 272 pnpacpi_parse_allocated_ioresource(dev,
323 res->data.fixed_io.address_length, 273 fixed_io->address,
274 fixed_io->address_length,
324 ACPI_DECODE_10); 275 ACPI_DECODE_10);
325 break; 276 break;
326 277
@@ -331,27 +282,30 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
331 break; 282 break;
332 283
333 case ACPI_RESOURCE_TYPE_MEMORY24: 284 case ACPI_RESOURCE_TYPE_MEMORY24:
334 pnpacpi_parse_allocated_memresource(res_table, 285 memory24 = &res->data.memory24;
335 res->data.memory24.minimum, 286 pnpacpi_parse_allocated_memresource(dev,
336 res->data.memory24.address_length, 287 memory24->minimum,
337 res->data.memory24.write_protect); 288 memory24->address_length,
289 memory24->write_protect);
338 break; 290 break;
339 case ACPI_RESOURCE_TYPE_MEMORY32: 291 case ACPI_RESOURCE_TYPE_MEMORY32:
340 pnpacpi_parse_allocated_memresource(res_table, 292 memory32 = &res->data.memory32;
341 res->data.memory32.minimum, 293 pnpacpi_parse_allocated_memresource(dev,
342 res->data.memory32.address_length, 294 memory32->minimum,
343 res->data.memory32.write_protect); 295 memory32->address_length,
296 memory32->write_protect);
344 break; 297 break;
345 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 298 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
346 pnpacpi_parse_allocated_memresource(res_table, 299 fixed_memory32 = &res->data.fixed_memory32;
347 res->data.fixed_memory32.address, 300 pnpacpi_parse_allocated_memresource(dev,
348 res->data.fixed_memory32.address_length, 301 fixed_memory32->address,
349 res->data.fixed_memory32.write_protect); 302 fixed_memory32->address_length,
303 fixed_memory32->write_protect);
350 break; 304 break;
351 case ACPI_RESOURCE_TYPE_ADDRESS16: 305 case ACPI_RESOURCE_TYPE_ADDRESS16:
352 case ACPI_RESOURCE_TYPE_ADDRESS32: 306 case ACPI_RESOURCE_TYPE_ADDRESS32:
353 case ACPI_RESOURCE_TYPE_ADDRESS64: 307 case ACPI_RESOURCE_TYPE_ADDRESS64:
354 pnpacpi_parse_allocated_address_space(res_table, res); 308 pnpacpi_parse_allocated_address_space(dev, res);
355 break; 309 break;
356 310
357 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: 311 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
@@ -360,15 +314,16 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
360 break; 314 break;
361 315
362 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: 316 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
363 if (res->data.extended_irq.producer_consumer == ACPI_PRODUCER) 317 extended_irq = &res->data.extended_irq;
318 if (extended_irq->producer_consumer == ACPI_PRODUCER)
364 return AE_OK; 319 return AE_OK;
365 320
366 for (i = 0; i < res->data.extended_irq.interrupt_count; i++) { 321 for (i = 0; i < extended_irq->interrupt_count; i++) {
367 pnpacpi_parse_allocated_irqresource(res_table, 322 pnpacpi_parse_allocated_irqresource(dev,
368 res->data.extended_irq.interrupts[i], 323 extended_irq->interrupts[i],
369 res->data.extended_irq.triggering, 324 extended_irq->triggering,
370 res->data.extended_irq.polarity, 325 extended_irq->polarity,
371 res->data.extended_irq.sharable); 326 extended_irq->sharable);
372 } 327 }
373 break; 328 break;
374 329
@@ -376,24 +331,36 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
376 break; 331 break;
377 332
378 default: 333 default:
379 pnp_warn("PnPACPI: unknown resource type %d", res->type); 334 dev_warn(&dev->dev, "unknown resource type %d in _CRS\n",
335 res->type);
380 return AE_ERROR; 336 return AE_ERROR;
381 } 337 }
382 338
383 return AE_OK; 339 return AE_OK;
384} 340}
385 341
386acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle, 342int pnpacpi_parse_allocated_resource(struct pnp_dev *dev)
387 struct pnp_resource_table * res)
388{ 343{
389 /* Blank the resource table values */ 344 acpi_handle handle = dev->data;
390 pnp_init_resource_table(res); 345 acpi_status status;
346
347 dev_dbg(&dev->dev, "parse allocated resources\n");
391 348
392 return acpi_walk_resources(handle, METHOD_NAME__CRS, 349 pnp_init_resources(dev);
393 pnpacpi_allocated_resource, res); 350
351 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
352 pnpacpi_allocated_resource, dev);
353
354 if (ACPI_FAILURE(status)) {
355 if (status != AE_NOT_FOUND)
356 dev_err(&dev->dev, "can't evaluate _CRS: %d", status);
357 return -EPERM;
358 }
359 return 0;
394} 360}
395 361
396static __init void pnpacpi_parse_dma_option(struct pnp_option *option, 362static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
363 struct pnp_option *option,
397 struct acpi_resource_dma *p) 364 struct acpi_resource_dma *p)
398{ 365{
399 int i; 366 int i;
@@ -410,10 +377,11 @@ static __init void pnpacpi_parse_dma_option(struct pnp_option *option,
410 377
411 dma->flags = dma_flags(p->type, p->bus_master, p->transfer); 378 dma->flags = dma_flags(p->type, p->bus_master, p->transfer);
412 379
413 pnp_register_dma_resource(option, dma); 380 pnp_register_dma_resource(dev, option, dma);
414} 381}
415 382
416static __init void pnpacpi_parse_irq_option(struct pnp_option *option, 383static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
384 struct pnp_option *option,
417 struct acpi_resource_irq *p) 385 struct acpi_resource_irq *p)
418{ 386{
419 int i; 387 int i;
@@ -428,12 +396,13 @@ static __init void pnpacpi_parse_irq_option(struct pnp_option *option,
428 for (i = 0; i < p->interrupt_count; i++) 396 for (i = 0; i < p->interrupt_count; i++)
429 if (p->interrupts[i]) 397 if (p->interrupts[i])
430 __set_bit(p->interrupts[i], irq->map); 398 __set_bit(p->interrupts[i], irq->map);
431 irq->flags = irq_flags(p->triggering, p->polarity); 399 irq->flags = irq_flags(p->triggering, p->polarity, p->sharable);
432 400
433 pnp_register_irq_resource(option, irq); 401 pnp_register_irq_resource(dev, option, irq);
434} 402}
435 403
436static __init void pnpacpi_parse_ext_irq_option(struct pnp_option *option, 404static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
405 struct pnp_option *option,
437 struct acpi_resource_extended_irq *p) 406 struct acpi_resource_extended_irq *p)
438{ 407{
439 int i; 408 int i;
@@ -448,12 +417,13 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
448 for (i = 0; i < p->interrupt_count; i++) 417 for (i = 0; i < p->interrupt_count; i++)
449 if (p->interrupts[i]) 418 if (p->interrupts[i])
450 __set_bit(p->interrupts[i], irq->map); 419 __set_bit(p->interrupts[i], irq->map);
451 irq->flags = irq_flags(p->triggering, p->polarity); 420 irq->flags = irq_flags(p->triggering, p->polarity, p->sharable);
452 421
453 pnp_register_irq_resource(option, irq); 422 pnp_register_irq_resource(dev, option, irq);
454} 423}
455 424
456static __init void pnpacpi_parse_port_option(struct pnp_option *option, 425static __init void pnpacpi_parse_port_option(struct pnp_dev *dev,
426 struct pnp_option *option,
457 struct acpi_resource_io *io) 427 struct acpi_resource_io *io)
458{ 428{
459 struct pnp_port *port; 429 struct pnp_port *port;
@@ -469,10 +439,11 @@ static __init void pnpacpi_parse_port_option(struct pnp_option *option,
469 port->size = io->address_length; 439 port->size = io->address_length;
470 port->flags = ACPI_DECODE_16 == io->io_decode ? 440 port->flags = ACPI_DECODE_16 == io->io_decode ?
471 PNP_PORT_FLAG_16BITADDR : 0; 441 PNP_PORT_FLAG_16BITADDR : 0;
472 pnp_register_port_resource(option, port); 442 pnp_register_port_resource(dev, option, port);
473} 443}
474 444
475static __init void pnpacpi_parse_fixed_port_option(struct pnp_option *option, 445static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev,
446 struct pnp_option *option,
476 struct acpi_resource_fixed_io *io) 447 struct acpi_resource_fixed_io *io)
477{ 448{
478 struct pnp_port *port; 449 struct pnp_port *port;
@@ -486,10 +457,11 @@ static __init void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
486 port->size = io->address_length; 457 port->size = io->address_length;
487 port->align = 0; 458 port->align = 0;
488 port->flags = PNP_PORT_FLAG_FIXED; 459 port->flags = PNP_PORT_FLAG_FIXED;
489 pnp_register_port_resource(option, port); 460 pnp_register_port_resource(dev, option, port);
490} 461}
491 462
492static __init void pnpacpi_parse_mem24_option(struct pnp_option *option, 463static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
464 struct pnp_option *option,
493 struct acpi_resource_memory24 *p) 465 struct acpi_resource_memory24 *p)
494{ 466{
495 struct pnp_mem *mem; 467 struct pnp_mem *mem;
@@ -507,10 +479,11 @@ static __init void pnpacpi_parse_mem24_option(struct pnp_option *option,
507 mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ? 479 mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
508 IORESOURCE_MEM_WRITEABLE : 0; 480 IORESOURCE_MEM_WRITEABLE : 0;
509 481
510 pnp_register_mem_resource(option, mem); 482 pnp_register_mem_resource(dev, option, mem);
511} 483}
512 484
513static __init void pnpacpi_parse_mem32_option(struct pnp_option *option, 485static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev,
486 struct pnp_option *option,
514 struct acpi_resource_memory32 *p) 487 struct acpi_resource_memory32 *p)
515{ 488{
516 struct pnp_mem *mem; 489 struct pnp_mem *mem;
@@ -528,10 +501,11 @@ static __init void pnpacpi_parse_mem32_option(struct pnp_option *option,
528 mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ? 501 mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
529 IORESOURCE_MEM_WRITEABLE : 0; 502 IORESOURCE_MEM_WRITEABLE : 0;
530 503
531 pnp_register_mem_resource(option, mem); 504 pnp_register_mem_resource(dev, option, mem);
532} 505}
533 506
534static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option, 507static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev,
508 struct pnp_option *option,
535 struct acpi_resource_fixed_memory32 *p) 509 struct acpi_resource_fixed_memory32 *p)
536{ 510{
537 struct pnp_mem *mem; 511 struct pnp_mem *mem;
@@ -548,10 +522,11 @@ static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
548 mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ? 522 mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
549 IORESOURCE_MEM_WRITEABLE : 0; 523 IORESOURCE_MEM_WRITEABLE : 0;
550 524
551 pnp_register_mem_resource(option, mem); 525 pnp_register_mem_resource(dev, option, mem);
552} 526}
553 527
554static __init void pnpacpi_parse_address_option(struct pnp_option *option, 528static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
529 struct pnp_option *option,
555 struct acpi_resource *r) 530 struct acpi_resource *r)
556{ 531{
557 struct acpi_resource_address64 addr, *p = &addr; 532 struct acpi_resource_address64 addr, *p = &addr;
@@ -579,7 +554,7 @@ static __init void pnpacpi_parse_address_option(struct pnp_option *option,
579 mem->flags = (p->info.mem.write_protect == 554 mem->flags = (p->info.mem.write_protect ==
580 ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE 555 ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE
581 : 0; 556 : 0;
582 pnp_register_mem_resource(option, mem); 557 pnp_register_mem_resource(dev, option, mem);
583 } else if (p->resource_type == ACPI_IO_RANGE) { 558 } else if (p->resource_type == ACPI_IO_RANGE) {
584 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); 559 port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
585 if (!port) 560 if (!port)
@@ -588,7 +563,7 @@ static __init void pnpacpi_parse_address_option(struct pnp_option *option,
588 port->size = p->address_length; 563 port->size = p->address_length;
589 port->align = 0; 564 port->align = 0;
590 port->flags = PNP_PORT_FLAG_FIXED; 565 port->flags = PNP_PORT_FLAG_FIXED;
591 pnp_register_port_resource(option, port); 566 pnp_register_port_resource(dev, option, port);
592 } 567 }
593} 568}
594 569
@@ -608,11 +583,11 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
608 583
609 switch (res->type) { 584 switch (res->type) {
610 case ACPI_RESOURCE_TYPE_IRQ: 585 case ACPI_RESOURCE_TYPE_IRQ:
611 pnpacpi_parse_irq_option(option, &res->data.irq); 586 pnpacpi_parse_irq_option(dev, option, &res->data.irq);
612 break; 587 break;
613 588
614 case ACPI_RESOURCE_TYPE_DMA: 589 case ACPI_RESOURCE_TYPE_DMA:
615 pnpacpi_parse_dma_option(option, &res->data.dma); 590 pnpacpi_parse_dma_option(dev, option, &res->data.dma);
616 break; 591 break;
617 592
618 case ACPI_RESOURCE_TYPE_START_DEPENDENT: 593 case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -642,19 +617,22 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
642 case ACPI_RESOURCE_TYPE_END_DEPENDENT: 617 case ACPI_RESOURCE_TYPE_END_DEPENDENT:
643 /*only one EndDependentFn is allowed */ 618 /*only one EndDependentFn is allowed */
644 if (!parse_data->option_independent) { 619 if (!parse_data->option_independent) {
645 pnp_warn("PnPACPI: more than one EndDependentFn"); 620 dev_warn(&dev->dev, "more than one EndDependentFn "
621 "in _PRS\n");
646 return AE_ERROR; 622 return AE_ERROR;
647 } 623 }
648 parse_data->option = parse_data->option_independent; 624 parse_data->option = parse_data->option_independent;
649 parse_data->option_independent = NULL; 625 parse_data->option_independent = NULL;
626 dev_dbg(&dev->dev, "end dependent options\n");
650 break; 627 break;
651 628
652 case ACPI_RESOURCE_TYPE_IO: 629 case ACPI_RESOURCE_TYPE_IO:
653 pnpacpi_parse_port_option(option, &res->data.io); 630 pnpacpi_parse_port_option(dev, option, &res->data.io);
654 break; 631 break;
655 632
656 case ACPI_RESOURCE_TYPE_FIXED_IO: 633 case ACPI_RESOURCE_TYPE_FIXED_IO:
657 pnpacpi_parse_fixed_port_option(option, &res->data.fixed_io); 634 pnpacpi_parse_fixed_port_option(dev, option,
635 &res->data.fixed_io);
658 break; 636 break;
659 637
660 case ACPI_RESOURCE_TYPE_VENDOR: 638 case ACPI_RESOURCE_TYPE_VENDOR:
@@ -662,57 +640,67 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
662 break; 640 break;
663 641
664 case ACPI_RESOURCE_TYPE_MEMORY24: 642 case ACPI_RESOURCE_TYPE_MEMORY24:
665 pnpacpi_parse_mem24_option(option, &res->data.memory24); 643 pnpacpi_parse_mem24_option(dev, option, &res->data.memory24);
666 break; 644 break;
667 645
668 case ACPI_RESOURCE_TYPE_MEMORY32: 646 case ACPI_RESOURCE_TYPE_MEMORY32:
669 pnpacpi_parse_mem32_option(option, &res->data.memory32); 647 pnpacpi_parse_mem32_option(dev, option, &res->data.memory32);
670 break; 648 break;
671 649
672 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 650 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
673 pnpacpi_parse_fixed_mem32_option(option, 651 pnpacpi_parse_fixed_mem32_option(dev, option,
674 &res->data.fixed_memory32); 652 &res->data.fixed_memory32);
675 break; 653 break;
676 654
677 case ACPI_RESOURCE_TYPE_ADDRESS16: 655 case ACPI_RESOURCE_TYPE_ADDRESS16:
678 case ACPI_RESOURCE_TYPE_ADDRESS32: 656 case ACPI_RESOURCE_TYPE_ADDRESS32:
679 case ACPI_RESOURCE_TYPE_ADDRESS64: 657 case ACPI_RESOURCE_TYPE_ADDRESS64:
680 pnpacpi_parse_address_option(option, res); 658 pnpacpi_parse_address_option(dev, option, res);
681 break; 659 break;
682 660
683 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: 661 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
684 break; 662 break;
685 663
686 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: 664 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
687 pnpacpi_parse_ext_irq_option(option, &res->data.extended_irq); 665 pnpacpi_parse_ext_irq_option(dev, option,
666 &res->data.extended_irq);
688 break; 667 break;
689 668
690 case ACPI_RESOURCE_TYPE_GENERIC_REGISTER: 669 case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
691 break; 670 break;
692 671
693 default: 672 default:
694 pnp_warn("PnPACPI: unknown resource type %d", res->type); 673 dev_warn(&dev->dev, "unknown resource type %d in _PRS\n",
674 res->type);
695 return AE_ERROR; 675 return AE_ERROR;
696 } 676 }
697 677
698 return AE_OK; 678 return AE_OK;
699} 679}
700 680
701acpi_status __init pnpacpi_parse_resource_option_data(acpi_handle handle, 681int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev)
702 struct pnp_dev *dev)
703{ 682{
683 acpi_handle handle = dev->data;
704 acpi_status status; 684 acpi_status status;
705 struct acpipnp_parse_option_s parse_data; 685 struct acpipnp_parse_option_s parse_data;
706 686
687 dev_dbg(&dev->dev, "parse resource options\n");
688
707 parse_data.option = pnp_register_independent_option(dev); 689 parse_data.option = pnp_register_independent_option(dev);
708 if (!parse_data.option) 690 if (!parse_data.option)
709 return AE_ERROR; 691 return -ENOMEM;
692
710 parse_data.option_independent = parse_data.option; 693 parse_data.option_independent = parse_data.option;
711 parse_data.dev = dev; 694 parse_data.dev = dev;
712 status = acpi_walk_resources(handle, METHOD_NAME__PRS, 695 status = acpi_walk_resources(handle, METHOD_NAME__PRS,
713 pnpacpi_option_resource, &parse_data); 696 pnpacpi_option_resource, &parse_data);
714 697
715 return status; 698 if (ACPI_FAILURE(status)) {
699 if (status != AE_NOT_FOUND)
700 dev_err(&dev->dev, "can't evaluate _PRS: %d", status);
701 return -EPERM;
702 }
703 return 0;
716} 704}
717 705
718static int pnpacpi_supported_resource(struct acpi_resource *res) 706static int pnpacpi_supported_resource(struct acpi_resource *res)
@@ -760,9 +748,10 @@ static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data)
760 return AE_OK; 748 return AE_OK;
761} 749}
762 750
763int pnpacpi_build_resource_template(acpi_handle handle, 751int pnpacpi_build_resource_template(struct pnp_dev *dev,
764 struct acpi_buffer *buffer) 752 struct acpi_buffer *buffer)
765{ 753{
754 acpi_handle handle = dev->data;
766 struct acpi_resource *resource; 755 struct acpi_resource *resource;
767 int res_cnt = 0; 756 int res_cnt = 0;
768 acpi_status status; 757 acpi_status status;
@@ -770,7 +759,7 @@ int pnpacpi_build_resource_template(acpi_handle handle,
770 status = acpi_walk_resources(handle, METHOD_NAME__CRS, 759 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
771 pnpacpi_count_resources, &res_cnt); 760 pnpacpi_count_resources, &res_cnt);
772 if (ACPI_FAILURE(status)) { 761 if (ACPI_FAILURE(status)) {
773 pnp_err("Evaluate _CRS failed"); 762 dev_err(&dev->dev, "can't evaluate _CRS: %d\n", status);
774 return -EINVAL; 763 return -EINVAL;
775 } 764 }
776 if (!res_cnt) 765 if (!res_cnt)
@@ -779,13 +768,13 @@ int pnpacpi_build_resource_template(acpi_handle handle,
779 buffer->pointer = kzalloc(buffer->length - 1, GFP_KERNEL); 768 buffer->pointer = kzalloc(buffer->length - 1, GFP_KERNEL);
780 if (!buffer->pointer) 769 if (!buffer->pointer)
781 return -ENOMEM; 770 return -ENOMEM;
782 pnp_dbg("Res cnt %d", res_cnt); 771
783 resource = (struct acpi_resource *)buffer->pointer; 772 resource = (struct acpi_resource *)buffer->pointer;
784 status = acpi_walk_resources(handle, METHOD_NAME__CRS, 773 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
785 pnpacpi_type_resources, &resource); 774 pnpacpi_type_resources, &resource);
786 if (ACPI_FAILURE(status)) { 775 if (ACPI_FAILURE(status)) {
787 kfree(buffer->pointer); 776 kfree(buffer->pointer);
788 pnp_err("Evaluate _CRS failed"); 777 dev_err(&dev->dev, "can't evaluate _CRS: %d\n", status);
789 return -EINVAL; 778 return -EINVAL;
790 } 779 }
791 /* resource will pointer the end resource now */ 780 /* resource will pointer the end resource now */
@@ -794,129 +783,184 @@ int pnpacpi_build_resource_template(acpi_handle handle,
794 return 0; 783 return 0;
795} 784}
796 785
797static void pnpacpi_encode_irq(struct acpi_resource *resource, 786static void pnpacpi_encode_irq(struct pnp_dev *dev,
787 struct acpi_resource *resource,
798 struct resource *p) 788 struct resource *p)
799{ 789{
790 struct acpi_resource_irq *irq = &resource->data.irq;
800 int triggering, polarity; 791 int triggering, polarity;
801 792
802 decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity); 793 decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity);
803 resource->data.irq.triggering = triggering; 794 irq->triggering = triggering;
804 resource->data.irq.polarity = polarity; 795 irq->polarity = polarity;
805 if (triggering == ACPI_EDGE_SENSITIVE) 796 if (triggering == ACPI_EDGE_SENSITIVE)
806 resource->data.irq.sharable = ACPI_EXCLUSIVE; 797 irq->sharable = ACPI_EXCLUSIVE;
807 else 798 else
808 resource->data.irq.sharable = ACPI_SHARED; 799 irq->sharable = ACPI_SHARED;
809 resource->data.irq.interrupt_count = 1; 800 irq->interrupt_count = 1;
810 resource->data.irq.interrupts[0] = p->start; 801 irq->interrupts[0] = p->start;
802
803 dev_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start,
804 triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
805 polarity == ACPI_ACTIVE_LOW ? "low" : "high",
806 irq->sharable == ACPI_SHARED ? "shared" : "exclusive");
811} 807}
812 808
813static void pnpacpi_encode_ext_irq(struct acpi_resource *resource, 809static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
810 struct acpi_resource *resource,
814 struct resource *p) 811 struct resource *p)
815{ 812{
813 struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq;
816 int triggering, polarity; 814 int triggering, polarity;
817 815
818 decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity); 816 decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity);
819 resource->data.extended_irq.producer_consumer = ACPI_CONSUMER; 817 extended_irq->producer_consumer = ACPI_CONSUMER;
820 resource->data.extended_irq.triggering = triggering; 818 extended_irq->triggering = triggering;
821 resource->data.extended_irq.polarity = polarity; 819 extended_irq->polarity = polarity;
822 if (triggering == ACPI_EDGE_SENSITIVE) 820 if (triggering == ACPI_EDGE_SENSITIVE)
823 resource->data.irq.sharable = ACPI_EXCLUSIVE; 821 extended_irq->sharable = ACPI_EXCLUSIVE;
824 else 822 else
825 resource->data.irq.sharable = ACPI_SHARED; 823 extended_irq->sharable = ACPI_SHARED;
826 resource->data.extended_irq.interrupt_count = 1; 824 extended_irq->interrupt_count = 1;
827 resource->data.extended_irq.interrupts[0] = p->start; 825 extended_irq->interrupts[0] = p->start;
826
827 dev_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start,
828 triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
829 polarity == ACPI_ACTIVE_LOW ? "low" : "high",
830 extended_irq->sharable == ACPI_SHARED ? "shared" : "exclusive");
828} 831}
829 832
830static void pnpacpi_encode_dma(struct acpi_resource *resource, 833static void pnpacpi_encode_dma(struct pnp_dev *dev,
834 struct acpi_resource *resource,
831 struct resource *p) 835 struct resource *p)
832{ 836{
837 struct acpi_resource_dma *dma = &resource->data.dma;
838
833 /* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */ 839 /* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */
834 switch (p->flags & IORESOURCE_DMA_SPEED_MASK) { 840 switch (p->flags & IORESOURCE_DMA_SPEED_MASK) {
835 case IORESOURCE_DMA_TYPEA: 841 case IORESOURCE_DMA_TYPEA:
836 resource->data.dma.type = ACPI_TYPE_A; 842 dma->type = ACPI_TYPE_A;
837 break; 843 break;
838 case IORESOURCE_DMA_TYPEB: 844 case IORESOURCE_DMA_TYPEB:
839 resource->data.dma.type = ACPI_TYPE_B; 845 dma->type = ACPI_TYPE_B;
840 break; 846 break;
841 case IORESOURCE_DMA_TYPEF: 847 case IORESOURCE_DMA_TYPEF:
842 resource->data.dma.type = ACPI_TYPE_F; 848 dma->type = ACPI_TYPE_F;
843 break; 849 break;
844 default: 850 default:
845 resource->data.dma.type = ACPI_COMPATIBILITY; 851 dma->type = ACPI_COMPATIBILITY;
846 } 852 }
847 853
848 switch (p->flags & IORESOURCE_DMA_TYPE_MASK) { 854 switch (p->flags & IORESOURCE_DMA_TYPE_MASK) {
849 case IORESOURCE_DMA_8BIT: 855 case IORESOURCE_DMA_8BIT:
850 resource->data.dma.transfer = ACPI_TRANSFER_8; 856 dma->transfer = ACPI_TRANSFER_8;
851 break; 857 break;
852 case IORESOURCE_DMA_8AND16BIT: 858 case IORESOURCE_DMA_8AND16BIT:
853 resource->data.dma.transfer = ACPI_TRANSFER_8_16; 859 dma->transfer = ACPI_TRANSFER_8_16;
854 break; 860 break;
855 default: 861 default:
856 resource->data.dma.transfer = ACPI_TRANSFER_16; 862 dma->transfer = ACPI_TRANSFER_16;
857 } 863 }
858 864
859 resource->data.dma.bus_master = !!(p->flags & IORESOURCE_DMA_MASTER); 865 dma->bus_master = !!(p->flags & IORESOURCE_DMA_MASTER);
860 resource->data.dma.channel_count = 1; 866 dma->channel_count = 1;
861 resource->data.dma.channels[0] = p->start; 867 dma->channels[0] = p->start;
868
869 dev_dbg(&dev->dev, " encode dma %d "
870 "type %#x transfer %#x master %d\n",
871 (int) p->start, dma->type, dma->transfer, dma->bus_master);
862} 872}
863 873
864static void pnpacpi_encode_io(struct acpi_resource *resource, 874static void pnpacpi_encode_io(struct pnp_dev *dev,
875 struct acpi_resource *resource,
865 struct resource *p) 876 struct resource *p)
866{ 877{
878 struct acpi_resource_io *io = &resource->data.io;
879
867 /* Note: pnp_assign_port will copy pnp_port->flags into p->flags */ 880 /* Note: pnp_assign_port will copy pnp_port->flags into p->flags */
868 resource->data.io.io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR) ? 881 io->io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR) ?
869 ACPI_DECODE_16 : ACPI_DECODE_10; 882 ACPI_DECODE_16 : ACPI_DECODE_10;
870 resource->data.io.minimum = p->start; 883 io->minimum = p->start;
871 resource->data.io.maximum = p->end; 884 io->maximum = p->end;
872 resource->data.io.alignment = 0; /* Correct? */ 885 io->alignment = 0; /* Correct? */
873 resource->data.io.address_length = p->end - p->start + 1; 886 io->address_length = p->end - p->start + 1;
887
888 dev_dbg(&dev->dev, " encode io %#llx-%#llx decode %#x\n",
889 (unsigned long long) p->start, (unsigned long long) p->end,
890 io->io_decode);
874} 891}
875 892
876static void pnpacpi_encode_fixed_io(struct acpi_resource *resource, 893static void pnpacpi_encode_fixed_io(struct pnp_dev *dev,
894 struct acpi_resource *resource,
877 struct resource *p) 895 struct resource *p)
878{ 896{
879 resource->data.fixed_io.address = p->start; 897 struct acpi_resource_fixed_io *fixed_io = &resource->data.fixed_io;
880 resource->data.fixed_io.address_length = p->end - p->start + 1; 898
899 fixed_io->address = p->start;
900 fixed_io->address_length = p->end - p->start + 1;
901
902 dev_dbg(&dev->dev, " encode fixed_io %#llx-%#llx\n",
903 (unsigned long long) p->start, (unsigned long long) p->end);
881} 904}
882 905
883static void pnpacpi_encode_mem24(struct acpi_resource *resource, 906static void pnpacpi_encode_mem24(struct pnp_dev *dev,
907 struct acpi_resource *resource,
884 struct resource *p) 908 struct resource *p)
885{ 909{
910 struct acpi_resource_memory24 *memory24 = &resource->data.memory24;
911
886 /* Note: pnp_assign_mem will copy pnp_mem->flags into p->flags */ 912 /* Note: pnp_assign_mem will copy pnp_mem->flags into p->flags */
887 resource->data.memory24.write_protect = 913 memory24->write_protect =
888 (p->flags & IORESOURCE_MEM_WRITEABLE) ? 914 (p->flags & IORESOURCE_MEM_WRITEABLE) ?
889 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; 915 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
890 resource->data.memory24.minimum = p->start; 916 memory24->minimum = p->start;
891 resource->data.memory24.maximum = p->end; 917 memory24->maximum = p->end;
892 resource->data.memory24.alignment = 0; 918 memory24->alignment = 0;
893 resource->data.memory24.address_length = p->end - p->start + 1; 919 memory24->address_length = p->end - p->start + 1;
920
921 dev_dbg(&dev->dev, " encode mem24 %#llx-%#llx write_protect %#x\n",
922 (unsigned long long) p->start, (unsigned long long) p->end,
923 memory24->write_protect);
894} 924}
895 925
896static void pnpacpi_encode_mem32(struct acpi_resource *resource, 926static void pnpacpi_encode_mem32(struct pnp_dev *dev,
927 struct acpi_resource *resource,
897 struct resource *p) 928 struct resource *p)
898{ 929{
899 resource->data.memory32.write_protect = 930 struct acpi_resource_memory32 *memory32 = &resource->data.memory32;
931
932 memory32->write_protect =
900 (p->flags & IORESOURCE_MEM_WRITEABLE) ? 933 (p->flags & IORESOURCE_MEM_WRITEABLE) ?
901 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; 934 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
902 resource->data.memory32.minimum = p->start; 935 memory32->minimum = p->start;
903 resource->data.memory32.maximum = p->end; 936 memory32->maximum = p->end;
904 resource->data.memory32.alignment = 0; 937 memory32->alignment = 0;
905 resource->data.memory32.address_length = p->end - p->start + 1; 938 memory32->address_length = p->end - p->start + 1;
939
940 dev_dbg(&dev->dev, " encode mem32 %#llx-%#llx write_protect %#x\n",
941 (unsigned long long) p->start, (unsigned long long) p->end,
942 memory32->write_protect);
906} 943}
907 944
908static void pnpacpi_encode_fixed_mem32(struct acpi_resource *resource, 945static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev,
946 struct acpi_resource *resource,
909 struct resource *p) 947 struct resource *p)
910{ 948{
911 resource->data.fixed_memory32.write_protect = 949 struct acpi_resource_fixed_memory32 *fixed_memory32 = &resource->data.fixed_memory32;
950
951 fixed_memory32->write_protect =
912 (p->flags & IORESOURCE_MEM_WRITEABLE) ? 952 (p->flags & IORESOURCE_MEM_WRITEABLE) ?
913 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; 953 ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
914 resource->data.fixed_memory32.address = p->start; 954 fixed_memory32->address = p->start;
915 resource->data.fixed_memory32.address_length = p->end - p->start + 1; 955 fixed_memory32->address_length = p->end - p->start + 1;
956
957 dev_dbg(&dev->dev, " encode fixed_mem32 %#llx-%#llx "
958 "write_protect %#x\n",
959 (unsigned long long) p->start, (unsigned long long) p->end,
960 fixed_memory32->write_protect);
916} 961}
917 962
918int pnpacpi_encode_resources(struct pnp_resource_table *res_table, 963int pnpacpi_encode_resources(struct pnp_dev *dev, struct acpi_buffer *buffer)
919 struct acpi_buffer *buffer)
920{ 964{
921 int i = 0; 965 int i = 0;
922 /* pnpacpi_build_resource_template allocates extra mem */ 966 /* pnpacpi_build_resource_template allocates extra mem */
@@ -924,58 +968,48 @@ int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
924 struct acpi_resource *resource = buffer->pointer; 968 struct acpi_resource *resource = buffer->pointer;
925 int port = 0, irq = 0, dma = 0, mem = 0; 969 int port = 0, irq = 0, dma = 0, mem = 0;
926 970
927 pnp_dbg("res cnt %d", res_cnt); 971 dev_dbg(&dev->dev, "encode %d resources\n", res_cnt);
928 while (i < res_cnt) { 972 while (i < res_cnt) {
929 switch (resource->type) { 973 switch (resource->type) {
930 case ACPI_RESOURCE_TYPE_IRQ: 974 case ACPI_RESOURCE_TYPE_IRQ:
931 pnp_dbg("Encode irq"); 975 pnpacpi_encode_irq(dev, resource,
932 pnpacpi_encode_irq(resource, 976 pnp_get_resource(dev, IORESOURCE_IRQ, irq));
933 &res_table->irq_resource[irq]);
934 irq++; 977 irq++;
935 break; 978 break;
936 979
937 case ACPI_RESOURCE_TYPE_DMA: 980 case ACPI_RESOURCE_TYPE_DMA:
938 pnp_dbg("Encode dma"); 981 pnpacpi_encode_dma(dev, resource,
939 pnpacpi_encode_dma(resource, 982 pnp_get_resource(dev, IORESOURCE_DMA, dma));
940 &res_table->dma_resource[dma]);
941 dma++; 983 dma++;
942 break; 984 break;
943 case ACPI_RESOURCE_TYPE_IO: 985 case ACPI_RESOURCE_TYPE_IO:
944 pnp_dbg("Encode io"); 986 pnpacpi_encode_io(dev, resource,
945 pnpacpi_encode_io(resource, 987 pnp_get_resource(dev, IORESOURCE_IO, port));
946 &res_table->port_resource[port]);
947 port++; 988 port++;
948 break; 989 break;
949 case ACPI_RESOURCE_TYPE_FIXED_IO: 990 case ACPI_RESOURCE_TYPE_FIXED_IO:
950 pnp_dbg("Encode fixed io"); 991 pnpacpi_encode_fixed_io(dev, resource,
951 pnpacpi_encode_fixed_io(resource, 992 pnp_get_resource(dev, IORESOURCE_IO, port));
952 &res_table->
953 port_resource[port]);
954 port++; 993 port++;
955 break; 994 break;
956 case ACPI_RESOURCE_TYPE_MEMORY24: 995 case ACPI_RESOURCE_TYPE_MEMORY24:
957 pnp_dbg("Encode mem24"); 996 pnpacpi_encode_mem24(dev, resource,
958 pnpacpi_encode_mem24(resource, 997 pnp_get_resource(dev, IORESOURCE_MEM, mem));
959 &res_table->mem_resource[mem]);
960 mem++; 998 mem++;
961 break; 999 break;
962 case ACPI_RESOURCE_TYPE_MEMORY32: 1000 case ACPI_RESOURCE_TYPE_MEMORY32:
963 pnp_dbg("Encode mem32"); 1001 pnpacpi_encode_mem32(dev, resource,
964 pnpacpi_encode_mem32(resource, 1002 pnp_get_resource(dev, IORESOURCE_MEM, mem));
965 &res_table->mem_resource[mem]);
966 mem++; 1003 mem++;
967 break; 1004 break;
968 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 1005 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
969 pnp_dbg("Encode fixed mem32"); 1006 pnpacpi_encode_fixed_mem32(dev, resource,
970 pnpacpi_encode_fixed_mem32(resource, 1007 pnp_get_resource(dev, IORESOURCE_MEM, mem));
971 &res_table->
972 mem_resource[mem]);
973 mem++; 1008 mem++;
974 break; 1009 break;
975 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: 1010 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
976 pnp_dbg("Encode ext irq"); 1011 pnpacpi_encode_ext_irq(dev, resource,
977 pnpacpi_encode_ext_irq(resource, 1012 pnp_get_resource(dev, IORESOURCE_IRQ, irq));
978 &res_table->irq_resource[irq]);
979 irq++; 1013 irq++;
980 break; 1014 break;
981 case ACPI_RESOURCE_TYPE_START_DEPENDENT: 1015 case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -988,7 +1022,8 @@ int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
988 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: 1022 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
989 case ACPI_RESOURCE_TYPE_GENERIC_REGISTER: 1023 case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
990 default: /* other type */ 1024 default: /* other type */
991 pnp_warn("unknown resource type %d", resource->type); 1025 dev_warn(&dev->dev, "can't encode unknown resource "
1026 "type %d\n", resource->type);
992 return -EINVAL; 1027 return -EINVAL;
993 } 1028 }
994 resource++; 1029 resource++;
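
Note on the pnpacpi hunk above: the encode path no longer indexes fixed-size arrays in struct pnp_resource_table; each encoder is handed the pnp_dev and the n-th resource of the matching type via pnp_get_resource(). A minimal usage sketch of that lookup, under the assumption (consistent with the quirks.c hunk further down) that pnp_get_resource() returns a struct resource pointer, or NULL once the index runs past the last resource of that type; the function name is hypothetical and not part of the patch:

static void example_show_assigned(struct pnp_dev *dev)
{
	struct resource *io = pnp_get_resource(dev, IORESOURCE_IO, 0);
	struct resource *irq = pnp_get_resource(dev, IORESOURCE_IRQ, 0);

	if (io && !(io->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED)))
		dev_dbg(&dev->dev, "io %#llx-%#llx\n",
			(unsigned long long) io->start,
			(unsigned long long) io->end);
	if (irq && !(irq->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED)))
		dev_dbg(&dev->dev, "irq %llu\n",
			(unsigned long long) irq->start);
}
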
diff --git a/drivers/pnp/pnpbios/Makefile b/drivers/pnp/pnpbios/Makefile
index 3cd3ed760605..310e2b3a7710 100644
--- a/drivers/pnp/pnpbios/Makefile
+++ b/drivers/pnp/pnpbios/Makefile
@@ -5,3 +5,7 @@
5pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o 5pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o
6 6
7obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y) 7obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y)
8
9ifeq ($(CONFIG_PNP_DEBUG),y)
10EXTRA_CFLAGS += -DDEBUG
11endif
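
The Makefile hunk matters because dev_dbg() is compiled out unless DEBUG is defined, so without CONFIG_PNP_DEBUG=y none of the new debug output added in this series reaches the log. A simplified sketch of that gating; the real macro in <linux/device.h> is more elaborate:

#ifdef DEBUG
#define dev_dbg(dev, format, arg...) \
	dev_printk(KERN_DEBUG, dev, format, ##arg)
#else
#define dev_dbg(dev, format, arg...) \
	do { } while (0)
#endif
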
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index a8364d815222..7ff824496b39 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -7,7 +7,6 @@
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/pnpbios.h>
11#include <linux/device.h> 10#include <linux/device.h>
12#include <linux/pnp.h> 11#include <linux/pnp.h>
13#include <linux/mm.h> 12#include <linux/mm.h>
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index a8a51500e1e9..19a4be1a9a31 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -50,7 +50,6 @@
50#include <linux/init.h> 50#include <linux/init.h>
51#include <linux/linkage.h> 51#include <linux/linkage.h>
52#include <linux/kernel.h> 52#include <linux/kernel.h>
53#include <linux/pnpbios.h>
54#include <linux/device.h> 53#include <linux/device.h>
55#include <linux/pnp.h> 54#include <linux/pnp.h>
56#include <linux/mm.h> 55#include <linux/mm.h>
@@ -69,6 +68,7 @@
69#include <asm/system.h> 68#include <asm/system.h>
70#include <asm/byteorder.h> 69#include <asm/byteorder.h>
71 70
71#include "../base.h"
72#include "pnpbios.h" 72#include "pnpbios.h"
73 73
74/* 74/*
@@ -203,8 +203,7 @@ static int pnp_dock_thread(void *unused)
203 203
204#endif /* CONFIG_HOTPLUG */ 204#endif /* CONFIG_HOTPLUG */
205 205
206static int pnpbios_get_resources(struct pnp_dev *dev, 206static int pnpbios_get_resources(struct pnp_dev *dev)
207 struct pnp_resource_table *res)
208{ 207{
209 u8 nodenum = dev->number; 208 u8 nodenum = dev->number;
210 struct pnp_bios_node *node; 209 struct pnp_bios_node *node;
@@ -212,6 +211,7 @@ static int pnpbios_get_resources(struct pnp_dev *dev,
212 if (!pnpbios_is_dynamic(dev)) 211 if (!pnpbios_is_dynamic(dev))
213 return -EPERM; 212 return -EPERM;
214 213
214 dev_dbg(&dev->dev, "get resources\n");
215 node = kzalloc(node_info.max_node_size, GFP_KERNEL); 215 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
216 if (!node) 216 if (!node)
217 return -1; 217 return -1;
@@ -219,14 +219,13 @@ static int pnpbios_get_resources(struct pnp_dev *dev,
219 kfree(node); 219 kfree(node);
220 return -ENODEV; 220 return -ENODEV;
221 } 221 }
222 pnpbios_read_resources_from_node(res, node); 222 pnpbios_read_resources_from_node(dev, node);
223 dev->active = pnp_is_active(dev); 223 dev->active = pnp_is_active(dev);
224 kfree(node); 224 kfree(node);
225 return 0; 225 return 0;
226} 226}
227 227
228static int pnpbios_set_resources(struct pnp_dev *dev, 228static int pnpbios_set_resources(struct pnp_dev *dev)
229 struct pnp_resource_table *res)
230{ 229{
231 u8 nodenum = dev->number; 230 u8 nodenum = dev->number;
232 struct pnp_bios_node *node; 231 struct pnp_bios_node *node;
@@ -235,6 +234,7 @@ static int pnpbios_set_resources(struct pnp_dev *dev,
235 if (!pnpbios_is_dynamic(dev)) 234 if (!pnpbios_is_dynamic(dev))
236 return -EPERM; 235 return -EPERM;
237 236
237 dev_dbg(&dev->dev, "set resources\n");
238 node = kzalloc(node_info.max_node_size, GFP_KERNEL); 238 node = kzalloc(node_info.max_node_size, GFP_KERNEL);
239 if (!node) 239 if (!node)
240 return -1; 240 return -1;
@@ -242,7 +242,7 @@ static int pnpbios_set_resources(struct pnp_dev *dev,
242 kfree(node); 242 kfree(node);
243 return -ENODEV; 243 return -ENODEV;
244 } 244 }
245 if (pnpbios_write_resources_to_node(res, node) < 0) { 245 if (pnpbios_write_resources_to_node(dev, node) < 0) {
246 kfree(node); 246 kfree(node);
247 return -1; 247 return -1;
248 } 248 }
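
With the struct pnp_resource_table parameter dropped, the protocol ->get and ->set callbacks become plain per-device hooks. A sketch of how pnpbios_protocol is assumed to wire them up; the structure definition itself is not part of this hunk and the field values are assumptions:

static struct pnp_protocol pnpbios_protocol = {
	.name		= "Plug and Play BIOS",
	.get		= pnpbios_get_resources,
	.set		= pnpbios_set_resources,
	.disable	= pnpbios_disable_resources,
};
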
@@ -317,7 +317,6 @@ static int __init insert_device(struct pnp_bios_node *node)
317{ 317{
318 struct list_head *pos; 318 struct list_head *pos;
319 struct pnp_dev *dev; 319 struct pnp_dev *dev;
320 struct pnp_id *dev_id;
321 char id[8]; 320 char id[8];
322 321
323 /* check if the device is already added */ 322 /* check if the device is already added */
@@ -327,20 +326,11 @@ static int __init insert_device(struct pnp_bios_node *node)
327 return -1; 326 return -1;
328 } 327 }
329 328
330 dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL); 329 pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id);
330 dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id);
331 if (!dev) 331 if (!dev)
332 return -1; 332 return -1;
333 333
334 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
335 if (!dev_id) {
336 kfree(dev);
337 return -1;
338 }
339
340 dev->number = node->handle;
341 pnpid32_to_pnpid(node->eisa_id, id);
342 memcpy(dev_id->id, id, 7);
343 pnp_add_id(dev_id, dev);
344 pnpbios_parse_data_stream(dev, node); 334 pnpbios_parse_data_stream(dev, node);
345 dev->active = pnp_is_active(dev); 335 dev->active = pnp_is_active(dev);
346 dev->flags = node->flags; 336 dev->flags = node->flags;
@@ -353,11 +343,10 @@ static int __init insert_device(struct pnp_bios_node *node)
353 dev->capabilities |= PNP_WRITE; 343 dev->capabilities |= PNP_WRITE;
354 if (dev->flags & PNPBIOS_REMOVABLE) 344 if (dev->flags & PNPBIOS_REMOVABLE)
355 dev->capabilities |= PNP_REMOVABLE; 345 dev->capabilities |= PNP_REMOVABLE;
356 dev->protocol = &pnpbios_protocol;
357 346
358 /* clear out the damaged flags */ 347 /* clear out the damaged flags */
359 if (!dev->active) 348 if (!dev->active)
360 pnp_init_resource_table(&dev->res); 349 pnp_init_resources(dev);
361 350
362 pnp_add_device(dev); 351 pnp_add_device(dev);
363 pnpbios_interface_attach_device(node); 352 pnpbios_interface_attach_device(node);
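
insert_device() now leans on pnp_alloc_dev(), which folds the old open-coded kzalloc of the pnp_dev, the protocol/handle bookkeeping, and the first pnp_add_id() call into one helper. A rough sketch of what that helper is assumed to do; the real version in drivers/pnp/core.c does more device-model setup:

struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
			      char *pnpid)
{
	struct pnp_dev *dev;

	dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->protocol = protocol;
	dev->number = id;
	if (!pnp_add_id(dev, pnpid)) {	/* attach the primary EISA id */
		kfree(dev);
		return NULL;
	}
	return dev;
}
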
diff --git a/drivers/pnp/pnpbios/pnpbios.h b/drivers/pnp/pnpbios/pnpbios.h
index d8cb2fd1f127..b09cf6dc2075 100644
--- a/drivers/pnp/pnpbios/pnpbios.h
+++ b/drivers/pnp/pnpbios/pnpbios.h
@@ -2,6 +2,142 @@
2 * pnpbios.h - contains local definitions 2 * pnpbios.h - contains local definitions
3 */ 3 */
4 4
5/*
6 * Include file for the interface to a PnP BIOS
7 *
8 * Original BIOS code (C) 1998 Christian Schmidt (chr.schmidt@tu-bs.de)
9 * PnP handler parts (c) 1998 Tom Lees <tom@lpsg.demon.co.uk>
10 * Minor reorganizations by David Hinds <dahinds@users.sourceforge.net>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2, or (at your option) any
15 * later version.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27/*
28 * Return codes
29 */
30#define PNP_SUCCESS 0x00
31#define PNP_NOT_SET_STATICALLY 0x7f
32#define PNP_UNKNOWN_FUNCTION 0x81
33#define PNP_FUNCTION_NOT_SUPPORTED 0x82
34#define PNP_INVALID_HANDLE 0x83
35#define PNP_BAD_PARAMETER 0x84
36#define PNP_SET_FAILED 0x85
37#define PNP_EVENTS_NOT_PENDING 0x86
38#define PNP_SYSTEM_NOT_DOCKED 0x87
39#define PNP_NO_ISA_PNP_CARDS 0x88
40#define PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES 0x89
41#define PNP_CONFIG_CHANGE_FAILED_NO_BATTERY 0x8a
42#define PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT 0x8b
43#define PNP_BUFFER_TOO_SMALL 0x8c
44#define PNP_USE_ESCD_SUPPORT 0x8d
45#define PNP_MESSAGE_NOT_SUPPORTED 0x8e
46#define PNP_HARDWARE_ERROR 0x8f
47
48#define ESCD_SUCCESS 0x00
49#define ESCD_IO_ERROR_READING 0x55
50#define ESCD_INVALID 0x56
51#define ESCD_BUFFER_TOO_SMALL 0x59
52#define ESCD_NVRAM_TOO_SMALL 0x5a
53#define ESCD_FUNCTION_NOT_SUPPORTED 0x81
54
55/*
56 * Events that can be received by "get event"
57 */
58#define PNPEV_ABOUT_TO_CHANGE_CONFIG 0x0001
59#define PNPEV_DOCK_CHANGED 0x0002
60#define PNPEV_SYSTEM_DEVICE_CHANGED 0x0003
61#define PNPEV_CONFIG_CHANGED_FAILED 0x0004
62#define PNPEV_UNKNOWN_SYSTEM_EVENT 0xffff
63/* 0x8000 through 0xfffe are OEM defined */
64
65/*
66 * Messages that should be sent through "send message"
67 */
68#define PNPMSG_OK 0x00
69#define PNPMSG_ABORT 0x01
70#define PNPMSG_UNDOCK_DEFAULT_ACTION 0x40
71#define PNPMSG_POWER_OFF 0x41
72#define PNPMSG_PNP_OS_ACTIVE 0x42
73#define PNPMSG_PNP_OS_INACTIVE 0x43
74
75/*
76 * Plug and Play BIOS flags
77 */
78#define PNPBIOS_NO_DISABLE 0x0001
79#define PNPBIOS_NO_CONFIG 0x0002
80#define PNPBIOS_OUTPUT 0x0004
81#define PNPBIOS_INPUT 0x0008
82#define PNPBIOS_BOOTABLE 0x0010
83#define PNPBIOS_DOCK 0x0020
84#define PNPBIOS_REMOVABLE 0x0040
85#define pnpbios_is_static(x) (((x)->flags & 0x0100) == 0x0000)
86#define pnpbios_is_dynamic(x) ((x)->flags & 0x0080)
87
88/*
89 * Function Parameters
90 */
91#define PNPMODE_STATIC 1
92#define PNPMODE_DYNAMIC 0
93
94/* 0x8000 through 0xffff are OEM defined */
95
96#pragma pack(1)
97struct pnp_dev_node_info {
98 __u16 no_nodes;
99 __u16 max_node_size;
100};
101struct pnp_docking_station_info {
102 __u32 location_id;
103 __u32 serial;
104 __u16 capabilities;
105};
106struct pnp_isa_config_struc {
107 __u8 revision;
108 __u8 no_csns;
109 __u16 isa_rd_data_port;
110 __u16 reserved;
111};
112struct escd_info_struc {
113 __u16 min_escd_write_size;
114 __u16 escd_size;
115 __u32 nv_storage_base;
116};
117struct pnp_bios_node {
118 __u16 size;
119 __u8 handle;
120 __u32 eisa_id;
121 __u8 type_code[3];
122 __u16 flags;
123 __u8 data[0];
124};
125#pragma pack()
126
127/* non-exported */
128extern struct pnp_dev_node_info node_info;
129
130extern int pnp_bios_dev_node_info(struct pnp_dev_node_info *data);
131extern int pnp_bios_get_dev_node(u8 *nodenum, char config,
132 struct pnp_bios_node *data);
133extern int pnp_bios_set_dev_node(u8 nodenum, char config,
134 struct pnp_bios_node *data);
135extern int pnp_bios_get_stat_res(char *info);
136extern int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data);
137extern int pnp_bios_escd_info(struct escd_info_struc *data);
138extern int pnp_bios_read_escd(char *data, u32 nvram_base);
139extern int pnp_bios_dock_station_info(struct pnp_docking_station_info *data);
140
5#pragma pack(1) 141#pragma pack(1)
6union pnp_bios_install_struct { 142union pnp_bios_install_struct {
7 struct { 143 struct {
@@ -28,8 +164,8 @@ extern int pnp_bios_present(void);
28extern int pnpbios_dont_use_current_config; 164extern int pnpbios_dont_use_current_config;
29 165
30extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node); 166extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node);
31extern int pnpbios_read_resources_from_node(struct pnp_resource_table *res, struct pnp_bios_node * node); 167extern int pnpbios_read_resources_from_node(struct pnp_dev *dev, struct pnp_bios_node *node);
32extern int pnpbios_write_resources_to_node(struct pnp_resource_table *res, struct pnp_bios_node * node); 168extern int pnpbios_write_resources_to_node(struct pnp_dev *dev, struct pnp_bios_node *node);
33extern void pnpid32_to_pnpid(u32 id, char *str); 169extern void pnpid32_to_pnpid(u32 id, char *str);
34 170
35extern void pnpbios_print_status(const char * module, u16 status); 171extern void pnpbios_print_status(const char * module, u16 status);
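
The definitions pulled into pnpbios.h above describe byte-packed buffers that the BIOS fills in directly, which is why they sit between #pragma pack(1) and #pragma pack(). A short usage sketch along the lines of pnpbios_get_resources() in core.c; the PNPMODE_DYNAMIC argument and the PNP_SUCCESS return check are assumptions, not shown in this diff:

static int example_fetch_node(struct pnp_dev *dev)
{
	u8 nodenum = dev->number;
	struct pnp_bios_node *node;
	int ret = -ENODEV;

	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node) == PNP_SUCCESS) {
		pnpbios_read_resources_from_node(dev, node);
		ret = 0;
	}
	kfree(node);
	return ret;
}
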
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index bb19bc957bad..b35d921bac6e 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -23,7 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
26#include <linux/pnpbios.h> 26#include <linux/pnp.h>
27#include <linux/init.h> 27#include <linux/init.h>
28 28
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
@@ -256,7 +256,7 @@ int pnpbios_interface_attach_device(struct pnp_bios_node *node)
256 */ 256 */
257int __init pnpbios_proc_init(void) 257int __init pnpbios_proc_init(void)
258{ 258{
259 proc_pnp = proc_mkdir("pnp", proc_bus); 259 proc_pnp = proc_mkdir("bus/pnp", NULL);
260 if (!proc_pnp) 260 if (!proc_pnp)
261 return -EIO; 261 return -EIO;
262 proc_pnp_boot = proc_mkdir("boot", proc_pnp); 262 proc_pnp_boot = proc_mkdir("boot", proc_pnp);
@@ -294,5 +294,5 @@ void __exit pnpbios_proc_exit(void)
294 remove_proc_entry("configuration_info", proc_pnp); 294 remove_proc_entry("configuration_info", proc_pnp);
295 remove_proc_entry("devices", proc_pnp); 295 remove_proc_entry("devices", proc_pnp);
296 remove_proc_entry("boot", proc_pnp); 296 remove_proc_entry("boot", proc_pnp);
297 remove_proc_entry("pnp", proc_bus); 297 remove_proc_entry("bus/pnp", NULL);
298} 298}
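
The proc.c change relies on proc_mkdir() resolving a multi-component path relative to the procfs root when the parent argument is NULL, which removes the need for the old proc_bus pointer. The same idiom, isolated into a minimal sketch:

static struct proc_dir_entry *proc_pnp;

static int __init example_proc_init(void)
{
	proc_pnp = proc_mkdir("bus/pnp", NULL);	/* creates /proc/bus/pnp */
	if (!proc_pnp)
		return -EIO;
	return 0;
}

static void __exit example_proc_exit(void)
{
	remove_proc_entry("bus/pnp", NULL);
}
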
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index caade3531416..5ff9a4c0447e 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -4,7 +4,6 @@
4 4
5#include <linux/ctype.h> 5#include <linux/ctype.h>
6#include <linux/pnp.h> 6#include <linux/pnp.h>
7#include <linux/pnpbios.h>
8#include <linux/string.h> 7#include <linux/string.h>
9#include <linux/slab.h> 8#include <linux/slab.h>
10 9
@@ -16,6 +15,7 @@ inline void pcibios_penalize_isa_irq(int irq, int active)
16} 15}
17#endif /* CONFIG_PCI */ 16#endif /* CONFIG_PCI */
18 17
18#include "../base.h"
19#include "pnpbios.h" 19#include "pnpbios.h"
20 20
21/* standard resource tags */ 21/* standard resource tags */
@@ -53,97 +53,43 @@ inline void pcibios_penalize_isa_irq(int irq, int active)
53 * Allocated Resources 53 * Allocated Resources
54 */ 54 */
55 55
56static void pnpbios_parse_allocated_irqresource(struct pnp_resource_table *res, 56static void pnpbios_parse_allocated_ioresource(struct pnp_dev *dev,
57 int irq) 57 int start, int len)
58{ 58{
59 int i = 0; 59 int flags = 0;
60 60 int end = start + len - 1;
61 while (!(res->irq_resource[i].flags & IORESOURCE_UNSET)
62 && i < PNP_MAX_IRQ)
63 i++;
64 if (i < PNP_MAX_IRQ) {
65 res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag
66 if (irq == -1) {
67 res->irq_resource[i].flags |= IORESOURCE_DISABLED;
68 return;
69 }
70 res->irq_resource[i].start =
71 res->irq_resource[i].end = (unsigned long)irq;
72 pcibios_penalize_isa_irq(irq, 1);
73 }
74}
75 61
76static void pnpbios_parse_allocated_dmaresource(struct pnp_resource_table *res, 62 if (len <= 0 || end >= 0x10003)
77 int dma) 63 flags |= IORESOURCE_DISABLED;
78{
79 int i = 0;
80
81 while (i < PNP_MAX_DMA &&
82 !(res->dma_resource[i].flags & IORESOURCE_UNSET))
83 i++;
84 if (i < PNP_MAX_DMA) {
85 res->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag
86 if (dma == -1) {
87 res->dma_resource[i].flags |= IORESOURCE_DISABLED;
88 return;
89 }
90 res->dma_resource[i].start =
91 res->dma_resource[i].end = (unsigned long)dma;
92 }
93}
94 64
95static void pnpbios_parse_allocated_ioresource(struct pnp_resource_table *res, 65 pnp_add_io_resource(dev, start, end, flags);
96 int io, int len)
97{
98 int i = 0;
99
100 while (!(res->port_resource[i].flags & IORESOURCE_UNSET)
101 && i < PNP_MAX_PORT)
102 i++;
103 if (i < PNP_MAX_PORT) {
104 res->port_resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag
105 if (len <= 0 || (io + len - 1) >= 0x10003) {
106 res->port_resource[i].flags |= IORESOURCE_DISABLED;
107 return;
108 }
109 res->port_resource[i].start = (unsigned long)io;
110 res->port_resource[i].end = (unsigned long)(io + len - 1);
111 }
112} 66}
113 67
114static void pnpbios_parse_allocated_memresource(struct pnp_resource_table *res, 68static void pnpbios_parse_allocated_memresource(struct pnp_dev *dev,
115 int mem, int len) 69 int start, int len)
116{ 70{
117 int i = 0; 71 int flags = 0;
118 72 int end = start + len - 1;
119 while (!(res->mem_resource[i].flags & IORESOURCE_UNSET) 73
120 && i < PNP_MAX_MEM) 74 if (len <= 0)
121 i++; 75 flags |= IORESOURCE_DISABLED;
122 if (i < PNP_MAX_MEM) { 76
123 res->mem_resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag 77 pnp_add_mem_resource(dev, start, end, flags);
124 if (len <= 0) {
125 res->mem_resource[i].flags |= IORESOURCE_DISABLED;
126 return;
127 }
128 res->mem_resource[i].start = (unsigned long)mem;
129 res->mem_resource[i].end = (unsigned long)(mem + len - 1);
130 }
131} 78}
132 79
133static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p, 80static unsigned char *pnpbios_parse_allocated_resource_data(struct pnp_dev *dev,
134 unsigned char *end, 81 unsigned char *p,
135 struct 82 unsigned char *end)
136 pnp_resource_table
137 *res)
138{ 83{
139 unsigned int len, tag; 84 unsigned int len, tag;
140 int io, size, mask, i; 85 int io, size, mask, i, flags;
141 86
142 if (!p) 87 if (!p)
143 return NULL; 88 return NULL;
144 89
145 /* Blank the resource table values */ 90 dev_dbg(&dev->dev, "parse allocated resources\n");
146 pnp_init_resource_table(res); 91
92 pnp_init_resources(dev);
147 93
148 while ((char *)p < (char *)end) { 94 while ((char *)p < (char *)end) {
149 95
@@ -163,7 +109,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
163 goto len_err; 109 goto len_err;
164 io = *(short *)&p[4]; 110 io = *(short *)&p[4];
165 size = *(short *)&p[10]; 111 size = *(short *)&p[10];
166 pnpbios_parse_allocated_memresource(res, io, size); 112 pnpbios_parse_allocated_memresource(dev, io, size);
167 break; 113 break;
168 114
169 case LARGE_TAG_ANSISTR: 115 case LARGE_TAG_ANSISTR:
@@ -179,7 +125,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
179 goto len_err; 125 goto len_err;
180 io = *(int *)&p[4]; 126 io = *(int *)&p[4];
181 size = *(int *)&p[16]; 127 size = *(int *)&p[16];
182 pnpbios_parse_allocated_memresource(res, io, size); 128 pnpbios_parse_allocated_memresource(dev, io, size);
183 break; 129 break;
184 130
185 case LARGE_TAG_FIXEDMEM32: 131 case LARGE_TAG_FIXEDMEM32:
@@ -187,29 +133,37 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
187 goto len_err; 133 goto len_err;
188 io = *(int *)&p[4]; 134 io = *(int *)&p[4];
189 size = *(int *)&p[8]; 135 size = *(int *)&p[8];
190 pnpbios_parse_allocated_memresource(res, io, size); 136 pnpbios_parse_allocated_memresource(dev, io, size);
191 break; 137 break;
192 138
193 case SMALL_TAG_IRQ: 139 case SMALL_TAG_IRQ:
194 if (len < 2 || len > 3) 140 if (len < 2 || len > 3)
195 goto len_err; 141 goto len_err;
142 flags = 0;
196 io = -1; 143 io = -1;
197 mask = p[1] + p[2] * 256; 144 mask = p[1] + p[2] * 256;
198 for (i = 0; i < 16; i++, mask = mask >> 1) 145 for (i = 0; i < 16; i++, mask = mask >> 1)
199 if (mask & 0x01) 146 if (mask & 0x01)
200 io = i; 147 io = i;
201 pnpbios_parse_allocated_irqresource(res, io); 148 if (io != -1)
149 pcibios_penalize_isa_irq(io, 1);
150 else
151 flags = IORESOURCE_DISABLED;
152 pnp_add_irq_resource(dev, io, flags);
202 break; 153 break;
203 154
204 case SMALL_TAG_DMA: 155 case SMALL_TAG_DMA:
205 if (len != 2) 156 if (len != 2)
206 goto len_err; 157 goto len_err;
158 flags = 0;
207 io = -1; 159 io = -1;
208 mask = p[1]; 160 mask = p[1];
209 for (i = 0; i < 8; i++, mask = mask >> 1) 161 for (i = 0; i < 8; i++, mask = mask >> 1)
210 if (mask & 0x01) 162 if (mask & 0x01)
211 io = i; 163 io = i;
212 pnpbios_parse_allocated_dmaresource(res, io); 164 if (io == -1)
165 flags = IORESOURCE_DISABLED;
166 pnp_add_dma_resource(dev, io, flags);
213 break; 167 break;
214 168
215 case SMALL_TAG_PORT: 169 case SMALL_TAG_PORT:
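
For reference, the SMALL_TAG_IRQ and SMALL_TAG_DMA decodes above keep the highest set bit of the allocation mask. A self-contained worked example of the 16-bit IRQ case: for p[1] = 0x20, p[2] = 0x00 the helper below returns 5, and an all-zero mask returns -1, which the parser now records as an IORESOURCE_DISABLED placeholder instead of dropping the slot:

static int decode_irq_mask(unsigned char lo, unsigned char hi)
{
	int mask = lo + hi * 256;	/* same arithmetic as p[1] + p[2] * 256 */
	int i, irq = -1;

	for (i = 0; i < 16; i++, mask = mask >> 1)
		if (mask & 0x01)
			irq = i;	/* ends up holding the highest set bit */
	return irq;
}
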
@@ -217,7 +171,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
217 goto len_err; 171 goto len_err;
218 io = p[2] + p[3] * 256; 172 io = p[2] + p[3] * 256;
219 size = p[7]; 173 size = p[7];
220 pnpbios_parse_allocated_ioresource(res, io, size); 174 pnpbios_parse_allocated_ioresource(dev, io, size);
221 break; 175 break;
222 176
223 case SMALL_TAG_VENDOR: 177 case SMALL_TAG_VENDOR:
@@ -229,7 +183,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
229 goto len_err; 183 goto len_err;
230 io = p[1] + p[2] * 256; 184 io = p[1] + p[2] * 256;
231 size = p[3]; 185 size = p[3];
232 pnpbios_parse_allocated_ioresource(res, io, size); 186 pnpbios_parse_allocated_ioresource(dev, io, size);
233 break; 187 break;
234 188
235 case SMALL_TAG_END: 189 case SMALL_TAG_END:
@@ -239,9 +193,8 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
239 193
240 default: /* an unkown tag */ 194 default: /* an unkown tag */
241len_err: 195len_err:
242 printk(KERN_ERR 196 dev_err(&dev->dev, "unknown tag %#x length %d\n",
243 "PnPBIOS: Unknown tag '0x%x', length '%d'.\n", 197 tag, len);
244 tag, len);
245 break; 198 break;
246 } 199 }
247 200
@@ -252,8 +205,7 @@ len_err:
252 p += len + 1; 205 p += len + 1;
253 } 206 }
254 207
255 printk(KERN_ERR 208 dev_err(&dev->dev, "no end tag in resource structure\n");
256 "PnPBIOS: Resource structure does not contain an end tag.\n");
257 209
258 return NULL; 210 return NULL;
259} 211}
@@ -262,7 +214,8 @@ len_err:
262 * Resource Configuration Options 214 * Resource Configuration Options
263 */ 215 */
264 216
265static __init void pnpbios_parse_mem_option(unsigned char *p, int size, 217static __init void pnpbios_parse_mem_option(struct pnp_dev *dev,
218 unsigned char *p, int size,
266 struct pnp_option *option) 219 struct pnp_option *option)
267{ 220{
268 struct pnp_mem *mem; 221 struct pnp_mem *mem;
@@ -275,10 +228,11 @@ static __init void pnpbios_parse_mem_option(unsigned char *p, int size,
275 mem->align = (p[9] << 8) | p[8]; 228 mem->align = (p[9] << 8) | p[8];
276 mem->size = ((p[11] << 8) | p[10]) << 8; 229 mem->size = ((p[11] << 8) | p[10]) << 8;
277 mem->flags = p[3]; 230 mem->flags = p[3];
278 pnp_register_mem_resource(option, mem); 231 pnp_register_mem_resource(dev, option, mem);
279} 232}
280 233
281static __init void pnpbios_parse_mem32_option(unsigned char *p, int size, 234static __init void pnpbios_parse_mem32_option(struct pnp_dev *dev,
235 unsigned char *p, int size,
282 struct pnp_option *option) 236 struct pnp_option *option)
283{ 237{
284 struct pnp_mem *mem; 238 struct pnp_mem *mem;
@@ -291,10 +245,11 @@ static __init void pnpbios_parse_mem32_option(unsigned char *p, int size,
291 mem->align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12]; 245 mem->align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12];
292 mem->size = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16]; 246 mem->size = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16];
293 mem->flags = p[3]; 247 mem->flags = p[3];
294 pnp_register_mem_resource(option, mem); 248 pnp_register_mem_resource(dev, option, mem);
295} 249}
296 250
297static __init void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, 251static __init void pnpbios_parse_fixed_mem32_option(struct pnp_dev *dev,
252 unsigned char *p, int size,
298 struct pnp_option *option) 253 struct pnp_option *option)
299{ 254{
300 struct pnp_mem *mem; 255 struct pnp_mem *mem;
@@ -306,11 +261,12 @@ static __init void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
306 mem->size = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; 261 mem->size = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8];
307 mem->align = 0; 262 mem->align = 0;
308 mem->flags = p[3]; 263 mem->flags = p[3];
309 pnp_register_mem_resource(option, mem); 264 pnp_register_mem_resource(dev, option, mem);
310} 265}
311 266
312static __init void pnpbios_parse_irq_option(unsigned char *p, int size, 267static __init void pnpbios_parse_irq_option(struct pnp_dev *dev,
313 struct pnp_option *option) 268 unsigned char *p, int size,
269 struct pnp_option *option)
314{ 270{
315 struct pnp_irq *irq; 271 struct pnp_irq *irq;
316 unsigned long bits; 272 unsigned long bits;
@@ -324,11 +280,12 @@ static __init void pnpbios_parse_irq_option(unsigned char *p, int size,
324 irq->flags = p[3]; 280 irq->flags = p[3];
325 else 281 else
326 irq->flags = IORESOURCE_IRQ_HIGHEDGE; 282 irq->flags = IORESOURCE_IRQ_HIGHEDGE;
327 pnp_register_irq_resource(option, irq); 283 pnp_register_irq_resource(dev, option, irq);
328} 284}
329 285
330static __init void pnpbios_parse_dma_option(unsigned char *p, int size, 286static __init void pnpbios_parse_dma_option(struct pnp_dev *dev,
331 struct pnp_option *option) 287 unsigned char *p, int size,
288 struct pnp_option *option)
332{ 289{
333 struct pnp_dma *dma; 290 struct pnp_dma *dma;
334 291
@@ -337,10 +294,11 @@ static __init void pnpbios_parse_dma_option(unsigned char *p, int size,
337 return; 294 return;
338 dma->map = p[1]; 295 dma->map = p[1];
339 dma->flags = p[2]; 296 dma->flags = p[2];
340 pnp_register_dma_resource(option, dma); 297 pnp_register_dma_resource(dev, option, dma);
341} 298}
342 299
343static __init void pnpbios_parse_port_option(unsigned char *p, int size, 300static __init void pnpbios_parse_port_option(struct pnp_dev *dev,
301 unsigned char *p, int size,
344 struct pnp_option *option) 302 struct pnp_option *option)
345{ 303{
346 struct pnp_port *port; 304 struct pnp_port *port;
@@ -353,10 +311,11 @@ static __init void pnpbios_parse_port_option(unsigned char *p, int size,
353 port->align = p[6]; 311 port->align = p[6];
354 port->size = p[7]; 312 port->size = p[7];
355 port->flags = p[1] ? PNP_PORT_FLAG_16BITADDR : 0; 313 port->flags = p[1] ? PNP_PORT_FLAG_16BITADDR : 0;
356 pnp_register_port_resource(option, port); 314 pnp_register_port_resource(dev, option, port);
357} 315}
358 316
359static __init void pnpbios_parse_fixed_port_option(unsigned char *p, int size, 317static __init void pnpbios_parse_fixed_port_option(struct pnp_dev *dev,
318 unsigned char *p, int size,
360 struct pnp_option *option) 319 struct pnp_option *option)
361{ 320{
362 struct pnp_port *port; 321 struct pnp_port *port;
@@ -368,7 +327,7 @@ static __init void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
368 port->size = p[3]; 327 port->size = p[3];
369 port->align = 0; 328 port->align = 0;
370 port->flags = PNP_PORT_FLAG_FIXED; 329 port->flags = PNP_PORT_FLAG_FIXED;
371 pnp_register_port_resource(option, port); 330 pnp_register_port_resource(dev, option, port);
372} 331}
373 332
374static __init unsigned char * 333static __init unsigned char *
@@ -382,6 +341,8 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
382 if (!p) 341 if (!p)
383 return NULL; 342 return NULL;
384 343
344 dev_dbg(&dev->dev, "parse resource options\n");
345
385 option_independent = option = pnp_register_independent_option(dev); 346 option_independent = option = pnp_register_independent_option(dev);
386 if (!option) 347 if (!option)
387 return NULL; 348 return NULL;
@@ -402,37 +363,37 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
402 case LARGE_TAG_MEM: 363 case LARGE_TAG_MEM:
403 if (len != 9) 364 if (len != 9)
404 goto len_err; 365 goto len_err;
405 pnpbios_parse_mem_option(p, len, option); 366 pnpbios_parse_mem_option(dev, p, len, option);
406 break; 367 break;
407 368
408 case LARGE_TAG_MEM32: 369 case LARGE_TAG_MEM32:
409 if (len != 17) 370 if (len != 17)
410 goto len_err; 371 goto len_err;
411 pnpbios_parse_mem32_option(p, len, option); 372 pnpbios_parse_mem32_option(dev, p, len, option);
412 break; 373 break;
413 374
414 case LARGE_TAG_FIXEDMEM32: 375 case LARGE_TAG_FIXEDMEM32:
415 if (len != 9) 376 if (len != 9)
416 goto len_err; 377 goto len_err;
417 pnpbios_parse_fixed_mem32_option(p, len, option); 378 pnpbios_parse_fixed_mem32_option(dev, p, len, option);
418 break; 379 break;
419 380
420 case SMALL_TAG_IRQ: 381 case SMALL_TAG_IRQ:
421 if (len < 2 || len > 3) 382 if (len < 2 || len > 3)
422 goto len_err; 383 goto len_err;
423 pnpbios_parse_irq_option(p, len, option); 384 pnpbios_parse_irq_option(dev, p, len, option);
424 break; 385 break;
425 386
426 case SMALL_TAG_DMA: 387 case SMALL_TAG_DMA:
427 if (len != 2) 388 if (len != 2)
428 goto len_err; 389 goto len_err;
429 pnpbios_parse_dma_option(p, len, option); 390 pnpbios_parse_dma_option(dev, p, len, option);
430 break; 391 break;
431 392
432 case SMALL_TAG_PORT: 393 case SMALL_TAG_PORT:
433 if (len != 7) 394 if (len != 7)
434 goto len_err; 395 goto len_err;
435 pnpbios_parse_port_option(p, len, option); 396 pnpbios_parse_port_option(dev, p, len, option);
436 break; 397 break;
437 398
438 case SMALL_TAG_VENDOR: 399 case SMALL_TAG_VENDOR:
@@ -442,7 +403,7 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
442 case SMALL_TAG_FIXEDPORT: 403 case SMALL_TAG_FIXEDPORT:
443 if (len != 3) 404 if (len != 3)
444 goto len_err; 405 goto len_err;
445 pnpbios_parse_fixed_port_option(p, len, option); 406 pnpbios_parse_fixed_port_option(dev, p, len, option);
446 break; 407 break;
447 408
448 case SMALL_TAG_STARTDEP: 409 case SMALL_TAG_STARTDEP:
@@ -460,9 +421,10 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
460 if (len != 0) 421 if (len != 0)
461 goto len_err; 422 goto len_err;
462 if (option_independent == option) 423 if (option_independent == option)
463 printk(KERN_WARNING 424 dev_warn(&dev->dev, "missing "
464 "PnPBIOS: Missing SMALL_TAG_STARTDEP tag\n"); 425 "SMALL_TAG_STARTDEP tag\n");
465 option = option_independent; 426 option = option_independent;
427 dev_dbg(&dev->dev, "end dependent options\n");
466 break; 428 break;
467 429
468 case SMALL_TAG_END: 430 case SMALL_TAG_END:
@@ -470,9 +432,8 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
470 432
471 default: /* an unkown tag */ 433 default: /* an unkown tag */
472len_err: 434len_err:
473 printk(KERN_ERR 435 dev_err(&dev->dev, "unknown tag %#x length %d\n",
474 "PnPBIOS: Unknown tag '0x%x', length '%d'.\n", 436 tag, len);
475 tag, len);
476 break; 437 break;
477 } 438 }
478 439
@@ -483,8 +444,7 @@ len_err:
483 p += len + 1; 444 p += len + 1;
484 } 445 }
485 446
486 printk(KERN_ERR 447 dev_err(&dev->dev, "no end tag in resource structure\n");
487 "PnPBIOS: Resource structure does not contain an end tag.\n");
488 448
489 return NULL; 449 return NULL;
490} 450}
@@ -493,32 +453,12 @@ len_err:
493 * Compatible Device IDs 453 * Compatible Device IDs
494 */ 454 */
495 455
496#define HEX(id,a) hex[((id)>>a) & 15]
497#define CHAR(id,a) (0x40 + (((id)>>a) & 31))
498
499void pnpid32_to_pnpid(u32 id, char *str)
500{
501 const char *hex = "0123456789abcdef";
502
503 id = be32_to_cpu(id);
504 str[0] = CHAR(id, 26);
505 str[1] = CHAR(id, 21);
506 str[2] = CHAR(id, 16);
507 str[3] = HEX(id, 12);
508 str[4] = HEX(id, 8);
509 str[5] = HEX(id, 4);
510 str[6] = HEX(id, 0);
511 str[7] = '\0';
512}
513
514#undef CHAR
515#undef HEX
516
517static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p, 456static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p,
518 unsigned char *end, 457 unsigned char *end,
519 struct pnp_dev *dev) 458 struct pnp_dev *dev)
520{ 459{
521 int len, tag; 460 int len, tag;
461 u32 eisa_id;
522 char id[8]; 462 char id[8];
523 struct pnp_id *dev_id; 463 struct pnp_id *dev_id;
524 464
@@ -548,13 +488,11 @@ static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p,
548 case SMALL_TAG_COMPATDEVID: /* compatible ID */ 488 case SMALL_TAG_COMPATDEVID: /* compatible ID */
549 if (len != 4) 489 if (len != 4)
550 goto len_err; 490 goto len_err;
551 dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL); 491 eisa_id = p[1] | p[2] << 8 | p[3] << 16 | p[4] << 24;
492 pnp_eisa_id_to_string(eisa_id & PNP_EISA_ID_MASK, id);
493 dev_id = pnp_add_id(dev, id);
552 if (!dev_id) 494 if (!dev_id)
553 return NULL; 495 return NULL;
554 pnpid32_to_pnpid(p[1] | p[2] << 8 | p[3] << 16 | p[4] <<
555 24, id);
556 memcpy(&dev_id->id, id, 7);
557 pnp_add_id(dev_id, dev);
558 break; 496 break;
559 497
560 case SMALL_TAG_END: 498 case SMALL_TAG_END:
@@ -564,9 +502,8 @@ static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p,
564 502
565 default: /* an unkown tag */ 503 default: /* an unkown tag */
566len_err: 504len_err:
567 printk(KERN_ERR 505 dev_err(&dev->dev, "unknown tag %#x length %d\n",
568 "PnPBIOS: Unknown tag '0x%x', length '%d'.\n", 506 tag, len);
569 tag, len);
570 break; 507 break;
571 } 508 }
572 509
@@ -577,8 +514,7 @@ len_err:
577 p += len + 1; 514 p += len + 1;
578 } 515 }
579 516
580 printk(KERN_ERR 517 dev_err(&dev->dev, "no end tag in resource structure\n");
581 "PnPBIOS: Resource structure does not contain an end tag.\n");
582 518
583 return NULL; 519 return NULL;
584} 520}
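
The removed pnpid32_to_pnpid() above documents the EISA id layout: after the byte swap, bits 30..16 hold three 5-bit characters and bits 15..0 hold four hex digits, so 0x41d00c01 decodes to "PNP0c01". The shared pnp_eisa_id_to_string() is assumed to produce an equivalent 7-character id; a compact restatement of the deleted conversion, kept here for reference:

static void example_eisa_to_string(u32 id, char *str)	/* mirrors the deleted helper */
{
	const char *hex = "0123456789abcdef";

	id = be32_to_cpu(id);
	str[0] = 0x40 + ((id >> 26) & 31);	/* 'P' for field value 0x10 */
	str[1] = 0x40 + ((id >> 21) & 31);
	str[2] = 0x40 + ((id >> 16) & 31);
	str[3] = hex[(id >> 12) & 15];
	str[4] = hex[(id >> 8) & 15];
	str[5] = hex[(id >> 4) & 15];
	str[6] = hex[id & 15];
	str[7] = '\0';
}
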
@@ -587,7 +523,8 @@ len_err:
587 * Allocated Resource Encoding 523 * Allocated Resource Encoding
588 */ 524 */
589 525
590static void pnpbios_encode_mem(unsigned char *p, struct resource *res) 526static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p,
527 struct resource *res)
591{ 528{
592 unsigned long base = res->start; 529 unsigned long base = res->start;
593 unsigned long len = res->end - res->start + 1; 530 unsigned long len = res->end - res->start + 1;
@@ -598,9 +535,13 @@ static void pnpbios_encode_mem(unsigned char *p, struct resource *res)
598 p[7] = ((base >> 8) >> 8) & 0xff; 535 p[7] = ((base >> 8) >> 8) & 0xff;
599 p[10] = (len >> 8) & 0xff; 536 p[10] = (len >> 8) & 0xff;
600 p[11] = ((len >> 8) >> 8) & 0xff; 537 p[11] = ((len >> 8) >> 8) & 0xff;
538
539 dev_dbg(&dev->dev, " encode mem %#llx-%#llx\n",
540 (unsigned long long) res->start, (unsigned long long) res->end);
601} 541}
602 542
603static void pnpbios_encode_mem32(unsigned char *p, struct resource *res) 543static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
544 struct resource *res)
604{ 545{
605 unsigned long base = res->start; 546 unsigned long base = res->start;
606 unsigned long len = res->end - res->start + 1; 547 unsigned long len = res->end - res->start + 1;
@@ -617,9 +558,13 @@ static void pnpbios_encode_mem32(unsigned char *p, struct resource *res)
617 p[17] = (len >> 8) & 0xff; 558 p[17] = (len >> 8) & 0xff;
618 p[18] = (len >> 16) & 0xff; 559 p[18] = (len >> 16) & 0xff;
619 p[19] = (len >> 24) & 0xff; 560 p[19] = (len >> 24) & 0xff;
561
562 dev_dbg(&dev->dev, " encode mem32 %#llx-%#llx\n",
563 (unsigned long long) res->start, (unsigned long long) res->end);
620} 564}
621 565
622static void pnpbios_encode_fixed_mem32(unsigned char *p, struct resource *res) 566static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
567 struct resource *res)
623{ 568{
624 unsigned long base = res->start; 569 unsigned long base = res->start;
625 unsigned long len = res->end - res->start + 1; 570 unsigned long len = res->end - res->start + 1;
@@ -632,26 +577,38 @@ static void pnpbios_encode_fixed_mem32(unsigned char *p, struct resource *res)
632 p[9] = (len >> 8) & 0xff; 577 p[9] = (len >> 8) & 0xff;
633 p[10] = (len >> 16) & 0xff; 578 p[10] = (len >> 16) & 0xff;
634 p[11] = (len >> 24) & 0xff; 579 p[11] = (len >> 24) & 0xff;
580
581 dev_dbg(&dev->dev, " encode fixed_mem32 %#llx-%#llx\n",
582 (unsigned long long) res->start, (unsigned long long) res->end);
635} 583}
636 584
637static void pnpbios_encode_irq(unsigned char *p, struct resource *res) 585static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p,
586 struct resource *res)
638{ 587{
639 unsigned long map = 0; 588 unsigned long map = 0;
640 589
641 map = 1 << res->start; 590 map = 1 << res->start;
642 p[1] = map & 0xff; 591 p[1] = map & 0xff;
643 p[2] = (map >> 8) & 0xff; 592 p[2] = (map >> 8) & 0xff;
593
594 dev_dbg(&dev->dev, " encode irq %llu\n",
595 (unsigned long long)res->start);
644} 596}
645 597
646static void pnpbios_encode_dma(unsigned char *p, struct resource *res) 598static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
599 struct resource *res)
647{ 600{
648 unsigned long map = 0; 601 unsigned long map = 0;
649 602
650 map = 1 << res->start; 603 map = 1 << res->start;
651 p[1] = map & 0xff; 604 p[1] = map & 0xff;
605
606 dev_dbg(&dev->dev, " encode dma %llu\n",
607 (unsigned long long)res->start);
652} 608}
653 609
654static void pnpbios_encode_port(unsigned char *p, struct resource *res) 610static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
611 struct resource *res)
655{ 612{
656 unsigned long base = res->start; 613 unsigned long base = res->start;
657 unsigned long len = res->end - res->start + 1; 614 unsigned long len = res->end - res->start + 1;
@@ -661,9 +618,13 @@ static void pnpbios_encode_port(unsigned char *p, struct resource *res)
661 p[4] = base & 0xff; 618 p[4] = base & 0xff;
662 p[5] = (base >> 8) & 0xff; 619 p[5] = (base >> 8) & 0xff;
663 p[7] = len & 0xff; 620 p[7] = len & 0xff;
621
622 dev_dbg(&dev->dev, " encode io %#llx-%#llx\n",
623 (unsigned long long) res->start, (unsigned long long) res->end);
664} 624}
665 625
666static void pnpbios_encode_fixed_port(unsigned char *p, struct resource *res) 626static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
627 struct resource *res)
667{ 628{
668 unsigned long base = res->start; 629 unsigned long base = res->start;
669 unsigned long len = res->end - res->start + 1; 630 unsigned long len = res->end - res->start + 1;
@@ -671,13 +632,15 @@ static void pnpbios_encode_fixed_port(unsigned char *p, struct resource *res)
671 p[1] = base & 0xff; 632 p[1] = base & 0xff;
672 p[2] = (base >> 8) & 0xff; 633 p[2] = (base >> 8) & 0xff;
673 p[3] = len & 0xff; 634 p[3] = len & 0xff;
635
636 dev_dbg(&dev->dev, " encode fixed_io %#llx-%#llx\n",
637 (unsigned long long) res->start, (unsigned long long) res->end);
674} 638}
675 639
676static unsigned char *pnpbios_encode_allocated_resource_data(unsigned char *p, 640static unsigned char *pnpbios_encode_allocated_resource_data(struct pnp_dev
677 unsigned char *end, 641 *dev,
678 struct 642 unsigned char *p,
679 pnp_resource_table 643 unsigned char *end)
680 *res)
681{ 644{
682 unsigned int len, tag; 645 unsigned int len, tag;
683 int port = 0, irq = 0, dma = 0, mem = 0; 646 int port = 0, irq = 0, dma = 0, mem = 0;
@@ -701,42 +664,48 @@ static unsigned char *pnpbios_encode_allocated_resource_data(unsigned char *p,
701 case LARGE_TAG_MEM: 664 case LARGE_TAG_MEM:
702 if (len != 9) 665 if (len != 9)
703 goto len_err; 666 goto len_err;
704 pnpbios_encode_mem(p, &res->mem_resource[mem]); 667 pnpbios_encode_mem(dev, p,
668 pnp_get_resource(dev, IORESOURCE_MEM, mem));
705 mem++; 669 mem++;
706 break; 670 break;
707 671
708 case LARGE_TAG_MEM32: 672 case LARGE_TAG_MEM32:
709 if (len != 17) 673 if (len != 17)
710 goto len_err; 674 goto len_err;
711 pnpbios_encode_mem32(p, &res->mem_resource[mem]); 675 pnpbios_encode_mem32(dev, p,
676 pnp_get_resource(dev, IORESOURCE_MEM, mem));
712 mem++; 677 mem++;
713 break; 678 break;
714 679
715 case LARGE_TAG_FIXEDMEM32: 680 case LARGE_TAG_FIXEDMEM32:
716 if (len != 9) 681 if (len != 9)
717 goto len_err; 682 goto len_err;
718 pnpbios_encode_fixed_mem32(p, &res->mem_resource[mem]); 683 pnpbios_encode_fixed_mem32(dev, p,
684 pnp_get_resource(dev, IORESOURCE_MEM, mem));
719 mem++; 685 mem++;
720 break; 686 break;
721 687
722 case SMALL_TAG_IRQ: 688 case SMALL_TAG_IRQ:
723 if (len < 2 || len > 3) 689 if (len < 2 || len > 3)
724 goto len_err; 690 goto len_err;
725 pnpbios_encode_irq(p, &res->irq_resource[irq]); 691 pnpbios_encode_irq(dev, p,
692 pnp_get_resource(dev, IORESOURCE_IRQ, irq));
726 irq++; 693 irq++;
727 break; 694 break;
728 695
729 case SMALL_TAG_DMA: 696 case SMALL_TAG_DMA:
730 if (len != 2) 697 if (len != 2)
731 goto len_err; 698 goto len_err;
732 pnpbios_encode_dma(p, &res->dma_resource[dma]); 699 pnpbios_encode_dma(dev, p,
700 pnp_get_resource(dev, IORESOURCE_DMA, dma));
733 dma++; 701 dma++;
734 break; 702 break;
735 703
736 case SMALL_TAG_PORT: 704 case SMALL_TAG_PORT:
737 if (len != 7) 705 if (len != 7)
738 goto len_err; 706 goto len_err;
739 pnpbios_encode_port(p, &res->port_resource[port]); 707 pnpbios_encode_port(dev, p,
708 pnp_get_resource(dev, IORESOURCE_IO, port));
740 port++; 709 port++;
741 break; 710 break;
742 711
@@ -747,7 +716,8 @@ static unsigned char *pnpbios_encode_allocated_resource_data(unsigned char *p,
747 case SMALL_TAG_FIXEDPORT: 716 case SMALL_TAG_FIXEDPORT:
748 if (len != 3) 717 if (len != 3)
749 goto len_err; 718 goto len_err;
750 pnpbios_encode_fixed_port(p, &res->port_resource[port]); 719 pnpbios_encode_fixed_port(dev, p,
720 pnp_get_resource(dev, IORESOURCE_IO, port));
751 port++; 721 port++;
752 break; 722 break;
753 723
@@ -758,9 +728,8 @@ static unsigned char *pnpbios_encode_allocated_resource_data(unsigned char *p,
758 728
759 default: /* an unkown tag */ 729 default: /* an unkown tag */
760len_err: 730len_err:
761 printk(KERN_ERR 731 dev_err(&dev->dev, "unknown tag %#x length %d\n",
762 "PnPBIOS: Unknown tag '0x%x', length '%d'.\n", 732 tag, len);
763 tag, len);
764 break; 733 break;
765 } 734 }
766 735
@@ -771,8 +740,7 @@ len_err:
771 p += len + 1; 740 p += len + 1;
772 } 741 }
773 742
774 printk(KERN_ERR 743 dev_err(&dev->dev, "no end tag in resource structure\n");
775 "PnPBIOS: Resource structure does not contain an end tag.\n");
776 744
777 return NULL; 745 return NULL;
778} 746}
@@ -787,7 +755,7 @@ int __init pnpbios_parse_data_stream(struct pnp_dev *dev,
787 unsigned char *p = (char *)node->data; 755 unsigned char *p = (char *)node->data;
788 unsigned char *end = (char *)(node->data + node->size); 756 unsigned char *end = (char *)(node->data + node->size);
789 757
790 p = pnpbios_parse_allocated_resource_data(p, end, &dev->res); 758 p = pnpbios_parse_allocated_resource_data(dev, p, end);
791 if (!p) 759 if (!p)
792 return -EIO; 760 return -EIO;
793 p = pnpbios_parse_resource_option_data(p, end, dev); 761 p = pnpbios_parse_resource_option_data(p, end, dev);
@@ -799,25 +767,25 @@ int __init pnpbios_parse_data_stream(struct pnp_dev *dev,
799 return 0; 767 return 0;
800} 768}
801 769
802int pnpbios_read_resources_from_node(struct pnp_resource_table *res, 770int pnpbios_read_resources_from_node(struct pnp_dev *dev,
803 struct pnp_bios_node *node) 771 struct pnp_bios_node *node)
804{ 772{
805 unsigned char *p = (char *)node->data; 773 unsigned char *p = (char *)node->data;
806 unsigned char *end = (char *)(node->data + node->size); 774 unsigned char *end = (char *)(node->data + node->size);
807 775
808 p = pnpbios_parse_allocated_resource_data(p, end, res); 776 p = pnpbios_parse_allocated_resource_data(dev, p, end);
809 if (!p) 777 if (!p)
810 return -EIO; 778 return -EIO;
811 return 0; 779 return 0;
812} 780}
813 781
814int pnpbios_write_resources_to_node(struct pnp_resource_table *res, 782int pnpbios_write_resources_to_node(struct pnp_dev *dev,
815 struct pnp_bios_node *node) 783 struct pnp_bios_node *node)
816{ 784{
817 unsigned char *p = (char *)node->data; 785 unsigned char *p = (char *)node->data;
818 unsigned char *end = (char *)(node->data + node->size); 786 unsigned char *end = (char *)(node->data + node->size);
819 787
820 p = pnpbios_encode_allocated_resource_data(p, end, res); 788 p = pnpbios_encode_allocated_resource_data(dev, p, end);
821 if (!p) 789 if (!p)
822 return -EIO; 790 return -EIO;
823 return 0; 791 return 0;
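
The pnpbios_encode_* helpers above still write the allocation back into the BIOS data stream as little-endian bytes; the new dev argument exists only for the dev_dbg() output. A worked example of the bytes visible in the pnpbios_encode_port() hunk: a port range 0x3f8-0x3ff ends up as shown below (other bytes of the tag are set outside the lines quoted in this diff):

static void example_encode_port_bytes(unsigned char *p)
{
	unsigned long base = 0x3f8;
	unsigned long len = 0x3ff - 0x3f8 + 1;

	p[4] = base & 0xff;		/* 0xf8 */
	p[5] = (base >> 8) & 0xff;	/* 0x03 */
	p[7] = len & 0xff;		/* 0x08 */
}
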
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index e4daf4635c48..d049a2279fea 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -117,6 +117,7 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
117static void quirk_system_pci_resources(struct pnp_dev *dev) 117static void quirk_system_pci_resources(struct pnp_dev *dev)
118{ 118{
119 struct pci_dev *pdev = NULL; 119 struct pci_dev *pdev = NULL;
120 struct resource *res;
120 resource_size_t pnp_start, pnp_end, pci_start, pci_end; 121 resource_size_t pnp_start, pnp_end, pci_start, pci_end;
121 int i, j; 122 int i, j;
122 123
@@ -137,13 +138,15 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
137 138
138 pci_start = pci_resource_start(pdev, i); 139 pci_start = pci_resource_start(pdev, i);
139 pci_end = pci_resource_end(pdev, i); 140 pci_end = pci_resource_end(pdev, i);
140 for (j = 0; j < PNP_MAX_MEM; j++) { 141 for (j = 0;
141 if (!pnp_mem_valid(dev, j) || 142 (res = pnp_get_resource(dev, IORESOURCE_MEM, j));
142 pnp_mem_len(dev, j) == 0) 143 j++) {
144 if (res->flags & IORESOURCE_UNSET ||
145 (res->start == 0 && res->end == 0))
143 continue; 146 continue;
144 147
145 pnp_start = pnp_mem_start(dev, j); 148 pnp_start = res->start;
146 pnp_end = pnp_mem_end(dev, j); 149 pnp_end = res->end;
147 150
148 /* 151 /*
149 * If the PNP region doesn't overlap the PCI 152 * If the PNP region doesn't overlap the PCI
@@ -176,7 +179,7 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
176 pci_name(pdev), i, 179 pci_name(pdev), i,
177 (unsigned long long) pci_start, 180 (unsigned long long) pci_start,
178 (unsigned long long) pci_end); 181 (unsigned long long) pci_end);
179 pnp_mem_flags(dev, j) = 0; 182 res->flags = 0;
180 } 183 }
181 } 184 }
182 } 185 }
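
The quirk now walks the device's memory resources with an index loop rather than PNP_MAX_MEM, under the assumption that pnp_get_resource() returns NULL once the index passes the last resource of that type. The same idiom isolated, with a hypothetical function name:

static void example_walk_mem(struct pnp_dev *dev)
{
	struct resource *res;
	int i;

	for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
		if (res->flags & IORESOURCE_UNSET)
			continue;	/* slot exists but nothing assigned */
		dev_dbg(&dev->dev, "mem %d %#llx-%#llx\n", i,
			(unsigned long long) res->start,
			(unsigned long long) res->end);
	}
}
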
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index e50ebcffb962..2041620d5682 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -53,6 +53,8 @@ struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev)
53 if (dev->independent) 53 if (dev->independent)
54 dev_err(&dev->dev, "independent resource already registered\n"); 54 dev_err(&dev->dev, "independent resource already registered\n");
55 dev->independent = option; 55 dev->independent = option;
56
57 dev_dbg(&dev->dev, "new independent option\n");
56 return option; 58 return option;
57} 59}
58 60
@@ -70,12 +72,18 @@ struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev,
70 parent->next = option; 72 parent->next = option;
71 } else 73 } else
72 dev->dependent = option; 74 dev->dependent = option;
75
76 dev_dbg(&dev->dev, "new dependent option (priority %#x)\n", priority);
73 return option; 77 return option;
74} 78}
75 79
76int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data) 80int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option,
81 struct pnp_irq *data)
77{ 82{
78 struct pnp_irq *ptr; 83 struct pnp_irq *ptr;
84#ifdef DEBUG
85 char buf[PNP_IRQ_NR]; /* hex-encoded, so this is overkill but safe */
86#endif
79 87
80 ptr = option->irq; 88 ptr = option->irq;
81 while (ptr && ptr->next) 89 while (ptr && ptr->next)
@@ -94,10 +102,17 @@ int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data)
94 pcibios_penalize_isa_irq(i, 0); 102 pcibios_penalize_isa_irq(i, 0);
95 } 103 }
96#endif 104#endif
105
106#ifdef DEBUG
107 bitmap_scnprintf(buf, sizeof(buf), data->map, PNP_IRQ_NR);
108 dev_dbg(&dev->dev, " irq bitmask %s flags %#x\n", buf,
109 data->flags);
110#endif
97 return 0; 111 return 0;
98} 112}
99 113
100int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data) 114int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option,
115 struct pnp_dma *data)
101{ 116{
102 struct pnp_dma *ptr; 117 struct pnp_dma *ptr;
103 118
@@ -109,10 +124,13 @@ int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data)
109 else 124 else
110 option->dma = data; 125 option->dma = data;
111 126
127 dev_dbg(&dev->dev, " dma bitmask %#x flags %#x\n", data->map,
128 data->flags);
112 return 0; 129 return 0;
113} 130}
114 131
115int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data) 132int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option,
133 struct pnp_port *data)
116{ 134{
117 struct pnp_port *ptr; 135 struct pnp_port *ptr;
118 136
@@ -124,10 +142,14 @@ int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data)
124 else 142 else
125 option->port = data; 143 option->port = data;
126 144
145 dev_dbg(&dev->dev, " io "
146 "min %#x max %#x align %d size %d flags %#x\n",
147 data->min, data->max, data->align, data->size, data->flags);
127 return 0; 148 return 0;
128} 149}
129 150
130int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data) 151int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option,
152 struct pnp_mem *data)
131{ 153{
132 struct pnp_mem *ptr; 154 struct pnp_mem *ptr;
133 155
@@ -138,6 +160,10 @@ int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data)
138 ptr->next = data; 160 ptr->next = data;
139 else 161 else
140 option->mem = data; 162 option->mem = data;
163
164 dev_dbg(&dev->dev, " mem "
165 "min %#x max %#x align %d size %d flags %#x\n",
166 data->min, data->max, data->align, data->size, data->flags);
141 return 0; 167 return 0;
142} 168}
143 169
@@ -213,17 +239,18 @@ void pnp_free_option(struct pnp_option *option)
213#define cannot_compare(flags) \ 239#define cannot_compare(flags) \
214((flags) & (IORESOURCE_UNSET | IORESOURCE_DISABLED)) 240((flags) & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
215 241
216int pnp_check_port(struct pnp_dev *dev, int idx) 242int pnp_check_port(struct pnp_dev *dev, struct resource *res)
217{ 243{
218 int tmp; 244 int i;
219 struct pnp_dev *tdev; 245 struct pnp_dev *tdev;
246 struct resource *tres;
220 resource_size_t *port, *end, *tport, *tend; 247 resource_size_t *port, *end, *tport, *tend;
221 248
222 port = &dev->res.port_resource[idx].start; 249 port = &res->start;
223 end = &dev->res.port_resource[idx].end; 250 end = &res->end;
224 251
225 /* if the resource doesn't exist, don't complain about it */ 252 /* if the resource doesn't exist, don't complain about it */
226 if (cannot_compare(dev->res.port_resource[idx].flags)) 253 if (cannot_compare(res->flags))
227 return 1; 254 return 1;
228 255
229 /* check if the resource is already in use, skip if the 256 /* check if the resource is already in use, skip if the
@@ -234,18 +261,18 @@ int pnp_check_port(struct pnp_dev *dev, int idx)
234 } 261 }
235 262
236 /* check if the resource is reserved */ 263 /* check if the resource is reserved */
237 for (tmp = 0; tmp < 8; tmp++) { 264 for (i = 0; i < 8; i++) {
238 int rport = pnp_reserve_io[tmp << 1]; 265 int rport = pnp_reserve_io[i << 1];
239 int rend = pnp_reserve_io[(tmp << 1) + 1] + rport - 1; 266 int rend = pnp_reserve_io[(i << 1) + 1] + rport - 1;
240 if (ranged_conflict(port, end, &rport, &rend)) 267 if (ranged_conflict(port, end, &rport, &rend))
241 return 0; 268 return 0;
242 } 269 }
243 270
244 /* check for internal conflicts */ 271 /* check for internal conflicts */
245 for (tmp = 0; tmp < PNP_MAX_PORT && tmp != idx; tmp++) { 272 for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
246 if (dev->res.port_resource[tmp].flags & IORESOURCE_IO) { 273 if (tres != res && tres->flags & IORESOURCE_IO) {
247 tport = &dev->res.port_resource[tmp].start; 274 tport = &tres->start;
248 tend = &dev->res.port_resource[tmp].end; 275 tend = &tres->end;
249 if (ranged_conflict(port, end, tport, tend)) 276 if (ranged_conflict(port, end, tport, tend))
250 return 0; 277 return 0;
251 } 278 }
@@ -255,13 +282,14 @@ int pnp_check_port(struct pnp_dev *dev, int idx)
255 pnp_for_each_dev(tdev) { 282 pnp_for_each_dev(tdev) {
256 if (tdev == dev) 283 if (tdev == dev)
257 continue; 284 continue;
258 for (tmp = 0; tmp < PNP_MAX_PORT; tmp++) { 285 for (i = 0;
259 if (tdev->res.port_resource[tmp].flags & IORESOURCE_IO) { 286 (tres = pnp_get_resource(tdev, IORESOURCE_IO, i));
260 if (cannot_compare 287 i++) {
261 (tdev->res.port_resource[tmp].flags)) 288 if (tres->flags & IORESOURCE_IO) {
289 if (cannot_compare(tres->flags))
262 continue; 290 continue;
263 tport = &tdev->res.port_resource[tmp].start; 291 tport = &tres->start;
264 tend = &tdev->res.port_resource[tmp].end; 292 tend = &tres->end;
265 if (ranged_conflict(port, end, tport, tend)) 293 if (ranged_conflict(port, end, tport, tend))
266 return 0; 294 return 0;
267 } 295 }
@@ -271,17 +299,18 @@ int pnp_check_port(struct pnp_dev *dev, int idx)
271 return 1; 299 return 1;
272} 300}
273 301
274int pnp_check_mem(struct pnp_dev *dev, int idx) 302int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
275{ 303{
276 int tmp; 304 int i;
277 struct pnp_dev *tdev; 305 struct pnp_dev *tdev;
306 struct resource *tres;
278 resource_size_t *addr, *end, *taddr, *tend; 307 resource_size_t *addr, *end, *taddr, *tend;
279 308
280 addr = &dev->res.mem_resource[idx].start; 309 addr = &res->start;
281 end = &dev->res.mem_resource[idx].end; 310 end = &res->end;
282 311
283 /* if the resource doesn't exist, don't complain about it */ 312 /* if the resource doesn't exist, don't complain about it */
284 if (cannot_compare(dev->res.mem_resource[idx].flags)) 313 if (cannot_compare(res->flags))
285 return 1; 314 return 1;
286 315
287 /* check if the resource is already in use, skip if the 316 /* check if the resource is already in use, skip if the
@@ -292,18 +321,18 @@ int pnp_check_mem(struct pnp_dev *dev, int idx)
292 } 321 }
293 322
294 /* check if the resource is reserved */ 323 /* check if the resource is reserved */
295 for (tmp = 0; tmp < 8; tmp++) { 324 for (i = 0; i < 8; i++) {
296 int raddr = pnp_reserve_mem[tmp << 1]; 325 int raddr = pnp_reserve_mem[i << 1];
297 int rend = pnp_reserve_mem[(tmp << 1) + 1] + raddr - 1; 326 int rend = pnp_reserve_mem[(i << 1) + 1] + raddr - 1;
298 if (ranged_conflict(addr, end, &raddr, &rend)) 327 if (ranged_conflict(addr, end, &raddr, &rend))
299 return 0; 328 return 0;
300 } 329 }
301 330
302 /* check for internal conflicts */ 331 /* check for internal conflicts */
303 for (tmp = 0; tmp < PNP_MAX_MEM && tmp != idx; tmp++) { 332 for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
304 if (dev->res.mem_resource[tmp].flags & IORESOURCE_MEM) { 333 if (tres != res && tres->flags & IORESOURCE_MEM) {
305 taddr = &dev->res.mem_resource[tmp].start; 334 taddr = &tres->start;
306 tend = &dev->res.mem_resource[tmp].end; 335 tend = &tres->end;
307 if (ranged_conflict(addr, end, taddr, tend)) 336 if (ranged_conflict(addr, end, taddr, tend))
308 return 0; 337 return 0;
309 } 338 }
@@ -313,13 +342,14 @@ int pnp_check_mem(struct pnp_dev *dev, int idx)
313 pnp_for_each_dev(tdev) { 342 pnp_for_each_dev(tdev) {
314 if (tdev == dev) 343 if (tdev == dev)
315 continue; 344 continue;
316 for (tmp = 0; tmp < PNP_MAX_MEM; tmp++) { 345 for (i = 0;
317 if (tdev->res.mem_resource[tmp].flags & IORESOURCE_MEM) { 346 (tres = pnp_get_resource(tdev, IORESOURCE_MEM, i));
318 if (cannot_compare 347 i++) {
319 (tdev->res.mem_resource[tmp].flags)) 348 if (tres->flags & IORESOURCE_MEM) {
349 if (cannot_compare(tres->flags))
320 continue; 350 continue;
321 taddr = &tdev->res.mem_resource[tmp].start; 351 taddr = &tres->start;
322 tend = &tdev->res.mem_resource[tmp].end; 352 tend = &tres->end;
323 if (ranged_conflict(addr, end, taddr, tend)) 353 if (ranged_conflict(addr, end, taddr, tend))
324 return 0; 354 return 0;
325 } 355 }
@@ -334,14 +364,17 @@ static irqreturn_t pnp_test_handler(int irq, void *dev_id)
334 return IRQ_HANDLED; 364 return IRQ_HANDLED;
335} 365}
336 366
337int pnp_check_irq(struct pnp_dev *dev, int idx) 367int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
338{ 368{
339 int tmp; 369 int i;
340 struct pnp_dev *tdev; 370 struct pnp_dev *tdev;
341 resource_size_t *irq = &dev->res.irq_resource[idx].start; 371 struct resource *tres;
372 resource_size_t *irq;
373
374 irq = &res->start;
342 375
343 /* if the resource doesn't exist, don't complain about it */ 376 /* if the resource doesn't exist, don't complain about it */
344 if (cannot_compare(dev->res.irq_resource[idx].flags)) 377 if (cannot_compare(res->flags))
345 return 1; 378 return 1;
346 379
347 /* check if the resource is valid */ 380 /* check if the resource is valid */
@@ -349,15 +382,15 @@ int pnp_check_irq(struct pnp_dev *dev, int idx)
349 return 0; 382 return 0;
350 383
351 /* check if the resource is reserved */ 384 /* check if the resource is reserved */
352 for (tmp = 0; tmp < 16; tmp++) { 385 for (i = 0; i < 16; i++) {
353 if (pnp_reserve_irq[tmp] == *irq) 386 if (pnp_reserve_irq[i] == *irq)
354 return 0; 387 return 0;
355 } 388 }
356 389
357 /* check for internal conflicts */ 390 /* check for internal conflicts */
358 for (tmp = 0; tmp < PNP_MAX_IRQ && tmp != idx; tmp++) { 391 for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_IRQ, i)); i++) {
359 if (dev->res.irq_resource[tmp].flags & IORESOURCE_IRQ) { 392 if (tres != res && tres->flags & IORESOURCE_IRQ) {
360 if (dev->res.irq_resource[tmp].start == *irq) 393 if (tres->start == *irq)
361 return 0; 394 return 0;
362 } 395 }
363 } 396 }
@@ -388,12 +421,13 @@ int pnp_check_irq(struct pnp_dev *dev, int idx)
388 pnp_for_each_dev(tdev) { 421 pnp_for_each_dev(tdev) {
389 if (tdev == dev) 422 if (tdev == dev)
390 continue; 423 continue;
391 for (tmp = 0; tmp < PNP_MAX_IRQ; tmp++) { 424 for (i = 0;
392 if (tdev->res.irq_resource[tmp].flags & IORESOURCE_IRQ) { 425 (tres = pnp_get_resource(tdev, IORESOURCE_IRQ, i));
393 if (cannot_compare 426 i++) {
394 (tdev->res.irq_resource[tmp].flags)) 427 if (tres->flags & IORESOURCE_IRQ) {
428 if (cannot_compare(tres->flags))
395 continue; 429 continue;
396 if ((tdev->res.irq_resource[tmp].start == *irq)) 430 if (tres->start == *irq)
397 return 0; 431 return 0;
398 } 432 }
399 } 433 }
@@ -402,15 +436,18 @@ int pnp_check_irq(struct pnp_dev *dev, int idx)
402 return 1; 436 return 1;
403} 437}
404 438
405int pnp_check_dma(struct pnp_dev *dev, int idx) 439int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
406{ 440{
407#ifndef CONFIG_IA64 441#ifndef CONFIG_IA64
408 int tmp; 442 int i;
409 struct pnp_dev *tdev; 443 struct pnp_dev *tdev;
410 resource_size_t *dma = &dev->res.dma_resource[idx].start; 444 struct resource *tres;
445 resource_size_t *dma;
446
447 dma = &res->start;
411 448
412 /* if the resource doesn't exist, don't complain about it */ 449 /* if the resource doesn't exist, don't complain about it */
413 if (cannot_compare(dev->res.dma_resource[idx].flags)) 450 if (cannot_compare(res->flags))
414 return 1; 451 return 1;
415 452
416 /* check if the resource is valid */ 453 /* check if the resource is valid */
@@ -418,15 +455,15 @@ int pnp_check_dma(struct pnp_dev *dev, int idx)
418 return 0; 455 return 0;
419 456
420 /* check if the resource is reserved */ 457 /* check if the resource is reserved */
421 for (tmp = 0; tmp < 8; tmp++) { 458 for (i = 0; i < 8; i++) {
422 if (pnp_reserve_dma[tmp] == *dma) 459 if (pnp_reserve_dma[i] == *dma)
423 return 0; 460 return 0;
424 } 461 }
425 462
426 /* check for internal conflicts */ 463 /* check for internal conflicts */
427 for (tmp = 0; tmp < PNP_MAX_DMA && tmp != idx; tmp++) { 464 for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_DMA, i)); i++) {
428 if (dev->res.dma_resource[tmp].flags & IORESOURCE_DMA) { 465 if (tres != res && tres->flags & IORESOURCE_DMA) {
429 if (dev->res.dma_resource[tmp].start == *dma) 466 if (tres->start == *dma)
430 return 0; 467 return 0;
431 } 468 }
432 } 469 }
@@ -443,12 +480,13 @@ int pnp_check_dma(struct pnp_dev *dev, int idx)
443 pnp_for_each_dev(tdev) { 480 pnp_for_each_dev(tdev) {
444 if (tdev == dev) 481 if (tdev == dev)
445 continue; 482 continue;
446 for (tmp = 0; tmp < PNP_MAX_DMA; tmp++) { 483 for (i = 0;
447 if (tdev->res.dma_resource[tmp].flags & IORESOURCE_DMA) { 484 (tres = pnp_get_resource(tdev, IORESOURCE_DMA, i));
448 if (cannot_compare 485 i++) {
449 (tdev->res.dma_resource[tmp].flags)) 486 if (tres->flags & IORESOURCE_DMA) {
487 if (cannot_compare(tres->flags))
450 continue; 488 continue;
451 if ((tdev->res.dma_resource[tmp].start == *dma)) 489 if (tres->start == *dma)
452 return 0; 490 return 0;
453 } 491 }
454 } 492 }
@@ -461,6 +499,193 @@ int pnp_check_dma(struct pnp_dev *dev, int idx)
461#endif 499#endif
462} 500}
463 501
502struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev,
503 unsigned int type, unsigned int num)
504{
505 struct pnp_resource_table *res = dev->res;
506
507 switch (type) {
508 case IORESOURCE_IO:
509 if (num >= PNP_MAX_PORT)
510 return NULL;
511 return &res->port[num];
512 case IORESOURCE_MEM:
513 if (num >= PNP_MAX_MEM)
514 return NULL;
515 return &res->mem[num];
516 case IORESOURCE_IRQ:
517 if (num >= PNP_MAX_IRQ)
518 return NULL;
519 return &res->irq[num];
520 case IORESOURCE_DMA:
521 if (num >= PNP_MAX_DMA)
522 return NULL;
523 return &res->dma[num];
524 }
525 return NULL;
526}
527
528struct resource *pnp_get_resource(struct pnp_dev *dev,
529 unsigned int type, unsigned int num)
530{
531 struct pnp_resource *pnp_res;
532
533 pnp_res = pnp_get_pnp_resource(dev, type, num);
534 if (pnp_res)
535 return &pnp_res->res;
536
537 return NULL;
538}
539EXPORT_SYMBOL(pnp_get_resource);
540
541static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev, int type)
542{
543 struct pnp_resource *pnp_res;
544 int i;
545
546 switch (type) {
547 case IORESOURCE_IO:
548 for (i = 0; i < PNP_MAX_PORT; i++) {
549 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, i);
550 if (pnp_res && !pnp_resource_valid(&pnp_res->res))
551 return pnp_res;
552 }
553 break;
554 case IORESOURCE_MEM:
555 for (i = 0; i < PNP_MAX_MEM; i++) {
556 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, i);
557 if (pnp_res && !pnp_resource_valid(&pnp_res->res))
558 return pnp_res;
559 }
560 break;
561 case IORESOURCE_IRQ:
562 for (i = 0; i < PNP_MAX_IRQ; i++) {
563 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, i);
564 if (pnp_res && !pnp_resource_valid(&pnp_res->res))
565 return pnp_res;
566 }
567 break;
568 case IORESOURCE_DMA:
569 for (i = 0; i < PNP_MAX_DMA; i++) {
570 pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, i);
571 if (pnp_res && !pnp_resource_valid(&pnp_res->res))
572 return pnp_res;
573 }
574 break;
575 }
576 return NULL;
577}
578
579struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
580 int flags)
581{
582 struct pnp_resource *pnp_res;
583 struct resource *res;
584 static unsigned char warned;
585
586 pnp_res = pnp_new_resource(dev, IORESOURCE_IRQ);
587 if (!pnp_res) {
588 if (!warned) {
589 dev_err(&dev->dev, "can't add resource for IRQ %d\n",
590 irq);
591 warned = 1;
592 }
593 return NULL;
594 }
595
596 res = &pnp_res->res;
597 res->flags = IORESOURCE_IRQ | flags;
598 res->start = irq;
599 res->end = irq;
600
601 dev_dbg(&dev->dev, " add irq %d flags %#x\n", irq, flags);
602 return pnp_res;
603}
604
605struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
606 int flags)
607{
608 struct pnp_resource *pnp_res;
609 struct resource *res;
610 static unsigned char warned;
611
612 pnp_res = pnp_new_resource(dev, IORESOURCE_DMA);
613 if (!pnp_res) {
614 if (!warned) {
615 dev_err(&dev->dev, "can't add resource for DMA %d\n",
616 dma);
617 warned = 1;
618 }
619 return NULL;
620 }
621
622 res = &pnp_res->res;
623 res->flags = IORESOURCE_DMA | flags;
624 res->start = dma;
625 res->end = dma;
626
627 dev_dbg(&dev->dev, " add dma %d flags %#x\n", dma, flags);
628 return pnp_res;
629}
630
631struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
632 resource_size_t start,
633 resource_size_t end, int flags)
634{
635 struct pnp_resource *pnp_res;
636 struct resource *res;
637 static unsigned char warned;
638
639 pnp_res = pnp_new_resource(dev, IORESOURCE_IO);
640 if (!pnp_res) {
641 if (!warned) {
642 dev_err(&dev->dev, "can't add resource for IO "
643 "%#llx-%#llx\n",(unsigned long long) start,
644 (unsigned long long) end);
645 warned = 1;
646 }
647 return NULL;
648 }
649
650 res = &pnp_res->res;
651 res->flags = IORESOURCE_IO | flags;
652 res->start = start;
653 res->end = end;
654
655 dev_dbg(&dev->dev, " add io %#llx-%#llx flags %#x\n",
656 (unsigned long long) start, (unsigned long long) end, flags);
657 return pnp_res;
658}
659
660struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
661 resource_size_t start,
662 resource_size_t end, int flags)
663{
664 struct pnp_resource *pnp_res;
665 struct resource *res;
666 static unsigned char warned;
667
668 pnp_res = pnp_new_resource(dev, IORESOURCE_MEM);
669 if (!pnp_res) {
670 if (!warned) {
671 dev_err(&dev->dev, "can't add resource for MEM "
672 "%#llx-%#llx\n",(unsigned long long) start,
673 (unsigned long long) end);
674 warned = 1;
675 }
676 return NULL;
677 }
678
679 res = &pnp_res->res;
680 res->flags = IORESOURCE_MEM | flags;
681 res->start = start;
682 res->end = end;
683
684 dev_dbg(&dev->dev, " add mem %#llx-%#llx flags %#x\n",
685 (unsigned long long) start, (unsigned long long) end, flags);
686 return pnp_res;
687}
688
464/* format is: pnp_reserve_irq=irq1[,irq2] .... */ 689/* format is: pnp_reserve_irq=irq1[,irq2] .... */
465static int __init pnp_setup_reserve_irq(char *str) 690static int __init pnp_setup_reserve_irq(char *str)
466{ 691{
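
Besides reworking the pnp_check_*() conflict checks to take a struct resource pointer, the hunks above add pnp_get_resource()/pnp_get_pnp_resource() and a family of pnp_add_*_resource() helpers for backends to record a device's assigned resources. A hedged sketch of how a backend might call them; the surrounding function and the example values (IRQ 5, ports 0x220-0x22f, a 4 KiB memory window) are invented, only the helper signatures come from the patch:

/* Sketch: record the resources a firmware interface reported for dev. */
#include <linux/pnp.h>
#include <linux/ioport.h>

static void example_fill_resources(struct pnp_dev *dev)
{
	pnp_add_irq_resource(dev, 5, 0);
	pnp_add_io_resource(dev, 0x220, 0x22f, 0);
	pnp_add_mem_resource(dev, 0xd0000000, 0xd0000fff, 0);

	/* Anything not reported stays IORESOURCE_UNSET and is ignored by
	 * the pnp_check_*() routines via cannot_compare(). */
}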
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index 13c608f5fb30..3eba85ed729c 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -25,3 +25,66 @@ int pnp_is_active(struct pnp_dev *dev)
25} 25}
26 26
27EXPORT_SYMBOL(pnp_is_active); 27EXPORT_SYMBOL(pnp_is_active);
28
29/*
30 * Functionally similar to acpi_ex_eisa_id_to_string(), but that's
31 * buried in the ACPI CA, and we can't depend on it being present.
32 */
33void pnp_eisa_id_to_string(u32 id, char *str)
34{
35 id = be32_to_cpu(id);
36
37 /*
38 * According to the specs, the first three characters are five-bit
39 * compressed ASCII, and the left-over high order bit should be zero.
40 * However, the Linux ISAPNP code historically used six bits for the
41 * first character, and there seem to be IDs that depend on that,
42 * e.g., "nEC8241" in the Linux 8250_pnp serial driver and the
43 * FreeBSD sys/pc98/cbus/sio_cbus.c driver.
44 */
45 str[0] = 'A' + ((id >> 26) & 0x3f) - 1;
46 str[1] = 'A' + ((id >> 21) & 0x1f) - 1;
47 str[2] = 'A' + ((id >> 16) & 0x1f) - 1;
48 str[3] = hex_asc((id >> 12) & 0xf);
49 str[4] = hex_asc((id >> 8) & 0xf);
50 str[5] = hex_asc((id >> 4) & 0xf);
51 str[6] = hex_asc((id >> 0) & 0xf);
52 str[7] = '\0';
53}
54
55void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
56{
57#ifdef DEBUG
58 struct resource *res;
59 int i;
60
61 dev_dbg(&dev->dev, "current resources: %s\n", desc);
62
63 for (i = 0; i < PNP_MAX_IRQ; i++) {
64 res = pnp_get_resource(dev, IORESOURCE_IRQ, i);
65 if (res && !(res->flags & IORESOURCE_UNSET))
66 dev_dbg(&dev->dev, " irq %lld flags %#lx\n",
67 (unsigned long long) res->start, res->flags);
68 }
69 for (i = 0; i < PNP_MAX_DMA; i++) {
70 res = pnp_get_resource(dev, IORESOURCE_DMA, i);
71 if (res && !(res->flags & IORESOURCE_UNSET))
72 dev_dbg(&dev->dev, " dma %lld flags %#lx\n",
73 (unsigned long long) res->start, res->flags);
74 }
75 for (i = 0; i < PNP_MAX_PORT; i++) {
76 res = pnp_get_resource(dev, IORESOURCE_IO, i);
77 if (res && !(res->flags & IORESOURCE_UNSET))
78 dev_dbg(&dev->dev, " io %#llx-%#llx flags %#lx\n",
79 (unsigned long long) res->start,
80 (unsigned long long) res->end, res->flags);
81 }
82 for (i = 0; i < PNP_MAX_MEM; i++) {
83 res = pnp_get_resource(dev, IORESOURCE_MEM, i);
84 if (res && !(res->flags & IORESOURCE_UNSET))
85 dev_dbg(&dev->dev, " mem %#llx-%#llx flags %#lx\n",
86 (unsigned long long) res->start,
87 (unsigned long long) res->end, res->flags);
88 }
89#endif
90}
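
pnp_eisa_id_to_string() above unpacks three compressed-ASCII vendor letters (five bits each, historically six for the first) and four hex nibbles of product ID. A standalone userspace rendition of the same decode, shown only to make the bit layout concrete; it assumes the 32-bit ID has already been converted to CPU order, which the kernel helper does first with be32_to_cpu():

/* Userspace sketch of the pnp_eisa_id_to_string() decode above. */
#include <stdio.h>
#include <stdint.h>

static void eisa_id_to_string(uint32_t id, char *str)
{
	static const char hex[] = "0123456789abcdef";

	str[0] = 'A' + ((id >> 26) & 0x3f) - 1;	/* six bits, as noted above */
	str[1] = 'A' + ((id >> 21) & 0x1f) - 1;
	str[2] = 'A' + ((id >> 16) & 0x1f) - 1;
	str[3] = hex[(id >> 12) & 0xf];
	str[4] = hex[(id >> 8) & 0xf];
	str[5] = hex[(id >> 4) & 0xf];
	str[6] = hex[(id >> 0) & 0xf];
	str[7] = '\0';
}

int main(void)
{
	char buf[8];

	eisa_id_to_string(0x41d00c02, buf);	/* vendor "PNP", product 0x0c02 */
	printf("%s\n", buf);			/* prints PNP0c02 */
	return 0;
}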
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 55c4563986b3..9c2496dbeee4 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -56,14 +56,15 @@ static void reserve_range(struct pnp_dev *dev, resource_size_t start,
56 56
57static void reserve_resources_of_dev(struct pnp_dev *dev) 57static void reserve_resources_of_dev(struct pnp_dev *dev)
58{ 58{
59 struct resource *res;
59 int i; 60 int i;
60 61
61 for (i = 0; i < PNP_MAX_PORT; i++) { 62 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
62 if (!pnp_port_valid(dev, i)) 63 if (res->flags & IORESOURCE_UNSET)
63 continue; 64 continue;
64 if (pnp_port_start(dev, i) == 0) 65 if (res->start == 0)
65 continue; /* disabled */ 66 continue; /* disabled */
66 if (pnp_port_start(dev, i) < 0x100) 67 if (res->start < 0x100)
67 /* 68 /*
68 * Below 0x100 is only standard PC hardware 69 * Below 0x100 is only standard PC hardware
69 * (pics, kbd, timer, dma, ...) 70 * (pics, kbd, timer, dma, ...)
@@ -73,19 +74,17 @@ static void reserve_resources_of_dev(struct pnp_dev *dev)
73 * So, do nothing 74 * So, do nothing
74 */ 75 */
75 continue; 76 continue;
76 if (pnp_port_end(dev, i) < pnp_port_start(dev, i)) 77 if (res->end < res->start)
77 continue; /* invalid */ 78 continue; /* invalid */
78 79
79 reserve_range(dev, pnp_port_start(dev, i), 80 reserve_range(dev, res->start, res->end, 1);
80 pnp_port_end(dev, i), 1);
81 } 81 }
82 82
83 for (i = 0; i < PNP_MAX_MEM; i++) { 83 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
84 if (!pnp_mem_valid(dev, i)) 84 if (res->flags & IORESOURCE_UNSET)
85 continue; 85 continue;
86 86
87 reserve_range(dev, pnp_mem_start(dev, i), 87 reserve_range(dev, res->start, res->end, 0);
88 pnp_mem_end(dev, i), 0);
89 } 88 }
90} 89}
91 90
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index bdb9b7285b3d..71be36f18709 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -262,7 +262,7 @@ static void ds2760_battery_work(struct work_struct *work)
262 struct ds2760_device_info, monitor_work.work); 262 struct ds2760_device_info, monitor_work.work);
263 const int interval = HZ * 60; 263 const int interval = HZ * 60;
264 264
265 dev_dbg(di->dev, "%s\n", __FUNCTION__); 265 dev_dbg(di->dev, "%s\n", __func__);
266 266
267 ds2760_battery_update_status(di); 267 ds2760_battery_update_status(di);
268 queue_delayed_work(di->monitor_wqueue, &di->monitor_work, interval); 268 queue_delayed_work(di->monitor_wqueue, &di->monitor_work, interval);
@@ -275,7 +275,7 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
275{ 275{
276 struct ds2760_device_info *di = to_ds2760_device_info(psy); 276 struct ds2760_device_info *di = to_ds2760_device_info(psy);
277 277
278 dev_dbg(di->dev, "%s\n", __FUNCTION__); 278 dev_dbg(di->dev, "%s\n", __func__);
279 279
280 cancel_delayed_work(&di->monitor_work); 280 cancel_delayed_work(&di->monitor_work);
281 queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10); 281 queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index af7a231092a4..ab1e8289f07f 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -315,7 +315,6 @@ static int __init olpc_bat_init(void)
315 if (ret) 315 if (ret)
316 goto battery_failed; 316 goto battery_failed;
317 317
318 olpc_register_battery_callback(&olpc_battery_trigger_uevent);
319 goto success; 318 goto success;
320 319
321battery_failed: 320battery_failed:
@@ -328,7 +327,6 @@ success:
328 327
329static void __exit olpc_bat_exit(void) 328static void __exit olpc_bat_exit(void)
330{ 329{
331 olpc_deregister_battery_callback();
332 power_supply_unregister(&olpc_bat); 330 power_supply_unregister(&olpc_bat);
333 power_supply_unregister(&olpc_ac); 331 power_supply_unregister(&olpc_ac);
334 platform_device_unregister(bat_pdev); 332 platform_device_unregister(bat_pdev);
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index c8aa55b81fd8..82810b7bff9c 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -209,6 +209,12 @@ static int pda_power_probe(struct platform_device *pdev)
209 209
210 pdata = pdev->dev.platform_data; 210 pdata = pdev->dev.platform_data;
211 211
212 if (pdata->init) {
213 ret = pdata->init(dev);
214 if (ret < 0)
215 goto init_failed;
216 }
217
212 update_status(); 218 update_status();
213 update_charger(); 219 update_charger();
214 220
@@ -298,6 +304,9 @@ ac_irq_failed:
298 if (pdata->is_ac_online) 304 if (pdata->is_ac_online)
299 power_supply_unregister(&pda_psy_ac); 305 power_supply_unregister(&pda_psy_ac);
300ac_supply_failed: 306ac_supply_failed:
307 if (pdata->exit)
308 pdata->exit(dev);
309init_failed:
301wrongid: 310wrongid:
302 return ret; 311 return ret;
303} 312}
@@ -318,6 +327,8 @@ static int pda_power_remove(struct platform_device *pdev)
318 power_supply_unregister(&pda_psy_usb); 327 power_supply_unregister(&pda_psy_usb);
319 if (pdata->is_ac_online) 328 if (pdata->is_ac_online)
320 power_supply_unregister(&pda_psy_ac); 329 power_supply_unregister(&pda_psy_ac);
330 if (pdata->exit)
331 pdata->exit(dev);
321 332
322 return 0; 333 return 0;
323} 334}
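
The pda_power change above adds optional init()/exit() hooks that the driver calls around probe and remove. A hypothetical board-support sketch of supplying them through platform data; the struct name pda_power_pdata, the "pda-power" device name and all myboard_* symbols are assumptions for illustration, only the init/exit/is_ac_online callbacks are taken from the hunks above:

#include <linux/platform_device.h>
#include <linux/pda_power.h>

static int myboard_power_init(struct device *dev)
{
	/* e.g. claim GPIOs or enable the charger block; 0 on success */
	return 0;
}

static void myboard_power_exit(struct device *dev)
{
	/* undo whatever init() set up */
}

static int myboard_is_ac_online(void)
{
	return 0;	/* a real board file would read a GPIO here */
}

static struct pda_power_pdata myboard_power_pdata = {
	.init		= myboard_power_init,
	.exit		= myboard_power_exit,
	.is_ac_online	= myboard_is_ac_online,
};

static struct platform_device myboard_power_dev = {
	.name			= "pda-power",
	.id			= -1,
	.dev.platform_data	= &myboard_power_pdata,
};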
diff --git a/drivers/power/pmu_battery.c b/drivers/power/pmu_battery.c
index 60a8cf3a0431..9346a862f1f2 100644
--- a/drivers/power/pmu_battery.c
+++ b/drivers/power/pmu_battery.c
@@ -159,7 +159,7 @@ static int __init pmu_bat_init(void)
159 if (!pbat) 159 if (!pbat)
160 break; 160 break;
161 161
162 sprintf(pbat->name, "PMU battery %d", i); 162 sprintf(pbat->name, "PMU_battery_%d", i);
163 pbat->bat.name = pbat->name; 163 pbat->bat.name = pbat->name;
164 pbat->bat.properties = pmu_bat_props; 164 pbat->bat.properties = pmu_bat_props;
165 pbat->bat.num_properties = ARRAY_SIZE(pmu_bat_props); 165 pbat->bat.num_properties = ARRAY_SIZE(pmu_bat_props);
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 03d6a38464ef..138dd76ee347 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -39,7 +39,7 @@ static void power_supply_changed_work(struct work_struct *work)
39 struct power_supply *psy = container_of(work, struct power_supply, 39 struct power_supply *psy = container_of(work, struct power_supply,
40 changed_work); 40 changed_work);
41 41
42 dev_dbg(psy->dev, "%s\n", __FUNCTION__); 42 dev_dbg(psy->dev, "%s\n", __func__);
43 43
44 class_for_each_device(power_supply_class, psy, 44 class_for_each_device(power_supply_class, psy,
45 __power_supply_changed_work); 45 __power_supply_changed_work);
@@ -51,7 +51,7 @@ static void power_supply_changed_work(struct work_struct *work)
51 51
52void power_supply_changed(struct power_supply *psy) 52void power_supply_changed(struct power_supply *psy)
53{ 53{
54 dev_dbg(psy->dev, "%s\n", __FUNCTION__); 54 dev_dbg(psy->dev, "%s\n", __func__);
55 55
56 schedule_work(&psy->changed_work); 56 schedule_work(&psy->changed_work);
57} 57}
@@ -82,7 +82,7 @@ int power_supply_am_i_supplied(struct power_supply *psy)
82 error = class_for_each_device(power_supply_class, psy, 82 error = class_for_each_device(power_supply_class, psy,
83 __power_supply_am_i_supplied); 83 __power_supply_am_i_supplied);
84 84
85 dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, error); 85 dev_dbg(psy->dev, "%s %d\n", __func__, error);
86 86
87 return error; 87 return error;
88} 88}
diff --git a/drivers/power/power_supply_leds.c b/drivers/power/power_supply_leds.c
index fa3034f85c38..2dece40c544f 100644
--- a/drivers/power/power_supply_leds.c
+++ b/drivers/power/power_supply_leds.c
@@ -24,7 +24,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
24 if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status)) 24 if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status))
25 return; 25 return;
26 26
27 dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, status.intval); 27 dev_dbg(psy->dev, "%s %d\n", __func__, status.intval);
28 28
29 switch (status.intval) { 29 switch (status.intval) {
30 case POWER_SUPPLY_STATUS_FULL: 30 case POWER_SUPPLY_STATUS_FULL:
@@ -101,7 +101,7 @@ static void power_supply_update_gen_leds(struct power_supply *psy)
101 if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &online)) 101 if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &online))
102 return; 102 return;
103 103
104 dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, online.intval); 104 dev_dbg(psy->dev, "%s %d\n", __func__, online.intval);
105 105
106 if (online.intval) 106 if (online.intval)
107 led_trigger_event(psy->online_trig, LED_FULL); 107 led_trigger_event(psy->online_trig, LED_FULL);
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 6c9592ce4996..85edf945ab86 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <asm/time.h>
25#include <asm/ps3.h> 26#include <asm/ps3.h>
26#include <asm/lv1call.h> 27#include <asm/lv1call.h>
27#include <asm/cell-pmu.h> 28#include <asm/cell-pmu.h>
diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c
index 7605453b74fd..f17513dd9d4b 100644
--- a/drivers/ps3/ps3-sys-manager.c
+++ b/drivers/ps3/ps3-sys-manager.c
@@ -184,10 +184,7 @@ enum ps3_sys_manager_next_op {
184 184
185/** 185/**
186 * enum ps3_sys_manager_wake_source - Next-op wakeup source (bit position mask). 186 * enum ps3_sys_manager_wake_source - Next-op wakeup source (bit position mask).
187 * @PS3_SM_WAKE_DEFAULT: Disk insert, power button, eject button, IR 187 * @PS3_SM_WAKE_DEFAULT: Disk insert, power button, eject button.
188 * controller, and bluetooth controller.
189 * @PS3_SM_WAKE_RTC:
190 * @PS3_SM_WAKE_RTC_ERROR:
191 * @PS3_SM_WAKE_W_O_L: Ether or wireless LAN. 188 * @PS3_SM_WAKE_W_O_L: Ether or wireless LAN.
192 * @PS3_SM_WAKE_P_O_R: Power on reset. 189 * @PS3_SM_WAKE_P_O_R: Power on reset.
193 * 190 *
@@ -200,8 +197,6 @@ enum ps3_sys_manager_next_op {
200enum ps3_sys_manager_wake_source { 197enum ps3_sys_manager_wake_source {
201 /* version 3 */ 198 /* version 3 */
202 PS3_SM_WAKE_DEFAULT = 0, 199 PS3_SM_WAKE_DEFAULT = 0,
203 PS3_SM_WAKE_RTC = 0x00000040,
204 PS3_SM_WAKE_RTC_ERROR = 0x00000080,
205 PS3_SM_WAKE_W_O_L = 0x00000400, 200 PS3_SM_WAKE_W_O_L = 0x00000400,
206 PS3_SM_WAKE_P_O_R = 0x80000000, 201 PS3_SM_WAKE_P_O_R = 0x80000000,
207}; 202};
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index 4142115d298e..c32822ad84a4 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -1,14 +1,6 @@
1# 1#
2# RapidIO configuration 2# RapidIO configuration
3# 3#
4config RAPIDIO_8_BIT_TRANSPORT
5 bool "8-bit transport addressing"
6 depends on RAPIDIO
7 ---help---
8 By default, the kernel assumes a 16-bit addressed RapidIO
9 network. By selecting this option, the kernel will support
10 an 8-bit addressed network.
11
12config RAPIDIO_DISC_TIMEOUT 4config RAPIDIO_DISC_TIMEOUT
13 int "Discovery timeout duration (seconds)" 5 int "Discovery timeout duration (seconds)"
14 depends on RAPIDIO 6 depends on RAPIDIO
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c
index 8b56bbdd011e..a3824baca2e5 100644
--- a/drivers/rapidio/rio-access.c
+++ b/drivers/rapidio/rio-access.c
@@ -48,7 +48,7 @@ int __rio_local_read_config_##size \
48 u32 data = 0; \ 48 u32 data = 0; \
49 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 49 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
50 spin_lock_irqsave(&rio_config_lock, flags); \ 50 spin_lock_irqsave(&rio_config_lock, flags); \
51 res = mport->ops->lcread(mport->id, offset, len, &data); \ 51 res = mport->ops->lcread(mport, mport->id, offset, len, &data); \
52 *value = (type)data; \ 52 *value = (type)data; \
53 spin_unlock_irqrestore(&rio_config_lock, flags); \ 53 spin_unlock_irqrestore(&rio_config_lock, flags); \
54 return res; \ 54 return res; \
@@ -71,7 +71,7 @@ int __rio_local_write_config_##size \
71 unsigned long flags; \ 71 unsigned long flags; \
72 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 72 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
73 spin_lock_irqsave(&rio_config_lock, flags); \ 73 spin_lock_irqsave(&rio_config_lock, flags); \
74 res = mport->ops->lcwrite(mport->id, offset, len, value); \ 74 res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\
75 spin_unlock_irqrestore(&rio_config_lock, flags); \ 75 spin_unlock_irqrestore(&rio_config_lock, flags); \
76 return res; \ 76 return res; \
77} 77}
@@ -108,7 +108,7 @@ int rio_mport_read_config_##size \
108 u32 data = 0; \ 108 u32 data = 0; \
109 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 109 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
110 spin_lock_irqsave(&rio_config_lock, flags); \ 110 spin_lock_irqsave(&rio_config_lock, flags); \
111 res = mport->ops->cread(mport->id, destid, hopcount, offset, len, &data); \ 111 res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \
112 *value = (type)data; \ 112 *value = (type)data; \
113 spin_unlock_irqrestore(&rio_config_lock, flags); \ 113 spin_unlock_irqrestore(&rio_config_lock, flags); \
114 return res; \ 114 return res; \
@@ -131,7 +131,7 @@ int rio_mport_write_config_##size \
131 unsigned long flags; \ 131 unsigned long flags; \
132 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 132 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
133 spin_lock_irqsave(&rio_config_lock, flags); \ 133 spin_lock_irqsave(&rio_config_lock, flags); \
134 res = mport->ops->cwrite(mport->id, destid, hopcount, offset, len, value); \ 134 res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \
135 spin_unlock_irqrestore(&rio_config_lock, flags); \ 135 spin_unlock_irqrestore(&rio_config_lock, flags); \
136 return res; \ 136 return res; \
137} 137}
@@ -166,7 +166,7 @@ int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data)
166 unsigned long flags; 166 unsigned long flags;
167 167
168 spin_lock_irqsave(&rio_doorbell_lock, flags); 168 spin_lock_irqsave(&rio_doorbell_lock, flags);
169 res = mport->ops->dsend(mport->id, destid, data); 169 res = mport->ops->dsend(mport, mport->id, destid, data);
170 spin_unlock_irqrestore(&rio_doorbell_lock, flags); 170 spin_unlock_irqrestore(&rio_doorbell_lock, flags);
171 171
172 return res; 172 return res;
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 44420723a359..a926c896475e 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -73,7 +73,7 @@ static u16 rio_get_device_id(struct rio_mport *port, u16 destid, u8 hopcount)
73 73
74 rio_mport_read_config_32(port, destid, hopcount, RIO_DID_CSR, &result); 74 rio_mport_read_config_32(port, destid, hopcount, RIO_DID_CSR, &result);
75 75
76 return RIO_GET_DID(result); 76 return RIO_GET_DID(port->sys_size, result);
77} 77}
78 78
79/** 79/**
@@ -88,7 +88,7 @@ static u16 rio_get_device_id(struct rio_mport *port, u16 destid, u8 hopcount)
88static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u16 did) 88static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u16 did)
89{ 89{
90 rio_mport_write_config_32(port, destid, hopcount, RIO_DID_CSR, 90 rio_mport_write_config_32(port, destid, hopcount, RIO_DID_CSR,
91 RIO_SET_DID(did)); 91 RIO_SET_DID(port->sys_size, did));
92} 92}
93 93
94/** 94/**
@@ -100,7 +100,8 @@ static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u
100 */ 100 */
101static void rio_local_set_device_id(struct rio_mport *port, u16 did) 101static void rio_local_set_device_id(struct rio_mport *port, u16 did)
102{ 102{
103 rio_local_write_config_32(port, RIO_DID_CSR, RIO_SET_DID(did)); 103 rio_local_write_config_32(port, RIO_DID_CSR, RIO_SET_DID(port->sys_size,
104 did));
104} 105}
105 106
106/** 107/**
@@ -350,8 +351,18 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
350 rswitch->switchid = next_switchid; 351 rswitch->switchid = next_switchid;
351 rswitch->hopcount = hopcount; 352 rswitch->hopcount = hopcount;
352 rswitch->destid = destid; 353 rswitch->destid = destid;
354 rswitch->route_table = kzalloc(sizeof(u8)*
355 RIO_MAX_ROUTE_ENTRIES(port->sys_size),
356 GFP_KERNEL);
357 if (!rswitch->route_table) {
358 kfree(rdev);
359 rdev = NULL;
360 kfree(rswitch);
361 goto out;
362 }
353 /* Initialize switch route table */ 363 /* Initialize switch route table */
354 for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES; rdid++) 364 for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);
365 rdid++)
355 rswitch->route_table[rdid] = RIO_INVALID_ROUTE; 366 rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
356 rdev->rswitch = rswitch; 367 rdev->rswitch = rswitch;
357 sprintf(rio_name(rdev), "%02x:s:%04x", rdev->net->id, 368 sprintf(rio_name(rdev), "%02x:s:%04x", rdev->net->id,
@@ -480,7 +491,7 @@ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount)
480{ 491{
481 u32 result; 492 u32 result;
482 493
483 rio_mport_read_config_32(port, RIO_ANY_DESTID, hopcount, 494 rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size), hopcount,
484 RIO_HOST_DID_LOCK_CSR, &result); 495 RIO_HOST_DID_LOCK_CSR, &result);
485 496
486 return (u16) (result & 0xffff); 497 return (u16) (result & 0xffff);
@@ -571,14 +582,16 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
571 } 582 }
572 583
573 /* Attempt to acquire device lock */ 584 /* Attempt to acquire device lock */
574 rio_mport_write_config_32(port, RIO_ANY_DESTID, hopcount, 585 rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size),
586 hopcount,
575 RIO_HOST_DID_LOCK_CSR, port->host_deviceid); 587 RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
576 while ((tmp = rio_get_host_deviceid_lock(port, hopcount)) 588 while ((tmp = rio_get_host_deviceid_lock(port, hopcount))
577 < port->host_deviceid) { 589 < port->host_deviceid) {
578 /* Delay a bit */ 590 /* Delay a bit */
579 mdelay(1); 591 mdelay(1);
580 /* Attempt to acquire device lock again */ 592 /* Attempt to acquire device lock again */
581 rio_mport_write_config_32(port, RIO_ANY_DESTID, hopcount, 593 rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size),
594 hopcount,
582 RIO_HOST_DID_LOCK_CSR, 595 RIO_HOST_DID_LOCK_CSR,
583 port->host_deviceid); 596 port->host_deviceid);
584 } 597 }
@@ -590,7 +603,9 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
590 } 603 }
591 604
592 /* Setup new RIO device */ 605 /* Setup new RIO device */
593 if ((rdev = rio_setup_device(net, port, RIO_ANY_DESTID, hopcount, 1))) { 606 rdev = rio_setup_device(net, port, RIO_ANY_DESTID(port->sys_size),
607 hopcount, 1);
608 if (rdev) {
594 /* Add device to the global and bus/net specific list. */ 609 /* Add device to the global and bus/net specific list. */
595 list_add_tail(&rdev->net_list, &net->devices); 610 list_add_tail(&rdev->net_list, &net->devices);
596 } else 611 } else
@@ -598,7 +613,8 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
598 613
599 if (rio_is_switch(rdev)) { 614 if (rio_is_switch(rdev)) {
600 next_switchid++; 615 next_switchid++;
601 sw_inport = rio_get_swpinfo_inport(port, RIO_ANY_DESTID, hopcount); 616 sw_inport = rio_get_swpinfo_inport(port,
617 RIO_ANY_DESTID(port->sys_size), hopcount);
602 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, 618 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
603 port->host_deviceid, sw_inport); 619 port->host_deviceid, sw_inport);
604 rdev->rswitch->route_table[port->host_deviceid] = sw_inport; 620 rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
@@ -612,7 +628,8 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
612 } 628 }
613 629
614 num_ports = 630 num_ports =
615 rio_get_swpinfo_tports(port, RIO_ANY_DESTID, hopcount); 631 rio_get_swpinfo_tports(port, RIO_ANY_DESTID(port->sys_size),
632 hopcount);
616 pr_debug( 633 pr_debug(
617 "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", 634 "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
618 rio_name(rdev), rdev->vid, rdev->did, num_ports); 635 rio_name(rdev), rdev->vid, rdev->did, num_ports);
@@ -624,13 +641,15 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
624 cur_destid = next_destid; 641 cur_destid = next_destid;
625 642
626 if (rio_sport_is_active 643 if (rio_sport_is_active
627 (port, RIO_ANY_DESTID, hopcount, port_num)) { 644 (port, RIO_ANY_DESTID(port->sys_size), hopcount,
645 port_num)) {
628 pr_debug( 646 pr_debug(
629 "RIO: scanning device on port %d\n", 647 "RIO: scanning device on port %d\n",
630 port_num); 648 port_num);
631 rio_route_add_entry(port, rdev->rswitch, 649 rio_route_add_entry(port, rdev->rswitch,
632 RIO_GLOBAL_TABLE, 650 RIO_GLOBAL_TABLE,
633 RIO_ANY_DESTID, port_num); 651 RIO_ANY_DESTID(port->sys_size),
652 port_num);
634 653
635 if (rio_enum_peer(net, port, hopcount + 1) < 0) 654 if (rio_enum_peer(net, port, hopcount + 1) < 0)
636 return -1; 655 return -1;
@@ -735,7 +754,8 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
735 pr_debug( 754 pr_debug(
736 "RIO: scanning device on port %d\n", 755 "RIO: scanning device on port %d\n",
737 port_num); 756 port_num);
738 for (ndestid = 0; ndestid < RIO_ANY_DESTID; 757 for (ndestid = 0;
758 ndestid < RIO_ANY_DESTID(port->sys_size);
739 ndestid++) { 759 ndestid++) {
740 rio_route_get_entry(port, rdev->rswitch, 760 rio_route_get_entry(port, rdev->rswitch,
741 RIO_GLOBAL_TABLE, 761 RIO_GLOBAL_TABLE,
@@ -917,7 +937,9 @@ static void rio_build_route_tables(void)
917 937
918 list_for_each_entry(rdev, &rio_devices, global_list) 938 list_for_each_entry(rdev, &rio_devices, global_list)
919 if (rio_is_switch(rdev)) 939 if (rio_is_switch(rdev))
920 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++) { 940 for (i = 0;
941 i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
942 i++) {
921 if (rio_route_get_entry 943 if (rio_route_get_entry
922 (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE, 944 (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE,
923 i, &sport) < 0) 945 i, &sport) < 0)
@@ -981,7 +1003,8 @@ int rio_disc_mport(struct rio_mport *mport)
981 del_timer_sync(&rio_enum_timer); 1003 del_timer_sync(&rio_enum_timer);
982 1004
983 pr_debug("done\n"); 1005 pr_debug("done\n");
984 if (rio_disc_peer(net, mport, RIO_ANY_DESTID, 0) < 0) { 1006 if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
1007 0) < 0) {
985 printk(KERN_INFO 1008 printk(KERN_INFO
986 "RIO: master port %d device has failed discovery\n", 1009 "RIO: master port %d device has failed discovery\n",
987 mport->id); 1010 mport->id);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 659e31164cf0..97a147f050d6 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -43,7 +43,8 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
43 if (!rdev->rswitch) 43 if (!rdev->rswitch)
44 goto out; 44 goto out;
45 45
46 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++) { 46 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
47 i++) {
47 if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE) 48 if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE)
48 continue; 49 continue;
49 str += 50 str +=
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 80c5f1ba2e49..680661abbc4b 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -43,7 +43,7 @@ u16 rio_local_get_device_id(struct rio_mport *port)
43 43
44 rio_local_read_config_32(port, RIO_DID_CSR, &result); 44 rio_local_read_config_32(port, RIO_DID_CSR, &result);
45 45
46 return (RIO_GET_DID(result)); 46 return (RIO_GET_DID(port->sys_size, result));
47} 47}
48 48
49/** 49/**
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 80e3f03b5041..7786d02581f2 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -51,10 +51,5 @@ extern struct rio_route_ops __end_rio_route_ops[];
51 DECLARE_RIO_ROUTE_SECTION(.rio_route_ops, \ 51 DECLARE_RIO_ROUTE_SECTION(.rio_route_ops, \
52 vid, did, add_hook, get_hook) 52 vid, did, add_hook, get_hook)
53 53
54#ifdef CONFIG_RAPIDIO_8_BIT_TRANSPORT 54#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
55#define RIO_GET_DID(x) ((x & 0x00ff0000) >> 16) 55#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
56#define RIO_SET_DID(x) ((x & 0x000000ff) << 16)
57#else
58#define RIO_GET_DID(x) (x & 0xffff)
59#define RIO_SET_DID(x) (x & 0xffff)
60#endif
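
With the Kconfig option gone, the 8-bit versus 16-bit transport decision moves to run time: the macros above take the port's sys_size (0 for small systems with 8-bit destination IDs, 1 for large systems with 16-bit IDs). A tiny standalone illustration of how the same CSR word decodes under each size; the macro bodies are copied from the hunk and the CSR value is arbitrary:

#include <stdio.h>
#include <stdint.h>

#define RIO_GET_DID(size, x)	(size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
#define RIO_SET_DID(size, x)	(size ? (x & 0xffff) : ((x & 0x000000ff) << 16))

int main(void)
{
	uint32_t csr = 0x00ab1234;	/* example RIO_DID_CSR contents */

	printf("small system: did=%#x\n", RIO_GET_DID(0, csr));	/* 0xab   */
	printf("large system: did=%#x\n", RIO_GET_DID(1, csr));	/* 0x1234 */
	printf("encode 0x42, small system: %#x\n", RIO_SET_DID(0, 0x42));
	return 0;
}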
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 4f28045d9ef2..8624f55d0560 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -419,7 +419,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
419 return -ENOMEM; 419 return -ENOMEM;
420 420
421 rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE); 421 rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE);
422 if (unlikely(IS_ERR(rtc))) { 422 if (IS_ERR(rtc)) {
423 ret = PTR_ERR(rtc->rtc_dev); 423 ret = PTR_ERR(rtc->rtc_dev);
424 goto err; 424 goto err;
425 } 425 }
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index dcdc142a3441..d060a06ce05b 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -854,11 +854,12 @@ cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
854 * don't define the IRQ. It should always be safe to 854 * don't define the IRQ. It should always be safe to
855 * hardcode it in these cases 855 * hardcode it in these cases
856 */ 856 */
857 return cmos_do_probe(&pnp->dev, &pnp->res.port_resource[0], 8); 857 return cmos_do_probe(&pnp->dev,
858 pnp_get_resource(pnp, IORESOURCE_IO, 0), 8);
858 else 859 else
859 return cmos_do_probe(&pnp->dev, 860 return cmos_do_probe(&pnp->dev,
860 &pnp->res.port_resource[0], 861 pnp_get_resource(pnp, IORESOURCE_IO, 0),
861 pnp->res.irq_resource[0].start); 862 pnp_irq(pnp, 0));
862} 863}
863 864
864static void __exit cmos_pnp_remove(struct pnp_dev *pnp) 865static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index f389a28720d2..bbf97e65202a 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -99,45 +99,38 @@ struct ds1307 {
99}; 99};
100 100
101struct chip_desc { 101struct chip_desc {
102 char name[9];
103 unsigned nvram56:1; 102 unsigned nvram56:1;
104 unsigned alarm:1; 103 unsigned alarm:1;
105 enum ds_type type;
106}; 104};
107 105
108static const struct chip_desc chips[] = { { 106static const struct chip_desc chips[] = {
109 .name = "ds1307", 107[ds_1307] = {
110 .type = ds_1307,
111 .nvram56 = 1, 108 .nvram56 = 1,
112}, { 109},
113 .name = "ds1337", 110[ds_1337] = {
114 .type = ds_1337,
115 .alarm = 1, 111 .alarm = 1,
116}, { 112},
117 .name = "ds1338", 113[ds_1338] = {
118 .type = ds_1338,
119 .nvram56 = 1, 114 .nvram56 = 1,
120}, { 115},
121 .name = "ds1339", 116[ds_1339] = {
122 .type = ds_1339,
123 .alarm = 1, 117 .alarm = 1,
124}, { 118},
125 .name = "ds1340", 119[ds_1340] = {
126 .type = ds_1340, 120},
127}, { 121[m41t00] = {
128 .name = "m41t00",
129 .type = m41t00,
130}, }; 122}, };
131 123
132static inline const struct chip_desc *find_chip(const char *s) 124static const struct i2c_device_id ds1307_id[] = {
133{ 125 { "ds1307", ds_1307 },
134 unsigned i; 126 { "ds1337", ds_1337 },
135 127 { "ds1338", ds_1338 },
136 for (i = 0; i < ARRAY_SIZE(chips); i++) 128 { "ds1339", ds_1339 },
137 if (strnicmp(s, chips[i].name, sizeof chips[i].name) == 0) 129 { "ds1340", ds_1340 },
138 return &chips[i]; 130 { "m41t00", m41t00 },
139 return NULL; 131 { }
140} 132};
133MODULE_DEVICE_TABLE(i2c, ds1307_id);
141 134
142static int ds1307_get_time(struct device *dev, struct rtc_time *t) 135static int ds1307_get_time(struct device *dev, struct rtc_time *t)
143{ 136{
@@ -326,21 +319,15 @@ static struct bin_attribute nvram = {
326 319
327static struct i2c_driver ds1307_driver; 320static struct i2c_driver ds1307_driver;
328 321
329static int __devinit ds1307_probe(struct i2c_client *client) 322static int __devinit ds1307_probe(struct i2c_client *client,
323 const struct i2c_device_id *id)
330{ 324{
331 struct ds1307 *ds1307; 325 struct ds1307 *ds1307;
332 int err = -ENODEV; 326 int err = -ENODEV;
333 int tmp; 327 int tmp;
334 const struct chip_desc *chip; 328 const struct chip_desc *chip = &chips[id->driver_data];
335 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 329 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
336 330
337 chip = find_chip(client->name);
338 if (!chip) {
339 dev_err(&client->dev, "unknown chip type '%s'\n",
340 client->name);
341 return -ENODEV;
342 }
343
344 if (!i2c_check_functionality(adapter, 331 if (!i2c_check_functionality(adapter,
345 I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) 332 I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
346 return -EIO; 333 return -EIO;
@@ -361,7 +348,7 @@ static int __devinit ds1307_probe(struct i2c_client *client)
361 ds1307->msg[1].len = sizeof(ds1307->regs); 348 ds1307->msg[1].len = sizeof(ds1307->regs);
362 ds1307->msg[1].buf = ds1307->regs; 349 ds1307->msg[1].buf = ds1307->regs;
363 350
364 ds1307->type = chip->type; 351 ds1307->type = id->driver_data;
365 352
366 switch (ds1307->type) { 353 switch (ds1307->type) {
367 case ds_1337: 354 case ds_1337:
@@ -550,6 +537,7 @@ static struct i2c_driver ds1307_driver = {
550 }, 537 },
551 .probe = ds1307_probe, 538 .probe = ds1307_probe,
552 .remove = __devexit_p(ds1307_remove), 539 .remove = __devexit_p(ds1307_remove),
540 .id_table = ds1307_id,
553}; 541};
554 542
555static int __init ds1307_init(void) 543static int __init ds1307_init(void)
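
The rtc-ds1307 conversion above, like the rtc-ds1374, rtc-isl1208, rtc-m41t80 and rtc-pcf8563 changes that follow, drops the hand-rolled client->name lookup in favour of an i2c_device_id table, so id->driver_data can carry a per-chip index or feature bits straight into probe(). A minimal, self-contained sketch of that pattern with invented names (foo, foo_chips); it mirrors the new-style i2c driver API of this kernel era rather than any one driver above:

#include <linux/module.h>
#include <linux/i2c.h>

enum foo_type { foo_a, foo_b };

static const struct foo_desc {
	unsigned has_alarm:1;
} foo_chips[] = {
	[foo_a] = { .has_alarm = 1 },
	[foo_b] = { },
};

static const struct i2c_device_id foo_id[] = {
	{ "foo-a", foo_a },
	{ "foo-b", foo_b },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	/* driver_data from the matched table entry indexes the chip table */
	const struct foo_desc *chip = &foo_chips[id->driver_data];

	dev_info(&client->dev, "alarm support: %d\n", chip->has_alarm);
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_id,
};

static int __init foo_init(void)
{
	return i2c_add_driver(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	i2c_del_driver(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");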
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 45bda186befc..fa2d2f8b3f4d 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -41,6 +41,12 @@
41#define DS1374_REG_SR_AF 0x01 /* Alarm Flag */ 41#define DS1374_REG_SR_AF 0x01 /* Alarm Flag */
42#define DS1374_REG_TCR 0x09 /* Trickle Charge */ 42#define DS1374_REG_TCR 0x09 /* Trickle Charge */
43 43
44static const struct i2c_device_id ds1374_id[] = {
45 { "rtc-ds1374", 0 },
46 { }
47};
48MODULE_DEVICE_TABLE(i2c, ds1374_id);
49
44struct ds1374 { 50struct ds1374 {
45 struct i2c_client *client; 51 struct i2c_client *client;
46 struct rtc_device *rtc; 52 struct rtc_device *rtc;
@@ -355,7 +361,8 @@ static const struct rtc_class_ops ds1374_rtc_ops = {
355 .ioctl = ds1374_ioctl, 361 .ioctl = ds1374_ioctl,
356}; 362};
357 363
358static int ds1374_probe(struct i2c_client *client) 364static int ds1374_probe(struct i2c_client *client,
365 const struct i2c_device_id *id)
359{ 366{
360 struct ds1374 *ds1374; 367 struct ds1374 *ds1374;
361 int ret; 368 int ret;
@@ -429,6 +436,7 @@ static struct i2c_driver ds1374_driver = {
429 }, 436 },
430 .probe = ds1374_probe, 437 .probe = ds1374_probe,
431 .remove = __devexit_p(ds1374_remove), 438 .remove = __devexit_p(ds1374_remove),
439 .id_table = ds1374_id,
432}; 440};
433 441
434static int __init ds1374_init(void) 442static int __init ds1374_init(void)
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index fb15e3fb4ce2..fbb90b1e4098 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -490,7 +490,7 @@ isl1208_sysfs_unregister(struct device *dev)
490} 490}
491 491
492static int 492static int
493isl1208_probe(struct i2c_client *client) 493isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
494{ 494{
495 int rc = 0; 495 int rc = 0;
496 struct rtc_device *rtc; 496 struct rtc_device *rtc;
@@ -545,12 +545,19 @@ isl1208_remove(struct i2c_client *client)
545 return 0; 545 return 0;
546} 546}
547 547
548static const struct i2c_device_id isl1208_id[] = {
549 { "isl1208", 0 },
550 { }
551};
552MODULE_DEVICE_TABLE(i2c, isl1208_id);
553
548static struct i2c_driver isl1208_driver = { 554static struct i2c_driver isl1208_driver = {
549 .driver = { 555 .driver = {
550 .name = "rtc-isl1208", 556 .name = "rtc-isl1208",
551 }, 557 },
552 .probe = isl1208_probe, 558 .probe = isl1208_probe,
553 .remove = isl1208_remove, 559 .remove = isl1208_remove,
560 .id_table = isl1208_id,
554}; 561};
555 562
556static int __init 563static int __init
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 1cb33cac1237..316bfaa80872 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -60,48 +60,21 @@
60 60
61#define DRV_VERSION "0.05" 61#define DRV_VERSION "0.05"
62 62
63struct m41t80_chip_info { 63static const struct i2c_device_id m41t80_id[] = {
64 const char *name; 64 { "m41t80", 0 },
65 u8 features; 65 { "m41t81", M41T80_FEATURE_HT },
66}; 66 { "m41t81s", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
67 67 { "m41t82", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
68static const struct m41t80_chip_info m41t80_chip_info_tbl[] = { 68 { "m41t83", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
69 { 69 { "m41st84", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
70 .name = "m41t80", 70 { "m41st85", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
71 .features = 0, 71 { "m41st87", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
72 }, 72 { }
73 {
74 .name = "m41t81",
75 .features = M41T80_FEATURE_HT,
76 },
77 {
78 .name = "m41t81s",
79 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
80 },
81 {
82 .name = "m41t82",
83 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
84 },
85 {
86 .name = "m41t83",
87 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
88 },
89 {
90 .name = "m41st84",
91 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
92 },
93 {
94 .name = "m41st85",
95 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
96 },
97 {
98 .name = "m41st87",
99 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
100 },
101}; 73};
74MODULE_DEVICE_TABLE(i2c, m41t80_id);
102 75
103struct m41t80_data { 76struct m41t80_data {
104 const struct m41t80_chip_info *chip; 77 u8 features;
105 struct rtc_device *rtc; 78 struct rtc_device *rtc;
106}; 79};
107 80
@@ -208,7 +181,7 @@ static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
208 struct m41t80_data *clientdata = i2c_get_clientdata(client); 181 struct m41t80_data *clientdata = i2c_get_clientdata(client);
209 u8 reg; 182 u8 reg;
210 183
211 if (clientdata->chip->features & M41T80_FEATURE_BL) { 184 if (clientdata->features & M41T80_FEATURE_BL) {
212 reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS); 185 reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
213 seq_printf(seq, "battery\t\t: %s\n", 186 seq_printf(seq, "battery\t\t: %s\n",
214 (reg & M41T80_FLAGS_BATT_LOW) ? "exhausted" : "ok"); 187 (reg & M41T80_FLAGS_BATT_LOW) ? "exhausted" : "ok");
@@ -756,12 +729,12 @@ static struct notifier_block wdt_notifier = {
756 * 729 *
757 ***************************************************************************** 730 *****************************************************************************
758 */ 731 */
759static int m41t80_probe(struct i2c_client *client) 732static int m41t80_probe(struct i2c_client *client,
733 const struct i2c_device_id *id)
760{ 734{
761 int i, rc = 0; 735 int rc = 0;
762 struct rtc_device *rtc = NULL; 736 struct rtc_device *rtc = NULL;
763 struct rtc_time tm; 737 struct rtc_time tm;
764 const struct m41t80_chip_info *chip;
765 struct m41t80_data *clientdata = NULL; 738 struct m41t80_data *clientdata = NULL;
766 739
767 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C 740 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C
@@ -773,19 +746,6 @@ static int m41t80_probe(struct i2c_client *client)
773 dev_info(&client->dev, 746 dev_info(&client->dev,
774 "chip found, driver version " DRV_VERSION "\n"); 747 "chip found, driver version " DRV_VERSION "\n");
775 748
776 chip = NULL;
777 for (i = 0; i < ARRAY_SIZE(m41t80_chip_info_tbl); i++) {
778 if (!strcmp(m41t80_chip_info_tbl[i].name, client->name)) {
779 chip = &m41t80_chip_info_tbl[i];
780 break;
781 }
782 }
783 if (!chip) {
784 dev_err(&client->dev, "%s is not supported\n", client->name);
785 rc = -ENODEV;
786 goto exit;
787 }
788
789 clientdata = kzalloc(sizeof(*clientdata), GFP_KERNEL); 749 clientdata = kzalloc(sizeof(*clientdata), GFP_KERNEL);
790 if (!clientdata) { 750 if (!clientdata) {
791 rc = -ENOMEM; 751 rc = -ENOMEM;
@@ -801,7 +761,7 @@ static int m41t80_probe(struct i2c_client *client)
801 } 761 }
802 762
803 clientdata->rtc = rtc; 763 clientdata->rtc = rtc;
804 clientdata->chip = chip; 764 clientdata->features = id->driver_data;
805 i2c_set_clientdata(client, clientdata); 765 i2c_set_clientdata(client, clientdata);
806 766
807 /* Make sure HT (Halt Update) bit is cleared */ 767 /* Make sure HT (Halt Update) bit is cleared */
@@ -810,7 +770,7 @@ static int m41t80_probe(struct i2c_client *client)
810 goto ht_err; 770 goto ht_err;
811 771
812 if (rc & M41T80_ALHOUR_HT) { 772 if (rc & M41T80_ALHOUR_HT) {
813 if (chip->features & M41T80_FEATURE_HT) { 773 if (clientdata->features & M41T80_FEATURE_HT) {
814 m41t80_get_datetime(client, &tm); 774 m41t80_get_datetime(client, &tm);
815 dev_info(&client->dev, "HT bit was set!\n"); 775 dev_info(&client->dev, "HT bit was set!\n");
816 dev_info(&client->dev, 776 dev_info(&client->dev,
@@ -842,7 +802,7 @@ static int m41t80_probe(struct i2c_client *client)
842 goto exit; 802 goto exit;
843 803
844#ifdef CONFIG_RTC_DRV_M41T80_WDT 804#ifdef CONFIG_RTC_DRV_M41T80_WDT
845 if (chip->features & M41T80_FEATURE_HT) { 805 if (clientdata->features & M41T80_FEATURE_HT) {
846 rc = misc_register(&wdt_dev); 806 rc = misc_register(&wdt_dev);
847 if (rc) 807 if (rc)
848 goto exit; 808 goto exit;
@@ -878,7 +838,7 @@ static int m41t80_remove(struct i2c_client *client)
878 struct rtc_device *rtc = clientdata->rtc; 838 struct rtc_device *rtc = clientdata->rtc;
879 839
880#ifdef CONFIG_RTC_DRV_M41T80_WDT 840#ifdef CONFIG_RTC_DRV_M41T80_WDT
881 if (clientdata->chip->features & M41T80_FEATURE_HT) { 841 if (clientdata->features & M41T80_FEATURE_HT) {
882 misc_deregister(&wdt_dev); 842 misc_deregister(&wdt_dev);
883 unregister_reboot_notifier(&wdt_notifier); 843 unregister_reboot_notifier(&wdt_notifier);
884 } 844 }
@@ -896,6 +856,7 @@ static struct i2c_driver m41t80_driver = {
896 }, 856 },
897 .probe = m41t80_probe, 857 .probe = m41t80_probe,
898 .remove = m41t80_remove, 858 .remove = m41t80_remove,
859 .id_table = m41t80_id,
899}; 860};
900 861
901static int __init m41t80_rtc_init(void) 862static int __init m41t80_rtc_init(void)
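The rtc-m41t80 conversion above is the standard new-style i2c binding pattern: instead of strcmp()-ing client->name against a private chip-info table in probe(), the driver publishes an i2c_device_id table whose driver_data carries the per-chip feature flags, and the i2c core hands the matched entry straight to probe(). A minimal sketch of that pattern, with made-up driver and chip names (probe side only; module registration boilerplate omitted):

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/slab.h>

#define MYCHIP_FEATURE_WDT	0x01	/* illustrative feature flag */

struct mychip_data {
	unsigned long features;
};

/* The i2c core matches client->name against these entries ... */
static const struct i2c_device_id mychip_id[] = {
	{ "mychip_a", 0 },
	{ "mychip_b", MYCHIP_FEATURE_WDT },
	{ }					/* sentinel */
};
MODULE_DEVICE_TABLE(i2c, mychip_id);

/* ... and passes the matching entry in, so id->driver_data replaces
 * the old strcmp() walk over a private chip-info table. */
static int mychip_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct mychip_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->features = id->driver_data;
	i2c_set_clientdata(client, data);
	return 0;
}

static struct i2c_driver mychip_driver = {
	.driver		= { .name = "mychip" },
	.probe		= mychip_probe,
	.id_table	= mychip_id,
};

MODULE_DEVICE_TABLE() also exports the table as module aliases, so the module can be autoloaded when a board registers one of the listed chip names; that is the other reason every RTC driver in this series grows an id_table.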
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index a41681d26eba..0fc4c3630780 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -246,7 +246,8 @@ static const struct rtc_class_ops pcf8563_rtc_ops = {
246 .set_time = pcf8563_rtc_set_time, 246 .set_time = pcf8563_rtc_set_time,
247}; 247};
248 248
249static int pcf8563_probe(struct i2c_client *client) 249static int pcf8563_probe(struct i2c_client *client,
250 const struct i2c_device_id *id)
250{ 251{
251 struct pcf8563 *pcf8563; 252 struct pcf8563 *pcf8563;
252 253
@@ -299,12 +300,19 @@ static int pcf8563_remove(struct i2c_client *client)
299 return 0; 300 return 0;
300} 301}
301 302
303static const struct i2c_device_id pcf8563_id[] = {
304 { "pcf8563", 0 },
305 { }
306};
307MODULE_DEVICE_TABLE(i2c, pcf8563_id);
308
302static struct i2c_driver pcf8563_driver = { 309static struct i2c_driver pcf8563_driver = {
303 .driver = { 310 .driver = {
304 .name = "rtc-pcf8563", 311 .name = "rtc-pcf8563",
305 }, 312 },
306 .probe = pcf8563_probe, 313 .probe = pcf8563_probe,
307 .remove = pcf8563_remove, 314 .remove = pcf8563_remove,
315 .id_table = pcf8563_id,
308}; 316};
309 317
310static int __init pcf8563_init(void) 318static int __init pcf8563_init(void)
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index 8d300e6d0d9e..0c6257a034ff 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -108,12 +108,10 @@ void rtc_proc_add_device(struct rtc_device *rtc)
 	if (rtc->id == 0) {
 		struct proc_dir_entry *ent;
 
-		ent = create_proc_entry("driver/rtc", 0, NULL);
-		if (ent) {
-			ent->proc_fops = &rtc_proc_fops;
+		ent = proc_create_data("driver/rtc", 0, NULL,
+				       &rtc_proc_fops, rtc);
+		if (ent)
 			ent->owner = rtc->owner;
-			ent->data = rtc;
-		}
 	}
 }
 
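proc_create_data(), used above, folds the old three-step create_proc_entry() sequence into a single call, so the /proc entry never exists in a half-initialised state (visible, but with no proc_fops or data yet). Roughly, with hypothetical names:

#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations foo_proc_fops;	/* seq_file fops, defined elsewhere */

static void foo_proc_register(void *private)
{
	struct proc_dir_entry *ent;

	/* Old pattern (entry is briefly live without fops/data):
	 *	ent = create_proc_entry("driver/foo", 0, NULL);
	 *	if (ent) {
	 *		ent->proc_fops = &foo_proc_fops;
	 *		ent->data = private;
	 *	}
	 * New pattern: fops and private data are attached at creation time;
	 * the open/show side can fetch the pointer via PDE(inode)->data.
	 */
	ent = proc_create_data("driver/foo", 0, NULL, &foo_proc_fops, private);
	if (ent)
		ent->owner = THIS_MODULE;
}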
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 7e63074708eb..56caf6b2c3e5 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -69,6 +69,15 @@ enum rtc_type {
69 rtc_rv5c387a, 69 rtc_rv5c387a,
70}; 70};
71 71
72static const struct i2c_device_id rs5c372_id[] = {
73 { "rs5c372a", rtc_rs5c372a },
74 { "rs5c372b", rtc_rs5c372b },
75 { "rv5c386", rtc_rv5c386 },
76 { "rv5c387a", rtc_rv5c387a },
77 { }
78};
79MODULE_DEVICE_TABLE(i2c, rs5c372_id);
80
72/* REVISIT: this assumes that: 81/* REVISIT: this assumes that:
73 * - we're in the 21st century, so it's safe to ignore the century 82 * - we're in the 21st century, so it's safe to ignore the century
74 * bit for rv5c38[67] (REG_MONTH bit 7); 83 * bit for rv5c38[67] (REG_MONTH bit 7);
@@ -494,7 +503,8 @@ static void rs5c_sysfs_unregister(struct device *dev)
494 503
495static struct i2c_driver rs5c372_driver; 504static struct i2c_driver rs5c372_driver;
496 505
497static int rs5c372_probe(struct i2c_client *client) 506static int rs5c372_probe(struct i2c_client *client,
507 const struct i2c_device_id *id)
498{ 508{
499 int err = 0; 509 int err = 0;
500 struct rs5c372 *rs5c372; 510 struct rs5c372 *rs5c372;
@@ -514,6 +524,7 @@ static int rs5c372_probe(struct i2c_client *client)
514 524
515 rs5c372->client = client; 525 rs5c372->client = client;
516 i2c_set_clientdata(client, rs5c372); 526 i2c_set_clientdata(client, rs5c372);
527 rs5c372->type = id->driver_data;
517 528
518 /* we read registers 0x0f then 0x00-0x0f; skip the first one */ 529 /* we read registers 0x0f then 0x00-0x0f; skip the first one */
519 rs5c372->regs = &rs5c372->buf[1]; 530 rs5c372->regs = &rs5c372->buf[1];
@@ -522,19 +533,6 @@ static int rs5c372_probe(struct i2c_client *client)
522 if (err < 0) 533 if (err < 0)
523 goto exit_kfree; 534 goto exit_kfree;
524 535
525 if (strcmp(client->name, "rs5c372a") == 0)
526 rs5c372->type = rtc_rs5c372a;
527 else if (strcmp(client->name, "rs5c372b") == 0)
528 rs5c372->type = rtc_rs5c372b;
529 else if (strcmp(client->name, "rv5c386") == 0)
530 rs5c372->type = rtc_rv5c386;
531 else if (strcmp(client->name, "rv5c387a") == 0)
532 rs5c372->type = rtc_rv5c387a;
533 else {
534 rs5c372->type = rtc_rs5c372b;
535 dev_warn(&client->dev, "assuming rs5c372b\n");
536 }
537
538 /* clock may be set for am/pm or 24 hr time */ 536 /* clock may be set for am/pm or 24 hr time */
539 switch (rs5c372->type) { 537 switch (rs5c372->type) {
540 case rtc_rs5c372a: 538 case rtc_rs5c372a:
@@ -651,6 +649,7 @@ static struct i2c_driver rs5c372_driver = {
651 }, 649 },
652 .probe = rs5c372_probe, 650 .probe = rs5c372_probe,
653 .remove = rs5c372_remove, 651 .remove = rs5c372_remove,
652 .id_table = rs5c372_id,
654}; 653};
655 654
656static __init int rs5c372_init(void) 655static __init int rs5c372_init(void)
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index e8abc90c32c5..29f47bacfc77 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -34,6 +34,12 @@
34#define S35390A_FLAG_RESET 0x80 34#define S35390A_FLAG_RESET 0x80
35#define S35390A_FLAG_TEST 0x01 35#define S35390A_FLAG_TEST 0x01
36 36
37static const struct i2c_device_id s35390a_id[] = {
38 { "s35390a", 0 },
39 { }
40};
41MODULE_DEVICE_TABLE(i2c, s35390a_id);
42
37struct s35390a { 43struct s35390a {
38 struct i2c_client *client[8]; 44 struct i2c_client *client[8];
39 struct rtc_device *rtc; 45 struct rtc_device *rtc;
@@ -195,7 +201,8 @@ static const struct rtc_class_ops s35390a_rtc_ops = {
195 201
196static struct i2c_driver s35390a_driver; 202static struct i2c_driver s35390a_driver;
197 203
198static int s35390a_probe(struct i2c_client *client) 204static int s35390a_probe(struct i2c_client *client,
205 const struct i2c_device_id *id)
199{ 206{
200 int err; 207 int err;
201 unsigned int i; 208 unsigned int i;
@@ -296,6 +303,7 @@ static struct i2c_driver s35390a_driver = {
296 }, 303 },
297 .probe = s35390a_probe, 304 .probe = s35390a_probe,
298 .remove = s35390a_remove, 305 .remove = s35390a_remove,
306 .id_table = s35390a_id,
299}; 307};
300 308
301static int __init s35390a_rtc_init(void) 309static int __init s35390a_rtc_init(void)
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 095282f63523..eaf55945f21b 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -494,7 +494,8 @@ static void x1205_sysfs_unregister(struct device *dev)
494} 494}
495 495
496 496
497static int x1205_probe(struct i2c_client *client) 497static int x1205_probe(struct i2c_client *client,
498 const struct i2c_device_id *id)
498{ 499{
499 int err = 0; 500 int err = 0;
500 unsigned char sr; 501 unsigned char sr;
@@ -552,12 +553,19 @@ static int x1205_remove(struct i2c_client *client)
552 return 0; 553 return 0;
553} 554}
554 555
556static const struct i2c_device_id x1205_id[] = {
557 { "x1205", 0 },
558 { }
559};
560MODULE_DEVICE_TABLE(i2c, x1205_id);
561
555static struct i2c_driver x1205_driver = { 562static struct i2c_driver x1205_driver = {
556 .driver = { 563 .driver = {
557 .name = "rtc-x1205", 564 .name = "rtc-x1205",
558 }, 565 },
559 .probe = x1205_probe, 566 .probe = x1205_probe,
560 .remove = x1205_remove, 567 .remove = x1205_remove,
568 .id_table = x1205_id,
561}; 569};
562 570
563static int __init x1205_init(void) 571static int __init x1205_init(void)
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 556063e8f7a9..03c0e40a92ff 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -157,6 +157,7 @@ static int dasd_devices_open(struct inode *inode, struct file *file)
157} 157}
158 158
159static const struct file_operations dasd_devices_file_ops = { 159static const struct file_operations dasd_devices_file_ops = {
160 .owner = THIS_MODULE,
160 .open = dasd_devices_open, 161 .open = dasd_devices_open,
161 .read = seq_read, 162 .read = seq_read,
162 .llseek = seq_lseek, 163 .llseek = seq_lseek,
@@ -311,17 +312,16 @@ out_error:
 int
 dasd_proc_init(void)
 {
-	dasd_proc_root_entry = proc_mkdir("dasd", &proc_root);
+	dasd_proc_root_entry = proc_mkdir("dasd", NULL);
 	if (!dasd_proc_root_entry)
 		goto out_nodasd;
 	dasd_proc_root_entry->owner = THIS_MODULE;
-	dasd_devices_entry = create_proc_entry("devices",
+	dasd_devices_entry = proc_create("devices",
 					 S_IFREG | S_IRUGO | S_IWUSR,
-					 dasd_proc_root_entry);
+					 dasd_proc_root_entry,
+					 &dasd_devices_file_ops);
 	if (!dasd_devices_entry)
 		goto out_nodevices;
-	dasd_devices_entry->proc_fops = &dasd_devices_file_ops;
-	dasd_devices_entry->owner = THIS_MODULE;
 	dasd_statistics_entry = create_proc_entry("statistics",
 					 S_IFREG | S_IRUGO | S_IWUSR,
 					 dasd_proc_root_entry);
@@ -335,7 +335,7 @@ dasd_proc_init(void)
335 out_nostatistics: 335 out_nostatistics:
336 remove_proc_entry("devices", dasd_proc_root_entry); 336 remove_proc_entry("devices", dasd_proc_root_entry);
337 out_nodevices: 337 out_nodevices:
338 remove_proc_entry("dasd", &proc_root); 338 remove_proc_entry("dasd", NULL);
339 out_nodasd: 339 out_nodasd:
340 return -ENOENT; 340 return -ENOENT;
341} 341}
@@ -345,5 +345,5 @@ dasd_proc_exit(void)
345{ 345{
346 remove_proc_entry("devices", dasd_proc_root_entry); 346 remove_proc_entry("devices", dasd_proc_root_entry);
347 remove_proc_entry("statistics", dasd_proc_root_entry); 347 remove_proc_entry("statistics", dasd_proc_root_entry);
348 remove_proc_entry("dasd", &proc_root); 348 remove_proc_entry("dasd", NULL);
349} 349}
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 0e1f35c9ed9d..3e5653c92f4b 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -982,15 +982,16 @@ tty3215_write(struct tty_struct * tty,
982/* 982/*
983 * Put character routine for 3215 ttys 983 * Put character routine for 3215 ttys
984 */ 984 */
985static void 985static int
986tty3215_put_char(struct tty_struct *tty, unsigned char ch) 986tty3215_put_char(struct tty_struct *tty, unsigned char ch)
987{ 987{
988 struct raw3215_info *raw; 988 struct raw3215_info *raw;
989 989
990 if (!tty) 990 if (!tty)
991 return; 991 return 0;
992 raw = (struct raw3215_info *) tty->driver_data; 992 raw = (struct raw3215_info *) tty->driver_data;
993 raw3215_putchar(raw, ch); 993 raw3215_putchar(raw, ch);
994 return 1;
994} 995}
995 996
996static void 997static void
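The put_char changes in this and the following sclp/tty3270 hunks track the tree-wide switch of the tty put_char operation from void to int: the method now reports whether it actually consumed the character (1) or had no room for it (0), so the tty layer can react instead of silently losing data. A hedged sketch of the converted shape, for a hypothetical driver that buffers into its own array (the 3215 and sclp drivers always return 1 because they buffer or block internally):

#include <linux/tty.h>

#define FOO_BUF_SIZE 64

struct foo_port {
	unsigned char buf[FOO_BUF_SIZE];
	int count;
};

/* May be called from atomic context: return how many characters were
 * accepted (0 or 1); never block here. */
static int foo_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct foo_port *port = tty->driver_data;

	if (!port || port->count >= FOO_BUF_SIZE)
		return 0;		/* no room, character not taken */
	port->buf[port->count++] = ch;
	return 1;			/* one character queued */
}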
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index b8f35bc52b7b..9e784d5f7f57 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -10,6 +10,7 @@
10#include <linux/cpu.h> 10#include <linux/cpu.h>
11#include <linux/sysdev.h> 11#include <linux/sysdev.h>
12#include <linux/workqueue.h> 12#include <linux/workqueue.h>
13#include <asm/smp.h>
13#include "sclp.h" 14#include "sclp.h"
14 15
15#define TAG "sclp_config: " 16#define TAG "sclp_config: "
@@ -19,9 +20,11 @@ struct conf_mgm_data {
19 u8 ev_qualifier; 20 u8 ev_qualifier;
20} __attribute__((packed)); 21} __attribute__((packed));
21 22
23#define EV_QUAL_CPU_CHANGE 1
22#define EV_QUAL_CAP_CHANGE 3 24#define EV_QUAL_CAP_CHANGE 3
23 25
24static struct work_struct sclp_cpu_capability_work; 26static struct work_struct sclp_cpu_capability_work;
27static struct work_struct sclp_cpu_change_work;
25 28
26static void sclp_cpu_capability_notify(struct work_struct *work) 29static void sclp_cpu_capability_notify(struct work_struct *work)
27{ 30{
@@ -37,13 +40,24 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
37 put_online_cpus(); 40 put_online_cpus();
38} 41}
39 42
43static void sclp_cpu_change_notify(struct work_struct *work)
44{
45 smp_rescan_cpus();
46}
47
40static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) 48static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
41{ 49{
42 struct conf_mgm_data *cdata; 50 struct conf_mgm_data *cdata;
43 51
44 cdata = (struct conf_mgm_data *)(evbuf + 1); 52 cdata = (struct conf_mgm_data *)(evbuf + 1);
45 if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE) 53 switch (cdata->ev_qualifier) {
54 case EV_QUAL_CPU_CHANGE:
55 schedule_work(&sclp_cpu_change_work);
56 break;
57 case EV_QUAL_CAP_CHANGE:
46 schedule_work(&sclp_cpu_capability_work); 58 schedule_work(&sclp_cpu_capability_work);
59 break;
60 }
47} 61}
48 62
49static struct sclp_register sclp_conf_register = 63static struct sclp_register sclp_conf_register =
@@ -57,6 +71,7 @@ static int __init sclp_conf_init(void)
57 int rc; 71 int rc;
58 72
59 INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); 73 INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
74 INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
60 75
61 rc = sclp_register(&sclp_conf_register); 76 rc = sclp_register(&sclp_conf_register);
62 if (rc) { 77 if (rc) {
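The new EV_QUAL_CPU_CHANGE case follows the same deferral pattern as the existing capability-change handling: the SCLP receiver only decodes the event qualifier and schedules a pre-initialised work item, and the expensive part (here smp_rescan_cpus()) runs later in process context. A generic sketch of that pattern, with illustrative names (the driver above achieves the same setup with INIT_WORK() at init time):

#include <linux/workqueue.h>

/* Runs from the shared workqueue, i.e. in process context, where it is
 * safe to take locks, allocate memory or hotplug CPUs. */
static void rescan_work_fn(struct work_struct *work)
{
	/* ... heavyweight rescan goes here ... */
}

static DECLARE_WORK(rescan_work, rescan_work_fn);

/* Called from the event receiver, possibly in atomic context: just map
 * the qualifier onto a work item and return. */
static void config_event(unsigned int qualifier)
{
	switch (qualifier) {
	case 1:				/* e.g. "CPU configuration changed" */
		schedule_work(&rescan_work);
		break;
	}
}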
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index e3b3d390b4a3..40b11521cd20 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -412,14 +412,14 @@ sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
412 * - including previous characters from sclp_tty_put_char() and strings from 412 * - including previous characters from sclp_tty_put_char() and strings from
413 * sclp_write() without final '\n' - will be written. 413 * sclp_write() without final '\n' - will be written.
414 */ 414 */
415static void 415static int
416sclp_tty_put_char(struct tty_struct *tty, unsigned char ch) 416sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
417{ 417{
418 sclp_tty_chars[sclp_tty_chars_count++] = ch; 418 sclp_tty_chars[sclp_tty_chars_count++] = ch;
419 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { 419 if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
420 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); 420 sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
421 sclp_tty_chars_count = 0; 421 sclp_tty_chars_count = 0;
422 } 422 } return 1;
423} 423}
424 424
425/* 425/*
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index ed507594e62b..35707c04e613 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -524,11 +524,15 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
524 * NOTE: include/linux/tty_driver.h specifies that a character should be 524 * NOTE: include/linux/tty_driver.h specifies that a character should be
525 * ignored if there is no room in the queue. This driver implements a different 525 * ignored if there is no room in the queue. This driver implements a different
526 * semantic in that it will block when there is no more room left. 526 * semantic in that it will block when there is no more room left.
527 *
528 * FIXME: putchar can currently be called from BH and other non blocking
529 * handlers so this semantic isn't a good idea.
527 */ 530 */
528static void 531static int
529sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) 532sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
530{ 533{
531 __sclp_vt220_write(&ch, 1, 0, 0, 1); 534 __sclp_vt220_write(&ch, 1, 0, 0, 1);
535 return 1;
532} 536}
533 537
534/* 538/*
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index c9b96d51b28f..e7c888c14e71 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -111,6 +111,7 @@ static int tape_proc_open(struct inode *inode, struct file *file)
111 111
112static const struct file_operations tape_proc_ops = 112static const struct file_operations tape_proc_ops =
113{ 113{
114 .owner = THIS_MODULE,
114 .open = tape_proc_open, 115 .open = tape_proc_open,
115 .read = seq_read, 116 .read = seq_read,
116 .llseek = seq_lseek, 117 .llseek = seq_lseek,
@@ -124,14 +125,12 @@ void
 tape_proc_init(void)
 {
 	tape_proc_devices =
-		create_proc_entry ("tapedevices", S_IFREG | S_IRUGO | S_IWUSR,
-				   &proc_root);
+		proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+			    &tape_proc_ops);
 	if (tape_proc_devices == NULL) {
 		PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
 		return;
 	}
-	tape_proc_devices->proc_fops = &tape_proc_ops;
-	tape_proc_devices->owner = THIS_MODULE;
 }
 
 /*
@@ -141,5 +140,5 @@ void
141tape_proc_cleanup(void) 140tape_proc_cleanup(void)
142{ 141{
143 if (tape_proc_devices != NULL) 142 if (tape_proc_devices != NULL)
144 remove_proc_entry ("tapedevices", &proc_root); 143 remove_proc_entry ("tapedevices", NULL);
145} 144}
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 70b1980a08b6..c1f2adefad41 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -965,7 +965,7 @@ tty3270_write_room(struct tty_struct *tty)
965 * Insert character into the screen at the current position with the 965 * Insert character into the screen at the current position with the
966 * current color and highlight. This function does NOT do cursor movement. 966 * current color and highlight. This function does NOT do cursor movement.
967 */ 967 */
968static void 968static int
969tty3270_put_character(struct tty3270 *tp, char ch) 969tty3270_put_character(struct tty3270 *tp, char ch)
970{ 970{
971 struct tty3270_line *line; 971 struct tty3270_line *line;
@@ -986,6 +986,7 @@ tty3270_put_character(struct tty3270 *tp, char ch)
986 cell->character = tp->view.ascebc[(unsigned int) ch]; 986 cell->character = tp->view.ascebc[(unsigned int) ch];
987 cell->highlight = tp->highlight; 987 cell->highlight = tp->highlight;
988 cell->f_color = tp->f_color; 988 cell->f_color = tp->f_color;
989 return 1;
989} 990}
990 991
991/* 992/*
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index e8597ec92247..40ef948fcb3a 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -374,13 +374,10 @@ cio_ignore_proc_init (void)
 {
 	struct proc_dir_entry *entry;
 
-	entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR,
-				   &proc_root);
+	entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+			    &cio_ignore_proc_fops);
 	if (!entry)
 		return -ENOENT;
-
-	entry->proc_fops = &cio_ignore_proc_fops;
-
 	return 0;
 }
 
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index fe1ad1722158..26a930e832bd 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,44 +152,89 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
152 return 0; 152 return 0;
153} 153}
154 154
155static int __get_next_bus_id(const char **buf, char *bus_id)
156{
157 int rc, len;
158 char *start, *end;
159
160 start = (char *)*buf;
161 end = strchr(start, ',');
162 if (!end) {
163 /* Last entry. Strip trailing newline, if applicable. */
164 end = strchr(start, '\n');
165 if (end)
166 *end = '\0';
167 len = strlen(start) + 1;
168 } else {
169 len = end - start + 1;
170 end++;
171 }
172 if (len < BUS_ID_SIZE) {
173 strlcpy(bus_id, start, len);
174 rc = 0;
175 } else
176 rc = -EINVAL;
177 *buf = end;
178 return rc;
179}
180
181static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE])
182{
183 int cssid, ssid, devno;
184
185 /* Must be of form %x.%x.%04x */
186 if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
187 return 0;
188 return 1;
189}
190
155/** 191/**
156 * ccwgroup_create() - create and register a ccw group device 192 * ccwgroup_create_from_string() - create and register a ccw group device
157 * @root: parent device for the new device 193 * @root: parent device for the new device
158 * @creator_id: identifier of creating driver 194 * @creator_id: identifier of creating driver
159 * @cdrv: ccw driver of slave devices 195 * @cdrv: ccw driver of slave devices
160 * @argc: number of slave devices 196 * @num_devices: number of slave devices
161 * @argv: bus ids of slave devices 197 * @buf: buffer containing comma separated bus ids of slave devices
162 * 198 *
163 * Create and register a new ccw group device as a child of @root. Slave 199 * Create and register a new ccw group device as a child of @root. Slave
164 * devices are obtained from the list of bus ids given in @argv[] and must all 200 * devices are obtained from the list of bus ids given in @buf and must all
165 * belong to @cdrv. 201 * belong to @cdrv.
166 * Returns: 202 * Returns:
167 * %0 on success and an error code on failure. 203 * %0 on success and an error code on failure.
168 * Context: 204 * Context:
169 * non-atomic 205 * non-atomic
170 */ 206 */
171int ccwgroup_create(struct device *root, unsigned int creator_id, 207int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
172 struct ccw_driver *cdrv, int argc, char *argv[]) 208 struct ccw_driver *cdrv, int num_devices,
209 const char *buf)
173{ 210{
174 struct ccwgroup_device *gdev; 211 struct ccwgroup_device *gdev;
175 int i; 212 int rc, i;
176 int rc; 213 char tmp_bus_id[BUS_ID_SIZE];
214 const char *curr_buf;
177 215
178 if (argc > 256) /* disallow dumb users */ 216 gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
179 return -EINVAL; 217 GFP_KERNEL);
180
181 gdev = kzalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL);
182 if (!gdev) 218 if (!gdev)
183 return -ENOMEM; 219 return -ENOMEM;
184 220
185 atomic_set(&gdev->onoff, 0); 221 atomic_set(&gdev->onoff, 0);
186 mutex_init(&gdev->reg_mutex); 222 mutex_init(&gdev->reg_mutex);
187 mutex_lock(&gdev->reg_mutex); 223 mutex_lock(&gdev->reg_mutex);
188 for (i = 0; i < argc; i++) { 224 curr_buf = buf;
189 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); 225 for (i = 0; i < num_devices && curr_buf; i++) {
190 226 rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
191 /* all devices have to be of the same type in 227 if (rc != 0)
192 * order to be grouped */ 228 goto error;
229 if (!__is_valid_bus_id(tmp_bus_id)) {
230 rc = -EINVAL;
231 goto error;
232 }
233 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
234 /*
235 * All devices have to be of the same type in
236 * order to be grouped.
237 */
193 if (!gdev->cdev[i] 238 if (!gdev->cdev[i]
194 || gdev->cdev[i]->id.driver_info != 239 || gdev->cdev[i]->id.driver_info !=
195 gdev->cdev[0]->id.driver_info) { 240 gdev->cdev[0]->id.driver_info) {
@@ -203,9 +248,18 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
203 } 248 }
204 dev_set_drvdata(&gdev->cdev[i]->dev, gdev); 249 dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
205 } 250 }
206 251 /* Check for sufficient number of bus ids. */
252 if (i < num_devices && !curr_buf) {
253 rc = -EINVAL;
254 goto error;
255 }
256 /* Check for trailing stuff. */
257 if (i == num_devices && strlen(curr_buf) > 0) {
258 rc = -EINVAL;
259 goto error;
260 }
207 gdev->creator_id = creator_id; 261 gdev->creator_id = creator_id;
208 gdev->count = argc; 262 gdev->count = num_devices;
209 gdev->dev.bus = &ccwgroup_bus_type; 263 gdev->dev.bus = &ccwgroup_bus_type;
210 gdev->dev.parent = root; 264 gdev->dev.parent = root;
211 gdev->dev.release = ccwgroup_release; 265 gdev->dev.release = ccwgroup_release;
@@ -233,7 +287,7 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
233 device_remove_file(&gdev->dev, &dev_attr_ungroup); 287 device_remove_file(&gdev->dev, &dev_attr_ungroup);
234 device_unregister(&gdev->dev); 288 device_unregister(&gdev->dev);
235error: 289error:
236 for (i = 0; i < argc; i++) 290 for (i = 0; i < num_devices; i++)
237 if (gdev->cdev[i]) { 291 if (gdev->cdev[i]) {
238 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) 292 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
239 dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 293 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
@@ -243,6 +297,7 @@ error:
243 put_device(&gdev->dev); 297 put_device(&gdev->dev);
244 return rc; 298 return rc;
245} 299}
300EXPORT_SYMBOL(ccwgroup_create_from_string);
246 301
247static int __init 302static int __init
248init_ccwgroup (void) 303init_ccwgroup (void)
@@ -318,7 +373,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
318{ 373{
319 struct ccwgroup_device *gdev; 374 struct ccwgroup_device *gdev;
320 struct ccwgroup_driver *gdrv; 375 struct ccwgroup_driver *gdrv;
321 unsigned int value; 376 unsigned long value;
322 int ret; 377 int ret;
323 378
324 gdev = to_ccwgroupdev(dev); 379 gdev = to_ccwgroupdev(dev);
@@ -329,7 +384,9 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
329 if (!try_module_get(gdrv->owner)) 384 if (!try_module_get(gdrv->owner))
330 return -EINVAL; 385 return -EINVAL;
331 386
332 value = simple_strtoul(buf, NULL, 0); 387 ret = strict_strtoul(buf, 0, &value);
388 if (ret)
389 goto out;
333 ret = count; 390 ret = count;
334 if (value == 1) 391 if (value == 1)
335 ccwgroup_set_online(gdev); 392 ccwgroup_set_online(gdev);
@@ -337,6 +394,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
337 ccwgroup_set_offline(gdev); 394 ccwgroup_set_offline(gdev);
338 else 395 else
339 ret = -EINVAL; 396 ret = -EINVAL;
397out:
340 module_put(gdrv->owner); 398 module_put(gdrv->owner);
341 return ret; 399 return ret;
342} 400}
@@ -518,6 +576,5 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
518MODULE_LICENSE("GPL"); 576MODULE_LICENSE("GPL");
519EXPORT_SYMBOL(ccwgroup_driver_register); 577EXPORT_SYMBOL(ccwgroup_driver_register);
520EXPORT_SYMBOL(ccwgroup_driver_unregister); 578EXPORT_SYMBOL(ccwgroup_driver_unregister);
521EXPORT_SYMBOL(ccwgroup_create);
522EXPORT_SYMBOL(ccwgroup_probe_ccwdev); 579EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
523EXPORT_SYMBOL(ccwgroup_remove_ccwdev); 580EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
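ccwgroup_create_from_string() centralises what cu3088 and qeth used to do by hand: split a comma-separated (and possibly newline-terminated) list of bus ids out of the sysfs buffer, sanity-check each one against the cssid.ssid.devno form, and only then look the devices up. The tokenising part boils down to a loop like the following plain-C sketch (simplified relative to __get_next_bus_id(); helper and buffer names invented):

#include <stdio.h>
#include <string.h>

/* Copy the next comma-separated token from *list into id[] (NUL-terminated),
 * advance *list past it, and return 0; a trailing '\n' on the last token is
 * stripped. Returns -1 if the token does not fit. */
static int next_bus_id(const char **list, char *id, size_t id_size)
{
	const char *start = *list;
	const char *end = strchr(start, ',');
	size_t len;

	if (!end) {				/* last entry */
		end = strchr(start, '\n');
		len = end ? (size_t)(end - start) : strlen(start);
		*list = NULL;			/* nothing left afterwards */
	} else {
		len = end - start;
		*list = end + 1;
	}
	if (len >= id_size)
		return -1;
	memcpy(id, start, len);
	id[len] = '\0';
	return 0;
}

int main(void)
{
	const char *buf = "0.0.1000,0.0.1001\n";	/* typical sysfs input */
	char id[16];

	while (buf && next_bus_id(&buf, id, sizeof(id)) == 0)
		printf("bus id: %s\n", id);		/* 0.0.1000, then 0.0.1001 */
	return 0;
}

The kernel helper additionally rejects tokens that do not scan as "%x.%1x.%04x" and bails out with -EINVAL when too few ids are supplied or trailing characters remain, which is exactly the error handling visible in the hunk above.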
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 23ffcc4768a7..08a578161306 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -407,8 +407,7 @@ cio_modify (struct subchannel *sch)
407/* 407/*
408 * Enable subchannel. 408 * Enable subchannel.
409 */ 409 */
410int cio_enable_subchannel(struct subchannel *sch, unsigned int isc, 410int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
411 u32 intparm)
412{ 411{
413 char dbf_txt[15]; 412 char dbf_txt[15];
414 int ccode; 413 int ccode;
@@ -426,7 +425,7 @@ int cio_enable_subchannel(struct subchannel *sch, unsigned int isc,
426 425
427 for (retry = 5, ret = 0; retry > 0; retry--) { 426 for (retry = 5, ret = 0; retry > 0; retry--) {
428 sch->schib.pmcw.ena = 1; 427 sch->schib.pmcw.ena = 1;
429 sch->schib.pmcw.isc = isc; 428 sch->schib.pmcw.isc = sch->isc;
430 sch->schib.pmcw.intparm = intparm; 429 sch->schib.pmcw.intparm = intparm;
431 ret = cio_modify(sch); 430 ret = cio_modify(sch);
432 if (ret == -ENODEV) 431 if (ret == -ENODEV)
@@ -600,6 +599,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
600 else 599 else
601 sch->opm = chp_get_sch_opm(sch); 600 sch->opm = chp_get_sch_opm(sch);
602 sch->lpm = sch->schib.pmcw.pam & sch->opm; 601 sch->lpm = sch->schib.pmcw.pam & sch->opm;
602 sch->isc = 3;
603 603
604 CIO_DEBUG(KERN_INFO, 0, 604 CIO_DEBUG(KERN_INFO, 0,
605 "Detected device %04x on subchannel 0.%x.%04X" 605 "Detected device %04x on subchannel 0.%x.%04X"
@@ -610,13 +610,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
610 610
611 /* 611 /*
612 * We now have to initially ... 612 * We now have to initially ...
613 * ... set "interruption subclass"
614 * ... enable "concurrent sense" 613 * ... enable "concurrent sense"
615 * ... enable "multipath mode" if more than one 614 * ... enable "multipath mode" if more than one
616 * CHPID is available. This is done regardless 615 * CHPID is available. This is done regardless
617 * whether multiple paths are available for us. 616 * whether multiple paths are available for us.
618 */ 617 */
619 sch->schib.pmcw.isc = 3; /* could be smth. else */
620 sch->schib.pmcw.csense = 1; /* concurrent sense */ 618 sch->schib.pmcw.csense = 1; /* concurrent sense */
621 sch->schib.pmcw.ena = 0; 619 sch->schib.pmcw.ena = 0;
622 if ((sch->lpm & (sch->lpm - 1)) != 0) 620 if ((sch->lpm & (sch->lpm - 1)) != 0)
@@ -812,6 +810,7 @@ cio_probe_console(void)
812 * enable console I/O-interrupt subclass 7 810 * enable console I/O-interrupt subclass 7
813 */ 811 */
814 ctl_set_bit(6, 24); 812 ctl_set_bit(6, 24);
813 console_subchannel.isc = 7;
815 console_subchannel.schib.pmcw.isc = 7; 814 console_subchannel.schib.pmcw.isc = 7;
816 console_subchannel.schib.pmcw.intparm = 815 console_subchannel.schib.pmcw.intparm =
817 (u32)(addr_t)&console_subchannel; 816 (u32)(addr_t)&console_subchannel;
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 08f2235c5a6f..3c75412904dc 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -74,6 +74,7 @@ struct subchannel {
74 __u8 lpm; /* logical path mask */ 74 __u8 lpm; /* logical path mask */
75 __u8 opm; /* operational path mask */ 75 __u8 opm; /* operational path mask */
76 struct schib schib; /* subchannel information block */ 76 struct schib schib; /* subchannel information block */
77 int isc; /* desired interruption subclass */
77 struct chsc_ssd_info ssd_info; /* subchannel description */ 78 struct chsc_ssd_info ssd_info; /* subchannel description */
78 struct device dev; /* entry in device tree */ 79 struct device dev; /* entry in device tree */
79 struct css_driver *driver; 80 struct css_driver *driver;
@@ -85,7 +86,7 @@ struct subchannel {
85#define to_subchannel(n) container_of(n, struct subchannel, dev) 86#define to_subchannel(n) container_of(n, struct subchannel, dev)
86 87
87extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); 88extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
88extern int cio_enable_subchannel(struct subchannel *, unsigned int, u32); 89extern int cio_enable_subchannel(struct subchannel *, u32);
89extern int cio_disable_subchannel (struct subchannel *); 90extern int cio_disable_subchannel (struct subchannel *);
90extern int cio_cancel (struct subchannel *); 91extern int cio_cancel (struct subchannel *);
91extern int cio_clear (struct subchannel *); 92extern int cio_clear (struct subchannel *);
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index f4c132ab39ed..2808b6833b9e 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1219,16 +1219,21 @@ static ssize_t cmb_enable_store(struct device *dev,
 {
 	struct ccw_device *cdev;
 	int ret;
+	unsigned long val;
+
+	ret = strict_strtoul(buf, 16, &val);
+	if (ret)
+		return ret;
 
 	cdev = to_ccwdev(dev);
 
-	switch (buf[0]) {
-	case '0':
+	switch (val) {
+	case 0:
 		ret = disable_cmf(cdev);
 		if (ret)
 			dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
 		break;
-	case '1':
+	case 1:
 		ret = enable_cmf(cdev);
 		if (ret && ret != -EBUSY)
 			dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
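This store method, like the css and qdio ones below and the ccwgroup/online ones above, is switched from looking at buf[0] (or simple_strtoul(), which silently ignores trailing junk) to strict_strtoul(), which fails on anything that is not a clean number and lets the store routine propagate that error to the writer. The resulting shape is roughly the following fragment (hypothetical attribute; enable/disable bodies trimmed):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t foo_enable_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 16, &val);	/* rejects "1x", "yes", ... */
	if (ret)
		return ret;

	switch (val) {
	case 0:
		/* disable the feature */
		break;
	case 1:
		/* enable the feature */
		break;
	default:
		return -EINVAL;
	}
	return count;
}

Returning count on success keeps the usual sysfs convention that the whole write was consumed.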
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c1afab5f72d6..595e327d2f76 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -705,13 +705,17 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
 {
 	struct channel_subsystem *css = to_css(dev);
 	int ret;
+	unsigned long val;
 
+	ret = strict_strtoul(buf, 16, &val);
+	if (ret)
+		return ret;
 	mutex_lock(&css->mutex);
-	switch (buf[0]) {
-	case '0':
+	switch (val) {
+	case 0:
 		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
 		break;
-	case '1':
+	case 1:
 		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
 		break;
 	default:
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e0c7adb8958e..abfd601d237a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -512,8 +512,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
-	int i, force;
-	char *tmp;
+	int force, ret;
+	unsigned long i;
 
 	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
 		return -EAGAIN;
@@ -525,25 +525,30 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
 	if (!strncmp(buf, "force\n", count)) {
 		force = 1;
 		i = 1;
+		ret = 0;
 	} else {
 		force = 0;
-		i = simple_strtoul(buf, &tmp, 16);
+		ret = strict_strtoul(buf, 16, &i);
 	}
-
+	if (ret)
+		goto out;
 	switch (i) {
 	case 0:
 		online_store_handle_offline(cdev);
+		ret = count;
 		break;
 	case 1:
 		online_store_handle_online(cdev, force);
+		ret = count;
 		break;
 	default:
-		count = -EINVAL;
+		ret = -EINVAL;
 	}
+out:
 	if (cdev->drv)
 		module_put(cdev->drv->owner);
 	atomic_set(&cdev->private->onoff, 0);
-	return count;
+	return ret;
 }
 
 static ssize_t
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 4b92c84fb438..99403b0a97a7 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -555,8 +555,7 @@ ccw_device_recognition(struct ccw_device *cdev)
555 (cdev->private->state != DEV_STATE_BOXED)) 555 (cdev->private->state != DEV_STATE_BOXED))
556 return -EINVAL; 556 return -EINVAL;
557 sch = to_subchannel(cdev->dev.parent); 557 sch = to_subchannel(cdev->dev.parent);
558 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc, 558 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
559 (u32)(addr_t)sch);
560 if (ret != 0) 559 if (ret != 0)
561 /* Couldn't enable the subchannel for i/o. Sick device. */ 560 /* Couldn't enable the subchannel for i/o. Sick device. */
562 return ret; 561 return ret;
@@ -667,8 +666,7 @@ ccw_device_online(struct ccw_device *cdev)
667 sch = to_subchannel(cdev->dev.parent); 666 sch = to_subchannel(cdev->dev.parent);
668 if (css_init_done && !get_device(&cdev->dev)) 667 if (css_init_done && !get_device(&cdev->dev))
669 return -ENODEV; 668 return -ENODEV;
670 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc, 669 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
671 (u32)(addr_t)sch);
672 if (ret != 0) { 670 if (ret != 0) {
673 /* Couldn't enable the subchannel for i/o. Sick device. */ 671 /* Couldn't enable the subchannel for i/o. Sick device. */
674 if (ret == -ENODEV) 672 if (ret == -ENODEV)
@@ -1048,8 +1046,7 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1048 struct subchannel *sch; 1046 struct subchannel *sch;
1049 1047
1050 sch = to_subchannel(cdev->dev.parent); 1048 sch = to_subchannel(cdev->dev.parent);
1051 if (cio_enable_subchannel(sch, sch->schib.pmcw.isc, 1049 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
1052 (u32)(addr_t)sch) != 0)
1053 /* Couldn't enable the subchannel for i/o. Sick device. */ 1050 /* Couldn't enable the subchannel for i/o. Sick device. */
1054 return; 1051 return;
1055 1052
@@ -1082,7 +1079,6 @@ device_trigger_reprobe(struct subchannel *sch)
1082 */ 1079 */
1083 sch->lpm = sch->schib.pmcw.pam & sch->opm; 1080 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1084 /* Re-set some bits in the pmcw that were lost. */ 1081 /* Re-set some bits in the pmcw that were lost. */
1085 sch->schib.pmcw.isc = 3;
1086 sch->schib.pmcw.csense = 1; 1082 sch->schib.pmcw.csense = 1;
1087 sch->schib.pmcw.ena = 0; 1083 sch->schib.pmcw.ena = 0;
1088 if ((sch->lpm & (sch->lpm - 1)) != 0) 1084 if ((sch->lpm & (sch->lpm - 1)) != 0)
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a1718a0aa539..f308ad55a6d5 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -508,7 +508,7 @@ ccw_device_stlck(struct ccw_device *cdev)
508 return -ENOMEM; 508 return -ENOMEM;
509 } 509 }
510 spin_lock_irqsave(sch->lock, flags); 510 spin_lock_irqsave(sch->lock, flags);
511 ret = cio_enable_subchannel(sch, 3, (u32)(addr_t)sch); 511 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
512 if (ret) 512 if (ret)
513 goto out_unlock; 513 goto out_unlock;
514 /* 514 /*
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 10aa1e780801..445cf364e461 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -3632,7 +3632,7 @@ qdio_add_procfs_entry(void)
3632{ 3632{
3633 proc_perf_file_registration=0; 3633 proc_perf_file_registration=0;
3634 qdio_perf_proc_file=create_proc_entry(QDIO_PERF, 3634 qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
3635 S_IFREG|0444,&proc_root); 3635 S_IFREG|0444,NULL);
3636 if (qdio_perf_proc_file) { 3636 if (qdio_perf_proc_file) {
3637 qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read; 3637 qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
3638 } else proc_perf_file_registration=-1; 3638 } else proc_perf_file_registration=-1;
@@ -3647,7 +3647,7 @@ static void
3647qdio_remove_procfs_entry(void) 3647qdio_remove_procfs_entry(void)
3648{ 3648{
3649 if (!proc_perf_file_registration) /* means if it went ok earlier */ 3649 if (!proc_perf_file_registration) /* means if it went ok earlier */
3650 remove_proc_entry(QDIO_PERF,&proc_root); 3650 remove_proc_entry(QDIO_PERF,NULL);
3651} 3651}
3652 3652
3653/** 3653/**
@@ -3663,11 +3663,11 @@ qdio_performance_stats_show(struct bus_type *bus, char *buf)
3663static ssize_t 3663static ssize_t
3664qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count) 3664qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
3665{ 3665{
3666 char *tmp; 3666 unsigned long i;
3667 int i; 3667 int ret;
3668 3668
3669 i = simple_strtoul(buf, &tmp, 16); 3669 ret = strict_strtoul(buf, 16, &i);
3670 if ((i == 0) || (i == 1)) { 3670 if (!ret && ((i == 0) || (i == 1))) {
3671 if (i == qdio_performance_stats) 3671 if (i == qdio_performance_stats)
3672 return count; 3672 return count;
3673 qdio_performance_stats = i; 3673 qdio_performance_stats = i;
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index bbef3764fbf8..47a7e6200b26 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -17,6 +17,7 @@
17#include <linux/virtio_config.h> 17#include <linux/virtio_config.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/virtio_ring.h> 19#include <linux/virtio_ring.h>
20#include <linux/pfn.h>
20#include <asm/io.h> 21#include <asm/io.h>
21#include <asm/kvm_para.h> 22#include <asm/kvm_para.h>
22#include <asm/kvm_virtio.h> 23#include <asm/kvm_virtio.h>
@@ -180,11 +181,10 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
180 181
181 config = kvm_vq_config(kdev->desc)+index; 182 config = kvm_vq_config(kdev->desc)+index;
182 183
183 if (add_shared_memory(config->address, 184 err = vmem_add_mapping(config->address,
184 vring_size(config->num, PAGE_SIZE))) { 185 vring_size(config->num, PAGE_SIZE));
185 err = -ENOMEM; 186 if (err)
186 goto out; 187 goto out;
187 }
188 188
189 vq = vring_new_virtqueue(config->num, vdev, (void *) config->address, 189 vq = vring_new_virtqueue(config->num, vdev, (void *) config->address,
190 kvm_notify, callback); 190 kvm_notify, callback);
@@ -202,8 +202,8 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
202 vq->priv = config; 202 vq->priv = config;
203 return vq; 203 return vq;
204unmap: 204unmap:
205 remove_shared_memory(config->address, vring_size(config->num, 205 vmem_remove_mapping(config->address,
206 PAGE_SIZE)); 206 vring_size(config->num, PAGE_SIZE));
207out: 207out:
208 return ERR_PTR(err); 208 return ERR_PTR(err);
209} 209}
@@ -213,8 +213,8 @@ static void kvm_del_vq(struct virtqueue *vq)
213 struct kvm_vqconfig *config = vq->priv; 213 struct kvm_vqconfig *config = vq->priv;
214 214
215 vring_del_virtqueue(vq); 215 vring_del_virtqueue(vq);
216 remove_shared_memory(config->address, 216 vmem_remove_mapping(config->address,
217 vring_size(config->num, PAGE_SIZE)); 217 vring_size(config->num, PAGE_SIZE));
218} 218}
219 219
220/* 220/*
@@ -318,12 +318,13 @@ static int __init kvm_devices_init(void)
318 return rc; 318 return rc;
319 } 319 }
320 320
321 if (add_shared_memory((max_pfn) << PAGE_SHIFT, PAGE_SIZE)) { 321 rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE);
322 if (rc) {
322 device_unregister(&kvm_root); 323 device_unregister(&kvm_root);
323 return -ENOMEM; 324 return rc;
324 } 325 }
325 326
326 kvm_devices = (void *) (max_pfn << PAGE_SHIFT); 327 kvm_devices = (void *) PFN_PHYS(max_pfn);
327 328
328 ctl_set_bit(0, 9); 329 ctl_set_bit(0, 9);
329 register_external_interrupt(0x2603, kvm_extint_handler); 330 register_external_interrupt(0x2603, kvm_extint_handler);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index 76728ae4b843..8e7697305a4c 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -62,30 +62,14 @@ static struct device *cu3088_root_dev;
 static ssize_t
 group_write(struct device_driver *drv, const char *buf, size_t count)
 {
-	const char *start, *end;
-	char bus_ids[2][BUS_ID_SIZE], *argv[2];
-	int i;
 	int ret;
 	struct ccwgroup_driver *cdrv;
 
 	cdrv = to_ccwgroupdrv(drv);
 	if (!cdrv)
 		return -EINVAL;
-	start = buf;
-	for (i=0; i<2; i++) {
-		static const char delim[] = {',', '\n'};
-		int len;
-
-		if (!(end = strchr(start, delim[i])))
-			return -EINVAL;
-		len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start + 1);
-		strlcpy (bus_ids[i], start, len);
-		argv[i] = bus_ids[i];
-		start = end + 1;
-	}
-
-	ret = ccwgroup_create(cu3088_root_dev, cdrv->driver_id,
-			      &cu3088_driver, 2, argv);
+	ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
+					  &cu3088_driver, 2, buf);
 
 	return (ret == 0) ? count : ret;
 }
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index f51ed9972587..dd22f4b37037 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1793,7 +1793,8 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
1793 skb->protocol = card->lan_type_trans(skb, card->dev); 1793 skb->protocol = card->lan_type_trans(skb, card->dev);
1794 card->stats.rx_bytes += skb_len; 1794 card->stats.rx_bytes += skb_len;
1795 card->stats.rx_packets++; 1795 card->stats.rx_packets++;
1796 *((__u32 *)skb->cb) = ++card->pkt_seq; 1796 if (skb->protocol == htons(ETH_P_802_2))
1797 *((__u32 *)skb->cb) = ++card->pkt_seq;
1797 netif_rx(skb); 1798 netif_rx(skb);
1798} 1799}
1799 1800
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 8f876f6ab367..e4ba6a0372ac 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1313,8 +1313,6 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1313 * and throw away packet. 1313 * and throw away packet.
1314 */ 1314 */
1315 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { 1315 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1316 if (!in_atomic())
1317 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1318 dev_kfree_skb(skb); 1316 dev_kfree_skb(skb);
1319 privptr->stats.tx_dropped++; 1317 privptr->stats.tx_dropped++;
1320 privptr->stats.tx_errors++; 1318 privptr->stats.tx_errors++;
@@ -2147,6 +2145,7 @@ static int __init netiucv_init(void)
2147 if (rc) 2145 if (rc)
2148 goto out_dbf; 2146 goto out_dbf;
2149 IUCV_DBF_TEXT(trace, 3, __func__); 2147 IUCV_DBF_TEXT(trace, 3, __func__);
2148 netiucv_driver.groups = netiucv_drv_attr_groups;
2150 rc = driver_register(&netiucv_driver); 2149 rc = driver_register(&netiucv_driver);
2151 if (rc) { 2150 if (rc) {
2152 PRINT_ERR("NETIUCV: failed to register driver.\n"); 2151 PRINT_ERR("NETIUCV: failed to register driver.\n");
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 66f4f12503c9..699ac11debd8 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -72,22 +72,7 @@ struct qeth_dbf_info {
72 debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) 72 debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
73 73
74#define QETH_DBF_TEXT_(name, level, text...) \ 74#define QETH_DBF_TEXT_(name, level, text...) \
75 do { \ 75 qeth_dbf_longtext(QETH_DBF_##name, level, text)
76 if (qeth_dbf_passes(qeth_dbf[QETH_DBF_##name].id, level)) { \
77 char *dbf_txt_buf = \
78 get_cpu_var(QETH_DBF_TXT_BUF); \
79 sprintf(dbf_txt_buf, text); \
80 debug_text_event(qeth_dbf[QETH_DBF_##name].id, \
81 level, dbf_txt_buf); \
82 put_cpu_var(QETH_DBF_TXT_BUF); \
83 } \
84 } while (0)
85
86/* Allow to sort out low debug levels early to avoid wasted sprints */
87static inline int qeth_dbf_passes(debug_info_t *dbf_grp, int level)
88{
89 return (level <= dbf_grp->level);
90}
91 76
92/** 77/**
93 * some more debug stuff 78 * some more debug stuff
@@ -773,27 +758,6 @@ static inline int qeth_get_micros(void)
773 return (int) (get_clock() >> 12); 758 return (int) (get_clock() >> 12);
774} 759}
775 760
776static inline void *qeth_push_skb(struct qeth_card *card, struct sk_buff *skb,
777 int size)
778{
779 void *hdr;
780
781 hdr = (void *) skb_push(skb, size);
782 /*
783 * sanity check, the Linux memory allocation scheme should
784 * never present us cases like this one (the qdio header size plus
785 * the first 40 bytes of the paket cross a 4k boundary)
786 */
787 if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
788 (((unsigned long) hdr + size +
789 QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
790 PRINT_ERR("Misaligned packet on interface %s. Discarded.",
791 QETH_CARD_IFNAME(card));
792 return NULL;
793 }
794 return hdr;
795}
796
797static inline int qeth_get_ip_version(struct sk_buff *skb) 761static inline int qeth_get_ip_version(struct sk_buff *skb)
798{ 762{
799 switch (skb->protocol) { 763 switch (skb->protocol) {
@@ -806,6 +770,12 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
806 } 770 }
807} 771}
808 772
773static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
774 struct qeth_buffer_pool_entry *entry)
775{
776 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
777}
778
809struct qeth_eddp_context; 779struct qeth_eddp_context;
810extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
811extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
@@ -843,8 +813,6 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
843int qeth_query_setadapterparms(struct qeth_card *); 813int qeth_query_setadapterparms(struct qeth_card *);
844int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, 814int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
845 unsigned int, const char *); 815 unsigned int, const char *);
846void qeth_put_buffer_pool_entry(struct qeth_card *,
847 struct qeth_buffer_pool_entry *);
848void qeth_queue_input_buffer(struct qeth_card *, int); 816void qeth_queue_input_buffer(struct qeth_card *, int);
849struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, 817struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
850 struct qdio_buffer *, struct qdio_buffer_element **, int *, 818 struct qdio_buffer *, struct qdio_buffer_element **, int *,
@@ -880,8 +848,6 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
880 void *reply_param); 848 void *reply_param);
881int qeth_get_cast_type(struct qeth_card *, struct sk_buff *); 849int qeth_get_cast_type(struct qeth_card *, struct sk_buff *);
882int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 850int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
883struct sk_buff *qeth_prepare_skb(struct qeth_card *, struct sk_buff *,
884 struct qeth_hdr **);
885int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); 851int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
886int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 852int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
887 struct sk_buff *, struct qeth_hdr *, int, 853 struct sk_buff *, struct qeth_hdr *, int,
@@ -894,6 +860,8 @@ void qeth_core_get_ethtool_stats(struct net_device *,
894 struct ethtool_stats *, u64 *); 860 struct ethtool_stats *, u64 *);
895void qeth_core_get_strings(struct net_device *, u32, u8 *); 861void qeth_core_get_strings(struct net_device *, u32, u8 *);
896void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); 862void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
863void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
864int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
897 865
898/* exports for OSN */ 866/* exports for OSN */
899int qeth_osn_assist(struct net_device *, void *, int); 867int qeth_osn_assist(struct net_device *, void *, int);
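QETH_DBF_TEXT_ used to expand, at every call site, into a per-CPU buffer lookup plus sprintf(); the header now simply forwards to qeth_dbf_longtext(), declared at the end of this hunk, so the level filtering and formatting happen once, out of line. The general shape of such a helper is sketched below as plain, self-contained C (the real function formats into a small stack buffer and hands it to the s390 debug facility via debug_text_event()):

#include <stdarg.h>
#include <stdio.h>

static int dbf_level = 2;		/* messages above this level are dropped */

/* Out-of-line replacement for a sprintf-in-a-macro: check the level first,
 * then format once into a bounded buffer and pass it to the backend. */
static void dbf_longtext(int level, const char *fmt, ...)
{
	char buf[32];
	va_list args;

	if (level > dbf_level)
		return;
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	fprintf(stderr, "dbf[%d]: %s\n", level, buf);	/* stand-in backend */
}

int main(void)
{
	dbf_longtext(1, "rc=%d", -22);	/* passes the filter */
	dbf_longtext(5, "dropped");	/* filtered out before formatting */
	return 0;
}

Moving the filter into the callee also means callers no longer pay for the sprintf when tracing is off, which is why the per-CPU qeth_core_dbf_txt_buf is dropped from qeth_core_main.c below.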
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 055f5c3e7b56..436bf1f6d4a6 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -26,9 +26,6 @@
26#include "qeth_core.h" 26#include "qeth_core.h"
27#include "qeth_core_offl.h" 27#include "qeth_core_offl.h"
28 28
29static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf);
30#define QETH_DBF_TXT_BUF qeth_core_dbf_txt_buf
31
32struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { 29struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
33 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ 30 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
34 /* N P A M L V H */ 31 /* N P A M L V H */
@@ -2255,14 +2252,6 @@ void qeth_print_status_message(struct qeth_card *card)
2255} 2252}
2256EXPORT_SYMBOL_GPL(qeth_print_status_message); 2253EXPORT_SYMBOL_GPL(qeth_print_status_message);
2257 2254
2258void qeth_put_buffer_pool_entry(struct qeth_card *card,
2259 struct qeth_buffer_pool_entry *entry)
2260{
2261 QETH_DBF_TEXT(TRACE, 6, "ptbfplen");
2262 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
2263}
2264EXPORT_SYMBOL_GPL(qeth_put_buffer_pool_entry);
2265
2266static void qeth_initialize_working_pool_list(struct qeth_card *card) 2255static void qeth_initialize_working_pool_list(struct qeth_card *card)
2267{ 2256{
2268 struct qeth_buffer_pool_entry *entry; 2257 struct qeth_buffer_pool_entry *entry;
@@ -2603,7 +2592,6 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
2603 int rc; 2592 int rc;
2604 int newcount = 0; 2593 int newcount = 0;
2605 2594
2606 QETH_DBF_TEXT(TRACE, 6, "queinbuf");
2607 count = (index < queue->next_buf_to_init)? 2595 count = (index < queue->next_buf_to_init)?
2608 card->qdio.in_buf_pool.buf_count - 2596 card->qdio.in_buf_pool.buf_count -
2609 (queue->next_buf_to_init - index) : 2597 (queue->next_buf_to_init - index) :
@@ -2792,8 +2780,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2792 int i; 2780 int i;
2793 unsigned int qdio_flags; 2781 unsigned int qdio_flags;
2794 2782
2795 QETH_DBF_TEXT(TRACE, 6, "flushbuf");
2796
2797 for (i = index; i < index + count; ++i) { 2783 for (i = index; i < index + count; ++i) {
2798 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2784 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2799 buf->buffer->element[buf->next_element_to_fill - 1].flags |= 2785 buf->buffer->element[buf->next_element_to_fill - 1].flags |=
@@ -3037,49 +3023,6 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3037} 3023}
3038EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3024EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3039 3025
3040static void __qeth_free_new_skb(struct sk_buff *orig_skb,
3041 struct sk_buff *new_skb)
3042{
3043 if (orig_skb != new_skb)
3044 dev_kfree_skb_any(new_skb);
3045}
3046
3047static inline struct sk_buff *qeth_realloc_headroom(struct qeth_card *card,
3048 struct sk_buff *skb, int size)
3049{
3050 struct sk_buff *new_skb = skb;
3051
3052 if (skb_headroom(skb) >= size)
3053 return skb;
3054 new_skb = skb_realloc_headroom(skb, size);
3055 if (!new_skb)
3056 PRINT_ERR("Could not realloc headroom for qeth_hdr "
3057 "on interface %s", QETH_CARD_IFNAME(card));
3058 return new_skb;
3059}
3060
3061struct sk_buff *qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3062 struct qeth_hdr **hdr)
3063{
3064 struct sk_buff *new_skb;
3065
3066 QETH_DBF_TEXT(TRACE, 6, "prepskb");
3067
3068 new_skb = qeth_realloc_headroom(card, skb,
3069 sizeof(struct qeth_hdr));
3070 if (!new_skb)
3071 return NULL;
3072
3073 *hdr = ((struct qeth_hdr *)qeth_push_skb(card, new_skb,
3074 sizeof(struct qeth_hdr)));
3075 if (*hdr == NULL) {
3076 __qeth_free_new_skb(skb, new_skb);
3077 return NULL;
3078 }
3079 return new_skb;
3080}
3081EXPORT_SYMBOL_GPL(qeth_prepare_skb);
3082
3083int qeth_get_elements_no(struct qeth_card *card, void *hdr, 3026int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3084 struct sk_buff *skb, int elems) 3027 struct sk_buff *skb, int elems)
3085{ 3028{
@@ -3100,8 +3043,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3100} 3043}
3101EXPORT_SYMBOL_GPL(qeth_get_elements_no); 3044EXPORT_SYMBOL_GPL(qeth_get_elements_no);
3102 3045
3103static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, 3046static inline void __qeth_fill_buffer(struct sk_buff *skb,
3104 int is_tso, int *next_element_to_fill) 3047 struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill)
3105{ 3048{
3106 int length = skb->len; 3049 int length = skb->len;
3107 int length_here; 3050 int length_here;
@@ -3143,15 +3086,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
3143 *next_element_to_fill = element; 3086 *next_element_to_fill = element;
3144} 3087}
3145 3088
3146static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, 3089static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3147 struct qeth_qdio_out_buffer *buf, struct sk_buff *skb) 3090 struct qeth_qdio_out_buffer *buf, struct sk_buff *skb)
3148{ 3091{
3149 struct qdio_buffer *buffer; 3092 struct qdio_buffer *buffer;
3150 struct qeth_hdr_tso *hdr; 3093 struct qeth_hdr_tso *hdr;
3151 int flush_cnt = 0, hdr_len, large_send = 0; 3094 int flush_cnt = 0, hdr_len, large_send = 0;
3152 3095
3153 QETH_DBF_TEXT(TRACE, 6, "qdfillbf");
3154
3155 buffer = buf->buffer; 3096 buffer = buf->buffer;
3156 atomic_inc(&skb->users); 3097 atomic_inc(&skb->users);
3157 skb_queue_tail(&buf->skb_list, skb); 3098 skb_queue_tail(&buf->skb_list, skb);
@@ -3210,8 +3151,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3210 int flush_cnt = 0; 3151 int flush_cnt = 0;
3211 int index; 3152 int index;
3212 3153
3213 QETH_DBF_TEXT(TRACE, 6, "dosndpfa");
3214
3215 /* spin until we get the queue ... */ 3154 /* spin until we get the queue ... */
3216 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, 3155 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3217 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); 3156 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
@@ -3263,8 +3202,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3263 int tmp; 3202 int tmp;
3264 int rc = 0; 3203 int rc = 0;
3265 3204
3266 QETH_DBF_TEXT(TRACE, 6, "dosndpkt");
3267
3268 /* spin until we get the queue ... */ 3205 /* spin until we get the queue ... */
3269 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, 3206 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3270 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); 3207 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
@@ -3827,27 +3764,8 @@ static struct ccw_driver qeth_ccw_driver = {
3827static int qeth_core_driver_group(const char *buf, struct device *root_dev, 3764static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3828 unsigned long driver_id) 3765 unsigned long driver_id)
3829{ 3766{
3830 const char *start, *end; 3767 return ccwgroup_create_from_string(root_dev, driver_id,
3831 char bus_ids[3][BUS_ID_SIZE], *argv[3]; 3768 &qeth_ccw_driver, 3, buf);
3832 int i;
3833
3834 start = buf;
3835 for (i = 0; i < 3; i++) {
3836 static const char delim[] = { ',', ',', '\n' };
3837 int len;
3838
3839 end = strchr(start, delim[i]);
3840 if (!end)
3841 return -EINVAL;
3842 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start);
3843 strncpy(bus_ids[i], start, len);
3844 bus_ids[i][len] = '\0';
3845 start = end + 1;
3846 argv[i] = bus_ids[i];
3847 }
3848
3849 return (ccwgroup_create(root_dev, driver_id,
3850 &qeth_ccw_driver, 3, argv));
3851} 3769}
3852 3770
3853int qeth_core_hardsetup_card(struct qeth_card *card) 3771int qeth_core_hardsetup_card(struct qeth_card *card)
@@ -3885,8 +3803,9 @@ retry:
3885 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3803 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3886 return rc; 3804 return rc;
3887 } 3805 }
3888 3806 mpno = qdio_get_ssqd_pct(CARD_DDEV(card));
3889 mpno = QETH_MAX_PORTNO; 3807 if (mpno)
3808 mpno = min(mpno - 1, QETH_MAX_PORTNO);
3890 if (card->info.portno > mpno) { 3809 if (card->info.portno > mpno) {
3891 PRINT_ERR("Device %s does not offer port number %d \n.", 3810 PRINT_ERR("Device %s does not offer port number %d \n.",
3892 CARD_BUS_ID(card), card->info.portno); 3811 CARD_BUS_ID(card), card->info.portno);
@@ -3980,7 +3899,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
3980 int use_rx_sg = 0; 3899 int use_rx_sg = 0;
3981 int frag = 0; 3900 int frag = 0;
3982 3901
3983 QETH_DBF_TEXT(TRACE, 6, "nextskb");
3984 /* qeth_hdr must not cross element boundaries */ 3902 /* qeth_hdr must not cross element boundaries */
3985 if (element->length < offset + sizeof(struct qeth_hdr)) { 3903 if (element->length < offset + sizeof(struct qeth_hdr)) {
3986 if (qeth_is_last_sbale(element)) 3904 if (qeth_is_last_sbale(element))
@@ -4086,6 +4004,18 @@ static void qeth_unregister_dbf_views(void)
4086 } 4004 }
4087} 4005}
4088 4006
4007void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...)
4008{
4009 char dbf_txt_buf[32];
4010
4011 if (level > (qeth_dbf[dbf_nix].id)->level)
4012 return;
4013 snprintf(dbf_txt_buf, sizeof(dbf_txt_buf), text);
4014 debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf);
4015
4016}
4017EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
4018
4089static int qeth_register_dbf_views(void) 4019static int qeth_register_dbf_views(void)
4090{ 4020{
4091 int ret; 4021 int ret;
@@ -4433,6 +4363,96 @@ void qeth_core_get_drvinfo(struct net_device *dev,
4433} 4363}
4434EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); 4364EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
4435 4365
4366int qeth_core_ethtool_get_settings(struct net_device *netdev,
4367 struct ethtool_cmd *ecmd)
4368{
4369 struct qeth_card *card = netdev_priv(netdev);
4370 enum qeth_link_types link_type;
4371
4372 if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
4373 link_type = QETH_LINK_TYPE_10GBIT_ETH;
4374 else
4375 link_type = card->info.link_type;
4376
4377 ecmd->transceiver = XCVR_INTERNAL;
4378 ecmd->supported = SUPPORTED_Autoneg;
4379 ecmd->advertising = ADVERTISED_Autoneg;
4380 ecmd->duplex = DUPLEX_FULL;
4381 ecmd->autoneg = AUTONEG_ENABLE;
4382
4383 switch (link_type) {
4384 case QETH_LINK_TYPE_FAST_ETH:
4385 case QETH_LINK_TYPE_LANE_ETH100:
4386 ecmd->supported |= SUPPORTED_10baseT_Half |
4387 SUPPORTED_10baseT_Full |
4388 SUPPORTED_100baseT_Half |
4389 SUPPORTED_100baseT_Full |
4390 SUPPORTED_TP;
4391 ecmd->advertising |= ADVERTISED_10baseT_Half |
4392 ADVERTISED_10baseT_Full |
4393 ADVERTISED_100baseT_Half |
4394 ADVERTISED_100baseT_Full |
4395 ADVERTISED_TP;
4396 ecmd->speed = SPEED_100;
4397 ecmd->port = PORT_TP;
4398 break;
4399
4400 case QETH_LINK_TYPE_GBIT_ETH:
4401 case QETH_LINK_TYPE_LANE_ETH1000:
4402 ecmd->supported |= SUPPORTED_10baseT_Half |
4403 SUPPORTED_10baseT_Full |
4404 SUPPORTED_100baseT_Half |
4405 SUPPORTED_100baseT_Full |
4406 SUPPORTED_1000baseT_Half |
4407 SUPPORTED_1000baseT_Full |
4408 SUPPORTED_FIBRE;
4409 ecmd->advertising |= ADVERTISED_10baseT_Half |
4410 ADVERTISED_10baseT_Full |
4411 ADVERTISED_100baseT_Half |
4412 ADVERTISED_100baseT_Full |
4413 ADVERTISED_1000baseT_Half |
4414 ADVERTISED_1000baseT_Full |
4415 ADVERTISED_FIBRE;
4416 ecmd->speed = SPEED_1000;
4417 ecmd->port = PORT_FIBRE;
4418 break;
4419
4420 case QETH_LINK_TYPE_10GBIT_ETH:
4421 ecmd->supported |= SUPPORTED_10baseT_Half |
4422 SUPPORTED_10baseT_Full |
4423 SUPPORTED_100baseT_Half |
4424 SUPPORTED_100baseT_Full |
4425 SUPPORTED_1000baseT_Half |
4426 SUPPORTED_1000baseT_Full |
4427 SUPPORTED_10000baseT_Full |
4428 SUPPORTED_FIBRE;
4429 ecmd->advertising |= ADVERTISED_10baseT_Half |
4430 ADVERTISED_10baseT_Full |
4431 ADVERTISED_100baseT_Half |
4432 ADVERTISED_100baseT_Full |
4433 ADVERTISED_1000baseT_Half |
4434 ADVERTISED_1000baseT_Full |
4435 ADVERTISED_10000baseT_Full |
4436 ADVERTISED_FIBRE;
4437 ecmd->speed = SPEED_10000;
4438 ecmd->port = PORT_FIBRE;
4439 break;
4440
4441 default:
4442 ecmd->supported |= SUPPORTED_10baseT_Half |
4443 SUPPORTED_10baseT_Full |
4444 SUPPORTED_TP;
4445 ecmd->advertising |= ADVERTISED_10baseT_Half |
4446 ADVERTISED_10baseT_Full |
4447 ADVERTISED_TP;
4448 ecmd->speed = SPEED_10;
4449 ecmd->port = PORT_TP;
4450 }
4451
4452 return 0;
4453}
4454EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
4455
4436static int __init qeth_core_init(void) 4456static int __init qeth_core_init(void)
4437{ 4457{
4438 int rc; 4458 int rc;
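The qeth_core_ethtool_get_settings function added above maps the card's link type onto ethtool speed and port values. As a standalone illustration of that mapping only (plain userspace C with simplified, hypothetical enum and struct names, not the kernel structures or part of the patch), the switch reduces to:

#include <stdio.h>

/* simplified stand-ins for the qeth link types handled in the hunk */
enum link_type { FAST_ETH, GBIT_ETH, TEN_GBIT_ETH, UNKNOWN };

struct link_settings { int speed_mbps; const char *port; };

/* mirror of the switch in qeth_core_ethtool_get_settings:
 * 100 Mbit link types report twisted pair, 1G/10G report fibre,
 * anything unrecognized falls back to 10 Mbit twisted pair. */
static struct link_settings map_link(enum link_type t)
{
	switch (t) {
	case FAST_ETH:     return (struct link_settings){ 100,   "TP"    };
	case GBIT_ETH:     return (struct link_settings){ 1000,  "FIBRE" };
	case TEN_GBIT_ETH: return (struct link_settings){ 10000, "FIBRE" };
	default:           return (struct link_settings){ 10,    "TP"    };
	}
}

int main(void)
{
	struct link_settings s = map_link(GBIT_ETH);
	printf("speed=%d port=%s\n", s.speed_mbps, s.port);
	return 0;
}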
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 3921d1631a78..86ec50ddae13 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -22,9 +22,6 @@
22#include "qeth_core.h" 22#include "qeth_core.h"
23#include "qeth_core_offl.h" 23#include "qeth_core_offl.h"
24 24
25#define QETH_DBF_TXT_BUF qeth_l2_dbf_txt_buf
26static DEFINE_PER_CPU(char[256], qeth_l2_dbf_txt_buf);
27
28static int qeth_l2_set_offline(struct ccwgroup_device *); 25static int qeth_l2_set_offline(struct ccwgroup_device *);
29static int qeth_l2_stop(struct net_device *); 26static int qeth_l2_stop(struct net_device *);
30static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); 27static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
@@ -635,8 +632,6 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
635 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 632 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
636 struct qeth_eddp_context *ctx = NULL; 633 struct qeth_eddp_context *ctx = NULL;
637 634
638 QETH_DBF_TEXT(TRACE, 6, "l2xmit");
639
640 if ((card->state != CARD_STATE_UP) || !card->lan_online) { 635 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
641 card->stats.tx_carrier_errors++; 636 card->stats.tx_carrier_errors++;
642 goto tx_drop; 637 goto tx_drop;
@@ -658,9 +653,12 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
658 if (card->info.type == QETH_CARD_TYPE_OSN) 653 if (card->info.type == QETH_CARD_TYPE_OSN)
659 hdr = (struct qeth_hdr *)skb->data; 654 hdr = (struct qeth_hdr *)skb->data;
660 else { 655 else {
661 new_skb = qeth_prepare_skb(card, skb, &hdr); 656 /* create a clone with writeable headroom */
657 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr));
662 if (!new_skb) 658 if (!new_skb)
663 goto tx_drop; 659 goto tx_drop;
660 hdr = (struct qeth_hdr *)skb_push(new_skb,
661 sizeof(struct qeth_hdr));
664 qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type); 662 qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
665 } 663 }
666 664
@@ -747,7 +745,6 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
747 int index; 745 int index;
748 int i; 746 int i;
749 747
750 QETH_DBF_TEXT(TRACE, 6, "qdinput");
751 card = (struct qeth_card *) card_ptr; 748 card = (struct qeth_card *) card_ptr;
752 net_dev = card->dev; 749 net_dev = card->dev;
753 if (card->options.performance_stats) { 750 if (card->options.performance_stats) {
@@ -852,6 +849,22 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
852 return; 849 return;
853} 850}
854 851
852static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data)
853{
854 struct qeth_card *card = netdev_priv(dev);
855
856 if (data) {
857 if (card->options.large_send == QETH_LARGE_SEND_NO) {
858 card->options.large_send = QETH_LARGE_SEND_EDDP;
859 dev->features |= NETIF_F_TSO;
860 }
861 } else {
862 dev->features &= ~NETIF_F_TSO;
863 card->options.large_send = QETH_LARGE_SEND_NO;
864 }
865 return 0;
866}
867
855static struct ethtool_ops qeth_l2_ethtool_ops = { 868static struct ethtool_ops qeth_l2_ethtool_ops = {
856 .get_link = ethtool_op_get_link, 869 .get_link = ethtool_op_get_link,
857 .get_tx_csum = ethtool_op_get_tx_csum, 870 .get_tx_csum = ethtool_op_get_tx_csum,
@@ -859,11 +872,12 @@ static struct ethtool_ops qeth_l2_ethtool_ops = {
859 .get_sg = ethtool_op_get_sg, 872 .get_sg = ethtool_op_get_sg,
860 .set_sg = ethtool_op_set_sg, 873 .set_sg = ethtool_op_set_sg,
861 .get_tso = ethtool_op_get_tso, 874 .get_tso = ethtool_op_get_tso,
862 .set_tso = ethtool_op_set_tso, 875 .set_tso = qeth_l2_ethtool_set_tso,
863 .get_strings = qeth_core_get_strings, 876 .get_strings = qeth_core_get_strings,
864 .get_ethtool_stats = qeth_core_get_ethtool_stats, 877 .get_ethtool_stats = qeth_core_get_ethtool_stats,
865 .get_stats_count = qeth_core_get_stats_count, 878 .get_stats_count = qeth_core_get_stats_count,
866 .get_drvinfo = qeth_core_get_drvinfo, 879 .get_drvinfo = qeth_core_get_drvinfo,
880 .get_settings = qeth_core_ethtool_get_settings,
867}; 881};
868 882
869static struct ethtool_ops qeth_l2_osn_ops = { 883static struct ethtool_ops qeth_l2_osn_ops = {
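The new qeth_l2_ethtool_set_tso handler above only turns on NETIF_F_TSO when no large-send mode is configured yet, and clears both the feature flag and the large-send option on disable. A minimal userspace sketch of that state transition (hypothetical names and flag values, not the driver's structures):

#include <stdio.h>

#define F_TSO 0x1u                  /* stand-in for NETIF_F_TSO */
enum large_send { LS_NO, LS_EDDP }; /* stand-in for qeth large-send modes */

struct card_state { unsigned features; enum large_send large_send; };

/* mirrors qeth_l2_ethtool_set_tso: enabling TSO switches the card to
 * EDDP large send only if large send was off; disabling clears both. */
static void set_tso(struct card_state *c, int enable)
{
	if (enable) {
		if (c->large_send == LS_NO) {
			c->large_send = LS_EDDP;
			c->features |= F_TSO;
		}
	} else {
		c->features &= ~F_TSO;
		c->large_send = LS_NO;
	}
}

int main(void)
{
	struct card_state c = { 0, LS_NO };
	set_tso(&c, 1);
	printf("features=%#x large_send=%d\n", c.features, c.large_send);
	set_tso(&c, 0);
	printf("features=%#x large_send=%d\n", c.features, c.large_send);
	return 0;
}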
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 1be353593a59..9f143c83bba3 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,9 +13,6 @@
13 13
14#include "qeth_core.h" 14#include "qeth_core.h"
15 15
16#define QETH_DBF_TXT_BUF qeth_l3_dbf_txt_buf
17DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
18
19struct qeth_ipaddr { 16struct qeth_ipaddr {
20 struct list_head entry; 17 struct list_head entry;
21 enum qeth_ip_types type; 18 enum qeth_ip_types type;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e1bfe56087d6..94a8ead64ed4 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -28,8 +28,6 @@
28#include "qeth_l3.h" 28#include "qeth_l3.h"
29#include "qeth_core_offl.h" 29#include "qeth_core_offl.h"
30 30
31DEFINE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
32
33static int qeth_l3_set_offline(struct ccwgroup_device *); 31static int qeth_l3_set_offline(struct ccwgroup_device *);
34static int qeth_l3_recover(void *); 32static int qeth_l3_recover(void *);
35static int qeth_l3_stop(struct net_device *); 33static int qeth_l3_stop(struct net_device *);
@@ -2093,6 +2091,11 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2093 (card->state == CARD_STATE_UP)) { 2091 (card->state == CARD_STATE_UP)) {
2094 if (recovery_mode) 2092 if (recovery_mode)
2095 qeth_l3_stop(card->dev); 2093 qeth_l3_stop(card->dev);
2094 else {
2095 rtnl_lock();
2096 dev_close(card->dev);
2097 rtnl_unlock();
2098 }
2096 if (!card->use_hard_stop) { 2099 if (!card->use_hard_stop) {
2097 rc = qeth_send_stoplan(card); 2100 rc = qeth_send_stoplan(card);
2098 if (rc) 2101 if (rc)
@@ -2559,8 +2562,6 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2559static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 2562static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2560 struct sk_buff *skb, int ipv, int cast_type) 2563 struct sk_buff *skb, int ipv, int cast_type)
2561{ 2564{
2562 QETH_DBF_TEXT(TRACE, 6, "fillhdr");
2563
2564 memset(hdr, 0, sizeof(struct qeth_hdr)); 2565 memset(hdr, 0, sizeof(struct qeth_hdr));
2565 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; 2566 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2566 hdr->hdr.l3.ext_flags = 0; 2567 hdr->hdr.l3.ext_flags = 0;
@@ -2570,9 +2571,10 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2570 * v6 uses passthrough, v4 sets the tag in the QDIO header. 2571 * v6 uses passthrough, v4 sets the tag in the QDIO header.
2571 */ 2572 */
2572 if (card->vlangrp && vlan_tx_tag_present(skb)) { 2573 if (card->vlangrp && vlan_tx_tag_present(skb)) {
2573 hdr->hdr.l3.ext_flags = (ipv == 4) ? 2574 if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
2574 QETH_HDR_EXT_VLAN_FRAME : 2575 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
2575 QETH_HDR_EXT_INCLUDE_VLAN_TAG; 2576 else
2577 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
2576 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb); 2578 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
2577 } 2579 }
2578 2580
@@ -2638,8 +2640,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2638 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 2640 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
2639 struct qeth_eddp_context *ctx = NULL; 2641 struct qeth_eddp_context *ctx = NULL;
2640 2642
2641 QETH_DBF_TEXT(TRACE, 6, "l3xmit");
2642
2643 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2643 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2644 (skb->protocol != htons(ETH_P_IPV6)) && 2644 (skb->protocol != htons(ETH_P_IPV6)) &&
2645 (skb->protocol != htons(ETH_P_IP))) 2645 (skb->protocol != htons(ETH_P_IP)))
@@ -2890,6 +2890,7 @@ static struct ethtool_ops qeth_l3_ethtool_ops = {
2890 .get_ethtool_stats = qeth_core_get_ethtool_stats, 2890 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2891 .get_stats_count = qeth_core_get_stats_count, 2891 .get_stats_count = qeth_core_get_stats_count,
2892 .get_drvinfo = qeth_core_get_drvinfo, 2892 .get_drvinfo = qeth_core_get_drvinfo,
2893 .get_settings = qeth_core_ethtool_get_settings,
2893}; 2894};
2894 2895
2895/* 2896/*
@@ -2982,7 +2983,6 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
2982 int index; 2983 int index;
2983 int i; 2984 int i;
2984 2985
2985 QETH_DBF_TEXT(TRACE, 6, "qdinput");
2986 card = (struct qeth_card *) card_ptr; 2986 card = (struct qeth_card *) card_ptr;
2987 net_dev = card->dev; 2987 net_dev = card->dev;
2988 if (card->options.performance_stats) { 2988 if (card->options.performance_stats) {
@@ -3140,9 +3140,15 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3140 netif_carrier_on(card->dev); 3140 netif_carrier_on(card->dev);
3141 3141
3142 qeth_set_allowed_threads(card, 0xffffffff, 0); 3142 qeth_set_allowed_threads(card, 0xffffffff, 0);
3143 if ((recover_flag == CARD_STATE_RECOVER) && recovery_mode) { 3143 if (recover_flag == CARD_STATE_RECOVER) {
3144 if (recovery_mode)
3144 qeth_l3_open(card->dev); 3145 qeth_l3_open(card->dev);
3145 qeth_l3_set_multicast_list(card->dev); 3146 else {
3147 rtnl_lock();
3148 dev_open(card->dev);
3149 rtnl_unlock();
3150 }
3151 qeth_l3_set_multicast_list(card->dev);
3146 } 3152 }
3147 /* let user_space know that device is online */ 3153 /* let user_space know that device is online */
3148 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 3154 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
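The qeth_l3_fill_header change above picks between two VLAN handling modes: IPv4 frames and HiperSockets (IQD) devices carry the tag in the QDIO header, everything else keeps the tag in the frame. Reduced to a pure decision function (standalone sketch with simplified names, not the driver code):

#include <stdio.h>

enum vlan_mode { VLAN_IN_QDIO_HDR, VLAN_IN_FRAME };

/* mirrors the condition added in qeth_l3_fill_header:
 * (ipv == 4) || (card type == IQD) -> tag goes into the QDIO header */
static enum vlan_mode vlan_placement(int ip_version, int is_iqd)
{
	return (ip_version == 4 || is_iqd) ? VLAN_IN_QDIO_HDR : VLAN_IN_FRAME;
}

int main(void)
{
	printf("ipv4/OSA : %d\n", vlan_placement(4, 0)); /* 0 = QDIO header */
	printf("ipv6/OSA : %d\n", vlan_placement(6, 0)); /* 1 = in frame   */
	printf("ipv6/IQD : %d\n", vlan_placement(6, 1)); /* 0 = QDIO header */
	return 0;
}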
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 37b85c67b11d..c8bad675dbd1 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -1055,7 +1055,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
1055 rec->scsi_result = scsi_cmnd->result; 1055 rec->scsi_result = scsi_cmnd->result;
1056 rec->scsi_cmnd = (unsigned long)scsi_cmnd; 1056 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
1057 rec->scsi_serial = scsi_cmnd->serial_number; 1057 rec->scsi_serial = scsi_cmnd->serial_number;
1058 memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd, 1058 memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
1059 min((int)scsi_cmnd->cmd_len, 1059 min((int)scsi_cmnd->cmd_len,
1060 ZFCP_DBF_SCSI_OPCODE)); 1060 ZFCP_DBF_SCSI_OPCODE));
1061 rec->scsi_retries = scsi_cmnd->retries; 1061 rec->scsi_retries = scsi_cmnd->retries;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9af2330f07a2..b2ea4ea051f5 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4014,7 +4014,7 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4014 ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n", 4014 ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n",
4015 scpnt->result); 4015 scpnt->result);
4016 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, 4016 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
4017 (void *) &scpnt->cmnd, scpnt->cmd_len); 4017 scpnt->cmnd, scpnt->cmd_len);
4018 4018
4019 ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n", 4019 ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
4020 fcp_rsp_iu->fcp_sns_len); 4020 fcp_rsp_iu->fcp_sns_len);
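The two zfcp hunks above drop the address-of operator when passing scsi_cmnd->cmnd. That distinction only matters once cmnd is a pointer rather than an embedded array (the SCSI midlayer was moving in that direction around this time; treat that motivation as context, not something stated in this diff): for an array member, &a and a yield the same address, but for a pointer member, &p is the address of the pointer variable itself. A tiny standalone demonstration:

#include <stdio.h>
#include <string.h>

struct with_array { unsigned char cmnd[16]; };
struct with_ptr   { unsigned char *cmnd; };

int main(void)
{
	unsigned char storage[16] = "INQUIRY";
	struct with_array a;
	struct with_ptr   p = { storage };

	memcpy(a.cmnd, storage, sizeof(storage));

	/* array member: both expressions point at the command bytes */
	printf("array: %p %p\n", (void *)a.cmnd, (void *)&a.cmnd);
	/* pointer member: &p.cmnd is the pointer's own address, so
	 * memcpy(dst, &p.cmnd, len) would copy the pointer value rather
	 * than the CDB bytes -- hence passing p.cmnd directly. */
	printf("ptr:   %p %p\n", (void *)p.cmnd, (void *)&p.cmnd);
	return 0;
}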
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c
index a4e758143665..235703414370 100644
--- a/drivers/sbus/char/cpwatchdog.c
+++ b/drivers/sbus/char/cpwatchdog.c
@@ -637,7 +637,7 @@ static int wd_inittimer(int whichdog)
637 break; 637 break;
638 default: 638 default:
639 printk("%s: %s: invalid watchdog id: %i\n", 639 printk("%s: %s: invalid watchdog id: %i\n",
640 WD_OBPNAME, __FUNCTION__, whichdog); 640 WD_OBPNAME, __func__, whichdog);
641 return(1); 641 return(1);
642 } 642 }
643 if(0 != misc_register(whichmisc)) 643 if(0 != misc_register(whichmisc))
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 44d2ef906ac7..383f32c1d347 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -393,13 +393,13 @@ static int __init ts102_uctrl_init(void)
393 err = request_irq(driver->irq, uctrl_interrupt, 0, "uctrl", driver); 393 err = request_irq(driver->irq, uctrl_interrupt, 0, "uctrl", driver);
394 if (err) { 394 if (err) {
395 printk("%s: unable to register irq %d\n", 395 printk("%s: unable to register irq %d\n",
396 __FUNCTION__, driver->irq); 396 __func__, driver->irq);
397 return err; 397 return err;
398 } 398 }
399 399
400 if (misc_register(&uctrl_dev)) { 400 if (misc_register(&uctrl_dev)) {
401 printk("%s: unable to get misc minor %d\n", 401 printk("%s: unable to get misc minor %d\n",
402 __FUNCTION__, uctrl_dev.minor); 402 __func__, uctrl_dev.minor);
403 free_irq(driver->irq, driver); 403 free_irq(driver->irq, driver);
404 return -ENODEV; 404 return -ENODEV;
405 } 405 }
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index f4c4fe90240a..f5a9addb7050 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -599,7 +599,7 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
599 (struct NCR_700_command_slot *)SCp->host_scribble; 599 (struct NCR_700_command_slot *)SCp->host_scribble;
600 600
601 dma_unmap_single(hostdata->dev, slot->pCmd, 601 dma_unmap_single(hostdata->dev, slot->pCmd,
602 sizeof(SCp->cmnd), DMA_TO_DEVICE); 602 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
603 if (slot->flags == NCR_700_FLAG_AUTOSENSE) { 603 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
604 char *cmnd = NCR_700_get_sense_cmnd(SCp->device); 604 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
605#ifdef NCR_700_DEBUG 605#ifdef NCR_700_DEBUG
@@ -1004,7 +1004,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
1004 * here */ 1004 * here */
1005 NCR_700_unmap(hostdata, SCp, slot); 1005 NCR_700_unmap(hostdata, SCp, slot);
1006 dma_unmap_single(hostdata->dev, slot->pCmd, 1006 dma_unmap_single(hostdata->dev, slot->pCmd,
1007 sizeof(SCp->cmnd), 1007 MAX_COMMAND_SIZE,
1008 DMA_TO_DEVICE); 1008 DMA_TO_DEVICE);
1009 1009
1010 cmnd[0] = REQUEST_SENSE; 1010 cmnd[0] = REQUEST_SENSE;
@@ -1901,7 +1901,7 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1901 } 1901 }
1902 slot->resume_offset = 0; 1902 slot->resume_offset = 0;
1903 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd, 1903 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1904 sizeof(SCp->cmnd), DMA_TO_DEVICE); 1904 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1905 NCR_700_start_command(SCp); 1905 NCR_700_start_command(SCp);
1906 return 0; 1906 return 0;
1907} 1907}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 99c57b0c1d54..81ccbd7f9e34 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -504,10 +504,9 @@ config SCSI_AIC7XXX_OLD
504source "drivers/scsi/aic7xxx/Kconfig.aic79xx" 504source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
505source "drivers/scsi/aic94xx/Kconfig" 505source "drivers/scsi/aic94xx/Kconfig"
506 506
507# All the I2O code and drivers do not seem to be 64bit safe.
508config SCSI_DPT_I2O 507config SCSI_DPT_I2O
509 tristate "Adaptec I2O RAID support " 508 tristate "Adaptec I2O RAID support "
510 depends on !64BIT && SCSI && PCI && VIRT_TO_BUS 509 depends on SCSI && PCI && VIRT_TO_BUS
511 help 510 help
512 This driver supports all of Adaptec's I2O based RAID controllers as 511 This driver supports all of Adaptec's I2O based RAID controllers as
513 well as the DPT SmartRaid V cards. This is an Adaptec maintained 512 well as the DPT SmartRaid V cards. This is an Adaptec maintained
@@ -1680,6 +1679,7 @@ config MAC_SCSI
1680config SCSI_MAC_ESP 1679config SCSI_MAC_ESP
1681 tristate "Macintosh NCR53c9[46] SCSI" 1680 tristate "Macintosh NCR53c9[46] SCSI"
1682 depends on MAC && SCSI 1681 depends on MAC && SCSI
1682 select SCSI_SPI_ATTRS
1683 help 1683 help
1684 This is the NCR 53c9x SCSI controller found on most of the 68040 1684 This is the NCR 53c9x SCSI controller found on most of the 68040
1685 based Macintoshes. 1685 based Macintoshes.
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 792b2e807bf3..ced3eebe252c 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -895,7 +895,7 @@ static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, stru
895 } else { 895 } else {
896 scb->tag_msg = 0; /* No tag support */ 896 scb->tag_msg = 0; /* No tag support */
897 } 897 }
898 memcpy(&scb->cdb[0], &cmd->cmnd, scb->cdb_len); 898 memcpy(scb->cdb, cmd->cmnd, scb->cdb_len);
899} 899}
900 900
901/** 901/**
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 460d4024c46c..aa4e77c25273 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -498,6 +498,11 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
498 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && 498 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
499 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { 499 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
500 fsa_dev_ptr->valid = 1; 500 fsa_dev_ptr->valid = 1;
501 /* sense_key holds the current state of the spin-up */
502 if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
503 fsa_dev_ptr->sense_data.sense_key = NOT_READY;
504 else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
505 fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
501 fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol); 506 fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
502 fsa_dev_ptr->size 507 fsa_dev_ptr->size
503 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) + 508 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
@@ -1509,20 +1514,35 @@ static void io_callback(void *context, struct fib * fibptr)
1509 scsi_dma_unmap(scsicmd); 1514 scsi_dma_unmap(scsicmd);
1510 1515
1511 readreply = (struct aac_read_reply *)fib_data(fibptr); 1516 readreply = (struct aac_read_reply *)fib_data(fibptr);
1512 if (le32_to_cpu(readreply->status) == ST_OK) 1517 switch (le32_to_cpu(readreply->status)) {
1513 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1518 case ST_OK:
1514 else { 1519 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1520 SAM_STAT_GOOD;
1521 dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
1522 break;
1523 case ST_NOT_READY:
1524 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1525 SAM_STAT_CHECK_CONDITION;
1526 set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
1527 SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
1528 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1529 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1530 SCSI_SENSE_BUFFERSIZE));
1531 break;
1532 default:
1515#ifdef AAC_DETAILED_STATUS_INFO 1533#ifdef AAC_DETAILED_STATUS_INFO
1516 printk(KERN_WARNING "io_callback: io failed, status = %d\n", 1534 printk(KERN_WARNING "io_callback: io failed, status = %d\n",
1517 le32_to_cpu(readreply->status)); 1535 le32_to_cpu(readreply->status));
1518#endif 1536#endif
1519 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1537 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1538 SAM_STAT_CHECK_CONDITION;
1520 set_sense(&dev->fsa_dev[cid].sense_data, 1539 set_sense(&dev->fsa_dev[cid].sense_data,
1521 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, 1540 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1522 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); 1541 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1523 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1542 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1524 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1543 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1525 SCSI_SENSE_BUFFERSIZE)); 1544 SCSI_SENSE_BUFFERSIZE));
1545 break;
1526 } 1546 }
1527 aac_fib_complete(fibptr); 1547 aac_fib_complete(fibptr);
1528 aac_fib_free(fibptr); 1548 aac_fib_free(fibptr);
@@ -1863,6 +1883,84 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
1863 return SCSI_MLQUEUE_HOST_BUSY; 1883 return SCSI_MLQUEUE_HOST_BUSY;
1864} 1884}
1865 1885
1886static void aac_start_stop_callback(void *context, struct fib *fibptr)
1887{
1888 struct scsi_cmnd *scsicmd = context;
1889
1890 if (!aac_valid_context(scsicmd, fibptr))
1891 return;
1892
1893 BUG_ON(fibptr == NULL);
1894
1895 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1896
1897 aac_fib_complete(fibptr);
1898 aac_fib_free(fibptr);
1899 scsicmd->scsi_done(scsicmd);
1900}
1901
1902static int aac_start_stop(struct scsi_cmnd *scsicmd)
1903{
1904 int status;
1905 struct fib *cmd_fibcontext;
1906 struct aac_power_management *pmcmd;
1907 struct scsi_device *sdev = scsicmd->device;
1908 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
1909
1910 if (!(aac->supplement_adapter_info.SupportedOptions2 &
1911 AAC_OPTION_POWER_MANAGEMENT)) {
1912 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1913 SAM_STAT_GOOD;
1914 scsicmd->scsi_done(scsicmd);
1915 return 0;
1916 }
1917
1918 if (aac->in_reset)
1919 return SCSI_MLQUEUE_HOST_BUSY;
1920
1921 /*
1922 * Allocate and initialize a Fib
1923 */
1924 cmd_fibcontext = aac_fib_alloc(aac);
1925 if (!cmd_fibcontext)
1926 return SCSI_MLQUEUE_HOST_BUSY;
1927
1928 aac_fib_init(cmd_fibcontext);
1929
1930 pmcmd = fib_data(cmd_fibcontext);
1931 pmcmd->command = cpu_to_le32(VM_ContainerConfig);
1932 pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
1933 /* Eject bit ignored, not relevant */
1934 pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
1935 cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
1936 pmcmd->cid = cpu_to_le32(sdev_id(sdev));
1937 pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
1938 cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
1939
1940 /*
1941 * Now send the Fib to the adapter
1942 */
1943 status = aac_fib_send(ContainerCommand,
1944 cmd_fibcontext,
1945 sizeof(struct aac_power_management),
1946 FsaNormal,
1947 0, 1,
1948 (fib_callback)aac_start_stop_callback,
1949 (void *)scsicmd);
1950
1951 /*
1952 * Check that the command queued to the controller
1953 */
1954 if (status == -EINPROGRESS) {
1955 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1956 return 0;
1957 }
1958
1959 aac_fib_complete(cmd_fibcontext);
1960 aac_fib_free(cmd_fibcontext);
1961 return SCSI_MLQUEUE_HOST_BUSY;
1962}
1963
1866/** 1964/**
1867 * aac_scsi_cmd() - Process SCSI command 1965 * aac_scsi_cmd() - Process SCSI command
1868 * @scsicmd: SCSI command block 1966 * @scsicmd: SCSI command block
@@ -1899,7 +1997,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1899 * If the target container doesn't exist, it may have 1997 * If the target container doesn't exist, it may have
1900 * been newly created 1998 * been newly created
1901 */ 1999 */
1902 if ((fsa_dev_ptr[cid].valid & 1) == 0) { 2000 if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
2001 (fsa_dev_ptr[cid].sense_data.sense_key ==
2002 NOT_READY)) {
1903 switch (scsicmd->cmnd[0]) { 2003 switch (scsicmd->cmnd[0]) {
1904 case SERVICE_ACTION_IN: 2004 case SERVICE_ACTION_IN:
1905 if (!(dev->raw_io_interface) || 2005 if (!(dev->raw_io_interface) ||
@@ -2091,8 +2191,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2091 scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); 2191 scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
2092 /* Do not cache partition table for arrays */ 2192 /* Do not cache partition table for arrays */
2093 scsicmd->device->removable = 1; 2193 scsicmd->device->removable = 1;
2094 2194 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2095 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 2195 SAM_STAT_GOOD;
2096 scsicmd->scsi_done(scsicmd); 2196 scsicmd->scsi_done(scsicmd);
2097 2197
2098 return 0; 2198 return 0;
@@ -2187,15 +2287,32 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2187 * These commands are all No-Ops 2287 * These commands are all No-Ops
2188 */ 2288 */
2189 case TEST_UNIT_READY: 2289 case TEST_UNIT_READY:
2290 if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
2291 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2292 SAM_STAT_CHECK_CONDITION;
2293 set_sense(&dev->fsa_dev[cid].sense_data,
2294 NOT_READY, SENCODE_BECOMING_READY,
2295 ASENCODE_BECOMING_READY, 0, 0);
2296 memcpy(scsicmd->sense_buffer,
2297 &dev->fsa_dev[cid].sense_data,
2298 min_t(size_t,
2299 sizeof(dev->fsa_dev[cid].sense_data),
2300 SCSI_SENSE_BUFFERSIZE));
2301 scsicmd->scsi_done(scsicmd);
2302 return 0;
2303 }
2304 /* FALLTHRU */
2190 case RESERVE: 2305 case RESERVE:
2191 case RELEASE: 2306 case RELEASE:
2192 case REZERO_UNIT: 2307 case REZERO_UNIT:
2193 case REASSIGN_BLOCKS: 2308 case REASSIGN_BLOCKS:
2194 case SEEK_10: 2309 case SEEK_10:
2195 case START_STOP:
2196 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 2310 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2197 scsicmd->scsi_done(scsicmd); 2311 scsicmd->scsi_done(scsicmd);
2198 return 0; 2312 return 0;
2313
2314 case START_STOP:
2315 return aac_start_stop(scsicmd);
2199 } 2316 }
2200 2317
2201 switch (scsicmd->cmnd[0]) 2318 switch (scsicmd->cmnd[0])
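The aachba.c changes above answer TEST UNIT READY with a CHECK CONDITION carrying a NOT READY / becoming-ready sense while an array is still spinning up. As a standalone sketch of what such a fixed-format sense buffer generally looks like (generic SCSI values: sense key 0x02 NOT READY, ASC/ASCQ 0x04/0x01 "in process of becoming ready"; this is illustrative and not the driver's set_sense helper):

#include <stdio.h>
#include <string.h>

/* build a minimal fixed-format (0x70) sense buffer */
static void fill_becoming_ready_sense(unsigned char *sense, size_t len)
{
	memset(sense, 0, len);
	sense[0]  = 0x70;  /* current error, fixed format */
	sense[2]  = 0x02;  /* sense key: NOT READY */
	sense[7]  = 0x0a;  /* additional sense length */
	sense[12] = 0x04;  /* ASC:  logical unit not ready */
	sense[13] = 0x01;  /* ASCQ: in process of becoming ready */
}

int main(void)
{
	unsigned char sense[18];
	size_t i;

	fill_becoming_ready_sense(sense, sizeof(sense));
	for (i = 0; i < sizeof(sense); i++)
		printf("%02x ", sense[i]);
	printf("\n");
	return 0;
}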
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 113ca9c8934c..73916adb8f80 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
12 *----------------------------------------------------------------------------*/ 12 *----------------------------------------------------------------------------*/
13 13
14#ifndef AAC_DRIVER_BUILD 14#ifndef AAC_DRIVER_BUILD
15# define AAC_DRIVER_BUILD 2455 15# define AAC_DRIVER_BUILD 2456
16# define AAC_DRIVER_BRANCH "-ms" 16# define AAC_DRIVER_BRANCH "-ms"
17#endif 17#endif
18#define MAXIMUM_NUM_CONTAINERS 32 18#define MAXIMUM_NUM_CONTAINERS 32
@@ -34,8 +34,8 @@
34#define CONTAINER_TO_ID(cont) (cont) 34#define CONTAINER_TO_ID(cont) (cont)
35#define CONTAINER_TO_LUN(cont) (0) 35#define CONTAINER_TO_LUN(cont) (0)
36 36
37#define aac_phys_to_logical(x) (x+1) 37#define aac_phys_to_logical(x) ((x)+1)
38#define aac_logical_to_phys(x) (x?x-1:0) 38#define aac_logical_to_phys(x) ((x)?(x)-1:0)
39 39
40/* #define AAC_DETAILED_STATUS_INFO */ 40/* #define AAC_DETAILED_STATUS_INFO */
41 41
@@ -424,6 +424,8 @@ struct aac_init
424 */ 424 */
425 __le32 InitFlags; /* flags for supported features */ 425 __le32 InitFlags; /* flags for supported features */
426#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 426#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001
427#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010
428#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020
427 __le32 MaxIoCommands; /* max outstanding commands */ 429 __le32 MaxIoCommands; /* max outstanding commands */
428 __le32 MaxIoSize; /* largest I/O command */ 430 __le32 MaxIoSize; /* largest I/O command */
429 __le32 MaxFibSize; /* largest FIB to adapter */ 431 __le32 MaxFibSize; /* largest FIB to adapter */
@@ -867,8 +869,10 @@ struct aac_supplement_adapter_info
867}; 869};
868#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010) 870#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
869#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000) 871#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000)
870#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001) 872/* SupportedOptions2 */
871#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) 873#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001)
874#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
875#define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004)
872#define AAC_SIS_VERSION_V3 3 876#define AAC_SIS_VERSION_V3 3
873#define AAC_SIS_SLOT_UNKNOWN 0xFF 877#define AAC_SIS_SLOT_UNKNOWN 0xFF
874 878
@@ -1148,6 +1152,7 @@ struct aac_dev
1148#define ST_DQUOT 69 1152#define ST_DQUOT 69
1149#define ST_STALE 70 1153#define ST_STALE 70
1150#define ST_REMOTE 71 1154#define ST_REMOTE 71
1155#define ST_NOT_READY 72
1151#define ST_BADHANDLE 10001 1156#define ST_BADHANDLE 10001
1152#define ST_NOT_SYNC 10002 1157#define ST_NOT_SYNC 10002
1153#define ST_BAD_COOKIE 10003 1158#define ST_BAD_COOKIE 10003
@@ -1269,6 +1274,18 @@ struct aac_synchronize_reply {
1269 u8 data[16]; 1274 u8 data[16];
1270}; 1275};
1271 1276
1277#define CT_POWER_MANAGEMENT 245
1278#define CT_PM_START_UNIT 2
1279#define CT_PM_STOP_UNIT 3
1280#define CT_PM_UNIT_IMMEDIATE 1
1281struct aac_power_management {
1282 __le32 command; /* VM_ContainerConfig */
1283 __le32 type; /* CT_POWER_MANAGEMENT */
1284 __le32 sub; /* CT_PM_* */
1285 __le32 cid;
1286 __le32 parm; /* CT_PM_sub_* */
1287};
1288
1272#define CT_PAUSE_IO 65 1289#define CT_PAUSE_IO 65
1273#define CT_RELEASE_IO 66 1290#define CT_RELEASE_IO 66
1274struct aac_pause { 1291struct aac_pause {
@@ -1536,6 +1553,7 @@ struct aac_mntent {
1536#define FSCS_NOTCLEAN 0x0001 /* fsck is necessary before mounting */ 1553#define FSCS_NOTCLEAN 0x0001 /* fsck is necessary before mounting */
1537#define FSCS_READONLY 0x0002 /* possible result of broken mirror */ 1554#define FSCS_READONLY 0x0002 /* possible result of broken mirror */
1538#define FSCS_HIDDEN 0x0004 /* should be ignored - set during a clear */ 1555#define FSCS_HIDDEN 0x0004 /* should be ignored - set during a clear */
1556#define FSCS_NOT_READY 0x0008 /* Array spinning up to fulfil request */
1539 1557
1540struct aac_query_mount { 1558struct aac_query_mount {
1541 __le32 command; 1559 __le32 command;
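The aacraid.h hunk above adds parentheses around the macro arguments of aac_phys_to_logical and aac_logical_to_phys. Without them, an expression argument expands with the wrong precedence; a small standalone demonstration of the difference (the "old"/"new" macros below simply restate the before/after forms from the hunk):

#include <stdio.h>

/* unparenthesized forms as they looked before the patch */
#define PHYS_TO_LOGICAL_OLD(x) (x+1)
#define LOGICAL_TO_PHYS_OLD(x) (x?x-1:0)

/* parenthesized forms matching the patched macros */
#define PHYS_TO_LOGICAL_NEW(x) ((x)+1)
#define LOGICAL_TO_PHYS_NEW(x) ((x)?(x)-1:0)

int main(void)
{
	int a = 1, b = 1;

	/* old: a & b+1 parses as a & (b+1) == 0; new: ((a & b)+1) == 2 */
	printf("phys_to_logical: old=%d new=%d\n",
	       PHYS_TO_LOGICAL_OLD(a & b), PHYS_TO_LOGICAL_NEW(a & b));
	/* old true branch: 2 & 3-1 == 2 & 2 == 2; new: (2 & 3)-1 == 1 */
	printf("logical_to_phys: old=%d new=%d\n",
	       LOGICAL_TO_PHYS_OLD(2 & 3), LOGICAL_TO_PHYS_NEW(2 & 3));
	return 0;
}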
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 294a802450be..cbac06355107 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -97,6 +97,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
97 init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); 97 init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
98 dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); 98 dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
99 } 99 }
100 init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
101 INITFLAGS_DRIVER_SUPPORTS_PM);
100 init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); 102 init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
101 init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); 103 init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
102 init->MaxFibSize = cpu_to_le32(dev->max_fib_size); 104 init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index ef67816a6fe5..289304aab690 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -515,7 +515,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
515 } 515 }
516 udelay(5); 516 udelay(5);
517 } 517 }
518 } else if (down_interruptible(&fibptr->event_wait) == 0) { 518 } else if (down_interruptible(&fibptr->event_wait)) {
519 fibptr->done = 2; 519 fibptr->done = 2;
520 up(&fibptr->event_wait); 520 up(&fibptr->event_wait);
521 } 521 }
@@ -906,15 +906,22 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
906 case AifEnAddJBOD: 906 case AifEnAddJBOD:
907 case AifEnDeleteJBOD: 907 case AifEnDeleteJBOD:
908 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); 908 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
909 if ((container >> 28)) 909 if ((container >> 28)) {
910 container = (u32)-1;
910 break; 911 break;
912 }
911 channel = (container >> 24) & 0xF; 913 channel = (container >> 24) & 0xF;
912 if (channel >= dev->maximum_num_channels) 914 if (channel >= dev->maximum_num_channels) {
915 container = (u32)-1;
913 break; 916 break;
917 }
914 id = container & 0xFFFF; 918 id = container & 0xFFFF;
915 if (id >= dev->maximum_num_physicals) 919 if (id >= dev->maximum_num_physicals) {
920 container = (u32)-1;
916 break; 921 break;
922 }
917 lun = (container >> 16) & 0xFF; 923 lun = (container >> 16) & 0xFF;
924 container = (u32)-1;
918 channel = aac_phys_to_logical(channel); 925 channel = aac_phys_to_logical(channel);
919 device_config_needed = 926 device_config_needed =
920 (((__le32 *)aifcmd->data)[0] == 927 (((__le32 *)aifcmd->data)[0] ==
@@ -933,13 +940,18 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
933 case EM_DRIVE_REMOVAL: 940 case EM_DRIVE_REMOVAL:
934 container = le32_to_cpu( 941 container = le32_to_cpu(
935 ((__le32 *)aifcmd->data)[2]); 942 ((__le32 *)aifcmd->data)[2]);
936 if ((container >> 28)) 943 if ((container >> 28)) {
944 container = (u32)-1;
937 break; 945 break;
946 }
938 channel = (container >> 24) & 0xF; 947 channel = (container >> 24) & 0xF;
939 if (channel >= dev->maximum_num_channels) 948 if (channel >= dev->maximum_num_channels) {
949 container = (u32)-1;
940 break; 950 break;
951 }
941 id = container & 0xFFFF; 952 id = container & 0xFFFF;
942 lun = (container >> 16) & 0xFF; 953 lun = (container >> 16) & 0xFF;
954 container = (u32)-1;
943 if (id >= dev->maximum_num_physicals) { 955 if (id >= dev->maximum_num_physicals) {
944 /* legacy dev_t ? */ 956 /* legacy dev_t ? */
945 if ((0x2000 <= id) || lun || channel || 957 if ((0x2000 <= id) || lun || channel ||
@@ -1025,9 +1037,10 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1025 break; 1037 break;
1026 } 1038 }
1027 1039
1040 container = 0;
1041retry_next:
1028 if (device_config_needed == NOTHING) 1042 if (device_config_needed == NOTHING)
1029 for (container = 0; container < dev->maximum_num_containers; 1043 for (; container < dev->maximum_num_containers; ++container) {
1030 ++container) {
1031 if ((dev->fsa_dev[container].config_waiting_on == 0) && 1044 if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1032 (dev->fsa_dev[container].config_needed != NOTHING) && 1045 (dev->fsa_dev[container].config_needed != NOTHING) &&
1033 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) { 1046 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
@@ -1110,6 +1123,11 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1110 } 1123 }
1111 if (device_config_needed == ADD) 1124 if (device_config_needed == ADD)
1112 scsi_add_device(dev->scsi_host_ptr, channel, id, lun); 1125 scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
1126 if (channel == CONTAINER_CHANNEL) {
1127 container++;
1128 device_config_needed = NOTHING;
1129 goto retry_next;
1130 }
1113} 1131}
1114 1132
1115static int _aac_reset_adapter(struct aac_dev *aac, int forced) 1133static int _aac_reset_adapter(struct aac_dev *aac, int forced)
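The commsup.c hunk above validates and decodes the packed 32-bit container word delivered with the JBOD/enclosure AIF events: the top nibble must be zero, bits 27-24 carry the channel, bits 23-16 the LUN and bits 15-0 the device id, and the variable is reset to (u32)-1 once decoded. A standalone decoder mirroring those shifts and masks (illustrative only, with made-up struct and function names):

#include <stdio.h>
#include <stdint.h>

struct aif_addr { unsigned channel, id, lun; };

/* decode the packed container word used by the AIF events in the hunk:
 * returns 0 on success, -1 if the reserved top nibble is set */
static int decode_container(uint32_t container, struct aif_addr *out)
{
	if (container >> 28)
		return -1;                 /* reserved bits set: ignore event */
	out->channel = (container >> 24) & 0xF;
	out->lun     = (container >> 16) & 0xFF;
	out->id      =  container        & 0xFFFF;
	return 0;
}

int main(void)
{
	struct aif_addr a;

	if (decode_container(0x01020005u, &a) == 0)
		printf("channel=%u lun=%u id=%u\n", a.channel, a.lun, a.id);
	return 0;
}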
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index c109f63f8279..1f7c83607f84 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -401,6 +401,8 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
401static int aac_slave_configure(struct scsi_device *sdev) 401static int aac_slave_configure(struct scsi_device *sdev)
402{ 402{
403 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; 403 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
404 if (aac->jbod && (sdev->type == TYPE_DISK))
405 sdev->removable = 1;
404 if ((sdev->type == TYPE_DISK) && 406 if ((sdev->type == TYPE_DISK) &&
405 (sdev_channel(sdev) != CONTAINER_CHANNEL) && 407 (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
406 (!aac->jbod || sdev->inq_periph_qual) && 408 (!aac->jbod || sdev->inq_periph_qual) &&
@@ -809,6 +811,12 @@ static ssize_t aac_show_flags(struct device *cdev,
809 "SAI_READ_CAPACITY_16\n"); 811 "SAI_READ_CAPACITY_16\n");
810 if (dev->jbod) 812 if (dev->jbod)
811 len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n"); 813 len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
814 if (dev->supplement_adapter_info.SupportedOptions2 &
815 AAC_OPTION_POWER_MANAGEMENT)
816 len += snprintf(buf + len, PAGE_SIZE - len,
817 "SUPPORTED_POWER_MANAGEMENT\n");
818 if (dev->msi)
819 len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
812 return len; 820 return len;
813} 821}
814 822
@@ -1106,7 +1114,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1106 aac->pdev = pdev; 1114 aac->pdev = pdev;
1107 aac->name = aac_driver_template.name; 1115 aac->name = aac_driver_template.name;
1108 aac->id = shost->unique_id; 1116 aac->id = shost->unique_id;
1109 aac->cardtype = index; 1117 aac->cardtype = index;
1110 INIT_LIST_HEAD(&aac->entry); 1118 INIT_LIST_HEAD(&aac->entry);
1111 1119
1112 aac->fibs = kmalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL); 1120 aac->fibs = kmalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
@@ -1146,19 +1154,19 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1146 goto out_deinit; 1154 goto out_deinit;
1147 1155
1148 /* 1156 /*
1149 * Lets override negotiations and drop the maximum SG limit to 34 1157 * Lets override negotiations and drop the maximum SG limit to 34
1150 */ 1158 */
1151 if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) && 1159 if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
1152 (shost->sg_tablesize > 34)) { 1160 (shost->sg_tablesize > 34)) {
1153 shost->sg_tablesize = 34; 1161 shost->sg_tablesize = 34;
1154 shost->max_sectors = (shost->sg_tablesize * 8) + 112; 1162 shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1155 } 1163 }
1156 1164
1157 if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) && 1165 if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
1158 (shost->sg_tablesize > 17)) { 1166 (shost->sg_tablesize > 17)) {
1159 shost->sg_tablesize = 17; 1167 shost->sg_tablesize = 17;
1160 shost->max_sectors = (shost->sg_tablesize * 8) + 112; 1168 shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1161 } 1169 }
1162 1170
1163 error = pci_set_dma_max_seg_size(pdev, 1171 error = pci_set_dma_max_seg_size(pdev,
1164 (aac->adapter_info.options & AAC_OPT_NEW_COMM) ? 1172 (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
@@ -1174,7 +1182,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1174 else 1182 else
1175 aac->printf_enabled = 0; 1183 aac->printf_enabled = 0;
1176 1184
1177 /* 1185 /*
1178 * max channel will be the physical channels plus 1 virtual channel 1186 * max channel will be the physical channels plus 1 virtual channel
1179 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL) 1187 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
1180 * physical channels are address by their actual physical number+1 1188 * physical channels are address by their actual physical number+1
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index a09b2d3fdf5a..f5215fd4b73d 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -994,13 +994,13 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
994 SCpnt->SCp.sent_command = 0; 994 SCpnt->SCp.sent_command = 0;
995 995
996 if(SCpnt->SCp.phase & (resetting|check_condition)) { 996 if(SCpnt->SCp.phase & (resetting|check_condition)) {
997 if(SCpnt->host_scribble==0 || SCSEM(SCpnt) || SCNEXT(SCpnt)) { 997 if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
998 printk(ERR_LEAD "cannot reuse command\n", CMDINFO(SCpnt)); 998 printk(ERR_LEAD "cannot reuse command\n", CMDINFO(SCpnt));
999 return FAILED; 999 return FAILED;
1000 } 1000 }
1001 } else { 1001 } else {
1002 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); 1002 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
1003 if(SCpnt->host_scribble==0) { 1003 if(!SCpnt->host_scribble) {
1004 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); 1004 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt));
1005 return FAILED; 1005 return FAILED;
1006 } 1006 }
@@ -1162,7 +1162,7 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1162 } 1162 }
1163 1163
1164 DO_LOCK(flags); 1164 DO_LOCK(flags);
1165 issued = remove_SC(&ISSUE_SC, SCpnt)==0; 1165 issued = remove_SC(&ISSUE_SC, SCpnt) == NULL;
1166 disconnected = issued && remove_SC(&DISCONNECTED_SC, SCpnt); 1166 disconnected = issued && remove_SC(&DISCONNECTED_SC, SCpnt);
1167 DO_UNLOCK(flags); 1167 DO_UNLOCK(flags);
1168 1168
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 1ac119733bac..f220e5e436ab 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -50,7 +50,7 @@ aic7770_map_registers(struct ahc_softc *ahc, u_int port)
50 /* 50 /*
51 * Lock out other contenders for our i/o space. 51 * Lock out other contenders for our i/o space.
52 */ 52 */
53 if (request_region(port, AHC_EISA_IOSIZE, "aic7xxx") == 0) 53 if (!request_region(port, AHC_EISA_IOSIZE, "aic7xxx"))
54 return (ENOMEM); 54 return (ENOMEM);
55 ahc->tag = BUS_SPACE_PIO; 55 ahc->tag = BUS_SPACE_PIO;
56 ahc->bsh.ioport = port; 56 ahc->bsh.ioport = port;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0d7628f1f1ef..00f5b9868574 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -352,7 +352,7 @@ ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base)
352 *base = pci_resource_start(ahc->dev_softc, 0); 352 *base = pci_resource_start(ahc->dev_softc, 0);
353 if (*base == 0) 353 if (*base == 0)
354 return (ENOMEM); 354 return (ENOMEM);
355 if (request_region(*base, 256, "aic7xxx") == 0) 355 if (!request_region(*base, 256, "aic7xxx"))
356 return (ENOMEM); 356 return (ENOMEM);
357 return (0); 357 return (0);
358} 358}
@@ -369,7 +369,7 @@ ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
369 start = pci_resource_start(ahc->dev_softc, 1); 369 start = pci_resource_start(ahc->dev_softc, 1);
370 if (start != 0) { 370 if (start != 0) {
371 *bus_addr = start; 371 *bus_addr = start;
372 if (request_mem_region(start, 0x1000, "aic7xxx") == 0) 372 if (!request_mem_region(start, 0x1000, "aic7xxx"))
373 error = ENOMEM; 373 error = ENOMEM;
374 if (error == 0) { 374 if (error == 0) {
375 *maddr = ioremap_nocache(start, 256); 375 *maddr = ioremap_nocache(start, 256);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 90f5e0a6f2e3..2a730c470f62 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -529,10 +529,10 @@ static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
529/* The first entry, 0, is used for dynamic ids, the rest for devices 529/* The first entry, 0, is used for dynamic ids, the rest for devices
530 * we know about. 530 * we know about.
531 */ 531 */
532static struct asd_pcidev_struct { 532static const struct asd_pcidev_struct {
533 const char * name; 533 const char * name;
534 int (*setup)(struct asd_ha_struct *asd_ha); 534 int (*setup)(struct asd_ha_struct *asd_ha);
535} asd_pcidev_data[] = { 535} asd_pcidev_data[] __devinitconst = {
536 /* Id 0 is used for dynamic ids. */ 536 /* Id 0 is used for dynamic ids. */
537 { .name = "Adaptec AIC-94xx SAS/SATA Host Adapter", 537 { .name = "Adaptec AIC-94xx SAS/SATA Host Adapter",
538 .setup = asd_aic9410_setup 538 .setup = asd_aic9410_setup
@@ -735,7 +735,7 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
735static int __devinit asd_pci_probe(struct pci_dev *dev, 735static int __devinit asd_pci_probe(struct pci_dev *dev,
736 const struct pci_device_id *id) 736 const struct pci_device_id *id)
737{ 737{
738 struct asd_pcidev_struct *asd_dev; 738 const struct asd_pcidev_struct *asd_dev;
739 unsigned asd_id = (unsigned) id->driver_data; 739 unsigned asd_id = (unsigned) id->driver_data;
740 struct asd_ha_struct *asd_ha; 740 struct asd_ha_struct *asd_ha;
741 struct Scsi_Host *shost; 741 struct Scsi_Host *shost;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 403a7f2d8f9b..9785d7384199 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -28,7 +28,6 @@
28#define SERVICE_ACTION_OUT_12 0xa9 28#define SERVICE_ACTION_OUT_12 0xa9
29#define SERVICE_ACTION_IN_16 0x9e 29#define SERVICE_ACTION_IN_16 0x9e
30#define SERVICE_ACTION_OUT_16 0x9f 30#define SERVICE_ACTION_OUT_16 0x9f
31#define VARIABLE_LENGTH_CMD 0x7f
32 31
33 32
34 33
@@ -210,7 +209,7 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
210 cdb0 = cdbp[0]; 209 cdb0 = cdbp[0];
211 switch(cdb0) { 210 switch(cdb0) {
212 case VARIABLE_LENGTH_CMD: 211 case VARIABLE_LENGTH_CMD:
213 len = cdbp[7] + 8; 212 len = scsi_varlen_cdb_length(cdbp);
214 if (len < 10) { 213 if (len < 10) {
215 printk("short variable length command, " 214 printk("short variable length command, "
216 "len=%d ext_len=%d", len, cdb_len); 215 "len=%d ext_len=%d", len, cdb_len);
@@ -300,7 +299,7 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
300 cdb0 = cdbp[0]; 299 cdb0 = cdbp[0];
301 switch(cdb0) { 300 switch(cdb0) {
302 case VARIABLE_LENGTH_CMD: 301 case VARIABLE_LENGTH_CMD:
303 len = cdbp[7] + 8; 302 len = scsi_varlen_cdb_length(cdbp);
304 if (len < 10) { 303 if (len < 10) {
305 printk("short opcode=0x%x command, len=%d " 304 printk("short opcode=0x%x command, len=%d "
306 "ext_len=%d", cdb0, len, cdb_len); 305 "ext_len=%d", cdb0, len, cdb_len);
@@ -335,10 +334,7 @@ void __scsi_print_command(unsigned char *cdb)
335 int k, len; 334 int k, len;
336 335
337 print_opcode_name(cdb, 0); 336 print_opcode_name(cdb, 0);
338 if (VARIABLE_LENGTH_CMD == cdb[0]) 337 len = scsi_command_size(cdb);
339 len = cdb[7] + 8;
340 else
341 len = COMMAND_SIZE(cdb[0]);
342 /* print out all bytes in cdb */ 338 /* print out all bytes in cdb */
343 for (k = 0; k < len; ++k) 339 for (k = 0; k < len; ++k)
344 printk(" %02x", cdb[k]); 340 printk(" %02x", cdb[k]);
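The constants.c changes above replace the open-coded variable-length CDB size calculation with scsi_varlen_cdb_length()/scsi_command_size(). The removed expression shows the underlying rule: for opcode 0x7f the additional CDB length sits in byte 7, so the total size is cdb[7] + 8. A standalone sketch of that rule (the 32-byte CDB in main is a hypothetical example):

#include <stdio.h>

#define VARIABLE_LENGTH_CMD 0x7f

/* total CDB size for a variable-length command, as the removed
 * open-coded expression computed it: byte 7 holds the additional
 * length that follows the 8-byte fixed part */
static int varlen_cdb_size(const unsigned char *cdb)
{
	return cdb[7] + 8;
}

int main(void)
{
	unsigned char cdb[32] = { VARIABLE_LENGTH_CMD };

	cdb[7] = 24;                                        /* additional CDB length */
	printf("cdb length = %d\n", varlen_cdb_size(cdb));  /* prints 32 */
	return 0;
}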
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
index cc784e8f6e9d..f60236721e0d 100644
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -89,7 +89,7 @@ typedef struct {
89 int njobs; /* # of jobs sent to HA */ 89 int njobs; /* # of jobs sent to HA */
90 int qdepth; /* Controller queue depth. */ 90 int qdepth; /* Controller queue depth. */
91 int wakebase; /* mpx wakeup base index. */ 91 int wakebase; /* mpx wakeup base index. */
92 uLONG SGsize; /* Scatter/Gather list size. */ 92 uINT SGsize; /* Scatter/Gather list size. */
93 unsigned heads; /* heads for drives on cntlr. */ 93 unsigned heads; /* heads for drives on cntlr. */
94 unsigned sectors; /* sectors for drives on cntlr. */ 94 unsigned sectors; /* sectors for drives on cntlr. */
95 uCHAR do_drive32; /* Flag for Above 16 MB Ability */ 95 uCHAR do_drive32; /* Flag for Above 16 MB Ability */
@@ -97,8 +97,8 @@ typedef struct {
97 char idPAL[4]; /* 4 Bytes Of The ID Pal */ 97 char idPAL[4]; /* 4 Bytes Of The ID Pal */
98 uCHAR primary; /* 1 For Primary, 0 For Secondary */ 98 uCHAR primary; /* 1 For Primary, 0 For Secondary */
99 uCHAR eataVersion; /* EATA Version */ 99 uCHAR eataVersion; /* EATA Version */
100 uLONG cpLength; /* EATA Command Packet Length */ 100 uINT cpLength; /* EATA Command Packet Length */
101 uLONG spLength; /* EATA Status Packet Length */ 101 uINT spLength; /* EATA Status Packet Length */
102 uCHAR drqNum; /* DRQ Index (0,5,6,7) */ 102 uCHAR drqNum; /* DRQ Index (0,5,6,7) */
103 uCHAR flag1; /* EATA Flags 1 (Byte 9) */ 103 uCHAR flag1; /* EATA Flags 1 (Byte 9) */
104 uCHAR flag2; /* EATA Flags 2 (Byte 30) */ 104 uCHAR flag2; /* EATA Flags 2 (Byte 30) */
@@ -107,23 +107,23 @@ typedef struct {
107typedef struct { 107typedef struct {
108 uSHORT length; // Remaining length of this 108 uSHORT length; // Remaining length of this
109 uSHORT drvrHBAnum; // Relative HBA # used by the driver 109 uSHORT drvrHBAnum; // Relative HBA # used by the driver
110 uLONG baseAddr; // Base I/O address 110 uINT baseAddr; // Base I/O address
111 uSHORT blinkState; // Blink LED state (0=Not in blink LED) 111 uSHORT blinkState; // Blink LED state (0=Not in blink LED)
112 uCHAR pciBusNum; // PCI Bus # (Optional) 112 uCHAR pciBusNum; // PCI Bus # (Optional)
113 uCHAR pciDeviceNum; // PCI Device # (Optional) 113 uCHAR pciDeviceNum; // PCI Device # (Optional)
114 uSHORT hbaFlags; // Miscellaneous HBA flags 114 uSHORT hbaFlags; // Miscellaneous HBA flags
115 uSHORT Interrupt; // Interrupt set for this device. 115 uSHORT Interrupt; // Interrupt set for this device.
116# if (defined(_DPT_ARC)) 116# if (defined(_DPT_ARC))
117 uLONG baseLength; 117 uINT baseLength;
118 ADAPTER_OBJECT *AdapterObject; 118 ADAPTER_OBJECT *AdapterObject;
119 LARGE_INTEGER DmaLogicalAddress; 119 LARGE_INTEGER DmaLogicalAddress;
120 PVOID DmaVirtualAddress; 120 PVOID DmaVirtualAddress;
121 LARGE_INTEGER ReplyLogicalAddress; 121 LARGE_INTEGER ReplyLogicalAddress;
122 PVOID ReplyVirtualAddress; 122 PVOID ReplyVirtualAddress;
123# else 123# else
124 uLONG reserved1; // Reserved for future expansion 124 uINT reserved1; // Reserved for future expansion
125 uLONG reserved2; // Reserved for future expansion 125 uINT reserved2; // Reserved for future expansion
126 uLONG reserved3; // Reserved for future expansion 126 uINT reserved3; // Reserved for future expansion
127# endif 127# endif
128} drvrHBAinfo_S; 128} drvrHBAinfo_S;
129 129
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
index 94bc894d1200..72c8992fdf21 100644
--- a/drivers/scsi/dpt/dptsig.h
+++ b/drivers/scsi/dpt/dptsig.h
@@ -33,11 +33,7 @@
33/* to make sure we are talking the same size under all OS's */ 33/* to make sure we are talking the same size under all OS's */
34typedef unsigned char sigBYTE; 34typedef unsigned char sigBYTE;
35typedef unsigned short sigWORD; 35typedef unsigned short sigWORD;
36#if (defined(_MULTI_DATAMODEL) && defined(sun) && !defined(_ILP32)) 36typedef unsigned int sigINT;
37typedef uint32_t sigLONG;
38#else
39typedef unsigned long sigLONG;
40#endif
41 37
42/* 38/*
43 * use sigWORDLittleEndian for: 39 * use sigWORDLittleEndian for:
@@ -300,7 +296,7 @@ typedef struct dpt_sig {
300 sigBYTE dsFiletype; /* type of file */ 296 sigBYTE dsFiletype; /* type of file */
301 sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */ 297 sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */
302 sigBYTE dsOEM; /* OEM file was created for */ 298 sigBYTE dsOEM; /* OEM file was created for */
303 sigLONG dsOS; /* which Operating systems */ 299 sigINT dsOS; /* which Operating systems */
304 sigWORD dsCapabilities; /* RAID levels, etc. */ 300 sigWORD dsCapabilities; /* RAID levels, etc. */
305 sigWORD dsDeviceSupp; /* Types of SCSI devices supported */ 301 sigWORD dsDeviceSupp; /* Types of SCSI devices supported */
306 sigWORD dsAdapterSupp; /* DPT adapter families supported */ 302 sigWORD dsAdapterSupp; /* DPT adapter families supported */
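
The dpti_ioctl.h and dptsig.h hunks (and the sys_info.h hunk that follows) replace the `unsigned long` based uLONG/sigLONG fields with `unsigned int` typedefs, because `unsigned long` is 8 bytes on a 64-bit kernel and would change the layout of structures shared with 32-bit userspace over ioctl. A compile-time check expressing that intent (illustrative sketch, not part of the patch):

#include <linux/kernel.h>	/* BUILD_BUG_ON() */

typedef unsigned int sigINT;	/* 4 bytes on both ILP32 and LP64 */

static inline void dpt_abi_size_check(void)
{
	/* sigINT/uINT must stay 32 bits so the ioctl ABI matches 32-bit
	 * userland; 'unsigned long' would not on x86_64. */
	BUILD_BUG_ON(sizeof(sigINT) != 4);
	BUILD_BUG_ON(sizeof(unsigned int) != 4);
}
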
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
index d23b70c8c768..a90c4cb8ea8b 100644
--- a/drivers/scsi/dpt/sys_info.h
+++ b/drivers/scsi/dpt/sys_info.h
@@ -145,8 +145,8 @@
145 uCHAR smartROMRevision; 145 uCHAR smartROMRevision;
146 uSHORT flags; /* See bit definitions above */ 146 uSHORT flags; /* See bit definitions above */
147 uSHORT conventionalMemSize; /* in KB */ 147 uSHORT conventionalMemSize; /* in KB */
148 uLONG extendedMemSize; /* in KB */ 148 uINT extendedMemSize; /* in KB */
149 uLONG osType; /* Same as DPTSIG's definition */ 149 uINT osType; /* Same as DPTSIG's definition */
150 uCHAR osMajorVersion; 150 uCHAR osMajorVersion;
151 uCHAR osMinorVersion; /* The OS version */ 151 uCHAR osMinorVersion; /* The OS version */
152 uCHAR osRevision; 152 uCHAR osRevision;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index c9dd8392aab2..0fb5bf4c43ac 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -29,11 +29,6 @@
29/*#define DEBUG 1 */ 29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */ 30/*#define UARTDELAY 1 */
31 31
32/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
33 high pages. Keep the macro around because of the broken unmerged ia64 tree */
34
35#define ADDR32 (0)
36
37#include <linux/module.h> 32#include <linux/module.h>
38 33
39MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn"); 34MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
@@ -108,27 +103,28 @@ static dpt_sig_S DPTI_sig = {
108 103
109static DEFINE_MUTEX(adpt_configuration_lock); 104static DEFINE_MUTEX(adpt_configuration_lock);
110 105
111static struct i2o_sys_tbl *sys_tbl = NULL; 106static struct i2o_sys_tbl *sys_tbl;
112static int sys_tbl_ind = 0; 107static dma_addr_t sys_tbl_pa;
113static int sys_tbl_len = 0; 108static int sys_tbl_ind;
109static int sys_tbl_len;
114 110
115static adpt_hba* hba_chain = NULL; 111static adpt_hba* hba_chain = NULL;
116static int hba_count = 0; 112static int hba_count = 0;
117 113
114static struct class *adpt_sysfs_class;
115
116#ifdef CONFIG_COMPAT
117static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
118#endif
119
118static const struct file_operations adpt_fops = { 120static const struct file_operations adpt_fops = {
119 .ioctl = adpt_ioctl, 121 .ioctl = adpt_ioctl,
120 .open = adpt_open, 122 .open = adpt_open,
121 .release = adpt_close 123 .release = adpt_close,
122}; 124#ifdef CONFIG_COMPAT
123 125 .compat_ioctl = compat_adpt_ioctl,
124#ifdef REBOOT_NOTIFIER
125static struct notifier_block adpt_reboot_notifier =
126{
127 adpt_reboot_event,
128 NULL,
129 0
130};
131#endif 126#endif
127};
132 128
133/* Structures and definitions for synchronous message posting. 129/* Structures and definitions for synchronous message posting.
134 * See adpt_i2o_post_wait() for description 130 * See adpt_i2o_post_wait() for description
@@ -151,9 +147,24 @@ static DEFINE_SPINLOCK(adpt_post_wait_lock);
151 *============================================================================ 147 *============================================================================
152 */ 148 */
153 149
150static inline int dpt_dma64(adpt_hba *pHba)
151{
152 return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
153}
154
155static inline u32 dma_high(dma_addr_t addr)
156{
157 return upper_32_bits(addr);
158}
159
160static inline u32 dma_low(dma_addr_t addr)
161{
162 return (u32)addr;
163}
164
154static u8 adpt_read_blink_led(adpt_hba* host) 165static u8 adpt_read_blink_led(adpt_hba* host)
155{ 166{
156 if(host->FwDebugBLEDflag_P != 0) { 167 if (host->FwDebugBLEDflag_P) {
157 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){ 168 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
158 return readb(host->FwDebugBLEDvalue_P); 169 return readb(host->FwDebugBLEDvalue_P);
159 } 170 }
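
The dpt_dma64(), dma_high() and dma_low() helpers added above are the building blocks for every 64-bit-capable message the driver now constructs: a dma_addr_t is split into the two 32-bit words an I2O frame expects. A sketch of how they combine, using a hypothetical fill helper (illustrative only):

#include <linux/kernel.h>	/* upper_32_bits() */
#include <linux/types.h>

/* Hypothetical helper: write a bus address into a message frame,
 * low word first, high word only when 64-bit SGEs are enabled. */
static u32 *msg_put_addr(u32 *mptr, dma_addr_t addr, int dma64)
{
	*mptr++ = (u32)addr;			/* dma_low()  */
	if (dma64)
		*mptr++ = upper_32_bits(addr);	/* dma_high() */
	return mptr;
}
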
@@ -178,8 +189,6 @@ static int adpt_detect(struct scsi_host_template* sht)
178 struct pci_dev *pDev = NULL; 189 struct pci_dev *pDev = NULL;
179 adpt_hba* pHba; 190 adpt_hba* pHba;
180 191
181 adpt_init();
182
183 PINFO("Detecting Adaptec I2O RAID controllers...\n"); 192 PINFO("Detecting Adaptec I2O RAID controllers...\n");
184 193
185 /* search for all Adatpec I2O RAID cards */ 194 /* search for all Adatpec I2O RAID cards */
@@ -247,13 +256,29 @@ rebuild_sys_tab:
247 adpt_inquiry(pHba); 256 adpt_inquiry(pHba);
248 } 257 }
249 258
259 adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
260 if (IS_ERR(adpt_sysfs_class)) {
261 printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
262 adpt_sysfs_class = NULL;
263 }
264
250 for (pHba = hba_chain; pHba; pHba = pHba->next) { 265 for (pHba = hba_chain; pHba; pHba = pHba->next) {
251 if( adpt_scsi_register(pHba,sht) < 0){ 266 if (adpt_scsi_host_alloc(pHba, sht) < 0){
252 adpt_i2o_delete_hba(pHba); 267 adpt_i2o_delete_hba(pHba);
253 continue; 268 continue;
254 } 269 }
255 pHba->initialized = TRUE; 270 pHba->initialized = TRUE;
256 pHba->state &= ~DPTI_STATE_RESET; 271 pHba->state &= ~DPTI_STATE_RESET;
272 if (adpt_sysfs_class) {
273 struct device *dev = device_create(adpt_sysfs_class,
274 NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit),
275 "dpti%d", pHba->unit);
276 if (IS_ERR(dev)) {
277 printk(KERN_WARNING"dpti%d: unable to "
278 "create device in dpt_i2o class\n",
279 pHba->unit);
280 }
281 }
257 } 282 }
258 283
259 // Register our control device node 284 // Register our control device node
@@ -282,7 +307,7 @@ static int adpt_release(struct Scsi_Host *host)
282 307
283static void adpt_inquiry(adpt_hba* pHba) 308static void adpt_inquiry(adpt_hba* pHba)
284{ 309{
285 u32 msg[14]; 310 u32 msg[17];
286 u32 *mptr; 311 u32 *mptr;
287 u32 *lenptr; 312 u32 *lenptr;
288 int direction; 313 int direction;
@@ -290,11 +315,12 @@ static void adpt_inquiry(adpt_hba* pHba)
290 u32 len; 315 u32 len;
291 u32 reqlen; 316 u32 reqlen;
292 u8* buf; 317 u8* buf;
318 dma_addr_t addr;
293 u8 scb[16]; 319 u8 scb[16];
294 s32 rcode; 320 s32 rcode;
295 321
296 memset(msg, 0, sizeof(msg)); 322 memset(msg, 0, sizeof(msg));
297 buf = kmalloc(80,GFP_KERNEL|ADDR32); 323 buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
298 if(!buf){ 324 if(!buf){
299 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name); 325 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
300 return; 326 return;
@@ -305,7 +331,10 @@ static void adpt_inquiry(adpt_hba* pHba)
305 direction = 0x00000000; 331 direction = 0x00000000;
306 scsidir =0x40000000; // DATA IN (iop<--dev) 332 scsidir =0x40000000; // DATA IN (iop<--dev)
307 333
308 reqlen = 14; // SINGLE SGE 334 if (dpt_dma64(pHba))
335 reqlen = 17; // SINGLE SGE, 64 bit
336 else
337 reqlen = 14; // SINGLE SGE, 32 bit
309 /* Stick the headers on */ 338 /* Stick the headers on */
310 msg[0] = reqlen<<16 | SGL_OFFSET_12; 339 msg[0] = reqlen<<16 | SGL_OFFSET_12;
311 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID); 340 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
@@ -338,8 +367,16 @@ static void adpt_inquiry(adpt_hba* pHba)
338 367
339 /* Now fill in the SGList and command */ 368 /* Now fill in the SGList and command */
340 *lenptr = len; 369 *lenptr = len;
341 *mptr++ = 0xD0000000|direction|len; 370 if (dpt_dma64(pHba)) {
342 *mptr++ = virt_to_bus(buf); 371 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
372 *mptr++ = 1 << PAGE_SHIFT;
373 *mptr++ = 0xD0000000|direction|len;
374 *mptr++ = dma_low(addr);
375 *mptr++ = dma_high(addr);
376 } else {
377 *mptr++ = 0xD0000000|direction|len;
378 *mptr++ = addr;
379 }
343 380
344 // Send it on it's way 381 // Send it on it's way
345 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120); 382 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
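
The inquiry path above shows the conversion applied throughout the rest of the patch: buffers handed to the controller move from `kmalloc(..., GFP_KERNEL|ADDR32)` plus `virt_to_bus()` to `dma_alloc_coherent()`, which returns both the CPU pointer and a bus address the DMA API has actually mapped. The bare pattern (illustrative sketch; wrapper names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

static void *dpti_buf_alloc(struct pci_dev *pdev, size_t len, dma_addr_t *pa)
{
	/* coherent memory: CPU pointer + device-visible address in one call */
	return dma_alloc_coherent(&pdev->dev, len, pa, GFP_KERNEL);
}

static void dpti_buf_free(struct pci_dev *pdev, size_t len,
			  void *va, dma_addr_t pa)
{
	dma_free_coherent(&pdev->dev, len, va, pa);
}
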
@@ -347,7 +384,7 @@ static void adpt_inquiry(adpt_hba* pHba)
347 sprintf(pHba->detail, "Adaptec I2O RAID"); 384 sprintf(pHba->detail, "Adaptec I2O RAID");
348 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode); 385 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
349 if (rcode != -ETIME && rcode != -EINTR) 386 if (rcode != -ETIME && rcode != -EINTR)
350 kfree(buf); 387 dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
351 } else { 388 } else {
352 memset(pHba->detail, 0, sizeof(pHba->detail)); 389 memset(pHba->detail, 0, sizeof(pHba->detail));
353 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16); 390 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
@@ -356,7 +393,7 @@ static void adpt_inquiry(adpt_hba* pHba)
356 memcpy(&(pHba->detail[40]), " FW: ", 4); 393 memcpy(&(pHba->detail[40]), " FW: ", 4);
357 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4); 394 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
358 pHba->detail[48] = '\0'; /* precautionary */ 395 pHba->detail[48] = '\0'; /* precautionary */
359 kfree(buf); 396 dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
360 } 397 }
361 adpt_i2o_status_get(pHba); 398 adpt_i2o_status_get(pHba);
362 return ; 399 return ;
@@ -632,6 +669,91 @@ stop_output:
632 return len; 669 return len;
633} 670}
634 671
672/*
673 * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
674 */
675static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
676{
677 return (u32)cmd->serial_number;
678}
679
680/*
681 * Go from a u32 'context' to a struct scsi_cmnd * .
682 * This could probably be made more efficient.
683 */
684static struct scsi_cmnd *
685 adpt_cmd_from_context(adpt_hba * pHba, u32 context)
686{
687 struct scsi_cmnd * cmd;
688 struct scsi_device * d;
689
690 if (context == 0)
691 return NULL;
692
693 spin_unlock(pHba->host->host_lock);
694 shost_for_each_device(d, pHba->host) {
695 unsigned long flags;
696 spin_lock_irqsave(&d->list_lock, flags);
697 list_for_each_entry(cmd, &d->cmd_list, list) {
698 if (((u32)cmd->serial_number == context)) {
699 spin_unlock_irqrestore(&d->list_lock, flags);
700 scsi_device_put(d);
701 spin_lock(pHba->host->host_lock);
702 return cmd;
703 }
704 }
705 spin_unlock_irqrestore(&d->list_lock, flags);
706 }
707 spin_lock(pHba->host->host_lock);
708
709 return NULL;
710}
711
712/*
713 * Turn a pointer to ioctl reply data into an u32 'context'
714 */
715static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
716{
717#if BITS_PER_LONG == 32
718 return (u32)(unsigned long)reply;
719#else
720 ulong flags = 0;
721 u32 nr, i;
722
723 spin_lock_irqsave(pHba->host->host_lock, flags);
724 nr = ARRAY_SIZE(pHba->ioctl_reply_context);
725 for (i = 0; i < nr; i++) {
726 if (pHba->ioctl_reply_context[i] == NULL) {
727 pHba->ioctl_reply_context[i] = reply;
728 break;
729 }
730 }
731 spin_unlock_irqrestore(pHba->host->host_lock, flags);
732 if (i >= nr) {
733 kfree (reply);
734 printk(KERN_WARNING"%s: Too many outstanding "
735 "ioctl commands\n", pHba->name);
736 return (u32)-1;
737 }
738
739 return i;
740#endif
741}
742
743/*
744 * Go from an u32 'context' to a pointer to ioctl reply data.
745 */
746static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
747{
748#if BITS_PER_LONG == 32
749 return (void *)(unsigned long)context;
750#else
751 void *p = pHba->ioctl_reply_context[context];
752 pHba->ioctl_reply_context[context] = NULL;
753
754 return p;
755#endif
756}
635 757
636/*=========================================================================== 758/*===========================================================================
637 * Error Handling routines 759 * Error Handling routines
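
The context helpers added above exist because the I2O transaction-context field in a message frame is only 32 bits wide, so a raw `struct scsi_cmnd *` no longer fits on 64-bit kernels. SCSI commands are keyed by their serial number and ioctl replies by an index into a small per-HBA table. The table idea in isolation (illustrative sketch; the real driver sizes the table per HBA and serializes access under host_lock, locking omitted here for brevity):

#include <linux/stddef.h>
#include <linux/types.h>

#define CTX_SLOTS 32			/* hypothetical, fixed-size table */

static void *ctx_slot[CTX_SLOTS];

static u32 ptr_to_context(void *p)
{
	u32 i;

	for (i = 0; i < CTX_SLOTS; i++) {
		if (ctx_slot[i] == NULL) {
			ctx_slot[i] = p;
			return i;	/* fits in a 32-bit message field */
		}
	}
	return (u32)-1;			/* table full */
}

static void *context_to_ptr(u32 i)
{
	void *p = ctx_slot[i];

	ctx_slot[i] = NULL;
	return p;
}
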
@@ -660,7 +782,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
660 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid; 782 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
661 msg[2] = 0; 783 msg[2] = 0;
662 msg[3]= 0; 784 msg[3]= 0;
663 msg[4] = (u32)cmd; 785 msg[4] = adpt_cmd_to_context(cmd);
664 if (pHba->host) 786 if (pHba->host)
665 spin_lock_irq(pHba->host->host_lock); 787 spin_lock_irq(pHba->host->host_lock);
666 rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER); 788 rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -861,27 +983,6 @@ static void adpt_i2o_sys_shutdown(void)
861 printk(KERN_INFO "Adaptec I2O controllers down.\n"); 983 printk(KERN_INFO "Adaptec I2O controllers down.\n");
862} 984}
863 985
864/*
865 * reboot/shutdown notification.
866 *
867 * - Quiesce each IOP in the system
868 *
869 */
870
871#ifdef REBOOT_NOTIFIER
872static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
873{
874
875 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
876 return NOTIFY_DONE;
877
878 adpt_i2o_sys_shutdown();
879
880 return NOTIFY_DONE;
881}
882#endif
883
884
885static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) 986static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
886{ 987{
887 988
@@ -893,6 +994,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
893 u32 hba_map1_area_size = 0; 994 u32 hba_map1_area_size = 0;
894 void __iomem *base_addr_virt = NULL; 995 void __iomem *base_addr_virt = NULL;
895 void __iomem *msg_addr_virt = NULL; 996 void __iomem *msg_addr_virt = NULL;
997 int dma64 = 0;
896 998
897 int raptorFlag = FALSE; 999 int raptorFlag = FALSE;
898 1000
@@ -906,9 +1008,21 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
906 } 1008 }
907 1009
908 pci_set_master(pDev); 1010 pci_set_master(pDev);
909 if (pci_set_dma_mask(pDev, DMA_32BIT_MASK)) 1011
1012 /*
1013 * See if we should enable dma64 mode.
1014 */
1015 if (sizeof(dma_addr_t) > 4 &&
1016 pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
1017 if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
1018 dma64 = 1;
1019 }
1020 if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
910 return -EINVAL; 1021 return -EINVAL;
911 1022
1023 /* adapter only supports message blocks below 4GB */
1024 pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);
1025
912 base_addr0_phys = pci_resource_start(pDev,0); 1026 base_addr0_phys = pci_resource_start(pDev,0);
913 hba_map0_area_size = pci_resource_len(pDev,0); 1027 hba_map0_area_size = pci_resource_len(pDev,0);
914 1028
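
The probe changes above negotiate the DMA masks in two steps: try a 64-bit streaming mask and keep it only if the platform actually needs addresses above 4 GB, otherwise fall back to 32-bit, and in every case pin coherent allocations below 4 GB because the adapter addresses its message frames with 32-bit pointers. Roughly (illustrative sketch using this kernel generation's DMA_*BIT_MASK constants):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int dpti_setup_dma(struct pci_dev *pdev, int *dma64)
{
	*dma64 = 0;
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0 &&
	    dma_get_required_mask(&pdev->dev) > DMA_32BIT_MASK)
		*dma64 = 1;
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
		return -EINVAL;

	/* message frames must still be 32-bit addressable */
	return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
}
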
@@ -929,6 +1043,25 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
929 raptorFlag = TRUE; 1043 raptorFlag = TRUE;
930 } 1044 }
931 1045
1046#if BITS_PER_LONG == 64
1047 /*
1048 * The original Adaptec 64 bit driver has this comment here:
1049 * "x86_64 machines need more optimal mappings"
1050 *
1051 * I assume some HBAs report ridiculously large mappings
1052 * and we need to limit them on platforms with IOMMUs.
1053 */
1054 if (raptorFlag == TRUE) {
1055 if (hba_map0_area_size > 128)
1056 hba_map0_area_size = 128;
1057 if (hba_map1_area_size > 524288)
1058 hba_map1_area_size = 524288;
1059 } else {
1060 if (hba_map0_area_size > 524288)
1061 hba_map0_area_size = 524288;
1062 }
1063#endif
1064
932 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size); 1065 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
933 if (!base_addr_virt) { 1066 if (!base_addr_virt) {
934 pci_release_regions(pDev); 1067 pci_release_regions(pDev);
@@ -991,16 +1124,22 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
991 pHba->state = DPTI_STATE_RESET; 1124 pHba->state = DPTI_STATE_RESET;
992 pHba->pDev = pDev; 1125 pHba->pDev = pDev;
993 pHba->devices = NULL; 1126 pHba->devices = NULL;
1127 pHba->dma64 = dma64;
994 1128
995 // Initializing the spinlocks 1129 // Initializing the spinlocks
996 spin_lock_init(&pHba->state_lock); 1130 spin_lock_init(&pHba->state_lock);
997 spin_lock_init(&adpt_post_wait_lock); 1131 spin_lock_init(&adpt_post_wait_lock);
998 1132
999 if(raptorFlag == 0){ 1133 if(raptorFlag == 0){
1000 printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n", 1134 printk(KERN_INFO "Adaptec I2O RAID controller"
1001 hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq); 1135 " %d at %p size=%x irq=%d%s\n",
1136 hba_count-1, base_addr_virt,
1137 hba_map0_area_size, pDev->irq,
1138 dma64 ? " (64-bit DMA)" : "");
1002 } else { 1139 } else {
1003 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq); 1140 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1141 hba_count-1, pDev->irq,
1142 dma64 ? " (64-bit DMA)" : "");
1004 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size); 1143 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1005 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size); 1144 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1006 } 1145 }
@@ -1053,10 +1192,26 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
1053 if(pHba->msg_addr_virt != pHba->base_addr_virt){ 1192 if(pHba->msg_addr_virt != pHba->base_addr_virt){
1054 iounmap(pHba->msg_addr_virt); 1193 iounmap(pHba->msg_addr_virt);
1055 } 1194 }
1056 kfree(pHba->hrt); 1195 if(pHba->FwDebugBuffer_P)
1057 kfree(pHba->lct); 1196 iounmap(pHba->FwDebugBuffer_P);
1058 kfree(pHba->status_block); 1197 if(pHba->hrt) {
1059 kfree(pHba->reply_pool); 1198 dma_free_coherent(&pHba->pDev->dev,
1199 pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1200 pHba->hrt, pHba->hrt_pa);
1201 }
1202 if(pHba->lct) {
1203 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1204 pHba->lct, pHba->lct_pa);
1205 }
1206 if(pHba->status_block) {
1207 dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1208 pHba->status_block, pHba->status_block_pa);
1209 }
1210 if(pHba->reply_pool) {
1211 dma_free_coherent(&pHba->pDev->dev,
1212 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1213 pHba->reply_pool, pHba->reply_pool_pa);
1214 }
1060 1215
1061 for(d = pHba->devices; d ; d = next){ 1216 for(d = pHba->devices; d ; d = next){
1062 next = d->next; 1217 next = d->next;
@@ -1075,23 +1230,19 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
1075 pci_dev_put(pHba->pDev); 1230 pci_dev_put(pHba->pDev);
1076 kfree(pHba); 1231 kfree(pHba);
1077 1232
1233 if (adpt_sysfs_class)
1234 device_destroy(adpt_sysfs_class,
1235 MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1236
1078 if(hba_count <= 0){ 1237 if(hba_count <= 0){
1079 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); 1238 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1239 if (adpt_sysfs_class) {
1240 class_destroy(adpt_sysfs_class);
1241 adpt_sysfs_class = NULL;
1242 }
1080 } 1243 }
1081} 1244}
1082 1245
1083
1084static int adpt_init(void)
1085{
1086 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
1087#ifdef REBOOT_NOTIFIER
1088 register_reboot_notifier(&adpt_reboot_notifier);
1089#endif
1090
1091 return 0;
1092}
1093
1094
1095static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun) 1246static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1096{ 1247{
1097 struct adpt_device* d; 1248 struct adpt_device* d;
@@ -1283,6 +1434,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1283{ 1434{
1284 u32 msg[8]; 1435 u32 msg[8];
1285 u8* status; 1436 u8* status;
1437 dma_addr_t addr;
1286 u32 m = EMPTY_QUEUE ; 1438 u32 m = EMPTY_QUEUE ;
1287 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ); 1439 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1288 1440
@@ -1305,12 +1457,13 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1305 schedule_timeout_uninterruptible(1); 1457 schedule_timeout_uninterruptible(1);
1306 } while (m == EMPTY_QUEUE); 1458 } while (m == EMPTY_QUEUE);
1307 1459
1308 status = kzalloc(4, GFP_KERNEL|ADDR32); 1460 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
1309 if(status == NULL) { 1461 if(status == NULL) {
1310 adpt_send_nop(pHba, m); 1462 adpt_send_nop(pHba, m);
1311 printk(KERN_ERR"IOP reset failed - no free memory.\n"); 1463 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1312 return -ENOMEM; 1464 return -ENOMEM;
1313 } 1465 }
1466 memset(status,0,4);
1314 1467
1315 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0; 1468 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1316 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID; 1469 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
@@ -1318,8 +1471,8 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1318 msg[3]=0; 1471 msg[3]=0;
1319 msg[4]=0; 1472 msg[4]=0;
1320 msg[5]=0; 1473 msg[5]=0;
1321 msg[6]=virt_to_bus(status); 1474 msg[6]=dma_low(addr);
1322 msg[7]=0; 1475 msg[7]=dma_high(addr);
1323 1476
1324 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg)); 1477 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1325 wmb(); 1478 wmb();
@@ -1329,7 +1482,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1329 while(*status == 0){ 1482 while(*status == 0){
1330 if(time_after(jiffies,timeout)){ 1483 if(time_after(jiffies,timeout)){
1331 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name); 1484 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1332 kfree(status); 1485 /* We lose 4 bytes of "status" here, but we cannot
1486 free these because controller may awake and corrupt
1487 those bytes at any time */
1488 /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1333 return -ETIMEDOUT; 1489 return -ETIMEDOUT;
1334 } 1490 }
1335 rmb(); 1491 rmb();
@@ -1348,6 +1504,10 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1348 } 1504 }
1349 if(time_after(jiffies,timeout)){ 1505 if(time_after(jiffies,timeout)){
1350 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name); 1506 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1507 /* We lose 4 bytes of "status" here, but we
1508 cannot free these because controller may
1509 awake and corrupt those bytes at any time */
1510 /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1351 return -ETIMEDOUT; 1511 return -ETIMEDOUT;
1352 } 1512 }
1353 schedule_timeout_uninterruptible(1); 1513 schedule_timeout_uninterruptible(1);
@@ -1364,7 +1524,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1364 PDEBUG("%s: Reset completed.\n", pHba->name); 1524 PDEBUG("%s: Reset completed.\n", pHba->name);
1365 } 1525 }
1366 1526
1367 kfree(status); 1527 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
1368#ifdef UARTDELAY 1528#ifdef UARTDELAY
1369 // This delay is to allow someone attached to the card through the debug UART to 1529 // This delay is to allow someone attached to the card through the debug UART to
1370 // set up the dump levels that they want before the rest of the initialization sequence 1530 // set up the dump levels that they want before the rest of the initialization sequence
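
Note the comments added in the reset path: when the IOP never answers, the 4-byte status buffer is deliberately leaked rather than freed, because the controller may still wake up and DMA into that address later. The same pattern as a generic helper (illustrative sketch, not part of the patch):

#include <asm/system.h>		/* rmb() */
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static int poll_status(struct device *dev, u8 *status, dma_addr_t pa,
		       unsigned long deadline)
{
	while (*status == 0) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* leak: device may still write */
		rmb();
		schedule_timeout_uninterruptible(1);
	}
	dma_free_coherent(dev, 4, status, pa);	/* safe only after completion */
	return 0;
}
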
@@ -1636,6 +1796,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1636 u32 i = 0; 1796 u32 i = 0;
1637 u32 rcode = 0; 1797 u32 rcode = 0;
1638 void *p = NULL; 1798 void *p = NULL;
1799 dma_addr_t addr;
1639 ulong flags = 0; 1800 ulong flags = 0;
1640 1801
1641 memset(&msg, 0, MAX_MESSAGE_SIZE*4); 1802 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
@@ -1668,10 +1829,13 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1668 } 1829 }
1669 sg_offset = (msg[0]>>4)&0xf; 1830 sg_offset = (msg[0]>>4)&0xf;
1670 msg[2] = 0x40000000; // IOCTL context 1831 msg[2] = 0x40000000; // IOCTL context
1671 msg[3] = (u32)reply; 1832 msg[3] = adpt_ioctl_to_context(pHba, reply);
1833 if (msg[3] == (u32)-1)
1834 return -EBUSY;
1835
1672 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize); 1836 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1673 if(sg_offset) { 1837 if(sg_offset) {
1674 // TODO 64bit fix 1838 // TODO add 64 bit API
1675 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset); 1839 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1676 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); 1840 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1677 if (sg_count > pHba->sg_tablesize){ 1841 if (sg_count > pHba->sg_tablesize){
@@ -1690,7 +1854,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1690 } 1854 }
1691 sg_size = sg[i].flag_count & 0xffffff; 1855 sg_size = sg[i].flag_count & 0xffffff;
1692 /* Allocate memory for the transfer */ 1856 /* Allocate memory for the transfer */
1693 p = kmalloc(sg_size, GFP_KERNEL|ADDR32); 1857 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1694 if(!p) { 1858 if(!p) {
1695 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 1859 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1696 pHba->name,sg_size,i,sg_count); 1860 pHba->name,sg_size,i,sg_count);
@@ -1700,15 +1864,15 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1700 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. 1864 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1701 /* Copy in the user's SG buffer if necessary */ 1865 /* Copy in the user's SG buffer if necessary */
1702 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) { 1866 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1703 // TODO 64bit fix 1867 // sg_simple_element API is 32 bit
1704 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) { 1868 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1705 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i); 1869 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1706 rcode = -EFAULT; 1870 rcode = -EFAULT;
1707 goto cleanup; 1871 goto cleanup;
1708 } 1872 }
1709 } 1873 }
1710 //TODO 64bit fix 1874 /* sg_simple_element API is 32 bit, but addr < 4GB */
1711 sg[i].addr_bus = (u32)virt_to_bus(p); 1875 sg[i].addr_bus = addr;
1712 } 1876 }
1713 } 1877 }
1714 1878
@@ -1736,7 +1900,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1736 if(sg_offset) { 1900 if(sg_offset) {
1737 /* Copy back the Scatter Gather buffers back to user space */ 1901 /* Copy back the Scatter Gather buffers back to user space */
1738 u32 j; 1902 u32 j;
1739 // TODO 64bit fix 1903 // TODO add 64 bit API
1740 struct sg_simple_element* sg; 1904 struct sg_simple_element* sg;
1741 int sg_size; 1905 int sg_size;
1742 1906
@@ -1756,14 +1920,14 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1756 } 1920 }
1757 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); 1921 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1758 1922
1759 // TODO 64bit fix 1923 // TODO add 64 bit API
1760 sg = (struct sg_simple_element*)(msg + sg_offset); 1924 sg = (struct sg_simple_element*)(msg + sg_offset);
1761 for (j = 0; j < sg_count; j++) { 1925 for (j = 0; j < sg_count; j++) {
1762 /* Copy out the SG list to user's buffer if necessary */ 1926 /* Copy out the SG list to user's buffer if necessary */
1763 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) { 1927 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1764 sg_size = sg[j].flag_count & 0xffffff; 1928 sg_size = sg[j].flag_count & 0xffffff;
1765 // TODO 64bit fix 1929 // sg_simple_element API is 32 bit
1766 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) { 1930 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1767 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus); 1931 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1768 rcode = -EFAULT; 1932 rcode = -EFAULT;
1769 goto cleanup; 1933 goto cleanup;
@@ -1787,12 +1951,17 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1787 1951
1788 1952
1789cleanup: 1953cleanup:
1790 if (rcode != -ETIME && rcode != -EINTR) 1954 if (rcode != -ETIME && rcode != -EINTR) {
1955 struct sg_simple_element *sg =
1956 (struct sg_simple_element*) (msg +sg_offset);
1791 kfree (reply); 1957 kfree (reply);
1792 while(sg_index) { 1958 while(sg_index) {
1793 if(sg_list[--sg_index]) { 1959 if(sg_list[--sg_index]) {
1794 if (rcode != -ETIME && rcode != -EINTR) 1960 dma_free_coherent(&pHba->pDev->dev,
1795 kfree(sg_list[sg_index]); 1961 sg[sg_index].flag_count & 0xffffff,
1962 sg_list[sg_index],
1963 sg[sg_index].addr_bus);
1964 }
1796 } 1965 }
1797 } 1966 }
1798 return rcode; 1967 return rcode;
@@ -1978,6 +2147,38 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
1978 return error; 2147 return error;
1979} 2148}
1980 2149
2150#ifdef CONFIG_COMPAT
2151static long compat_adpt_ioctl(struct file *file,
2152 unsigned int cmd, unsigned long arg)
2153{
2154 struct inode *inode;
2155 long ret;
2156
2157 inode = file->f_dentry->d_inode;
2158
2159 lock_kernel();
2160
2161 switch(cmd) {
2162 case DPT_SIGNATURE:
2163 case I2OUSRCMD:
2164 case DPT_CTRLINFO:
2165 case DPT_SYSINFO:
2166 case DPT_BLINKLED:
2167 case I2ORESETCMD:
2168 case I2ORESCANCMD:
2169 case (DPT_TARGET_BUSY & 0xFFFF):
2170 case DPT_TARGET_BUSY:
2171 ret = adpt_ioctl(inode, file, cmd, arg);
2172 break;
2173 default:
2174 ret = -ENOIOCTLCMD;
2175 }
2176
2177 unlock_kernel();
2178
2179 return ret;
2180}
2181#endif
1981 2182
1982static irqreturn_t adpt_isr(int irq, void *dev_id) 2183static irqreturn_t adpt_isr(int irq, void *dev_id)
1983{ 2184{
@@ -2009,7 +2210,16 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
2009 goto out; 2210 goto out;
2010 } 2211 }
2011 } 2212 }
2012 reply = bus_to_virt(m); 2213 if (pHba->reply_pool_pa <= m &&
2214 m < pHba->reply_pool_pa +
2215 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2216 reply = (u8 *)pHba->reply_pool +
2217 (m - pHba->reply_pool_pa);
2218 } else {
2219 /* Ick, we should *never* be here */
2220 printk(KERN_ERR "dpti: reply frame not from pool\n");
2221 reply = (u8 *)bus_to_virt(m);
2222 }
2013 2223
2014 if (readl(reply) & MSG_FAIL) { 2224 if (readl(reply) & MSG_FAIL) {
2015 u32 old_m = readl(reply+28); 2225 u32 old_m = readl(reply+28);
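
The interrupt handler above no longer trusts bus_to_virt(): with the reply pool allocated by dma_alloc_coherent(), the bus address returned by the outbound FIFO is translated by offset into the pool, and anything outside that window is treated as an error. The translation in isolation (illustrative sketch):

#include <linux/stddef.h>
#include <linux/types.h>

/* Map a 32-bit reply-frame bus address back to its CPU address by
 * offsetting into the coherent pool; NULL if it is not from the pool. */
static void *reply_from_pool(void *pool_va, dma_addr_t pool_pa,
			     size_t pool_bytes, u32 m)
{
	if (m < pool_pa || m >= pool_pa + pool_bytes)
		return NULL;
	return (u8 *)pool_va + (m - pool_pa);
}
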
@@ -2029,7 +2239,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
2029 } 2239 }
2030 context = readl(reply+8); 2240 context = readl(reply+8);
2031 if(context & 0x40000000){ // IOCTL 2241 if(context & 0x40000000){ // IOCTL
2032 void *p = (void *)readl(reply+12); 2242 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2033 if( p != NULL) { 2243 if( p != NULL) {
2034 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4); 2244 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2035 } 2245 }
@@ -2043,15 +2253,17 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
2043 status = I2O_POST_WAIT_OK; 2253 status = I2O_POST_WAIT_OK;
2044 } 2254 }
2045 if(!(context & 0x40000000)) { 2255 if(!(context & 0x40000000)) {
2046 cmd = (struct scsi_cmnd*) readl(reply+12); 2256 cmd = adpt_cmd_from_context(pHba,
2257 readl(reply+12));
2047 if(cmd != NULL) { 2258 if(cmd != NULL) {
2048 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context); 2259 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2049 } 2260 }
2050 } 2261 }
2051 adpt_i2o_post_wait_complete(context, status); 2262 adpt_i2o_post_wait_complete(context, status);
2052 } else { // SCSI message 2263 } else { // SCSI message
2053 cmd = (struct scsi_cmnd*) readl(reply+12); 2264 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2054 if(cmd != NULL){ 2265 if(cmd != NULL){
2266 scsi_dma_unmap(cmd);
2055 if(cmd->serial_number != 0) { // If not timedout 2267 if(cmd->serial_number != 0) { // If not timedout
2056 adpt_i2o_to_scsi(reply, cmd); 2268 adpt_i2o_to_scsi(reply, cmd);
2057 } 2269 }
@@ -2072,6 +2284,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2072 int i; 2284 int i;
2073 u32 msg[MAX_MESSAGE_SIZE]; 2285 u32 msg[MAX_MESSAGE_SIZE];
2074 u32* mptr; 2286 u32* mptr;
2287 u32* lptr;
2075 u32 *lenptr; 2288 u32 *lenptr;
2076 int direction; 2289 int direction;
2077 int scsidir; 2290 int scsidir;
@@ -2079,6 +2292,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2079 u32 len; 2292 u32 len;
2080 u32 reqlen; 2293 u32 reqlen;
2081 s32 rcode; 2294 s32 rcode;
2295 dma_addr_t addr;
2082 2296
2083 memset(msg, 0 , sizeof(msg)); 2297 memset(msg, 0 , sizeof(msg));
2084 len = scsi_bufflen(cmd); 2298 len = scsi_bufflen(cmd);
@@ -2118,7 +2332,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2118 // I2O_CMD_SCSI_EXEC 2332 // I2O_CMD_SCSI_EXEC
2119 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid); 2333 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2120 msg[2] = 0; 2334 msg[2] = 0;
2121 msg[3] = (u32)cmd; /* We want the SCSI control block back */ 2335 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
2122 // Our cards use the transaction context as the tag for queueing 2336 // Our cards use the transaction context as the tag for queueing
2123 // Adaptec/DPT Private stuff 2337 // Adaptec/DPT Private stuff
2124 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16); 2338 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
@@ -2136,7 +2350,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2136 memcpy(mptr, cmd->cmnd, cmd->cmd_len); 2350 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2137 mptr+=4; 2351 mptr+=4;
2138 lenptr=mptr++; /* Remember me - fill in when we know */ 2352 lenptr=mptr++; /* Remember me - fill in when we know */
2139 reqlen = 14; // SINGLE SGE 2353 if (dpt_dma64(pHba)) {
2354 reqlen = 16; // SINGLE SGE
2355 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2356 *mptr++ = 1 << PAGE_SHIFT;
2357 } else {
2358 reqlen = 14; // SINGLE SGE
2359 }
2140 /* Now fill in the SGList and command */ 2360 /* Now fill in the SGList and command */
2141 2361
2142 nseg = scsi_dma_map(cmd); 2362 nseg = scsi_dma_map(cmd);
@@ -2146,12 +2366,16 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2146 2366
2147 len = 0; 2367 len = 0;
2148 scsi_for_each_sg(cmd, sg, nseg, i) { 2368 scsi_for_each_sg(cmd, sg, nseg, i) {
2369 lptr = mptr;
2149 *mptr++ = direction|0x10000000|sg_dma_len(sg); 2370 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2150 len+=sg_dma_len(sg); 2371 len+=sg_dma_len(sg);
2151 *mptr++ = sg_dma_address(sg); 2372 addr = sg_dma_address(sg);
2373 *mptr++ = dma_low(addr);
2374 if (dpt_dma64(pHba))
2375 *mptr++ = dma_high(addr);
2152 /* Make this an end of list */ 2376 /* Make this an end of list */
2153 if (i == nseg - 1) 2377 if (i == nseg - 1)
2154 mptr[-2] = direction|0xD0000000|sg_dma_len(sg); 2378 *lptr = direction|0xD0000000|sg_dma_len(sg);
2155 } 2379 }
2156 reqlen = mptr - msg; 2380 reqlen = mptr - msg;
2157 *lenptr = len; 2381 *lenptr = len;
@@ -2177,13 +2401,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2177} 2401}
2178 2402
2179 2403
2180static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht) 2404static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2181{ 2405{
2182 struct Scsi_Host *host = NULL; 2406 struct Scsi_Host *host;
2183 2407
2184 host = scsi_register(sht, sizeof(adpt_hba*)); 2408 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2185 if (host == NULL) { 2409 if (host == NULL) {
2186 printk ("%s: scsi_register returned NULL\n",pHba->name); 2410 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2187 return -1; 2411 return -1;
2188 } 2412 }
2189 host->hostdata[0] = (unsigned long)pHba; 2413 host->hostdata[0] = (unsigned long)pHba;
@@ -2200,7 +2424,7 @@ static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
2200 host->max_lun = 256; 2424 host->max_lun = 256;
2201 host->max_channel = pHba->top_scsi_channel + 1; 2425 host->max_channel = pHba->top_scsi_channel + 1;
2202 host->cmd_per_lun = 1; 2426 host->cmd_per_lun = 1;
2203 host->unique_id = (uint) pHba; 2427 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2204 host->sg_tablesize = pHba->sg_tablesize; 2428 host->sg_tablesize = pHba->sg_tablesize;
2205 host->can_queue = pHba->post_fifo_size; 2429 host->can_queue = pHba->post_fifo_size;
2206 2430
@@ -2640,11 +2864,10 @@ static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2640static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba) 2864static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2641{ 2865{
2642 u8 *status; 2866 u8 *status;
2867 dma_addr_t addr;
2643 u32 __iomem *msg = NULL; 2868 u32 __iomem *msg = NULL;
2644 int i; 2869 int i;
2645 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ; 2870 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2646 u32* ptr;
2647 u32 outbound_frame; // This had to be a 32 bit address
2648 u32 m; 2871 u32 m;
2649 2872
2650 do { 2873 do {
@@ -2663,13 +2886,14 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2663 2886
2664 msg=(u32 __iomem *)(pHba->msg_addr_virt+m); 2887 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2665 2888
2666 status = kzalloc(4, GFP_KERNEL|ADDR32); 2889 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2667 if (!status) { 2890 if (!status) {
2668 adpt_send_nop(pHba, m); 2891 adpt_send_nop(pHba, m);
2669 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n", 2892 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2670 pHba->name); 2893 pHba->name);
2671 return -ENOMEM; 2894 return -ENOMEM;
2672 } 2895 }
2896 memset(status, 0, 4);
2673 2897
2674 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]); 2898 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2675 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]); 2899 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
@@ -2678,7 +2902,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2678 writel(4096, &msg[4]); /* Host page frame size */ 2902 writel(4096, &msg[4]); /* Host page frame size */
2679 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */ 2903 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2680 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */ 2904 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2681 writel(virt_to_bus(status), &msg[7]); 2905 writel((u32)addr, &msg[7]);
2682 2906
2683 writel(m, pHba->post_port); 2907 writel(m, pHba->post_port);
2684 wmb(); 2908 wmb();
@@ -2693,6 +2917,10 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2693 rmb(); 2917 rmb();
2694 if(time_after(jiffies,timeout)){ 2918 if(time_after(jiffies,timeout)){
2695 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name); 2919 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2920 /* We lose 4 bytes of "status" here, but we
2921 cannot free these because controller may
2922 awake and corrupt those bytes at any time */
2923 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2696 return -ETIMEDOUT; 2924 return -ETIMEDOUT;
2697 } 2925 }
2698 schedule_timeout_uninterruptible(1); 2926 schedule_timeout_uninterruptible(1);
@@ -2701,25 +2929,30 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2701 // If the command was successful, fill the fifo with our reply 2929 // If the command was successful, fill the fifo with our reply
2702 // message packets 2930 // message packets
2703 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) { 2931 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2704 kfree(status); 2932 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2705 return -2; 2933 return -2;
2706 } 2934 }
2707 kfree(status); 2935 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2708 2936
2709 kfree(pHba->reply_pool); 2937 if(pHba->reply_pool != NULL) {
2938 dma_free_coherent(&pHba->pDev->dev,
2939 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2940 pHba->reply_pool, pHba->reply_pool_pa);
2941 }
2710 2942
2711 pHba->reply_pool = kzalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32); 2943 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2944 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2945 &pHba->reply_pool_pa, GFP_KERNEL);
2712 if (!pHba->reply_pool) { 2946 if (!pHba->reply_pool) {
2713 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name); 2947 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2714 return -ENOMEM; 2948 return -ENOMEM;
2715 } 2949 }
2950 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2716 2951
2717 ptr = pHba->reply_pool;
2718 for(i = 0; i < pHba->reply_fifo_size; i++) { 2952 for(i = 0; i < pHba->reply_fifo_size; i++) {
2719 outbound_frame = (u32)virt_to_bus(ptr); 2953 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2720 writel(outbound_frame, pHba->reply_port); 2954 pHba->reply_port);
2721 wmb(); 2955 wmb();
2722 ptr += REPLY_FRAME_SIZE;
2723 } 2956 }
2724 adpt_i2o_status_get(pHba); 2957 adpt_i2o_status_get(pHba);
2725 return 0; 2958 return 0;
@@ -2743,11 +2976,11 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
2743 u32 m; 2976 u32 m;
2744 u32 __iomem *msg; 2977 u32 __iomem *msg;
2745 u8 *status_block=NULL; 2978 u8 *status_block=NULL;
2746 ulong status_block_bus;
2747 2979
2748 if(pHba->status_block == NULL) { 2980 if(pHba->status_block == NULL) {
2749 pHba->status_block = (i2o_status_block*) 2981 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2750 kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32); 2982 sizeof(i2o_status_block),
2983 &pHba->status_block_pa, GFP_KERNEL);
2751 if(pHba->status_block == NULL) { 2984 if(pHba->status_block == NULL) {
2752 printk(KERN_ERR 2985 printk(KERN_ERR
2753 "dpti%d: Get Status Block failed; Out of memory. \n", 2986 "dpti%d: Get Status Block failed; Out of memory. \n",
@@ -2757,7 +2990,6 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
2757 } 2990 }
2758 memset(pHba->status_block, 0, sizeof(i2o_status_block)); 2991 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2759 status_block = (u8*)(pHba->status_block); 2992 status_block = (u8*)(pHba->status_block);
2760 status_block_bus = virt_to_bus(pHba->status_block);
2761 timeout = jiffies+TMOUT_GETSTATUS*HZ; 2993 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2762 do { 2994 do {
2763 rmb(); 2995 rmb();
@@ -2782,8 +3014,8 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
2782 writel(0, &msg[3]); 3014 writel(0, &msg[3]);
2783 writel(0, &msg[4]); 3015 writel(0, &msg[4]);
2784 writel(0, &msg[5]); 3016 writel(0, &msg[5]);
2785 writel(((u32)status_block_bus)&0xffffffff, &msg[6]); 3017 writel( dma_low(pHba->status_block_pa), &msg[6]);
2786 writel(0, &msg[7]); 3018 writel( dma_high(pHba->status_block_pa), &msg[7]);
2787 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes 3019 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2788 3020
2789 //post message 3021 //post message
@@ -2812,7 +3044,17 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
2812 } 3044 }
2813 3045
2814 // Calculate the Scatter Gather list size 3046 // Calculate the Scatter Gather list size
2815 pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element); 3047 if (dpt_dma64(pHba)) {
3048 pHba->sg_tablesize
3049 = ((pHba->status_block->inbound_frame_size * 4
3050 - 14 * sizeof(u32))
3051 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3052 } else {
3053 pHba->sg_tablesize
3054 = ((pHba->status_block->inbound_frame_size * 4
3055 - 12 * sizeof(u32))
3056 / sizeof(struct sg_simple_element));
3057 }
2816 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) { 3058 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2817 pHba->sg_tablesize = SG_LIST_ELEMENTS; 3059 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2818 } 3060 }
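
The sg_tablesize calculation above follows directly from the inbound frame budget: the frame holds `inbound_frame_size * 4` bytes, the fixed message header costs 12 words for 32-bit SGLs (14 once the two 64-bit SGL attribute words are added), and each 64-bit SGE carries one extra 32-bit address word. The same arithmetic as a standalone helper (illustrative sketch; the sg_simple_element layout shown is the 8-byte flag_count/addr_bus pair the driver uses):

#include <linux/types.h>

struct sg_simple_element {	/* 32-bit I2O simple SGE: 8 bytes */
	u32 flag_count;
	u32 addr_bus;
};

static int dpti_sg_budget(u32 inbound_frame_size, int dma64)
{
	u32 frame_bytes = inbound_frame_size * 4;

	if (dma64)		/* 14-word header, 12-byte SGEs */
		return (frame_bytes - 14 * sizeof(u32)) /
		       (sizeof(struct sg_simple_element) + sizeof(u32));

				/* 12-word header, 8-byte SGEs */
	return (frame_bytes - 12 * sizeof(u32)) /
	       sizeof(struct sg_simple_element);
}
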
@@ -2863,7 +3105,9 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2863 } 3105 }
2864 do { 3106 do {
2865 if (pHba->lct == NULL) { 3107 if (pHba->lct == NULL) {
2866 pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32); 3108 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3109 pHba->lct_size, &pHba->lct_pa,
3110 GFP_KERNEL);
2867 if(pHba->lct == NULL) { 3111 if(pHba->lct == NULL) {
2868 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n", 3112 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2869 pHba->name); 3113 pHba->name);
@@ -2879,7 +3123,7 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2879 msg[4] = 0xFFFFFFFF; /* All devices */ 3123 msg[4] = 0xFFFFFFFF; /* All devices */
2880 msg[5] = 0x00000000; /* Report now */ 3124 msg[5] = 0x00000000; /* Report now */
2881 msg[6] = 0xD0000000|pHba->lct_size; 3125 msg[6] = 0xD0000000|pHba->lct_size;
2882 msg[7] = virt_to_bus(pHba->lct); 3126 msg[7] = (u32)pHba->lct_pa;
2883 3127
2884 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) { 3128 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
2885 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n", 3129 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
@@ -2890,7 +3134,8 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2890 3134
2891 if ((pHba->lct->table_size << 2) > pHba->lct_size) { 3135 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2892 pHba->lct_size = pHba->lct->table_size << 2; 3136 pHba->lct_size = pHba->lct->table_size << 2;
2893 kfree(pHba->lct); 3137 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3138 pHba->lct, pHba->lct_pa);
2894 pHba->lct = NULL; 3139 pHba->lct = NULL;
2895 } 3140 }
2896 } while (pHba->lct == NULL); 3141 } while (pHba->lct == NULL);
@@ -2901,13 +3146,19 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2901 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO; 3146 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
2902 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) { 3147 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2903 pHba->FwDebugBufferSize = buf[1]; 3148 pHba->FwDebugBufferSize = buf[1];
2904 pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0]; 3149 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
2905 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET; 3150 pHba->FwDebugBufferSize);
2906 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET; 3151 if (pHba->FwDebugBuffer_P) {
2907 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1; 3152 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
2908 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET; 3153 FW_DEBUG_FLAGS_OFFSET;
2909 pHba->FwDebugBuffer_P += buf[2]; 3154 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
2910 pHba->FwDebugFlags = 0; 3155 FW_DEBUG_BLED_OFFSET;
3156 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3157 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3158 FW_DEBUG_STR_LENGTH_OFFSET;
3159 pHba->FwDebugBuffer_P += buf[2];
3160 pHba->FwDebugFlags = 0;
3161 }
2911 } 3162 }
2912 3163
2913 return 0; 3164 return 0;
@@ -2915,25 +3166,30 @@ static int adpt_i2o_lct_get(adpt_hba* pHba)
2915 3166
2916static int adpt_i2o_build_sys_table(void) 3167static int adpt_i2o_build_sys_table(void)
2917{ 3168{
2918 adpt_hba* pHba = NULL; 3169 adpt_hba* pHba = hba_chain;
2919 int count = 0; 3170 int count = 0;
2920 3171
3172 if (sys_tbl)
3173 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3174 sys_tbl, sys_tbl_pa);
3175
2921 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs 3176 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2922 (hba_count) * sizeof(struct i2o_sys_tbl_entry); 3177 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
2923 3178
2924 kfree(sys_tbl); 3179 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
2925 3180 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
2926 sys_tbl = kzalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2927 if (!sys_tbl) { 3181 if (!sys_tbl) {
2928 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n"); 3182 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
2929 return -ENOMEM; 3183 return -ENOMEM;
2930 } 3184 }
3185 memset(sys_tbl, 0, sys_tbl_len);
2931 3186
2932 sys_tbl->num_entries = hba_count; 3187 sys_tbl->num_entries = hba_count;
2933 sys_tbl->version = I2OVERSION; 3188 sys_tbl->version = I2OVERSION;
2934 sys_tbl->change_ind = sys_tbl_ind++; 3189 sys_tbl->change_ind = sys_tbl_ind++;
2935 3190
2936 for(pHba = hba_chain; pHba; pHba = pHba->next) { 3191 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3192 u64 addr;
2937 // Get updated Status Block so we have the latest information 3193 // Get updated Status Block so we have the latest information
2938 if (adpt_i2o_status_get(pHba)) { 3194 if (adpt_i2o_status_get(pHba)) {
2939 sys_tbl->num_entries--; 3195 sys_tbl->num_entries--;
@@ -2949,8 +3205,9 @@ static int adpt_i2o_build_sys_table(void)
2949 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size; 3205 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2950 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ?? 3206 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2951 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities; 3207 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
2952 sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port); 3208 addr = pHba->base_addr_phys + 0x40;
2953 sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32); 3209 sys_tbl->iops[count].inbound_low = dma_low(addr);
3210 sys_tbl->iops[count].inbound_high = dma_high(addr);
2954 3211
2955 count++; 3212 count++;
2956 } 3213 }
@@ -3086,7 +3343,8 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3086 3343
3087 do { 3344 do {
3088 if (pHba->hrt == NULL) { 3345 if (pHba->hrt == NULL) {
3089 pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32); 3346 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3347 size, &pHba->hrt_pa, GFP_KERNEL);
3090 if (pHba->hrt == NULL) { 3348 if (pHba->hrt == NULL) {
3091 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name); 3349 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3092 return -ENOMEM; 3350 return -ENOMEM;
@@ -3098,7 +3356,7 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3098 msg[2]= 0; 3356 msg[2]= 0;
3099 msg[3]= 0; 3357 msg[3]= 0;
3100 msg[4]= (0xD0000000 | size); /* Simple transaction */ 3358 msg[4]= (0xD0000000 | size); /* Simple transaction */
3101 msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */ 3359 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3102 3360
3103 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) { 3361 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3104 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret); 3362 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
@@ -3106,8 +3364,10 @@ static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3106 } 3364 }
3107 3365
3108 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) { 3366 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3109 size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2; 3367 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3110 kfree(pHba->hrt); 3368 dma_free_coherent(&pHba->pDev->dev, size,
3369 pHba->hrt, pHba->hrt_pa);
3370 size = newsize;
3111 pHba->hrt = NULL; 3371 pHba->hrt = NULL;
3112 } 3372 }
3113 } while(pHba->hrt == NULL); 3373 } while(pHba->hrt == NULL);
@@ -3121,33 +3381,54 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3121 int group, int field, void *buf, int buflen) 3381 int group, int field, void *buf, int buflen)
3122{ 3382{
3123 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; 3383 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3124 u8 *resblk; 3384 u8 *opblk_va;
3385 dma_addr_t opblk_pa;
3386 u8 *resblk_va;
3387 dma_addr_t resblk_pa;
3125 3388
3126 int size; 3389 int size;
3127 3390
3128 /* 8 bytes for header */ 3391 /* 8 bytes for header */
3129 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32); 3392 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3130 if (resblk == NULL) { 3393 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3394 if (resblk_va == NULL) {
3131 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name); 3395 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3132 return -ENOMEM; 3396 return -ENOMEM;
3133 } 3397 }
3134 3398
3399 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3400 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3401 if (opblk_va == NULL) {
3402 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3403 resblk_va, resblk_pa);
3404 printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
3405 pHba->name);
3406 return -ENOMEM;
3407 }
3135 if (field == -1) /* whole group */ 3408 if (field == -1) /* whole group */
3136 opblk[4] = -1; 3409 opblk[4] = -1;
3137 3410
3411 memcpy(opblk_va, opblk, sizeof(opblk));
3138 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, 3412 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3139 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen)); 3413 opblk_va, opblk_pa, sizeof(opblk),
3414 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3415 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3140 if (size == -ETIME) { 3416 if (size == -ETIME) {
3417 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3418 resblk_va, resblk_pa);
3141 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name); 3419 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3142 return -ETIME; 3420 return -ETIME;
3143 } else if (size == -EINTR) { 3421 } else if (size == -EINTR) {
3422 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3423 resblk_va, resblk_pa);
3144 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name); 3424 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3145 return -EINTR; 3425 return -EINTR;
3146 } 3426 }
3147 3427
3148 memcpy(buf, resblk+8, buflen); /* cut off header */ 3428 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3149 3429
3150 kfree(resblk); 3430 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3431 resblk_va, resblk_pa);
3151 if (size < 0) 3432 if (size < 0)
3152 return size; 3433 return size;
3153 3434
@@ -3164,10 +3445,11 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3164 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 3445 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3165 */ 3446 */
3166static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 3447static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3167 void *opblk, int oplen, void *resblk, int reslen) 3448 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3449 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3168{ 3450{
3169 u32 msg[9]; 3451 u32 msg[9];
3170 u32 *res = (u32 *)resblk; 3452 u32 *res = (u32 *)resblk_va;
3171 int wait_status; 3453 int wait_status;
3172 3454
3173 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5; 3455 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
@@ -3176,12 +3458,12 @@ static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3176 msg[3] = 0; 3458 msg[3] = 0;
3177 msg[4] = 0; 3459 msg[4] = 0;
3178 msg[5] = 0x54000000 | oplen; /* OperationBlock */ 3460 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3179 msg[6] = virt_to_bus(opblk); 3461 msg[6] = (u32)opblk_pa;
3180 msg[7] = 0xD0000000 | reslen; /* ResultBlock */ 3462 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3181 msg[8] = virt_to_bus(resblk); 3463 msg[8] = (u32)resblk_pa;
3182 3464
3183 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) { 3465 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3184 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk); 3466 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3185 return wait_status; /* -DetailedStatus */ 3467 return wait_status; /* -DetailedStatus */
3186 } 3468 }
3187 3469
@@ -3284,7 +3566,7 @@ static int adpt_i2o_systab_send(adpt_hba* pHba)
3284 * Private i/o space declaration 3566 * Private i/o space declaration
3285 */ 3567 */
3286 msg[6] = 0x54000000 | sys_tbl_len; 3568 msg[6] = 0x54000000 | sys_tbl_len;
3287 msg[7] = virt_to_phys(sys_tbl); 3569 msg[7] = (u32)sys_tbl_pa;
3288 msg[8] = 0x54000000 | 0; 3570 msg[8] = 0x54000000 | 0;
3289 msg[9] = 0; 3571 msg[9] = 0;
3290 msg[10] = 0xD4000000 | 0; 3572 msg[10] = 0xD4000000 | 0;
@@ -3323,11 +3605,10 @@ static void adpt_delay(int millisec)
3323#endif 3605#endif
3324 3606
3325static struct scsi_host_template driver_template = { 3607static struct scsi_host_template driver_template = {
3608 .module = THIS_MODULE,
3326 .name = "dpt_i2o", 3609 .name = "dpt_i2o",
3327 .proc_name = "dpt_i2o", 3610 .proc_name = "dpt_i2o",
3328 .proc_info = adpt_proc_info, 3611 .proc_info = adpt_proc_info,
3329 .detect = adpt_detect,
3330 .release = adpt_release,
3331 .info = adpt_info, 3612 .info = adpt_info,
3332 .queuecommand = adpt_queue, 3613 .queuecommand = adpt_queue,
3333 .eh_abort_handler = adpt_abort, 3614 .eh_abort_handler = adpt_abort,
@@ -3341,5 +3622,48 @@ static struct scsi_host_template driver_template = {
3341 .cmd_per_lun = 1, 3622 .cmd_per_lun = 1,
3342 .use_clustering = ENABLE_CLUSTERING, 3623 .use_clustering = ENABLE_CLUSTERING,
3343}; 3624};
3344#include "scsi_module.c" 3625
3626static int __init adpt_init(void)
3627{
3628 int error;
3629 adpt_hba *pHba, *next;
3630
3631 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3632
3633 error = adpt_detect(&driver_template);
3634 if (error < 0)
3635 return error;
3636 if (hba_chain == NULL)
3637 return -ENODEV;
3638
3639 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3640 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3641 if (error)
3642 goto fail;
3643 scsi_scan_host(pHba->host);
3644 }
3645 return 0;
3646fail:
3647 for (pHba = hba_chain; pHba; pHba = next) {
3648 next = pHba->next;
3649 scsi_remove_host(pHba->host);
3650 }
3651 return error;
3652}
3653
3654static void __exit adpt_exit(void)
3655{
3656 adpt_hba *pHba, *next;
3657
3658 for (pHba = hba_chain; pHba; pHba = pHba->next)
3659 scsi_remove_host(pHba->host);
3660 for (pHba = hba_chain; pHba; pHba = next) {
3661 next = pHba->next;
3662 adpt_release(pHba->host);
3663 }
3664}
3665
3666module_init(adpt_init);
3667module_exit(adpt_exit);
3668
3345MODULE_LICENSE("GPL"); 3669MODULE_LICENSE("GPL");
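
The dpt_i2o hunks above replace kmalloc(..., GFP_KERNEL|ADDR32) plus virt_to_bus()/virt_to_phys() with dma_alloc_coherent()/dma_free_coherent(), so the adapter is handed a bus address obtained from the DMA API rather than one derived from a kernel virtual address. A minimal sketch of that allocation pattern follows; the function and variable names are illustrative, not taken from the driver.

#include <linux/dma-mapping.h>

/* Sketch: get a CPU virtual address and a device-visible bus address
 * in one call, instead of kmalloc() followed by virt_to_bus(). */
static int example_param_query(struct device *dev, size_t len)
{
	void *va;
	dma_addr_t pa;

	va = dma_alloc_coherent(dev, len, &pa, GFP_KERNEL);
	if (!va)
		return -ENOMEM;

	/* ... place 'pa' in the message frame handed to the adapter,
	 *     read the result block back through 'va' ... */

	dma_free_coherent(dev, len, va, pa);
	return 0;
}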
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index fd79068c5869..924cd5a51676 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -84,7 +84,6 @@ static int adpt_device_reset(struct scsi_cmnd* cmd);
84#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID 84#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID
85#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511) 85#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511)
86 86
87//#define REBOOT_NOTIFIER 1
88/* Debugging macro from Linux Device Drivers - Rubini */ 87/* Debugging macro from Linux Device Drivers - Rubini */
89#undef PDEBUG 88#undef PDEBUG
90#ifdef DEBUG 89#ifdef DEBUG
@@ -229,14 +228,19 @@ typedef struct _adpt_hba {
229 u32 post_fifo_size; 228 u32 post_fifo_size;
230 u32 reply_fifo_size; 229 u32 reply_fifo_size;
231 u32* reply_pool; 230 u32* reply_pool;
231 dma_addr_t reply_pool_pa;
232 u32 sg_tablesize; // Scatter/Gather List Size. 232 u32 sg_tablesize; // Scatter/Gather List Size.
233 u8 top_scsi_channel; 233 u8 top_scsi_channel;
234 u8 top_scsi_id; 234 u8 top_scsi_id;
235 u8 top_scsi_lun; 235 u8 top_scsi_lun;
236 u8 dma64;
236 237
237 i2o_status_block* status_block; 238 i2o_status_block* status_block;
239 dma_addr_t status_block_pa;
238 i2o_hrt* hrt; 240 i2o_hrt* hrt;
241 dma_addr_t hrt_pa;
239 i2o_lct* lct; 242 i2o_lct* lct;
243 dma_addr_t lct_pa;
240 uint lct_size; 244 uint lct_size;
241 struct i2o_device* devices; 245 struct i2o_device* devices;
242 struct adpt_channel channel[MAX_CHANNEL]; 246 struct adpt_channel channel[MAX_CHANNEL];
@@ -249,6 +253,7 @@ typedef struct _adpt_hba {
249 void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED 253 void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
250 void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED 254 void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
251 u32 FwDebugFlags; 255 u32 FwDebugFlags;
256 u32 *ioctl_reply_context[4];
252} adpt_hba; 257} adpt_hba;
253 258
254struct sg_simple_element { 259struct sg_simple_element {
@@ -264,9 +269,6 @@ static void adpt_i2o_sys_shutdown(void);
264static int adpt_init(void); 269static int adpt_init(void);
265static int adpt_i2o_build_sys_table(void); 270static int adpt_i2o_build_sys_table(void);
266static irqreturn_t adpt_isr(int irq, void *dev_id); 271static irqreturn_t adpt_isr(int irq, void *dev_id);
267#ifdef REBOOT_NOTIFIER
268static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p);
269#endif
270 272
271static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d); 273static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d);
272static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, 274static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
@@ -275,7 +277,8 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
275static const char *adpt_i2o_get_class_name(int class); 277static const char *adpt_i2o_get_class_name(int class);
276#endif 278#endif
277static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, 279static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
278 void *opblk, int oplen, void *resblk, int reslen); 280 void *opblk, dma_addr_t opblk_pa, int oplen,
281 void *resblk, dma_addr_t resblk_pa, int reslen);
279static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout); 282static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
280static int adpt_i2o_lct_get(adpt_hba* pHba); 283static int adpt_i2o_lct_get(adpt_hba* pHba);
281static int adpt_i2o_parse_lct(adpt_hba* pHba); 284static int adpt_i2o_parse_lct(adpt_hba* pHba);
@@ -289,7 +292,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
289static s32 adpt_i2o_hrt_get(adpt_hba* pHba); 292static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
290static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice); 293static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
291static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd); 294static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd);
292static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht); 295static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
293static s32 adpt_hba_reset(adpt_hba* pHba); 296static s32 adpt_hba_reset(adpt_hba* pHba);
294static s32 adpt_i2o_reset_hba(adpt_hba* pHba); 297static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
295static s32 adpt_rescan(adpt_hba* pHba); 298static s32 adpt_rescan(adpt_hba* pHba);
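
Together with the dpt_i2o.c hunk that drops #include "scsi_module.c" and the .detect/.release template hooks, the header change above reflects the driver registering its hosts itself from module_init()/module_exit() via scsi_add_host(), scsi_scan_host() and scsi_remove_host(). A minimal sketch of that registration order, assuming a Scsi_Host was allocated earlier with scsi_host_alloc(); the names below are placeholders.

#include <scsi/scsi_host.h>

static int example_register(struct Scsi_Host *shost, struct device *parent)
{
	int error;

	error = scsi_add_host(shost, parent);	/* make the host known to the midlayer */
	if (error)
		return error;
	scsi_scan_host(shost);			/* probe its targets and LUNs */
	return 0;
}

static void example_unregister(struct Scsi_Host *shost)
{
	scsi_remove_host(shost);	/* detach from the midlayer first */
	scsi_host_put(shost);		/* then drop the host reference */
}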
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 2cd6b4959eb2..c33bcb284df7 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1443,7 +1443,7 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
1443 current_SC->SCp.this_residual = current_SC->SCp.buffer->length; 1443 current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
1444 current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1; 1444 current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
1445 } else { 1445 } else {
1446 current_SC->SCp.ptr = 0; 1446 current_SC->SCp.ptr = NULL;
1447 current_SC->SCp.this_residual = 0; 1447 current_SC->SCp.this_residual = 0;
1448 current_SC->SCp.buffer = NULL; 1448 current_SC->SCp.buffer = NULL;
1449 current_SC->SCp.buffers_residual = 0; 1449 current_SC->SCp.buffers_residual = 0;
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c6d6e7c6559a..8e2e964af668 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -465,7 +465,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
465 scp->request = (struct request *)&wait; 465 scp->request = (struct request *)&wait;
466 scp->timeout_per_command = timeout*HZ; 466 scp->timeout_per_command = timeout*HZ;
467 scp->cmd_len = 12; 467 scp->cmd_len = 12;
468 memcpy(scp->cmnd, cmnd, 12); 468 scp->cmnd = cmnd;
469 cmndinfo.priority = IOCTL_PRI; 469 cmndinfo.priority = IOCTL_PRI;
470 cmndinfo.internal_cmd_str = gdtcmd; 470 cmndinfo.internal_cmd_str = gdtcmd;
471 cmndinfo.internal_command = 1; 471 cmndinfo.internal_command = 1;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 5b7be1e9841c..aaa48e0c8ed0 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -763,9 +763,9 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
763 scp, 763 scp,
764 host->host_no, scp->device->channel, 764 host->host_no, scp->device->channel,
765 scp->device->id, scp->device->lun, 765 scp->device->id, scp->device->lun,
766 *((u32 *)&scp->cmnd), 766 ((u32 *)scp->cmnd)[0],
767 *((u32 *)&scp->cmnd + 1), 767 ((u32 *)scp->cmnd)[1],
768 *((u32 *)&scp->cmnd + 2), 768 ((u32 *)scp->cmnd)[2],
769 _req->index, _req->req_virt); 769 _req->index, _req->req_virt);
770 770
771 scp->result = 0; 771 scp->result = 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 4a922c57125e..ccfd8aca3765 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -686,7 +686,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
686 } 686 }
687 687
688 if (cmnd) { 688 if (cmnd) {
689 cmnd->result = rsp->status; 689 cmnd->result |= rsp->status;
690 if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) 690 if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
691 memcpy(cmnd->sense_buffer, 691 memcpy(cmnd->sense_buffer,
692 rsp->data, 692 rsp->data,
@@ -730,6 +730,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
730 u16 lun = lun_from_dev(cmnd->device); 730 u16 lun = lun_from_dev(cmnd->device);
731 u8 out_fmt, in_fmt; 731 u8 out_fmt, in_fmt;
732 732
733 cmnd->result = (DID_OK << 16);
733 evt_struct = get_event_struct(&hostdata->pool); 734 evt_struct = get_event_struct(&hostdata->pool);
734 if (!evt_struct) 735 if (!evt_struct)
735 return SCSI_MLQUEUE_HOST_BUSY; 736 return SCSI_MLQUEUE_HOST_BUSY;
@@ -738,7 +739,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
738 srp_cmd = &evt_struct->iu.srp.cmd; 739 srp_cmd = &evt_struct->iu.srp.cmd;
739 memset(srp_cmd, 0x00, SRP_MAX_IU_LEN); 740 memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
740 srp_cmd->opcode = SRP_CMD; 741 srp_cmd->opcode = SRP_CMD;
741 memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd)); 742 memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
742 srp_cmd->lun = ((u64) lun) << 48; 743 srp_cmd->lun = ((u64) lun) << 48;
743 744
744 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { 745 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
@@ -1347,6 +1348,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1347 1348
1348 del_timer(&evt_struct->timer); 1349 del_timer(&evt_struct->timer);
1349 1350
1351 if (crq->status != VIOSRP_OK && evt_struct->cmnd)
1352 evt_struct->cmnd->result = DID_ERROR << 16;
1350 if (evt_struct->done) 1353 if (evt_struct->done)
1351 evt_struct->done(evt_struct); 1354 evt_struct->done(evt_struct);
1352 else 1355 else
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 90f1a61283ad..4c4aadb3e405 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -59,6 +59,15 @@ enum viosrp_crq_formats {
59 VIOSRP_INLINE_FORMAT = 0x07 59 VIOSRP_INLINE_FORMAT = 0x07
60}; 60};
61 61
62enum viosrp_crq_status {
63 VIOSRP_OK = 0x0,
64 VIOSRP_NONRECOVERABLE_ERR = 0x1,
65 VIOSRP_VIOLATES_MAX_XFER = 0x2,
66 VIOSRP_PARTNER_PANIC = 0x3,
67 VIOSRP_DEVICE_BUSY = 0x8,
68 VIOSRP_ADAPTER_FAIL = 0x10
69};
70
62struct viosrp_crq { 71struct viosrp_crq {
63 u8 valid; /* used by RPA */ 72 u8 valid; /* used by RPA */
64 u8 format; /* SCSI vs out-of-band */ 73 u8 format; /* SCSI vs out-of-band */
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 32553639aded..44d8d5163a1a 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -134,6 +134,7 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
134static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, 134static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
135 unsigned int bcount) 135 unsigned int bcount)
136{ 136{
137 ide_hwif_t *hwif = drive->hwif;
137 int count; 138 int count;
138 char *buf; 139 char *buf;
139 140
@@ -145,14 +146,12 @@ static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
145 local_irq_save(flags); 146 local_irq_save(flags);
146 buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) + 147 buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
147 pc->sg->offset; 148 pc->sg->offset;
148 drive->hwif->atapi_input_bytes(drive, 149 hwif->input_data(drive, NULL, buf + pc->b_count, count);
149 buf + pc->b_count, count);
150 kunmap_atomic(buf - pc->sg->offset, KM_IRQ0); 150 kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
151 local_irq_restore(flags); 151 local_irq_restore(flags);
152 } else { 152 } else {
153 buf = sg_virt(pc->sg); 153 buf = sg_virt(pc->sg);
154 drive->hwif->atapi_input_bytes(drive, 154 hwif->input_data(drive, NULL, buf + pc->b_count, count);
155 buf + pc->b_count, count);
156 } 155 }
157 bcount -= count; pc->b_count += count; 156 bcount -= count; pc->b_count += count;
158 if (pc->b_count == pc->sg->length) { 157 if (pc->b_count == pc->sg->length) {
@@ -165,13 +164,14 @@ static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
165 164
166 if (bcount) { 165 if (bcount) {
167 printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n"); 166 printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
168 ide_atapi_discard_data(drive, bcount); 167 ide_pad_transfer(drive, 0, bcount);
169 } 168 }
170} 169}
171 170
172static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, 171static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
173 unsigned int bcount) 172 unsigned int bcount)
174{ 173{
174 ide_hwif_t *hwif = drive->hwif;
175 int count; 175 int count;
176 char *buf; 176 char *buf;
177 177
@@ -183,14 +183,12 @@ static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
183 local_irq_save(flags); 183 local_irq_save(flags);
184 buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) + 184 buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
185 pc->sg->offset; 185 pc->sg->offset;
186 drive->hwif->atapi_output_bytes(drive, 186 hwif->output_data(drive, NULL, buf + pc->b_count, count);
187 buf + pc->b_count, count);
188 kunmap_atomic(buf - pc->sg->offset, KM_IRQ0); 187 kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
189 local_irq_restore(flags); 188 local_irq_restore(flags);
190 } else { 189 } else {
191 buf = sg_virt(pc->sg); 190 buf = sg_virt(pc->sg);
192 drive->hwif->atapi_output_bytes(drive, 191 hwif->output_data(drive, NULL, buf + pc->b_count, count);
193 buf + pc->b_count, count);
194 } 192 }
195 bcount -= count; pc->b_count += count; 193 bcount -= count; pc->b_count += count;
196 if (pc->b_count == pc->sg->length) { 194 if (pc->b_count == pc->sg->length) {
@@ -203,7 +201,7 @@ static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
203 201
204 if (bcount) { 202 if (bcount) {
205 printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n"); 203 printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
206 ide_atapi_write_zeros(drive, bcount); 204 ide_pad_transfer(drive, 1, bcount);
207 } 205 }
208} 206}
209 207
@@ -258,7 +256,8 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
258 256
259 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 257 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
260 /* force an abort */ 258 /* force an abort */
261 hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr); 259 hwif->OUTBSYNC(drive, WIN_IDLEIMMEDIATE,
260 hwif->io_ports.command_addr);
262 261
263 rq->errors++; 262 rq->errors++;
264 263
@@ -431,14 +430,15 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
431 idescsi_input_buffers(drive, pc, 430 idescsi_input_buffers(drive, pc,
432 temp); 431 temp);
433 else 432 else
434 drive->hwif->atapi_input_bytes(drive, pc->cur_pos, temp); 433 hwif->input_data(drive, NULL,
434 pc->cur_pos, temp);
435 printk(KERN_ERR "ide-scsi: transferred" 435 printk(KERN_ERR "ide-scsi: transferred"
436 " %d of %d bytes\n", 436 " %d of %d bytes\n",
437 temp, bcount); 437 temp, bcount);
438 } 438 }
439 pc->xferred += temp; 439 pc->xferred += temp;
440 pc->cur_pos += temp; 440 pc->cur_pos += temp;
441 ide_atapi_discard_data(drive, bcount - temp); 441 ide_pad_transfer(drive, 0, bcount - temp);
442 ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry); 442 ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
443 return ide_started; 443 return ide_started;
444 } 444 }
@@ -452,15 +452,13 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
452 if (pc->sg) 452 if (pc->sg)
453 idescsi_input_buffers(drive, pc, bcount); 453 idescsi_input_buffers(drive, pc, bcount);
454 else 454 else
455 hwif->atapi_input_bytes(drive, pc->cur_pos, 455 hwif->input_data(drive, NULL, pc->cur_pos, bcount);
456 bcount);
457 } else { 456 } else {
458 pc->flags |= PC_FLAG_WRITING; 457 pc->flags |= PC_FLAG_WRITING;
459 if (pc->sg) 458 if (pc->sg)
460 idescsi_output_buffers(drive, pc, bcount); 459 idescsi_output_buffers(drive, pc, bcount);
461 else 460 else
462 hwif->atapi_output_bytes(drive, pc->cur_pos, 461 hwif->output_data(drive, NULL, pc->cur_pos, bcount);
463 bcount);
464 } 462 }
465 /* Update the current position */ 463 /* Update the current position */
466 pc->xferred += bcount; 464 pc->xferred += bcount;
@@ -493,8 +491,10 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
493 BUG_ON(HWGROUP(drive)->handler != NULL); 491 BUG_ON(HWGROUP(drive)->handler != NULL);
494 /* Set the interrupt routine */ 492 /* Set the interrupt routine */
495 ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry); 493 ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
494
496 /* Send the actual packet */ 495 /* Send the actual packet */
497 drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12); 496 hwif->output_data(drive, NULL, scsi->pc->c, 12);
497
498 if (pc->flags & PC_FLAG_DMA_OK) { 498 if (pc->flags & PC_FLAG_DMA_OK) {
499 pc->flags |= PC_FLAG_DMA_IN_PROGRESS; 499 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
500 hwif->dma_ops->dma_start(drive); 500 hwif->dma_ops->dma_start(drive);
@@ -574,7 +574,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
574 return ide_started; 574 return ide_started;
575 } else { 575 } else {
576 /* Issue the packet command */ 576 /* Issue the packet command */
577 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr); 577 ide_execute_pkt_cmd(drive);
578 return idescsi_transfer_pc(drive); 578 return idescsi_transfer_pc(drive);
579 } 579 }
580} 580}
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index dbae3fdb8506..e3f739776bad 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2590,7 +2590,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
2590 cblk->hastat = 0; 2590 cblk->hastat = 0;
2591 cblk->tastat = 0; 2591 cblk->tastat = 0;
2592 /* Command the command */ 2592 /* Command the command */
2593 memcpy(&cblk->cdb[0], &cmnd->cmnd, cmnd->cmd_len); 2593 memcpy(cblk->cdb, cmnd->cmnd, cmnd->cmd_len);
2594 2594
2595 /* Set up tags */ 2595 /* Set up tags */
2596 if (cmnd->device->tagged_supported) { /* Tag Support */ 2596 if (cmnd->device->tagged_supported) { /* Tag Support */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index de5ae6a65029..999e91ea7451 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2791,7 +2791,7 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
2791 2791
2792static struct device_attribute ipr_ioa_state_attr = { 2792static struct device_attribute ipr_ioa_state_attr = {
2793 .attr = { 2793 .attr = {
2794 .name = "state", 2794 .name = "online_state",
2795 .mode = S_IRUGO | S_IWUSR, 2795 .mode = S_IRUGO | S_IWUSR,
2796 }, 2796 },
2797 .show = ipr_show_adapter_state, 2797 .show = ipr_show_adapter_state,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index b135a1ed4b2c..18551aaf5e09 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4996,7 +4996,7 @@ static int __init megaraid_init(void)
4996 max_mbox_busy_wait = MBOX_BUSY_WAIT; 4996 max_mbox_busy_wait = MBOX_BUSY_WAIT;
4997 4997
4998#ifdef CONFIG_PROC_FS 4998#ifdef CONFIG_PROC_FS
4999 mega_proc_dir_entry = proc_mkdir("megaraid", &proc_root); 4999 mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
5000 if (!mega_proc_dir_entry) { 5000 if (!mega_proc_dir_entry) {
5001 printk(KERN_WARNING 5001 printk(KERN_WARNING
5002 "megaraid: failed to create megaraid root\n"); 5002 "megaraid: failed to create megaraid root\n");
@@ -5005,7 +5005,7 @@ static int __init megaraid_init(void)
5005 error = pci_register_driver(&megaraid_pci_driver); 5005 error = pci_register_driver(&megaraid_pci_driver);
5006 if (error) { 5006 if (error) {
5007#ifdef CONFIG_PROC_FS 5007#ifdef CONFIG_PROC_FS
5008 remove_proc_entry("megaraid", &proc_root); 5008 remove_proc_entry("megaraid", NULL);
5009#endif 5009#endif
5010 return error; 5010 return error;
5011 } 5011 }
@@ -5035,7 +5035,7 @@ static void __exit megaraid_exit(void)
5035 pci_unregister_driver(&megaraid_pci_driver); 5035 pci_unregister_driver(&megaraid_pci_driver);
5036 5036
5037#ifdef CONFIG_PROC_FS 5037#ifdef CONFIG_PROC_FS
5038 remove_proc_entry("megaraid", &proc_root); 5038 remove_proc_entry("megaraid", NULL);
5039#endif 5039#endif
5040} 5040}
5041 5041
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 820f91fb63ba..70a0f11f48b2 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -3168,6 +3168,23 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
3168 uint8_t raw_mbox[sizeof(mbox_t)]; 3168 uint8_t raw_mbox[sizeof(mbox_t)];
3169 int rval; 3169 int rval;
3170 3170
3171 /*
3172 * Newer firmware on Dell CERC expect a different
3173 * random deletion handling, so disable it.
3174 */
3175 if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
3176 adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
3177 adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
3178 adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
3179 (adapter->fw_version[0] > '6' ||
3180 (adapter->fw_version[0] == '6' &&
3181 adapter->fw_version[2] > '6') ||
3182 (adapter->fw_version[0] == '6'
3183 && adapter->fw_version[2] == '6'
3184 && adapter->fw_version[3] > '1'))) {
3185 con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
3186 return 0;
3187 }
3171 3188
3172 mbox = (mbox_t *)raw_mbox; 3189 mbox = (mbox_t *)raw_mbox;
3173 3190
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 626459d1e902..c1d86d961a92 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -88,6 +88,7 @@
88#define PCI_SUBSYS_ID_PERC3_QC 0x0471 88#define PCI_SUBSYS_ID_PERC3_QC 0x0471
89#define PCI_SUBSYS_ID_PERC3_DC 0x0493 89#define PCI_SUBSYS_ID_PERC3_DC 0x0493
90#define PCI_SUBSYS_ID_PERC3_SC 0x0475 90#define PCI_SUBSYS_ID_PERC3_SC 0x0475
91#define PCI_SUBSYS_ID_CERC_ATA100_4CH 0x0511
91 92
92 93
93#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel 94#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index b937e9cddb23..7d84c8bbcf3f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.03.16-rc1 13 * Version : v00.00.03.20-rc1
14 * 14 *
15 * Authors: 15 * Authors:
16 * (email-id : megaraidlinux@lsi.com) 16 * (email-id : megaraidlinux@lsi.com)
@@ -2650,12 +2650,13 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
2650 return; 2650 return;
2651} 2651}
2652 2652
2653#ifdef CONFIG_PM
2653/** 2654/**
2654 * megasas_suspend - driver suspend entry point 2655 * megasas_suspend - driver suspend entry point
2655 * @pdev: PCI device structure 2656 * @pdev: PCI device structure
2656 * @state: PCI power state to suspend routine 2657 * @state: PCI power state to suspend routine
2657 */ 2658 */
2658static int __devinit 2659static int
2659megasas_suspend(struct pci_dev *pdev, pm_message_t state) 2660megasas_suspend(struct pci_dev *pdev, pm_message_t state)
2660{ 2661{
2661 struct Scsi_Host *host; 2662 struct Scsi_Host *host;
@@ -2687,7 +2688,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
2687 * megasas_resume- driver resume entry point 2688 * megasas_resume- driver resume entry point
2688 * @pdev: PCI device structure 2689 * @pdev: PCI device structure
2689 */ 2690 */
2690static int __devinit 2691static int
2691megasas_resume(struct pci_dev *pdev) 2692megasas_resume(struct pci_dev *pdev)
2692{ 2693{
2693 int rval; 2694 int rval;
@@ -2782,12 +2783,16 @@ fail_ready_state:
2782 2783
2783 return -ENODEV; 2784 return -ENODEV;
2784} 2785}
2786#else
2787#define megasas_suspend NULL
2788#define megasas_resume NULL
2789#endif
2785 2790
2786/** 2791/**
2787 * megasas_detach_one - PCI hot"un"plug entry point 2792 * megasas_detach_one - PCI hot"un"plug entry point
2788 * @pdev: PCI device structure 2793 * @pdev: PCI device structure
2789 */ 2794 */
2790static void megasas_detach_one(struct pci_dev *pdev) 2795static void __devexit megasas_detach_one(struct pci_dev *pdev)
2791{ 2796{
2792 int i; 2797 int i;
2793 struct Scsi_Host *host; 2798 struct Scsi_Host *host;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3a997eb457bf..b0c41e671702 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
18/* 18/*
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.03.16-rc1" 21#define MEGASAS_VERSION "00.00.03.20-rc1"
22#define MEGASAS_RELDATE "Nov. 07, 2007" 22#define MEGASAS_RELDATE "March 10, 2008"
23#define MEGASAS_EXT_VERSION "Thu. Nov. 07 10:09:32 PDT 2007" 23#define MEGASAS_EXT_VERSION "Mon. March 10 11:02:31 PDT 2008"
24 24
25/* 25/*
26 * Device IDs 26 * Device IDs
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
index e55b9037adb2..1dd70d7a4947 100644
--- a/drivers/scsi/mvsas.c
+++ b/drivers/scsi/mvsas.c
@@ -2822,7 +2822,9 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
2822 dev_printk(KERN_DEBUG, &pdev->dev, 2822 dev_printk(KERN_DEBUG, &pdev->dev,
2823 "phy[%d] Get Attached Address 0x%llX ," 2823 "phy[%d] Get Attached Address 0x%llX ,"
2824 " SAS Address 0x%llX\n", 2824 " SAS Address 0x%llX\n",
2825 i, phy->att_dev_sas_addr, phy->dev_sas_addr); 2825 i,
2826 (unsigned long long)phy->att_dev_sas_addr,
2827 (unsigned long long)phy->dev_sas_addr);
2826 dev_printk(KERN_DEBUG, &pdev->dev, 2828 dev_printk(KERN_DEBUG, &pdev->dev,
2827 "Rate = %x , type = %d\n", 2829 "Rate = %x , type = %d\n",
2828 sas_phy->linkrate, phy->phy_type); 2830 sas_phy->linkrate, phy->phy_type);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index d89289400425..c57c94c0ffd2 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8186,7 +8186,7 @@ static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd)
8186 cmd->next_wcmd = NULL; 8186 cmd->next_wcmd = NULL;
8187 if (!(wcmd = np->waiting_list)) np->waiting_list = cmd; 8187 if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
8188 else { 8188 else {
8189 while ((wcmd->next_wcmd) != 0) 8189 while (wcmd->next_wcmd)
8190 wcmd = (struct scsi_cmnd *) wcmd->next_wcmd; 8190 wcmd = (struct scsi_cmnd *) wcmd->next_wcmd;
8191 wcmd->next_wcmd = (char *) cmd; 8191 wcmd->next_wcmd = (char *) cmd;
8192 } 8192 }
@@ -8222,7 +8222,7 @@ static void process_waiting_list(struct ncb *np, int sts)
8222#ifdef DEBUG_WAITING_LIST 8222#ifdef DEBUG_WAITING_LIST
8223 if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts); 8223 if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
8224#endif 8224#endif
8225 while ((wcmd = waiting_list) != 0) { 8225 while ((wcmd = waiting_list) != NULL) {
8226 waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd; 8226 waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd;
8227 wcmd->next_wcmd = NULL; 8227 wcmd->next_wcmd = NULL;
8228 if (sts == DID_OK) { 8228 if (sts == DID_OK) {
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 09ab3eac1c1a..fa060932d2b4 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2858,7 +2858,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2858 2858
2859 /* Load SCSI command packet. */ 2859 /* Load SCSI command packet. */
2860 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 2860 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2861 memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd)); 2861 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2862 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 2862 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
2863 2863
2864 /* Set transfer direction. */ 2864 /* Set transfer direction. */
@@ -3127,7 +3127,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3127 3127
3128 /* Load SCSI command packet. */ 3128 /* Load SCSI command packet. */
3129 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 3129 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3130 memcpy(pkt->scsi_cdb, &(CMD_CDBP(cmd)), CMD_CDBLEN(cmd)); 3130 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3131 3131
3132 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 3132 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
3133 /* Set transfer direction. */ 3133 /* Set transfer direction. */
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 12d69d7c8577..110e776d1a07 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -79,15 +79,6 @@ static void scsi_done(struct scsi_cmnd *cmd);
79#define MIN_RESET_PERIOD (15*HZ) 79#define MIN_RESET_PERIOD (15*HZ)
80 80
81/* 81/*
82 * Macro to determine the size of SCSI command. This macro takes vendor
83 * unique commands into account. SCSI commands in groups 6 and 7 are
84 * vendor unique and we will depend upon the command length being
85 * supplied correctly in cmd_len.
86 */
87#define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
88 COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
89
90/*
91 * Note - the initial logging level can be set here to log events at boot time. 82 * Note - the initial logging level can be set here to log events at boot time.
92 * After the system is up, you may enable logging via the /proc interface. 83 * After the system is up, you may enable logging via the /proc interface.
93 */ 84 */
@@ -469,6 +460,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
469 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); 460 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
470 if (!cmd) { 461 if (!cmd) {
471 scsi_put_host_cmd_pool(gfp_mask); 462 scsi_put_host_cmd_pool(gfp_mask);
463 shost->cmd_pool = NULL;
472 return -ENOMEM; 464 return -ENOMEM;
473 } 465 }
474 list_add(&cmd->list, &shost->free_list); 466 list_add(&cmd->list, &shost->free_list);
@@ -481,6 +473,13 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
481 */ 473 */
482void scsi_destroy_command_freelist(struct Scsi_Host *shost) 474void scsi_destroy_command_freelist(struct Scsi_Host *shost)
483{ 475{
476 /*
477 * If cmd_pool is NULL the free list was not initialized, so
478 * do not attempt to release resources.
479 */
480 if (!shost->cmd_pool)
481 return;
482
484 while (!list_empty(&shost->free_list)) { 483 while (!list_empty(&shost->free_list)) {
485 struct scsi_cmnd *cmd; 484 struct scsi_cmnd *cmd;
486 485
@@ -701,9 +700,11 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
701 * Before we queue this command, check if the command 700 * Before we queue this command, check if the command
702 * length exceeds what the host adapter can handle. 701 * length exceeds what the host adapter can handle.
703 */ 702 */
704 if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) { 703 if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
705 SCSI_LOG_MLQUEUE(3, 704 SCSI_LOG_MLQUEUE(3,
706 printk("queuecommand : command too long.\n")); 705 printk("queuecommand : command too long. "
706 "cdb_size=%d host->max_cmd_len=%d\n",
707 cmd->cmd_len, cmd->device->host->max_cmd_len));
707 cmd->result = (DID_ABORT << 16); 708 cmd->result = (DID_ABORT << 16);
708 709
709 scsi_done(cmd); 710 scsi_done(cmd);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 07103c399fe0..f6600bfb5bde 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1773,7 +1773,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
1773 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 1773 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
1774 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n", 1774 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
1775 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 1775 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
1776 set_bit(QUEUE_FLAG_BIDI, &sdp->request_queue->queue_flags); 1776 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
1777 return 0; 1777 return 0;
1778} 1778}
1779 1779
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index b8de041bc0ae..a235802f2981 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -449,37 +449,40 @@ int scsi_get_device_flags(struct scsi_device *sdev,
449} 449}
450 450
451#ifdef CONFIG_SCSI_PROC_FS 451#ifdef CONFIG_SCSI_PROC_FS
452/* 452static int devinfo_seq_show(struct seq_file *m, void *v)
453 * proc_scsi_dev_info_read: dump the scsi_dev_info_list via
454 * /proc/scsi/device_info
455 */
456static int proc_scsi_devinfo_read(char *buffer, char **start,
457 off_t offset, int length)
458{ 453{
459 struct scsi_dev_info_list *devinfo; 454 struct scsi_dev_info_list *devinfo =
460 int size, len = 0; 455 list_entry(v, struct scsi_dev_info_list, dev_info_list);
461 off_t begin = 0;
462 off_t pos = 0;
463 456
464 list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) { 457 seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
465 size = sprintf(buffer + len, "'%.8s' '%.16s' 0x%x\n",
466 devinfo->vendor, devinfo->model, devinfo->flags); 458 devinfo->vendor, devinfo->model, devinfo->flags);
467 len += size; 459 return 0;
468 pos = begin + len; 460}
469 if (pos < offset) { 461
470 len = 0; 462static void * devinfo_seq_start(struct seq_file *m, loff_t *pos)
471 begin = pos; 463{
472 } 464 return seq_list_start(&scsi_dev_info_list, *pos);
473 if (pos > offset + length) 465}
474 goto stop_output;
475 }
476 466
477stop_output: 467static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos)
478 *start = buffer + (offset - begin); /* Start of wanted data */ 468{
479 len -= (offset - begin); /* Start slop */ 469 return seq_list_next(v, &scsi_dev_info_list, pos);
480 if (len > length) 470}
481 len = length; /* Ending slop */ 471
482 return (len); 472static void devinfo_seq_stop(struct seq_file *m, void *v)
473{
474}
475
476static const struct seq_operations scsi_devinfo_seq_ops = {
477 .start = devinfo_seq_start,
478 .next = devinfo_seq_next,
479 .stop = devinfo_seq_stop,
480 .show = devinfo_seq_show,
481};
482
483static int proc_scsi_devinfo_open(struct inode *inode, struct file *file)
484{
485 return seq_open(file, &scsi_devinfo_seq_ops);
483} 486}
484 487
485/* 488/*
@@ -489,11 +492,12 @@ stop_output:
489 * integer value of flag to the scsi device info list. 492 * integer value of flag to the scsi device info list.
490 * To use, echo "vendor:model:flag" > /proc/scsi/device_info 493 * To use, echo "vendor:model:flag" > /proc/scsi/device_info
491 */ 494 */
492static int proc_scsi_devinfo_write(struct file *file, const char __user *buf, 495static ssize_t proc_scsi_devinfo_write(struct file *file,
493 unsigned long length, void *data) 496 const char __user *buf,
497 size_t length, loff_t *ppos)
494{ 498{
495 char *buffer; 499 char *buffer;
496 int err = length; 500 ssize_t err = length;
497 501
498 if (!buf || length>PAGE_SIZE) 502 if (!buf || length>PAGE_SIZE)
499 return -EINVAL; 503 return -EINVAL;
@@ -517,6 +521,15 @@ out:
517 free_page((unsigned long)buffer); 521 free_page((unsigned long)buffer);
518 return err; 522 return err;
519} 523}
524
525static const struct file_operations scsi_devinfo_proc_fops = {
526 .owner = THIS_MODULE,
527 .open = proc_scsi_devinfo_open,
528 .read = seq_read,
529 .write = proc_scsi_devinfo_write,
530 .llseek = seq_lseek,
531 .release = seq_release,
532};
520#endif /* CONFIG_SCSI_PROC_FS */ 533#endif /* CONFIG_SCSI_PROC_FS */
521 534
522module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0); 535module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0);
@@ -577,15 +590,13 @@ int __init scsi_init_devinfo(void)
577 } 590 }
578 591
579#ifdef CONFIG_SCSI_PROC_FS 592#ifdef CONFIG_SCSI_PROC_FS
580 p = create_proc_entry("scsi/device_info", 0, NULL); 593 p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops);
581 if (!p) { 594 if (!p) {
582 error = -ENOMEM; 595 error = -ENOMEM;
583 goto out; 596 goto out;
584 } 597 }
585 598
586 p->owner = THIS_MODULE; 599 p->owner = THIS_MODULE;
587 p->get_info = proc_scsi_devinfo_read;
588 p->write_proc = proc_scsi_devinfo_write;
589#endif /* CONFIG_SCSI_PROC_FS */ 600#endif /* CONFIG_SCSI_PROC_FS */
590 601
591 out: 602 out:
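
The scsi_devinfo.c hunk above converts the old get_info/write_proc callbacks into seq_file operations registered through proc_create(). A minimal sketch of the same list-backed seq_file shape follows; 'example_list', the entry type and the /proc path are hypothetical.

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>

static LIST_HEAD(example_list);

struct example_entry {
	struct list_head node;
	char name[16];
};

/* One ->show() call per list entry; iteration is driven by seq_list_*(). */
static int example_seq_show(struct seq_file *m, void *v)
{
	struct example_entry *e = list_entry(v, struct example_entry, node);

	seq_printf(m, "'%s'\n", e->name);
	return 0;
}

static void *example_seq_start(struct seq_file *m, loff_t *pos)
{
	return seq_list_start(&example_list, *pos);
}

static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &example_list, pos);
}

static void example_seq_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_seq_ops = {
	.start = example_seq_start,
	.next  = example_seq_next,
	.stop  = example_seq_stop,
	.show  = example_seq_show,
};

static int example_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &example_seq_ops);
}

static const struct file_operations example_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = example_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

/* registered with: proc_create("scsi/example_info", 0, NULL, &example_proc_fops); */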
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 221f31e36d26..eaf5a8add1ba 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -626,7 +626,7 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
626 * @scmd: SCSI command structure to hijack 626 * @scmd: SCSI command structure to hijack
627 * @ses: structure to save restore information 627 * @ses: structure to save restore information
628 * @cmnd: CDB to send. Can be NULL if no new cmnd is needed 628 * @cmnd: CDB to send. Can be NULL if no new cmnd is needed
629 * @cmnd_size: size in bytes of @cmnd 629 * @cmnd_size: size in bytes of @cmnd (must be <= BLK_MAX_CDB)
630 * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored) 630 * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored)
631 * 631 *
632 * This function is used to save a scsi command information before re-execution 632 * This function is used to save a scsi command information before re-execution
@@ -648,12 +648,14 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
648 * command. 648 * command.
649 */ 649 */
650 ses->cmd_len = scmd->cmd_len; 650 ses->cmd_len = scmd->cmd_len;
651 memcpy(ses->cmnd, scmd->cmnd, sizeof(scmd->cmnd)); 651 ses->cmnd = scmd->cmnd;
652 ses->data_direction = scmd->sc_data_direction; 652 ses->data_direction = scmd->sc_data_direction;
653 ses->sdb = scmd->sdb; 653 ses->sdb = scmd->sdb;
654 ses->next_rq = scmd->request->next_rq; 654 ses->next_rq = scmd->request->next_rq;
655 ses->result = scmd->result; 655 ses->result = scmd->result;
656 656
657 scmd->cmnd = ses->eh_cmnd;
658 memset(scmd->cmnd, 0, BLK_MAX_CDB);
657 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); 659 memset(&scmd->sdb, 0, sizeof(scmd->sdb));
658 scmd->request->next_rq = NULL; 660 scmd->request->next_rq = NULL;
659 661
@@ -665,14 +667,13 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
665 scmd->sdb.table.sgl = &ses->sense_sgl; 667 scmd->sdb.table.sgl = &ses->sense_sgl;
666 scmd->sc_data_direction = DMA_FROM_DEVICE; 668 scmd->sc_data_direction = DMA_FROM_DEVICE;
667 scmd->sdb.table.nents = 1; 669 scmd->sdb.table.nents = 1;
668 memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
669 scmd->cmnd[0] = REQUEST_SENSE; 670 scmd->cmnd[0] = REQUEST_SENSE;
670 scmd->cmnd[4] = scmd->sdb.length; 671 scmd->cmnd[4] = scmd->sdb.length;
671 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 672 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
672 } else { 673 } else {
673 scmd->sc_data_direction = DMA_NONE; 674 scmd->sc_data_direction = DMA_NONE;
674 if (cmnd) { 675 if (cmnd) {
675 memset(scmd->cmnd, 0, sizeof(scmd->cmnd)); 676 BUG_ON(cmnd_size > BLK_MAX_CDB);
676 memcpy(scmd->cmnd, cmnd, cmnd_size); 677 memcpy(scmd->cmnd, cmnd, cmnd_size);
677 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 678 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
678 } 679 }
@@ -705,7 +706,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
705 * Restore original data 706 * Restore original data
706 */ 707 */
707 scmd->cmd_len = ses->cmd_len; 708 scmd->cmd_len = ses->cmd_len;
708 memcpy(scmd->cmnd, ses->cmnd, sizeof(scmd->cmnd)); 709 scmd->cmnd = ses->cmnd;
709 scmd->sc_data_direction = ses->data_direction; 710 scmd->sc_data_direction = ses->data_direction;
710 scmd->sdb = ses->sdb; 711 scmd->sdb = ses->sdb;
711 scmd->request->next_rq = ses->next_rq; 712 scmd->request->next_rq = ses->next_rq;
@@ -1771,11 +1772,12 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1771 unsigned long flags; 1772 unsigned long flags;
1772 int rtn; 1773 int rtn;
1773 1774
1775 blk_rq_init(NULL, &req);
1774 scmd->request = &req; 1776 scmd->request = &req;
1775 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout)); 1777 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1776 1778
1777 memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd)); 1779 scmd->cmnd = req.cmd;
1778 1780
1779 scmd->scsi_done = scsi_reset_provider_done_command; 1781 scmd->scsi_done = scsi_reset_provider_done_command;
1780 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); 1782 memset(&scmd->sdb, 0, sizeof(scmd->sdb));
1781 1783
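
With scmd->cmnd now a pointer into the request rather than an embedded array, the error-handler hunks above save the original CDB pointer in the scsi_eh_save area, swap in the per-command eh_cmnd[] scratch buffer, and restore the pointer afterwards. A small sketch of that save/swap/restore idea, with 'eh_scratch' standing in for ses->eh_cmnd and the helper names invented for illustration:

#include <linux/blkdev.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>

static unsigned char eh_scratch[BLK_MAX_CDB];

static void example_eh_prep(struct scsi_cmnd *scmd, unsigned char **saved_cdb)
{
	*saved_cdb = scmd->cmnd;	/* remember the command's original CDB pointer */
	scmd->cmnd = eh_scratch;	/* point at a private scratch CDB */
	memset(scmd->cmnd, 0, BLK_MAX_CDB);
}

static void example_eh_restore(struct scsi_cmnd *scmd, unsigned char *saved_cdb)
{
	scmd->cmnd = saved_cdb;		/* put the original CDB pointer back */
}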
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 67f412bb4974..a82d2fe80fb5 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -445,7 +445,7 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
445 scsi_set_resid(cmd, 0); 445 scsi_set_resid(cmd, 0);
446 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 446 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
447 if (cmd->cmd_len == 0) 447 if (cmd->cmd_len == 0)
448 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); 448 cmd->cmd_len = scsi_command_size(cmd->cmnd);
449} 449}
450 450
451void scsi_device_unbusy(struct scsi_device *sdev) 451void scsi_device_unbusy(struct scsi_device *sdev)
@@ -536,6 +536,9 @@ static void scsi_run_queue(struct request_queue *q)
536 !shost->host_blocked && !shost->host_self_blocked && 536 !shost->host_blocked && !shost->host_self_blocked &&
537 !((shost->can_queue > 0) && 537 !((shost->can_queue > 0) &&
538 (shost->host_busy >= shost->can_queue))) { 538 (shost->host_busy >= shost->can_queue))) {
539
540 int flagset;
541
539 /* 542 /*
540 * As long as shost is accepting commands and we have 543 * As long as shost is accepting commands and we have
541 * starved queues, call blk_run_queue. scsi_request_fn 544 * starved queues, call blk_run_queue. scsi_request_fn
@@ -549,19 +552,20 @@ static void scsi_run_queue(struct request_queue *q)
549 sdev = list_entry(shost->starved_list.next, 552 sdev = list_entry(shost->starved_list.next,
550 struct scsi_device, starved_entry); 553 struct scsi_device, starved_entry);
551 list_del_init(&sdev->starved_entry); 554 list_del_init(&sdev->starved_entry);
552 spin_unlock_irqrestore(shost->host_lock, flags); 555 spin_unlock(shost->host_lock);
553 556
557 spin_lock(sdev->request_queue->queue_lock);
558 flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
559 !test_bit(QUEUE_FLAG_REENTER,
560 &sdev->request_queue->queue_flags);
561 if (flagset)
562 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
563 __blk_run_queue(sdev->request_queue);
564 if (flagset)
565 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
566 spin_unlock(sdev->request_queue->queue_lock);
554 567
555 if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && 568 spin_lock(shost->host_lock);
556 !test_and_set_bit(QUEUE_FLAG_REENTER,
557 &sdev->request_queue->queue_flags)) {
558 blk_run_queue(sdev->request_queue);
559 clear_bit(QUEUE_FLAG_REENTER,
560 &sdev->request_queue->queue_flags);
561 } else
562 blk_run_queue(sdev->request_queue);
563
564 spin_lock_irqsave(shost->host_lock, flags);
565 if (unlikely(!list_empty(&sdev->starved_entry))) 569 if (unlikely(!list_empty(&sdev->starved_entry)))
566 /* 570 /*
567 * sdev lost a race, and was put back on the 571 * sdev lost a race, and was put back on the
@@ -1090,6 +1094,8 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1090 cmd->tag = req->tag; 1094 cmd->tag = req->tag;
1091 cmd->request = req; 1095 cmd->request = req;
1092 1096
1097 cmd->cmnd = req->cmd;
1098
1093 return cmd; 1099 return cmd;
1094} 1100}
1095 1101
@@ -1127,8 +1133,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1127 req->buffer = NULL; 1133 req->buffer = NULL;
1128 } 1134 }
1129 1135
1130 BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
1131 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1132 cmd->cmd_len = req->cmd_len; 1136 cmd->cmd_len = req->cmd_len;
1133 if (!req->data_len) 1137 if (!req->data_len)
1134 cmd->sc_data_direction = DMA_NONE; 1138 cmd->sc_data_direction = DMA_NONE;
@@ -1165,6 +1169,7 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1165 if (unlikely(!cmd)) 1169 if (unlikely(!cmd))
1166 return BLKPREP_DEFER; 1170 return BLKPREP_DEFER;
1167 1171
1172 memset(cmd->cmnd, 0, BLK_MAX_CDB);
1168 return scsi_init_io(cmd, GFP_ATOMIC); 1173 return scsi_init_io(cmd, GFP_ATOMIC);
1169} 1174}
1170EXPORT_SYMBOL(scsi_setup_fs_cmnd); 1175EXPORT_SYMBOL(scsi_setup_fs_cmnd);
@@ -1585,8 +1590,9 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1585 1590
1586 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 1591 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1587 1592
1593 /* New queue, no concurrency on queue_flags */
1588 if (!shost->use_clustering) 1594 if (!shost->use_clustering)
1589 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 1595 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
1590 1596
1591 /* 1597 /*
1592 * set a reasonable default alignment on word boundaries: the 1598 * set a reasonable default alignment on word boundaries: the
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 3a1c99d5c775..e4a0d2f9b357 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -413,6 +413,7 @@ static int proc_scsi_open(struct inode *inode, struct file *file)
413} 413}
414 414
415static const struct file_operations proc_scsi_operations = { 415static const struct file_operations proc_scsi_operations = {
416 .owner = THIS_MODULE,
416 .open = proc_scsi_open, 417 .open = proc_scsi_open,
417 .read = seq_read, 418 .read = seq_read,
418 .write = proc_scsi_write, 419 .write = proc_scsi_write,
@@ -431,10 +432,9 @@ int __init scsi_init_procfs(void)
431 if (!proc_scsi) 432 if (!proc_scsi)
432 goto err1; 433 goto err1;
433 434
434 pde = create_proc_entry("scsi/scsi", 0, NULL); 435 pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations);
435 if (!pde) 436 if (!pde)
436 goto err2; 437 goto err2;
437 pde->proc_fops = &proc_scsi_operations;
438 438
439 return 0; 439 return 0;
440 440
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index fcd7455ffc39..a00eee6f7be9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1828,7 +1828,7 @@ void scsi_scan_host(struct Scsi_Host *shost)
1828 } 1828 }
1829 1829
1830 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); 1830 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
1831 if (unlikely(IS_ERR(p))) 1831 if (IS_ERR(p))
1832 do_scan_async(data); 1832 do_scan_async(data);
1833} 1833}
1834EXPORT_SYMBOL(scsi_scan_host); 1834EXPORT_SYMBOL(scsi_scan_host);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index ee8496aa0336..257e097c39af 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -107,6 +107,8 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
107 cmd->jiffies_at_alloc = jiffies; 107 cmd->jiffies_at_alloc = jiffies;
108 cmd->request = rq; 108 cmd->request = rq;
109 109
110 cmd->cmnd = rq->cmd;
111
110 rq->special = cmd; 112 rq->special = cmd;
111 rq->cmd_type = REQ_TYPE_SPECIAL; 113 rq->cmd_type = REQ_TYPE_SPECIAL;
112 rq->cmd_flags |= REQ_TYPE_BLOCK_PC; 114 rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 7899e3dda9bf..f4461d35ffb9 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -248,8 +248,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
248 else 248 else
249 q->queuedata = shost; 249 q->queuedata = shost;
250 250
251 set_bit(QUEUE_FLAG_BIDI, &q->queue_flags); 251 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
252
253 return 0; 252 return 0;
254} 253}
255 254
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3cea17dd5dba..01cefbb2d539 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -860,7 +860,6 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
860 860
861static void sd_prepare_flush(struct request_queue *q, struct request *rq) 861static void sd_prepare_flush(struct request_queue *q, struct request *rq)
862{ 862{
863 memset(rq->cmd, 0, sizeof(rq->cmd));
864 rq->cmd_type = REQ_TYPE_BLOCK_PC; 863 rq->cmd_type = REQ_TYPE_BLOCK_PC;
865 rq->timeout = SD_TIMEOUT; 864 rq->timeout = SD_TIMEOUT;
866 rq->cmd[0] = SYNCHRONIZE_CACHE; 865 rq->cmd[0] = SYNCHRONIZE_CACHE;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 2029422bc04d..c9d7f721b9e2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2667,7 +2667,6 @@ sg_proc_init(void)
2667{ 2667{
2668 int k, mask; 2668 int k, mask;
2669 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); 2669 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2670 struct proc_dir_entry *pdep;
2671 struct sg_proc_leaf * leaf; 2670 struct sg_proc_leaf * leaf;
2672 2671
2673 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); 2672 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
@@ -2676,13 +2675,10 @@ sg_proc_init(void)
2676 for (k = 0; k < num_leaves; ++k) { 2675 for (k = 0; k < num_leaves; ++k) {
2677 leaf = &sg_proc_leaf_arr[k]; 2676 leaf = &sg_proc_leaf_arr[k];
2678 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; 2677 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2679 pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp); 2678 leaf->fops->owner = THIS_MODULE;
2680 if (pdep) { 2679 leaf->fops->read = seq_read;
2681 leaf->fops->owner = THIS_MODULE, 2680 leaf->fops->llseek = seq_lseek;
2682 leaf->fops->read = seq_read, 2681 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2683 leaf->fops->llseek = seq_lseek,
2684 pdep->proc_fops = leaf->fops;
2685 }
2686 } 2682 }
2687 return 0; 2683 return 0;
2688} 2684}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 35142b5341b5..22a6aae78699 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1647,7 +1647,7 @@ static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
1647 SYM_QUEHEAD *qp; 1647 SYM_QUEHEAD *qp;
1648 struct sym_ccb *cp; 1648 struct sym_ccb *cp;
1649 1649
1650 while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) { 1650 while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
1651 struct scsi_cmnd *cmd; 1651 struct scsi_cmnd *cmd;
1652 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 1652 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
1653 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); 1653 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
@@ -3168,7 +3168,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int
3168 * the COMP queue and put back other ones into 3168 * the COMP queue and put back other ones into
3169 * the BUSY queue. 3169 * the BUSY queue.
3170 */ 3170 */
3171 while ((qp = sym_remque_head(&qtmp)) != 0) { 3171 while ((qp = sym_remque_head(&qtmp)) != NULL) {
3172 struct scsi_cmnd *cmd; 3172 struct scsi_cmnd *cmd;
3173 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 3173 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
3174 cmd = cp->cmd; 3174 cmd = cp->cmd;
@@ -5729,7 +5729,7 @@ void sym_hcb_free(struct sym_hcb *np)
5729 sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); 5729 sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");
5730 5730
5731 if (np->actccbs) { 5731 if (np->actccbs) {
5732 while ((qp = sym_remque_head(&np->free_ccbq)) != 0) { 5732 while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) {
5733 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 5733 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
5734 sym_mfree_dma(cp, sizeof(*cp), "CCB"); 5734 sym_mfree_dma(cp, sizeof(*cp), "CCB");
5735 } 5735 }
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 640333b1e75c..329eb8780e74 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -744,7 +744,8 @@ static int wait_on_busy(unsigned long iobase, unsigned int loop) {
744static int board_inquiry(unsigned int j) { 744static int board_inquiry(unsigned int j) {
745 struct mscp *cpp; 745 struct mscp *cpp;
746 dma_addr_t id_dma_addr; 746 dma_addr_t id_dma_addr;
747 unsigned int time, limit = 0; 747 unsigned int limit = 0;
748 unsigned long time;
748 749
749 id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id, 750 id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id,
750 sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL); 751 sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL);
@@ -1392,7 +1393,8 @@ static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
1392} 1393}
1393 1394
1394static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) { 1395static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
1395 unsigned int i, j, time, k, c, limit = 0; 1396 unsigned int i, j, k, c, limit = 0;
1397 unsigned long time;
1396 int arg_done = FALSE; 1398 int arg_done = FALSE;
1397 struct scsi_cmnd *SCpnt; 1399 struct scsi_cmnd *SCpnt;
1398 1400
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index f385dce8dfbe..27aa40f3980e 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -951,7 +951,7 @@ static int ultrastor_abort(struct scsi_cmnd *SCpnt)
951 printk("abort: command mismatch, %p != %p\n", 951 printk("abort: command mismatch, %p != %p\n",
952 config.mscp[mscp_index].SCint, SCpnt); 952 config.mscp[mscp_index].SCint, SCpnt);
953#endif 953#endif
954 if (config.mscp[mscp_index].SCint == 0) 954 if (config.mscp[mscp_index].SCint == NULL)
955 return FAILED; 955 return FAILED;
956 956
957 if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort"); 957 if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
@@ -1101,7 +1101,7 @@ static void ultrastor_interrupt(void *dev_id)
1101 SCtmp = mscp->SCint; 1101 SCtmp = mscp->SCint;
1102 mscp->SCint = NULL; 1102 mscp->SCint = NULL;
1103 1103
1104 if (SCtmp == 0) 1104 if (!SCtmp)
1105 { 1105 {
1106#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT) 1106#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
1107 printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp); 1107 printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp);
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 2b8a410e0959..bbf5bc5892c7 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -200,7 +200,7 @@ static void rs_stop(struct tty_struct *tty)
200 local_irq_restore(flags); 200 local_irq_restore(flags);
201} 201}
202 202
203static void rs_put_char(char ch) 203static int rs_put_char(char ch)
204{ 204{
205 int flags, loops = 0; 205 int flags, loops = 0;
206 206
@@ -214,6 +214,7 @@ static void rs_put_char(char ch)
214 UTX_TXDATA = ch; 214 UTX_TXDATA = ch;
215 udelay(5); 215 udelay(5);
216 local_irq_restore(flags); 216 local_irq_restore(flags);
217 return 1;
217} 218}
218 219
219static void rs_start(struct tty_struct *tty) 220static void rs_start(struct tty_struct *tty)
@@ -1017,18 +1018,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
1017 tty_wait_until_sent(tty, 0); 1018 tty_wait_until_sent(tty, 0);
1018 send_break(info, arg ? arg*(100) : 250); 1019 send_break(info, arg ? arg*(100) : 250);
1019 return 0; 1020 return 0;
1020 case TIOCGSOFTCAR:
1021 error = put_user(C_CLOCAL(tty) ? 1 : 0,
1022 (unsigned long *) arg);
1023 if (error)
1024 return error;
1025 return 0;
1026 case TIOCSSOFTCAR:
1027 get_user(arg, (unsigned long *) arg);
1028 tty->termios->c_cflag =
1029 ((tty->termios->c_cflag & ~CLOCAL) |
1030 (arg ? CLOCAL : 0));
1031 return 0;
1032 case TIOCGSERIAL: 1021 case TIOCGSERIAL:
1033 if (access_ok(VERIFY_WRITE, (void *) arg, 1022 if (access_ok(VERIFY_WRITE, (void *) arg,
1034 sizeof(struct serial_struct))) 1023 sizeof(struct serial_struct)))
@@ -1061,9 +1050,6 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
1061{ 1050{
1062 struct m68k_serial *info = (struct m68k_serial *)tty->driver_data; 1051 struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
1063 1052
1064 if (tty->termios->c_cflag == old_termios->c_cflag)
1065 return;
1066
1067 change_speed(info); 1053 change_speed(info);
1068 1054
1069 if ((old_termios->c_cflag & CRTSCTS) && 1055 if ((old_termios->c_cflag & CRTSCTS) &&
@@ -1140,8 +1126,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
1140 uart->ustcnt &= ~(USTCNT_RXEN | USTCNT_RX_INTR_MASK); 1126 uart->ustcnt &= ~(USTCNT_RXEN | USTCNT_RX_INTR_MASK);
1141 1127
1142 shutdown(info); 1128 shutdown(info);
1143 if (tty->driver->flush_buffer) 1129 rs_flush_buffer(tty);
1144 tty->driver->flush_buffer(tty);
1145 1130
1146 tty_ldisc_flush(tty); 1131 tty_ldisc_flush(tty);
1147 tty->closing = 0; 1132 tty->closing = 0;
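The 68328serial hunks above change rs_put_char() from void to int so it can report whether the character was queued. A self-contained sketch of how such a return value can be consumed; the helper and its callback type are illustrative, not taken from the patch:

/* Illustrative only: count how many characters a put_char()-style callback
 * (returning 1 when the byte was queued, 0 otherwise) actually accepted. */
static int queue_string(int (*put_char)(char ch), const char *s, int len)
{
	int queued = 0;

	while (len-- > 0 && put_char(*s++))
		queued++;
	return queued;
}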
diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
index f59463601874..d9d4e9552a4d 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/serial/68360serial.c
@@ -995,10 +995,10 @@ static void rs_360_put_char(struct tty_struct *tty, unsigned char ch)
995 volatile QUICC_BD *bdp; 995 volatile QUICC_BD *bdp;
996 996
997 if (serial_paranoia_check(info, tty->name, "rs_put_char")) 997 if (serial_paranoia_check(info, tty->name, "rs_put_char"))
998 return; 998 return 0;
999 999
1000 if (!tty) 1000 if (!tty)
1001 return; 1001 return 0;
1002 1002
1003 bdp = info->tx_cur; 1003 bdp = info->tx_cur;
1004 while (bdp->status & BD_SC_READY); 1004 while (bdp->status & BD_SC_READY);
@@ -1016,6 +1016,7 @@ static void rs_360_put_char(struct tty_struct *tty, unsigned char ch)
1016 bdp++; 1016 bdp++;
1017 1017
1018 info->tx_cur = (QUICC_BD *)bdp; 1018 info->tx_cur = (QUICC_BD *)bdp;
1019 return 1;
1019 1020
1020} 1021}
1021 1022
@@ -1246,7 +1247,7 @@ static int rs_360_tiocmget(struct tty_struct *tty, struct file *file)
1246#ifdef modem_control 1247#ifdef modem_control
1247 unsigned char control, status; 1248 unsigned char control, status;
1248 1249
1249 if (serial_paranoia_check(info, tty->name, __FUNCTION__)) 1250 if (serial_paranoia_check(info, tty->name, __func__))
1250 return -ENODEV; 1251 return -ENODEV;
1251 1252
1252 if (tty->flags & (1 << TTY_IO_ERROR)) 1253 if (tty->flags & (1 << TTY_IO_ERROR))
@@ -1277,12 +1278,12 @@ static int rs_360_tiocmset(struct tty_struct *tty, struct file *file,
1277 ser_info_t *info = (ser_info_t *)tty->driver_data; 1278 ser_info_t *info = (ser_info_t *)tty->driver_data;
1278 unsigned int arg; 1279 unsigned int arg;
1279 1280
1280 if (serial_paranoia_check(info, tty->name, __FUNCTION__)) 1281 if (serial_paranoia_check(info, tty->name, __func__))
1281 return -ENODEV; 1282 return -ENODEV;
1282 1283
1283 if (tty->flags & (1 << TTY_IO_ERROR)) 1284 if (tty->flags & (1 << TTY_IO_ERROR))
1284 return -EIO; 1285 return -EIO;
1285 1286 /* FIXME: locking on info->mcr */
1286 if (set & TIOCM_RTS) 1287 if (set & TIOCM_RTS)
1287 info->mcr |= UART_MCR_RTS; 1288 info->mcr |= UART_MCR_RTS;
1288 if (set & TIOCM_DTR) 1289 if (set & TIOCM_DTR)
@@ -1436,18 +1437,6 @@ static int rs_360_ioctl(struct tty_struct *tty, struct file * file,
1436 return retval; 1437 return retval;
1437 end_break(info); 1438 end_break(info);
1438 return 0; 1439 return 0;
1439 case TIOCGSOFTCAR:
1440 /* return put_user(C_CLOCAL(tty) ? 1 : 0, (int *) arg); */
1441 put_user(C_CLOCAL(tty) ? 1 : 0, (int *) arg);
1442 return 0;
1443 case TIOCSSOFTCAR:
1444 error = get_user(arg, (unsigned int *) arg);
1445 if (error)
1446 return error;
1447 tty->termios->c_cflag =
1448 ((tty->termios->c_cflag & ~CLOCAL) |
1449 (arg ? CLOCAL : 0));
1450 return 0;
1451#ifdef maybe 1440#ifdef maybe
1452 case TIOCSERGETLSR: /* Get line status register */ 1441 case TIOCSERGETLSR: /* Get line status register */
1453 return get_lsr_info(info, (unsigned int *) arg); 1442 return get_lsr_info(info, (unsigned int *) arg);
@@ -1665,8 +1654,7 @@ static void rs_360_close(struct tty_struct *tty, struct file * filp)
1665 rs_360_wait_until_sent(tty, info->timeout); 1654 rs_360_wait_until_sent(tty, info->timeout);
1666 } 1655 }
1667 shutdown(info); 1656 shutdown(info);
1668 if (tty->driver->flush_buffer) 1657 rs_360_flush_buffer(tty);
1669 tty->driver->flush_buffer(tty);
1670 tty_ldisc_flush(tty); 1658 tty_ldisc_flush(tty);
1671 tty->closing = 0; 1659 tty->closing = 0;
1672 info->event = 0; 1660 info->event = 0;
@@ -1717,6 +1705,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
1717 printk("jiff=%lu...", jiffies); 1705 printk("jiff=%lu...", jiffies);
1718#endif 1706#endif
1719 1707
1708 lock_kernel();
1720 /* We go through the loop at least once because we can't tell 1709 /* We go through the loop at least once because we can't tell
1721 * exactly when the last character exits the shifter. There can 1710 * exactly when the last character exits the shifter. There can
1722 * be at least two characters waiting to be sent after the buffers 1711 * be at least two characters waiting to be sent after the buffers
@@ -1745,6 +1734,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
1745 bdp--; 1734 bdp--;
1746 } while (bdp->status & BD_SC_READY); 1735 } while (bdp->status & BD_SC_READY);
1747 current->state = TASK_RUNNING; 1736 current->state = TASK_RUNNING;
1737 unlock_kernel();
1748#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT 1738#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
1749 printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); 1739 printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
1750#endif 1740#endif
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index ea41f2626458..a1ca9b7bf2d5 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2271,7 +2271,8 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
2271 } 2271 }
2272 2272
2273 if (up->port.flags & UPF_IOREMAP) { 2273 if (up->port.flags & UPF_IOREMAP) {
2274 up->port.membase = ioremap(up->port.mapbase, size); 2274 up->port.membase = ioremap_nocache(up->port.mapbase,
2275 size);
2275 if (!up->port.membase) { 2276 if (!up->port.membase) {
2276 release_mem_region(up->port.mapbase, size); 2277 release_mem_region(up->port.mapbase, size);
2277 ret = -ENOMEM; 2278 ret = -ENOMEM;
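This hunk (and several like it below) switches the register window from ioremap() to ioremap_nocache(). A hedged sketch of the surrounding claim-then-map pattern, written against the generic struct uart_port; the helper name and resource label are illustrative:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/serial_core.h>

/* Sketch only: claim the MMIO window, then map it uncached, unwinding the
 * region claim if the mapping fails, as the hunk above does. */
static int map_uart_regs(struct uart_port *port, unsigned int size)
{
	if (!request_mem_region(port->mapbase, size, "serial"))
		return -EBUSY;

	port->membase = ioremap_nocache(port->mapbase, size);
	if (!port->membase) {
		release_mem_region(port->mapbase, size);
		return -ENOMEM;
	}
	return 0;
}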
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index 38776e8b064b..f279745e9fef 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -153,10 +153,10 @@ static int __init parse_options(struct early_serial8250_device *device,
153 (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE); 153 (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
154 port->membase += port->mapbase & ~PAGE_MASK; 154 port->membase += port->mapbase & ~PAGE_MASK;
155#else 155#else
156 port->membase = ioremap(port->mapbase, 64); 156 port->membase = ioremap_nocache(port->mapbase, 64);
157 if (!port->membase) { 157 if (!port->membase) {
158 printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n", 158 printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
159 __FUNCTION__, 159 __func__,
160 (unsigned long long)port->mapbase); 160 (unsigned long long)port->mapbase);
161 return -ENOMEM; 161 return -ENOMEM;
162 } 162 }
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 6e57382b9137..53fa19cf2f06 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -86,7 +86,7 @@ setup_port(struct serial_private *priv, struct uart_port *port,
86 len = pci_resource_len(dev, bar); 86 len = pci_resource_len(dev, bar);
87 87
88 if (!priv->remapped_bar[bar]) 88 if (!priv->remapped_bar[bar])
89 priv->remapped_bar[bar] = ioremap(base, len); 89 priv->remapped_bar[bar] = ioremap_nocache(base, len);
90 if (!priv->remapped_bar[bar]) 90 if (!priv->remapped_bar[bar])
91 return -ENOMEM; 91 return -ENOMEM;
92 92
@@ -270,7 +270,7 @@ static int pci_plx9050_init(struct pci_dev *dev)
270 /* 270 /*
271 * enable/disable interrupts 271 * enable/disable interrupts
272 */ 272 */
273 p = ioremap(pci_resource_start(dev, 0), 0x80); 273 p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
274 if (p == NULL) 274 if (p == NULL)
275 return -ENOMEM; 275 return -ENOMEM;
276 writel(irq_config, p + 0x4c); 276 writel(irq_config, p + 0x4c);
@@ -294,7 +294,7 @@ static void __devexit pci_plx9050_exit(struct pci_dev *dev)
294 /* 294 /*
295 * disable interrupts 295 * disable interrupts
296 */ 296 */
297 p = ioremap(pci_resource_start(dev, 0), 0x80); 297 p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
298 if (p != NULL) { 298 if (p != NULL) {
299 writel(0, p + 0x4c); 299 writel(0, p + 0x4c);
300 300
@@ -341,7 +341,8 @@ static int sbs_init(struct pci_dev *dev)
341{ 341{
342 u8 __iomem *p; 342 u8 __iomem *p;
343 343
344 p = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); 344 p = ioremap_nocache(pci_resource_start(dev, 0),
345 pci_resource_len(dev, 0));
345 346
346 if (p == NULL) 347 if (p == NULL)
347 return -ENOMEM; 348 return -ENOMEM;
@@ -365,7 +366,8 @@ static void __devexit sbs_exit(struct pci_dev *dev)
365{ 366{
366 u8 __iomem *p; 367 u8 __iomem *p;
367 368
368 p = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); 369 p = ioremap_nocache(pci_resource_start(dev, 0),
370 pci_resource_len(dev, 0));
369 /* FIXME: What if resource_len < OCT_REG_CR_OFF */ 371 /* FIXME: What if resource_len < OCT_REG_CR_OFF */
370 if (p != NULL) 372 if (p != NULL)
371 writeb(0, p + OCT_REG_CR_OFF); 373 writeb(0, p + OCT_REG_CR_OFF);
@@ -419,7 +421,7 @@ static int pci_siig10x_init(struct pci_dev *dev)
419 break; 421 break;
420 } 422 }
421 423
422 p = ioremap(pci_resource_start(dev, 0), 0x80); 424 p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
423 if (p == NULL) 425 if (p == NULL)
424 return -ENOMEM; 426 return -ENOMEM;
425 427
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 34b809e3b596..36acbcca2d48 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1355,4 +1355,47 @@ config SERIAL_SC26XX_CONSOLE
1355 help 1355 help
1356 Support for Console on SC2681/SC2692 serial ports. 1356 Support for Console on SC2681/SC2692 serial ports.
1357 1357
1358config SERIAL_BFIN_SPORT
1359 tristate "Blackfin SPORT emulate UART (EXPERIMENTAL)"
1360 depends on BFIN && EXPERIMENTAL
1361 select SERIAL_CORE
1362 help
1363	  Enable UART emulation over the SPORT peripheral on Blackfin parts.
1364
1365 To compile this driver as a module, choose M here: the
1366 module will be called bfin_sport_uart.
1367
1368choice
1369 prompt "Baud rate for Blackfin SPORT UART"
1370 depends on SERIAL_BFIN_SPORT
1371 default SERIAL_SPORT_BAUD_RATE_57600
1372 help
1373	  Choose a baud rate for the SPORT UART; the other settings are fixed
1374	  at 8 data bits, 1 stop bit, no parity, and no flow control.
1375
1376config SERIAL_SPORT_BAUD_RATE_115200
1377 bool "115200"
1378
1379config SERIAL_SPORT_BAUD_RATE_57600
1380 bool "57600"
1381
1382config SERIAL_SPORT_BAUD_RATE_38400
1383 bool "38400"
1384
1385config SERIAL_SPORT_BAUD_RATE_19200
1386 bool "19200"
1387
1388config SERIAL_SPORT_BAUD_RATE_9600
1389 bool "9600"
1390endchoice
1391
1392config SPORT_BAUD_RATE
1393 int
1394 depends on SERIAL_BFIN_SPORT
1395 default 115200 if (SERIAL_SPORT_BAUD_RATE_115200)
1396 default 57600 if (SERIAL_SPORT_BAUD_RATE_57600)
1397 default 38400 if (SERIAL_SPORT_BAUD_RATE_38400)
1398 default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
1399 default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
1400
1358endmenu 1401endmenu
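The choice above collapses into the single integer CONFIG_SPORT_BAUD_RATE, which the new driver added below feeds directly into .uartclk. A minimal sketch of that consumption; the fallback define exists only so the fragment stands alone outside Kconfig:

/* Illustration: the Kconfig choice becomes one compile-time integer. */
#ifndef CONFIG_SPORT_BAUD_RATE
#define CONFIG_SPORT_BAUD_RATE 57600	/* the Kconfig default shown above */
#endif

static const unsigned int sport_default_baud = CONFIG_SPORT_BAUD_RATE;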
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index f02ff9fad017..0d9c09b1e836 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SERIAL_PXA) += pxa.o
27obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o 27obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
28obj-$(CONFIG_SERIAL_SA1100) += sa1100.o 28obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
29obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o 29obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
30obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
30obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o 31obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
31obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o 32obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o
32obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o 33obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 5f55534a290b..8a2f6a1baa74 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -762,7 +762,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
762 break; 762 break;
763 default: 763 default:
764 printk(KERN_ERR "%s: word length not supported\n", 764 printk(KERN_ERR "%s: word length not supported\n",
765 __FUNCTION__); 765 __func__);
766 } 766 }
767 767
768 if (termios->c_cflag & CSTOPB) 768 if (termios->c_cflag & CSTOPB)
@@ -1029,7 +1029,7 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
1029 1029
1030 *baud = get_sclk() / (16*(dll | dlh << 8)); 1030 *baud = get_sclk() / (16*(dll | dlh << 8));
1031 } 1031 }
1032 pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __FUNCTION__, *baud, *parity, *bits); 1032 pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits);
1033} 1033}
1034#endif 1034#endif
1035 1035
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
new file mode 100644
index 000000000000..aca1240ad808
--- /dev/null
+++ b/drivers/serial/bfin_sport_uart.c
@@ -0,0 +1,614 @@
1/*
2 * File: linux/drivers/serial/bfin_sport_uart.c
3 *
4 * Based on: drivers/serial/bfin_5xx.c by Aubrey Li.
5 * Author: Roy Huang <roy.huang@analog.com>
6 *
7 * Created: Nov 22, 2006
8 * Copyright: (c) 2006-2007 Analog Devices Inc.
 9 * Description: this driver enables the SPORTs on Blackfin to emulate a UART.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see the file COPYING, or write
23 * to the Free Software Foundation, Inc.,
24 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27/*
28 * This driver and the supported hardware follow application note EE-191 from ADI:
29 * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
30 * The application note describes how to implement a UART on a SHARC DSP,
31 * but this driver implements the scheme on a Blackfin processor.
32 */
33
34/* After reset, a short low-level pulse precedes the first transmitted data;
35 * no additional pulse appears on subsequent transmits.
36 * According to the documentation:
37 * The SPORTs are ready to start transmitting or receiving data no later than
38 * three serial clock cycles after they are enabled in the SPORTx_TCR1 or
39 * SPORTx_RCR1 register. No serial clock cycles are lost from this point on.
40 * The first internal frame sync will occur one frame sync delay after the
41 * SPORTs are ready. External frame syncs can occur as soon as the SPORT is
42 * ready.
43 */
44
45/* Thanks to Axel Alatalo <axel@rubico.se> for fixing a SPORT RX bug that
46 * sometimes made the SPORT receive data incorrectly. In Axel's words:
47 * As in EE-191, the SPORT RX samples at 3x the UART baud rate and takes the
48 * middle sample of every group of 3 as the data bit. For an 8-N-1 UART
49 * frame, 30 samples are required per byte. If the transmitter sends a byte
50 * that is 1/3 of a bit short due to baud-rate drift, the 30th sample of that
51 * byte (which is also the third sample of the stop bit) lands on the
52 * immediately following start bit, which is then thrown away and missed.
53 * Because part of the start bit is lost, the receiver begins to drift, and
54 * the effect accumulates over time until synchronization is lost.
55 * If only 2 samples of the stop bit are required (29 samples in total), a
56 * too-short byte as in the case above is tolerated: the 1/3-early start bit
57 * triggers a frame sync, because the previous read completes after only 2/3
58 * of the stop bit and the frame sync is active during the last 1/3, looking
59 * for a possible early start bit. */
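A short worked version of the arithmetic in the comment above. It assumes, as the driver setup below suggests, that the SPORT length register holds the frame length minus one; the standalone program is purely illustrative:

#include <stdio.h>

int main(void)
{
	const int oversample = 3;		/* RX clock runs at 3x the baud rate */
	const int frame_bits = 1 + 8 + 1;	/* start + 8 data + stop (8-N-1)     */

	int full_frame_samples = frame_bits * oversample;	/* 30 samples              */
	int accepted_samples   = full_frame_samples - 1;	/* 29: only 2 stop samples */
	int rcr2_setting       = accepted_samples - 1;		/* 28, as programmed below */

	printf("samples=%d accepted=%d RCR2=%d\n",
	       full_frame_samples, accepted_samples, rcr2_setting);
	return 0;
}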
60
61//#define DEBUG
62
63#include <linux/module.h>
64#include <linux/ioport.h>
65#include <linux/init.h>
66#include <linux/console.h>
67#include <linux/sysrq.h>
68#include <linux/platform_device.h>
69#include <linux/tty.h>
70#include <linux/tty_flip.h>
71#include <linux/serial_core.h>
72
73#include <asm/delay.h>
74#include <asm/portmux.h>
75
76#include "bfin_sport_uart.h"
77
78unsigned short bfin_uart_pin_req_sport0[] =
79 {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \
80 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
81
82unsigned short bfin_uart_pin_req_sport1[] =
83 {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \
84 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
85
86#define DRV_NAME "bfin-sport-uart"
87
88struct sport_uart_port {
89 struct uart_port port;
90 char *name;
91
92 int tx_irq;
93 int rx_irq;
94 int err_irq;
95};
96
97static void sport_uart_tx_chars(struct sport_uart_port *up);
98static void sport_stop_tx(struct uart_port *port);
99
100static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
101{
102 pr_debug("%s value:%x\n", __FUNCTION__, value);
103 /* Place a Start and Stop bit */
104 __asm__ volatile (
105 "R2 = b#01111111100;\n\t"
106 "R3 = b#10000000001;\n\t"
107 "%0 <<= 2;\n\t"
108 "%0 = %0 & R2;\n\t"
109 "%0 = %0 | R3;\n\t"
110 :"=r"(value)
111 :"0"(value)
112 :"R2", "R3");
113 pr_debug("%s value:%x\n", __FUNCTION__, value);
114
115 SPORT_PUT_TX(up, value);
116}
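The assembly above builds an 11-bit, LSB-first word: the 8 data bits land in bits 2..9, bit 1 stays low as the start bit, and bits 0 and 10 are forced high. A portable sketch of the same computation; reading bit 0 as an idle/mark bit ahead of the start bit is an interpretation, not something stated in the patch:

/* Portable equivalent of the Blackfin framing asm above. */
static inline unsigned int frame_tx_word(unsigned int data)
{
	unsigned int word = (data & 0xff) << 2;		/* data into bits 2..9       */

	word |= (1u << 0) | (1u << 10);			/* bits 0 and 10 high        */
	return word;					/* bit 1 is 0: the start bit */
}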
117
118static inline unsigned int rx_one_byte(struct sport_uart_port *up)
119{
120 unsigned int value, extract;
121
122 value = SPORT_GET_RX32(up);
123 pr_debug("%s value:%x\n", __FUNCTION__, value);
124
125 /* Extract 8 bits data */
126 __asm__ volatile (
127 "R5 = 0;\n\t"
128 "P0 = 8;\n\t"
129 "R1 = 0x1801(Z);\n\t"
130 "R3 = 0x0300(Z);\n\t"
131 "R4 = 0;\n\t"
132 "LSETUP(loop_s, loop_e) LC0 = P0;\nloop_s:\t"
133 "R2 = extract(%1, R1.L)(Z);\n\t"
134 "R2 <<= R4;\n\t"
135 "R5 = R5 | R2;\n\t"
136 "R1 = R1 - R3;\nloop_e:\t"
137 "R4 += 1;\n\t"
138 "%0 = R5;\n\t"
139 :"=r"(extract)
140 :"r"(value)
141 :"P0", "R1", "R2","R3","R4", "R5");
142
143 pr_debug(" extract:%x\n", extract);
144 return extract;
145}
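The loop above extracts one bit from every third sample position of the 3x-oversampled receive word and packs the bits LSB-first. A portable sketch of the same computation; the reading of the EXTRACT descriptor (position in the high byte, length 1 in the low byte, stepped down by 0x0300 per iteration) is inferred from the constants, not documented in the patch:

/* Portable equivalent of the Blackfin bit-extraction asm above. */
static inline unsigned int unpack_rx_word(unsigned int raw)
{
	unsigned int byte = 0;
	int bit;

	for (bit = 0; bit < 8; bit++) {
		unsigned int pos = 24 - 3 * bit;	/* every third sample: 24, 21, ..., 3 */

		byte |= ((raw >> pos) & 1) << bit;
	}
	return byte;
}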
146
147static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate)
148{
149 int tclkdiv, tfsdiv, rclkdiv;
150
151 /* Set TCR1 and TCR2 */
152 SPORT_PUT_TCR1(up, (LTFS | ITFS | TFSR | TLSBIT | ITCLK));
153 SPORT_PUT_TCR2(up, 10);
154 pr_debug("%s TCR1:%x, TCR2:%x\n", __FUNCTION__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
155
156 /* Set RCR1 and RCR2 */
157 SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK));
158 SPORT_PUT_RCR2(up, 28);
159 pr_debug("%s RCR1:%x, RCR2:%x\n", __FUNCTION__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));
160
161 tclkdiv = sclk/(2 * baud_rate) - 1;
162 tfsdiv = 12;
163 rclkdiv = sclk/(2 * baud_rate * 3) - 1;
164 SPORT_PUT_TCLKDIV(up, tclkdiv);
165 SPORT_PUT_TFSDIV(up, tfsdiv);
166 SPORT_PUT_RCLKDIV(up, rclkdiv);
167 SSYNC();
168 pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n",
169 __FUNCTION__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv);
170
171 return 0;
172}
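Plugging illustrative numbers into the divider formulas above; the 120 MHz system clock and 57600 baud are assumptions for the example, not values taken from this patch:

#include <stdio.h>

int main(void)
{
	unsigned int sclk = 120000000, baud = 57600;

	unsigned int tclkdiv = sclk / (2 * baud) - 1;		/* 1040: TX clock ~= baud    */
	unsigned int rclkdiv = sclk / (2 * baud * 3) - 1;	/*  346: RX clock ~= 3x baud */

	printf("tclkdiv=%u rclkdiv=%u\n", tclkdiv, rclkdiv);
	return 0;
}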
173
174static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
175{
176 struct sport_uart_port *up = dev_id;
177 struct tty_struct *tty = up->port.info->tty;
178 unsigned int ch;
179
180 do {
181 ch = rx_one_byte(up);
182 up->port.icount.rx++;
183
184 if (uart_handle_sysrq_char(&up->port, ch))
185 ;
186 else
187 tty_insert_flip_char(tty, ch, TTY_NORMAL);
188 } while (SPORT_GET_STAT(up) & RXNE);
189 tty_flip_buffer_push(tty);
190
191 return IRQ_HANDLED;
192}
193
194static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
195{
196 sport_uart_tx_chars(dev_id);
197
198 return IRQ_HANDLED;
199}
200
201static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
202{
203 struct sport_uart_port *up = dev_id;
204 struct tty_struct *tty = up->port.info->tty;
205 unsigned int stat = SPORT_GET_STAT(up);
206
207 /* Overflow in RX FIFO */
208 if (stat & ROVF) {
209 up->port.icount.overrun++;
210 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
211 SPORT_PUT_STAT(up, ROVF); /* Clear ROVF bit */
212 }
213 /* These should not happen */
214 if (stat & (TOVF | TUVF | RUVF)) {
215 printk(KERN_ERR "SPORT Error:%s %s %s\n",
216 (stat & TOVF)?"TX overflow":"",
217 (stat & TUVF)?"TX underflow":"",
218 (stat & RUVF)?"RX underflow":"");
219 SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
220 SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
221 }
222 SSYNC();
223
224 return IRQ_HANDLED;
225}
226
227/* Request IRQs and set up the clock */
228static int sport_startup(struct uart_port *port)
229{
230 struct sport_uart_port *up = (struct sport_uart_port *)port;
231 char buffer[20];
232 int retval;
233
234 pr_debug("%s enter\n", __FUNCTION__);
235	memset(buffer, 0, sizeof(buffer));
236 snprintf(buffer, 20, "%s rx", up->name);
237 retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
238 if (retval) {
239 printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
240 return retval;
241 }
242
243 snprintf(buffer, 20, "%s tx", up->name);
244 retval = request_irq(up->tx_irq, sport_uart_tx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
245 if (retval) {
246 printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
247 goto fail1;
248 }
249
250 snprintf(buffer, 20, "%s err", up->name);
251 retval = request_irq(up->err_irq, sport_uart_err_irq, IRQF_SAMPLE_RANDOM, buffer, up);
252 if (retval) {
253 printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
254 goto fail2;
255 }
256
257 if (port->line) {
258 if (peripheral_request_list(bfin_uart_pin_req_sport1, DRV_NAME))
259 goto fail3;
260 } else {
261 if (peripheral_request_list(bfin_uart_pin_req_sport0, DRV_NAME))
262 goto fail3;
263 }
264
265 sport_uart_setup(up, get_sclk(), port->uartclk);
266
267 /* Enable receive interrupt */
268 SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) | RSPEN));
269 SSYNC();
270
271 return 0;
272
273
274fail3:
275 printk(KERN_ERR DRV_NAME
276 ": Requesting Peripherals failed\n");
277
278 free_irq(up->err_irq, up);
279fail2:
280 free_irq(up->tx_irq, up);
281fail1:
282 free_irq(up->rx_irq, up);
283
284 return retval;
285
286}
287
288static void sport_uart_tx_chars(struct sport_uart_port *up)
289{
290 struct circ_buf *xmit = &up->port.info->xmit;
291
292 if (SPORT_GET_STAT(up) & TXF)
293 return;
294
295 if (up->port.x_char) {
296 tx_one_byte(up, up->port.x_char);
297 up->port.icount.tx++;
298 up->port.x_char = 0;
299 return;
300 }
301
302 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
303 sport_stop_tx(&up->port);
304 return;
305 }
306
307 while(!(SPORT_GET_STAT(up) & TXF) && !uart_circ_empty(xmit)) {
308 tx_one_byte(up, xmit->buf[xmit->tail]);
309 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE -1);
310 up->port.icount.tx++;
311 }
312
313 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
314 uart_write_wakeup(&up->port);
315}
316
317static unsigned int sport_tx_empty(struct uart_port *port)
318{
319 struct sport_uart_port *up = (struct sport_uart_port *)port;
320 unsigned int stat;
321
322 stat = SPORT_GET_STAT(up);
323 pr_debug("%s stat:%04x\n", __FUNCTION__, stat);
324 if (stat & TXHRE) {
325 return TIOCSER_TEMT;
326 } else
327 return 0;
328}
329
330static unsigned int sport_get_mctrl(struct uart_port *port)
331{
332 pr_debug("%s enter\n", __FUNCTION__);
333 return (TIOCM_CTS | TIOCM_CD | TIOCM_DSR);
334}
335
336static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
337{
338 pr_debug("%s enter\n", __FUNCTION__);
339}
340
341static void sport_stop_tx(struct uart_port *port)
342{
343 struct sport_uart_port *up = (struct sport_uart_port *)port;
344 unsigned int stat;
345
346 pr_debug("%s enter\n", __FUNCTION__);
347
348 stat = SPORT_GET_STAT(up);
349 while(!(stat & TXHRE)) {
350 udelay(1);
351 stat = SPORT_GET_STAT(up);
352 }
353	/* Although the hold register is empty, the last byte is still in the
354	 * shift register and has not gone out yet. If the baud rate is lower
355	 * than the default, the delay should be longer; for example, at 9600
356	 * baud the delay must be at least 2 ms, found by experiment. */
357 udelay(500);
358
359 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
360 SSYNC();
361
362 return;
363}
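Following the comment above, the drain time scales with the character time: a 10-bit 8-N-1 character lasts 10/baud seconds, so two character times give roughly the 2 ms quoted for 9600 baud. A small sketch of that arithmetic; the helper is illustrative, and the driver itself keeps a fixed udelay(500):

/* Illustrative only: two 8-N-1 character times, expressed in microseconds. */
static inline unsigned int char_drain_delay_us(unsigned int baud)
{
	return (2u * 10u * 1000000u) / baud;	/* ~2083 us at 9600 baud */
}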
364
365static void sport_start_tx(struct uart_port *port)
366{
367 struct sport_uart_port *up = (struct sport_uart_port *)port;
368
369 pr_debug("%s enter\n", __FUNCTION__);
370	/* Write data into the SPORT FIFO before enabling the SPORT transmitter */
371 sport_uart_tx_chars(up);
372
373	/* Enable transmit; an interrupt will then be generated */
374 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
375 SSYNC();
376 pr_debug("%s exit\n", __FUNCTION__);
377}
378
379static void sport_stop_rx(struct uart_port *port)
380{
381 struct sport_uart_port *up = (struct sport_uart_port *)port;
382
383 pr_debug("%s enter\n", __FUNCTION__);
384 /* Disable sport to stop rx */
385 SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
386 SSYNC();
387}
388
389static void sport_enable_ms(struct uart_port *port)
390{
391 pr_debug("%s enter\n", __FUNCTION__);
392}
393
394static void sport_break_ctl(struct uart_port *port, int break_state)
395{
396 pr_debug("%s enter\n", __FUNCTION__);
397}
398
399static void sport_shutdown(struct uart_port *port)
400{
401 struct sport_uart_port *up = (struct sport_uart_port *)port;
402
403 pr_debug("%s enter\n", __FUNCTION__);
404
405 /* Disable sport */
406 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
407 SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
408 SSYNC();
409
410 if (port->line) {
411 peripheral_free_list(bfin_uart_pin_req_sport1);
412 } else {
413 peripheral_free_list(bfin_uart_pin_req_sport0);
414 }
415
416 free_irq(up->rx_irq, up);
417 free_irq(up->tx_irq, up);
418 free_irq(up->err_irq, up);
419}
420
421static void sport_set_termios(struct uart_port *port,
422 struct termios *termios, struct termios *old)
423{
424 pr_debug("%s enter, c_cflag:%08x\n", __FUNCTION__, termios->c_cflag);
425	uart_update_timeout(port, CS8, port->uartclk);
426}
427
428static const char *sport_type(struct uart_port *port)
429{
430 struct sport_uart_port *up = (struct sport_uart_port *)port;
431
432 pr_debug("%s enter\n", __FUNCTION__);
433 return up->name;
434}
435
436static void sport_release_port(struct uart_port *port)
437{
438 pr_debug("%s enter\n", __FUNCTION__);
439}
440
441static int sport_request_port(struct uart_port *port)
442{
443 pr_debug("%s enter\n", __FUNCTION__);
444 return 0;
445}
446
447static void sport_config_port(struct uart_port *port, int flags)
448{
449 struct sport_uart_port *up = (struct sport_uart_port *)port;
450
451 pr_debug("%s enter\n", __FUNCTION__);
452 up->port.type = PORT_BFIN_SPORT;
453}
454
455static int sport_verify_port(struct uart_port *port, struct serial_struct *ser)
456{
457 pr_debug("%s enter\n", __FUNCTION__);
458 return 0;
459}
460
461struct uart_ops sport_uart_ops = {
462 .tx_empty = sport_tx_empty,
463 .set_mctrl = sport_set_mctrl,
464 .get_mctrl = sport_get_mctrl,
465 .stop_tx = sport_stop_tx,
466 .start_tx = sport_start_tx,
467 .stop_rx = sport_stop_rx,
468 .enable_ms = sport_enable_ms,
469 .break_ctl = sport_break_ctl,
470 .startup = sport_startup,
471 .shutdown = sport_shutdown,
472 .set_termios = sport_set_termios,
473 .type = sport_type,
474 .release_port = sport_release_port,
475 .request_port = sport_request_port,
476 .config_port = sport_config_port,
477 .verify_port = sport_verify_port,
478};
479
480static struct sport_uart_port sport_uart_ports[] = {
481 { /* SPORT 0 */
482 .name = "SPORT0",
483 .tx_irq = IRQ_SPORT0_TX,
484 .rx_irq = IRQ_SPORT0_RX,
485 .err_irq= IRQ_SPORT0_ERROR,
486 .port = {
487 .type = PORT_BFIN_SPORT,
488 .iotype = UPIO_MEM,
489 .membase = (void __iomem *)SPORT0_TCR1,
490 .mapbase = SPORT0_TCR1,
491 .irq = IRQ_SPORT0_RX,
492 .uartclk = CONFIG_SPORT_BAUD_RATE,
493 .fifosize = 8,
494 .ops = &sport_uart_ops,
495 .line = 0,
496 },
497 }, { /* SPORT 1 */
498 .name = "SPORT1",
499 .tx_irq = IRQ_SPORT1_TX,
500 .rx_irq = IRQ_SPORT1_RX,
501 .err_irq= IRQ_SPORT1_ERROR,
502 .port = {
503 .type = PORT_BFIN_SPORT,
504 .iotype = UPIO_MEM,
505 .membase = (void __iomem *)SPORT1_TCR1,
506 .mapbase = SPORT1_TCR1,
507 .irq = IRQ_SPORT1_RX,
508 .uartclk = CONFIG_SPORT_BAUD_RATE,
509 .fifosize = 8,
510 .ops = &sport_uart_ops,
511 .line = 1,
512 },
513 }
514};
515
516static struct uart_driver sport_uart_reg = {
517 .owner = THIS_MODULE,
518 .driver_name = "SPORT-UART",
519 .dev_name = "ttySS",
520 .major = 204,
521 .minor = 84,
522 .nr = ARRAY_SIZE(sport_uart_ports),
523 .cons = NULL,
524};
525
526static int sport_uart_suspend(struct platform_device *dev, pm_message_t state)
527{
528 struct sport_uart_port *sport = platform_get_drvdata(dev);
529
530 pr_debug("%s enter\n", __FUNCTION__);
531 if (sport)
532 uart_suspend_port(&sport_uart_reg, &sport->port);
533
534 return 0;
535}
536
537static int sport_uart_resume(struct platform_device *dev)
538{
539 struct sport_uart_port *sport = platform_get_drvdata(dev);
540
541 pr_debug("%s enter\n", __FUNCTION__);
542 if (sport)
543 uart_resume_port(&sport_uart_reg, &sport->port);
544
545 return 0;
546}
547
548static int sport_uart_probe(struct platform_device *dev)
549{
550 pr_debug("%s enter\n", __FUNCTION__);
551 sport_uart_ports[dev->id].port.dev = &dev->dev;
552 uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port);
553 platform_set_drvdata(dev, &sport_uart_ports[dev->id]);
554
555 return 0;
556}
557
558static int sport_uart_remove(struct platform_device *dev)
559{
560 struct sport_uart_port *sport = platform_get_drvdata(dev);
561
562 pr_debug("%s enter\n", __FUNCTION__);
563 platform_set_drvdata(dev, NULL);
564
565 if (sport)
566 uart_remove_one_port(&sport_uart_reg, &sport->port);
567
568 return 0;
569}
570
571static struct platform_driver sport_uart_driver = {
572 .probe = sport_uart_probe,
573 .remove = sport_uart_remove,
574 .suspend = sport_uart_suspend,
575 .resume = sport_uart_resume,
576 .driver = {
577 .name = DRV_NAME,
578 },
579};
580
581static int __init sport_uart_init(void)
582{
583 int ret;
584
585 pr_debug("%s enter\n", __FUNCTION__);
586 ret = uart_register_driver(&sport_uart_reg);
587 if (ret != 0) {
588 printk(KERN_ERR "Failed to register %s:%d\n",
589 sport_uart_reg.driver_name, ret);
590 return ret;
591 }
592
593 ret = platform_driver_register(&sport_uart_driver);
594 if (ret != 0) {
595 printk(KERN_ERR "Failed to register sport uart driver:%d\n", ret);
596 uart_unregister_driver(&sport_uart_reg);
597 }
598
599
600 pr_debug("%s exit\n", __FUNCTION__);
601 return ret;
602}
603
604static void __exit sport_uart_exit(void)
605{
606 pr_debug("%s enter\n", __FUNCTION__);
607 platform_driver_unregister(&sport_uart_driver);
608 uart_unregister_driver(&sport_uart_reg);
609}
610
611module_init(sport_uart_init);
612module_exit(sport_uart_exit);
613
614MODULE_LICENSE("GPL");
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
new file mode 100644
index 000000000000..671d41cc1a3f
--- /dev/null
+++ b/drivers/serial/bfin_sport_uart.h
@@ -0,0 +1,63 @@
1/*
2 * File: linux/drivers/serial/bfin_sport_uart.h
3 *
4 * Based on: include/asm-blackfin/mach-533/bfin_serial_5xx.h
 5 * Author: Roy Huang <roy.huang@analog.com>
6 *
7 * Created: Nov 22, 2006
 8 * Copyright: (C) Analog Devices Inc.
 9 * Description: this driver enables the SPORTs on Blackfin to emulate a UART.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see the file COPYING, or write
23 * to the Free Software Foundation, Inc.,
24 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27
28#define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */
29#define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */
30#define OFFSET_TCLKDIV 0x08 /* Transmit Serial Clock Divider Register */
31#define OFFSET_TFSDIV 0x0C /* Transmit Frame Sync Divider Register */
32#define OFFSET_TX 0x10 /* Transmit Data Register */
33#define OFFSET_RX 0x18 /* Receive Data Register */
34#define OFFSET_RCR1 0x20 /* Receive Configuration 1 Register */
35#define OFFSET_RCR2 0x24 /* Receive Configuration 2 Register */
36#define OFFSET_RCLKDIV 0x28 /* Receive Serial Clock Divider Register */
37#define OFFSET_RFSDIV 0x2c /* Receive Frame Sync Divider Register */
38#define OFFSET_STAT 0x30 /* Status Register */
39
40#define SPORT_GET_TCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_TCR1))
41#define SPORT_GET_TCR2(sport) bfin_read16(((sport)->port.membase + OFFSET_TCR2))
42#define SPORT_GET_TCLKDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_TCLKDIV))
43#define SPORT_GET_TFSDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_TFSDIV))
44#define SPORT_GET_TX(sport) bfin_read16(((sport)->port.membase + OFFSET_TX))
45#define SPORT_GET_RX(sport) bfin_read16(((sport)->port.membase + OFFSET_RX))
46#define SPORT_GET_RX32(sport) bfin_read32(((sport)->port.membase + OFFSET_RX))
47#define SPORT_GET_RCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR1))
48#define SPORT_GET_RCR2(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR2))
49#define SPORT_GET_RCLKDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_RCLKDIV))
50#define SPORT_GET_RFSDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_RFSDIV))
51#define SPORT_GET_STAT(sport) bfin_read16(((sport)->port.membase + OFFSET_STAT))
52
53#define SPORT_PUT_TCR1(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TCR1), v)
54#define SPORT_PUT_TCR2(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TCR2), v)
55#define SPORT_PUT_TCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TCLKDIV), v)
56#define SPORT_PUT_TFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TFSDIV), v)
57#define SPORT_PUT_TX(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TX), v)
58#define SPORT_PUT_RX(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RX), v)
59#define SPORT_PUT_RCR1(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCR1), v)
60#define SPORT_PUT_RCR2(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCR2), v)
61#define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v)
62#define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v)
63#define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v)
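A brief usage sketch of the accessors above, mirroring what sport_uart_err_irq() in the new .c file does with the write-1-to-clear ROVF status bit; the helper name is illustrative, and struct sport_uart_port plus ROVF come from the driver sources above:

/* Sketch only: read the status register and clear the RX-overflow bit. */
static inline void clear_rx_overflow(struct sport_uart_port *up)
{
	if (SPORT_GET_STAT(up) & ROVF)
		SPORT_PUT_STAT(up, ROVF);	/* write-1-to-clear, as in sport_uart_err_irq() */
}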
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index a638ba0679ac..a19dc7ef8861 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -1117,7 +1117,7 @@ int cpm_uart_drv_get_platform_data(struct platform_device *pdev, int is_con)
1117 1117
1118 line = cpm_uart_id2nr(idx); 1118 line = cpm_uart_id2nr(idx);
1119 if(line < 0) { 1119 if(line < 0) {
1120 printk(KERN_ERR"%s(): port %d is not registered", __FUNCTION__, idx); 1120 printk(KERN_ERR"%s(): port %d is not registered", __func__, idx);
1121 return -EINVAL; 1121 return -EINVAL;
1122 } 1122 }
1123 1123
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 88e7c1d5b919..f9fa237aa949 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -1788,7 +1788,7 @@ static unsigned int handle_descr_data(struct e100_serial *info,
1788 1788
1789 if (info->recv_cnt + recvl > 65536) { 1789 if (info->recv_cnt + recvl > 65536) {
1790 printk(KERN_CRIT 1790 printk(KERN_CRIT
1791 "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __FUNCTION__, recvl); 1791 "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __func__, recvl);
1792 return 0; 1792 return 0;
1793 } 1793 }
1794 1794
@@ -1801,7 +1801,7 @@ static unsigned int handle_descr_data(struct e100_serial *info,
1801 append_recv_buffer(info, buffer); 1801 append_recv_buffer(info, buffer);
1802 1802
1803 if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE))) 1803 if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
1804 panic("%s: Failed to allocate memory for receive buffer!\n", __FUNCTION__); 1804 panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
1805 1805
1806 descr->buf = virt_to_phys(buffer->buffer); 1806 descr->buf = virt_to_phys(buffer->buffer);
1807 1807
@@ -1925,7 +1925,7 @@ static int start_recv_dma(struct e100_serial *info)
1925 /* Set up the receiving descriptors */ 1925 /* Set up the receiving descriptors */
1926 for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) { 1926 for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
1927 if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE))) 1927 if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
1928 panic("%s: Failed to allocate memory for receive buffer!\n", __FUNCTION__); 1928 panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
1929 1929
1930 descr[i].ctrl = d_int; 1930 descr[i].ctrl = d_int;
1931 descr[i].buf = virt_to_phys(buffer->buffer); 1931 descr[i].buf = virt_to_phys(buffer->buffer);
@@ -3581,8 +3581,9 @@ rs_tiocmset(struct tty_struct *tty, struct file *file,
3581 unsigned int set, unsigned int clear) 3581 unsigned int set, unsigned int clear)
3582{ 3582{
3583 struct e100_serial *info = (struct e100_serial *)tty->driver_data; 3583 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
3584 unsigned long flags;
3584 3585
3585 lock_kernel(); 3586 local_irq_save(flags);
3586 3587
3587 if (clear & TIOCM_RTS) 3588 if (clear & TIOCM_RTS)
3588 e100_rts(info, 0); 3589 e100_rts(info, 0);
@@ -3604,7 +3605,7 @@ rs_tiocmset(struct tty_struct *tty, struct file *file,
3604 if (set & TIOCM_CD) 3605 if (set & TIOCM_CD)
3605 e100_cd_out(info, 1); 3606 e100_cd_out(info, 1);
3606 3607
3607 unlock_kernel(); 3608 local_irq_restore(flags);
3608 return 0; 3609 return 0;
3609} 3610}
3610 3611
@@ -3613,8 +3614,10 @@ rs_tiocmget(struct tty_struct *tty, struct file *file)
3613{ 3614{
3614 struct e100_serial *info = (struct e100_serial *)tty->driver_data; 3615 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
3615 unsigned int result; 3616 unsigned int result;
3617 unsigned long flags;
3618
3619 local_irq_save(flags);
3616 3620
3617 lock_kernel();
3618 result = 3621 result =
3619 (!E100_RTS_GET(info) ? TIOCM_RTS : 0) 3622 (!E100_RTS_GET(info) ? TIOCM_RTS : 0)
3620 | (!E100_DTR_GET(info) ? TIOCM_DTR : 0) 3623 | (!E100_DTR_GET(info) ? TIOCM_DTR : 0)
@@ -3623,7 +3626,7 @@ rs_tiocmget(struct tty_struct *tty, struct file *file)
3623 | (!E100_CD_GET(info) ? TIOCM_CAR : 0) 3626 | (!E100_CD_GET(info) ? TIOCM_CAR : 0)
3624 | (!E100_CTS_GET(info) ? TIOCM_CTS : 0); 3627 | (!E100_CTS_GET(info) ? TIOCM_CTS : 0);
3625 3628
3626 unlock_kernel(); 3629 local_irq_restore(flags);
3627 3630
3628#ifdef SERIAL_DEBUG_IO 3631#ifdef SERIAL_DEBUG_IO
3629 printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n", 3632 printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
@@ -3702,10 +3705,6 @@ rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3702{ 3705{
3703 struct e100_serial *info = (struct e100_serial *)tty->driver_data; 3706 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
3704 3707
3705 if (tty->termios->c_cflag == old_termios->c_cflag &&
3706 tty->termios->c_iflag == old_termios->c_iflag)
3707 return;
3708
3709 change_speed(info); 3708 change_speed(info);
3710 3709
3711 /* Handle turning off CRTSCTS */ 3710 /* Handle turning off CRTSCTS */
@@ -3808,10 +3807,8 @@ rs_close(struct tty_struct *tty, struct file * filp)
3808#endif 3807#endif
3809 3808
3810 shutdown(info); 3809 shutdown(info);
3811 if (tty->driver->flush_buffer) 3810 rs_flush_buffer(tty);
3812 tty->driver->flush_buffer(tty); 3811 tty_ldisc_flush_buffer(tty);
3813 if (tty->ldisc.flush_buffer)
3814 tty->ldisc.flush_buffer(tty);
3815 tty->closing = 0; 3812 tty->closing = 0;
3816 info->event = 0; 3813 info->event = 0;
3817 info->tty = 0; 3814 info->tty = 0;
@@ -3885,6 +3882,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
3885 * Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO 3882 * Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO
3886 * R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k) 3883 * R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k)
3887 */ 3884 */
3885 lock_kernel();
3888 orig_jiffies = jiffies; 3886 orig_jiffies = jiffies;
3889 while (info->xmit.head != info->xmit.tail || /* More in send queue */ 3887 while (info->xmit.head != info->xmit.tail || /* More in send queue */
3890 (*info->ostatusadr & 0x007f) || /* more in FIFO */ 3888 (*info->ostatusadr & 0x007f) || /* more in FIFO */
@@ -3901,6 +3899,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
3901 curr_time_usec - info->last_tx_active_usec; 3899 curr_time_usec - info->last_tx_active_usec;
3902 } 3900 }
3903 set_current_state(TASK_RUNNING); 3901 set_current_state(TASK_RUNNING);
3902 unlock_kernel();
3904} 3903}
3905 3904
3906/* 3905/*
@@ -4520,7 +4519,7 @@ rs_init(void)
4520 4519
4521 if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, 4520 if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
4522 IRQF_SHARED | IRQF_DISABLED, "serial ", driver)) 4521 IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
4523 panic("%s: Failed to request irq8", __FUNCTION__); 4522 panic("%s: Failed to request irq8", __func__);
4524 4523
4525#endif 4524#endif
4526#endif /* CONFIG_SVINTO_SIM */ 4525#endif /* CONFIG_SVINTO_SIM */
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 168073f12cec..4f1af71e9a1b 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -52,7 +52,7 @@ static unsigned int Submodule_slot;
52#define DPRINT_CONFIG(_x...) ; 52#define DPRINT_CONFIG(_x...) ;
53//#define DPRINT_CONFIG(_x...) printk _x 53//#define DPRINT_CONFIG(_x...) printk _x
54#define NOT_PROGRESS() ; 54#define NOT_PROGRESS() ;
55//#define NOT_PROGRESS() printk("%s : fails %d\n", __FUNCTION__, __LINE__) 55//#define NOT_PROGRESS() printk("%s : fails %d\n", __func__, __LINE__)
56 56
57/* number of characters we want to transmit to the lower level at a time */ 57/* number of characters we want to transmit to the lower level at a time */
58#define MAX_CHARS 256 58#define MAX_CHARS 256
@@ -445,7 +445,7 @@ static int inline port_init(struct ioc3_port *port)
445 sbbr_h = &idd->vma->sbbr_h; 445 sbbr_h = &idd->vma->sbbr_h;
446 ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf; 446 ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf;
447 DPRINT_CONFIG(("%s: ring_pci_addr 0x%p\n", 447 DPRINT_CONFIG(("%s: ring_pci_addr 0x%p\n",
448 __FUNCTION__, (void *)ring_pci_addr)); 448 __func__, (void *)ring_pci_addr));
449 449
450 writel((unsigned int)((uint64_t) ring_pci_addr >> 32), sbbr_h); 450 writel((unsigned int)((uint64_t) ring_pci_addr >> 32), sbbr_h);
451 writel((unsigned int)ring_pci_addr | BUF_SIZE_BIT, sbbr_l); 451 writel((unsigned int)ring_pci_addr | BUF_SIZE_BIT, sbbr_l);
@@ -593,7 +593,7 @@ config_port(struct ioc3_port *port,
593 593
594 DPRINT_CONFIG(("%s: line %d baud %d byte_size %d stop %d parenb %d " 594 DPRINT_CONFIG(("%s: line %d baud %d byte_size %d stop %d parenb %d "
595 "parodd %d\n", 595 "parodd %d\n",
596 __FUNCTION__, ((struct uart_port *)port->ip_port)->line, 596 __func__, ((struct uart_port *)port->ip_port)->line,
597 baud, byte_size, stop_bits, parenb, parodd)); 597 baud, byte_size, stop_bits, parenb, parodd));
598 598
599 if (set_baud(port, baud)) 599 if (set_baud(port, baud))
@@ -871,14 +871,14 @@ static int ioc3_set_proto(struct ioc3_port *port, int proto)
871 default: 871 default:
872 case PROTO_RS232: 872 case PROTO_RS232:
873 /* Clear the appropriate GIO pin */ 873 /* Clear the appropriate GIO pin */
874 DPRINT_CONFIG(("%s: rs232\n", __FUNCTION__)); 874 DPRINT_CONFIG(("%s: rs232\n", __func__));
875 writel(0, (&port->ip_idd->vma->gppr[0] 875 writel(0, (&port->ip_idd->vma->gppr[0]
876 + hooks->rs422_select_pin)); 876 + hooks->rs422_select_pin));
877 break; 877 break;
878 878
879 case PROTO_RS422: 879 case PROTO_RS422:
880 /* Set the appropriate GIO pin */ 880 /* Set the appropriate GIO pin */
881 DPRINT_CONFIG(("%s: rs422\n", __FUNCTION__)); 881 DPRINT_CONFIG(("%s: rs422\n", __func__));
882 writel(1, (&port->ip_idd->vma->gppr[0] 882 writel(1, (&port->ip_idd->vma->gppr[0]
883 + hooks->rs422_select_pin)); 883 + hooks->rs422_select_pin));
884 break; 884 break;
@@ -988,7 +988,7 @@ ioc3_change_speed(struct uart_port *the_port,
988 } 988 }
989 baud = uart_get_baud_rate(the_port, new_termios, old_termios, 989 baud = uart_get_baud_rate(the_port, new_termios, old_termios,
990 MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED); 990 MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED);
991 DPRINT_CONFIG(("%s: returned baud %d for line %d\n", __FUNCTION__, baud, 991 DPRINT_CONFIG(("%s: returned baud %d for line %d\n", __func__, baud,
992 the_port->line)); 992 the_port->line));
993 993
994 if (!the_port->fifosize) 994 if (!the_port->fifosize)
@@ -1026,7 +1026,7 @@ ioc3_change_speed(struct uart_port *the_port,
1026 DPRINT_CONFIG(("%s : port 0x%p line %d cflag 0%o " 1026 DPRINT_CONFIG(("%s : port 0x%p line %d cflag 0%o "
1027 "config_port(baud %d data %d stop %d penable %d " 1027 "config_port(baud %d data %d stop %d penable %d "
1028 " parity %d), notification 0x%x\n", 1028 " parity %d), notification 0x%x\n",
1029 __FUNCTION__, (void *)port, the_port->line, cflag, baud, 1029 __func__, (void *)port, the_port->line, cflag, baud,
1030 new_data, new_stop, new_parity_enable, new_parity, 1030 new_data, new_stop, new_parity_enable, new_parity,
1031 the_port->ignore_status_mask)); 1031 the_port->ignore_status_mask));
1032 1032
@@ -1919,7 +1919,7 @@ static inline int ioc3_serial_core_attach( struct ioc3_submodule *is,
1919 struct pci_dev *pdev = idd->pdev; 1919 struct pci_dev *pdev = idd->pdev;
1920 1920
1921 DPRINT_CONFIG(("%s: attach pdev 0x%p - card_ptr 0x%p\n", 1921 DPRINT_CONFIG(("%s: attach pdev 0x%p - card_ptr 0x%p\n",
1922 __FUNCTION__, pdev, (void *)card_ptr)); 1922 __func__, pdev, (void *)card_ptr));
1923 1923
1924 if (!card_ptr) 1924 if (!card_ptr)
1925 return -ENODEV; 1925 return -ENODEV;
@@ -1933,7 +1933,7 @@ static inline int ioc3_serial_core_attach( struct ioc3_submodule *is,
1933 port->ip_port = the_port; 1933 port->ip_port = the_port;
1934 1934
1935 DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p [%d/%d]\n", 1935 DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p [%d/%d]\n",
1936 __FUNCTION__, (void *)the_port, (void *)port, 1936 __func__, (void *)the_port, (void *)port,
1937 phys_port, ii)); 1937 phys_port, ii));
1938 1938
1939 /* membase, iobase and mapbase just need to be non-0 */ 1939 /* membase, iobase and mapbase just need to be non-0 */
@@ -1950,7 +1950,7 @@ static inline int ioc3_serial_core_attach( struct ioc3_submodule *is,
1950 if (uart_add_one_port(&ioc3_uart, the_port) < 0) { 1950 if (uart_add_one_port(&ioc3_uart, the_port) < 0) {
1951 printk(KERN_WARNING 1951 printk(KERN_WARNING
1952 "%s: unable to add port %d bus %d\n", 1952 "%s: unable to add port %d bus %d\n",
1953 __FUNCTION__, the_port->line, pdev->bus->number); 1953 __func__, the_port->line, pdev->bus->number);
1954 } else { 1954 } else {
1955 DPRINT_CONFIG(("IOC3 serial port %d irq %d bus %d\n", 1955 DPRINT_CONFIG(("IOC3 serial port %d irq %d bus %d\n",
1956 the_port->line, the_port->irq, pdev->bus->number)); 1956 the_port->line, the_port->irq, pdev->bus->number));
@@ -2017,7 +2017,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2017 struct ioc3_port *ports[PORTS_PER_CARD]; 2017 struct ioc3_port *ports[PORTS_PER_CARD];
2018 int phys_port; 2018 int phys_port;
2019 2019
2020 DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __FUNCTION__, is, idd)); 2020 DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd));
2021 2021
2022 card_ptr = kzalloc(sizeof(struct ioc3_card), GFP_KERNEL); 2022 card_ptr = kzalloc(sizeof(struct ioc3_card), GFP_KERNEL);
2023 if (!card_ptr) { 2023 if (!card_ptr) {
@@ -2067,7 +2067,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2067 2067
2068 DPRINT_CONFIG(("%s : Port A ip_serial_regs 0x%p " 2068 DPRINT_CONFIG(("%s : Port A ip_serial_regs 0x%p "
2069 "ip_uart_regs 0x%p\n", 2069 "ip_uart_regs 0x%p\n",
2070 __FUNCTION__, 2070 __func__,
2071 (void *)port->ip_serial_regs, 2071 (void *)port->ip_serial_regs,
2072 (void *)port->ip_uart_regs)); 2072 (void *)port->ip_uart_regs));
2073 2073
@@ -2082,7 +2082,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2082 DPRINT_CONFIG(("%s : Port A ip_cpu_ringbuf 0x%p " 2082 DPRINT_CONFIG(("%s : Port A ip_cpu_ringbuf 0x%p "
2083 "ip_dma_ringbuf 0x%p, ip_inring 0x%p " 2083 "ip_dma_ringbuf 0x%p, ip_inring 0x%p "
2084 "ip_outring 0x%p\n", 2084 "ip_outring 0x%p\n",
2085 __FUNCTION__, 2085 __func__,
2086 (void *)port->ip_cpu_ringbuf, 2086 (void *)port->ip_cpu_ringbuf,
2087 (void *)port->ip_dma_ringbuf, 2087 (void *)port->ip_dma_ringbuf,
2088 (void *)port->ip_inring, 2088 (void *)port->ip_inring,
@@ -2094,7 +2094,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2094 2094
2095 DPRINT_CONFIG(("%s : Port B ip_serial_regs 0x%p " 2095 DPRINT_CONFIG(("%s : Port B ip_serial_regs 0x%p "
2096 "ip_uart_regs 0x%p\n", 2096 "ip_uart_regs 0x%p\n",
2097 __FUNCTION__, 2097 __func__,
2098 (void *)port->ip_serial_regs, 2098 (void *)port->ip_serial_regs,
2099 (void *)port->ip_uart_regs)); 2099 (void *)port->ip_uart_regs));
2100 2100
@@ -2108,7 +2108,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2108 DPRINT_CONFIG(("%s : Port B ip_cpu_ringbuf 0x%p " 2108 DPRINT_CONFIG(("%s : Port B ip_cpu_ringbuf 0x%p "
2109 "ip_dma_ringbuf 0x%p, ip_inring 0x%p " 2109 "ip_dma_ringbuf 0x%p, ip_inring 0x%p "
2110 "ip_outring 0x%p\n", 2110 "ip_outring 0x%p\n",
2111 __FUNCTION__, 2111 __func__,
2112 (void *)port->ip_cpu_ringbuf, 2112 (void *)port->ip_cpu_ringbuf,
2113 (void *)port->ip_dma_ringbuf, 2113 (void *)port->ip_dma_ringbuf,
2114 (void *)port->ip_inring, 2114 (void *)port->ip_inring,
@@ -2116,7 +2116,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2116 } 2116 }
2117 2117
2118 DPRINT_CONFIG(("%s : port %d [addr 0x%p] card_ptr 0x%p", 2118 DPRINT_CONFIG(("%s : port %d [addr 0x%p] card_ptr 0x%p",
2119 __FUNCTION__, 2119 __func__,
2120 phys_port, (void *)port, (void *)card_ptr)); 2120 phys_port, (void *)port, (void *)card_ptr));
2121 DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n", 2121 DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n",
2122 (void *)port->ip_serial_regs, 2122 (void *)port->ip_serial_regs,
@@ -2127,7 +2127,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2127 2127
2128 DPRINT_CONFIG(("%s: phys_port %d port 0x%p inring 0x%p " 2128 DPRINT_CONFIG(("%s: phys_port %d port 0x%p inring 0x%p "
2129 "outring 0x%p\n", 2129 "outring 0x%p\n",
2130 __FUNCTION__, 2130 __func__,
2131 phys_port, (void *)port, 2131 phys_port, (void *)port,
2132 (void *)port->ip_inring, 2132 (void *)port->ip_inring,
2133 (void *)port->ip_outring)); 2133 (void *)port->ip_outring));
@@ -2170,7 +2170,7 @@ static int __devinit ioc3uart_init(void)
2170 if ((ret = uart_register_driver(&ioc3_uart)) < 0) { 2170 if ((ret = uart_register_driver(&ioc3_uart)) < 0) {
2171 printk(KERN_WARNING 2171 printk(KERN_WARNING
2172 "%s: Couldn't register IOC3 uart serial driver\n", 2172 "%s: Couldn't register IOC3 uart serial driver\n",
2173 __FUNCTION__); 2173 __func__);
2174 return ret; 2174 return ret;
2175 } 2175 }
2176 ret = ioc3_register_submodule(&ioc3uart_submodule); 2176 ret = ioc3_register_submodule(&ioc3uart_submodule);
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 0c179384fb0c..49b8a82b7b9f 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -889,7 +889,7 @@ static int inline port_init(struct ioc4_port *port)
889 889
890 ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf; 890 ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf;
891 DPRINT_CONFIG(("%s: ring_pci_addr 0x%lx\n", 891 DPRINT_CONFIG(("%s: ring_pci_addr 0x%lx\n",
892 __FUNCTION__, ring_pci_addr)); 892 __func__, ring_pci_addr));
893 893
894 writel((unsigned int)((uint64_t)ring_pci_addr >> 32), sbbr_h); 894 writel((unsigned int)((uint64_t)ring_pci_addr >> 32), sbbr_h);
895 writel((unsigned int)ring_pci_addr | IOC4_BUF_SIZE_BIT, sbbr_l); 895 writel((unsigned int)ring_pci_addr | IOC4_BUF_SIZE_BIT, sbbr_l);
@@ -1028,7 +1028,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
1028 spin_lock_irqsave(&soft->is_ir_lock, flag); 1028 spin_lock_irqsave(&soft->is_ir_lock, flag);
1029 printk ("%s : %d : mem 0x%p sio_ir 0x%x sio_ies 0x%x " 1029 printk ("%s : %d : mem 0x%p sio_ir 0x%x sio_ies 0x%x "
1030 "other_ir 0x%x other_ies 0x%x mask 0x%x\n", 1030 "other_ir 0x%x other_ies 0x%x mask 0x%x\n",
1031 __FUNCTION__, __LINE__, 1031 __func__, __LINE__,
1032 (void *)mem, readl(&mem->sio_ir.raw), 1032 (void *)mem, readl(&mem->sio_ir.raw),
1033 readl(&mem->sio_ies.raw), 1033 readl(&mem->sio_ies.raw),
1034 readl(&mem->other_ir.raw), 1034 readl(&mem->other_ir.raw),
@@ -1155,14 +1155,14 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
1155 (TOTAL_RING_BUF_SIZE - 1)) == 0)); 1155 (TOTAL_RING_BUF_SIZE - 1)) == 0));
1156 DPRINT_CONFIG(("%s : ip_cpu_ringbuf 0x%p " 1156 DPRINT_CONFIG(("%s : ip_cpu_ringbuf 0x%p "
1157 "ip_dma_ringbuf 0x%p\n", 1157 "ip_dma_ringbuf 0x%p\n",
1158 __FUNCTION__, 1158 __func__,
1159 (void *)port->ip_cpu_ringbuf, 1159 (void *)port->ip_cpu_ringbuf,
1160 (void *)port->ip_dma_ringbuf)); 1160 (void *)port->ip_dma_ringbuf));
1161 port->ip_inring = RING(port, RX_0_OR_2); 1161 port->ip_inring = RING(port, RX_0_OR_2);
1162 port->ip_outring = RING(port, TX_0_OR_2); 1162 port->ip_outring = RING(port, TX_0_OR_2);
1163 } 1163 }
1164 DPRINT_CONFIG(("%s : port %d [addr 0x%p] control 0x%p", 1164 DPRINT_CONFIG(("%s : port %d [addr 0x%p] control 0x%p",
1165 __FUNCTION__, 1165 __func__,
1166 port_number, (void *)port, (void *)control)); 1166 port_number, (void *)port, (void *)control));
1167 DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n", 1167 DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n",
1168 (void *)port->ip_serial_regs, 1168 (void *)port->ip_serial_regs,
@@ -1173,7 +1173,7 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
1173 1173
1174 DPRINT_CONFIG(("%s: port_number %d port 0x%p inring 0x%p " 1174 DPRINT_CONFIG(("%s: port_number %d port 0x%p inring 0x%p "
1175 "outring 0x%p\n", 1175 "outring 0x%p\n",
1176 __FUNCTION__, 1176 __func__,
1177 port_number, (void *)port, 1177 port_number, (void *)port,
1178 (void *)port->ip_inring, 1178 (void *)port->ip_inring,
1179 (void *)port->ip_outring)); 1179 (void *)port->ip_outring));
@@ -1317,7 +1317,7 @@ config_port(struct ioc4_port *port,
1317 int spiniter = 0; 1317 int spiniter = 0;
1318 1318
1319 DPRINT_CONFIG(("%s: baud %d byte_size %d stop %d parenb %d parodd %d\n", 1319 DPRINT_CONFIG(("%s: baud %d byte_size %d stop %d parenb %d parodd %d\n",
1320 __FUNCTION__, baud, byte_size, stop_bits, parenb, parodd)); 1320 __func__, baud, byte_size, stop_bits, parenb, parodd));
1321 1321
1322 if (set_baud(port, baud)) 1322 if (set_baud(port, baud))
1323 return 1; 1323 return 1;
@@ -1725,7 +1725,7 @@ ioc4_change_speed(struct uart_port *the_port,
1725 } 1725 }
1726 baud = uart_get_baud_rate(the_port, new_termios, old_termios, 1726 baud = uart_get_baud_rate(the_port, new_termios, old_termios,
1727 MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED); 1727 MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED);
1728 DPRINT_CONFIG(("%s: returned baud %d\n", __FUNCTION__, baud)); 1728 DPRINT_CONFIG(("%s: returned baud %d\n", __func__, baud));
1729 1729
1730 /* default is 9600 */ 1730 /* default is 9600 */
1731 if (!baud) 1731 if (!baud)
@@ -1765,7 +1765,7 @@ ioc4_change_speed(struct uart_port *the_port,
1765 DPRINT_CONFIG(("%s : port 0x%p cflag 0%o " 1765 DPRINT_CONFIG(("%s : port 0x%p cflag 0%o "
1766 "config_port(baud %d data %d stop %d p enable %d parity %d)," 1766 "config_port(baud %d data %d stop %d p enable %d parity %d),"
1767 " notification 0x%x\n", 1767 " notification 0x%x\n",
1768 __FUNCTION__, (void *)port, cflag, baud, new_data, new_stop, 1768 __func__, (void *)port, cflag, baud, new_data, new_stop,
1769 new_parity_enable, new_parity, the_port->ignore_status_mask)); 1769 new_parity_enable, new_parity, the_port->ignore_status_mask));
1770 1770
1771 if ((config_port(port, baud, /* baud */ 1771 if ((config_port(port, baud, /* baud */
@@ -2715,7 +2715,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
2715 2715
2716 2716
2717 DPRINT_CONFIG(("%s: attach pdev 0x%p - control 0x%p\n", 2717 DPRINT_CONFIG(("%s: attach pdev 0x%p - control 0x%p\n",
2718 __FUNCTION__, pdev, (void *)control)); 2718 __func__, pdev, (void *)control));
2719 2719
2720 if (!control) 2720 if (!control)
2721 return -ENODEV; 2721 return -ENODEV;
@@ -2734,7 +2734,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
2734 port->ip_all_ports[port_type_idx] = the_port; 2734 port->ip_all_ports[port_type_idx] = the_port;
2735 2735
2736 DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p : type %s\n", 2736 DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p : type %s\n",
2737 __FUNCTION__, (void *)the_port, 2737 __func__, (void *)the_port,
2738 (void *)port, 2738 (void *)port,
2739 port_type == PROTO_RS232 ? "rs232" : "rs422")); 2739 port_type == PROTO_RS232 ? "rs232" : "rs422"));
2740 2740
@@ -2752,7 +2752,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
2752 if (uart_add_one_port(u_driver, the_port) < 0) { 2752 if (uart_add_one_port(u_driver, the_port) < 0) {
2753 printk(KERN_WARNING 2753 printk(KERN_WARNING
2754 "%s: unable to add port %d bus %d\n", 2754 "%s: unable to add port %d bus %d\n",
2755 __FUNCTION__, the_port->line, pdev->bus->number); 2755 __func__, the_port->line, pdev->bus->number);
2756 } else { 2756 } else {
2757 DPRINT_CONFIG( 2757 DPRINT_CONFIG(
2758 ("IOC4 serial port %d irq = %d, bus %d\n", 2758 ("IOC4 serial port %d irq = %d, bus %d\n",
@@ -2777,7 +2777,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
2777 int ret = 0; 2777 int ret = 0;
2778 2778
2779 2779
2780 DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __FUNCTION__, idd->idd_pdev, 2780 DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, idd->idd_pdev,
2781 idd->idd_pci_id)); 2781 idd->idd_pci_id));
2782 2782
2783 /* PCI-RT does not bring out serial connections. 2783 /* PCI-RT does not bring out serial connections.
@@ -2806,7 +2806,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
2806 goto out2; 2806 goto out2;
2807 } 2807 }
2808 DPRINT_CONFIG(("%s : mem 0x%p, serial 0x%p\n", 2808 DPRINT_CONFIG(("%s : mem 0x%p, serial 0x%p\n",
2809 __FUNCTION__, (void *)idd->idd_misc_regs, 2809 __func__, (void *)idd->idd_misc_regs,
2810 (void *)serial)); 2810 (void *)serial));
2811 2811
2812 /* Get memory for the new card */ 2812 /* Get memory for the new card */
@@ -2858,7 +2858,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
2858 } else { 2858 } else {
2859 printk(KERN_WARNING 2859 printk(KERN_WARNING
2860 "%s : request_irq fails for IRQ 0x%x\n ", 2860 "%s : request_irq fails for IRQ 0x%x\n ",
2861 __FUNCTION__, idd->idd_pdev->irq); 2861 __func__, idd->idd_pdev->irq);
2862 } 2862 }
2863 ret = ioc4_attach_local(idd); 2863 ret = ioc4_attach_local(idd);
2864 if (ret) 2864 if (ret)
@@ -2911,13 +2911,13 @@ int ioc4_serial_init(void)
2911 if ((ret = uart_register_driver(&ioc4_uart_rs232)) < 0) { 2911 if ((ret = uart_register_driver(&ioc4_uart_rs232)) < 0) {
2912 printk(KERN_WARNING 2912 printk(KERN_WARNING
2913 "%s: Couldn't register rs232 IOC4 serial driver\n", 2913 "%s: Couldn't register rs232 IOC4 serial driver\n",
2914 __FUNCTION__); 2914 __func__);
2915 return ret; 2915 return ret;
2916 } 2916 }
2917 if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) { 2917 if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) {
2918 printk(KERN_WARNING 2918 printk(KERN_WARNING
2919 "%s: Couldn't register rs422 IOC4 serial driver\n", 2919 "%s: Couldn't register rs422 IOC4 serial driver\n",
2920 __FUNCTION__); 2920 __func__);
2921 return ret; 2921 return ret;
2922 } 2922 }
2923 2923
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index 12c934a1f274..8871aaa3dba6 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -373,6 +373,7 @@ struct neo_uart_struct {
373#define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME "Neo 2 - DB9 Universal PCI - Powered Ring Indicator" 373#define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME "Neo 2 - DB9 Universal PCI - Powered Ring Indicator"
374#define PCI_DEVICE_NEO_2RJ45_PCI_NAME "Neo 2 - RJ45 Universal PCI" 374#define PCI_DEVICE_NEO_2RJ45_PCI_NAME "Neo 2 - RJ45 Universal PCI"
375#define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator" 375#define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator"
376#define PCIE_DEVICE_NEO_IBM_PCI_NAME "Neo 4 - PCI Express - IBM"
376 377
377/* 378/*
378 * Our Global Variables. 379 * Our Global Variables.
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index 6767ee381cd1..338cf8a08b43 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -82,7 +82,10 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
82 /* store the info for the board we've found */ 82 /* store the info for the board we've found */
83 brd->boardnum = adapter_count++; 83 brd->boardnum = adapter_count++;
84 brd->pci_dev = pdev; 84 brd->pci_dev = pdev;
85 brd->maxports = 2; 85 if (pdev->device == PCIE_DEVICE_ID_NEO_4_IBM)
86 brd->maxports = 4;
87 else
88 brd->maxports = 2;
86 89
87 spin_lock_init(&brd->bd_lock); 90 spin_lock_init(&brd->bd_lock);
88 spin_lock_init(&brd->bd_intr_lock); 91 spin_lock_init(&brd->bd_intr_lock);
@@ -208,6 +211,7 @@ static struct pci_device_id jsm_pci_tbl[] = {
208 { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9PRI), 0, 0, 1 }, 211 { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9PRI), 0, 0, 1 },
209 { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45), 0, 0, 2 }, 212 { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45), 0, 0, 2 },
210 { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45PRI), 0, 0, 3 }, 213 { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45PRI), 0, 0, 3 },
214 { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4_IBM), 0, 0, 4 },
211 { 0, } 215 { 0, }
212}; 216};
213MODULE_DEVICE_TABLE(pci, jsm_pci_tbl); 217MODULE_DEVICE_TABLE(pci, jsm_pci_tbl);
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index 9cf03327386a..eadc1ab6bbce 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -96,12 +96,14 @@ static void cleanup_kgdboc(void)
96 96
97static int kgdboc_get_char(void) 97static int kgdboc_get_char(void)
98{ 98{
99 return kgdb_tty_driver->poll_get_char(kgdb_tty_driver, kgdb_tty_line); 99 return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
100 kgdb_tty_line);
100} 101}
101 102
102static void kgdboc_put_char(u8 chr) 103static void kgdboc_put_char(u8 chr)
103{ 104{
104 kgdb_tty_driver->poll_put_char(kgdb_tty_driver, kgdb_tty_line, chr); 105 kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
106 kgdb_tty_line, chr);
105} 107}
106 108
107static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) 109static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index ddd3aa50d4ad..43af40d59b8a 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -1072,18 +1072,6 @@ static int mcfrs_ioctl(struct tty_struct *tty, struct file * file,
1072 tty_wait_until_sent(tty, 0); 1072 tty_wait_until_sent(tty, 0);
1073 send_break(info, arg ? arg*(HZ/10) : HZ/4); 1073 send_break(info, arg ? arg*(HZ/10) : HZ/4);
1074 return 0; 1074 return 0;
1075 case TIOCGSOFTCAR:
1076 error = put_user(C_CLOCAL(tty) ? 1 : 0,
1077 (unsigned long *) arg);
1078 if (error)
1079 return error;
1080 return 0;
1081 case TIOCSSOFTCAR:
1082 get_user(arg, (unsigned long *) arg);
1083 tty->termios->c_cflag =
1084 ((tty->termios->c_cflag & ~CLOCAL) |
1085 (arg ? CLOCAL : 0));
1086 return 0;
1087 case TIOCGSERIAL: 1075 case TIOCGSERIAL:
1088 if (access_ok(VERIFY_WRITE, (void *) arg, 1076 if (access_ok(VERIFY_WRITE, (void *) arg,
1089 sizeof(struct serial_struct))) 1077 sizeof(struct serial_struct)))
@@ -1222,8 +1210,7 @@ static void mcfrs_close(struct tty_struct *tty, struct file * filp)
1222 } else 1210 } else
1223#endif 1211#endif
1224 shutdown(info); 1212 shutdown(info);
1225 if (tty->driver->flush_buffer) 1213 mcfrs_flush_buffer(tty);
1226 tty->driver->flush_buffer(tty);
1227 tty_ldisc_flush(tty); 1214 tty_ldisc_flush(tty);
1228 1215
1229 tty->closing = 0; 1216 tty->closing = 0;
@@ -1276,6 +1263,8 @@ mcfrs_wait_until_sent(struct tty_struct *tty, int timeout)
1276 * Note: we have to use pretty tight timings here to satisfy 1263 * Note: we have to use pretty tight timings here to satisfy
1277 * the NIST-PCTS. 1264 * the NIST-PCTS.
1278 */ 1265 */
1266 lock_kernel();
1267
1279 fifo_time = (MCF5272_FIFO_SIZE * HZ * 10) / info->baud; 1268 fifo_time = (MCF5272_FIFO_SIZE * HZ * 10) / info->baud;
1280 char_time = fifo_time / 5; 1269 char_time = fifo_time / 5;
1281 if (char_time == 0) 1270 if (char_time == 0)
@@ -1312,6 +1301,7 @@ mcfrs_wait_until_sent(struct tty_struct *tty, int timeout)
1312 if (timeout && time_after(jiffies, orig_jiffies + timeout)) 1301 if (timeout && time_after(jiffies, orig_jiffies + timeout))
1313 break; 1302 break;
1314 } 1303 }
1304 unlock_kernel();
1315#else 1305#else
1316 /* 1306 /*
1317 * For the other coldfire models, assume all data has been sent 1307 * For the other coldfire models, assume all data has been sent
@@ -1907,7 +1897,7 @@ static struct tty_driver *mcfrs_console_device(struct console *c, int *index)
1907 * This is used for console output. 1897 * This is used for console output.
1908 */ 1898 */
1909 1899
1910void mcfrs_put_char(char ch) 1900int mcfrs_put_char(char ch)
1911{ 1901{
1912 volatile unsigned char *uartp; 1902 volatile unsigned char *uartp;
1913 unsigned long flags; 1903 unsigned long flags;
@@ -1931,7 +1921,7 @@ void mcfrs_put_char(char ch)
1931 mcfrs_init_console(); /* try and get it back */ 1921 mcfrs_init_console(); /* try and get it back */
1932 local_irq_restore(flags); 1922 local_irq_restore(flags);
1933 1923
1934 return; 1924 return 1;
1935} 1925}
1936 1926
1937 1927
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index d93b3578c5e2..efc971d9647b 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -783,7 +783,9 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
783 } 783 }
784 } 784 }
785 785
786 spin_unlock(&port->lock);
786 tty_flip_buffer_push(tty); 787 tty_flip_buffer_push(tty);
788 spin_lock(&port->lock);
787 789
788 return psc_ops->raw_rx_rdy(port); 790 return psc_ops->raw_rx_rdy(port);
789} 791}
@@ -1221,8 +1223,8 @@ static struct of_device_id mpc52xx_uart_of_match[] = {
1221#endif 1223#endif
1222#ifdef CONFIG_PPC_MPC512x 1224#ifdef CONFIG_PPC_MPC512x
1223 { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, }, 1225 { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, },
1224 {},
1225#endif 1226#endif
1227 {},
1226}; 1228};
1227 1229
1228static int __devinit 1230static int __devinit
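A note on the mpc52xx hunk above: tty_flip_buffer_push() may hand the received characters to the line discipline synchronously (for instance when low_latency is set), and that path can call back into the driver and try to take port->lock again. Dropping the lock around the push, presumably the motivation here, is the usual pattern in serial receive handlers; a minimal sketch using the same names as the driver:

	/* never call into the tty flip machinery while holding the port lock */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tty);
	spin_lock(&port->lock);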
diff --git a/drivers/serial/netx-serial.c b/drivers/serial/netx-serial.c
index 3123ffeac8ad..81ac9bb4f39b 100644
--- a/drivers/serial/netx-serial.c
+++ b/drivers/serial/netx-serial.c
@@ -287,6 +287,7 @@ static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl)
287{ 287{
288 unsigned int val; 288 unsigned int val;
289 289
290 /* FIXME: Locking needed ? */
290 if (mctrl & TIOCM_RTS) { 291 if (mctrl & TIOCM_RTS) {
291 val = readl(port->membase + UART_RTS_CR); 292 val = readl(port->membase + UART_RTS_CR);
292 writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR); 293 writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR);
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 4ffa2585429a..2b6a013639e6 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -1022,6 +1022,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
1022 struct uart_port *port = &ourport->port; 1022 struct uart_port *port = &ourport->port;
1023 struct s3c2410_uartcfg *cfg; 1023 struct s3c2410_uartcfg *cfg;
1024 struct resource *res; 1024 struct resource *res;
1025 int ret;
1025 1026
1026 dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev); 1027 dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev);
1027 1028
@@ -1064,9 +1065,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
1064 1065
1065 port->mapbase = res->start; 1066 port->mapbase = res->start;
1066 port->membase = S3C24XX_VA_UART + (res->start - S3C24XX_PA_UART); 1067 port->membase = S3C24XX_VA_UART + (res->start - S3C24XX_PA_UART);
1067 port->irq = platform_get_irq(platdev, 0); 1068 ret = platform_get_irq(platdev, 0);
1068 if (port->irq < 0) 1069 if (ret < 0)
1069 port->irq = 0; 1070 port->irq = 0;
1071 else
1072 port->irq = ret;
1070 1073
1071 ourport->clk = clk_get(&platdev->dev, "uart"); 1074 ourport->clk = clk_get(&platdev->dev, "uart");
1072 1075
@@ -1093,13 +1096,13 @@ static int s3c24xx_serial_probe(struct platform_device *dev,
1093 ourport = &s3c24xx_serial_ports[probe_index]; 1096 ourport = &s3c24xx_serial_ports[probe_index];
1094 probe_index++; 1097 probe_index++;
1095 1098
1096 dbg("%s: initialising port %p...\n", __FUNCTION__, ourport); 1099 dbg("%s: initialising port %p...\n", __func__, ourport);
1097 1100
1098 ret = s3c24xx_serial_init_port(ourport, info, dev); 1101 ret = s3c24xx_serial_init_port(ourport, info, dev);
1099 if (ret < 0) 1102 if (ret < 0)
1100 goto probe_err; 1103 goto probe_err;
1101 1104
1102 dbg("%s: adding port\n", __FUNCTION__); 1105 dbg("%s: adding port\n", __func__);
1103 uart_add_one_port(&s3c24xx_uart_drv, &ourport->port); 1106 uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
1104 platform_set_drvdata(dev, &ourport->port); 1107 platform_set_drvdata(dev, &ourport->port);
1105 1108
@@ -1584,7 +1587,7 @@ static int s3c2412_serial_resetport(struct uart_port *port,
1584 unsigned long ucon = rd_regl(port, S3C2410_UCON); 1587 unsigned long ucon = rd_regl(port, S3C2410_UCON);
1585 1588
1586 dbg("%s: port=%p (%08lx), cfg=%p\n", 1589 dbg("%s: port=%p (%08lx), cfg=%p\n",
1587 __FUNCTION__, port, port->mapbase, cfg); 1590 __func__, port, port->mapbase, cfg);
1588 1591
1589 /* ensure we don't change the clock settings... */ 1592 /* ensure we don't change the clock settings... */
1590 1593
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index 67b2338913c2..62b38582f5e9 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -655,7 +655,7 @@ void __init sa1100_register_uart_fns(struct sa1100_port_fns *fns)
655void __init sa1100_register_uart(int idx, int port) 655void __init sa1100_register_uart(int idx, int port)
656{ 656{
657 if (idx >= NR_PORTS) { 657 if (idx >= NR_PORTS) {
658 printk(KERN_ERR "%s: bad index number %d\n", __FUNCTION__, idx); 658 printk(KERN_ERR "%s: bad index number %d\n", __func__, idx);
659 return; 659 return;
660 } 660 }
661 661
@@ -682,7 +682,7 @@ void __init sa1100_register_uart(int idx, int port)
682 break; 682 break;
683 683
684 default: 684 default:
685 printk(KERN_ERR "%s: bad port number %d\n", __FUNCTION__, port); 685 printk(KERN_ERR "%s: bad port number %d\n", __func__, port);
686 } 686 }
687} 687}
688 688
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 977ce820ce30..1e2b9d826f69 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -422,6 +422,7 @@ uart_get_divisor(struct uart_port *port, unsigned int baud)
422 422
423EXPORT_SYMBOL(uart_get_divisor); 423EXPORT_SYMBOL(uart_get_divisor);
424 424
425/* FIXME: Consistent locking policy */
425static void 426static void
426uart_change_speed(struct uart_state *state, struct ktermios *old_termios) 427uart_change_speed(struct uart_state *state, struct ktermios *old_termios)
427{ 428{
@@ -454,27 +455,30 @@ uart_change_speed(struct uart_state *state, struct ktermios *old_termios)
454 port->ops->set_termios(port, termios, old_termios); 455 port->ops->set_termios(port, termios, old_termios);
455} 456}
456 457
457static inline void 458static inline int
458__uart_put_char(struct uart_port *port, struct circ_buf *circ, unsigned char c) 459__uart_put_char(struct uart_port *port, struct circ_buf *circ, unsigned char c)
459{ 460{
460 unsigned long flags; 461 unsigned long flags;
462 int ret = 0;
461 463
462 if (!circ->buf) 464 if (!circ->buf)
463 return; 465 return 0;
464 466
465 spin_lock_irqsave(&port->lock, flags); 467 spin_lock_irqsave(&port->lock, flags);
466 if (uart_circ_chars_free(circ) != 0) { 468 if (uart_circ_chars_free(circ) != 0) {
467 circ->buf[circ->head] = c; 469 circ->buf[circ->head] = c;
468 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); 470 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
471 ret = 1;
469 } 472 }
470 spin_unlock_irqrestore(&port->lock, flags); 473 spin_unlock_irqrestore(&port->lock, flags);
474 return ret;
471} 475}
472 476
473static void uart_put_char(struct tty_struct *tty, unsigned char ch) 477static int uart_put_char(struct tty_struct *tty, unsigned char ch)
474{ 478{
475 struct uart_state *state = tty->driver_data; 479 struct uart_state *state = tty->driver_data;
476 480
477 __uart_put_char(state->port, &state->info->xmit, ch); 481 return __uart_put_char(state->port, &state->info->xmit, ch);
478} 482}
479 483
480static void uart_flush_chars(struct tty_struct *tty) 484static void uart_flush_chars(struct tty_struct *tty)
@@ -528,15 +532,25 @@ uart_write(struct tty_struct *tty, const unsigned char *buf, int count)
528static int uart_write_room(struct tty_struct *tty) 532static int uart_write_room(struct tty_struct *tty)
529{ 533{
530 struct uart_state *state = tty->driver_data; 534 struct uart_state *state = tty->driver_data;
535 unsigned long flags;
536 int ret;
531 537
532 return uart_circ_chars_free(&state->info->xmit); 538 spin_lock_irqsave(&state->port->lock, flags);
539 ret = uart_circ_chars_free(&state->info->xmit);
540 spin_unlock_irqrestore(&state->port->lock, flags);
541 return ret;
533} 542}
534 543
535static int uart_chars_in_buffer(struct tty_struct *tty) 544static int uart_chars_in_buffer(struct tty_struct *tty)
536{ 545{
537 struct uart_state *state = tty->driver_data; 546 struct uart_state *state = tty->driver_data;
547 unsigned long flags;
548 int ret;
538 549
539 return uart_circ_chars_pending(&state->info->xmit); 550 spin_lock_irqsave(&state->port->lock, flags);
551 ret = uart_circ_chars_pending(&state->info->xmit);
552 spin_unlock_irqrestore(&state->port->lock, flags);
553 return ret;
540} 554}
541 555
542static void uart_flush_buffer(struct tty_struct *tty) 556static void uart_flush_buffer(struct tty_struct *tty)
@@ -618,6 +632,11 @@ static int uart_get_info(struct uart_state *state,
618 struct serial_struct tmp; 632 struct serial_struct tmp;
619 633
620 memset(&tmp, 0, sizeof(tmp)); 634 memset(&tmp, 0, sizeof(tmp));
635
636 /* Ensure the state we copy is consistent and no hardware changes
637 occur as we go */
638 mutex_lock(&state->mutex);
639
621 tmp.type = port->type; 640 tmp.type = port->type;
622 tmp.line = port->line; 641 tmp.line = port->line;
623 tmp.port = port->iobase; 642 tmp.port = port->iobase;
@@ -637,6 +656,8 @@ static int uart_get_info(struct uart_state *state,
637 tmp.iomem_reg_shift = port->regshift; 656 tmp.iomem_reg_shift = port->regshift;
638 tmp.iomem_base = (void *)(unsigned long)port->mapbase; 657 tmp.iomem_base = (void *)(unsigned long)port->mapbase;
639 658
659 mutex_unlock(&state->mutex);
660
640 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) 661 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
641 return -EFAULT; 662 return -EFAULT;
642 return 0; 663 return 0;
@@ -914,8 +935,6 @@ static void uart_break_ctl(struct tty_struct *tty, int break_state)
914 struct uart_state *state = tty->driver_data; 935 struct uart_state *state = tty->driver_data;
915 struct uart_port *port = state->port; 936 struct uart_port *port = state->port;
916 937
917 BUG_ON(!kernel_locked());
918
919 mutex_lock(&state->mutex); 938 mutex_lock(&state->mutex);
920 939
921 if (port->type != PORT_UNKNOWN) 940 if (port->type != PORT_UNKNOWN)
@@ -1059,7 +1078,7 @@ static int uart_get_count(struct uart_state *state,
1059} 1078}
1060 1079
1061/* 1080/*
1062 * Called via sys_ioctl under the BKL. We can use spin_lock_irq() here. 1081 * Called via sys_ioctl. We can use spin_lock_irq() here.
1063 */ 1082 */
1064static int 1083static int
1065uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, 1084uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
@@ -1069,7 +1088,6 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
1069 void __user *uarg = (void __user *)arg; 1088 void __user *uarg = (void __user *)arg;
1070 int ret = -ENOIOCTLCMD; 1089 int ret = -ENOIOCTLCMD;
1071 1090
1072 BUG_ON(!kernel_locked());
1073 1091
1074 /* 1092 /*
1075 * These ioctls don't rely on the hardware to be present. 1093 * These ioctls don't rely on the hardware to be present.
@@ -1140,9 +1158,9 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
1140 break; 1158 break;
1141 } 1159 }
1142 } 1160 }
1143 out_up: 1161out_up:
1144 mutex_unlock(&state->mutex); 1162 mutex_unlock(&state->mutex);
1145 out: 1163out:
1146 return ret; 1164 return ret;
1147} 1165}
1148 1166
@@ -1153,7 +1171,6 @@ static void uart_set_termios(struct tty_struct *tty,
1153 unsigned long flags; 1171 unsigned long flags;
1154 unsigned int cflag = tty->termios->c_cflag; 1172 unsigned int cflag = tty->termios->c_cflag;
1155 1173
1156 BUG_ON(!kernel_locked());
1157 1174
1158 /* 1175 /*
1159 * These are the bits that are used to setup various 1176 * These are the bits that are used to setup various
@@ -1165,8 +1182,9 @@ static void uart_set_termios(struct tty_struct *tty,
1165 if ((cflag ^ old_termios->c_cflag) == 0 && 1182 if ((cflag ^ old_termios->c_cflag) == 0 &&
1166 tty->termios->c_ospeed == old_termios->c_ospeed && 1183 tty->termios->c_ospeed == old_termios->c_ospeed &&
1167 tty->termios->c_ispeed == old_termios->c_ispeed && 1184 tty->termios->c_ispeed == old_termios->c_ispeed &&
1168 RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) 1185 RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) {
1169 return; 1186 return;
1187 }
1170 1188
1171 uart_change_speed(state, old_termios); 1189 uart_change_speed(state, old_termios);
1172 1190
@@ -1200,7 +1218,6 @@ static void uart_set_termios(struct tty_struct *tty,
1200 } 1218 }
1201 spin_unlock_irqrestore(&state->port->lock, flags); 1219 spin_unlock_irqrestore(&state->port->lock, flags);
1202 } 1220 }
1203
1204#if 0 1221#if 0
1205 /* 1222 /*
1206 * No need to wake up processes in open wait, since they 1223 * No need to wake up processes in open wait, since they
@@ -1316,11 +1333,11 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
1316 struct uart_port *port = state->port; 1333 struct uart_port *port = state->port;
1317 unsigned long char_time, expire; 1334 unsigned long char_time, expire;
1318 1335
1319 BUG_ON(!kernel_locked());
1320
1321 if (port->type == PORT_UNKNOWN || port->fifosize == 0) 1336 if (port->type == PORT_UNKNOWN || port->fifosize == 0)
1322 return; 1337 return;
1323 1338
1339 lock_kernel();
1340
1324 /* 1341 /*
1325 * Set the check interval to be 1/5 of the estimated time to 1342 * Set the check interval to be 1/5 of the estimated time to
1326 * send a single character, and make it at least 1. The check 1343 * send a single character, and make it at least 1. The check
@@ -1366,6 +1383,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
1366 break; 1383 break;
1367 } 1384 }
1368 set_current_state(TASK_RUNNING); /* might not be needed */ 1385 set_current_state(TASK_RUNNING); /* might not be needed */
1386 unlock_kernel();
1369} 1387}
1370 1388
1371/* 1389/*
@@ -2079,7 +2097,9 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
2079 int ret; 2097 int ret;
2080 2098
2081 uart_change_pm(state, 0); 2099 uart_change_pm(state, 0);
2100 spin_lock_irq(&port->lock);
2082 ops->set_mctrl(port, 0); 2101 ops->set_mctrl(port, 0);
2102 spin_unlock_irq(&port->lock);
2083 ret = ops->startup(port); 2103 ret = ops->startup(port);
2084 if (ret == 0) { 2104 if (ret == 0) {
2085 uart_change_speed(state, NULL); 2105 uart_change_speed(state, NULL);
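The uart_write_room() and uart_chars_in_buffer() hunks above move the circ_buf queries under the port lock: head and tail can be advanced from interrupt context, so the count is only meaningful as a snapshot taken with port->lock held. A minimal sketch of the pattern (hypothetical helper name, same serial_core types and macros):

	static int xmit_chars_free(struct uart_state *state)
	{
		unsigned long flags;
		int ret;

		/* snapshot the free space with the port lock held and irqs off */
		spin_lock_irqsave(&state->port->lock, flags);
		ret = uart_circ_chars_free(&state->info->xmit);
		spin_unlock_irqrestore(&state->port->lock, flags);
		return ret;
	}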
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index c2ea5d4df44a..969106187718 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -855,7 +855,7 @@ static int sci_notifier(struct notifier_block *self,
855 855
856 printk(KERN_INFO "%s: got a postchange notification " 856 printk(KERN_INFO "%s: got a postchange notification "
857 "for cpu %d (old %d, new %d)\n", 857 "for cpu %d (old %d, new %d)\n",
858 __FUNCTION__, freqs->cpu, freqs->old, freqs->new); 858 __func__, freqs->cpu, freqs->old, freqs->new);
859 } 859 }
860 860
861 return NOTIFY_OK; 861 return NOTIFY_OK;
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 41fc61264443..019da2e05f0b 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -839,7 +839,7 @@ static int __init sn_sal_module_init(void)
839 839
840 if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) { 840 if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) {
841 /* error - not sure what I'd do - so I'll do nothing */ 841 /* error - not sure what I'd do - so I'll do nothing */
842 printk(KERN_ERR "%s: unable to add port\n", __FUNCTION__); 842 printk(KERN_ERR "%s: unable to add port\n", __func__);
843 } 843 }
844 844
845 /* when this driver is compiled in, the console initialization 845 /* when this driver is compiled in, the console initialization
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index be0fe152891b..145c0281495d 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -392,7 +392,7 @@ static struct uart_ops sunhv_pops = {
392 392
393static struct uart_driver sunhv_reg = { 393static struct uart_driver sunhv_reg = {
394 .owner = THIS_MODULE, 394 .owner = THIS_MODULE,
395 .driver_name = "serial", 395 .driver_name = "sunhv",
396 .dev_name = "ttyS", 396 .dev_name = "ttyS",
397 .major = TTY_MAJOR, 397 .major = TTY_MAJOR,
398}; 398};
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 543f93741e6f..9ff5b38f3bee 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -826,7 +826,7 @@ static struct uart_ops sunsab_pops = {
826 826
827static struct uart_driver sunsab_reg = { 827static struct uart_driver sunsab_reg = {
828 .owner = THIS_MODULE, 828 .owner = THIS_MODULE,
829 .driver_name = "serial", 829 .driver_name = "sunsab",
830 .dev_name = "ttyS", 830 .dev_name = "ttyS",
831 .major = TTY_MAJOR, 831 .major = TTY_MAJOR,
832}; 832};
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 4e2302d43ab1..03806a935209 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1173,7 +1173,7 @@ out:
1173 1173
1174static struct uart_driver sunsu_reg = { 1174static struct uart_driver sunsu_reg = {
1175 .owner = THIS_MODULE, 1175 .owner = THIS_MODULE,
1176 .driver_name = "serial", 1176 .driver_name = "sunsu",
1177 .dev_name = "ttyS", 1177 .dev_name = "ttyS",
1178 .major = TTY_MAJOR, 1178 .major = TTY_MAJOR,
1179}; 1179};
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 90a20a152ebf..7e9fa5ef0eb7 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1023,7 +1023,7 @@ static struct uart_sunzilog_port *sunzilog_irq_chain;
1023 1023
1024static struct uart_driver sunzilog_reg = { 1024static struct uart_driver sunzilog_reg = {
1025 .owner = THIS_MODULE, 1025 .owner = THIS_MODULE,
1026 .driver_name = "ttyS", 1026 .driver_name = "sunzilog",
1027 .dev_name = "ttyS", 1027 .dev_name = "ttyS",
1028 .major = TTY_MAJOR, 1028 .major = TTY_MAJOR,
1029}; 1029};
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index b565d5a37499..b51c24245be4 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -584,7 +584,7 @@ ulite_of_probe(struct of_device *op, const struct of_device_id *match)
584 const unsigned int *id; 584 const unsigned int *id;
585 int irq, rc; 585 int irq, rc;
586 586
587 dev_dbg(&op->dev, "%s(%p, %p)\n", __FUNCTION__, op, match); 587 dev_dbg(&op->dev, "%s(%p, %p)\n", __func__, op, match);
588 588
589 rc = of_address_to_resource(op->node, 0, &res); 589 rc = of_address_to_resource(op->node, 0, &res);
590 if (rc) { 590 if (rc) {
diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c
index 5e4310ccd591..01917c433f17 100644
--- a/drivers/serial/ucc_uart.c
+++ b/drivers/serial/ucc_uart.c
@@ -215,7 +215,7 @@ static inline dma_addr_t cpu2qe_addr(void *addr, struct uart_qe_port *qe_port)
215 return qe_port->bd_dma_addr + (addr - qe_port->bd_virt); 215 return qe_port->bd_dma_addr + (addr - qe_port->bd_virt);
216 216
217 /* something nasty happened */ 217 /* something nasty happened */
218 printk(KERN_ERR "%s: addr=%p\n", __FUNCTION__, addr); 218 printk(KERN_ERR "%s: addr=%p\n", __func__, addr);
219 BUG(); 219 BUG();
220 return 0; 220 return 0;
221} 221}
@@ -234,7 +234,7 @@ static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)
234 return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); 234 return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);
235 235
236 /* something nasty happened */ 236 /* something nasty happened */
237 printk(KERN_ERR "%s: addr=%x\n", __FUNCTION__, addr); 237 printk(KERN_ERR "%s: addr=%x\n", __func__, addr);
238 BUG(); 238 BUG();
239 return NULL; 239 return NULL;
240} 240}
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 02c8e305b14f..e81d59d78910 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -497,7 +497,7 @@ static int atmel_spi_setup(struct spi_device *spi)
497 struct atmel_spi *as; 497 struct atmel_spi *as;
498 u32 scbr, csr; 498 u32 scbr, csr;
499 unsigned int bits = spi->bits_per_word; 499 unsigned int bits = spi->bits_per_word;
500 unsigned long bus_hz, sck_hz; 500 unsigned long bus_hz;
501 unsigned int npcs_pin; 501 unsigned int npcs_pin;
502 int ret; 502 int ret;
503 503
@@ -536,14 +536,25 @@ static int atmel_spi_setup(struct spi_device *spi)
536 return -EINVAL; 536 return -EINVAL;
537 } 537 }
538 538
539 /* speed zero convention is used by some upper layers */ 539 /*
540 * Pre-new_1 chips start out at half the peripheral
541 * bus speed.
542 */
540 bus_hz = clk_get_rate(as->clk); 543 bus_hz = clk_get_rate(as->clk);
544 if (!as->new_1)
545 bus_hz /= 2;
546
541 if (spi->max_speed_hz) { 547 if (spi->max_speed_hz) {
542 /* assume div32/fdiv/mbz == 0 */ 548 /*
543 if (!as->new_1) 549 * Calculate the lowest divider that satisfies the
544 bus_hz /= 2; 550 * constraint, assuming div32/fdiv/mbz == 0.
545 scbr = ((bus_hz + spi->max_speed_hz - 1) 551 */
546 / spi->max_speed_hz); 552 scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);
553
554 /*
555 * If the resulting divider doesn't fit into the
556 * register bitfield, we can't satisfy the constraint.
557 */
547 if (scbr >= (1 << SPI_SCBR_SIZE)) { 558 if (scbr >= (1 << SPI_SCBR_SIZE)) {
548 dev_dbg(&spi->dev, 559 dev_dbg(&spi->dev,
549 "setup: %d Hz too slow, scbr %u; min %ld Hz\n", 560 "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
@@ -551,8 +562,8 @@ static int atmel_spi_setup(struct spi_device *spi)
551 return -EINVAL; 562 return -EINVAL;
552 } 563 }
553 } else 564 } else
565 /* speed zero means "as slow as possible" */
554 scbr = 0xff; 566 scbr = 0xff;
555 sck_hz = bus_hz / scbr;
556 567
557 csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8); 568 csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
558 if (spi->mode & SPI_CPOL) 569 if (spi->mode & SPI_CPOL)
@@ -589,7 +600,7 @@ static int atmel_spi_setup(struct spi_device *spi)
589 600
590 dev_dbg(&spi->dev, 601 dev_dbg(&spi->dev,
591 "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n", 602 "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
592 sck_hz, bits, spi->mode, spi->chip_select, csr); 603 bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);
593 604
594 spi_writel(as, CSR0 + 4 * spi->chip_select, csr); 605 spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
595 606
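To make the atmel_spi divider rework above concrete, a worked example with illustrative numbers (not taken from the patch):

	/*
	 * bus_hz       = 120000000   (clk_get_rate(as->clk), new_1-style chip)
	 * max_speed_hz =   9000000
	 * scbr = DIV_ROUND_UP(120000000, 9000000) = 14
	 * SCK  = 120000000 / 14 = ~8.57 MHz, which stays at or below the
	 * requested 9 MHz; rounding down (scbr = 13) would give ~9.23 MHz
	 * and exceed the ceiling.  DIV_ROUND_UP(n, d) expands to
	 * (n + d - 1) / d, so the new line computes exactly the same value
	 * as the removed open-coded expression, just more readably.
	 */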
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index a9ac1fdb3094..7fea3cf4588a 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -608,6 +608,7 @@ static void pump_transfers(unsigned long data)
608 u8 width; 608 u8 width;
609 u16 cr, dma_width, dma_config; 609 u16 cr, dma_width, dma_config;
610 u32 tranf_success = 1; 610 u32 tranf_success = 1;
611 u8 full_duplex = 0;
611 612
612 /* Get current state information */ 613 /* Get current state information */
613 message = drv_data->cur_msg; 614 message = drv_data->cur_msg;
@@ -658,6 +659,7 @@ static void pump_transfers(unsigned long data)
658 } 659 }
659 660
660 if (transfer->rx_buf != NULL) { 661 if (transfer->rx_buf != NULL) {
662 full_duplex = transfer->tx_buf != NULL;
661 drv_data->rx = transfer->rx_buf; 663 drv_data->rx = transfer->rx_buf;
662 drv_data->rx_end = drv_data->rx + transfer->len; 664 drv_data->rx_end = drv_data->rx + transfer->len;
663 dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n", 665 dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n",
@@ -740,7 +742,8 @@ static void pump_transfers(unsigned long data)
740 * successful use different way to r/w according to 742 * successful use different way to r/w according to
741 * drv_data->cur_chip->enable_dma 743 * drv_data->cur_chip->enable_dma
742 */ 744 */
743 if (drv_data->cur_chip->enable_dma && drv_data->len > 6) { 745 if (!full_duplex && drv_data->cur_chip->enable_dma
746 && drv_data->len > 6) {
744 747
745 disable_dma(drv_data->dma_channel); 748 disable_dma(drv_data->dma_channel);
746 clear_dma_irqstat(drv_data->dma_channel); 749 clear_dma_irqstat(drv_data->dma_channel);
@@ -828,7 +831,7 @@ static void pump_transfers(unsigned long data)
828 /* IO mode write then read */ 831 /* IO mode write then read */
829 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); 832 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
830 833
831 if (drv_data->tx != NULL && drv_data->rx != NULL) { 834 if (full_duplex) {
832 /* full duplex mode */ 835 /* full duplex mode */
833 BUG_ON((drv_data->tx_end - drv_data->tx) != 836 BUG_ON((drv_data->tx_end - drv_data->tx) !=
834 (drv_data->rx_end - drv_data->rx)); 837 (drv_data->rx_end - drv_data->rx));
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 34bfb7dd7764..0885cc357a37 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -125,10 +125,10 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
125 /* is clk = pclk / (2 * (pre+1)), or is it 125 /* is clk = pclk / (2 * (pre+1)), or is it
126 * clk = (pclk * 2) / ( pre + 1) */ 126 * clk = (pclk * 2) / ( pre + 1) */
127 127
128 div = (div / 2) - 1; 128 div /= 2;
129 129
130 if (div < 0) 130 if (div > 0)
131 div = 1; 131 div -= 1;
132 132
133 if (div > 255) 133 if (div > 255)
134 div = 255; 134 div = 255;
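A worked example for the s3c24xx prescaler fix above, where div is the value reaching this block and, per the comment in the hunk, the hardware clock is clk = pclk / (2 * (pre + 1)):

	/*
	 * div == 2:  old (2/2) - 1 = 0           new 2/2 = 1, > 0, minus 1 = 0
	 * div == 1:  old (1/2) - 1 = -1 -> forced to 1 -> clk = pclk / 4
	 *            new 1/2 = 0, not > 0, stays 0     -> clk = pclk / 2
	 *
	 * So for the highest requested rates the old code programmed
	 * prescaler 1 instead of 0 and ran SPI at half the achievable
	 * clock; the new code clamps at prescaler 0 instead.
	 */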
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 17e71d56f31e..4b628526df09 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menuconfig THERMAL 5menuconfig THERMAL
6 bool "Generic Thermal sysfs driver" 6 tristate "Generic Thermal sysfs driver"
7 help 7 help
8 Generic Thermal Sysfs driver offers a generic mechanism for 8 Generic Thermal Sysfs driver offers a generic mechanism for
9 thermal management. Usually it's made up of one or more thermal 9 thermal management. Usually it's made up of one or more thermal
@@ -11,4 +11,4 @@ menuconfig THERMAL
11 Each thermal zone contains its own temperature, trip points, 11 Each thermal zone contains its own temperature, trip points,
12 cooling devices. 12 cooling devices.
13 All platforms with ACPI thermal support can use this driver. 13 All platforms with ACPI thermal support can use this driver.
14 If you want this support, you should say Y here. 14 If you want this support, you should say Y or M here.
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 8ef1232de376..31108a01c22e 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -2,4 +2,4 @@
2# Makefile for sensor chip drivers. 2# Makefile for sensor chip drivers.
3# 3#
4 4
5obj-$(CONFIG_THERMAL) += thermal.o 5obj-$(CONFIG_THERMAL) += thermal_sys.o
diff --git a/drivers/thermal/thermal.c b/drivers/thermal/thermal_sys.c
index 7f79bbf652d7..6098787341f3 100644
--- a/drivers/thermal/thermal.c
+++ b/drivers/thermal/thermal_sys.c
@@ -31,7 +31,7 @@
31#include <linux/thermal.h> 31#include <linux/thermal.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33 33
34MODULE_AUTHOR("Zhang Rui") 34MODULE_AUTHOR("Zhang Rui");
35MODULE_DESCRIPTION("Generic thermal management sysfs support"); 35MODULE_DESCRIPTION("Generic thermal management sysfs support");
36MODULE_LICENSE("GPL"); 36MODULE_LICENSE("GPL");
37 37
@@ -295,6 +295,164 @@ thermal_cooling_device_trip_point_show(struct device *dev,
295 295
296/* Device management */ 296/* Device management */
297 297
298#if defined(CONFIG_HWMON) || \
299 (defined(CONFIG_HWMON_MODULE) && defined(CONFIG_THERMAL_MODULE))
300/* hwmon sys I/F */
301#include <linux/hwmon.h>
302static LIST_HEAD(thermal_hwmon_list);
303
304static ssize_t
305name_show(struct device *dev, struct device_attribute *attr, char *buf)
306{
307 struct thermal_hwmon_device *hwmon = dev->driver_data;
308 return sprintf(buf, "%s\n", hwmon->type);
309}
310static DEVICE_ATTR(name, 0444, name_show, NULL);
311
312static ssize_t
313temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
314{
315 struct thermal_hwmon_attr *hwmon_attr
316 = container_of(attr, struct thermal_hwmon_attr, attr);
317 struct thermal_zone_device *tz
318 = container_of(hwmon_attr, struct thermal_zone_device,
319 temp_input);
320
321 return tz->ops->get_temp(tz, buf);
322}
323
324static ssize_t
325temp_crit_show(struct device *dev, struct device_attribute *attr,
326 char *buf)
327{
328 struct thermal_hwmon_attr *hwmon_attr
329 = container_of(attr, struct thermal_hwmon_attr, attr);
330 struct thermal_zone_device *tz
331 = container_of(hwmon_attr, struct thermal_zone_device,
332 temp_crit);
333
334 return tz->ops->get_trip_temp(tz, 0, buf);
335}
336
337
338static int
339thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
340{
341 struct thermal_hwmon_device *hwmon;
342 int new_hwmon_device = 1;
343 int result;
344
345 mutex_lock(&thermal_list_lock);
346 list_for_each_entry(hwmon, &thermal_hwmon_list, node)
347 if (!strcmp(hwmon->type, tz->type)) {
348 new_hwmon_device = 0;
349 mutex_unlock(&thermal_list_lock);
350 goto register_sys_interface;
351 }
352 mutex_unlock(&thermal_list_lock);
353
354 hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL);
355 if (!hwmon)
356 return -ENOMEM;
357
358 INIT_LIST_HEAD(&hwmon->tz_list);
359 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
360 hwmon->device = hwmon_device_register(NULL);
361 if (IS_ERR(hwmon->device)) {
362 result = PTR_ERR(hwmon->device);
363 goto free_mem;
364 }
365 hwmon->device->driver_data = hwmon;
366 result = device_create_file(hwmon->device, &dev_attr_name);
367 if (result)
368 goto unregister_hwmon_device;
369
370 register_sys_interface:
371 tz->hwmon = hwmon;
372 hwmon->count++;
373
374 snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH,
375 "temp%d_input", hwmon->count);
376 tz->temp_input.attr.attr.name = tz->temp_input.name;
377 tz->temp_input.attr.attr.mode = 0444;
378 tz->temp_input.attr.show = temp_input_show;
379 result = device_create_file(hwmon->device, &tz->temp_input.attr);
380 if (result)
381 goto unregister_hwmon_device;
382
383 if (tz->ops->get_crit_temp) {
384 unsigned long temperature;
385 if (!tz->ops->get_crit_temp(tz, &temperature)) {
386 snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH,
387 "temp%d_crit", hwmon->count);
388 tz->temp_crit.attr.attr.name = tz->temp_crit.name;
389 tz->temp_crit.attr.attr.mode = 0444;
390 tz->temp_crit.attr.show = temp_crit_show;
391 result = device_create_file(hwmon->device,
392 &tz->temp_crit.attr);
393 if (result)
394 goto unregister_hwmon_device;
395 }
396 }
397
398 mutex_lock(&thermal_list_lock);
399 if (new_hwmon_device)
400 list_add_tail(&hwmon->node, &thermal_hwmon_list);
401 list_add_tail(&tz->hwmon_node, &hwmon->tz_list);
402 mutex_unlock(&thermal_list_lock);
403
404 return 0;
405
406 unregister_hwmon_device:
407 device_remove_file(hwmon->device, &tz->temp_crit.attr);
408 device_remove_file(hwmon->device, &tz->temp_input.attr);
409 if (new_hwmon_device) {
410 device_remove_file(hwmon->device, &dev_attr_name);
411 hwmon_device_unregister(hwmon->device);
412 }
413 free_mem:
414 if (new_hwmon_device)
415 kfree(hwmon);
416
417 return result;
418}
419
420static void
421thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
422{
423 struct thermal_hwmon_device *hwmon = tz->hwmon;
424
425 tz->hwmon = NULL;
426 device_remove_file(hwmon->device, &tz->temp_input.attr);
427 device_remove_file(hwmon->device, &tz->temp_crit.attr);
428
429 mutex_lock(&thermal_list_lock);
430 list_del(&tz->hwmon_node);
431 if (!list_empty(&hwmon->tz_list)) {
432 mutex_unlock(&thermal_list_lock);
433 return;
434 }
435 list_del(&hwmon->node);
436 mutex_unlock(&thermal_list_lock);
437
438 device_remove_file(hwmon->device, &dev_attr_name);
439 hwmon_device_unregister(hwmon->device);
440 kfree(hwmon);
441}
442#else
443static int
444thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
445{
446 return 0;
447}
448
449static void
450thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
451{
452}
453#endif
454
455
298/** 456/**
299 * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone 457 * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone
300 * @tz: thermal zone device 458 * @tz: thermal zone device
@@ -642,6 +800,10 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
642 goto unregister; 800 goto unregister;
643 } 801 }
644 802
803 result = thermal_add_hwmon_sysfs(tz);
804 if (result)
805 goto unregister;
806
645 mutex_lock(&thermal_list_lock); 807 mutex_lock(&thermal_list_lock);
646 list_add_tail(&tz->node, &thermal_tz_list); 808 list_add_tail(&tz->node, &thermal_tz_list);
647 if (ops->bind) 809 if (ops->bind)
@@ -700,6 +862,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
700 for (count = 0; count < tz->trips; count++) 862 for (count = 0; count < tz->trips; count++)
701 TRIP_POINT_ATTR_REMOVE(&tz->device, count); 863 TRIP_POINT_ATTR_REMOVE(&tz->device, count);
702 864
865 thermal_remove_hwmon_sysfs(tz);
703 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); 866 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
704 idr_destroy(&tz->idr); 867 idr_destroy(&tz->idr);
705 mutex_destroy(&tz->lock); 868 mutex_destroy(&tz->lock);
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 516a6400db43..a419c42e880e 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -17,6 +17,8 @@ obj-$(CONFIG_USB_SL811_HCD) += host/
17obj-$(CONFIG_USB_U132_HCD) += host/ 17obj-$(CONFIG_USB_U132_HCD) += host/
18obj-$(CONFIG_USB_R8A66597_HCD) += host/ 18obj-$(CONFIG_USB_R8A66597_HCD) += host/
19 19
20obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
21
20obj-$(CONFIG_USB_ACM) += class/ 22obj-$(CONFIG_USB_ACM) += class/
21obj-$(CONFIG_USB_PRINTER) += class/ 23obj-$(CONFIG_USB_PRINTER) += class/
22 24
diff --git a/drivers/usb/atm/Kconfig b/drivers/usb/atm/Kconfig
index 86e64035edb0..be0b8daac9c7 100644
--- a/drivers/usb/atm/Kconfig
+++ b/drivers/usb/atm/Kconfig
@@ -19,7 +19,6 @@ if USB_ATM
19 19
20config USB_SPEEDTOUCH 20config USB_SPEEDTOUCH
21 tristate "Speedtouch USB support" 21 tristate "Speedtouch USB support"
22 depends on USB_ATM
23 select FW_LOADER 22 select FW_LOADER
24 help 23 help
25 Say Y here if you have an SpeedTouch USB or SpeedTouch 330 24 Say Y here if you have an SpeedTouch USB or SpeedTouch 330
@@ -32,7 +31,6 @@ config USB_SPEEDTOUCH
32 31
33config USB_CXACRU 32config USB_CXACRU
34 tristate "Conexant AccessRunner USB support" 33 tristate "Conexant AccessRunner USB support"
35 depends on USB_ATM
36 select FW_LOADER 34 select FW_LOADER
37 help 35 help
38 Say Y here if you have an ADSL USB modem based on the Conexant 36 Say Y here if you have an ADSL USB modem based on the Conexant
@@ -45,7 +43,6 @@ config USB_CXACRU
45 43
46config USB_UEAGLEATM 44config USB_UEAGLEATM
47 tristate "ADI 930 and eagle USB DSL modem" 45 tristate "ADI 930 and eagle USB DSL modem"
48 depends on USB_ATM
49 select FW_LOADER 46 select FW_LOADER
50 help 47 help
51 Say Y here if you have an ADSL USB modem based on the ADI 930 48 Say Y here if you have an ADSL USB modem based on the ADI 930
@@ -58,7 +55,6 @@ config USB_UEAGLEATM
58 55
59config USB_XUSBATM 56config USB_XUSBATM
60 tristate "Other USB DSL modem support" 57 tristate "Other USB DSL modem support"
61 depends on USB_ATM
62 help 58 help
63 Say Y here if you have a DSL USB modem not explicitly supported by 59 Say Y here if you have a DSL USB modem not explicitly supported by
64 another USB DSL drivers. In order to use your modem you will need to 60 another USB DSL drivers. In order to use your modem you will need to
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 4220f22b6660..5f71ff3aee35 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -305,8 +305,6 @@ enum {
305 */ 305 */
306 306
307#define FW_GET_BYTE(p) *((__u8 *) (p)) 307#define FW_GET_BYTE(p) *((__u8 *) (p))
308#define FW_GET_WORD(p) le16_to_cpu(get_unaligned((__le16 *) (p)))
309#define FW_GET_LONG(p) le32_to_cpu(get_unaligned((__le32 *) (p)))
310 308
311#define FW_DIR "ueagle-atm/" 309#define FW_DIR "ueagle-atm/"
312#define NB_MODEM 4 310#define NB_MODEM 4
@@ -621,7 +619,7 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte
621 if (size < 4) 619 if (size < 4)
622 goto err_fw_corrupted; 620 goto err_fw_corrupted;
623 621
624 crc = FW_GET_LONG(pfw); 622 crc = get_unaligned_le32(pfw);
625 pfw += 4; 623 pfw += 4;
626 size -= 4; 624 size -= 4;
627 if (crc32_be(0, pfw, size) != crc) 625 if (crc32_be(0, pfw, size) != crc)
@@ -640,7 +638,7 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte
640 638
641 while (size > 3) { 639 while (size > 3) {
642 u8 len = FW_GET_BYTE(pfw); 640 u8 len = FW_GET_BYTE(pfw);
643 u16 add = FW_GET_WORD(pfw + 1); 641 u16 add = get_unaligned_le16(pfw + 1);
644 642
645 size -= len + 3; 643 size -= len + 3;
646 if (size < 0) 644 if (size < 0)
@@ -738,7 +736,7 @@ static int check_dsp_e1(u8 *dsp, unsigned int len)
738 736
739 for (i = 0; i < pagecount; i++) { 737 for (i = 0; i < pagecount; i++) {
740 738
741 pageoffset = FW_GET_LONG(dsp + p); 739 pageoffset = get_unaligned_le32(dsp + p);
742 p += 4; 740 p += 4;
743 741
744 if (pageoffset == 0) 742 if (pageoffset == 0)
@@ -759,7 +757,7 @@ static int check_dsp_e1(u8 *dsp, unsigned int len)
759 return 1; 757 return 1;
760 758
761 pp += 2; /* skip blockaddr */ 759 pp += 2; /* skip blockaddr */
762 blocksize = FW_GET_WORD(dsp + pp); 760 blocksize = get_unaligned_le16(dsp + pp);
763 pp += 2; 761 pp += 2;
764 762
765 /* enough space for block data? */ 763 /* enough space for block data? */
@@ -928,7 +926,7 @@ static void uea_load_page_e1(struct work_struct *work)
928 goto bad1; 926 goto bad1;
929 927
930 p += 4 * pageno; 928 p += 4 * pageno;
931 pageoffset = FW_GET_LONG(p); 929 pageoffset = get_unaligned_le32(p);
932 930
933 if (pageoffset == 0) 931 if (pageoffset == 0)
934 goto bad1; 932 goto bad1;
@@ -945,10 +943,10 @@ static void uea_load_page_e1(struct work_struct *work)
945 bi.wOvlOffset = cpu_to_le16(ovl | 0x8000); 943 bi.wOvlOffset = cpu_to_le16(ovl | 0x8000);
946 944
947 for (i = 0; i < blockcount; i++) { 945 for (i = 0; i < blockcount; i++) {
948 blockaddr = FW_GET_WORD(p); 946 blockaddr = get_unaligned_le16(p);
949 p += 2; 947 p += 2;
950 948
951 blocksize = FW_GET_WORD(p); 949 blocksize = get_unaligned_le16(p);
952 p += 2; 950 p += 2;
953 951
954 bi.wSize = cpu_to_le16(blocksize); 952 bi.wSize = cpu_to_le16(blocksize);
@@ -1152,9 +1150,9 @@ static int uea_cmv_e1(struct uea_softc *sc,
1152 cmv.bDirection = E1_HOSTTOMODEM; 1150 cmv.bDirection = E1_HOSTTOMODEM;
1153 cmv.bFunction = function; 1151 cmv.bFunction = function;
1154 cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx); 1152 cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx);
1155 put_unaligned(cpu_to_le32(address), &cmv.dwSymbolicAddress); 1153 put_unaligned_le32(address, &cmv.dwSymbolicAddress);
1156 cmv.wOffsetAddress = cpu_to_le16(offset); 1154 cmv.wOffsetAddress = cpu_to_le16(offset);
1157 put_unaligned(cpu_to_le32(data >> 16 | data << 16), &cmv.dwData); 1155 put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData);
1158 1156
1159 ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv); 1157 ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv);
1160 if (ret < 0) 1158 if (ret < 0)
@@ -1646,7 +1644,7 @@ static int request_cmvs(struct uea_softc *sc,
1646 if (size < 5) 1644 if (size < 5)
1647 goto err_fw_corrupted; 1645 goto err_fw_corrupted;
1648 1646
1649 crc = FW_GET_LONG(data); 1647 crc = get_unaligned_le32(data);
1650 data += 4; 1648 data += 4;
1651 size -= 4; 1649 size -= 4;
1652 if (crc32_be(0, data, size) != crc) 1650 if (crc32_be(0, data, size) != crc)
@@ -1696,9 +1694,9 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
1696 "please update your firmware\n"); 1694 "please update your firmware\n");
1697 1695
1698 for (i = 0; i < len; i++) { 1696 for (i = 0; i < len; i++) {
1699 ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v1[i].address), 1697 ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v1[i].address),
1700 FW_GET_WORD(&cmvs_v1[i].offset), 1698 get_unaligned_le16(&cmvs_v1[i].offset),
1701 FW_GET_LONG(&cmvs_v1[i].data)); 1699 get_unaligned_le32(&cmvs_v1[i].data));
1702 if (ret < 0) 1700 if (ret < 0)
1703 goto out; 1701 goto out;
1704 } 1702 }
@@ -1706,9 +1704,9 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
1706 struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr; 1704 struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
1707 1705
1708 for (i = 0; i < len; i++) { 1706 for (i = 0; i < len; i++) {
1709 ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v2[i].address), 1707 ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v2[i].address),
1710 (u16) FW_GET_LONG(&cmvs_v2[i].offset), 1708 (u16) get_unaligned_le32(&cmvs_v2[i].offset),
1711 FW_GET_LONG(&cmvs_v2[i].data)); 1709 get_unaligned_le32(&cmvs_v2[i].data));
1712 if (ret < 0) 1710 if (ret < 0)
1713 goto out; 1711 goto out;
1714 } 1712 }
@@ -1759,10 +1757,10 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
1759 1757
1760 for (i = 0; i < len; i++) { 1758 for (i = 0; i < len; i++) {
1761 ret = uea_write_cmv_e4(sc, 1, 1759 ret = uea_write_cmv_e4(sc, 1,
1762 FW_GET_LONG(&cmvs_v2[i].group), 1760 get_unaligned_le32(&cmvs_v2[i].group),
1763 FW_GET_LONG(&cmvs_v2[i].address), 1761 get_unaligned_le32(&cmvs_v2[i].address),
1764 FW_GET_LONG(&cmvs_v2[i].offset), 1762 get_unaligned_le32(&cmvs_v2[i].offset),
1765 FW_GET_LONG(&cmvs_v2[i].data)); 1763 get_unaligned_le32(&cmvs_v2[i].data));
1766 if (ret < 0) 1764 if (ret < 0)
1767 goto out; 1765 goto out;
1768 } 1766 }
@@ -1964,7 +1962,7 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
1964 if (UEA_CHIP_VERSION(sc) == ADI930 1962 if (UEA_CHIP_VERSION(sc) == ADI930
1965 && cmv->bFunction == E1_MAKEFUNCTION(2, 2)) { 1963 && cmv->bFunction == E1_MAKEFUNCTION(2, 2)) {
1966 cmv->wIndex = cpu_to_le16(dsc->idx); 1964 cmv->wIndex = cpu_to_le16(dsc->idx);
1967 put_unaligned(cpu_to_le32(dsc->address), &cmv->dwSymbolicAddress); 1965 put_unaligned_le32(dsc->address, &cmv->dwSymbolicAddress);
1968 cmv->wOffsetAddress = cpu_to_le16(dsc->offset); 1966 cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
1969 } else 1967 } else
1970 goto bad2; 1968 goto bad2;
@@ -1978,11 +1976,11 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
1978 1976
1979 /* in case of MEMACCESS */ 1977 /* in case of MEMACCESS */
1980 if (le16_to_cpu(cmv->wIndex) != dsc->idx || 1978 if (le16_to_cpu(cmv->wIndex) != dsc->idx ||
1981 le32_to_cpu(get_unaligned(&cmv->dwSymbolicAddress)) != dsc->address || 1979 get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address ||
1982 le16_to_cpu(cmv->wOffsetAddress) != dsc->offset) 1980 le16_to_cpu(cmv->wOffsetAddress) != dsc->offset)
1983 goto bad2; 1981 goto bad2;
1984 1982
1985 sc->data = le32_to_cpu(get_unaligned(&cmv->dwData)); 1983 sc->data = get_unaligned_le32(&cmv->dwData);
1986 sc->data = sc->data << 16 | sc->data >> 16; 1984 sc->data = sc->data << 16 | sc->data >> 16;
1987 1985
1988 wake_up_cmv_ack(sc); 1986 wake_up_cmv_ack(sc);
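For readers following the FW_GET_WORD/FW_GET_LONG removal in ueagle-atm.c above, the conversion is a straight substitution to the byteorder-aware unaligned accessors; behaviour is unchanged:

	/*
	 * get_unaligned_le16(p)    == le16_to_cpu(get_unaligned((__le16 *)(p)))
	 * get_unaligned_le32(p)    == le32_to_cpu(get_unaligned((__le32 *)(p)))
	 * put_unaligned_le32(v, p) == put_unaligned(cpu_to_le32(v), (__le32 *)(p))
	 */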
diff --git a/drivers/usb/c67x00/Makefile b/drivers/usb/c67x00/Makefile
new file mode 100644
index 000000000000..868bc41b5980
--- /dev/null
+++ b/drivers/usb/c67x00/Makefile
@@ -0,0 +1,9 @@
1#
2# Makefile for Cypress C67X00 USB Controller
3#
4
5ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG
6
7obj-$(CONFIG_USB_C67X00_HCD) += c67x00.o
8
9c67x00-objs := c67x00-drv.o c67x00-ll-hpi.o c67x00-hcd.o c67x00-sched.o
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
new file mode 100644
index 000000000000..5633bc5c8bf2
--- /dev/null
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -0,0 +1,243 @@
1/*
2 * c67x00-drv.c: Cypress C67X00 USB Common infrastructure
3 *
4 * Copyright (C) 2006-2008 Barco N.V.
5 * Derived from the Cypress cy7c67200/300 ezusb linux driver and
6 * based on multiple host controller drivers inside the linux kernel.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
21 * MA 02110-1301 USA.
22 */
23
24/*
25 * This file implements the common infrastructure for using the c67x00.
26 * It is both the link between the platform configuration and subdrivers and
27 * the link between the common hardware parts and the subdrivers (e.g.
28 * interrupt handling).
29 *
30 * The c67x00 has 2 SIEs (serial interface engines) which can be configured
31 * to be host, device or OTG (with some limitations, e.g. only SIE1 can be OTG).
32 *
33 * Depending on the platform configuration, the SIE's are created and
34 * the corresponding subdriver is initialized (c67x00_probe_sie).
35 */
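/*
 * A minimal sketch of how a board could hook this controller up (not part of
 * this driver; the sie_config encoding and the regstep value are assumptions,
 * see <linux/usb/c67x00.h> for the real platform-data layout):
 *
 *	static struct c67x00_platform_data c67x00_pdata = {
 *		.sie_config	= C67X00_SIE_HOST,	// SIE1 as host, SIE2 unused (assumed encoding)
 *		.hpi_regstep	= 4,			// byte distance between the HPI registers
 *	};
 *
 *	static struct platform_device c67x00_dev = {
 *		.name	= "c67x00",
 *		.id	= -1,
 *		.dev	= { .platform_data = &c67x00_pdata },
 *		// plus one IORESOURCE_MEM and one IORESOURCE_IRQ resource,
 *		// which c67x00_drv_probe() fetches with platform_get_resource()
 *	};
 */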
36
37#include <linux/device.h>
38#include <linux/io.h>
39#include <linux/list.h>
40#include <linux/usb.h>
41#include <linux/usb/c67x00.h>
42
43#include "c67x00.h"
44#include "c67x00-hcd.h"
45
46static void c67x00_probe_sie(struct c67x00_sie *sie,
47 struct c67x00_device *dev, int sie_num)
48{
49 spin_lock_init(&sie->lock);
50 sie->dev = dev;
51 sie->sie_num = sie_num;
52 sie->mode = c67x00_sie_config(dev->pdata->sie_config, sie_num);
53
54 switch (sie->mode) {
55 case C67X00_SIE_HOST:
56 c67x00_hcd_probe(sie);
57 break;
58
59 case C67X00_SIE_UNUSED:
60 dev_info(sie_dev(sie),
61 "Not using SIE %d as requested\n", sie->sie_num);
62 break;
63
64 default:
65 dev_err(sie_dev(sie),
66 "Unsupported configuration: 0x%x for SIE %d\n",
67 sie->mode, sie->sie_num);
68 break;
69 }
70}
71
72static void c67x00_remove_sie(struct c67x00_sie *sie)
73{
74 switch (sie->mode) {
75 case C67X00_SIE_HOST:
76 c67x00_hcd_remove(sie);
77 break;
78
79 default:
80 break;
81 }
82}
83
84static irqreturn_t c67x00_irq(int irq, void *__dev)
85{
86 struct c67x00_device *c67x00 = __dev;
87 struct c67x00_sie *sie;
88 u16 msg, int_status;
89 int i, count = 8;
90
91 int_status = c67x00_ll_hpi_status(c67x00);
92 if (!int_status)
93 return IRQ_NONE;
94
95 while (int_status != 0 && (count-- >= 0)) {
96 c67x00_ll_irq(c67x00, int_status);
97 for (i = 0; i < C67X00_SIES; i++) {
98 sie = &c67x00->sie[i];
99 msg = 0;
100 if (int_status & SIEMSG_FLG(i))
101 msg = c67x00_ll_fetch_siemsg(c67x00, i);
102 if (sie->irq)
103 sie->irq(sie, int_status, msg);
104 }
105 int_status = c67x00_ll_hpi_status(c67x00);
106 }
107
108 if (int_status)
109 dev_warn(&c67x00->pdev->dev, "Not all interrupts handled! "
110 "status = 0x%04x\n", int_status);
111
112 return IRQ_HANDLED;
113}
114
115/* ------------------------------------------------------------------------- */
116
117static int __devinit c67x00_drv_probe(struct platform_device *pdev)
118{
119 struct c67x00_device *c67x00;
120 struct c67x00_platform_data *pdata;
121 struct resource *res, *res2;
122 int ret, i;
123
124 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
125 if (!res)
126 return -ENODEV;
127
128 res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
129 if (!res2)
130 return -ENODEV;
131
132 pdata = pdev->dev.platform_data;
133 if (!pdata)
134 return -ENODEV;
135
136 c67x00 = kzalloc(sizeof(*c67x00), GFP_KERNEL);
137 if (!c67x00)
138 return -ENOMEM;
139
140 if (!request_mem_region(res->start, res->end - res->start + 1,
141 pdev->name)) {
142 dev_err(&pdev->dev, "Memory region busy\n");
143 ret = -EBUSY;
144 goto request_mem_failed;
145 }
146 c67x00->hpi.base = ioremap(res->start, res->end - res->start + 1);
147 if (!c67x00->hpi.base) {
148 dev_err(&pdev->dev, "Unable to map HPI registers\n");
149 ret = -EIO;
150 goto map_failed;
151 }
152
153 spin_lock_init(&c67x00->hpi.lock);
154 c67x00->hpi.regstep = pdata->hpi_regstep;
155 c67x00->pdata = pdev->dev.platform_data;
156 c67x00->pdev = pdev;
157
158 c67x00_ll_init(c67x00);
159 c67x00_ll_hpi_reg_init(c67x00);
160
161 ret = request_irq(res2->start, c67x00_irq, 0, pdev->name, c67x00);
162 if (ret) {
163 dev_err(&pdev->dev, "Cannot claim IRQ\n");
164 goto request_irq_failed;
165 }
166
167 ret = c67x00_ll_reset(c67x00);
168 if (ret) {
169 dev_err(&pdev->dev, "Device reset failed\n");
170 goto reset_failed;
171 }
172
173 for (i = 0; i < C67X00_SIES; i++)
174 c67x00_probe_sie(&c67x00->sie[i], c67x00, i);
175
176 platform_set_drvdata(pdev, c67x00);
177
178 return 0;
179
180 reset_failed:
181 free_irq(res2->start, c67x00);
182 request_irq_failed:
183 iounmap(c67x00->hpi.base);
184 map_failed:
185 release_mem_region(res->start, res->end - res->start + 1);
186 request_mem_failed:
187 kfree(c67x00);
188
189 return ret;
190}
191
192static int __devexit c67x00_drv_remove(struct platform_device *pdev)
193{
194 struct c67x00_device *c67x00 = platform_get_drvdata(pdev);
195 struct resource *res;
196 int i;
197
198 for (i = 0; i < C67X00_SIES; i++)
199 c67x00_remove_sie(&c67x00->sie[i]);
200
201 c67x00_ll_release(c67x00);
202
203 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
204 if (res)
205 free_irq(res->start, c67x00);
206
207 iounmap(c67x00->hpi.base);
208
209 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
210 if (res)
211 release_mem_region(res->start, res->end - res->start + 1);
212
213 kfree(c67x00);
214
215 return 0;
216}
217
218static struct platform_driver c67x00_driver = {
219 .probe = c67x00_drv_probe,
220 .remove = __devexit_p(c67x00_drv_remove),
221 .driver = {
222 .owner = THIS_MODULE,
223 .name = "c67x00",
224 },
225};
226MODULE_ALIAS("platform:c67x00");
227
228static int __init c67x00_init(void)
229{
230 return platform_driver_register(&c67x00_driver);
231}
232
233static void __exit c67x00_exit(void)
234{
235 platform_driver_unregister(&c67x00_driver);
236}
237
238module_init(c67x00_init);
239module_exit(c67x00_exit);
240
241MODULE_AUTHOR("Peter Korsgaard, Jan Veldeman, Grant Likely");
242MODULE_DESCRIPTION("Cypress C67X00 USB Controller Driver");
243MODULE_LICENSE("GPL");
diff --git a/drivers/usb/c67x00/c67x00-hcd.c b/drivers/usb/c67x00/c67x00-hcd.c
new file mode 100644
index 000000000000..a22b887f4e9e
--- /dev/null
+++ b/drivers/usb/c67x00/c67x00-hcd.c
@@ -0,0 +1,412 @@
1/*
2 * c67x00-hcd.c: Cypress C67X00 USB Host Controller Driver
3 *
4 * Copyright (C) 2006-2008 Barco N.V.
5 * Derived from the Cypress cy7c67200/300 ezusb linux driver and
6 * based on multiple host controller drivers inside the linux kernel.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
21 * MA 02110-1301 USA.
22 */
23
24#include <linux/device.h>
25#include <linux/platform_device.h>
26#include <linux/usb.h>
27
28#include "c67x00.h"
29#include "c67x00-hcd.h"
30
31/* --------------------------------------------------------------------------
32 * Root Hub Support
33 */
34
35static __u8 c67x00_hub_des[] = {
36 0x09, /* __u8 bLength; */
37 0x29, /* __u8 bDescriptorType; Hub-descriptor */
38 0x02, /* __u8 bNbrPorts; */
39 0x00, /* __u16 wHubCharacteristics; */
40 0x00, /* (per-port OC, no power switching) */
41 0x32, /* __u8 bPwrOn2pwrGood; 2ms */
42 0x00, /* __u8 bHubContrCurrent; 0 mA */
43 0x00, /* __u8 DeviceRemovable; ** 7 Ports max ** */
44 0xff, /* __u8 PortPwrCtrlMask; ** 7 ports max ** */
45};
46
47static void c67x00_hub_reset_host_port(struct c67x00_sie *sie, int port)
48{
49 struct c67x00_hcd *c67x00 = sie->private_data;
50 unsigned long flags;
51
52 c67x00_ll_husb_reset(sie, port);
53
54 spin_lock_irqsave(&c67x00->lock, flags);
55 c67x00_ll_husb_reset_port(sie, port);
56 spin_unlock_irqrestore(&c67x00->lock, flags);
57
58 c67x00_ll_set_husb_eot(sie->dev, DEFAULT_EOT);
59}
60
61static int c67x00_hub_status_data(struct usb_hcd *hcd, char *buf)
62{
63 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
64 struct c67x00_sie *sie = c67x00->sie;
65 u16 status;
66 int i;
67
68 *buf = 0;
69 status = c67x00_ll_usb_get_status(sie);
70 for (i = 0; i < C67X00_PORTS; i++)
71 if (status & PORT_CONNECT_CHANGE(i))
72 *buf |= (1 << i);
73
74 /* bit 0 denotes hub change, b1..n port change */
75 *buf <<= 1;
76
77 return !!*buf;
78}
79
80static int c67x00_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
81 u16 wIndex, char *buf, u16 wLength)
82{
83 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
84 struct c67x00_sie *sie = c67x00->sie;
85 u16 status, usb_status;
86 int len = 0;
87 unsigned int port = wIndex-1;
88 u16 wPortChange, wPortStatus;
89
90 switch (typeReq) {
91
92 case GetHubStatus:
93 *(__le32 *) buf = cpu_to_le32(0);
94 len = 4; /* hub power */
95 break;
96
97 case GetPortStatus:
98 if (wIndex > C67X00_PORTS)
99 return -EPIPE;
100
101 status = c67x00_ll_usb_get_status(sie);
102 usb_status = c67x00_ll_get_usb_ctl(sie);
103
104 wPortChange = 0;
105 if (status & PORT_CONNECT_CHANGE(port))
106 wPortChange |= USB_PORT_STAT_C_CONNECTION;
107
108 wPortStatus = USB_PORT_STAT_POWER;
109 if (!(status & PORT_SE0_STATUS(port)))
110 wPortStatus |= USB_PORT_STAT_CONNECTION;
111 if (usb_status & LOW_SPEED_PORT(port)) {
112 wPortStatus |= USB_PORT_STAT_LOW_SPEED;
113 c67x00->low_speed_ports |= (1 << port);
114 } else
115 c67x00->low_speed_ports &= ~(1 << port);
116
117 if (usb_status & SOF_EOP_EN(port))
118 wPortStatus |= USB_PORT_STAT_ENABLE;
119
120 *(__le16 *) buf = cpu_to_le16(wPortStatus);
121 *(__le16 *) (buf + 2) = cpu_to_le16(wPortChange);
122 len = 4;
123 break;
124
125 case SetHubFeature: /* We don't implement these */
126 case ClearHubFeature:
127 switch (wValue) {
128 case C_HUB_OVER_CURRENT:
129 case C_HUB_LOCAL_POWER:
130 len = 0;
131 break;
132
133 default:
134 return -EPIPE;
135 }
136 break;
137
138 case SetPortFeature:
139 if (wIndex > C67X00_PORTS)
140 return -EPIPE;
141
142 switch (wValue) {
143 case USB_PORT_FEAT_SUSPEND:
144 dev_dbg(c67x00_hcd_dev(c67x00),
145 "SetPortFeature %d (SUSPEND)\n", port);
146 len = 0;
147 break;
148
149 case USB_PORT_FEAT_RESET:
150 c67x00_hub_reset_host_port(sie, port);
151 len = 0;
152 break;
153
154 case USB_PORT_FEAT_POWER:
155 /* Power always enabled */
156 len = 0;
157 break;
158
159 default:
160 dev_dbg(c67x00_hcd_dev(c67x00),
161 "%s: SetPortFeature %d (0x%04x) Error!\n",
162 __func__, port, wValue);
163 return -EPIPE;
164 }
165 break;
166
167 case ClearPortFeature:
168 if (wIndex > C67X00_PORTS)
169 return -EPIPE;
170
171 switch (wValue) {
172 case USB_PORT_FEAT_ENABLE:
173 /* Reset the port so that the c67x00 also notices the
174 * disconnect */
175 c67x00_hub_reset_host_port(sie, port);
176 len = 0;
177 break;
178
179 case USB_PORT_FEAT_C_ENABLE:
180 dev_dbg(c67x00_hcd_dev(c67x00),
181 "ClearPortFeature (%d): C_ENABLE\n", port);
182 len = 0;
183 break;
184
185 case USB_PORT_FEAT_SUSPEND:
186 dev_dbg(c67x00_hcd_dev(c67x00),
187 "ClearPortFeature (%d): SUSPEND\n", port);
188 len = 0;
189 break;
190
191 case USB_PORT_FEAT_C_SUSPEND:
192 dev_dbg(c67x00_hcd_dev(c67x00),
193 "ClearPortFeature (%d): C_SUSPEND\n", port);
194 len = 0;
195 break;
196
197 case USB_PORT_FEAT_POWER:
198 dev_dbg(c67x00_hcd_dev(c67x00),
199 "ClearPortFeature (%d): POWER\n", port);
200 return -EPIPE;
201
202 case USB_PORT_FEAT_C_CONNECTION:
203 c67x00_ll_usb_clear_status(sie,
204 PORT_CONNECT_CHANGE(port));
205 len = 0;
206 break;
207
208 case USB_PORT_FEAT_C_OVER_CURRENT:
209 dev_dbg(c67x00_hcd_dev(c67x00),
210 "ClearPortFeature (%d): OVER_CURRENT\n", port);
211 len = 0;
212 break;
213
214 case USB_PORT_FEAT_C_RESET:
215 dev_dbg(c67x00_hcd_dev(c67x00),
216 "ClearPortFeature (%d): C_RESET\n", port);
217 len = 0;
218 break;
219
220 default:
221 dev_dbg(c67x00_hcd_dev(c67x00),
222 "%s: ClearPortFeature %d (0x%04x) Error!\n",
223 __func__, port, wValue);
224 return -EPIPE;
225 }
226 break;
227
228 case GetHubDescriptor:
229 len = min_t(unsigned int, sizeof(c67x00_hub_des), wLength);
230 memcpy(buf, c67x00_hub_des, len);
231 break;
232
233 default:
234 dev_dbg(c67x00_hcd_dev(c67x00), "%s: unknown\n", __func__);
235 return -EPIPE;
236 }
237
238 return 0;
239}
240
241/* ---------------------------------------------------------------------
242 * Main part of host controller driver
243 */
244
245/**
246 * c67x00_hcd_irq
247 *
248 * This function is called from the interrupt handler in c67x00-drv.c
249 */
250static void c67x00_hcd_irq(struct c67x00_sie *sie, u16 int_status, u16 msg)
251{
252 struct c67x00_hcd *c67x00 = sie->private_data;
253 struct usb_hcd *hcd = c67x00_hcd_to_hcd(c67x00);
254
255 /* Handle sie message flags */
256 if (msg) {
257 if (msg & HUSB_TDListDone)
258 c67x00_sched_kick(c67x00);
259 else
260 dev_warn(c67x00_hcd_dev(c67x00),
261 "Unknown SIE msg flag(s): 0x%04x\n", msg);
262 }
263
264 if (unlikely(hcd->state == HC_STATE_HALT))
265 return;
266
267 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
268 return;
269
270 /* Handle Start of frame events */
271 if (int_status & SOFEOP_FLG(sie->sie_num)) {
272 c67x00_ll_usb_clear_status(sie, SOF_EOP_IRQ_FLG);
273 c67x00_sched_kick(c67x00);
274 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
275 }
276}
277
278/**
279 * c67x00_hcd_start: Host controller start hook
280 */
281static int c67x00_hcd_start(struct usb_hcd *hcd)
282{
283 hcd->uses_new_polling = 1;
284 hcd->state = HC_STATE_RUNNING;
285 hcd->poll_rh = 1;
286
287 return 0;
288}
289
290/**
291 * c67x00_hcd_stop: Host controller stop hook
292 */
293static void c67x00_hcd_stop(struct usb_hcd *hcd)
294{
295 /* Nothing to do */
296}
297
298static int c67x00_hcd_get_frame(struct usb_hcd *hcd)
299{
300 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
301 u16 temp_val;
302
303 dev_dbg(c67x00_hcd_dev(c67x00), "%s\n", __func__);
304 temp_val = c67x00_ll_husb_get_frame(c67x00->sie);
305 temp_val &= HOST_FRAME_MASK;
306 return temp_val ? (temp_val - 1) : HOST_FRAME_MASK;
307}
308
309static struct hc_driver c67x00_hc_driver = {
310 .description = "c67x00-hcd",
311 .product_desc = "Cypress C67X00 Host Controller",
312 .hcd_priv_size = sizeof(struct c67x00_hcd),
313 .flags = HCD_USB11 | HCD_MEMORY,
314
315 /*
316 * basic lifecycle operations
317 */
318 .start = c67x00_hcd_start,
319 .stop = c67x00_hcd_stop,
320
321 /*
322 * managing i/o requests and associated device resources
323 */
324 .urb_enqueue = c67x00_urb_enqueue,
325 .urb_dequeue = c67x00_urb_dequeue,
326 .endpoint_disable = c67x00_endpoint_disable,
327
328 /*
329 * scheduling support
330 */
331 .get_frame_number = c67x00_hcd_get_frame,
332
333 /*
334 * root hub support
335 */
336 .hub_status_data = c67x00_hub_status_data,
337 .hub_control = c67x00_hub_control,
338};
339
340/* ---------------------------------------------------------------------
341 * Setup/Teardown routines
342 */
343
344int c67x00_hcd_probe(struct c67x00_sie *sie)
345{
346 struct c67x00_hcd *c67x00;
347 struct usb_hcd *hcd;
348 unsigned long flags;
349 int retval;
350
351 if (usb_disabled())
352 return -ENODEV;
353
354 hcd = usb_create_hcd(&c67x00_hc_driver, sie_dev(sie), "c67x00_sie");
355 if (!hcd) {
356 retval = -ENOMEM;
357 goto err0;
358 }
359 c67x00 = hcd_to_c67x00_hcd(hcd);
360
361 spin_lock_init(&c67x00->lock);
362 c67x00->sie = sie;
363
364 INIT_LIST_HEAD(&c67x00->list[PIPE_ISOCHRONOUS]);
365 INIT_LIST_HEAD(&c67x00->list[PIPE_INTERRUPT]);
366 INIT_LIST_HEAD(&c67x00->list[PIPE_CONTROL]);
367 INIT_LIST_HEAD(&c67x00->list[PIPE_BULK]);
368 c67x00->urb_count = 0;
369 INIT_LIST_HEAD(&c67x00->td_list);
370 c67x00->td_base_addr = CY_HCD_BUF_ADDR + SIE_TD_OFFSET(sie->sie_num);
371 c67x00->buf_base_addr = CY_HCD_BUF_ADDR + SIE_BUF_OFFSET(sie->sie_num);
372 c67x00->max_frame_bw = MAX_FRAME_BW_STD;
373
374 c67x00_ll_husb_init_host_port(sie);
375
376 init_completion(&c67x00->endpoint_disable);
377 retval = c67x00_sched_start_scheduler(c67x00);
378 if (retval)
379 goto err1;
380
381 retval = usb_add_hcd(hcd, 0, 0);
382 if (retval) {
383 dev_dbg(sie_dev(sie), "%s: usb_add_hcd returned %d\n",
384 __func__, retval);
385 goto err2;
386 }
387
388 spin_lock_irqsave(&sie->lock, flags);
389 sie->private_data = c67x00;
390 sie->irq = c67x00_hcd_irq;
391 spin_unlock_irqrestore(&sie->lock, flags);
392
393 return retval;
394
395 err2:
396 c67x00_sched_stop_scheduler(c67x00);
397 err1:
398 usb_put_hcd(hcd);
399 err0:
400 return retval;
401}
402
403/* may be called with controller, bus, and devices active */
404void c67x00_hcd_remove(struct c67x00_sie *sie)
405{
406 struct c67x00_hcd *c67x00 = sie->private_data;
407 struct usb_hcd *hcd = c67x00_hcd_to_hcd(c67x00);
408
409 c67x00_sched_stop_scheduler(c67x00);
410 usb_remove_hcd(hcd);
411 usb_put_hcd(hcd);
412}
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
new file mode 100644
index 000000000000..e8c6d94b2514
--- /dev/null
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -0,0 +1,133 @@
1/*
2 * c67x00-hcd.h: Cypress C67X00 USB HCD
3 *
4 * Copyright (C) 2006-2008 Barco N.V.
5 * Derived from the Cypress cy7c67200/300 ezusb linux driver and
6 * based on multiple host controller drivers inside the linux kernel.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
21 * MA 02110-1301 USA.
22 */
23
24#ifndef _USB_C67X00_HCD_H
25#define _USB_C67X00_HCD_H
26
27#include <linux/kernel.h>
28#include <linux/spinlock.h>
29#include <linux/list.h>
30#include <linux/usb.h>
31#include "../core/hcd.h"
32#include "c67x00.h"
33
34/*
35 * The following parameters depend on the CPU speed, bus speed, ...
36 * These can be tuned for specific use cases, e.g. if isochronous transfers
37 * are very important, bandwidth can be sacrificed to guarantee that the
38 * 1ms deadline will be met.
39 * If bulk transfers are important, the MAX_FRAME_BW can be increased,
40 * but some (or many) isochronous deadlines might not be met.
41 *
42 * The values are specified in bittime.
43 */
44
45/*
46 * The current implementation switches between _STD (default) and _ISO (when
47 * isochronous transfers are scheduled), in order to optimize the throughput
48 * in normal circumstances, but also provide good isochronous behaviour.
49 *
50 * Bandwidth is described in bit time, so with a 12MHz USB clock and 1ms
51 * frames there are 12000 bit times per frame.
52 */
53
54#define TOTAL_FRAME_BW 12000
55#define DEFAULT_EOT 2250
56
57#define MAX_FRAME_BW_STD (TOTAL_FRAME_BW - DEFAULT_EOT)
58#define MAX_FRAME_BW_ISO 2400
59
60/*
61 * Periodic transfers may only use 90% of the full frame, but as
62 * we currently don't even use 90% of the full frame, we may
63 * use the full usable time for periodic transfers.
64 */
65#define MAX_PERIODIC_BW(full_bw) full_bw
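/*
 * Worked numbers for the constants above (derived from the definitions, not
 * from the original header): MAX_FRAME_BW_STD = 12000 - 2250 = 9750 bit times
 * may be scheduled in a normal 1ms frame, while MAX_FRAME_BW_ISO caps the
 * total scheduled work at 2400 bit times per frame as soon as isochronous
 * URBs are queued, trading bulk throughput for a predictable isochronous
 * deadline.
 */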
66
67/* -------------------------------------------------------------------------- */
68
69struct c67x00_hcd {
70 spinlock_t lock;
71 struct c67x00_sie *sie;
72 unsigned int low_speed_ports; /* bitmask of low speed ports */
73 unsigned int urb_count;
74 unsigned int urb_iso_count;
75
76 struct list_head list[4]; /* iso, int, ctrl, bulk */
77#if PIPE_BULK != 3
78#error "Sanity check failed, this code presumes PIPE_... to range from 0 to 3"
79#endif
80
81 /* USB bandwidth allocated to td_list */
82 int bandwidth_allocated;
83 /* USB bandwidth allocated for isoc/int transfer */
84 int periodic_bw_allocated;
85 struct list_head td_list;
86 int max_frame_bw;
87
88 u16 td_base_addr;
89 u16 buf_base_addr;
90 u16 next_td_addr;
91 u16 next_buf_addr;
92
93 struct tasklet_struct tasklet;
94
95 struct completion endpoint_disable;
96
97 u16 current_frame;
98 u16 last_frame;
99};
100
101static inline struct c67x00_hcd *hcd_to_c67x00_hcd(struct usb_hcd *hcd)
102{
103 return (struct c67x00_hcd *)(hcd->hcd_priv);
104}
105
106static inline struct usb_hcd *c67x00_hcd_to_hcd(struct c67x00_hcd *c67x00)
107{
108 return container_of((void *)c67x00, struct usb_hcd, hcd_priv);
109}
110
111/* ---------------------------------------------------------------------
112 * Functions used by c67x00-drv
113 */
114
115int c67x00_hcd_probe(struct c67x00_sie *sie);
116void c67x00_hcd_remove(struct c67x00_sie *sie);
117
118/* ---------------------------------------------------------------------
119 * Transfer Descriptor scheduling functions
120 */
121int c67x00_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
122int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
123void c67x00_endpoint_disable(struct usb_hcd *hcd,
124 struct usb_host_endpoint *ep);
125
126void c67x00_hcd_msg_received(struct c67x00_sie *sie, u16 msg);
127void c67x00_sched_kick(struct c67x00_hcd *c67x00);
128int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00);
129void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00);
130
131#define c67x00_hcd_dev(x) (c67x00_hcd_to_hcd(x)->self.controller)
132
133#endif /* _USB_C67X00_HCD_H */
diff --git a/drivers/usb/c67x00/c67x00-ll-hpi.c b/drivers/usb/c67x00/c67x00-ll-hpi.c
new file mode 100644
index 000000000000..f3430b372f09
--- /dev/null
+++ b/drivers/usb/c67x00/c67x00-ll-hpi.c
@@ -0,0 +1,480 @@
1/*
2 * c67x00-ll-hpi.c: Cypress C67X00 USB Low level interface using HPI
3 *
4 * Copyright (C) 2006-2008 Barco N.V.
5 * Derived from the Cypress cy7c67200/300 ezusb linux driver and
6 * based on multiple host controller drivers inside the linux kernel.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
21 * MA 02110-1301 USA.
22 */
23
24#include <asm/byteorder.h>
25#include <linux/io.h>
26#include <linux/usb/c67x00.h>
27#include "c67x00.h"
28
29#define COMM_REGS 14
30
31struct c67x00_lcp_int_data {
32 u16 regs[COMM_REGS];
33};
34
35/* -------------------------------------------------------------------------- */
36/* Interface definitions */
37
38#define COMM_ACK 0x0FED
39#define COMM_NAK 0xDEAD
40
41#define COMM_RESET 0xFA50
42#define COMM_EXEC_INT 0xCE01
43#define COMM_INT_NUM 0x01C2
44
45/* Registers 0 to COMM_REGS-1 */
46#define COMM_R(x) (0x01C4 + 2 * (x))
47
48#define HUSB_SIE_pCurrentTDPtr(x) ((x) ? 0x01B2 : 0x01B0)
49#define HUSB_SIE_pTDListDone_Sem(x) ((x) ? 0x01B8 : 0x01B6)
50#define HUSB_pEOT 0x01B4
51
52/* Software interrupts */
53/* 114, 115: */
54#define HUSB_SIE_INIT_INT(x) ((x) ? 0x0073 : 0x0072)
55#define HUSB_RESET_INT 0x0074
56
57#define SUSB_INIT_INT 0x0071
58#define SUSB_INIT_INT_LOC (SUSB_INIT_INT * 2)
59
60/* -----------------------------------------------------------------------
61 * HPI implementation
62 *
63 * The c67x00 chip also supports control via SPI or HSS serial
64 * interfaces. However, this driver assumes that register access can
65 * be performed from IRQ context. While this is a safe assumption with
66 * the HPI interface, it is not true for the serial interfaces.
67 */
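/*
 * Access pattern in a nutshell (illustrative; the regstep value is only an
 * example): the four HPI registers live at base + reg * hpi_regstep, so with
 * hpi_regstep = 4 they sit at base+0 (DATA), base+4 (MAILBOX), base+8 (ADDR)
 * and base+12 (STATUS).  Reading a 16-bit word from c67x00-internal address
 * A is then a two-step sequence, done under hpi.lock:
 *
 *	__raw_writew(A, base + HPI_ADDR * regstep);
 *	val = __raw_readw(base + HPI_DATA * regstep);
 *
 * The helpers below wrap exactly this and add locking and block transfers.
 */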
68
69/* HPI registers */
70#define HPI_DATA 0
71#define HPI_MAILBOX 1
72#define HPI_ADDR 2
73#define HPI_STATUS 3
74
75static inline u16 hpi_read_reg(struct c67x00_device *dev, int reg)
76{
77 return __raw_readw(dev->hpi.base + reg * dev->hpi.regstep);
78}
79
80static inline void hpi_write_reg(struct c67x00_device *dev, int reg, u16 value)
81{
82 __raw_writew(value, dev->hpi.base + reg * dev->hpi.regstep);
83}
84
85static inline u16 hpi_read_word_nolock(struct c67x00_device *dev, u16 reg)
86{
87 hpi_write_reg(dev, HPI_ADDR, reg);
88 return hpi_read_reg(dev, HPI_DATA);
89}
90
91static u16 hpi_read_word(struct c67x00_device *dev, u16 reg)
92{
93 u16 value;
94 unsigned long flags;
95
96 spin_lock_irqsave(&dev->hpi.lock, flags);
97 value = hpi_read_word_nolock(dev, reg);
98 spin_unlock_irqrestore(&dev->hpi.lock, flags);
99
100 return value;
101}
102
103static void hpi_write_word_nolock(struct c67x00_device *dev, u16 reg, u16 value)
104{
105 hpi_write_reg(dev, HPI_ADDR, reg);
106 hpi_write_reg(dev, HPI_DATA, value);
107}
108
109static void hpi_write_word(struct c67x00_device *dev, u16 reg, u16 value)
110{
111 unsigned long flags;
112
113 spin_lock_irqsave(&dev->hpi.lock, flags);
114 hpi_write_word_nolock(dev, reg, value);
115 spin_unlock_irqrestore(&dev->hpi.lock, flags);
116}
117
118/*
119 * Only data is little endian, addr has cpu endianness
120 */
121static void hpi_write_words_le16(struct c67x00_device *dev, u16 addr,
122 u16 *data, u16 count)
123{
124 unsigned long flags;
125 int i;
126
127 spin_lock_irqsave(&dev->hpi.lock, flags);
128
129 hpi_write_reg(dev, HPI_ADDR, addr);
130 for (i = 0; i < count; i++)
131 hpi_write_reg(dev, HPI_DATA, cpu_to_le16(*data++));
132
133 spin_unlock_irqrestore(&dev->hpi.lock, flags);
134}
135
136/*
137 * Only data is little endian, addr has cpu endianness
138 */
139static void hpi_read_words_le16(struct c67x00_device *dev, u16 addr,
140 u16 *data, u16 count)
141{
142 unsigned long flags;
143 int i;
144
145 spin_lock_irqsave(&dev->hpi.lock, flags);
146 hpi_write_reg(dev, HPI_ADDR, addr);
147 for (i = 0; i < count; i++)
148 *data++ = le16_to_cpu(hpi_read_reg(dev, HPI_DATA));
149
150 spin_unlock_irqrestore(&dev->hpi.lock, flags);
151}
152
153static void hpi_set_bits(struct c67x00_device *dev, u16 reg, u16 mask)
154{
155 u16 value;
156 unsigned long flags;
157
158 spin_lock_irqsave(&dev->hpi.lock, flags);
159 value = hpi_read_word_nolock(dev, reg);
160 hpi_write_word_nolock(dev, reg, value | mask);
161 spin_unlock_irqrestore(&dev->hpi.lock, flags);
162}
163
164static void hpi_clear_bits(struct c67x00_device *dev, u16 reg, u16 mask)
165{
166 u16 value;
167 unsigned long flags;
168
169 spin_lock_irqsave(&dev->hpi.lock, flags);
170 value = hpi_read_word_nolock(dev, reg);
171 hpi_write_word_nolock(dev, reg, value & ~mask);
172 spin_unlock_irqrestore(&dev->hpi.lock, flags);
173}
174
175static u16 hpi_recv_mbox(struct c67x00_device *dev)
176{
177 u16 value;
178 unsigned long flags;
179
180 spin_lock_irqsave(&dev->hpi.lock, flags);
181 value = hpi_read_reg(dev, HPI_MAILBOX);
182 spin_unlock_irqrestore(&dev->hpi.lock, flags);
183
184 return value;
185}
186
187static u16 hpi_send_mbox(struct c67x00_device *dev, u16 value)
188{
189 unsigned long flags;
190
191 spin_lock_irqsave(&dev->hpi.lock, flags);
192 hpi_write_reg(dev, HPI_MAILBOX, value);
193 spin_unlock_irqrestore(&dev->hpi.lock, flags);
194
195 return value;
196}
197
198u16 c67x00_ll_hpi_status(struct c67x00_device *dev)
199{
200 u16 value;
201 unsigned long flags;
202
203 spin_lock_irqsave(&dev->hpi.lock, flags);
204 value = hpi_read_reg(dev, HPI_STATUS);
205 spin_unlock_irqrestore(&dev->hpi.lock, flags);
206
207 return value;
208}
209
210void c67x00_ll_hpi_reg_init(struct c67x00_device *dev)
211{
212 int i;
213
214 hpi_recv_mbox(dev);
215 c67x00_ll_hpi_status(dev);
216 hpi_write_word(dev, HPI_IRQ_ROUTING_REG, 0);
217
218 for (i = 0; i < C67X00_SIES; i++) {
219 hpi_write_word(dev, SIEMSG_REG(i), 0);
220 hpi_read_word(dev, SIEMSG_REG(i));
221 }
222}
223
224void c67x00_ll_hpi_enable_sofeop(struct c67x00_sie *sie)
225{
226 hpi_set_bits(sie->dev, HPI_IRQ_ROUTING_REG,
227 SOFEOP_TO_HPI_EN(sie->sie_num));
228}
229
230void c67x00_ll_hpi_disable_sofeop(struct c67x00_sie *sie)
231{
232 hpi_clear_bits(sie->dev, HPI_IRQ_ROUTING_REG,
233 SOFEOP_TO_HPI_EN(sie->sie_num));
234}
235
236/* -------------------------------------------------------------------------- */
237/* Transactions */
238
239static inline int ll_recv_msg(struct c67x00_device *dev)
240{
241 u16 res;
242
243 res = wait_for_completion_timeout(&dev->hpi.lcp.msg_received, 5 * HZ);
244 WARN_ON(!res);
245
246 return (res == 0) ? -EIO : 0;
247}
248
249/* -------------------------------------------------------------------------- */
250/* General functions */
251
252u16 c67x00_ll_fetch_siemsg(struct c67x00_device *dev, int sie_num)
253{
254 u16 val;
255
256 val = hpi_read_word(dev, SIEMSG_REG(sie_num));
257 /* clear register to allow next message */
258 hpi_write_word(dev, SIEMSG_REG(sie_num), 0);
259
260 return val;
261}
262
263u16 c67x00_ll_get_usb_ctl(struct c67x00_sie *sie)
264{
265 return hpi_read_word(sie->dev, USB_CTL_REG(sie->sie_num));
266}
267
268/**
269 * c67x00_ll_usb_clear_status - clear the USB status bits
270 */
271void c67x00_ll_usb_clear_status(struct c67x00_sie *sie, u16 bits)
272{
273 hpi_write_word(sie->dev, USB_STAT_REG(sie->sie_num), bits);
274}
275
276u16 c67x00_ll_usb_get_status(struct c67x00_sie *sie)
277{
278 return hpi_read_word(sie->dev, USB_STAT_REG(sie->sie_num));
279}
280
281/* -------------------------------------------------------------------------- */
282
283static int c67x00_comm_exec_int(struct c67x00_device *dev, u16 nr,
284 struct c67x00_lcp_int_data *data)
285{
286 int i, rc;
287
288 mutex_lock(&dev->hpi.lcp.mutex);
289 hpi_write_word(dev, COMM_INT_NUM, nr);
290 for (i = 0; i < COMM_REGS; i++)
291 hpi_write_word(dev, COMM_R(i), data->regs[i]);
292 hpi_send_mbox(dev, COMM_EXEC_INT);
293 rc = ll_recv_msg(dev);
294 mutex_unlock(&dev->hpi.lcp.mutex);
295
296 return rc;
297}
298
299/* -------------------------------------------------------------------------- */
300/* Host specific functions */
301
302void c67x00_ll_set_husb_eot(struct c67x00_device *dev, u16 value)
303{
304 mutex_lock(&dev->hpi.lcp.mutex);
305 hpi_write_word(dev, HUSB_pEOT, value);
306 mutex_unlock(&dev->hpi.lcp.mutex);
307}
308
309static inline void c67x00_ll_husb_sie_init(struct c67x00_sie *sie)
310{
311 struct c67x00_device *dev = sie->dev;
312 struct c67x00_lcp_int_data data;
313 int rc;
314
315 rc = c67x00_comm_exec_int(dev, HUSB_SIE_INIT_INT(sie->sie_num), &data);
316 BUG_ON(rc); /* No return path for error code; crash spectacularly */
317}
318
319void c67x00_ll_husb_reset(struct c67x00_sie *sie, int port)
320{
321 struct c67x00_device *dev = sie->dev;
322 struct c67x00_lcp_int_data data;
323 int rc;
324
325 data.regs[0] = 50; /* Reset USB port for 50ms */
326 data.regs[1] = port | (sie->sie_num << 1);
327 rc = c67x00_comm_exec_int(dev, HUSB_RESET_INT, &data);
328 BUG_ON(rc); /* No return path for error code; crash spectacularly */
329}
330
331void c67x00_ll_husb_set_current_td(struct c67x00_sie *sie, u16 addr)
332{
333 hpi_write_word(sie->dev, HUSB_SIE_pCurrentTDPtr(sie->sie_num), addr);
334}
335
336u16 c67x00_ll_husb_get_current_td(struct c67x00_sie *sie)
337{
338 return hpi_read_word(sie->dev, HUSB_SIE_pCurrentTDPtr(sie->sie_num));
339}
340
341u16 c67x00_ll_husb_get_frame(struct c67x00_sie *sie)
342{
343 return hpi_read_word(sie->dev, HOST_FRAME_REG(sie->sie_num));
344}
345
346void c67x00_ll_husb_init_host_port(struct c67x00_sie *sie)
347{
348 /* Set port into host mode */
349 hpi_set_bits(sie->dev, USB_CTL_REG(sie->sie_num), HOST_MODE);
350 c67x00_ll_husb_sie_init(sie);
351 /* Clear interrupts */
352 c67x00_ll_usb_clear_status(sie, HOST_STAT_MASK);
353 /* Check */
354 if (!(hpi_read_word(sie->dev, USB_CTL_REG(sie->sie_num)) & HOST_MODE))
355 dev_warn(sie_dev(sie),
356 "SIE %d not set to host mode\n", sie->sie_num);
357}
358
359void c67x00_ll_husb_reset_port(struct c67x00_sie *sie, int port)
360{
361 /* Clear connect change */
362 c67x00_ll_usb_clear_status(sie, PORT_CONNECT_CHANGE(port));
363
364 /* Enable interrupts */
365 hpi_set_bits(sie->dev, HPI_IRQ_ROUTING_REG,
366 SOFEOP_TO_CPU_EN(sie->sie_num));
367 hpi_set_bits(sie->dev, HOST_IRQ_EN_REG(sie->sie_num),
368 SOF_EOP_IRQ_EN | DONE_IRQ_EN);
369
370 /* Enable pull down transistors */
371 hpi_set_bits(sie->dev, USB_CTL_REG(sie->sie_num), PORT_RES_EN(port));
372}
373
374/* -------------------------------------------------------------------------- */
375
376void c67x00_ll_irq(struct c67x00_device *dev, u16 int_status)
377{
378 if ((int_status & MBX_OUT_FLG) == 0)
379 return;
380
381 dev->hpi.lcp.last_msg = hpi_recv_mbox(dev);
382 complete(&dev->hpi.lcp.msg_received);
383}
384
385/* -------------------------------------------------------------------------- */
386
387int c67x00_ll_reset(struct c67x00_device *dev)
388{
389 int rc;
390
391 mutex_lock(&dev->hpi.lcp.mutex);
392 hpi_send_mbox(dev, COMM_RESET);
393 rc = ll_recv_msg(dev);
394 mutex_unlock(&dev->hpi.lcp.mutex);
395
396 return rc;
397}
398
399/* -------------------------------------------------------------------------- */
400
401/**
402 * c67x00_ll_write_mem_le16 - write into c67x00 memory
403 * Only data is little endian, addr has cpu endianness.
404 */
405void c67x00_ll_write_mem_le16(struct c67x00_device *dev, u16 addr,
406 void *data, int len)
407{
408 u8 *buf = data;
409
410 /* Sanity check */
411 if (addr + len > 0xffff) {
412 dev_err(&dev->pdev->dev,
413 "Trying to write beyond writable region!\n");
414 return;
415 }
416
417 if (addr & 0x01) {
418 /* unaligned access */
419 u16 tmp;
420 tmp = hpi_read_word(dev, addr - 1);
421 tmp = (tmp & 0x00ff) | (*buf++ << 8);
422 hpi_write_word(dev, addr - 1, tmp);
423 addr++;
424 len--;
425 }
426
427 hpi_write_words_le16(dev, addr, (u16 *)buf, len / 2);
428 buf += len & ~0x01;
429 addr += len & ~0x01;
430 len &= 0x01;
431
432 if (len) {
433 u16 tmp;
434 tmp = hpi_read_word(dev, addr);
435 tmp = (tmp & 0xff00) | *buf;
436 hpi_write_word(dev, addr, tmp);
437 }
438}
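/*
 * Worked example for the splitting above (not from the original source):
 * writing 6 bytes to the odd address 0x0231 becomes a read-modify-write of
 * the word at 0x0230 (patching only its high byte), a two-word burst via
 * hpi_write_words_le16() covering 0x0232-0x0235, and a final
 * read-modify-write of the word at 0x0236 for the trailing byte.
 */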
439
440/**
441 * c67x00_ll_read_mem_le16 - read from c67x00 memory
442 * Only data is little endian, addr has cpu endianness.
443 */
444void c67x00_ll_read_mem_le16(struct c67x00_device *dev, u16 addr,
445 void *data, int len)
446{
447 u8 *buf = data;
448
449 if (addr & 0x01) {
450 /* unaligned access */
451 u16 tmp;
452 tmp = hpi_read_word(dev, addr - 1);
453 *buf++ = (tmp >> 8) & 0x00ff;
454 addr++;
455 len--;
456 }
457
458 hpi_read_words_le16(dev, addr, (u16 *)buf, len / 2);
459 buf += len & ~0x01;
460 addr += len & ~0x01;
461 len &= 0x01;
462
463 if (len) {
464 u16 tmp;
465 tmp = hpi_read_word(dev, addr);
466 *buf = tmp & 0x00ff;
467 }
468}
469
470/* -------------------------------------------------------------------------- */
471
472void c67x00_ll_init(struct c67x00_device *dev)
473{
474 mutex_init(&dev->hpi.lcp.mutex);
475 init_completion(&dev->hpi.lcp.msg_received);
476}
477
478void c67x00_ll_release(struct c67x00_device *dev)
479{
480}
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
new file mode 100644
index 000000000000..85dfe2965661
--- /dev/null
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -0,0 +1,1170 @@
1/*
2 * c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
3 *
4 * Copyright (C) 2006-2008 Barco N.V.
5 * Derived from the Cypress cy7c67200/300 ezusb linux driver and
6 * based on multiple host controller drivers inside the linux kernel.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
21 * MA 02110-1301 USA.
22 */
23
24#include <linux/kthread.h>
25
26#include "c67x00.h"
27#include "c67x00-hcd.h"
28
29/*
30 * These are the stages for a control urb; they are kept
31 * in both urb->interval and td->privdata.
32 */
33#define SETUP_STAGE 0
34#define DATA_STAGE 1
35#define STATUS_STAGE 2
36
37/* -------------------------------------------------------------------------- */
38
39/**
40 * struct c67x00_ep_data: Host endpoint data structure
41 */
42struct c67x00_ep_data {
43 struct list_head queue;
44 struct list_head node;
45 struct usb_host_endpoint *hep;
46 struct usb_device *dev;
47 u16 next_frame; /* For int/isoc transactions */
48};
49
50/**
51 * struct c67x00_td
52 *
53 * The hardware part is little endian, the SW part is in CPU endianness.
54 */
55struct c67x00_td {
56 /* HW specific part */
57 __le16 ly_base_addr; /* Bytes 0-1 */
58 __le16 port_length; /* Bytes 2-3 */
59 u8 pid_ep; /* Byte 4 */
60 u8 dev_addr; /* Byte 5 */
61 u8 ctrl_reg; /* Byte 6 */
62 u8 status; /* Byte 7 */
63 u8 retry_cnt; /* Byte 8 */
64#define TT_OFFSET 2
65#define TT_CONTROL 0
66#define TT_ISOCHRONOUS 1
67#define TT_BULK 2
68#define TT_INTERRUPT 3
69 u8 residue; /* Byte 9 */
70 __le16 next_td_addr; /* Bytes 10-11 */
71 /* SW part */
72 struct list_head td_list;
73 u16 td_addr;
74 void *data;
75 struct urb *urb;
76 unsigned long privdata;
77
78 /* These are needed for handling the toggle bits:
79 * an urb can be dequeued while a td is in progress;
80 * after checking the td, the toggle bit might need to
81 * be fixed */
82 struct c67x00_ep_data *ep_data;
83 unsigned int pipe;
84};
85
86struct c67x00_urb_priv {
87 struct list_head hep_node;
88 struct urb *urb;
89 int port;
90 int cnt; /* packet number for isoc */
91 int status;
92 struct c67x00_ep_data *ep_data;
93};
94
95#define td_udev(td) ((td)->ep_data->dev)
96
97#define CY_TD_SIZE 12
98
99#define TD_PIDEP_OFFSET 0x04
100#define TD_PIDEPMASK_PID 0xF0
101#define TD_PIDEPMASK_EP 0x0F
102#define TD_PORTLENMASK_DL 0x02FF
103#define TD_PORTLENMASK_PN 0xC000
104
105#define TD_STATUS_OFFSET 0x07
106#define TD_STATUSMASK_ACK 0x01
107#define TD_STATUSMASK_ERR 0x02
108#define TD_STATUSMASK_TMOUT 0x04
109#define TD_STATUSMASK_SEQ 0x08
110#define TD_STATUSMASK_SETUP 0x10
111#define TD_STATUSMASK_OVF 0x20
112#define TD_STATUSMASK_NAK 0x40
113#define TD_STATUSMASK_STALL 0x80
114
115#define TD_ERROR_MASK (TD_STATUSMASK_ERR | TD_STATUSMASK_TMOUT | \
116 TD_STATUSMASK_STALL)
117
118#define TD_RETRYCNT_OFFSET 0x08
119#define TD_RETRYCNTMASK_ACT_FLG 0x10
120#define TD_RETRYCNTMASK_TX_TYPE 0x0C
121#define TD_RETRYCNTMASK_RTY_CNT 0x03
122
123#define TD_RESIDUE_OVERFLOW 0x80
124
125#define TD_PID_IN 0x90
126
127/* Residue: signed 8bits, neg -> OVERFLOW, pos -> UNDERFLOW */
128#define td_residue(td) ((__s8)(td->residue))
129#define td_ly_base_addr(td) (__le16_to_cpu((td)->ly_base_addr))
130#define td_port_length(td) (__le16_to_cpu((td)->port_length))
131#define td_next_td_addr(td) (__le16_to_cpu((td)->next_td_addr))
132
133#define td_active(td) ((td)->retry_cnt & TD_RETRYCNTMASK_ACT_FLG)
134#define td_length(td) (td_port_length(td) & TD_PORTLENMASK_DL)
135
136#define td_sequence_ok(td) (!td->status || \
137 (!(td->status & TD_STATUSMASK_SEQ) == \
138 !(td->ctrl_reg & SEQ_SEL)))
139
140#define td_acked(td) (!td->status || \
141 (td->status & TD_STATUSMASK_ACK))
142#define td_actual_bytes(td) (td_length(td) - td_residue(td))
143
144/* -------------------------------------------------------------------------- */
145
146#ifdef DEBUG
147
148/**
149 * dbg_td - Dump the contents of the TD
150 */
151static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
152{
153 struct device *dev = c67x00_hcd_dev(c67x00);
154
155 dev_dbg(dev, "### %s at 0x%04x\n", msg, td->td_addr);
156 dev_dbg(dev, "urb: 0x%p\n", td->urb);
157 dev_dbg(dev, "endpoint: %4d\n", usb_pipeendpoint(td->pipe));
158 dev_dbg(dev, "pipeout: %4d\n", usb_pipeout(td->pipe));
159 dev_dbg(dev, "ly_base_addr: 0x%04x\n", td_ly_base_addr(td));
160 dev_dbg(dev, "port_length: 0x%04x\n", td_port_length(td));
161 dev_dbg(dev, "pid_ep: 0x%02x\n", td->pid_ep);
162 dev_dbg(dev, "dev_addr: 0x%02x\n", td->dev_addr);
163 dev_dbg(dev, "ctrl_reg: 0x%02x\n", td->ctrl_reg);
164 dev_dbg(dev, "status: 0x%02x\n", td->status);
165 dev_dbg(dev, "retry_cnt: 0x%02x\n", td->retry_cnt);
166 dev_dbg(dev, "residue: 0x%02x\n", td->residue);
167 dev_dbg(dev, "next_td_addr: 0x%04x\n", td_next_td_addr(td));
168 dev_dbg(dev, "data:");
169 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
170 td->data, td_length(td), 1);
171}
172#else /* DEBUG */
173
174static inline void
175dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg) { }
176
177#endif /* DEBUG */
178
179/* -------------------------------------------------------------------------- */
180/* Helper functions */
181
182static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
183{
184 return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK;
185}
186
187/**
188 * frame_add
189 * Software wraparound for frame numbers.
190 */
191static inline u16 frame_add(u16 a, u16 b)
192{
193 return (a + b) & HOST_FRAME_MASK;
194}
195
196/**
197 * frame_after - is frame a after frame b
198 */
199static inline int frame_after(u16 a, u16 b)
200{
201 return ((HOST_FRAME_MASK + a - b) & HOST_FRAME_MASK) <
202 (HOST_FRAME_MASK / 2);
203}
204
205/**
206 * frame_after_eq - is frame a after or equal to frame b
207 */
208static inline int frame_after_eq(u16 a, u16 b)
209{
210 return ((HOST_FRAME_MASK + 1 + a - b) & HOST_FRAME_MASK) <
211 (HOST_FRAME_MASK / 2);
212}
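/*
 * Wraparound example (assuming the usual 11-bit USB frame counter, i.e.
 * HOST_FRAME_MASK == 0x07ff): frame_after(0x0002, 0x07fe) is true, since
 * ((0x07ff + 0x0002 - 0x07fe) & 0x07ff) == 3 is well below half the counter
 * range; frame_after(a, a) is false, while frame_after_eq(a, a) is true
 * thanks to the extra "+ 1".
 */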
213
214/* -------------------------------------------------------------------------- */
215
216/**
217 * c67x00_release_urb - remove link from all tds to this urb
218 * Disconnects the urb from its tds, so that it can be given back.
219 * pre: urb->hcpriv != NULL
220 */
221static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
222{
223 struct c67x00_td *td;
224 struct c67x00_urb_priv *urbp;
225
226 BUG_ON(!urb);
227
228 c67x00->urb_count--;
229
230 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
231 c67x00->urb_iso_count--;
232 if (c67x00->urb_iso_count == 0)
233 c67x00->max_frame_bw = MAX_FRAME_BW_STD;
234 }
235
236 /* TODO this might not be so efficient when we've got many urbs!
237 * Alternatives:
238 * * only clear when needed
239 * * keep a list of tds with each urbp
240 */
241 list_for_each_entry(td, &c67x00->td_list, td_list)
242 if (urb == td->urb)
243 td->urb = NULL;
244
245 urbp = urb->hcpriv;
246 urb->hcpriv = NULL;
247 list_del(&urbp->hep_node);
248 kfree(urbp);
249}
250
251/* -------------------------------------------------------------------------- */
252
253static struct c67x00_ep_data *
254c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
255{
256 struct usb_host_endpoint *hep = urb->ep;
257 struct c67x00_ep_data *ep_data;
258 int type;
259
260 c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
261
262 /* Check if endpoint already has a c67x00_ep_data struct allocated */
263 if (hep->hcpriv) {
264 ep_data = hep->hcpriv;
265 if (frame_after(c67x00->current_frame, ep_data->next_frame))
266 ep_data->next_frame =
267 frame_add(c67x00->current_frame, 1);
268 return hep->hcpriv;
269 }
270
271 /* Allocate and initialize a new c67x00 endpoint data structure */
272 ep_data = kzalloc(sizeof(*ep_data), GFP_ATOMIC);
273 if (!ep_data)
274 return NULL;
275
276 INIT_LIST_HEAD(&ep_data->queue);
277 INIT_LIST_HEAD(&ep_data->node);
278 ep_data->hep = hep;
279
280 /* hold a reference to udev as long as this endpoint lives;
281 * this is needed to possibly fix the data toggle */
282 ep_data->dev = usb_get_dev(urb->dev);
283 hep->hcpriv = ep_data;
284
285 /* For ISOC and INT endpoints, start ASAP: */
286 ep_data->next_frame = frame_add(c67x00->current_frame, 1);
287
288 /* Add the endpoint data to one of the pipe lists; must be added
289 in order of endpoint address */
290 type = usb_pipetype(urb->pipe);
291 if (list_empty(&ep_data->node)) {
292 list_add(&ep_data->node, &c67x00->list[type]);
293 } else {
294 struct c67x00_ep_data *prev;
295
296 list_for_each_entry(prev, &c67x00->list[type], node) {
297 if (prev->hep->desc.bEndpointAddress >
298 hep->desc.bEndpointAddress) {
299 list_add(&ep_data->node, prev->node.prev);
300 break;
301 }
302 }
303 }
304
305 return ep_data;
306}
307
308static int c67x00_ep_data_free(struct usb_host_endpoint *hep)
309{
310 struct c67x00_ep_data *ep_data = hep->hcpriv;
311
312 if (!ep_data)
313 return 0;
314
315 if (!list_empty(&ep_data->queue))
316 return -EBUSY;
317
318 usb_put_dev(ep_data->dev);
319 list_del(&ep_data->queue);
320 list_del(&ep_data->node);
321
322 kfree(ep_data);
323 hep->hcpriv = NULL;
324
325 return 0;
326}
327
328void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
329{
330 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
331 unsigned long flags;
332
333 if (!list_empty(&ep->urb_list))
334 dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");
335
336 spin_lock_irqsave(&c67x00->lock, flags);
337
338 /* loop waiting for all transfers in the endpoint queue to complete */
339 while (c67x00_ep_data_free(ep)) {
340 /* Drop the lock so we can sleep waiting for the hardware */
341 spin_unlock_irqrestore(&c67x00->lock, flags);
342
343 /* it could happen that we reinitialize this completion, while
344 * somebody was waiting for that completion. The timeout and
345 * while loop handle such cases, but this might be improved */
346 INIT_COMPLETION(c67x00->endpoint_disable);
347 c67x00_sched_kick(c67x00);
348 wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);
349
350 spin_lock_irqsave(&c67x00->lock, flags);
351 }
352
353 spin_unlock_irqrestore(&c67x00->lock, flags);
354}
355
356/* -------------------------------------------------------------------------- */
357
358static inline int get_root_port(struct usb_device *dev)
359{
360 while (dev->parent->parent)
361 dev = dev->parent;
362 return dev->portnum;
363}
364
365int c67x00_urb_enqueue(struct usb_hcd *hcd,
366 struct urb *urb, gfp_t mem_flags)
367{
368 int ret;
369 unsigned long flags;
370 struct c67x00_urb_priv *urbp;
371 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
372 int port = get_root_port(urb->dev)-1;
373
374 spin_lock_irqsave(&c67x00->lock, flags);
375
376 /* Make sure host controller is running */
377 if (!HC_IS_RUNNING(hcd->state)) {
378 ret = -ENODEV;
379 goto err_not_linked;
380 }
381
382 ret = usb_hcd_link_urb_to_ep(hcd, urb);
383 if (ret)
384 goto err_not_linked;
385
386 /* Allocate and initialize urb private data */
387 urbp = kzalloc(sizeof(*urbp), mem_flags);
388 if (!urbp) {
389 ret = -ENOMEM;
390 goto err_urbp;
391 }
392
393 INIT_LIST_HEAD(&urbp->hep_node);
394 urbp->urb = urb;
395 urbp->port = port;
396
397 urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);
398
399 if (!urbp->ep_data) {
400 ret = -ENOMEM;
401 goto err_epdata;
402 }
403
404 /* TODO claim bandwidth with usb_claim_bandwidth?
405 * also release it somewhere! */
406
407 urb->hcpriv = urbp;
408
409 urb->actual_length = 0; /* Nothing received/transmitted yet */
410
411 switch (usb_pipetype(urb->pipe)) {
412 case PIPE_CONTROL:
413 urb->interval = SETUP_STAGE;
414 break;
415 case PIPE_INTERRUPT:
416 break;
417 case PIPE_BULK:
418 break;
419 case PIPE_ISOCHRONOUS:
420 if (c67x00->urb_iso_count == 0)
421 c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
422 c67x00->urb_iso_count++;
423 /* Assume always URB_ISO_ASAP, FIXME */
424 if (list_empty(&urbp->ep_data->queue))
425 urb->start_frame = urbp->ep_data->next_frame;
426 else {
427 /* Go right after the last one */
428 struct urb *last_urb;
429
430 last_urb = list_entry(urbp->ep_data->queue.prev,
431 struct c67x00_urb_priv,
432 hep_node)->urb;
433 urb->start_frame =
434 frame_add(last_urb->start_frame,
435 last_urb->number_of_packets *
436 last_urb->interval);
437 }
438 urbp->cnt = 0;
439 break;
440 }
441
442 /* Add the URB to the endpoint queue */
443 list_add_tail(&urbp->hep_node, &urbp->ep_data->queue);
444
445 /* If this is the only URB, kick start the controller */
446 if (!c67x00->urb_count++)
447 c67x00_ll_hpi_enable_sofeop(c67x00->sie);
448
449 c67x00_sched_kick(c67x00);
450 spin_unlock_irqrestore(&c67x00->lock, flags);
451
452 return 0;
453
454err_epdata:
455 kfree(urbp);
456err_urbp:
457 usb_hcd_unlink_urb_from_ep(hcd, urb);
458err_not_linked:
459 spin_unlock_irqrestore(&c67x00->lock, flags);
460
461 return ret;
462}
463
464int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
465{
466 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
467 unsigned long flags;
468 int rc;
469
470 spin_lock_irqsave(&c67x00->lock, flags);
471 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
472 if (rc)
473 goto done;
474
475 c67x00_release_urb(c67x00, urb);
476 usb_hcd_unlink_urb_from_ep(hcd, urb);
477
478 spin_unlock(&c67x00->lock);
479 usb_hcd_giveback_urb(hcd, urb, status);
480 spin_lock(&c67x00->lock);
481
482 spin_unlock_irqrestore(&c67x00->lock, flags);
483
484 return 0;
485
486 done:
487 spin_unlock_irqrestore(&c67x00->lock, flags);
488 return rc;
489}
490
491/* -------------------------------------------------------------------------- */
492
493/*
494 * pre: c67x00 locked, urb unlocked
495 */
496static void
497c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
498{
499 struct c67x00_urb_priv *urbp;
500
501 if (!urb)
502 return;
503
504 urbp = urb->hcpriv;
505 urbp->status = status;
506
507 list_del_init(&urbp->hep_node);
508
509 c67x00_release_urb(c67x00, urb);
510 usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
511 spin_unlock(&c67x00->lock);
512 usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status);
513 spin_lock(&c67x00->lock);
514}
515
516/* -------------------------------------------------------------------------- */
517
518static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
519 int len, int periodic)
520{
521 struct c67x00_urb_priv *urbp = urb->hcpriv;
522 int bit_time;
523
524 /* According to the C67x00 BIOS user manual, page 3-18,19, the
525 * following calculations provide the full speed bit times for
526 * a transaction.
527 *
528 * FS(in) = 112.5 + 9.36*BC + HOST_DELAY
529 * FS(in,iso) = 90.5 + 9.36*BC + HOST_DELAY
530 * FS(out) = 112.5 + 9.36*BC + HOST_DELAY
531 * FS(out,iso) = 78.4 + 9.36*BC + HOST_DELAY
532 * LS(in) = 802.4 + 75.78*BC + HOST_DELAY
533 * LS(out) = 802.6 + 74.67*BC + HOST_DELAY
534 *
535 * HOST_DELAY == 106 for the c67200 and c67300.
536 */
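	/*
	 * Worked example derived from the formulas above (not from the
	 * manual itself): a 64-byte full-speed bulk transaction costs about
	 * 112.5 + 9.36 * 64 + 106 ~= 818 bit times, so roughly 11 such
	 * transactions fit into the MAX_FRAME_BW_STD budget of 9750 bit
	 * times per frame.
	 */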
537
538 /* make calculations in 1/100 bit times to maintain resolution */
539 if (urbp->ep_data->dev->speed == USB_SPEED_LOW) {
540 /* Low speed pipe */
541 if (usb_pipein(urb->pipe))
542 bit_time = 80240 + 7578*len;
543 else
544 bit_time = 80260 + 7467*len;
545 } else {
546 /* FS pipes */
547 if (usb_pipeisoc(urb->pipe))
548 bit_time = usb_pipein(urb->pipe) ? 9050 : 7840;
549 else
550 bit_time = 11250;
551 bit_time += 936*len;
552 }
553
554 /* Scale back down to integer bit times. Use a host delay of 106.
555 * (this is the only place it is used) */
556 bit_time = ((bit_time+50) / 100) + 106;
557
558 if (unlikely(bit_time + c67x00->bandwidth_allocated >=
559 c67x00->max_frame_bw))
560 return -EMSGSIZE;
561
562 if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
563 c67x00->td_base_addr + SIE_TD_SIZE))
564 return -EMSGSIZE;
565
566 if (unlikely(c67x00->next_buf_addr + len >=
567 c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
568 return -EMSGSIZE;
569
570 if (periodic) {
571 if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
572 MAX_PERIODIC_BW(c67x00->max_frame_bw)))
573 return -EMSGSIZE;
574 c67x00->periodic_bw_allocated += bit_time;
575 }
576
577 c67x00->bandwidth_allocated += bit_time;
578 return 0;
579}
580
581/* -------------------------------------------------------------------------- */
582
583/**
584 * td_addr and buf_addr must be word aligned
585 */
586static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
587 void *data, int len, int pid, int toggle,
588 unsigned long privdata)
589{
590 struct c67x00_td *td;
591 struct c67x00_urb_priv *urbp = urb->hcpriv;
592 const __u8 active_flag = 1, retry_cnt = 1;
593 __u8 cmd = 0;
594 int tt = 0;
595
596 if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
597 || usb_pipeint(urb->pipe)))
598 return -EMSGSIZE; /* Not really an error, but expected */
599
600 td = kzalloc(sizeof(*td), GFP_ATOMIC);
601 if (!td)
602 return -ENOMEM;
603
604 td->pipe = urb->pipe;
605 td->ep_data = urbp->ep_data;
606
607 if ((td_udev(td)->speed == USB_SPEED_LOW) &&
608 !(c67x00->low_speed_ports & (1 << urbp->port)))
609 cmd |= PREAMBLE_EN;
610
611 switch (usb_pipetype(td->pipe)) {
612 case PIPE_ISOCHRONOUS:
613 tt = TT_ISOCHRONOUS;
614 cmd |= ISO_EN;
615 break;
616 case PIPE_CONTROL:
617 tt = TT_CONTROL;
618 break;
619 case PIPE_BULK:
620 tt = TT_BULK;
621 break;
622 case PIPE_INTERRUPT:
623 tt = TT_INTERRUPT;
624 break;
625 }
626
627 if (toggle)
628 cmd |= SEQ_SEL;
629
630 cmd |= ARM_EN;
631
632 /* SW part */
633 td->td_addr = c67x00->next_td_addr;
634 c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;
635
636 /* HW part */
637 td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
638 td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
639 (urbp->port << 14) | (len & 0x3FF));
640 td->pid_ep = ((pid & 0xF) << TD_PIDEP_OFFSET) |
641 (usb_pipeendpoint(td->pipe) & 0xF);
642 td->dev_addr = usb_pipedevice(td->pipe) & 0x7F;
643 td->ctrl_reg = cmd;
644 td->status = 0;
645 td->retry_cnt = (tt << TT_OFFSET) | (active_flag << 4) | retry_cnt;
646 td->residue = 0;
647 td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);
648
649 /* SW part */
650 td->data = data;
651 td->urb = urb;
652 td->privdata = privdata;
653
654 c67x00->next_buf_addr += (len + 1) & ~0x01; /* properly align */
655
656 list_add_tail(&td->td_list, &c67x00->td_list);
657 return 0;
658}
659
660static inline void c67x00_release_td(struct c67x00_td *td)
661{
662 list_del_init(&td->td_list);
663 kfree(td);
664}
665
666/* -------------------------------------------------------------------------- */
667
668static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
669{
670 int remaining;
671 int toggle;
672 int pid;
673 int ret = 0;
674 int maxps;
675 int need_empty;
676
677 toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
678 usb_pipeout(urb->pipe));
679 remaining = urb->transfer_buffer_length - urb->actual_length;
680
681 maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
682
683 need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
684 usb_pipeout(urb->pipe) && !(remaining % maxps);
685
686 while (remaining || need_empty) {
687 int len;
688 char *td_buf;
689
690 len = (remaining > maxps) ? maxps : remaining;
691 if (!len)
692 need_empty = 0;
693
694 pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
695 td_buf = urb->transfer_buffer + urb->transfer_buffer_length -
696 remaining;
697 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
698 DATA_STAGE);
699 if (ret)
700 return ret; /* td wasn't created */
701
702 toggle ^= 1;
703 remaining -= len;
704 if (usb_pipecontrol(urb->pipe))
705 break;
706 }
707
708 return 0;
709}
710
711/**
712 * return 0 in case more bandwidth is available, else an error code
713 */
714static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
715{
716 int ret;
717 int pid;
718
719 switch (urb->interval) {
720 default:
721 case SETUP_STAGE:
722 ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
723 8, USB_PID_SETUP, 0, SETUP_STAGE);
724 if (ret)
725 return ret;
726 urb->interval = SETUP_STAGE;
727 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
728 usb_pipeout(urb->pipe), 1);
729 break;
730 case DATA_STAGE:
731 if (urb->transfer_buffer_length) {
732 ret = c67x00_add_data_urb(c67x00, urb);
733 if (ret)
734 return ret;
735 break;
736 } /* else fallthrough */
737 case STATUS_STAGE:
738 pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
739 ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
740 STATUS_STAGE);
741 if (ret)
742 return ret;
743 break;
744 }
745
746 return 0;
747}
748
749/*
750 * return 0 in case more bandwidth is available, else an error code
751 */
752static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
753{
754 struct c67x00_urb_priv *urbp = urb->hcpriv;
755
756 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
757 urbp->ep_data->next_frame =
758 frame_add(urbp->ep_data->next_frame, urb->interval);
759 return c67x00_add_data_urb(c67x00, urb);
760 }
761 return 0;
762}
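/*
 * A minimal sketch of the periodic-scheduling test used in
 * c67x00_add_int_urb() above: the frame counter wraps (the chip's
 * HOST_FRAME_MASK is 0x07FF), so "is the current frame at or past
 * next_frame" has to be evaluated modulo the counter width.  The
 * helpers below are assumptions about what frame_add()/frame_after_eq()
 * compute, not the driver's implementation.
 */
#include <stdio.h>

#define FRAME_MASK 0x07FF

static int frame_add(int a, int b)
{
	return (a + b) & FRAME_MASK;
}

/* true if frame a is at or after frame b, within half the wrap range */
static int frame_after_eq(int a, int b)
{
	return ((a - b) & FRAME_MASK) < (FRAME_MASK + 1) / 2;
}

int main(void)
{
	int next = frame_add(0x7F0, 0x20);	/* wraps to 0x010 */

	printf("next_frame = 0x%03x\n", next);
	printf("frame 0x011 due? %d\n", frame_after_eq(0x011, next));
	printf("frame 0x7FF due? %d\n", frame_after_eq(0x7FF, next));
	return 0;
}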
763
764static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
765{
766 struct c67x00_urb_priv *urbp = urb->hcpriv;
767
768 if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
769 char *td_buf;
770 int len, pid, ret;
771
772 BUG_ON(urbp->cnt >= urb->number_of_packets);
773
774 td_buf = urb->transfer_buffer +
775 urb->iso_frame_desc[urbp->cnt].offset;
776 len = urb->iso_frame_desc[urbp->cnt].length;
777 pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
778
779 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
780 urbp->cnt);
781 if (ret) {
782 printk(KERN_DEBUG "create failed: %d\n", ret);
783 urb->iso_frame_desc[urbp->cnt].actual_length = 0;
784 urb->iso_frame_desc[urbp->cnt].status = ret;
785 if (urbp->cnt + 1 == urb->number_of_packets)
786 c67x00_giveback_urb(c67x00, urb, 0);
787 }
788
789 urbp->ep_data->next_frame =
790 frame_add(urbp->ep_data->next_frame, urb->interval);
791 urbp->cnt++;
792 }
793 return 0;
794}
795
796/* -------------------------------------------------------------------------- */
797
798static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
799 int (*add)(struct c67x00_hcd *, struct urb *))
800{
801 struct c67x00_ep_data *ep_data;
802 struct urb *urb;
803
804 /* traverse every endpoint on the list */
805 list_for_each_entry(ep_data, &c67x00->list[type], node) {
806 if (!list_empty(&ep_data->queue)) {
807 /* and add the first urb */
808 /* isochronous transfers rely on this */
809 urb = list_entry(ep_data->queue.next,
810 struct c67x00_urb_priv,
811 hep_node)->urb;
812 add(c67x00, urb);
813 }
814 }
815}
816
817static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
818{
819 struct c67x00_td *td, *ttd;
820
821 /* Check if we can proceed */
822 if (!list_empty(&c67x00->td_list)) {
823 dev_warn(c67x00_hcd_dev(c67x00),
824 "TD list not empty! This should not happen!\n");
825 list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
826 dbg_td(c67x00, td, "Unprocessed td");
827 c67x00_release_td(td);
828 }
829 }
830
831 /* Reinitialize variables */
832 c67x00->bandwidth_allocated = 0;
833 c67x00->periodic_bw_allocated = 0;
834
835 c67x00->next_td_addr = c67x00->td_base_addr;
836 c67x00->next_buf_addr = c67x00->buf_base_addr;
837
838 /* Fill the list */
839 c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
840 c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
841 c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
842 c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
843}
844
845/* -------------------------------------------------------------------------- */
846
847/**
848 * Get TD from C67X00
849 */
850static inline void
851c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
852{
853 c67x00_ll_read_mem_le16(c67x00->sie->dev,
854 td->td_addr, td, CY_TD_SIZE);
855
856 if (usb_pipein(td->pipe) && td_actual_bytes(td))
857 c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
858 td->data, td_actual_bytes(td));
859}
860
861static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
862{
863 if (td->status & TD_STATUSMASK_ERR) {
864 dbg_td(c67x00, td, "ERROR_FLAG");
865 return -EILSEQ;
866 }
867 if (td->status & TD_STATUSMASK_STALL) {
868 /* dbg_td(c67x00, td, "STALL"); */
869 return -EPIPE;
870 }
871 if (td->status & TD_STATUSMASK_TMOUT) {
872 dbg_td(c67x00, td, "TIMEOUT");
873 return -ETIMEDOUT;
874 }
875
876 return 0;
877}
878
879static inline int c67x00_end_of_data(struct c67x00_td *td)
880{
881 int maxps, need_empty, remaining;
882 struct urb *urb = td->urb;
883 int act_bytes;
884
885 act_bytes = td_actual_bytes(td);
886
887 if (unlikely(!act_bytes))
888 return 1; /* This was an empty packet */
889
890 maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));
891
892 if (unlikely(act_bytes < maxps))
893 return 1; /* Smaller than full packet */
894
895 remaining = urb->transfer_buffer_length - urb->actual_length;
896 need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
897 usb_pipeout(urb->pipe) && !(remaining % maxps);
898
899 if (unlikely(!remaining && !need_empty))
900 return 1;
901
902 return 0;
903}
904
905/* -------------------------------------------------------------------------- */
906
907/* Remove all td's from the list which come
908 * after last_td and are meant for the same pipe.
909 * This is used when a short packet has occurred */
910static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
911 struct c67x00_td *last_td)
912{
913 struct c67x00_td *td, *tmp;
914 td = last_td;
915 tmp = last_td;
916 while (td->td_list.next != &c67x00->td_list) {
917 td = list_entry(td->td_list.next, struct c67x00_td, td_list);
918 if (td->pipe == last_td->pipe) {
919 c67x00_release_td(td);
920 td = tmp;
921 }
922 tmp = td;
923 }
924}
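/*
 * A minimal, standalone sketch of what c67x00_clear_pipe() above does:
 * starting right after a given node, drop every later node that belongs
 * to the same "pipe".  A plain singly linked list is used here instead
 * of the kernel's struct list_head, purely for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int pipe;
	struct node *next;
};

static void clear_pipe_after(struct node *last)
{
	struct node *cur = last;

	while (cur->next) {
		if (cur->next->pipe == last->pipe) {
			struct node *victim = cur->next;

			cur->next = victim->next;
			free(victim);
		} else {
			cur = cur->next;
		}
	}
}

int main(void)
{
	int pipes[] = { 1, 2, 1, 3, 1 };
	struct node *head = NULL, **tail = &head, *n;

	for (unsigned i = 0; i < sizeof(pipes) / sizeof(pipes[0]); i++) {
		n = calloc(1, sizeof(*n));
		n->pipe = pipes[i];
		*tail = n;
		tail = &n->next;
	}

	clear_pipe_after(head);	/* head has pipe 1: drop the later pipe-1 nodes */

	for (n = head; n; n = n->next)
		printf("pipe %d\n", n->pipe);	/* prints 1 2 3 */
	return 0;
}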
925
926/* -------------------------------------------------------------------------- */
927
928static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
929 struct c67x00_td *td)
930{
931 struct urb *urb = td->urb;
932
933 if (!urb)
934 return;
935
936 urb->actual_length += td_actual_bytes(td);
937
938 switch (usb_pipetype(td->pipe)) {
939 /* isochronous tds are handled separately */
940 case PIPE_CONTROL:
941 switch (td->privdata) {
942 case SETUP_STAGE:
943 urb->interval =
944 urb->transfer_buffer_length ?
945 DATA_STAGE : STATUS_STAGE;
946 /* Don't count setup_packet with normal data: */
947 urb->actual_length = 0;
948 break;
949
950 case DATA_STAGE:
951 if (c67x00_end_of_data(td)) {
952 urb->interval = STATUS_STAGE;
953 c67x00_clear_pipe(c67x00, td);
954 }
955 break;
956
957 case STATUS_STAGE:
958 urb->interval = 0;
959 c67x00_giveback_urb(c67x00, urb, 0);
960 break;
961 }
962 break;
963
964 case PIPE_INTERRUPT:
965 case PIPE_BULK:
966 if (unlikely(c67x00_end_of_data(td))) {
967 c67x00_clear_pipe(c67x00, td);
968 c67x00_giveback_urb(c67x00, urb, 0);
969 }
970 break;
971 }
972}
973
974static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
975{
976 struct urb *urb = td->urb;
977 struct c67x00_urb_priv *urbp;
978 int cnt;
979
980 if (!urb)
981 return;
982
983 urbp = urb->hcpriv;
984 cnt = td->privdata;
985
986 if (td->status & TD_ERROR_MASK)
987 urb->error_count++;
988
989 urb->iso_frame_desc[cnt].actual_length = td_actual_bytes(td);
990 urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
991 if (cnt + 1 == urb->number_of_packets) /* Last packet */
992 c67x00_giveback_urb(c67x00, urb, 0);
993}
994
995/* -------------------------------------------------------------------------- */
996
997/**
998 * c67x00_check_td_list - handle tds which have been processed by the c67x00
999 * pre: current_td == 0
1000 */
1001static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
1002{
1003 struct c67x00_td *td, *tmp;
1004 struct urb *urb;
1005 int ack_ok;
1006 int clear_endpoint;
1007
1008 list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
1009 /* get the TD */
1010 c67x00_parse_td(c67x00, td);
1011 urb = td->urb; /* urb can be NULL! */
1012 ack_ok = 0;
1013 clear_endpoint = 1;
1014
1015 /* Handle isochronous transfers separately */
1016 if (usb_pipeisoc(td->pipe)) {
1017 clear_endpoint = 0;
1018 c67x00_handle_isoc(c67x00, td);
1019 goto cont;
1020 }
1021
1022 /* When an error occurs, all td's for that pipe go into an
1023 * inactive state. This state matches successful transfers so
1024 * we must make sure not to service them. */
1025 if (td->status & TD_ERROR_MASK) {
1026 c67x00_giveback_urb(c67x00, urb,
1027 c67x00_td_to_error(c67x00, td));
1028 goto cont;
1029 }
1030
1031 if ((td->status & TD_STATUSMASK_NAK) || !td_sequence_ok(td) ||
1032 !td_acked(td))
1033 goto cont;
1034
1035 /* Sequence ok and acked, don't need to fix toggle */
1036 ack_ok = 1;
1037
1038 if (unlikely(td->status & TD_STATUSMASK_OVF)) {
1039 if (td_residue(td) & TD_RESIDUE_OVERFLOW) {
1040 /* Overflow */
1041 c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
1042 goto cont;
1043 }
1044 }
1045
1046 clear_endpoint = 0;
1047 c67x00_handle_successful_td(c67x00, td);
1048
1049cont:
1050 if (clear_endpoint)
1051 c67x00_clear_pipe(c67x00, td);
1052 if (ack_ok)
1053 usb_settoggle(td_udev(td), usb_pipeendpoint(td->pipe),
1054 usb_pipeout(td->pipe),
1055 !(td->ctrl_reg & SEQ_SEL));
1056 /* next in list could have been removed, due to clear_pipe! */
1057 tmp = list_entry(td->td_list.next, typeof(*td), td_list);
1058 c67x00_release_td(td);
1059 }
1060}
1061
1062/* -------------------------------------------------------------------------- */
1063
1064static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
1065{
1066 /* If all tds are processed, we can check the previous frame (if
1067 * there was any) and start our next frame.
1068 */
1069 return !c67x00_ll_husb_get_current_td(c67x00->sie);
1070}
1071
1072/**
1073 * Send td to C67X00
1074 */
1075static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
1076{
1077 int len = td_length(td);
1078
1079 if (len && ((td->pid_ep & TD_PIDEPMASK_PID) != TD_PID_IN))
1080 c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
1081 td->data, len);
1082
1083 c67x00_ll_write_mem_le16(c67x00->sie->dev,
1084 td->td_addr, td, CY_TD_SIZE);
1085}
1086
1087static void c67x00_send_frame(struct c67x00_hcd *c67x00)
1088{
1089 struct c67x00_td *td;
1090
1091 if (list_empty(&c67x00->td_list))
1092 dev_warn(c67x00_hcd_dev(c67x00),
1093 "%s: td list should not be empty here!\n",
1094 __func__);
1095
1096 list_for_each_entry(td, &c67x00->td_list, td_list) {
1097 if (td->td_list.next == &c67x00->td_list)
1098 td->next_td_addr = 0; /* Last td in list */
1099
1100 c67x00_send_td(c67x00, td);
1101 }
1102
1103 c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
1104}
1105
1106/* -------------------------------------------------------------------------- */
1107
1108/**
1109 * c67x00_do_work - Scheduler's state machine
1110 */
1111static void c67x00_do_work(struct c67x00_hcd *c67x00)
1112{
1113 spin_lock(&c67x00->lock);
1114 /* Make sure all tds are processed */
1115 if (!c67x00_all_tds_processed(c67x00))
1116 goto out;
1117
1118 c67x00_check_td_list(c67x00);
1119
1120 /* no td's are being processed (current == 0)
1121 * and all have been "checked" */
1122 complete(&c67x00->endpoint_disable);
1123
1124 if (!list_empty(&c67x00->td_list))
1125 goto out;
1126
1127 c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
1128 if (c67x00->current_frame == c67x00->last_frame)
1129 goto out; /* Don't send tds in same frame */
1130 c67x00->last_frame = c67x00->current_frame;
1131
1132 /* If no urbs are scheduled, our work is done */
1133 if (!c67x00->urb_count) {
1134 c67x00_ll_hpi_disable_sofeop(c67x00->sie);
1135 goto out;
1136 }
1137
1138 c67x00_fill_frame(c67x00);
1139 if (!list_empty(&c67x00->td_list))
1140 /* TD's have been added to the frame */
1141 c67x00_send_frame(c67x00);
1142
1143 out:
1144 spin_unlock(&c67x00->lock);
1145}
1146
1147/* -------------------------------------------------------------------------- */
1148
1149static void c67x00_sched_tasklet(unsigned long __c67x00)
1150{
1151 struct c67x00_hcd *c67x00 = (struct c67x00_hcd *)__c67x00;
1152 c67x00_do_work(c67x00);
1153}
1154
1155void c67x00_sched_kick(struct c67x00_hcd *c67x00)
1156{
1157 tasklet_hi_schedule(&c67x00->tasklet);
1158}
1159
1160int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
1161{
1162 tasklet_init(&c67x00->tasklet, c67x00_sched_tasklet,
1163 (unsigned long)c67x00);
1164 return 0;
1165}
1166
1167void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
1168{
1169 tasklet_kill(&c67x00->tasklet);
1170}
diff --git a/drivers/usb/c67x00/c67x00.h b/drivers/usb/c67x00/c67x00.h
new file mode 100644
index 000000000000..a26e9ded0f32
--- /dev/null
+++ b/drivers/usb/c67x00/c67x00.h
@@ -0,0 +1,294 @@
1/*
2 * c67x00.h: Cypress C67X00 USB register and field definitions
3 *
4 * Copyright (C) 2006-2008 Barco N.V.
5 * Derived from the Cypress cy7c67200/300 ezusb linux driver and
6 * based on multiple host controller drivers inside the linux kernel.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
21 * MA 02110-1301 USA.
22 */
23
24#ifndef _USB_C67X00_H
25#define _USB_C67X00_H
26
27#include <linux/spinlock.h>
28#include <linux/platform_device.h>
29#include <linux/completion.h>
30#include <linux/mutex.h>
31
32/* ---------------------------------------------------------------------
33 * Cypress C67x00 register definitions
34 */
35
36/* Hardware Revision Register */
37#define HW_REV_REG 0xC004
38
39/* General USB registers */
40/* ===================== */
41
42/* USB Control Register */
43#define USB_CTL_REG(x) ((x) ? 0xC0AA : 0xC08A)
44
45#define LOW_SPEED_PORT(x) ((x) ? 0x0800 : 0x0400)
46#define HOST_MODE 0x0200
47#define PORT_RES_EN(x) ((x) ? 0x0100 : 0x0080)
48#define SOF_EOP_EN(x) ((x) ? 0x0002 : 0x0001)
49
50/* USB status register - Notice it has different content in hcd/udc mode */
51#define USB_STAT_REG(x) ((x) ? 0xC0B0 : 0xC090)
52
53#define EP0_IRQ_FLG 0x0001
54#define EP1_IRQ_FLG 0x0002
55#define EP2_IRQ_FLG 0x0004
56#define EP3_IRQ_FLG 0x0008
57#define EP4_IRQ_FLG 0x0010
58#define EP5_IRQ_FLG 0x0020
59#define EP6_IRQ_FLG 0x0040
60#define EP7_IRQ_FLG 0x0080
61#define RESET_IRQ_FLG 0x0100
62#define SOF_EOP_IRQ_FLG 0x0200
63#define ID_IRQ_FLG 0x4000
64#define VBUS_IRQ_FLG 0x8000
65
66/* USB Host only registers */
67/* ======================= */
68
69/* Host n Control Register */
70#define HOST_CTL_REG(x) ((x) ? 0xC0A0 : 0xC080)
71
72#define PREAMBLE_EN 0x0080 /* Preamble enable */
73#define SEQ_SEL 0x0040 /* Data Toggle Sequence Bit Select */
74#define ISO_EN 0x0010 /* Isochronous enable */
75#define ARM_EN 0x0001 /* Arm operation */
76
77/* Host n Interrupt Enable Register */
78#define HOST_IRQ_EN_REG(x) ((x) ? 0xC0AC : 0xC08C)
79
80#define SOF_EOP_IRQ_EN 0x0200 /* SOF/EOP Interrupt Enable */
81#define SOF_EOP_TMOUT_IRQ_EN 0x0800 /* SOF/EOP Timeout Interrupt Enable */
82#define ID_IRQ_EN 0x4000 /* ID interrupt enable */
83#define VBUS_IRQ_EN 0x8000 /* VBUS interrupt enable */
84#define DONE_IRQ_EN 0x0001 /* Done Interrupt Enable */
85
86/* USB status register */
87#define HOST_STAT_MASK 0x02FD
88#define PORT_CONNECT_CHANGE(x) ((x) ? 0x0020 : 0x0010)
89#define PORT_SE0_STATUS(x) ((x) ? 0x0008 : 0x0004)
90
91/* Host Frame Register */
92#define HOST_FRAME_REG(x) ((x) ? 0xC0B6 : 0xC096)
93
94#define HOST_FRAME_MASK 0x07FF
95
96/* USB Peripheral only registers */
97/* ============================= */
98
99/* Device n Port Sel reg */
100#define DEVICE_N_PORT_SEL(x) ((x) ? 0xC0A4 : 0xC084)
101
102/* Device n Interrupt Enable Register */
103#define DEVICE_N_IRQ_EN_REG(x) ((x) ? 0xC0AC : 0xC08C)
104
105#define DEVICE_N_ENDPOINT_N_CTL_REG(dev, ep) ((dev) \
106 ? (0x0280 + (ep << 4)) \
107 : (0x0200 + (ep << 4)))
108#define DEVICE_N_ENDPOINT_N_STAT_REG(dev, ep) ((dev) \
109 ? (0x0286 + (ep << 4)) \
110 : (0x0206 + (ep << 4)))
111
112#define DEVICE_N_ADDRESS(dev) ((dev) ? (0xC0AE) : (0xC08E))
113
114/* HPI registers */
115/* ============= */
116
117/* HPI Status register */
118#define SOFEOP_FLG(x) (1 << ((x) ? 12 : 10))
119#define SIEMSG_FLG(x) (1 << (4 + (x)))
120#define RESET_FLG(x) ((x) ? 0x0200 : 0x0002)
121#define DONE_FLG(x) (1 << (2 + (x)))
122#define RESUME_FLG(x) (1 << (6 + (x)))
123#define MBX_OUT_FLG 0x0001 /* Message out available */
124#define MBX_IN_FLG 0x0100
125#define ID_FLG 0x4000
126#define VBUS_FLG 0x8000
127
128/* Interrupt routing register */
129#define HPI_IRQ_ROUTING_REG 0x0142
130
131#define HPI_SWAP_ENABLE(x) ((x) ? 0x0100 : 0x0001)
132#define RESET_TO_HPI_ENABLE(x) ((x) ? 0x0200 : 0x0002)
133#define DONE_TO_HPI_ENABLE(x) ((x) ? 0x0008 : 0x0004)
134#define RESUME_TO_HPI_ENABLE(x) ((x) ? 0x0080 : 0x0040)
135#define SOFEOP_TO_HPI_EN(x) ((x) ? 0x2000 : 0x0800)
136#define SOFEOP_TO_CPU_EN(x) ((x) ? 0x1000 : 0x0400)
137#define ID_TO_HPI_ENABLE 0x4000
138#define VBUS_TO_HPI_ENABLE 0x8000
139
140/* SIE msg registers */
141#define SIEMSG_REG(x) ((x) ? 0x0148 : 0x0144)
142
143#define HUSB_TDListDone 0x1000
144
145#define SUSB_EP0_MSG 0x0001
146#define SUSB_EP1_MSG 0x0002
147#define SUSB_EP2_MSG 0x0004
148#define SUSB_EP3_MSG 0x0008
149#define SUSB_EP4_MSG 0x0010
150#define SUSB_EP5_MSG 0x0020
151#define SUSB_EP6_MSG 0x0040
152#define SUSB_EP7_MSG 0x0080
153#define SUSB_RST_MSG 0x0100
154#define SUSB_SOF_MSG 0x0200
155#define SUSB_CFG_MSG 0x0400
156#define SUSB_SUS_MSG 0x0800
157#define SUSB_ID_MSG 0x4000
158#define SUSB_VBUS_MSG 0x8000
159
160/* BIOS interrupt routines */
161
162#define SUSBx_RECEIVE_INT(x) ((x) ? 97 : 81)
163#define SUSBx_SEND_INT(x) ((x) ? 96 : 80)
164
165#define SUSBx_DEV_DESC_VEC(x) ((x) ? 0x00D4 : 0x00B4)
166#define SUSBx_CONF_DESC_VEC(x) ((x) ? 0x00D6 : 0x00B6)
167#define SUSBx_STRING_DESC_VEC(x) ((x) ? 0x00D8 : 0x00B8)
168
169#define CY_HCD_BUF_ADDR 0x500 /* Base address for host */
170#define SIE_TD_SIZE 0x200 /* size of the td list */
171#define SIE_TD_BUF_SIZE 0x400 /* size of the data buffer */
172
173#define SIE_TD_OFFSET(host) ((host) ? (SIE_TD_SIZE+SIE_TD_BUF_SIZE) : 0)
174#define SIE_BUF_OFFSET(host) (SIE_TD_OFFSET(host) + SIE_TD_SIZE)
175
176/* Base address of HCD + 2 x TD_SIZE + 2 x TD_BUF_SIZE */
177#define CY_UDC_REQ_HEADER_BASE 0x1100
178/* 8- byte request headers for IN/OUT transfers */
179#define CY_UDC_REQ_HEADER_SIZE 8
180
181#define CY_UDC_REQ_HEADER_ADDR(ep_num) (CY_UDC_REQ_HEADER_BASE + \
182 ((ep_num) * CY_UDC_REQ_HEADER_SIZE))
183#define CY_UDC_DESC_BASE_ADDRESS (CY_UDC_REQ_HEADER_ADDR(8))
184
185#define CY_UDC_BIOS_REPLACE_BASE 0x1800
186#define CY_UDC_REQ_BUFFER_BASE 0x2000
187#define CY_UDC_REQ_BUFFER_SIZE 0x0400
188#define CY_UDC_REQ_BUFFER_ADDR(ep_num) (CY_UDC_REQ_BUFFER_BASE + \
189 ((ep_num) * CY_UDC_REQ_BUFFER_SIZE))
190
191/* ---------------------------------------------------------------------
192 * Driver data structures
193 */
194
195struct c67x00_device;
196
197/**
198 * struct c67x00_sie - Common data associated with a SIE
199 * @lock: lock to protect this struct and the associated chip registers
200 * @private_data: subdriver dependent data
201 * @irq: subdriver dependent irq handler, set NULL when not used
202 * @dev: link to common driver structure
203 * @sie_num: SIE number on chip, starting from 0
204 * @mode: SIE mode (host/peripheral/otg/not used)
205 */
206struct c67x00_sie {
207 /* Entries to be used by the subdrivers */
208 spinlock_t lock; /* protect this structure */
209 void *private_data;
210 void (*irq) (struct c67x00_sie *sie, u16 int_status, u16 msg);
211
212 /* Read only: */
213 struct c67x00_device *dev;
214 int sie_num;
215 int mode;
216};
217
218#define sie_dev(s) (&(s)->dev->pdev->dev)
219
220/**
221 * struct c67x00_lcp
222 */
223struct c67x00_lcp {
224 /* Internal use only */
225 struct mutex mutex;
226 struct completion msg_received;
227 u16 last_msg;
228};
229
230/*
231 * struct c67x00_hpi
232 */
233struct c67x00_hpi {
234 void __iomem *base;
235 int regstep;
236 spinlock_t lock;
237 struct c67x00_lcp lcp;
238};
239
240#define C67X00_SIES 2
241#define C67X00_PORTS 2
242
243/**
244 * struct c67x00_device - Common data associated with a c67x00 instance
245 * @hpi: hpi addresses
246 * @sie: array of sie's on this chip
247 * @pdev: platform device of instance
248 * @pdata: configuration provided by the platform
249 */
250struct c67x00_device {
251 struct c67x00_hpi hpi;
252 struct c67x00_sie sie[C67X00_SIES];
253 struct platform_device *pdev;
254 struct c67x00_platform_data *pdata;
255};
256
257/* ---------------------------------------------------------------------
258 * Low level interface functions
259 */
260
261/* Host Port Interface (HPI) functions */
262u16 c67x00_ll_hpi_status(struct c67x00_device *dev);
263void c67x00_ll_hpi_reg_init(struct c67x00_device *dev);
264void c67x00_ll_hpi_enable_sofeop(struct c67x00_sie *sie);
265void c67x00_ll_hpi_disable_sofeop(struct c67x00_sie *sie);
266
267/* General functions */
268u16 c67x00_ll_fetch_siemsg(struct c67x00_device *dev, int sie_num);
269u16 c67x00_ll_get_usb_ctl(struct c67x00_sie *sie);
270void c67x00_ll_usb_clear_status(struct c67x00_sie *sie, u16 bits);
271u16 c67x00_ll_usb_get_status(struct c67x00_sie *sie);
272void c67x00_ll_write_mem_le16(struct c67x00_device *dev, u16 addr,
273 void *data, int len);
274void c67x00_ll_read_mem_le16(struct c67x00_device *dev, u16 addr,
275 void *data, int len);
276
277/* Host specific functions */
278void c67x00_ll_set_husb_eot(struct c67x00_device *dev, u16 value);
279void c67x00_ll_husb_reset(struct c67x00_sie *sie, int port);
280void c67x00_ll_husb_set_current_td(struct c67x00_sie *sie, u16 addr);
281u16 c67x00_ll_husb_get_current_td(struct c67x00_sie *sie);
282u16 c67x00_ll_husb_get_frame(struct c67x00_sie *sie);
283void c67x00_ll_husb_init_host_port(struct c67x00_sie *sie);
284void c67x00_ll_husb_reset_port(struct c67x00_sie *sie, int port);
285
286/* Called by c67x00_irq to handle lcp interrupts */
287void c67x00_ll_irq(struct c67x00_device *dev, u16 int_status);
288
289/* Setup and teardown */
290void c67x00_ll_init(struct c67x00_device *dev);
291void c67x00_ll_release(struct c67x00_device *dev);
292int c67x00_ll_reset(struct c67x00_device *dev);
293
294#endif /* _USB_C67X00_H */
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7b572e75e73c..cefe7f2c6f75 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -280,7 +280,7 @@ static void acm_ctrl_irq(struct urb *urb)
280 280
281 case USB_CDC_NOTIFY_SERIAL_STATE: 281 case USB_CDC_NOTIFY_SERIAL_STATE:
282 282
283 newctrl = le16_to_cpu(get_unaligned((__le16 *) data)); 283 newctrl = get_unaligned_le16(data);
284 284
285 if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { 285 if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
286 dbg("calling hangup"); 286 dbg("calling hangup");
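/*
 * A minimal sketch of what the get_unaligned_le16() conversion in the
 * hunk above computes: read two bytes at an arbitrary (possibly
 * unaligned) address and combine them as a little-endian 16-bit value,
 * regardless of host byte order.  This is an illustration, not the
 * kernel's implementation.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t read_le16(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	/* e.g. a two-byte field starting at an odd (unaligned) offset */
	uint8_t buf[] = { 0x00, 0x03, 0x00 };

	printf("0x%04x\n", read_le16(buf + 1));	/* prints 0x0003 */
	return 0;
}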
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 8607846e3c3f..1d253dd4ea81 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -773,7 +773,7 @@ int __init usbfs_init(void)
773 usb_register_notify(&usbfs_nb); 773 usb_register_notify(&usbfs_nb);
774 774
775 /* create mount point for usbfs */ 775 /* create mount point for usbfs */
776 usbdir = proc_mkdir("usb", proc_bus); 776 usbdir = proc_mkdir("bus/usb", NULL);
777 777
778 return 0; 778 return 0;
779} 779}
@@ -783,6 +783,6 @@ void usbfs_cleanup(void)
783 usb_unregister_notify(&usbfs_nb); 783 usb_unregister_notify(&usbfs_nb);
784 unregister_filesystem(&usb_fs_type); 784 unregister_filesystem(&usb_fs_type);
785 if (usbdir) 785 if (usbdir)
786 remove_proc_entry("usb", proc_bus); 786 remove_proc_entry("bus/usb", NULL);
787} 787}
788 788
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e819e5359d57..3e69266e1f4d 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -394,7 +394,9 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
394 if (!io->urbs) 394 if (!io->urbs)
395 goto nomem; 395 goto nomem;
396 396
397 urb_flags = URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT; 397 urb_flags = URB_NO_INTERRUPT;
398 if (dma)
399 urb_flags |= URB_NO_TRANSFER_DMA_MAP;
398 if (usb_pipein(pipe)) 400 if (usb_pipein(pipe))
399 urb_flags |= URB_SHORT_NOT_OK; 401 urb_flags |= URB_SHORT_NOT_OK;
400 402
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index f7b54651dd42..6e784d2db423 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -231,6 +231,26 @@ config SUPERH_BUILT_IN_M66592
231 However, this problem is improved if change a value of 231 However, this problem is improved if change a value of
232 NET_IP_ALIGN to 4. 232 NET_IP_ALIGN to 4.
233 233
234config USB_GADGET_PXA27X
235 boolean "PXA 27x"
236 depends on ARCH_PXA && PXA27x
237 help
238 Intel's PXA 27x series XScale ARM v5TE processors include
239 an integrated full speed USB 1.1 device controller.
240
241 It has up to 23 endpoints, as well as endpoint zero (for
242 control transfers).
243
244 Say "y" to link the driver statically, or "m" to build a
245 dynamically linked module called "pxa27x_udc" and force all
246 gadget drivers to also be dynamically linked.
247
248config USB_PXA27X
249 tristate
250 depends on USB_GADGET_PXA27X
251 default USB_GADGET
252 select USB_GADGET_SELECTED
253
234config USB_GADGET_GOKU 254config USB_GADGET_GOKU
235 boolean "Toshiba TC86C001 'Goku-S'" 255 boolean "Toshiba TC86C001 'Goku-S'"
236 depends on PCI 256 depends on PCI
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index c3aab80b6c76..12357255d740 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
9obj-$(CONFIG_USB_NET2280) += net2280.o 9obj-$(CONFIG_USB_NET2280) += net2280.o
10obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o 10obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o
11obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o 11obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o
12obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o
12obj-$(CONFIG_USB_GOKU) += goku_udc.o 13obj-$(CONFIG_USB_GOKU) += goku_udc.o
13obj-$(CONFIG_USB_OMAP) += omap_udc.o 14obj-$(CONFIG_USB_OMAP) += omap_udc.o
14obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o 15obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 9b913afb2e6d..274c60a970cd 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -231,6 +231,7 @@ static int proc_udc_open(struct inode *inode, struct file *file)
231} 231}
232 232
233static const struct file_operations proc_ops = { 233static const struct file_operations proc_ops = {
234 .owner = THIS_MODULE,
234 .open = proc_udc_open, 235 .open = proc_udc_open,
235 .read = seq_read, 236 .read = seq_read,
236 .llseek = seq_lseek, 237 .llseek = seq_lseek,
@@ -239,15 +240,7 @@ static const struct file_operations proc_ops = {
239 240
240static void create_debug_file(struct at91_udc *udc) 241static void create_debug_file(struct at91_udc *udc)
241{ 242{
242 struct proc_dir_entry *pde; 243 udc->pde = proc_create_data(debug_filename, 0, NULL, &proc_ops, udc);
243
244 pde = create_proc_entry (debug_filename, 0, NULL);
245 udc->pde = pde;
246 if (pde == NULL)
247 return;
248
249 pde->proc_fops = &proc_ops;
250 pde->data = udc;
251} 244}
252 245
253static void remove_debug_file(struct at91_udc *udc) 246static void remove_debug_file(struct at91_udc *udc)
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index bb93bdd76593..8d61ea67a817 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -235,10 +235,6 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
235#define DEV_CONFIG_CDC 235#define DEV_CONFIG_CDC
236#endif 236#endif
237 237
238#ifdef CONFIG_USB_GADGET_PXA27X
239#define DEV_CONFIG_CDC
240#endif
241
242#ifdef CONFIG_USB_GADGET_S3C2410 238#ifdef CONFIG_USB_GADGET_S3C2410
243#define DEV_CONFIG_CDC 239#define DEV_CONFIG_CDC
244#endif 240#endif
@@ -270,6 +266,10 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
270#define DEV_CONFIG_SUBSET 266#define DEV_CONFIG_SUBSET
271#endif 267#endif
272 268
269#ifdef CONFIG_USB_GADGET_PXA27X
270#define DEV_CONFIG_SUBSET
271#endif
272
273#ifdef CONFIG_USB_GADGET_SUPERH 273#ifdef CONFIG_USB_GADGET_SUPERH
274#define DEV_CONFIG_SUBSET 274#define DEV_CONFIG_SUBSET
275#endif 275#endif
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index bf3f946fd455..47bb9f09a1aa 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -2307,6 +2307,29 @@ static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
2307 return rc; 2307 return rc;
2308} 2308}
2309 2309
2310static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
2311{
2312 int rc;
2313
2314 DBG(fsg, "bulk-in set wedge\n");
2315 rc = usb_ep_set_wedge(fsg->bulk_in);
2316 if (rc == -EAGAIN)
2317 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
2318 while (rc != 0) {
2319 if (rc != -EAGAIN) {
2320 WARN(fsg, "usb_ep_set_wedge -> %d\n", rc);
2321 rc = 0;
2322 break;
2323 }
2324
2325 /* Wait for a short time and then try again */
2326 if (msleep_interruptible(100) != 0)
2327 return -EINTR;
2328 rc = usb_ep_set_wedge(fsg->bulk_in);
2329 }
2330 return rc;
2331}
2332
2310static int pad_with_zeros(struct fsg_dev *fsg) 2333static int pad_with_zeros(struct fsg_dev *fsg)
2311{ 2334{
2312 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill; 2335 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
@@ -2957,7 +2980,7 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2957 * We aren't required to halt the OUT endpoint; instead 2980 * We aren't required to halt the OUT endpoint; instead
2958 * we can simply accept and discard any data received 2981 * we can simply accept and discard any data received
2959 * until the next reset. */ 2982 * until the next reset. */
2960 halt_bulk_in_endpoint(fsg); 2983 wedge_bulk_in_endpoint(fsg);
2961 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2984 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2962 return -EINVAL; 2985 return -EINVAL;
2963 } 2986 }
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 64a592cbbe7b..be6613afedbf 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -127,7 +127,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
127 127
128 /* enabling the no-toggle interrupt mode would need an api hook */ 128 /* enabling the no-toggle interrupt mode would need an api hook */
129 mode = 0; 129 mode = 0;
130 max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize)); 130 max = get_unaligned_le16(&desc->wMaxPacketSize);
131 switch (max) { 131 switch (max) {
132 case 64: mode++; 132 case 64: mode++;
133 case 32: mode++; 133 case 32: mode++;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 95f7662376f1..881d74c3d964 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -2504,6 +2504,7 @@ static int proc_udc_open(struct inode *inode, struct file *file)
2504} 2504}
2505 2505
2506static const struct file_operations proc_ops = { 2506static const struct file_operations proc_ops = {
2507 .owner = THIS_MODULE,
2507 .open = proc_udc_open, 2508 .open = proc_udc_open,
2508 .read = seq_read, 2509 .read = seq_read,
2509 .llseek = seq_lseek, 2510 .llseek = seq_lseek,
@@ -2512,11 +2513,7 @@ static const struct file_operations proc_ops = {
2512 2513
2513static void create_proc_file(void) 2514static void create_proc_file(void)
2514{ 2515{
2515 struct proc_dir_entry *pde; 2516 proc_create(proc_filename, 0, NULL, &proc_ops);
2516
2517 pde = create_proc_entry (proc_filename, 0, NULL);
2518 if (pde)
2519 pde->proc_fops = &proc_ops;
2520} 2517}
2521 2518
2522static void remove_proc_file(void) 2519static void remove_proc_file(void)
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
new file mode 100644
index 000000000000..75eba202f737
--- /dev/null
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -0,0 +1,2404 @@
1/*
2 * Handles the Intel 27x USB Device Controller (UDC)
3 *
4 * Inspired by original driver by Frank Becker, David Brownell, and others.
5 * Copyright (C) 2008 Robert Jarzmik
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/version.h>
26#include <linux/errno.h>
27#include <linux/platform_device.h>
28#include <linux/delay.h>
29#include <linux/list.h>
30#include <linux/interrupt.h>
31#include <linux/proc_fs.h>
32#include <linux/clk.h>
33#include <linux/irq.h>
34
35#include <asm/byteorder.h>
36#include <asm/hardware.h>
37
38#include <linux/usb.h>
39#include <linux/usb/ch9.h>
40#include <linux/usb/gadget.h>
41
42#include <asm/arch/udc.h>
43
44#include "pxa27x_udc.h"
45
46/*
47 * This driver handles the USB Device Controller (UDC) in Intel's PXA 27x
48 * series processors.
49 *
50 * Such controller drivers work with a gadget driver. The gadget driver
51 * returns descriptors, implements configuration and data protocols used
52 * by the host to interact with this device, and allocates endpoints to
53 * the different protocol interfaces. The controller driver virtualizes
54 * usb hardware so that the gadget drivers will be more portable.
55 *
56 * This UDC hardware wants to implement a bit too much USB protocol. The
57 * biggest issues are: that the endpoints have to be set up before the
58 * controller can be enabled (minor, and not uncommon); and each endpoint
59 * can only have one configuration, interface and alternative interface
60 * number (major, and very unusual). Once set up, these cannot be changed
61 * without a controller reset.
62 *
63 * The workaround is to set up all combinations necessary for the gadgets which
64 * will work with this driver. This is done statically in the pxa_udc structure.
65 * See pxa_udc, udc_usb_ep versus pxa_ep, and matching function find_pxa_ep.
66 * (You could modify this if needed. Some drivers have a "fifo_mode" module
67 * parameter to facilitate such changes.)
68 *
69 * The combinations have been tested with these gadgets :
70 * - zero gadget
71 * - file storage gadget
72 * - ether gadget
73 *
74 * The driver doesn't use DMA, only IO access and IRQ callbacks. No use is
75 * made of UDC's double buffering either. USB "On-The-Go" is not implemented.
76 *
77 * All requests are handled the same way:
78 * - the driver tries to handle the request directly through the IO fifo
79 * - if the IO fifo is not big enough, the remainder is sent/received in
80 * the interrupt handler.
81 */
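/*
 * A minimal, standalone sketch of the "static table of pre-declared
 * endpoint configurations" workaround described above: each hardware
 * endpoint is declared once with the (config, interface, altsetting)
 * combination it serves, and a lookup picks the entry matching the
 * currently active combination.  The table contents are invented for
 * illustration only.
 */
#include <stdio.h>

struct hw_ep {
	int addr, dir_in;
	int config, interface, alt;
};

static const struct hw_ep eps[] = {
	{ 1, 1, 1, 0, 0 },	/* ep1 IN  for config 1, iface 0, alt 0 */
	{ 2, 0, 1, 0, 0 },	/* ep2 OUT for config 1, iface 0, alt 0 */
	{ 1, 1, 1, 0, 1 },	/* ep1 IN  again, but for alt 1 */
};

static const struct hw_ep *find_hw_ep(int addr, int dir_in,
				      int config, int interface, int alt)
{
	for (unsigned i = 0; i < sizeof(eps) / sizeof(eps[0]); i++)
		if (eps[i].addr == addr && eps[i].dir_in == dir_in &&
		    eps[i].config == config && eps[i].interface == interface &&
		    eps[i].alt == alt)
			return &eps[i];
	return NULL;
}

int main(void)
{
	const struct hw_ep *ep = find_hw_ep(1, 1, 1, 0, 1);

	printf("matched: %s\n", ep ? "yes" : "no");
	return 0;
}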
82
83#define DRIVER_VERSION "2008-04-18"
84#define DRIVER_DESC "PXA 27x USB Device Controller driver"
85
86static const char driver_name[] = "pxa27x_udc";
87static struct pxa_udc *the_controller;
88
89static void handle_ep(struct pxa_ep *ep);
90
91/*
92 * Debug filesystem
93 */
94#ifdef CONFIG_USB_GADGET_DEBUG_FS
95
96#include <linux/debugfs.h>
97#include <linux/uaccess.h>
98#include <linux/seq_file.h>
99
100static int state_dbg_show(struct seq_file *s, void *p)
101{
102 struct pxa_udc *udc = s->private;
103 int pos = 0, ret;
104 u32 tmp;
105
106 ret = -ENODEV;
107 if (!udc->driver)
108 goto out;
109
110 /* basic device status */
111 pos += seq_printf(s, DRIVER_DESC "\n"
112 "%s version: %s\nGadget driver: %s\n",
113 driver_name, DRIVER_VERSION,
114 udc->driver ? udc->driver->driver.name : "(none)");
115
116 tmp = udc_readl(udc, UDCCR);
117 pos += seq_printf(s,
118 "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), "
119 "con=%d,inter=%d,altinter=%d\n", tmp,
120 (tmp & UDCCR_OEN) ? " oen":"",
121 (tmp & UDCCR_AALTHNP) ? " aalthnp":"",
122 (tmp & UDCCR_AHNP) ? " rem" : "",
123 (tmp & UDCCR_BHNP) ? " rstir" : "",
124 (tmp & UDCCR_DWRE) ? " dwre" : "",
125 (tmp & UDCCR_SMAC) ? " smac" : "",
126 (tmp & UDCCR_EMCE) ? " emce" : "",
127 (tmp & UDCCR_UDR) ? " udr" : "",
128 (tmp & UDCCR_UDA) ? " uda" : "",
129 (tmp & UDCCR_UDE) ? " ude" : "",
130 (tmp & UDCCR_ACN) >> UDCCR_ACN_S,
131 (tmp & UDCCR_AIN) >> UDCCR_AIN_S,
132 (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S);
133 /* registers for device and ep0 */
134 pos += seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n",
135 udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1));
136 pos += seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n",
137 udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1));
138 pos += seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR));
139 pos += seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, "
140 "reconfig=%lu\n",
141 udc->stats.irqs_reset, udc->stats.irqs_suspend,
142 udc->stats.irqs_resume, udc->stats.irqs_reconfig);
143
144 ret = 0;
145out:
146 return ret;
147}
148
149static int queues_dbg_show(struct seq_file *s, void *p)
150{
151 struct pxa_udc *udc = s->private;
152 struct pxa_ep *ep;
153 struct pxa27x_request *req;
154 int pos = 0, i, maxpkt, ret;
155
156 ret = -ENODEV;
157 if (!udc->driver)
158 goto out;
159
160 /* dump endpoint queues */
161 for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
162 ep = &udc->pxa_ep[i];
163 maxpkt = ep->fifo_size;
164 pos += seq_printf(s, "%-12s max_pkt=%d %s\n",
165 EPNAME(ep), maxpkt, "pio");
166
167 if (list_empty(&ep->queue)) {
168 pos += seq_printf(s, "\t(nothing queued)\n");
169 continue;
170 }
171
172 list_for_each_entry(req, &ep->queue, queue) {
173 pos += seq_printf(s, "\treq %p len %d/%d buf %p\n",
174 &req->req, req->req.actual,
175 req->req.length, req->req.buf);
176 }
177 }
178
179 ret = 0;
180out:
181 return ret;
182}
183
184static int eps_dbg_show(struct seq_file *s, void *p)
185{
186 struct pxa_udc *udc = s->private;
187 struct pxa_ep *ep;
188 int pos = 0, i, ret;
189 u32 tmp;
190
191 ret = -ENODEV;
192 if (!udc->driver)
193 goto out;
194
195 ep = &udc->pxa_ep[0];
196 tmp = udc_ep_readl(ep, UDCCSR);
197 pos += seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n", tmp,
198 (tmp & UDCCSR0_SA) ? " sa" : "",
199 (tmp & UDCCSR0_RNE) ? " rne" : "",
200 (tmp & UDCCSR0_FST) ? " fst" : "",
201 (tmp & UDCCSR0_SST) ? " sst" : "",
202 (tmp & UDCCSR0_DME) ? " dme" : "",
203 (tmp & UDCCSR0_IPR) ? " ipr" : "",
204 (tmp & UDCCSR0_OPC) ? " opc" : "");
205 for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
206 ep = &udc->pxa_ep[i];
207 tmp = i? udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR);
208 pos += seq_printf(s, "%-12s: "
209 "IN %lu(%lu reqs), OUT %lu(%lu reqs), "
210 "irqs=%lu, udccr=0x%08x, udccsr=0x%03x, "
211 "udcbcr=%d\n",
212 EPNAME(ep),
213 ep->stats.in_bytes, ep->stats.in_ops,
214 ep->stats.out_bytes, ep->stats.out_ops,
215 ep->stats.irqs,
216 tmp, udc_ep_readl(ep, UDCCSR),
217 udc_ep_readl(ep, UDCBCR));
218 }
219
220 ret = 0;
221out:
222 return ret;
223}
224
225static int eps_dbg_open(struct inode *inode, struct file *file)
226{
227 return single_open(file, eps_dbg_show, inode->i_private);
228}
229
230static int queues_dbg_open(struct inode *inode, struct file *file)
231{
232 return single_open(file, queues_dbg_show, inode->i_private);
233}
234
235static int state_dbg_open(struct inode *inode, struct file *file)
236{
237 return single_open(file, state_dbg_show, inode->i_private);
238}
239
240static const struct file_operations state_dbg_fops = {
241 .owner = THIS_MODULE,
242 .open = state_dbg_open,
243 .llseek = seq_lseek,
244 .read = seq_read,
245 .release = single_release,
246};
247
248static const struct file_operations queues_dbg_fops = {
249 .owner = THIS_MODULE,
250 .open = queues_dbg_open,
251 .llseek = seq_lseek,
252 .read = seq_read,
253 .release = single_release,
254};
255
256static const struct file_operations eps_dbg_fops = {
257 .owner = THIS_MODULE,
258 .open = eps_dbg_open,
259 .llseek = seq_lseek,
260 .read = seq_read,
261 .release = single_release,
262};
263
264static void pxa_init_debugfs(struct pxa_udc *udc)
265{
266 struct dentry *root, *state, *queues, *eps;
267
268 root = debugfs_create_dir(udc->gadget.name, NULL);
269 if (IS_ERR(root) || !root)
270 goto err_root;
271
272 state = debugfs_create_file("udcstate", 0400, root, udc,
273 &state_dbg_fops);
274 if (!state)
275 goto err_state;
276 queues = debugfs_create_file("queues", 0400, root, udc,
277 &queues_dbg_fops);
278 if (!queues)
279 goto err_queues;
280 eps = debugfs_create_file("epstate", 0400, root, udc,
281 &eps_dbg_fops);
282 if (!eps)
283 goto err_eps;
284
285 udc->debugfs_root = root;
286 udc->debugfs_state = state;
287 udc->debugfs_queues = queues;
288 udc->debugfs_eps = eps;
289 return;
290err_eps:
291 debugfs_remove(queues);
292err_queues:
293 debugfs_remove(state);
294err_state:
295 debugfs_remove(root);
296err_root:
297 dev_err(udc->dev, "debugfs is not available\n");
298}
299
300static void pxa_cleanup_debugfs(struct pxa_udc *udc)
301{
302 debugfs_remove(udc->debugfs_eps);
303 debugfs_remove(udc->debugfs_queues);
304 debugfs_remove(udc->debugfs_state);
305 debugfs_remove(udc->debugfs_root);
306 udc->debugfs_eps = NULL;
307 udc->debugfs_queues = NULL;
308 udc->debugfs_state = NULL;
309 udc->debugfs_root = NULL;
310}
311
312#else
313static inline void pxa_init_debugfs(struct pxa_udc *udc)
314{
315}
316
317static inline void pxa_cleanup_debugfs(struct pxa_udc *udc)
318{
319}
320#endif
321
322/**
323 * is_match_usb_pxa - check if usb_ep and pxa_ep match
324 * @udc_usb_ep: usb endpoint
325 * @ep: pxa endpoint
326 * @config: configuration required in pxa_ep
327 * @interface: interface required in pxa_ep
328 * @altsetting: altsetting required in pxa_ep
329 *
330 * Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise
331 */
332static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep,
333 int config, int interface, int altsetting)
334{
335 if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr)
336 return 0;
337 if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in)
338 return 0;
339 if (usb_endpoint_type(&udc_usb_ep->desc) != ep->type)
340 return 0;
341 if ((ep->config != config) || (ep->interface != interface)
342 || (ep->alternate != altsetting))
343 return 0;
344 return 1;
345}
346
347/**
348 * find_pxa_ep - find pxa_ep structure matching udc_usb_ep
349 * @udc: pxa udc
350 * @udc_usb_ep: udc_usb_ep structure
351 *
352 * Match udc_usb_ep and all pxa_ep available, to see if one matches.
353 * This is necessary because of the strong pxa hardware restriction requiring
354 * that once pxa endpoints are initialized, their configuration is frozen, and
355 * no change can be made to their address, direction, or to the configuration,
356 * interface or altsetting in which they are active ... which differs from more
357 * usual models where endpoints are roughly just addressable fifos, and
358 * configuration events are left up to gadget drivers (like all control messages).
359 *
360 * Note that there is still a blurred point here:
361 * - we rely on the UDCCR register's "active interface" and "active altsetting".
362 * This contradicts the USB spec, where multiple interfaces can be
363 * active at the same time.
364 * - if we knew for sure that the pxa can handle multiple interfaces at the
365 * same time, assuming Intel's Developer Guide is wrong, this function
366 * should be reviewed, and a cache of couples (iface, altsetting) should
367 * be kept in the pxa_udc structure. In this case this function would match
368 * against the cache of couples instead of the "last altsetting" set up.
369 *
370 * Returns the matched pxa_ep structure or NULL if none found
371 */
372static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc,
373 struct udc_usb_ep *udc_usb_ep)
374{
375 int i;
376 struct pxa_ep *ep;
377 int cfg = udc->config;
378 int iface = udc->last_interface;
379 int alt = udc->last_alternate;
380
381 if (udc_usb_ep == &udc->udc_usb_ep[0])
382 return &udc->pxa_ep[0];
383
384 for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
385 ep = &udc->pxa_ep[i];
386 if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt))
387 return ep;
388 }
389 return NULL;
390}
391
392/**
393 * update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep
394 * @udc: pxa udc
395 *
396 * Context: in_interrupt()
397 *
398 * Updates all pxa_ep fields in udc_usb_ep structures, if this field was
399 * previously set up (and is not NULL). The update is necessary if a
400 * configuration change or altsetting change was issued by the USB host.
401 */
402static void update_pxa_ep_matches(struct pxa_udc *udc)
403{
404 int i;
405 struct udc_usb_ep *udc_usb_ep;
406
407 for (i = 1; i < NR_USB_ENDPOINTS; i++) {
408 udc_usb_ep = &udc->udc_usb_ep[i];
409 if (udc_usb_ep->pxa_ep)
410 udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep);
411 }
412}
413
414/**
415 * pio_irq_enable - Enables irq generation for one endpoint
416 * @ep: udc endpoint
417 */
418static void pio_irq_enable(struct pxa_ep *ep)
419{
420 struct pxa_udc *udc = ep->dev;
421 int index = EPIDX(ep);
422 u32 udcicr0 = udc_readl(udc, UDCICR0);
423 u32 udcicr1 = udc_readl(udc, UDCICR1);
424
425 if (index < 16)
426 udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2)));
427 else
428 udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2)));
429}
430
431/**
432 * pio_irq_disable - Disables irq generation for one endpoint
433 * @ep: udc endpoint
434 * Note: the endpoint index is derived from @ep via EPIDX().
435 */
436static void pio_irq_disable(struct pxa_ep *ep)
437{
438 struct pxa_udc *udc = ep->dev;
439 int index = EPIDX(ep);
440 u32 udcicr0 = udc_readl(udc, UDCICR0);
441 u32 udcicr1 = udc_readl(udc, UDCICR1);
442
443 if (index < 16)
444 udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2)));
445 else
446 udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2)));
447}
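/*
 * A minimal sketch of the mask arithmetic in pio_irq_enable() /
 * pio_irq_disable() above: each endpoint owns two interrupt-control
 * bits, endpoints 0..15 live in the first 32-bit word and endpoints
 * 16 and above in the second.  The two-word array here stands in for
 * the UDCICR0/UDCICR1 registers, purely for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static void ep_irq_set(uint32_t icr[2], int index, int enable)
{
	int word = index < 16 ? 0 : 1;
	int shift = (index < 16 ? index : index - 16) * 2;
	uint32_t mask = 3u << shift;

	if (enable)
		icr[word] |= mask;
	else
		icr[word] &= ~mask;
}

int main(void)
{
	uint32_t icr[2] = { 0, 0 };

	ep_irq_set(icr, 3, 1);
	ep_irq_set(icr, 17, 1);
	printf("icr0=0x%08x icr1=0x%08x\n", icr[0], icr[1]);
	/* prints icr0=0x000000c0 icr1=0x0000000c */

	ep_irq_set(icr, 3, 0);
	printf("icr0=0x%08x icr1=0x%08x\n", icr[0], icr[1]);
	return 0;
}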
448
449/**
450 * udc_set_mask_UDCCR - set bits in UDCCR
451 * @udc: udc device
452 * @mask: bits to set in UDCCR
453 *
454 * Sets bits in UDCCR, leaving DME and FST bits as they were.
455 */
456static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask)
457{
458 u32 udccr = udc_readl(udc, UDCCR);
459 udc_writel(udc, UDCCR,
460 (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
461}
462
463/**
464 * udc_clear_mask_UDCCR - clears bits in UDCCR
465 * @udc: udc device
466 * @mask: bit to clear in UDCCR
467 *
468 * Clears bits in UDCCR, leaving DME and FST bits as they were.
469 */
470static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
471{
472 u32 udccr = udc_readl(udc, UDCCR);
473 udc_writel(udc, UDCCR,
474 (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
475}
476
477/**
478 * ep_count_bytes_remain - get how many bytes in udc endpoint
479 * @ep: udc endpoint
480 *
481 * Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP)
482 */
483static int ep_count_bytes_remain(struct pxa_ep *ep)
484{
485 if (ep->dir_in)
486 return -EOPNOTSUPP;
487 return udc_ep_readl(ep, UDCBCR) & 0x3ff;
488}
489
490/**
491 * ep_is_empty - checks if ep has byte ready for reading
492 * @ep: udc endpoint
493 *
494 * If endpoint is the control endpoint, checks if there are bytes in the
495 * control endpoint fifo. If endpoint is a data endpoint, checks if bytes
496 * are ready for reading on OUT endpoint.
497 *
498 * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint
499 */
500static int ep_is_empty(struct pxa_ep *ep)
501{
502 int ret;
503
504 if (!is_ep0(ep) && ep->dir_in)
505 return -EOPNOTSUPP;
506 if (is_ep0(ep))
507 ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE);
508 else
509 ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE);
510 return ret;
511}
512
513/**
514 * ep_is_full - checks if ep has place to write bytes
515 * @ep: udc endpoint
516 *
517 * If endpoint is not the control endpoint and is an IN endpoint, checks if
518 * there is place to write bytes into the endpoint.
519 *
520 * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint
521 */
522static int ep_is_full(struct pxa_ep *ep)
523{
524 if (is_ep0(ep))
525 return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR);
526 if (!ep->dir_in)
527 return -EOPNOTSUPP;
528 return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF));
529}
530
531/**
532 * epout_has_pkt - checks if OUT endpoint fifo has a packet available
533 * @ep: pxa endpoint
534 *
535 * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep.
536 */
537static int epout_has_pkt(struct pxa_ep *ep)
538{
539 if (!is_ep0(ep) && ep->dir_in)
540 return -EOPNOTSUPP;
541 if (is_ep0(ep))
542 return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC);
543 return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC);
544}
545
546/**
547 * set_ep0state - Set ep0 automata state
548 * @dev: udc device
549 * @state: state
550 */
551static void set_ep0state(struct pxa_udc *udc, int state)
552{
553 struct pxa_ep *ep = &udc->pxa_ep[0];
554 char *old_stname = EP0_STNAME(udc);
555
556 udc->ep0state = state;
557 ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname,
558 EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR),
559 udc_ep_readl(ep, UDCBCR));
560}
561
562/**
563 * ep0_idle - Put control endpoint into idle state
564 * @dev: udc device
565 */
566static void ep0_idle(struct pxa_udc *dev)
567{
568 set_ep0state(dev, WAIT_FOR_SETUP);
569}
570
571/**
572 * inc_ep_stats_reqs - Update ep stats counts
573 * @ep: physical endpoint
574 * @is_in: ep direction (USB_DIR_IN or 0)
575 *
576 *
577 */
578static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in)
579{
580 if (is_in)
581 ep->stats.in_ops++;
582 else
583 ep->stats.out_ops++;
584}
585
586/**
587 * inc_ep_stats_bytes - Update ep stats counts
588 * @ep: physical endpoint
589 * @count: bytes transferred on endpoint
590 * @is_in: ep direction (USB_DIR_IN or 0)
591 *
592 */
593static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
594{
595 if (is_in)
596 ep->stats.in_bytes += count;
597 else
598 ep->stats.out_bytes += count;
599}
600
601/**
602 * pxa_ep_setup - Sets up a usb physical endpoint
603 * @ep: pxa27x physical endpoint
604 *
605 * Find the physical pxa27x ep, and setup its UDCCR
606 */
607static __init void pxa_ep_setup(struct pxa_ep *ep)
608{
609 u32 new_udccr;
610
611 new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN)
612 | ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN)
613 | ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN)
614 | ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN)
615 | ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET)
616 | ((ep->dir_in) ? UDCCONR_ED : 0)
617 | ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS)
618 | UDCCONR_EE;
619
620 udc_ep_writel(ep, UDCCR, new_udccr);
621}
622
623/**
624 * pxa_eps_setup - Sets up all usb physical endpoints
625 * @dev: udc device
626 *
627 * Setup all pxa physical endpoints, except ep0
628 */
629static __init void pxa_eps_setup(struct pxa_udc *dev)
630{
631 unsigned int i;
632
633 dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev);
634
635 for (i = 1; i < NR_PXA_ENDPOINTS; i++)
636 pxa_ep_setup(&dev->pxa_ep[i]);
637}
638
639/**
640 * pxa_ep_alloc_request - Allocate usb request
641 * @_ep: usb endpoint
642 * @gfp_flags: flags for memory allocation
643 *
644 * For the pxa27x, these can just wrap kmalloc/kfree. gadget drivers
645 * must still pass correctly initialized endpoints, since other controller
646 * drivers may care about how it's currently set up (dma issues etc).
647 */
648static struct usb_request *
649pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
650{
651 struct pxa27x_request *req;
652
653 req = kzalloc(sizeof *req, gfp_flags);
654 if (!req || !_ep)
655 return NULL;
656
657 INIT_LIST_HEAD(&req->queue);
658 req->in_use = 0;
659 req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
660
661 return &req->req;
662}
663
664/**
665 * pxa_ep_free_request - Free usb request
666 * @_ep: usb endpoint
667 * @_req: usb request
668 *
669 * Wrapper around kfree to free _req
670 */
671static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
672{
673 struct pxa27x_request *req;
674
675 req = container_of(_req, struct pxa27x_request, req);
676 WARN_ON(!list_empty(&req->queue));
677 kfree(req);
678}
679
680/**
681 * ep_add_request - add a request to the endpoint's queue
682 * @ep: usb endpoint
683 * @req: usb request
684 *
685 * Context: ep->lock held
686 *
687 * Queues the request in the endpoint's queue, and enables the interrupts
688 * on the endpoint.
689 */
690static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req)
691{
692 if (unlikely(!req))
693 return;
694 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
695 req->req.length, udc_ep_readl(ep, UDCCSR));
696
697 req->in_use = 1;
698 list_add_tail(&req->queue, &ep->queue);
699 pio_irq_enable(ep);
700}
701
702/**
703 * ep_del_request - removes a request from the endpoint's queue
704 * @ep: usb endpoint
705 * @req: usb request
706 *
707 * Context: ep->lock held
708 *
709 * Unqueue the request from the endpoint's queue. If there are no more requests
710 * on the endpoint, and if it's not the control endpoint, interrupts are
711 * disabled on the endpoint.
712 */
713static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
714{
715 if (unlikely(!req))
716 return;
717 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
718 req->req.length, udc_ep_readl(ep, UDCCSR));
719
720 list_del_init(&req->queue);
721 req->in_use = 0;
722 if (!is_ep0(ep) && list_empty(&ep->queue))
723 pio_irq_disable(ep);
724}
725
726/**
727 * req_done - Complete an usb request
728 * @ep: pxa physical endpoint
729 * @req: pxa request
730 * @status: usb request status sent to gadget API
731 *
732 * Context: ep->lock held
733 *
734 * Retire a pxa27x usb request. Endpoint must be locked.
735 */
736static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
737{
738 ep_del_request(ep, req);
739 if (likely(req->req.status == -EINPROGRESS))
740 req->req.status = status;
741 else
742 status = req->req.status;
743
744 if (status && status != -ESHUTDOWN)
745 ep_dbg(ep, "complete req %p stat %d len %u/%u\n",
746 &req->req, status,
747 req->req.actual, req->req.length);
748
749 req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
750}
751
752/**
753 * ep_end_out_req - Ends endpoint OUT request
754 * @ep: physical endpoint
755 * @req: pxa request
756 *
757 * Context: ep->lock held
758 *
759 * Ends endpoint OUT request (completes usb request).
760 */
761static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
762{
763 inc_ep_stats_reqs(ep, !USB_DIR_IN);
764 req_done(ep, req, 0);
765}
766
767/**
768 * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
769 * @ep: physical endpoint
770 * @req: pxa request
771 *
772 * Context: ep->lock held
773 *
774 * Ends control endpoint OUT request (completes usb request), and puts
775 * control endpoint into idle state
776 */
777static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
778{
779 set_ep0state(ep->dev, OUT_STATUS_STAGE);
780 ep_end_out_req(ep, req);
781 ep0_idle(ep->dev);
782}
783
784/**
785 * ep_end_in_req - Ends endpoint IN request
786 * @ep: physical endpoint
787 * @req: pxa request
788 *
789 * Context: ep->lock held
790 *
791 * Ends endpoint IN request (completes usb request).
792 */
793static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
794{
795 inc_ep_stats_reqs(ep, USB_DIR_IN);
796 req_done(ep, req, 0);
797}
798
799/**
800 * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
801 * @ep: physical endpoint
802 * @req: pxa request
803 *
804 * Context: ep->lock held
805 *
806 * Ends control endpoint IN request (completes usb request), and puts
807 * control endpoint into status state
808 */
809static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
810{
811 struct pxa_udc *udc = ep->dev;
812
813 set_ep0state(udc, IN_STATUS_STAGE);
814 ep_end_in_req(ep, req);
815}
816
817/**
818 * nuke - Dequeue all requests
819 * @ep: pxa endpoint
820 * @status: usb request status
821 *
822 * Context: ep->lock held
823 *
824 * Dequeues all requests on an endpoint. As a side effect, interrupts will be
825 * disabled on that endpoint (because no more requests).
826 */
827static void nuke(struct pxa_ep *ep, int status)
828{
829 struct pxa27x_request *req;
830
831 while (!list_empty(&ep->queue)) {
832 req = list_entry(ep->queue.next, struct pxa27x_request, queue);
833 req_done(ep, req, status);
834 }
835}
836
837/**
838 * read_packet - transfer 1 packet from an OUT endpoint into request
839 * @ep: pxa physical endpoint
840 * @req: usb request
841 *
842 * Takes bytes from the OUT endpoint and transfers them into the usb request.
843 * If there is less space in request than bytes received in OUT endpoint,
844 * bytes are left in the OUT endpoint.
845 *
846 * Returns how many bytes were actually transferred
847 */
848static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
849{
850 u32 *buf;
851 int bytes_ep, bufferspace, count, i;
852
853 bytes_ep = ep_count_bytes_remain(ep);
854 bufferspace = req->req.length - req->req.actual;
855
856 buf = (u32 *)(req->req.buf + req->req.actual);
857 prefetchw(buf);
858
859 if (likely(!ep_is_empty(ep)))
860 count = min(bytes_ep, bufferspace);
861 else /* zlp */
862 count = 0;
863
864 for (i = count; i > 0; i -= 4)
865 *buf++ = udc_ep_readl(ep, UDCDR);
866 req->req.actual += count;
867
868 udc_ep_writel(ep, UDCCSR, UDCCSR_PC);
869
870 return count;
871}
872
873/**
874 * write_packet - transfer 1 packet from request into an IN endpoint
875 * @ep: pxa physical endpoint
876 * @req: usb request
877 * @max: max bytes that fit into endpoint
878 *
879 * Takes bytes from usb request, and transfers them into the physical
880 * endpoint. If there are no bytes to transfer, doesn't write anything
881 * to physical endpoint.
882 *
883 * Returns how many bytes were actually transferred.
884 */
885static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req,
886 unsigned int max)
887{
888 int length, count, remain, i;
889 u32 *buf;
890 u8 *buf_8;
891
892 buf = (u32 *)(req->req.buf + req->req.actual);
893 prefetch(buf);
894
895 length = min(req->req.length - req->req.actual, max);
896 req->req.actual += length;
897
898 remain = length & 0x3;
899 count = length & ~(0x3);
900 for (i = count; i > 0 ; i -= 4)
901 udc_ep_writel(ep, UDCDR, *buf++);
902
903 buf_8 = (u8 *)buf;
904 for (i = remain; i > 0; i--)
905 udc_ep_writeb(ep, UDCDR, *buf_8++);
906
907 ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain,
908 udc_ep_readl(ep, UDCCSR));
909
910 return length;
911}
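/*
 * Worked example of the split above (illustration only, not part of the
 * driver): if min(req->req.length - req->req.actual, max) is 13 bytes, then
 * count = 13 & ~0x3 = 12, giving three 32-bit writes to UDCDR, and
 * remain = 13 & 0x3 = 1, giving one trailing byte write.
 */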
912
913/**
914 * read_fifo - Transfer packets from OUT endpoint into usb request
915 * @ep: pxa physical endpoint
916 * @req: usb request
917 *
918 * Context: callable when in_interrupt()
919 *
920 * Unload as many packets as possible from the fifo we use for usb OUT
921 * transfers and put them into the request. Caller should have made sure
922 * there's at least one packet ready.
923 * Doesn't complete the request, that's the caller's job
924 *
925 * Returns 1 if the request completed, 0 otherwise
926 */
927static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
928{
929 int count, is_short, completed = 0;
930
931 while (epout_has_pkt(ep)) {
932 count = read_packet(ep, req);
933 inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
934
935 is_short = (count < ep->fifo_size);
936 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
937 udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
938 &req->req, req->req.actual, req->req.length);
939
940 /* completion */
941 if (is_short || req->req.actual == req->req.length) {
942 completed = 1;
943 break;
944 }
945 /* finished that packet. the next one may be waiting... */
946 }
947 return completed;
948}
949
950/**
951 * write_fifo - transfer packets from usb request into an IN endpoint
952 * @ep: pxa physical endpoint
953 * @req: pxa usb request
954 *
955 * Write to an IN endpoint fifo, as many packets as possible.
956 * irqs will use this to write the rest later.
957 * caller guarantees at least one packet buffer is ready (or a zlp).
958 * Doesn't complete the request, that's the caller's job
959 *
960 * Returns 1 if request fully transferred, 0 if partial transfer
961 */
962static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
963{
964 unsigned max;
965 int count, is_short, is_last = 0, completed = 0, totcount = 0;
966 u32 udccsr;
967
968 max = ep->fifo_size;
969 do {
970 is_short = 0;
971
972 udccsr = udc_ep_readl(ep, UDCCSR);
973 if (udccsr & UDCCSR_PC) {
974 ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
975 udccsr);
976 udc_ep_writel(ep, UDCCSR, UDCCSR_PC);
977 }
978 if (udccsr & UDCCSR_TRN) {
979 ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
980 udccsr);
981 udc_ep_writel(ep, UDCCSR, UDCCSR_TRN);
982 }
983
984 count = write_packet(ep, req, max);
985 inc_ep_stats_bytes(ep, count, USB_DIR_IN);
986 totcount += count;
987
988 /* last packet is usually short (or a zlp) */
989 if (unlikely(count < max)) {
990 is_last = 1;
991 is_short = 1;
992 } else {
993 if (likely(req->req.length > req->req.actual)
994 || req->req.zero)
995 is_last = 0;
996 else
997 is_last = 1;
998 /* interrupt/iso maxpacket may not fill the fifo */
999 is_short = unlikely(max < ep->fifo_size);
1000 }
1001
1002 if (is_short)
1003 udc_ep_writel(ep, UDCCSR, UDCCSR_SP);
1004
1005 /* requests complete when all IN data is in the FIFO */
1006 if (is_last) {
1007 completed = 1;
1008 break;
1009 }
1010 } while (!ep_is_full(ep));
1011
1012 ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n",
1013 totcount, is_last ? "/L" : "", is_short ? "/S" : "",
1014 req->req.length - req->req.actual, &req->req);
1015
1016 return completed;
1017}
1018
1019/**
1020 * read_ep0_fifo - Transfer packets from control endpoint into usb request
1021 * @ep: control endpoint
1022 * @req: pxa usb request
1023 *
1024 * Special ep0 version of the above read_fifo. Reads as many bytes from control
1025 * endpoint as can be read, and stores them into usb request (limited by request
1026 * maximum length).
1027 *
1028 * Returns 0 if usb request only partially filled, 1 if fully filled
1029 */
1030static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1031{
1032 int count, is_short, completed = 0;
1033
1034 while (epout_has_pkt(ep)) {
1035 count = read_packet(ep, req);
1036 udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC);
1037 inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
1038
1039 is_short = (count < ep->fifo_size);
1040 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
1041 udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
1042 &req->req, req->req.actual, req->req.length);
1043
1044 if (is_short || req->req.actual >= req->req.length) {
1045 completed = 1;
1046 break;
1047 }
1048 }
1049
1050 return completed;
1051}
1052
1053/**
1054 * write_ep0_fifo - Send a request to control endpoint (ep0 in)
1055 * @ep: control endpoint
1056 * @req: request
1057 *
1058 * Context: callable when in_interrupt()
1059 *
1060 * Sends a request (or a part of the request) to the control endpoint (ep0 in).
1061 * If the request doesn't fit, the remaining part will be sent from irq.
1062 * The request is considered fully written only if either :
1063 * - last write transferred all remaining bytes, but fifo was not fully filled
1064 * - last write was a 0 length write
1065 *
1066 * Returns 1 if request fully written, 0 if request only partially sent
1067 */
1068static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1069{
1070 unsigned count;
1071 int is_last, is_short;
1072
1073 count = write_packet(ep, req, EP0_FIFO_SIZE);
1074 inc_ep_stats_bytes(ep, count, USB_DIR_IN);
1075
1076 is_short = (count < EP0_FIFO_SIZE);
1077 is_last = ((count == 0) || (count < EP0_FIFO_SIZE));
1078
1079 /* Sends either a short packet or a 0 length packet */
1080 if (unlikely(is_short))
1081 udc_ep_writel(ep, UDCCSR, UDCCSR0_IPR);
1082
1083 ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
1084 count, is_short ? "/S" : "", is_last ? "/L" : "",
1085 req->req.length - req->req.actual,
1086 &req->req, udc_ep_readl(ep, UDCCSR));
1087
1088 return is_last;
1089}
1090
1091/**
1092 * pxa_ep_queue - Queue a request into an endpoint
1093 * @_ep: usb endpoint
1094 * @_req: usb request
1095 * @gfp_flags: flags
1096 *
1097 * Context: normally called when !in_interrupt, but callable when in_interrupt()
1098 * in the special case of ep0 setup :
1099 * (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue)
1100 *
1101 * Returns 0 if succeeded, error code otherwise
1102 */
1103static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1104 gfp_t gfp_flags)
1105{
1106 struct udc_usb_ep *udc_usb_ep;
1107 struct pxa_ep *ep;
1108 struct pxa27x_request *req;
1109 struct pxa_udc *dev;
1110 unsigned long flags;
1111 int rc = 0;
1112 int is_first_req;
1113 unsigned length;
1114
1115 req = container_of(_req, struct pxa27x_request, req);
1116 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1117
1118 if (unlikely(!_req || !_req->complete || !_req->buf))
1119 return -EINVAL;
1120
1121 if (unlikely(!_ep))
1122 return -EINVAL;
1123
1124 dev = udc_usb_ep->dev;
1125 ep = udc_usb_ep->pxa_ep;
1126 if (unlikely(!ep))
1127 return -EINVAL;
1128
1129 dev = ep->dev;
1130 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
1131 ep_dbg(ep, "bogus device state\n");
1132 return -ESHUTDOWN;
1133 }
1134
1135 /* iso is always one packet per request, that's the only way
1136 * we can report per-packet status. that also helps with dma.
1137 */
1138 if (unlikely(EPXFERTYPE_is_ISO(ep)
1139 && req->req.length > ep->fifo_size))
1140 return -EMSGSIZE;
1141
1142 spin_lock_irqsave(&ep->lock, flags);
1143
1144 is_first_req = list_empty(&ep->queue);
1145 ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
1146 _req, is_first_req ? "yes" : "no",
1147 _req->length, _req->buf);
1148
1149 if (!ep->enabled) {
1150 _req->status = -ESHUTDOWN;
1151 rc = -ESHUTDOWN;
1152 goto out;
1153 }
1154
1155 if (req->in_use) {
1156 ep_err(ep, "refusing to queue req %p (already queued)\n", req);
1157 goto out;
1158 }
1159
1160 length = _req->length;
1161 _req->status = -EINPROGRESS;
1162 _req->actual = 0;
1163
1164 ep_add_request(ep, req);
1165
1166 if (is_ep0(ep)) {
1167 switch (dev->ep0state) {
1168 case WAIT_ACK_SET_CONF_INTERF:
1169 if (length == 0) {
1170 ep_end_in_req(ep, req);
1171 } else {
1172					ep_err(ep, "got a request of %d bytes while "
1173						"in state WAIT_ACK_SET_CONF_INTERF\n",
1174 length);
1175 ep_del_request(ep, req);
1176 rc = -EL2HLT;
1177 }
1178 ep0_idle(ep->dev);
1179 break;
1180 case IN_DATA_STAGE:
1181 if (!ep_is_full(ep))
1182 if (write_ep0_fifo(ep, req))
1183 ep0_end_in_req(ep, req);
1184 break;
1185 case OUT_DATA_STAGE:
1186 if ((length == 0) || !epout_has_pkt(ep))
1187 if (read_ep0_fifo(ep, req))
1188 ep0_end_out_req(ep, req);
1189 break;
1190 default:
1191 ep_err(ep, "odd state %s to send me a request\n",
1192 EP0_STNAME(ep->dev));
1193 ep_del_request(ep, req);
1194 rc = -EL2HLT;
1195 break;
1196 }
1197 } else {
1198 handle_ep(ep);
1199 }
1200
1201out:
1202 spin_unlock_irqrestore(&ep->lock, flags);
1203 return rc;
1204}
1205
1206/**
1207 * pxa_ep_dequeue - Dequeue one request
1208 * @_ep: usb endpoint
1209 * @_req: usb request
1210 *
1211 * Return 0 if no error, -EINVAL otherwise; the dequeued request completes with -ECONNRESET
1212 */
1213static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1214{
1215 struct pxa_ep *ep;
1216 struct udc_usb_ep *udc_usb_ep;
1217 struct pxa27x_request *req;
1218 unsigned long flags;
1219 int rc;
1220
1221 if (!_ep)
1222 return -EINVAL;
1223 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1224 ep = udc_usb_ep->pxa_ep;
1225 if (!ep || is_ep0(ep))
1226 return -EINVAL;
1227
1228 spin_lock_irqsave(&ep->lock, flags);
1229
1230 /* make sure it's actually queued on this endpoint */
1231 list_for_each_entry(req, &ep->queue, queue) {
1232 if (&req->req == _req)
1233 break;
1234 }
1235
1236 rc = -EINVAL;
1237 if (&req->req != _req)
1238 goto out;
1239
1240 rc = 0;
1241 req_done(ep, req, -ECONNRESET);
1242out:
1243 spin_unlock_irqrestore(&ep->lock, flags);
1244 return rc;
1245}
1246
1247/**
1248 * pxa_ep_set_halt - Halts operations on one endpoint
1249 * @_ep: usb endpoint
1250 * @value: 1 to halt the endpoint, 0 to clear the halt (only the host can clear it)
1251 *
1252 * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise
1253 */
1254static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
1255{
1256 struct pxa_ep *ep;
1257 struct udc_usb_ep *udc_usb_ep;
1258 unsigned long flags;
1259 int rc;
1260
1261
1262 if (!_ep)
1263 return -EINVAL;
1264 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1265 ep = udc_usb_ep->pxa_ep;
1266 if (!ep || is_ep0(ep))
1267 return -EINVAL;
1268
1269 if (value == 0) {
1270 /*
1271 * This path (reset toggle+halt) is needed to implement
1272 * SET_INTERFACE on normal hardware. but it can't be
1273 * done from software on the PXA UDC, and the hardware
1274 * forgets to do it as part of SET_INTERFACE automagic.
1275 */
1276 ep_dbg(ep, "only host can clear halt\n");
1277 return -EROFS;
1278 }
1279
1280 spin_lock_irqsave(&ep->lock, flags);
1281
1282 rc = -EAGAIN;
1283 if (ep->dir_in && (ep_is_full(ep) || !list_empty(&ep->queue)))
1284 goto out;
1285
1286 /* FST, FEF bits are the same for control and non control endpoints */
1287 rc = 0;
1288 udc_ep_writel(ep, UDCCSR, UDCCSR_FST | UDCCSR_FEF);
1289 if (is_ep0(ep))
1290 set_ep0state(ep->dev, STALL);
1291
1292out:
1293 spin_unlock_irqrestore(&ep->lock, flags);
1294 return rc;
1295}
1296
1297/**
1298 * pxa_ep_fifo_status - Get how many bytes in physical endpoint
1299 * @_ep: usb endpoint
1300 *
1301 * Returns number of bytes in OUT fifos. Broken for IN fifos.
1302 */
1303static int pxa_ep_fifo_status(struct usb_ep *_ep)
1304{
1305 struct pxa_ep *ep;
1306 struct udc_usb_ep *udc_usb_ep;
1307
1308 if (!_ep)
1309 return -ENODEV;
1310 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1311 ep = udc_usb_ep->pxa_ep;
1312 if (!ep || is_ep0(ep))
1313 return -ENODEV;
1314
1315 if (ep->dir_in)
1316 return -EOPNOTSUPP;
1317 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep))
1318 return 0;
1319 else
1320 return ep_count_bytes_remain(ep) + 1;
1321}
1322
1323/**
1324 * pxa_ep_fifo_flush - Flushes one endpoint
1325 * @_ep: usb endpoint
1326 *
1327 * Discards all data in one endpoint (IN or OUT), except control endpoint.
1328 */
1329static void pxa_ep_fifo_flush(struct usb_ep *_ep)
1330{
1331 struct pxa_ep *ep;
1332 struct udc_usb_ep *udc_usb_ep;
1333 unsigned long flags;
1334
1335 if (!_ep)
1336 return;
1337 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1338 ep = udc_usb_ep->pxa_ep;
1339 if (!ep || is_ep0(ep))
1340 return;
1341
1342 spin_lock_irqsave(&ep->lock, flags);
1343
1344 if (unlikely(!list_empty(&ep->queue)))
1345 ep_dbg(ep, "called while queue list not empty\n");
1346 ep_dbg(ep, "called\n");
1347
1348 /* for OUT, just read and discard the FIFO contents. */
1349 if (!ep->dir_in) {
1350 while (!ep_is_empty(ep))
1351 udc_ep_readl(ep, UDCDR);
1352 } else {
1353 /* most IN status is the same, but ISO can't stall */
1354 udc_ep_writel(ep, UDCCSR,
1355 UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
1356 | (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
1357 }
1358
1359 spin_unlock_irqrestore(&ep->lock, flags);
1360
1361 return;
1362}
1363
1364/**
1365 * pxa_ep_enable - Enables usb endpoint
1366 * @_ep: usb endpoint
1367 * @desc: usb endpoint descriptor
1368 *
1369 * Nothing much to do here, as ep configuration is done once and for all
1370 * before udc is enabled. After udc enable, no physical endpoint configuration
1371 * can be changed.
1372 * Function makes sanity checks and flushes the endpoint.
1373 */
1374static int pxa_ep_enable(struct usb_ep *_ep,
1375 const struct usb_endpoint_descriptor *desc)
1376{
1377 struct pxa_ep *ep;
1378 struct udc_usb_ep *udc_usb_ep;
1379 struct pxa_udc *udc;
1380
1381 if (!_ep || !desc)
1382 return -EINVAL;
1383
1384 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1385 if (udc_usb_ep->pxa_ep) {
1386 ep = udc_usb_ep->pxa_ep;
1387 ep_warn(ep, "usb_ep %s already enabled, doing nothing\n",
1388 _ep->name);
1389 } else {
1390 ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep);
1391 }
1392
1393 if (!ep || is_ep0(ep)) {
1394 dev_err(udc_usb_ep->dev->dev,
1395 "unable to match pxa_ep for ep %s\n",
1396 _ep->name);
1397 return -EINVAL;
1398 }
1399
1400 if ((desc->bDescriptorType != USB_DT_ENDPOINT)
1401 || (ep->type != usb_endpoint_type(desc))) {
1402 ep_err(ep, "type mismatch\n");
1403 return -EINVAL;
1404 }
1405
1406 if (ep->fifo_size < le16_to_cpu(desc->wMaxPacketSize)) {
1407 ep_err(ep, "bad maxpacket\n");
1408 return -ERANGE;
1409 }
1410
1411 udc_usb_ep->pxa_ep = ep;
1412 udc = ep->dev;
1413
1414 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
1415 ep_err(ep, "bogus device state\n");
1416 return -ESHUTDOWN;
1417 }
1418
1419 ep->enabled = 1;
1420
1421 /* flush fifo (mostly for OUT buffers) */
1422 pxa_ep_fifo_flush(_ep);
1423
1424 ep_dbg(ep, "enabled\n");
1425 return 0;
1426}
1427
1428/**
1429 * pxa_ep_disable - Disable usb endpoint
1430 * @_ep: usb endpoint
1431 *
1432 * Same as for pxa_ep_enable, no physical endpoint configuration can be
1433 * changed.
1434 * Function flushes the endpoint and related requests.
1435 */
1436static int pxa_ep_disable(struct usb_ep *_ep)
1437{
1438 struct pxa_ep *ep;
1439 struct udc_usb_ep *udc_usb_ep;
1440 unsigned long flags;
1441
1442 if (!_ep)
1443 return -EINVAL;
1444
1445 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1446 ep = udc_usb_ep->pxa_ep;
1447 if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
1448 return -EINVAL;
1449
1450 spin_lock_irqsave(&ep->lock, flags);
1451 ep->enabled = 0;
1452 nuke(ep, -ESHUTDOWN);
1453 spin_unlock_irqrestore(&ep->lock, flags);
1454
1455 pxa_ep_fifo_flush(_ep);
1456 udc_usb_ep->pxa_ep = NULL;
1457
1458 ep_dbg(ep, "disabled\n");
1459 return 0;
1460}
1461
1462static struct usb_ep_ops pxa_ep_ops = {
1463 .enable = pxa_ep_enable,
1464 .disable = pxa_ep_disable,
1465
1466 .alloc_request = pxa_ep_alloc_request,
1467 .free_request = pxa_ep_free_request,
1468
1469 .queue = pxa_ep_queue,
1470 .dequeue = pxa_ep_dequeue,
1471
1472 .set_halt = pxa_ep_set_halt,
1473 .fifo_status = pxa_ep_fifo_status,
1474 .fifo_flush = pxa_ep_fifo_flush,
1475};
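/*
 * A minimal usage sketch (kept out of the build, not part of this driver):
 * how a gadget driver reaches the pxa_ep_ops above through the generic
 * usb_ep API. The names my_complete()/my_start_io() and the buffer handling
 * are hypothetical.
 */
#if 0
static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status and req->actual were filled in by req_done() */
}

static int my_start_io(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc, void *buf)
{
	struct usb_request *req;
	int ret;

	ret = usb_ep_enable(ep, desc);		/* ends up in pxa_ep_enable() */
	if (ret)
		return ret;

	req = usb_ep_alloc_request(ep, GFP_KERNEL); /* pxa_ep_alloc_request() */
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = BULK_FIFO_SIZE;		/* e.g. one bulk packet */
	req->complete = my_complete;
	return usb_ep_queue(ep, req, GFP_ATOMIC);   /* pxa_ep_queue() */
}
#endif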
1476
1477
1478/**
1479 * pxa_udc_get_frame - Returns usb frame number
1480 * @_gadget: usb gadget
1481 */
1482static int pxa_udc_get_frame(struct usb_gadget *_gadget)
1483{
1484 struct pxa_udc *udc = to_gadget_udc(_gadget);
1485
1486 return (udc_readl(udc, UDCFNR) & 0x7ff);
1487}
1488
1489/**
1490 * pxa_udc_wakeup - Force udc device out of suspend
1491 * @_gadget: usb gadget
1492 *
1493 * Returns 0 if successful, error code otherwise
1494 */
1495static int pxa_udc_wakeup(struct usb_gadget *_gadget)
1496{
1497 struct pxa_udc *udc = to_gadget_udc(_gadget);
1498
1499 /* host may not have enabled remote wakeup */
1500 if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0)
1501 return -EHOSTUNREACH;
1502 udc_set_mask_UDCCR(udc, UDCCR_UDR);
1503 return 0;
1504}
1505
1506static const struct usb_gadget_ops pxa_udc_ops = {
1507 .get_frame = pxa_udc_get_frame,
1508 .wakeup = pxa_udc_wakeup,
1509 /* current versions must always be self-powered */
1510};
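/*
 * Illustration (not part of this driver): the gadget layer reaches these ops
 * through usb_gadget_frame_number(&udc->gadget) and
 * usb_gadget_wakeup(&udc->gadget), which call pxa_udc_get_frame() and
 * pxa_udc_wakeup() respectively.
 */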
1511
1512/**
1513 * udc_disable - disable udc device controller
1514 * @udc: udc device
1515 *
1516 * Disables the udc device : disables clocks, udc interrupts, control endpoint
1517 * interrupts.
1518 */
1519static void udc_disable(struct pxa_udc *udc)
1520{
1521 udc_writel(udc, UDCICR0, 0);
1522 udc_writel(udc, UDCICR1, 0);
1523
1524 udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1525 clk_disable(udc->clk);
1526
1527 ep0_idle(udc);
1528 udc->gadget.speed = USB_SPEED_UNKNOWN;
1529 udc->mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
1530}
1531
1532/**
1533 * udc_init_data - Initialize udc device data structures
1534 * @dev: udc device
1535 *
1536 * Initializes gadget endpoint list and endpoint locks. No action is taken
1537 * on the hardware.
1538 */
1539static __init void udc_init_data(struct pxa_udc *dev)
1540{
1541 int i;
1542 struct pxa_ep *ep;
1543
1544 /* device/ep0 records init */
1545 INIT_LIST_HEAD(&dev->gadget.ep_list);
1546 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1547 dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
1548 ep0_idle(dev);
1549 strcpy(dev->dev->bus_id, "");
1550
1551 /* PXA endpoints init */
1552 for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
1553 ep = &dev->pxa_ep[i];
1554
1555 ep->enabled = is_ep0(ep);
1556 INIT_LIST_HEAD(&ep->queue);
1557 spin_lock_init(&ep->lock);
1558 }
1559
1560 /* USB endpoints init */
1561 for (i = 0; i < NR_USB_ENDPOINTS; i++)
1562 if (i != 0)
1563 list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list,
1564 &dev->gadget.ep_list);
1565}
1566
1567/**
1568 * udc_enable - Enables the udc device
1569 * @dev: udc device
1570 *
1571 * Enables the udc device : enables clocks, udc interrupts, control endpoint
1572 * interrupts, sets usb as UDC client and sets up endpoints.
1573 */
1574static void udc_enable(struct pxa_udc *udc)
1575{
1576 udc_writel(udc, UDCICR0, 0);
1577 udc_writel(udc, UDCICR1, 0);
1578 udc_writel(udc, UP2OCR, UP2OCR_HXOE);
1579 udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1580
1581 clk_enable(udc->clk);
1582
1583 ep0_idle(udc);
1584 udc->gadget.speed = USB_SPEED_FULL;
1585 memset(&udc->stats, 0, sizeof(udc->stats));
1586
1587 udc_set_mask_UDCCR(udc, UDCCR_UDE);
1588 udelay(2);
1589 if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
1590 dev_err(udc->dev, "Configuration errors, udc disabled\n");
1591
1592 /*
1593 * Caller must be able to sleep in order to cope with startup transients
1594 */
1595 msleep(100);
1596
1597 /* enable suspend/resume and reset irqs */
1598 udc_writel(udc, UDCICR1,
1599 UDCICR1_IECC | UDCICR1_IERU
1600 | UDCICR1_IESU | UDCICR1_IERS);
1601
1602 /* enable ep0 irqs */
1603 pio_irq_enable(&udc->pxa_ep[0]);
1604
1605 dev_info(udc->dev, "UDC connecting\n");
1606 if (udc->mach->udc_command)
1607 udc->mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
1608}
1609
1610/**
1611 * usb_gadget_register_driver - Register gadget driver
1612 * @driver: gadget driver
1613 *
1614 * When a driver is successfully registered, it will receive control requests
1615 * including set_configuration(), which enables non-control requests. Then
1616 * usb traffic follows until a disconnect is reported. Then a host may connect
1617 * again, or the driver might get unbound.
1618 *
1619 * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
1620 */
1621int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1622{
1623 struct pxa_udc *udc = the_controller;
1624 int retval;
1625
1626 if (!driver || driver->speed != USB_SPEED_FULL || !driver->bind
1627 || !driver->disconnect || !driver->setup)
1628 return -EINVAL;
1629 if (!udc)
1630 return -ENODEV;
1631 if (udc->driver)
1632 return -EBUSY;
1633
1634 /* first hook up the driver ... */
1635 udc->driver = driver;
1636 udc->gadget.dev.driver = &driver->driver;
1637
1638 retval = device_add(&udc->gadget.dev);
1639 if (retval) {
1640 dev_err(udc->dev, "device_add error %d\n", retval);
1641 goto add_fail;
1642 }
1643 retval = driver->bind(&udc->gadget);
1644 if (retval) {
1645 dev_err(udc->dev, "bind to driver %s --> error %d\n",
1646 driver->driver.name, retval);
1647 goto bind_fail;
1648 }
1649 dev_dbg(udc->dev, "registered gadget driver '%s'\n",
1650 driver->driver.name);
1651
1652 udc_enable(udc);
1653 return 0;
1654
1655bind_fail:
1656 device_del(&udc->gadget.dev);
1657add_fail:
1658 udc->driver = NULL;
1659 udc->gadget.dev.driver = NULL;
1660 return retval;
1661}
1662EXPORT_SYMBOL(usb_gadget_register_driver);
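/*
 * A minimal sketch (kept out of the build, not part of this driver) of a
 * gadget driver that passes the checks above: full speed, plus bind(),
 * setup() and disconnect() callbacks, and unbind() so that
 * usb_gadget_unregister_driver() below accepts it. The my_*() callbacks are
 * hypothetical.
 */
#if 0
static struct usb_gadget_driver my_gadget_driver = {
	.speed		= USB_SPEED_FULL,
	.bind		= my_bind,
	.unbind		= my_unbind,
	.setup		= my_setup,
	.disconnect	= my_disconnect,
	.driver		= {
		.name	= "my_gadget",
		.owner	= THIS_MODULE,
	},
};
/* registered with usb_gadget_register_driver(&my_gadget_driver) */
#endif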
1663
1664
1665/**
1666 * stop_activity - Stops udc endpoints
1667 * @udc: udc device
1668 * @driver: gadget driver
1669 *
1670 * Disables all udc endpoints (even control endpoint), reports disconnect to
1671 * the gadget user.
1672 */
1673static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
1674{
1675 int i;
1676
1677 /* don't disconnect drivers more than once */
1678 if (udc->gadget.speed == USB_SPEED_UNKNOWN)
1679 driver = NULL;
1680 udc->gadget.speed = USB_SPEED_UNKNOWN;
1681
1682 for (i = 0; i < NR_USB_ENDPOINTS; i++)
1683 pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep);
1684
1685 if (driver)
1686 driver->disconnect(&udc->gadget);
1687}
1688
1689/**
1690 * usb_gadget_unregister_driver - Unregister the gadget driver
1691 * @driver: gadget driver
1692 *
1693 * Returns 0 if no error, -ENODEV, -EINVAL otherwise
1694 */
1695int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1696{
1697 struct pxa_udc *udc = the_controller;
1698
1699 if (!udc)
1700 return -ENODEV;
1701 if (!driver || driver != udc->driver || !driver->unbind)
1702 return -EINVAL;
1703
1704 stop_activity(udc, driver);
1705 udc_disable(udc);
1706
1707 driver->unbind(&udc->gadget);
1708 udc->driver = NULL;
1709
1710 device_del(&udc->gadget.dev);
1711
1712 dev_info(udc->dev, "unregistered gadget driver '%s'\n",
1713 driver->driver.name);
1714 return 0;
1715}
1716EXPORT_SYMBOL(usb_gadget_unregister_driver);
1717
1718/**
1719 * handle_ep0_ctrl_req - handle control endpoint control request
1720 * @udc: udc device
1721 * @req: control request
1722 */
1723static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1724 struct pxa27x_request *req)
1725{
1726 struct pxa_ep *ep = &udc->pxa_ep[0];
1727 union {
1728 struct usb_ctrlrequest r;
1729 u32 word[2];
1730 } u;
1731 int i;
1732 int have_extrabytes = 0;
1733
1734 nuke(ep, -EPROTO);
1735
1736 /* read SETUP packet */
1737 for (i = 0; i < 2; i++) {
1738 if (unlikely(ep_is_empty(ep)))
1739 goto stall;
1740 u.word[i] = udc_ep_readl(ep, UDCDR);
1741 }
1742
1743 have_extrabytes = !ep_is_empty(ep);
1744 while (!ep_is_empty(ep)) {
1745 i = udc_ep_readl(ep, UDCDR);
1746 ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
1747 }
1748
1749 le16_to_cpus(&u.r.wValue);
1750 le16_to_cpus(&u.r.wIndex);
1751 le16_to_cpus(&u.r.wLength);
1752
1753 ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1754 u.r.bRequestType, u.r.bRequest,
1755 u.r.wValue, u.r.wIndex, u.r.wLength);
1756 if (unlikely(have_extrabytes))
1757 goto stall;
1758
1759 if (u.r.bRequestType & USB_DIR_IN)
1760 set_ep0state(udc, IN_DATA_STAGE);
1761 else
1762 set_ep0state(udc, OUT_DATA_STAGE);
1763
1764 /* Tell UDC to enter Data Stage */
1765 udc_ep_writel(ep, UDCCSR, UDCCSR0_SA | UDCCSR0_OPC);
1766
1767 i = udc->driver->setup(&udc->gadget, &u.r);
1768 if (i < 0)
1769 goto stall;
1770out:
1771 return;
1772stall:
1773 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
1774 udc_ep_readl(ep, UDCCSR), i);
1775 udc_ep_writel(ep, UDCCSR, UDCCSR0_FST | UDCCSR0_FTF);
1776 set_ep0state(udc, STALL);
1777 goto out;
1778}
1779
1780/**
1781 * handle_ep0 - Handle control endpoint data transfers
1782 * @udc: udc device
1783 * @fifo_irq: 1 if triggered by fifo service type irq
1784 * @opc_irq: 1 if triggered by output packet complete type irq
1785 *
1786 * Context : when in_interrupt() or with ep->lock held
1787 *
1788 * Tries to transfer all pending request data into the endpoint and/or
1789 * transfer all pending data in the endpoint into usb requests.
1790 * Handles states of ep0 automata.
1791 *
1792 * PXA27x hardware handles several standard usb control requests without
1793 * driver notification. The requests fully handled by hardware are :
1794 * SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
1795 * GET_STATUS
1796 * The requests handled by hardware, but with irq notification are :
1797 * SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
1798 * The remaining standard requests really handled by handle_ep0 are :
1799 * GET_DESCRIPTOR, SET_DESCRIPTOR, specific requests.
1800 * Requests standardized outside of USB 2.0 chapter 9 are handled more
1801 * uniformly, by gadget drivers.
1802 *
1803 * The control endpoint state machine is _not_ USB spec compliant, it's even
1804 * hardly compliant with Intel PXA270 developers guide.
1805 * The key points which inferred this state machine are :
1806 * - on every setup token, bit UDCCSR0_SA is raised and held until cleared by
1807 * software.
1808 * - on every OUT packet received, UDCCSR0_OPC is raised and held until
1809 * cleared by software.
1810 * - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
1811 * before reading ep0.
1812 * - irq can be called on a "packet complete" event (opc_irq=1), while
1813 * UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
1814 * from experimentation).
1815 * - as UDCCSR0_SA can be activated while in irq handling, and clearing
1816 * UDCCSR0_OPC would flush the setup data, we almost never clear UDCCSR0_OPC
1817 * => we never actually read the "status stage" packet of an IN data stage
1818 * => this is not documented in Intel documentation
1819 * - the hardware has no idea of a STATUS STAGE, it only handles the SETUP
1820 *   STAGE and DATA STAGE. The driver adds a STATUS STAGE to send the last
1821 *   zero length packet in OUT_STATUS_STAGE.
1822 * - special attention was needed for IN_STATUS_STAGE. If a packet complete
1823 *   event is detected, we terminate the status stage without acknowledging
1824 *   the packet (so as not to risk losing a potential SETUP packet)
1825 */
1826static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
1827{
1828 u32 udccsr0;
1829 struct pxa_ep *ep = &udc->pxa_ep[0];
1830 struct pxa27x_request *req = NULL;
1831 int completed = 0;
1832
1833 udccsr0 = udc_ep_readl(ep, UDCCSR);
1834 ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
1835 EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
1836 (fifo_irq << 1 | opc_irq));
1837
1838 if (!list_empty(&ep->queue))
1839 req = list_entry(ep->queue.next, struct pxa27x_request, queue);
1840
1841 if (udccsr0 & UDCCSR0_SST) {
1842 ep_dbg(ep, "clearing stall status\n");
1843 nuke(ep, -EPIPE);
1844 udc_ep_writel(ep, UDCCSR, UDCCSR0_SST);
1845 ep0_idle(udc);
1846 }
1847
1848 if (udccsr0 & UDCCSR0_SA) {
1849 nuke(ep, 0);
1850 set_ep0state(udc, SETUP_STAGE);
1851 }
1852
1853 switch (udc->ep0state) {
1854 case WAIT_FOR_SETUP:
1855 /*
1856 * Hardware bug : beware, we cannot clear OPC, since we would
1857 * miss a potential OPC irq for a setup packet.
1858 * So, we only do ... nothing, and hope for a next irq with
1859 * UDCCSR0_SA set.
1860 */
1861 break;
1862 case SETUP_STAGE:
1863 udccsr0 &= UDCCSR0_CTRL_REQ_MASK;
1864 if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK))
1865 handle_ep0_ctrl_req(udc, req);
1866 break;
1867 case IN_DATA_STAGE: /* GET_DESCRIPTOR */
1868 if (epout_has_pkt(ep))
1869 udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC);
1870 if (req && !ep_is_full(ep))
1871 completed = write_ep0_fifo(ep, req);
1872 if (completed)
1873 ep0_end_in_req(ep, req);
1874 break;
1875 case OUT_DATA_STAGE: /* SET_DESCRIPTOR */
1876 if (epout_has_pkt(ep) && req)
1877 completed = read_ep0_fifo(ep, req);
1878 if (completed)
1879 ep0_end_out_req(ep, req);
1880 break;
1881 case STALL:
1882 udc_ep_writel(ep, UDCCSR, UDCCSR0_FST);
1883 break;
1884 case IN_STATUS_STAGE:
1885 /*
1886 * Hardware bug : beware, we cannot clear OPC, since we would
1887 * miss a potential PC irq for a setup packet.
1888 * So, we only put the ep0 into WAIT_FOR_SETUP state.
1889 */
1890 if (opc_irq)
1891 ep0_idle(udc);
1892 break;
1893 case OUT_STATUS_STAGE:
1894 case WAIT_ACK_SET_CONF_INTERF:
1895 ep_warn(ep, "should never get in %s state here!!!\n",
1896 EP0_STNAME(ep->dev));
1897 ep0_idle(udc);
1898 break;
1899 }
1900}
1901
1902/**
1903 * handle_ep - Handle endpoint data transfers
1904 * @ep: pxa physical endpoint
1905 *
1906 * Tries to transfer all pending request data into the endpoint and/or
1907 * transfer all pending data in the endpoint into usb requests.
1908 *
1909 * Is always called when in_interrupt() or with ep->lock held.
1910 */
1911static void handle_ep(struct pxa_ep *ep)
1912{
1913 struct pxa27x_request *req;
1914 int completed;
1915 u32 udccsr;
1916 int is_in = ep->dir_in;
1917 int loop = 0;
1918
1919 do {
1920 completed = 0;
1921 udccsr = udc_ep_readl(ep, UDCCSR);
1922 if (likely(!list_empty(&ep->queue)))
1923 req = list_entry(ep->queue.next,
1924 struct pxa27x_request, queue);
1925 else
1926 req = NULL;
1927
1928 ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n",
1929 req, udccsr, loop++);
1930
1931 if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN)))
1932 udc_ep_writel(ep, UDCCSR,
1933 udccsr & (UDCCSR_SST | UDCCSR_TRN));
1934 if (!req)
1935 break;
1936
1937 if (unlikely(is_in)) {
1938 if (likely(!ep_is_full(ep)))
1939 completed = write_fifo(ep, req);
1940 if (completed)
1941 ep_end_in_req(ep, req);
1942 } else {
1943 if (likely(epout_has_pkt(ep)))
1944 completed = read_fifo(ep, req);
1945 if (completed)
1946 ep_end_out_req(ep, req);
1947 }
1948 } while (completed);
1949}
1950
1951/**
1952 * pxa27x_change_configuration - Handle SET_CONF usb request notification
1953 * @udc: udc device
1954 * @config: usb configuration
1955 *
1956 * Posts the request to the upper level.
1957 * Doesn't use any pxa specific hardware configuration capabilities.
1958 */
1959static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
1960{
1961	struct usb_ctrlrequest req;
1962
1963 dev_dbg(udc->dev, "config=%d\n", config);
1964
1965 udc->config = config;
1966 udc->last_interface = 0;
1967 udc->last_alternate = 0;
1968
1969 req.bRequestType = 0;
1970 req.bRequest = USB_REQ_SET_CONFIGURATION;
1971 req.wValue = config;
1972 req.wIndex = 0;
1973 req.wLength = 0;
1974
1975 set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
1976 udc->driver->setup(&udc->gadget, &req);
1977}
1978
1979/**
1980 * pxa27x_change_interface - Handle SET_INTERF usb request notification
1981 * @udc: udc device
1982 * @iface: interface number
1983 * @alt: alternate setting number
1984 *
1985 * Posts the request to the upper level.
1986 * Doesn't use any pxa specific hardware configuration capabilities.
1987 */
1988static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
1989{
1990 struct usb_ctrlrequest req;
1991
1992 dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt);
1993
1994 udc->last_interface = iface;
1995 udc->last_alternate = alt;
1996
1997 req.bRequestType = USB_RECIP_INTERFACE;
1998 req.bRequest = USB_REQ_SET_INTERFACE;
1999 req.wValue = alt;
2000 req.wIndex = iface;
2001 req.wLength = 0;
2002
2003 set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2004 udc->driver->setup(&udc->gadget, &req);
2005}
2006
2007/**
2008 * irq_handle_data - Handle data transfer
2009 * @irq: IRQ number
2010 * @udc: pxa_udc device structure
2011 *
2012 * Called from the irq handler, transfers data between endpoint fifos and queued usb requests
2013 */
2014static void irq_handle_data(int irq, struct pxa_udc *udc)
2015{
2016 int i;
2017 struct pxa_ep *ep;
2018 u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK;
2019 u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK;
2020
2021 if (udcisr0 & UDCISR_INT_MASK) {
2022 udc->pxa_ep[0].stats.irqs++;
2023 udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK));
2024 handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR),
2025 !!(udcisr0 & UDCICR_PKTCOMPL));
2026 }
2027
2028 udcisr0 >>= 2;
2029 for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) {
2030 if (!(udcisr0 & UDCISR_INT_MASK))
2031 continue;
2032
2033 udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
2034 ep = &udc->pxa_ep[i];
2035 ep->stats.irqs++;
2036 handle_ep(ep);
2037 }
2038
2039 for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
2040 udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK));
2041 if (!(udcisr1 & UDCISR_INT_MASK))
2042 continue;
2043
2044 ep = &udc->pxa_ep[i];
2045 ep->stats.irqs++;
2046 handle_ep(ep);
2047 }
2048
2049}
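/*
 * Illustration of the 2-bits-per-endpoint packing walked above: endpoint i
 * uses bits 2*i and 2*i+1 of UDCISR0 for i < 16, and bits 2*(i-16) and
 * 2*(i-16)+1 of UDCISR1 for 16 <= i < 24. For instance
 * UDCISR_INT(3, UDCICR_PKTCOMPL) is 1 << 6, the Packet Complete status bit of
 * endpoint 3 in UDCISR0.
 */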
2050
2051/**
2052 * irq_udc_suspend - Handle IRQ "UDC Suspend"
2053 * @udc: udc device
2054 */
2055static void irq_udc_suspend(struct pxa_udc *udc)
2056{
2057 udc_writel(udc, UDCISR1, UDCISR1_IRSU);
2058 udc->stats.irqs_suspend++;
2059
2060 if (udc->gadget.speed != USB_SPEED_UNKNOWN
2061 && udc->driver && udc->driver->suspend)
2062 udc->driver->suspend(&udc->gadget);
2063 ep0_idle(udc);
2064}
2065
2066/**
2067 * irq_udc_resume - Handle IRQ "UDC Resume"
2068 * @udc: udc device
2069 */
2070static void irq_udc_resume(struct pxa_udc *udc)
2071{
2072 udc_writel(udc, UDCISR1, UDCISR1_IRRU);
2073 udc->stats.irqs_resume++;
2074
2075 if (udc->gadget.speed != USB_SPEED_UNKNOWN
2076 && udc->driver && udc->driver->resume)
2077 udc->driver->resume(&udc->gadget);
2078}
2079
2080/**
2081 * irq_udc_reconfig - Handle IRQ "UDC Change Configuration"
2082 * @udc: udc device
2083 */
2084static void irq_udc_reconfig(struct pxa_udc *udc)
2085{
2086 unsigned config, interface, alternate, config_change;
2087 u32 udccr = udc_readl(udc, UDCCR);
2088
2089 udc_writel(udc, UDCISR1, UDCISR1_IRCC);
2090 udc->stats.irqs_reconfig++;
2091
2092 config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S;
2093 config_change = (config != udc->config);
2094 pxa27x_change_configuration(udc, config);
2095
2096 interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S;
2097 alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S;
2098 pxa27x_change_interface(udc, interface, alternate);
2099
2100 if (config_change)
2101 update_pxa_ep_matches(udc);
2102 udc_set_mask_UDCCR(udc, UDCCR_SMAC);
2103}
2104
2105/**
2106 * irq_udc_reset - Handle IRQ "UDC Reset"
2107 * @udc: udc device
2108 */
2109static void irq_udc_reset(struct pxa_udc *udc)
2110{
2111 u32 udccr = udc_readl(udc, UDCCR);
2112 struct pxa_ep *ep = &udc->pxa_ep[0];
2113
2114 dev_info(udc->dev, "USB reset\n");
2115 udc_writel(udc, UDCISR1, UDCISR1_IRRS);
2116 udc->stats.irqs_reset++;
2117
2118 if ((udccr & UDCCR_UDA) == 0) {
2119 dev_dbg(udc->dev, "USB reset start\n");
2120 stop_activity(udc, udc->driver);
2121 }
2122 udc->gadget.speed = USB_SPEED_FULL;
2123 memset(&udc->stats, 0, sizeof udc->stats);
2124
2125 nuke(ep, -EPROTO);
2126 udc_ep_writel(ep, UDCCSR, UDCCSR0_FTF | UDCCSR0_OPC);
2127 ep0_idle(udc);
2128}
2129
2130/**
2131 * pxa_udc_irq - Main irq handler
2132 * @irq: irq number
2133 * @_dev: udc device
2134 *
2135 * Handles all udc interrupts
2136 */
2137static irqreturn_t pxa_udc_irq(int irq, void *_dev)
2138{
2139 struct pxa_udc *udc = _dev;
2140 u32 udcisr0 = udc_readl(udc, UDCISR0);
2141 u32 udcisr1 = udc_readl(udc, UDCISR1);
2142 u32 udccr = udc_readl(udc, UDCCR);
2143 u32 udcisr1_spec;
2144
2145 dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, "
2146 "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr);
2147
2148 udcisr1_spec = udcisr1 & 0xf8000000;
2149 if (unlikely(udcisr1_spec & UDCISR1_IRSU))
2150 irq_udc_suspend(udc);
2151 if (unlikely(udcisr1_spec & UDCISR1_IRRU))
2152 irq_udc_resume(udc);
2153 if (unlikely(udcisr1_spec & UDCISR1_IRCC))
2154 irq_udc_reconfig(udc);
2155 if (unlikely(udcisr1_spec & UDCISR1_IRRS))
2156 irq_udc_reset(udc);
2157
2158 if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK))
2159 irq_handle_data(irq, udc);
2160
2161 return IRQ_HANDLED;
2162}
2163
2164static struct pxa_udc memory = {
2165 .gadget = {
2166 .ops = &pxa_udc_ops,
2167 .ep0 = &memory.udc_usb_ep[0].usb_ep,
2168 .name = driver_name,
2169 .dev = {
2170 .bus_id = "gadget",
2171 },
2172 },
2173
2174 .udc_usb_ep = {
2175 USB_EP_CTRL,
2176 USB_EP_OUT_BULK(1),
2177 USB_EP_IN_BULK(2),
2178 USB_EP_IN_ISO(3),
2179 USB_EP_OUT_ISO(4),
2180 USB_EP_IN_INT(5),
2181 },
2182
2183 .pxa_ep = {
2184 PXA_EP_CTRL,
2185 /* Endpoints for gadget zero */
2186 PXA_EP_OUT_BULK(1, 1, 3, 0, 0),
2187 PXA_EP_IN_BULK(2, 2, 3, 0, 0),
2188 /* Endpoints for ether gadget, file storage gadget */
2189 PXA_EP_OUT_BULK(3, 1, 1, 0, 0),
2190 PXA_EP_IN_BULK(4, 2, 1, 0, 0),
2191 PXA_EP_IN_ISO(5, 3, 1, 0, 0),
2192 PXA_EP_OUT_ISO(6, 4, 1, 0, 0),
2193 PXA_EP_IN_INT(7, 5, 1, 0, 0),
2194 /* Endpoints for RNDIS, serial */
2195 PXA_EP_OUT_BULK(8, 1, 2, 0, 0),
2196 PXA_EP_IN_BULK(9, 2, 2, 0, 0),
2197 PXA_EP_IN_INT(10, 5, 2, 0, 0),
2198 /*
2199		 * All the following endpoints are only here for completeness. They
2200		 * will never work, as multiple interfaces are really broken on
2201		 * the pxa.
2202 */
2203 PXA_EP_OUT_BULK(11, 1, 2, 1, 0),
2204 PXA_EP_IN_BULK(12, 2, 2, 1, 0),
2205 /* Endpoint for CDC Ether */
2206 PXA_EP_OUT_BULK(13, 1, 1, 1, 1),
2207 PXA_EP_IN_BULK(14, 2, 1, 1, 1),
2208 }
2209};
2210
2211/**
2212 * pxa_udc_probe - probes the udc device
2213 * @_dev: platform device
2214 *
2215 * Perform basic init : allocates udc clock, creates debugfs entries, requests
2216 * irq.
2217 */
2218static int __init pxa_udc_probe(struct platform_device *pdev)
2219{
2220 struct resource *regs;
2221 struct pxa_udc *udc = &memory;
2222 int retval;
2223
2224 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2225 if (!regs)
2226 return -ENXIO;
2227 udc->irq = platform_get_irq(pdev, 0);
2228 if (udc->irq < 0)
2229 return udc->irq;
2230
2231 udc->dev = &pdev->dev;
2232 udc->mach = pdev->dev.platform_data;
2233
2234 udc->clk = clk_get(&pdev->dev, "UDCCLK");
2235 if (IS_ERR(udc->clk)) {
2236 retval = PTR_ERR(udc->clk);
2237 goto err_clk;
2238 }
2239
2240 retval = -ENOMEM;
2241 udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
2242 if (!udc->regs) {
2243 dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
2244 goto err_map;
2245 }
2246
2247 device_initialize(&udc->gadget.dev);
2248 udc->gadget.dev.parent = &pdev->dev;
2249 udc->gadget.dev.dma_mask = NULL;
2250
2251 the_controller = udc;
2252 platform_set_drvdata(pdev, udc);
2253 udc_init_data(udc);
2254 pxa_eps_setup(udc);
2255
2256 /* irq setup after old hardware state is cleaned up */
2257 retval = request_irq(udc->irq, pxa_udc_irq,
2258 IRQF_SHARED, driver_name, udc);
2259 if (retval != 0) {
2260 dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
2261			driver_name, udc->irq, retval);
2262 goto err_irq;
2263 }
2264
2265 pxa_init_debugfs(udc);
2266 return 0;
2267err_irq:
2268 iounmap(udc->regs);
2269err_map:
2270 clk_put(udc->clk);
2271 udc->clk = NULL;
2272err_clk:
2273 return retval;
2274}
2275
2276/**
2277 * pxa_udc_remove - removes the udc device driver
2278 * @_dev: platform device
2279 */
2280static int __exit pxa_udc_remove(struct platform_device *_dev)
2281{
2282 struct pxa_udc *udc = platform_get_drvdata(_dev);
2283
2284 usb_gadget_unregister_driver(udc->driver);
2285 free_irq(udc->irq, udc);
2286 pxa_cleanup_debugfs(udc);
2287
2288 platform_set_drvdata(_dev, NULL);
2289 the_controller = NULL;
2290 clk_put(udc->clk);
2291
2292 return 0;
2293}
2294
2295static void pxa_udc_shutdown(struct platform_device *_dev)
2296{
2297 struct pxa_udc *udc = platform_get_drvdata(_dev);
2298
2299 udc_disable(udc);
2300}
2301
2302#ifdef CONFIG_PM
2303/**
2304 * pxa_udc_suspend - Suspend udc device
2305 * @_dev: platform device
2306 * @state: suspend state
2307 *
2308 * Suspends udc : saves configuration registers (UDCCR*), then disables the udc
2309 * device.
2310 */
2311static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
2312{
2313 int i;
2314 struct pxa_udc *udc = platform_get_drvdata(_dev);
2315 struct pxa_ep *ep;
2316
2317 ep = &udc->pxa_ep[0];
2318 udc->udccsr0 = udc_ep_readl(ep, UDCCSR);
2319 for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
2320 ep = &udc->pxa_ep[i];
2321 ep->udccsr_value = udc_ep_readl(ep, UDCCSR);
2322 ep->udccr_value = udc_ep_readl(ep, UDCCR);
2323 ep_dbg(ep, "udccsr:0x%03x, udccr:0x%x\n",
2324 ep->udccsr_value, ep->udccr_value);
2325 }
2326
2327 udc_disable(udc);
2328
2329 return 0;
2330}
2331
2332/**
2333 * pxa_udc_resume - Resume udc device
2334 * @_dev: platform device
2335 *
2336 * Resumes udc : restores configuration registers (UDCCR*), then enables the udc
2337 * device.
2338 */
2339static int pxa_udc_resume(struct platform_device *_dev)
2340{
2341 int i;
2342 struct pxa_udc *udc = platform_get_drvdata(_dev);
2343 struct pxa_ep *ep;
2344
2345 ep = &udc->pxa_ep[0];
2346 udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));
2347 for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
2348 ep = &udc->pxa_ep[i];
2349 udc_ep_writel(ep, UDCCSR, ep->udccsr_value);
2350 udc_ep_writel(ep, UDCCR, ep->udccr_value);
2351 ep_dbg(ep, "udccsr:0x%03x, udccr:0x%x\n",
2352 ep->udccsr_value, ep->udccr_value);
2353 }
2354
2355 udc_enable(udc);
2356 /*
2357 * We do not handle OTG yet.
2358 *
2359 * OTGPH bit is set when sleep mode is entered.
2360 * it indicates that OTG pad is retaining its state.
2361 * Upon exit from sleep mode and before clearing OTGPH,
2362 * Software must configure the USB OTG pad, UDC, and UHC
2363 * to the state they were in before entering sleep mode.
2364 *
2365 * Should be : PSSR |= PSSR_OTGPH;
2366 */
2367
2368 return 0;
2369}
2370#endif
2371
2372/* work with hotplug and coldplug */
2373MODULE_ALIAS("platform:pxa2xx-udc");
2374
2375static struct platform_driver udc_driver = {
2376 .driver = {
2377 .name = "pxa2xx-udc",
2378 .owner = THIS_MODULE,
2379 },
2380 .remove = __exit_p(pxa_udc_remove),
2381 .shutdown = pxa_udc_shutdown,
2382#ifdef CONFIG_PM
2383 .suspend = pxa_udc_suspend,
2384 .resume = pxa_udc_resume
2385#endif
2386};
2387
2388static int __init udc_init(void)
2389{
2390 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
2391 return platform_driver_probe(&udc_driver, pxa_udc_probe);
2392}
2393module_init(udc_init);
2394
2395
2396static void __exit udc_exit(void)
2397{
2398 platform_driver_unregister(&udc_driver);
2399}
2400module_exit(udc_exit);
2401
2402MODULE_DESCRIPTION(DRIVER_DESC);
2403MODULE_AUTHOR("Robert Jarzmik");
2404MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
new file mode 100644
index 000000000000..1d1b7936ee11
--- /dev/null
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -0,0 +1,487 @@
1/*
2 * linux/drivers/usb/gadget/pxa27x_udc.h
3 * Intel PXA27x on-chip full speed USB device controller
4 *
5 * Inspired by original driver by Frank Becker, David Brownell, and others.
6 * Copyright (C) 2008 Robert Jarzmik
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#ifndef __LINUX_USB_GADGET_PXA27X_H
24#define __LINUX_USB_GADGET_PXA27X_H
25
26#include <linux/types.h>
27#include <linux/spinlock.h>
28#include <linux/io.h>
29
30/*
31 * Register definitions
32 */
33/* Offsets */
34#define UDCCR 0x0000 /* UDC Control Register */
35#define UDCICR0 0x0004 /* UDC Interrupt Control Register0 */
36#define UDCICR1 0x0008 /* UDC Interrupt Control Register1 */
37#define UDCISR0 0x000C /* UDC Interrupt Status Register 0 */
38#define UDCISR1 0x0010 /* UDC Interrupt Status Register 1 */
39#define UDCFNR 0x0014 /* UDC Frame Number Register */
40#define UDCOTGICR 0x0018 /* UDC On-The-Go interrupt control */
41#define UP2OCR 0x0020 /* USB Port 2 Output Control register */
42#define UP3OCR 0x0024 /* USB Port 3 Output Control register */
43#define UDCCSRn(x) (0x0100 + ((x)<<2)) /* UDC Control/Status register */
44#define UDCBCRn(x) (0x0200 + ((x)<<2)) /* UDC Byte Count Register */
45#define UDCDRn(x) (0x0300 + ((x)<<2)) /* UDC Data Register */
46#define UDCCRn(x) (0x0400 + ((x)<<2)) /* UDC Control Register */
47
48#define UDCCR_OEN (1 << 31) /* On-the-Go Enable */
49#define UDCCR_AALTHNP (1 << 30) /* A-device Alternate Host Negotiation
50 Protocol Port Support */
51#define UDCCR_AHNP (1 << 29) /* A-device Host Negotiation Protocol
52 Support */
53#define UDCCR_BHNP (1 << 28) /* B-device Host Negotiation Protocol
54 Enable */
55#define UDCCR_DWRE (1 << 16) /* Device Remote Wake-up Enable */
56#define UDCCR_ACN (0x03 << 11) /* Active UDC configuration Number */
57#define UDCCR_ACN_S 11
58#define UDCCR_AIN (0x07 << 8) /* Active UDC interface Number */
59#define UDCCR_AIN_S 8
60#define UDCCR_AAISN (0x07 << 5) /* Active UDC Alternate Interface
61 Setting Number */
62#define UDCCR_AAISN_S 5
63#define UDCCR_SMAC (1 << 4) /* Switch Endpoint Memory to Active
64 Configuration */
65#define UDCCR_EMCE (1 << 3) /* Endpoint Memory Configuration
66 Error */
67#define UDCCR_UDR (1 << 2) /* UDC Resume */
68#define UDCCR_UDA (1 << 1) /* UDC Active */
69#define UDCCR_UDE (1 << 0) /* UDC Enable */
70
71#define UDCICR_INT(n, intr) (((intr) & 0x03) << (((n) & 0x0F) * 2))
72#define UDCICR1_IECC (1 << 31) /* IntEn - Configuration Change */
73#define UDCICR1_IESOF (1 << 30) /* IntEn - Start of Frame */
74#define UDCICR1_IERU (1 << 29) /* IntEn - Resume */
75#define UDCICR1_IESU (1 << 28) /* IntEn - Suspend */
76#define UDCICR1_IERS (1 << 27) /* IntEn - Reset */
77#define UDCICR_FIFOERR (1 << 1) /* FIFO Error interrupt for EP */
78#define UDCICR_PKTCOMPL (1 << 0) /* Packet Complete interrupt for EP */
79#define UDCICR_INT_MASK (UDCICR_FIFOERR | UDCICR_PKTCOMPL)
80
81#define UDCISR_INT(n, intr) (((intr) & 0x03) << (((n) & 0x0F) * 2))
82#define UDCISR1_IRCC (1 << 31) /* IntReq - Configuration Change */
83#define UDCISR1_IRSOF (1 << 30) /* IntReq - Start of Frame */
84#define UDCISR1_IRRU (1 << 29) /* IntReq - Resume */
85#define UDCISR1_IRSU (1 << 28) /* IntReq - Suspend */
86#define UDCISR1_IRRS (1 << 27) /* IntReq - Reset */
87#define UDCISR_INT_MASK (UDCICR_FIFOERR | UDCICR_PKTCOMPL)
88
89#define UDCOTGICR_IESF (1 << 24) /* OTG SET_FEATURE command recvd */
90#define UDCOTGICR_IEXR	(1 << 17)	/* Extra Transceiver Interrupt
91 Rising Edge Interrupt Enable */
92#define UDCOTGICR_IEXF	(1 << 16)	/* Extra Transceiver Interrupt
93 Falling Edge Interrupt Enable */
94#define UDCOTGICR_IEVV40R (1 << 9) /* OTG Vbus Valid 4.0V Rising Edge
95 Interrupt Enable */
96#define UDCOTGICR_IEVV40F (1 << 8) /* OTG Vbus Valid 4.0V Falling Edge
97 Interrupt Enable */
98#define UDCOTGICR_IEVV44R (1 << 7) /* OTG Vbus Valid 4.4V Rising Edge
99 Interrupt Enable */
100#define UDCOTGICR_IEVV44F (1 << 6) /* OTG Vbus Valid 4.4V Falling Edge
101 Interrupt Enable */
102#define UDCOTGICR_IESVR (1 << 5) /* OTG Session Valid Rising Edge
103 Interrupt Enable */
104#define UDCOTGICR_IESVF (1 << 4) /* OTG Session Valid Falling Edge
105 Interrupt Enable */
106#define UDCOTGICR_IESDR (1 << 3) /* OTG A-Device SRP Detect Rising
107 Edge Interrupt Enable */
108#define UDCOTGICR_IESDF (1 << 2) /* OTG A-Device SRP Detect Falling
109 Edge Interrupt Enable */
110#define UDCOTGICR_IEIDR (1 << 1) /* OTG ID Change Rising Edge
111 Interrupt Enable */
112#define UDCOTGICR_IEIDF (1 << 0) /* OTG ID Change Falling Edge
113 Interrupt Enable */
114
115/* Host Port 2 field bits */
116#define UP2OCR_CPVEN (1 << 0) /* Charge Pump Vbus Enable */
117#define UP2OCR_CPVPE (1 << 1) /* Charge Pump Vbus Pulse Enable */
118 /* Transceiver enablers */
119#define UP2OCR_DPPDE (1 << 2) /* D+ Pull Down Enable */
120#define UP2OCR_DMPDE (1 << 3) /* D- Pull Down Enable */
121#define UP2OCR_DPPUE (1 << 4) /* D+ Pull Up Enable */
122#define UP2OCR_DMPUE (1 << 5) /* D- Pull Up Enable */
123#define UP2OCR_DPPUBE (1 << 6) /* D+ Pull Up Bypass Enable */
124#define UP2OCR_DMPUBE (1 << 7) /* D- Pull Up Bypass Enable */
125#define UP2OCR_EXSP (1 << 8) /* External Transceiver Speed Control */
126#define UP2OCR_EXSUS (1 << 9) /* External Transceiver Speed Enable */
127#define UP2OCR_IDON (1 << 10) /* OTG ID Read Enable */
128#define UP2OCR_HXS (1 << 16) /* Transceiver Output Select */
129#define UP2OCR_HXOE (1 << 17) /* Transceiver Output Enable */
130#define UP2OCR_SEOS (1 << 24) /* Single-Ended Output Select */
131
132#define UDCCSR0_SA (1 << 7) /* Setup Active */
133#define UDCCSR0_RNE (1 << 6) /* Receive FIFO Not Empty */
134#define UDCCSR0_FST (1 << 5) /* Force Stall */
135#define UDCCSR0_SST (1 << 4) /* Sent Stall */
136#define UDCCSR0_DME (1 << 3) /* DMA Enable */
137#define UDCCSR0_FTF (1 << 2) /* Flush Transmit FIFO */
138#define UDCCSR0_IPR (1 << 1) /* IN Packet Ready */
139#define UDCCSR0_OPC (1 << 0) /* OUT Packet Complete */
140
141#define UDCCSR_DPE (1 << 9) /* Data Packet Error */
142#define UDCCSR_FEF (1 << 8) /* Flush Endpoint FIFO */
143#define UDCCSR_SP (1 << 7) /* Short Packet Control/Status */
144#define UDCCSR_BNE (1 << 6) /* Buffer Not Empty (IN endpoints) */
145#define UDCCSR_BNF (1 << 6) /* Buffer Not Full (OUT endpoints) */
146#define UDCCSR_FST (1 << 5) /* Force STALL */
147#define UDCCSR_SST (1 << 4) /* Sent STALL */
148#define UDCCSR_DME (1 << 3) /* DMA Enable */
149#define UDCCSR_TRN (1 << 2) /* Tx/Rx NAK */
150#define UDCCSR_PC (1 << 1) /* Packet Complete */
151#define UDCCSR_FS (1 << 0) /* FIFO needs service */
152
153#define UDCCONR_CN (0x03 << 25) /* Configuration Number */
154#define UDCCONR_CN_S 25
155#define UDCCONR_IN (0x07 << 22) /* Interface Number */
156#define UDCCONR_IN_S 22
157#define UDCCONR_AISN (0x07 << 19) /* Alternate Interface Number */
158#define UDCCONR_AISN_S 19
159#define UDCCONR_EN (0x0f << 15) /* Endpoint Number */
160#define UDCCONR_EN_S 15
161#define UDCCONR_ET (0x03 << 13) /* Endpoint Type: */
162#define UDCCONR_ET_S 13
163#define UDCCONR_ET_INT (0x03 << 13) /* Interrupt */
164#define UDCCONR_ET_BULK (0x02 << 13) /* Bulk */
165#define UDCCONR_ET_ISO (0x01 << 13) /* Isochronous */
166#define UDCCONR_ET_NU (0x00 << 13) /* Not used */
167#define UDCCONR_ED (1 << 12) /* Endpoint Direction */
168#define UDCCONR_MPS (0x3ff << 2) /* Maximum Packet Size */
169#define UDCCONR_MPS_S 2
170#define UDCCONR_DE (1 << 1) /* Double Buffering Enable */
171#define UDCCONR_EE (1 << 0) /* Endpoint Enable */
172
173#define UDCCR_MASK_BITS (UDCCR_OEN | UDCCR_SMAC | UDCCR_UDR | UDCCR_UDE)
174#define UDCCSR_WR_MASK (UDCCSR_DME | UDCCSR_FST)
175#define UDC_FNR_MASK (0x7ff)
176#define UDC_BCR_MASK (0x3ff)
177
178/*
179 * UDCCR = UDC Endpoint Configuration Registers
180 * UDCCSR = UDC Control/Status Register for this EP
181 * UDCBCR = UDC Byte Count Remaining (contents of OUT fifo)
182 * UDCDR = UDC Endpoint Data Register (the fifo)
183 */
184#define ofs_UDCCR(ep) (UDCCRn(ep->idx))
185#define ofs_UDCCSR(ep) (UDCCSRn(ep->idx))
186#define ofs_UDCBCR(ep) (UDCBCRn(ep->idx))
187#define ofs_UDCDR(ep) (UDCDRn(ep->idx))
188
189/* Register access macros */
190#define udc_ep_readl(ep, reg) \
191 __raw_readl((ep)->dev->regs + ofs_##reg(ep))
192#define udc_ep_writel(ep, reg, value) \
193 __raw_writel((value), ep->dev->regs + ofs_##reg(ep))
194#define udc_ep_readb(ep, reg) \
195 __raw_readb((ep)->dev->regs + ofs_##reg(ep))
196#define udc_ep_writeb(ep, reg, value) \
197 __raw_writeb((value), ep->dev->regs + ofs_##reg(ep))
198#define udc_readl(dev, reg) \
199 __raw_readl((dev)->regs + (reg))
200#define udc_writel(udc, reg, value) \
201 __raw_writel((value), (udc)->regs + (reg))
202
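/*
 * Expansion example for the accessors above (illustration only):
 * udc_ep_readl(ep, UDCCSR) pastes into ofs_UDCCSR(ep), that is
 * UDCCSRn(ep->idx) = 0x0100 + (ep->idx << 2), so it reads the control/status
 * register of that particular endpoint.
 */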
203#define UDCCSR_MASK (UDCCSR_FST | UDCCSR_DME)
204#define UDCCISR0_EP_MASK ~0
205#define UDCCISR1_EP_MASK 0xffff
206#define UDCCSR0_CTRL_REQ_MASK (UDCCSR0_OPC | UDCCSR0_SA | UDCCSR0_RNE)
207
208#define EPIDX(ep) (ep->idx)
209#define EPADDR(ep) (ep->addr)
210#define EPXFERTYPE(ep) (ep->type)
211#define EPNAME(ep) (ep->name)
212#define is_ep0(ep) (!ep->idx)
213#define EPXFERTYPE_is_ISO(ep) (EPXFERTYPE(ep) == USB_ENDPOINT_XFER_ISOC)
214
215/*
216 * Endpoint definitions
217 *
218 * Once enabled, pxa endpoint configuration is frozen, and cannot change
219 * unless a reset happens or the udc is disabled.
220 * Therefore, we must define all the potential pxa endpoint definitions needed
221 * for all gadgets, and set them up before the udc is enabled.
222 *
223 * As the architecture chosen is fully static, meaning the pxa endpoint
224 * configurations are set up once and for all, we must provide a way to match
225 * one usb endpoint (usb_ep) to several pxa endpoints. The reason is that gadget
226 * layer autoconf doesn't choose the usb_ep endpoint on (config, interface, alt)
227 * criteria, while the pxa architecture requires that.
228 *
229 * The solution is to define several pxa endpoints matching one usb_ep. Ex:
230 *   - "ep1-in" matches pxa endpoint EPA (which is an IN ep at addr 1, when
231 *     the udc talks on (config=3, interface=0, alt=0))
232 *   - "ep1-in" matches pxa endpoint EPB (which is an IN ep at addr 1, when
233 *     the udc talks on (config=3, interface=0, alt=1))
234 *   - "ep1-in" matches pxa endpoint EPC (which is an IN ep at addr 1, when
235 *     the udc talks on (config=2, interface=0, alt=0))
236 *
237 * We'll define the pxa endpoint by its index (EPA => idx=1, EPB => idx=2, ...)
238 */
239
240/*
241 * Endpoint definition helpers
242 */
243#define USB_EP_DEF(addr, bname, dir, type, maxpkt) \
244{ .usb_ep = { .name = bname, .ops = &pxa_ep_ops, .maxpacket = maxpkt, }, \
245 .desc = { .bEndpointAddress = addr | (dir ? USB_DIR_IN : 0), \
246 .bmAttributes = type, \
247 .wMaxPacketSize = maxpkt, }, \
248 .dev = &memory \
249}
250#define USB_EP_BULK(addr, bname, dir) \
251 USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_BULK, BULK_FIFO_SIZE)
252#define USB_EP_ISO(addr, bname, dir) \
253 USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_ISOC, ISO_FIFO_SIZE)
254#define USB_EP_INT(addr, bname, dir) \
255 USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_INT, INT_FIFO_SIZE)
256#define USB_EP_IN_BULK(n) USB_EP_BULK(n, "ep" #n "in-bulk", 1)
257#define USB_EP_OUT_BULK(n) USB_EP_BULK(n, "ep" #n "out-bulk", 0)
258#define USB_EP_IN_ISO(n) USB_EP_ISO(n, "ep" #n "in-iso", 1)
259#define USB_EP_OUT_ISO(n) USB_EP_ISO(n, "ep" #n "out-iso", 0)
260#define USB_EP_IN_INT(n) USB_EP_INT(n, "ep" #n "in-int", 1)
261#define USB_EP_CTRL USB_EP_DEF(0, "ep0", 0, 0, EP0_FIFO_SIZE)
262
263#define PXA_EP_DEF(_idx, _addr, dir, _type, maxpkt, _config, iface, altset) \
264{ \
265 .dev = &memory, \
266 .name = "ep" #_idx, \
267 .idx = _idx, .enabled = 0, \
268 .dir_in = dir, .addr = _addr, \
269 .config = _config, .interface = iface, .alternate = altset, \
270 .type = _type, .fifo_size = maxpkt, \
271}
272#define PXA_EP_BULK(_idx, addr, dir, config, iface, alt) \
273 PXA_EP_DEF(_idx, addr, dir, USB_ENDPOINT_XFER_BULK, BULK_FIFO_SIZE, \
274 config, iface, alt)
275#define PXA_EP_ISO(_idx, addr, dir, config, iface, alt) \
276 PXA_EP_DEF(_idx, addr, dir, USB_ENDPOINT_XFER_ISOC, ISO_FIFO_SIZE, \
277 config, iface, alt)
278#define PXA_EP_INT(_idx, addr, dir, config, iface, alt) \
279 PXA_EP_DEF(_idx, addr, dir, USB_ENDPOINT_XFER_INT, INT_FIFO_SIZE, \
280 config, iface, alt)
281#define PXA_EP_IN_BULK(i, adr, c, f, a) PXA_EP_BULK(i, adr, 1, c, f, a)
282#define PXA_EP_OUT_BULK(i, adr, c, f, a) PXA_EP_BULK(i, adr, 0, c, f, a)
283#define PXA_EP_IN_ISO(i, adr, c, f, a) PXA_EP_ISO(i, adr, 1, c, f, a)
284#define PXA_EP_OUT_ISO(i, adr, c, f, a) PXA_EP_ISO(i, adr, 0, c, f, a)
285#define PXA_EP_IN_INT(i, adr, c, f, a) PXA_EP_INT(i, adr, 1, c, f, a)
286#define PXA_EP_CTRL PXA_EP_DEF(0, 0, 0, 0, EP0_FIFO_SIZE, 0, 0, 0)
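
Illustrative sketch only, not part of this patch: how the "ep1-in" example from
the comment above could be laid out with these helpers, assuming the driver's
usual single statically allocated struct pxa_udc named "memory" (declared in
the .c file, as the .dev = &memory initializers above expect).

static struct pxa_udc memory = {
	.udc_usb_ep = {
		USB_EP_CTRL,			/* ep0 */
		USB_EP_IN_BULK(1),		/* gadget-visible "ep1in-bulk" */
	},
	.pxa_ep = {
		PXA_EP_CTRL,			/* hardware ep0 */
		/* Three hardware endpoints back the same usb_ep; which one is
		 * used depends on the active (config, interface, altsetting).
		 */
		PXA_EP_IN_BULK(1, 1, 3, 0, 0),	/* EPA: cfg 3, iface 0, alt 0 */
		PXA_EP_IN_BULK(2, 1, 3, 0, 1),	/* EPB: cfg 3, iface 0, alt 1 */
		PXA_EP_IN_BULK(3, 1, 2, 0, 0),	/* EPC: cfg 2, iface 0, alt 0 */
	},
};
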
287
288struct pxa27x_udc;
289
290struct stats {
291 unsigned long in_ops;
292 unsigned long out_ops;
293 unsigned long in_bytes;
294 unsigned long out_bytes;
295 unsigned long irqs;
296};
297
298/**
299 * struct udc_usb_ep - container of each usb_ep structure
300 * @usb_ep: usb endpoint
301 * @desc: usb descriptor, especially type and address
302 * @dev: udc managing this endpoint
303 * @pxa_ep: matching pxa_ep (cache of find_pxa_ep() call)
304 */
305struct udc_usb_ep {
306 struct usb_ep usb_ep;
307 struct usb_endpoint_descriptor desc;
308 struct pxa_udc *dev;
309 struct pxa_ep *pxa_ep;
310};
311
312/**
313 * struct pxa_ep - pxa endpoint
314 * @dev: udc device
315 * @queue: requests queue
316 * @lock: lock to pxa_ep data (queues and stats)
317 * @enabled: true when endpoint enabled (not stopped by gadget layer)
318 * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX)
319 * @name: endpoint name (for trace/debug purpose)
320 * @dir_in: 1 if IN endpoint, 0 if OUT endpoint
321 * @addr: usb endpoint number
322 * @config: configuration in which this endpoint is active
323 * @interface: interface in which this endpoint is active
324 * @alternate: altsetting in which this endpoint is active
325 * @fifo_size: max packet size in the endpoint fifo
326 * @type: endpoint type (bulk, iso, int, ...)
327 * @udccsr_value: save register of UDCCSR0 for suspend/resume
328 * @udccr_value: save register of UDCCR for suspend/resume
329 * @stats: endpoint statistics
330 *
331 * The *PROBLEM* is that pxa's endpoint configuration scheme is both misdesigned
332 * (cares about config/interface/altsetting, thus placing needless limits on
333 * device capability) and full of implementation bugs forcing it to be set up
334 * for use more or less like a pxa255.
335 *
336 * As we define the pxa_ep statically, we must guess every pxa_ep needed by all
337 * gadgets which may work with this udc driver.
338 */
339struct pxa_ep {
340 struct pxa_udc *dev;
341
342 struct list_head queue;
343 spinlock_t lock; /* Protects this structure */
344 /* (queues, stats) */
345 unsigned enabled:1;
346
347 unsigned idx:5;
348 char *name;
349
350 /*
351 * Specific pxa endpoint data, needed for hardware initialization
352 */
353 unsigned dir_in:1;
354 unsigned addr:3;
355 unsigned config:2;
356 unsigned interface:3;
357 unsigned alternate:3;
358 unsigned fifo_size;
359 unsigned type;
360
361#ifdef CONFIG_PM
362 u32 udccsr_value;
363 u32 udccr_value;
364#endif
365 struct stats stats;
366};
367
368/**
369 * struct pxa27x_request - container of each usb_request structure
370 * @req: usb request
371 * @udc_usb_ep: usb endpoint the request was submitted on
372 * @in_use: sanity check if request already queued on a pxa_ep
373 * @queue: linked list of requests, linked on pxa_ep->queue
374 */
375struct pxa27x_request {
376 struct usb_request req;
377 struct udc_usb_ep *udc_usb_ep;
378 unsigned in_use:1;
379 struct list_head queue;
380};
381
382enum ep0_state {
383 WAIT_FOR_SETUP,
384 SETUP_STAGE,
385 IN_DATA_STAGE,
386 OUT_DATA_STAGE,
387 IN_STATUS_STAGE,
388 OUT_STATUS_STAGE,
389 STALL,
390 WAIT_ACK_SET_CONF_INTERF
391};
392
393static char *ep0_state_name[] = {
394 "WAIT_FOR_SETUP", "SETUP_STAGE", "IN_DATA_STAGE", "OUT_DATA_STAGE",
395 "IN_STATUS_STAGE", "OUT_STATUS_STAGE", "STALL",
396 "WAIT_ACK_SET_CONF_INTERF"
397};
398#define EP0_STNAME(udc) ep0_state_name[(udc)->ep0state]
399
400#define EP0_FIFO_SIZE 16U
401#define BULK_FIFO_SIZE 64U
402#define ISO_FIFO_SIZE 256U
403#define INT_FIFO_SIZE 16U
404
405struct udc_stats {
406 unsigned long irqs_reset;
407 unsigned long irqs_suspend;
408 unsigned long irqs_resume;
409 unsigned long irqs_reconfig;
410};
411
412#define NR_USB_ENDPOINTS (1 + 5) /* ep0 + ep1in-bulk + .. + ep3in-iso */
413#define NR_PXA_ENDPOINTS (1 + 14) /* ep0 + epA + epB + .. + epX */
414
415/**
416 * struct pxa_udc - udc structure
417 * @regs: mapped IO space
418 * @irq: udc irq
419 * @clk: udc clock
420 * @usb_gadget: udc gadget structure
421 * @driver: bound gadget (zero, g_ether, g_file_storage, ...)
422 * @dev: device
423 * @mach: machine info, used to activate specific GPIO
424 * @ep0state: control endpoint state machine state
425 * @stats: statistics on udc usage
426 * @udc_usb_ep: array of usb endpoints offered by the gadget
427 * @pxa_ep: array of pxa available endpoints
428 * @config: UDC active configuration
429 * @last_interface: UDC interface of the last SET_INTERFACE host request
430 * @last_alternate: UDC altsetting of the last SET_INTERFACE host request
431 * @udccsr0: save of udccsr0 in case of suspend
432 * @debugfs_root: root entry of debug filesystem
433 * @debugfs_state: debugfs entry for "udcstate"
434 * @debugfs_queues: debugfs entry for "queues"
435 * @debugfs_eps: debugfs entry for "epstate"
436 */
437struct pxa_udc {
438 void __iomem *regs;
439 int irq;
440 struct clk *clk;
441
442 struct usb_gadget gadget;
443 struct usb_gadget_driver *driver;
444 struct device *dev;
445 struct pxa2xx_udc_mach_info *mach;
446
447 enum ep0_state ep0state;
448 struct udc_stats stats;
449
450 struct udc_usb_ep udc_usb_ep[NR_USB_ENDPOINTS];
451 struct pxa_ep pxa_ep[NR_PXA_ENDPOINTS];
452
453 unsigned config:2;
454 unsigned last_interface:3;
455 unsigned last_alternate:3;
456
457#ifdef CONFIG_PM
458 unsigned udccsr0;
459#endif
460#ifdef CONFIG_USB_GADGET_DEBUG_FS
461 struct dentry *debugfs_root;
462 struct dentry *debugfs_state;
463 struct dentry *debugfs_queues;
464 struct dentry *debugfs_eps;
465#endif
466};
467
468static inline struct pxa_udc *to_gadget_udc(struct usb_gadget *gadget)
469{
470 return container_of(gadget, struct pxa_udc, gadget);
471}
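
The @pxa_ep field of struct udc_usb_ep above is documented as a cache of a
find_pxa_ep() lookup. The following is a hypothetical sketch of such a lookup,
based only on the fields defined in this header (the real driver's
implementation may differ): it matches the usb endpoint address and direction
against the currently active (config, interface, altsetting).

static struct pxa_ep *example_find_pxa_ep(struct pxa_udc *udc,
					  struct udc_usb_ep *uep)
{
	u8 addr = uep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	int is_in = !!(uep->desc.bEndpointAddress & USB_DIR_IN);
	int i;

	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {	/* skip ep0 at index 0 */
		struct pxa_ep *ep = &udc->pxa_ep[i];

		if (ep->addr == addr && ep->dir_in == is_in
				&& ep->config == udc->config
				&& ep->interface == udc->last_interface
				&& ep->alternate == udc->last_alternate)
			return ep;
	}
	return NULL;
}
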
472
473/*
474 * Debugging/message support
475 */
476#define ep_dbg(ep, fmt, arg...) \
477 dev_dbg(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
478#define ep_vdbg(ep, fmt, arg...) \
479 dev_vdbg(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
480#define ep_err(ep, fmt, arg...) \
481 dev_err(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
482#define ep_info(ep, fmt, arg...) \
483 dev_info(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
484#define ep_warn(ep, fmt, arg...) \
485 dev_warn(ep->dev->dev, "%s:%s: " fmt, EPNAME(ep), __func__, ## arg)
486
487#endif /* __LINUX_USB_GADGET_PXA27X_H */
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index bd58dd504f6f..d0677f5d3cd5 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -183,14 +183,10 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
183 DBG("query OID %08x value, len %d:\n", OID, buf_len); 183 DBG("query OID %08x value, len %d:\n", OID, buf_len);
184 for (i = 0; i < buf_len; i += 16) { 184 for (i = 0; i < buf_len; i += 16) {
185 DBG("%03d: %08x %08x %08x %08x\n", i, 185 DBG("%03d: %08x %08x %08x %08x\n", i,
186 le32_to_cpu(get_unaligned((__le32 *) 186 get_unaligned_le32(&buf[i]),
187 &buf[i])), 187 get_unaligned_le32(&buf[i + 4]),
188 le32_to_cpu(get_unaligned((__le32 *) 188 get_unaligned_le32(&buf[i + 8]),
189 &buf[i + 4])), 189 get_unaligned_le32(&buf[i + 12]));
190 le32_to_cpu(get_unaligned((__le32 *)
191 &buf[i + 8])),
192 le32_to_cpu(get_unaligned((__le32 *)
193 &buf[i + 12])));
194 } 190 }
195 } 191 }
196 192
@@ -666,7 +662,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
666 break; 662 break;
667 case OID_PNP_QUERY_POWER: 663 case OID_PNP_QUERY_POWER:
668 DBG("%s: OID_PNP_QUERY_POWER D%d\n", __func__, 664 DBG("%s: OID_PNP_QUERY_POWER D%d\n", __func__,
669 le32_to_cpu(get_unaligned((__le32 *)buf)) - 1); 665 get_unaligned_le32(buf) - 1);
670 /* only suspend is a real power state, and 666 /* only suspend is a real power state, and
671 * it can't be entered by OID_PNP_SET_POWER... 667 * it can't be entered by OID_PNP_SET_POWER...
672 */ 668 */
@@ -705,14 +701,10 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
705 DBG("set OID %08x value, len %d:\n", OID, buf_len); 701 DBG("set OID %08x value, len %d:\n", OID, buf_len);
706 for (i = 0; i < buf_len; i += 16) { 702 for (i = 0; i < buf_len; i += 16) {
707 DBG("%03d: %08x %08x %08x %08x\n", i, 703 DBG("%03d: %08x %08x %08x %08x\n", i,
708 le32_to_cpu(get_unaligned((__le32 *) 704 get_unaligned_le32(&buf[i]),
709 &buf[i])), 705 get_unaligned_le32(&buf[i + 4]),
710 le32_to_cpu(get_unaligned((__le32 *) 706 get_unaligned_le32(&buf[i + 8]),
711 &buf[i + 4])), 707 get_unaligned_le32(&buf[i + 12]));
712 le32_to_cpu(get_unaligned((__le32 *)
713 &buf[i + 8])),
714 le32_to_cpu(get_unaligned((__le32 *)
715 &buf[i + 12])));
716 } 708 }
717 } 709 }
718 710
@@ -726,8 +718,7 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
726 * PROMISCUOUS, DIRECTED, 718 * PROMISCUOUS, DIRECTED,
727 * MULTICAST, ALL_MULTICAST, BROADCAST 719 * MULTICAST, ALL_MULTICAST, BROADCAST
728 */ 720 */
729 *params->filter = (u16) le32_to_cpu(get_unaligned( 721 *params->filter = (u16)get_unaligned_le32(buf);
730 (__le32 *)buf));
731 DBG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n", 722 DBG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
732 __func__, *params->filter); 723 __func__, *params->filter);
733 724
@@ -777,7 +768,7 @@ update_linkstate:
777 * resuming, Windows forces a reset, and then SET_POWER D0. 768 * resuming, Windows forces a reset, and then SET_POWER D0.
778 * FIXME ... then things go batty; Windows wedges itself. 769 * FIXME ... then things go batty; Windows wedges itself.
779 */ 770 */
780 i = le32_to_cpu(get_unaligned((__le32 *)buf)); 771 i = get_unaligned_le32(buf);
781 DBG("%s: OID_PNP_SET_POWER D%d\n", __func__, i - 1); 772 DBG("%s: OID_PNP_SET_POWER D%d\n", __func__, i - 1);
782 switch (i) { 773 switch (i) {
783 case NdisDeviceStateD0: 774 case NdisDeviceStateD0:
@@ -1064,8 +1055,8 @@ int rndis_msg_parser (u8 configNr, u8 *buf)
1064 return -ENOMEM; 1055 return -ENOMEM;
1065 1056
1066 tmp = (__le32 *) buf; 1057 tmp = (__le32 *) buf;
1067 MsgType = le32_to_cpu(get_unaligned(tmp++)); 1058 MsgType = get_unaligned_le32(tmp++);
1068 MsgLength = le32_to_cpu(get_unaligned(tmp++)); 1059 MsgLength = get_unaligned_le32(tmp++);
1069 1060
1070 if (configNr >= RNDIS_MAX_CONFIGS) 1061 if (configNr >= RNDIS_MAX_CONFIGS)
1071 return -ENOTSUPP; 1062 return -ENOTSUPP;
@@ -1296,10 +1287,9 @@ int rndis_rm_hdr(struct sk_buff *skb)
1296 tmp++; 1287 tmp++;
1297 1288
1298 /* DataOffset, DataLength */ 1289 /* DataOffset, DataLength */
1299 if (!skb_pull(skb, le32_to_cpu(get_unaligned(tmp++)) 1290 if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8))
1300 + 8 /* offset of DataOffset */))
1301 return -EOVERFLOW; 1291 return -EOVERFLOW;
1302 skb_trim(skb, le32_to_cpu(get_unaligned(tmp++))); 1292 skb_trim(skb, get_unaligned_le32(tmp++));
1303 1293
1304 return 0; 1294 return 0;
1305} 1295}
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 433b3f44f42e..54cdd6f94034 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -135,7 +135,10 @@ struct gs_port {
135 int port_in_use; /* open/close in progress */ 135 int port_in_use; /* open/close in progress */
136 wait_queue_head_t port_write_wait;/* waiting to write */ 136 wait_queue_head_t port_write_wait;/* waiting to write */
137 struct gs_buf *port_write_buf; 137 struct gs_buf *port_write_buf;
138 struct usb_cdc_line_coding port_line_coding; 138 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
139 u16 port_handshake_bits;
140#define RS232_RTS (1 << 1)
141#define RS232_DTE (1 << 0)
139}; 142};
140 143
141/* the device structure holds info for the USB device */ 144/* the device structure holds info for the USB device */
@@ -170,7 +173,7 @@ static int gs_open(struct tty_struct *tty, struct file *file);
170static void gs_close(struct tty_struct *tty, struct file *file); 173static void gs_close(struct tty_struct *tty, struct file *file);
171static int gs_write(struct tty_struct *tty, 174static int gs_write(struct tty_struct *tty,
172 const unsigned char *buf, int count); 175 const unsigned char *buf, int count);
173static void gs_put_char(struct tty_struct *tty, unsigned char ch); 176static int gs_put_char(struct tty_struct *tty, unsigned char ch);
174static void gs_flush_chars(struct tty_struct *tty); 177static void gs_flush_chars(struct tty_struct *tty);
175static int gs_write_room(struct tty_struct *tty); 178static int gs_write_room(struct tty_struct *tty);
176static int gs_chars_in_buffer(struct tty_struct *tty); 179static int gs_chars_in_buffer(struct tty_struct *tty);
@@ -199,6 +202,8 @@ static int gs_setup_standard(struct usb_gadget *gadget,
199static int gs_setup_class(struct usb_gadget *gadget, 202static int gs_setup_class(struct usb_gadget *gadget,
200 const struct usb_ctrlrequest *ctrl); 203 const struct usb_ctrlrequest *ctrl);
201static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req); 204static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req);
205static void gs_setup_complete_set_line_coding(struct usb_ep *ep,
206 struct usb_request *req);
202static void gs_disconnect(struct usb_gadget *gadget); 207static void gs_disconnect(struct usb_gadget *gadget);
203static int gs_set_config(struct gs_dev *dev, unsigned config); 208static int gs_set_config(struct gs_dev *dev, unsigned config);
204static void gs_reset_config(struct gs_dev *dev); 209static void gs_reset_config(struct gs_dev *dev);
@@ -406,7 +411,7 @@ static struct usb_cdc_acm_descriptor gs_acm_descriptor = {
406 .bLength = sizeof(gs_acm_descriptor), 411 .bLength = sizeof(gs_acm_descriptor),
407 .bDescriptorType = USB_DT_CS_INTERFACE, 412 .bDescriptorType = USB_DT_CS_INTERFACE,
408 .bDescriptorSubType = USB_CDC_ACM_TYPE, 413 .bDescriptorSubType = USB_CDC_ACM_TYPE,
409 .bmCapabilities = 0, 414 .bmCapabilities = (1 << 1),
410}; 415};
411 416
412static const struct usb_cdc_union_desc gs_union_desc = { 417static const struct usb_cdc_union_desc gs_union_desc = {
@@ -883,14 +888,15 @@ exit:
883/* 888/*
884 * gs_put_char 889 * gs_put_char
885 */ 890 */
886static void gs_put_char(struct tty_struct *tty, unsigned char ch) 891static int gs_put_char(struct tty_struct *tty, unsigned char ch)
887{ 892{
888 unsigned long flags; 893 unsigned long flags;
889 struct gs_port *port = tty->driver_data; 894 struct gs_port *port = tty->driver_data;
895 int ret = 0;
890 896
891 if (port == NULL) { 897 if (port == NULL) {
892 pr_err("gs_put_char: NULL port pointer\n"); 898 pr_err("gs_put_char: NULL port pointer\n");
893 return; 899 return 0;
894 } 900 }
895 901
896 gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p\n", 902 gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
@@ -910,10 +916,11 @@ static void gs_put_char(struct tty_struct *tty, unsigned char ch)
910 goto exit; 916 goto exit;
911 } 917 }
912 918
913 gs_buf_put(port->port_write_buf, &ch, 1); 919 ret = gs_buf_put(port->port_write_buf, &ch, 1);
914 920
915exit: 921exit:
916 spin_unlock_irqrestore(&port->port_lock, flags); 922 spin_unlock_irqrestore(&port->port_lock, flags);
923 return ret;
917} 924}
918 925
919/* 926/*
@@ -1500,6 +1507,8 @@ static int gs_setup(struct usb_gadget *gadget,
1500 u16 wValue = le16_to_cpu(ctrl->wValue); 1507 u16 wValue = le16_to_cpu(ctrl->wValue);
1501 u16 wLength = le16_to_cpu(ctrl->wLength); 1508 u16 wLength = le16_to_cpu(ctrl->wLength);
1502 1509
1510 req->complete = gs_setup_complete;
1511
1503 switch (ctrl->bRequestType & USB_TYPE_MASK) { 1512 switch (ctrl->bRequestType & USB_TYPE_MASK) {
1504 case USB_TYPE_STANDARD: 1513 case USB_TYPE_STANDARD:
1505 ret = gs_setup_standard(gadget,ctrl); 1514 ret = gs_setup_standard(gadget,ctrl);
@@ -1677,18 +1686,14 @@ static int gs_setup_class(struct usb_gadget *gadget,
1677 1686
1678 switch (ctrl->bRequest) { 1687 switch (ctrl->bRequest) {
1679 case USB_CDC_REQ_SET_LINE_CODING: 1688 case USB_CDC_REQ_SET_LINE_CODING:
1680 /* FIXME Submit req to read the data; have its completion 1689 if (wLength != sizeof(struct usb_cdc_line_coding))
1681 * handler copy that data to port->port_line_coding (iff 1690 break;
1682 * it's valid) and maybe pass it on. Until then, fail. 1691 ret = wLength;
1683 */ 1692 req->complete = gs_setup_complete_set_line_coding;
1684 pr_warning("gs_setup: set_line_coding "
1685 "unuspported\n");
1686 break; 1693 break;
1687 1694
1688 case USB_CDC_REQ_GET_LINE_CODING: 1695 case USB_CDC_REQ_GET_LINE_CODING:
1689 port = dev->dev_port[0]; /* ACM only has one port */ 1696 ret = min_t(int, wLength, sizeof(struct usb_cdc_line_coding));
1690 ret = min(wLength,
1691 (u16)sizeof(struct usb_cdc_line_coding));
1692 if (port) { 1697 if (port) {
1693 spin_lock(&port->port_lock); 1698 spin_lock(&port->port_lock);
1694 memcpy(req->buf, &port->port_line_coding, ret); 1699 memcpy(req->buf, &port->port_line_coding, ret);
@@ -1697,15 +1702,27 @@ static int gs_setup_class(struct usb_gadget *gadget,
1697 break; 1702 break;
1698 1703
1699 case USB_CDC_REQ_SET_CONTROL_LINE_STATE: 1704 case USB_CDC_REQ_SET_CONTROL_LINE_STATE:
1700 /* FIXME Submit req to read the data; have its completion 1705 if (wLength != 0)
1701 * handler use that to set the state (iff it's valid) and 1706 break;
1702 * maybe pass it on. Until then, fail. 1707 ret = 0;
1703 */ 1708 if (port) {
1704 pr_warning("gs_setup: set_control_line_state " 1709 /* REVISIT: we currently just remember this data.
1705 "unuspported\n"); 1710 * If we change that, update whatever hardware needs
1711 * updating.
1712 */
1713 spin_lock(&port->port_lock);
1714 port->port_handshake_bits = wValue;
1715 spin_unlock(&port->port_lock);
1716 }
1706 break; 1717 break;
1707 1718
1708 default: 1719 default:
1720 /* NOTE: strictly speaking, we should accept AT-commands
1721 * using SEND_ENCAPSULATED_COMMAND/GET_ENCAPSULATED_RESPONSE.
1722 * But our call management descriptor says we don't handle
1723 * call management, so we should be able to get by without
1724 * handling those "required" commands (except by stalling).
1725 */
1709 pr_err("gs_setup: unknown class request, " 1726 pr_err("gs_setup: unknown class request, "
1710 "type=%02x, request=%02x, value=%04x, " 1727 "type=%02x, request=%02x, value=%04x, "
1711 "index=%04x, length=%d\n", 1728 "index=%04x, length=%d\n",
@@ -1717,6 +1734,42 @@ static int gs_setup_class(struct usb_gadget *gadget,
1717 return ret; 1734 return ret;
1718} 1735}
1719 1736
1737static void gs_setup_complete_set_line_coding(struct usb_ep *ep,
1738 struct usb_request *req)
1739{
1740 struct gs_dev *dev = ep->driver_data;
1741 struct gs_port *port = dev->dev_port[0]; /* ACM only has one port */
1742
1743 switch (req->status) {
1744 case 0:
1745 /* normal completion */
1746 if (req->actual != sizeof(port->port_line_coding))
1747 usb_ep_set_halt(ep);
1748 else if (port) {
1749 struct usb_cdc_line_coding *value = req->buf;
1750
1751 /* REVISIT: we currently just remember this data.
1752 * If we change that, (a) validate it first, then
1753 * (b) update whatever hardware needs updating.
1754 */
1755 spin_lock(&port->port_lock);
1756 port->port_line_coding = *value;
1757 spin_unlock(&port->port_lock);
1758 }
1759 break;
1760
1761 case -ESHUTDOWN:
1762 /* disconnect */
1763 gs_free_req(ep, req);
1764 break;
1765
1766 default:
1767 /* unexpected */
1768 break;
1769 }
1770 return;
1771}
1772
1720/* 1773/*
1721 * gs_setup_complete 1774 * gs_setup_complete
1722 */ 1775 */
@@ -1904,6 +1957,11 @@ static int gs_set_config(struct gs_dev *dev, unsigned config)
1904 } 1957 }
1905 } 1958 }
1906 1959
1960 /* REVISIT the ACM mode should be able to actually *issue* some
1961 * notifications, for at least serial state change events if
1962 * not also for network connection; say so in bmCapabilities.
1963 */
1964
1907 pr_info("gs_set_config: %s configured, %s speed %s config\n", 1965 pr_info("gs_set_config: %s configured, %s speed %s config\n",
1908 GS_LONG_NAME, 1966 GS_LONG_NAME,
1909 gadget->speed == USB_SPEED_HIGH ? "high" : "full", 1967 gadget->speed == USB_SPEED_HIGH ? "high" : "full",
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 878e428a0ec1..4154be375c7a 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -74,7 +74,7 @@ static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len)
74 goto fail; 74 goto fail;
75 } else 75 } else
76 uchar = c; 76 uchar = c;
77 put_unaligned (cpu_to_le16 (uchar), cp++); 77 put_unaligned_le16(uchar, cp++);
78 count++; 78 count++;
79 len--; 79 len--;
80 } 80 }
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index d3d4f4048e6c..fce4924dbbe8 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -23,9 +23,7 @@
23/* 23/*
24 * Gadget Zero only needs two bulk endpoints, and is an example of how you 24 * Gadget Zero only needs two bulk endpoints, and is an example of how you
25 * can write a hardware-agnostic gadget driver running inside a USB device. 25 * can write a hardware-agnostic gadget driver running inside a USB device.
26 * 26 * Some hardware details are visible, but don't affect most of the driver.
27 * Hardware details are visible (see CONFIG_USB_ZERO_* below) but don't
28 * affect most of the driver.
29 * 27 *
30 * Use it with the Linux host/master side "usbtest" driver to get a basic 28 * Use it with the Linux host/master side "usbtest" driver to get a basic
31 * functional test of your device-side usb stack, or with "usb-skeleton". 29 * functional test of your device-side usb stack, or with "usb-skeleton".
@@ -37,6 +35,7 @@
37 * buflen=N default N=4096, buffer size used 35 * buflen=N default N=4096, buffer size used
38 * qlen=N default N=32, how many buffers in the loopback queue 36 * qlen=N default N=32, how many buffers in the loopback queue
39 * loopdefault default false, list loopback config first 37 * loopdefault default false, list loopback config first
38 * autoresume=N default N=0, seconds before triggering remote wakeup
40 * 39 *
41 * Many drivers will only have one configuration, letting them be much 40 * Many drivers will only have one configuration, letting them be much
42 * simpler if they also don't support high speed operation (like this 41 * simpler if they also don't support high speed operation (like this
@@ -62,13 +61,13 @@
62 61
63/*-------------------------------------------------------------------------*/ 62/*-------------------------------------------------------------------------*/
64 63
65#define DRIVER_VERSION "Lughnasadh, 2007" 64#define DRIVER_VERSION "Earth Day 2008"
66 65
67static const char shortname [] = "zero"; 66static const char shortname[] = "zero";
68static const char longname [] = "Gadget Zero"; 67static const char longname[] = "Gadget Zero";
69 68
70static const char source_sink [] = "source and sink data"; 69static const char source_sink[] = "source and sink data";
71static const char loopback [] = "loop input to output"; 70static const char loopback[] = "loop input to output";
72 71
73/*-------------------------------------------------------------------------*/ 72/*-------------------------------------------------------------------------*/
74 73
@@ -120,16 +119,16 @@ static unsigned buflen = 4096;
120static unsigned qlen = 32; 119static unsigned qlen = 32;
121static unsigned pattern = 0; 120static unsigned pattern = 0;
122 121
123module_param (buflen, uint, S_IRUGO); 122module_param(buflen, uint, S_IRUGO);
124module_param (qlen, uint, S_IRUGO); 123module_param(qlen, uint, S_IRUGO);
125module_param (pattern, uint, S_IRUGO|S_IWUSR); 124module_param(pattern, uint, S_IRUGO|S_IWUSR);
126 125
127/* 126/*
128 * if it's nonzero, autoresume says how many seconds to wait 127 * if it's nonzero, autoresume says how many seconds to wait
129 * before trying to wake up the host after suspend. 128 * before trying to wake up the host after suspend.
130 */ 129 */
131static unsigned autoresume = 0; 130static unsigned autoresume = 0;
132module_param (autoresume, uint, 0); 131module_param(autoresume, uint, 0);
133 132
134/* 133/*
135 * Normally the "loopback" configuration is second (index 1) so 134 * Normally the "loopback" configuration is second (index 1) so
@@ -138,8 +137,7 @@ module_param (autoresume, uint, 0);
138 * Or controllers (like superh) that only support one config. 137 * Or controllers (like superh) that only support one config.
139 */ 138 */
140static int loopdefault = 0; 139static int loopdefault = 0;
141 140module_param(loopdefault, bool, S_IRUGO|S_IWUSR);
142module_param (loopdefault, bool, S_IRUGO|S_IWUSR);
143 141
144/*-------------------------------------------------------------------------*/ 142/*-------------------------------------------------------------------------*/
145 143
@@ -176,24 +174,22 @@ module_param (loopdefault, bool, S_IRUGO|S_IWUSR);
176#define CONFIG_SOURCE_SINK 3 174#define CONFIG_SOURCE_SINK 3
177#define CONFIG_LOOPBACK 2 175#define CONFIG_LOOPBACK 2
178 176
179static struct usb_device_descriptor 177static struct usb_device_descriptor device_desc = {
180device_desc = {
181 .bLength = sizeof device_desc, 178 .bLength = sizeof device_desc,
182 .bDescriptorType = USB_DT_DEVICE, 179 .bDescriptorType = USB_DT_DEVICE,
183 180
184 .bcdUSB = __constant_cpu_to_le16 (0x0200), 181 .bcdUSB = __constant_cpu_to_le16(0x0200),
185 .bDeviceClass = USB_CLASS_VENDOR_SPEC, 182 .bDeviceClass = USB_CLASS_VENDOR_SPEC,
186 183
187 .idVendor = __constant_cpu_to_le16 (DRIVER_VENDOR_NUM), 184 .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_NUM),
188 .idProduct = __constant_cpu_to_le16 (DRIVER_PRODUCT_NUM), 185 .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_NUM),
189 .iManufacturer = STRING_MANUFACTURER, 186 .iManufacturer = STRING_MANUFACTURER,
190 .iProduct = STRING_PRODUCT, 187 .iProduct = STRING_PRODUCT,
191 .iSerialNumber = STRING_SERIAL, 188 .iSerialNumber = STRING_SERIAL,
192 .bNumConfigurations = 2, 189 .bNumConfigurations = 2,
193}; 190};
194 191
195static struct usb_config_descriptor 192static struct usb_config_descriptor source_sink_config = {
196source_sink_config = {
197 .bLength = sizeof source_sink_config, 193 .bLength = sizeof source_sink_config,
198 .bDescriptorType = USB_DT_CONFIG, 194 .bDescriptorType = USB_DT_CONFIG,
199 195
@@ -205,8 +201,7 @@ source_sink_config = {
205 .bMaxPower = 1, /* self-powered */ 201 .bMaxPower = 1, /* self-powered */
206}; 202};
207 203
208static struct usb_config_descriptor 204static struct usb_config_descriptor loopback_config = {
209loopback_config = {
210 .bLength = sizeof loopback_config, 205 .bLength = sizeof loopback_config,
211 .bDescriptorType = USB_DT_CONFIG, 206 .bDescriptorType = USB_DT_CONFIG,
212 207
@@ -218,8 +213,7 @@ loopback_config = {
218 .bMaxPower = 1, /* self-powered */ 213 .bMaxPower = 1, /* self-powered */
219}; 214};
220 215
221static struct usb_otg_descriptor 216static struct usb_otg_descriptor otg_descriptor = {
222otg_descriptor = {
223 .bLength = sizeof otg_descriptor, 217 .bLength = sizeof otg_descriptor,
224 .bDescriptorType = USB_DT_OTG, 218 .bDescriptorType = USB_DT_OTG,
225 219
@@ -228,8 +222,7 @@ otg_descriptor = {
228 222
229/* one interface in each configuration */ 223/* one interface in each configuration */
230 224
231static const struct usb_interface_descriptor 225static const struct usb_interface_descriptor source_sink_intf = {
232source_sink_intf = {
233 .bLength = sizeof source_sink_intf, 226 .bLength = sizeof source_sink_intf,
234 .bDescriptorType = USB_DT_INTERFACE, 227 .bDescriptorType = USB_DT_INTERFACE,
235 228
@@ -238,8 +231,7 @@ source_sink_intf = {
238 .iInterface = STRING_SOURCE_SINK, 231 .iInterface = STRING_SOURCE_SINK,
239}; 232};
240 233
241static const struct usb_interface_descriptor 234static const struct usb_interface_descriptor loopback_intf = {
242loopback_intf = {
243 .bLength = sizeof loopback_intf, 235 .bLength = sizeof loopback_intf,
244 .bDescriptorType = USB_DT_INTERFACE, 236 .bDescriptorType = USB_DT_INTERFACE,
245 237
@@ -250,8 +242,7 @@ loopback_intf = {
250 242
251/* two full speed bulk endpoints; their use is config-dependent */ 243/* two full speed bulk endpoints; their use is config-dependent */
252 244
253static struct usb_endpoint_descriptor 245static struct usb_endpoint_descriptor fs_source_desc = {
254fs_source_desc = {
255 .bLength = USB_DT_ENDPOINT_SIZE, 246 .bLength = USB_DT_ENDPOINT_SIZE,
256 .bDescriptorType = USB_DT_ENDPOINT, 247 .bDescriptorType = USB_DT_ENDPOINT,
257 248
@@ -259,8 +250,7 @@ fs_source_desc = {
259 .bmAttributes = USB_ENDPOINT_XFER_BULK, 250 .bmAttributes = USB_ENDPOINT_XFER_BULK,
260}; 251};
261 252
262static struct usb_endpoint_descriptor 253static struct usb_endpoint_descriptor fs_sink_desc = {
263fs_sink_desc = {
264 .bLength = USB_DT_ENDPOINT_SIZE, 254 .bLength = USB_DT_ENDPOINT_SIZE,
265 .bDescriptorType = USB_DT_ENDPOINT, 255 .bDescriptorType = USB_DT_ENDPOINT,
266 256
@@ -268,7 +258,7 @@ fs_sink_desc = {
268 .bmAttributes = USB_ENDPOINT_XFER_BULK, 258 .bmAttributes = USB_ENDPOINT_XFER_BULK,
269}; 259};
270 260
271static const struct usb_descriptor_header *fs_source_sink_function [] = { 261static const struct usb_descriptor_header *fs_source_sink_function[] = {
272 (struct usb_descriptor_header *) &otg_descriptor, 262 (struct usb_descriptor_header *) &otg_descriptor,
273 (struct usb_descriptor_header *) &source_sink_intf, 263 (struct usb_descriptor_header *) &source_sink_intf,
274 (struct usb_descriptor_header *) &fs_sink_desc, 264 (struct usb_descriptor_header *) &fs_sink_desc,
@@ -276,7 +266,7 @@ static const struct usb_descriptor_header *fs_source_sink_function [] = {
276 NULL, 266 NULL,
277}; 267};
278 268
279static const struct usb_descriptor_header *fs_loopback_function [] = { 269static const struct usb_descriptor_header *fs_loopback_function[] = {
280 (struct usb_descriptor_header *) &otg_descriptor, 270 (struct usb_descriptor_header *) &otg_descriptor,
281 (struct usb_descriptor_header *) &loopback_intf, 271 (struct usb_descriptor_header *) &loopback_intf,
282 (struct usb_descriptor_header *) &fs_sink_desc, 272 (struct usb_descriptor_header *) &fs_sink_desc,
@@ -293,36 +283,33 @@ static const struct usb_descriptor_header *fs_loopback_function [] = {
293 * for the config descriptor. 283 * for the config descriptor.
294 */ 284 */
295 285
296static struct usb_endpoint_descriptor 286static struct usb_endpoint_descriptor hs_source_desc = {
297hs_source_desc = {
298 .bLength = USB_DT_ENDPOINT_SIZE, 287 .bLength = USB_DT_ENDPOINT_SIZE,
299 .bDescriptorType = USB_DT_ENDPOINT, 288 .bDescriptorType = USB_DT_ENDPOINT,
300 289
301 .bmAttributes = USB_ENDPOINT_XFER_BULK, 290 .bmAttributes = USB_ENDPOINT_XFER_BULK,
302 .wMaxPacketSize = __constant_cpu_to_le16 (512), 291 .wMaxPacketSize = __constant_cpu_to_le16(512),
303}; 292};
304 293
305static struct usb_endpoint_descriptor 294static struct usb_endpoint_descriptor hs_sink_desc = {
306hs_sink_desc = {
307 .bLength = USB_DT_ENDPOINT_SIZE, 295 .bLength = USB_DT_ENDPOINT_SIZE,
308 .bDescriptorType = USB_DT_ENDPOINT, 296 .bDescriptorType = USB_DT_ENDPOINT,
309 297
310 .bmAttributes = USB_ENDPOINT_XFER_BULK, 298 .bmAttributes = USB_ENDPOINT_XFER_BULK,
311 .wMaxPacketSize = __constant_cpu_to_le16 (512), 299 .wMaxPacketSize = __constant_cpu_to_le16(512),
312}; 300};
313 301
314static struct usb_qualifier_descriptor 302static struct usb_qualifier_descriptor dev_qualifier = {
315dev_qualifier = {
316 .bLength = sizeof dev_qualifier, 303 .bLength = sizeof dev_qualifier,
317 .bDescriptorType = USB_DT_DEVICE_QUALIFIER, 304 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
318 305
319 .bcdUSB = __constant_cpu_to_le16 (0x0200), 306 .bcdUSB = __constant_cpu_to_le16(0x0200),
320 .bDeviceClass = USB_CLASS_VENDOR_SPEC, 307 .bDeviceClass = USB_CLASS_VENDOR_SPEC,
321 308
322 .bNumConfigurations = 2, 309 .bNumConfigurations = 2,
323}; 310};
324 311
325static const struct usb_descriptor_header *hs_source_sink_function [] = { 312static const struct usb_descriptor_header *hs_source_sink_function[] = {
326 (struct usb_descriptor_header *) &otg_descriptor, 313 (struct usb_descriptor_header *) &otg_descriptor,
327 (struct usb_descriptor_header *) &source_sink_intf, 314 (struct usb_descriptor_header *) &source_sink_intf,
328 (struct usb_descriptor_header *) &hs_source_desc, 315 (struct usb_descriptor_header *) &hs_source_desc,
@@ -330,7 +317,7 @@ static const struct usb_descriptor_header *hs_source_sink_function [] = {
330 NULL, 317 NULL,
331}; 318};
332 319
333static const struct usb_descriptor_header *hs_loopback_function [] = { 320static const struct usb_descriptor_header *hs_loopback_function[] = {
334 (struct usb_descriptor_header *) &otg_descriptor, 321 (struct usb_descriptor_header *) &otg_descriptor,
335 (struct usb_descriptor_header *) &loopback_intf, 322 (struct usb_descriptor_header *) &loopback_intf,
336 (struct usb_descriptor_header *) &hs_source_desc, 323 (struct usb_descriptor_header *) &hs_source_desc,
@@ -355,7 +342,7 @@ static char serial[] = "0123456789.0123456789.0123456789";
355 342
356 343
357/* static strings, in UTF-8 */ 344/* static strings, in UTF-8 */
358static struct usb_string strings [] = { 345static struct usb_string strings[] = {
359 { STRING_MANUFACTURER, manufacturer, }, 346 { STRING_MANUFACTURER, manufacturer, },
360 { STRING_PRODUCT, longname, }, 347 { STRING_PRODUCT, longname, },
361 { STRING_SERIAL, serial, }, 348 { STRING_SERIAL, serial, },
@@ -364,7 +351,7 @@ static struct usb_string strings [] = {
364 { } /* end of list */ 351 { } /* end of list */
365}; 352};
366 353
367static struct usb_gadget_strings stringtab = { 354static struct usb_gadget_strings stringtab = {
368 .language = 0x0409, /* en-us */ 355 .language = 0x0409, /* en-us */
369 .strings = strings, 356 .strings = strings,
370}; 357};
@@ -387,8 +374,7 @@ static struct usb_gadget_strings stringtab = {
387 * high bandwidth modes at high speed. (Maybe work like Intel's test 374 * high bandwidth modes at high speed. (Maybe work like Intel's test
388 * device?) 375 * device?)
389 */ 376 */
390static int 377static int config_buf(struct usb_gadget *gadget,
391config_buf (struct usb_gadget *gadget,
392 u8 *buf, u8 type, unsigned index) 378 u8 *buf, u8 type, unsigned index)
393{ 379{
394 int is_source_sink; 380 int is_source_sink;
@@ -419,7 +405,7 @@ config_buf (struct usb_gadget *gadget,
419 if (!gadget_is_otg(gadget)) 405 if (!gadget_is_otg(gadget))
420 function++; 406 function++;
421 407
422 len = usb_gadget_config_buf (is_source_sink 408 len = usb_gadget_config_buf(is_source_sink
423 ? &source_sink_config 409 ? &source_sink_config
424 : &loopback_config, 410 : &loopback_config,
425 buf, USB_BUFSIZ, function); 411 buf, USB_BUFSIZ, function);
@@ -431,27 +417,26 @@ config_buf (struct usb_gadget *gadget,
431 417
432/*-------------------------------------------------------------------------*/ 418/*-------------------------------------------------------------------------*/
433 419
434static struct usb_request * 420static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length)
435alloc_ep_req (struct usb_ep *ep, unsigned length)
436{ 421{
437 struct usb_request *req; 422 struct usb_request *req;
438 423
439 req = usb_ep_alloc_request (ep, GFP_ATOMIC); 424 req = usb_ep_alloc_request(ep, GFP_ATOMIC);
440 if (req) { 425 if (req) {
441 req->length = length; 426 req->length = length;
442 req->buf = kmalloc(length, GFP_ATOMIC); 427 req->buf = kmalloc(length, GFP_ATOMIC);
443 if (!req->buf) { 428 if (!req->buf) {
444 usb_ep_free_request (ep, req); 429 usb_ep_free_request(ep, req);
445 req = NULL; 430 req = NULL;
446 } 431 }
447 } 432 }
448 return req; 433 return req;
449} 434}
450 435
451static void free_ep_req (struct usb_ep *ep, struct usb_request *req) 436static void free_ep_req(struct usb_ep *ep, struct usb_request *req)
452{ 437{
453 kfree(req->buf); 438 kfree(req->buf);
454 usb_ep_free_request (ep, req); 439 usb_ep_free_request(ep, req);
455} 440}
456 441
457/*-------------------------------------------------------------------------*/ 442/*-------------------------------------------------------------------------*/
@@ -472,7 +457,7 @@ static void free_ep_req (struct usb_ep *ep, struct usb_request *req)
472/* optionally require specific source/sink data patterns */ 457/* optionally require specific source/sink data patterns */
473 458
474static int 459static int
475check_read_data ( 460check_read_data(
476 struct zero_dev *dev, 461 struct zero_dev *dev,
477 struct usb_ep *ep, 462 struct usb_ep *ep,
478 struct usb_request *req 463 struct usb_request *req
@@ -498,8 +483,8 @@ check_read_data (
498 continue; 483 continue;
499 break; 484 break;
500 } 485 }
501 ERROR (dev, "bad OUT byte, buf [%d] = %d\n", i, *buf); 486 ERROR(dev, "bad OUT byte, buf[%d] = %d\n", i, *buf);
502 usb_ep_set_halt (ep); 487 usb_ep_set_halt(ep);
503 return -EINVAL; 488 return -EINVAL;
504 } 489 }
505 return 0; 490 return 0;
@@ -512,7 +497,7 @@ static void reinit_write_data(struct usb_ep *ep, struct usb_request *req)
512 497
513 switch (pattern) { 498 switch (pattern) {
514 case 0: 499 case 0:
515 memset (req->buf, 0, req->length); 500 memset(req->buf, 0, req->length);
516 break; 501 break;
517 case 1: 502 case 1:
518 for (i = 0; i < req->length; i++) 503 for (i = 0; i < req->length; i++)
@@ -525,7 +510,7 @@ static void reinit_write_data(struct usb_ep *ep, struct usb_request *req)
525 * irq delay between end of one request and start of the next. 510 * irq delay between end of one request and start of the next.
526 * that prevents using hardware dma queues. 511 * that prevents using hardware dma queues.
527 */ 512 */
528static void source_sink_complete (struct usb_ep *ep, struct usb_request *req) 513static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
529{ 514{
530 struct zero_dev *dev = ep->driver_data; 515 struct zero_dev *dev = ep->driver_data;
531 int status = req->status; 516 int status = req->status;
@@ -534,8 +519,8 @@ static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
534 519
535 case 0: /* normal completion? */ 520 case 0: /* normal completion? */
536 if (ep == dev->out_ep) { 521 if (ep == dev->out_ep) {
537 check_read_data (dev, ep, req); 522 check_read_data(dev, ep, req);
538 memset (req->buf, 0x55, req->length); 523 memset(req->buf, 0x55, req->length);
539 } else 524 } else
540 reinit_write_data(ep, req); 525 reinit_write_data(ep, req);
541 break; 526 break;
@@ -544,11 +529,11 @@ static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
544 case -ECONNABORTED: /* hardware forced ep reset */ 529 case -ECONNABORTED: /* hardware forced ep reset */
545 case -ECONNRESET: /* request dequeued */ 530 case -ECONNRESET: /* request dequeued */
546 case -ESHUTDOWN: /* disconnect from host */ 531 case -ESHUTDOWN: /* disconnect from host */
547 VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status, 532 VDBG(dev, "%s gone (%d), %d/%d\n", ep->name, status,
548 req->actual, req->length); 533 req->actual, req->length);
549 if (ep == dev->out_ep) 534 if (ep == dev->out_ep)
550 check_read_data (dev, ep, req); 535 check_read_data(dev, ep, req);
551 free_ep_req (ep, req); 536 free_ep_req(ep, req);
552 return; 537 return;
553 538
554 case -EOVERFLOW: /* buffer overrun on read means that 539 case -EOVERFLOW: /* buffer overrun on read means that
@@ -557,18 +542,18 @@ static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
557 */ 542 */
558 default: 543 default:
559#if 1 544#if 1
560 DBG (dev, "%s complete --> %d, %d/%d\n", ep->name, 545 DBG(dev, "%s complete --> %d, %d/%d\n", ep->name,
561 status, req->actual, req->length); 546 status, req->actual, req->length);
562#endif 547#endif
563 case -EREMOTEIO: /* short read */ 548 case -EREMOTEIO: /* short read */
564 break; 549 break;
565 } 550 }
566 551
567 status = usb_ep_queue (ep, req, GFP_ATOMIC); 552 status = usb_ep_queue(ep, req, GFP_ATOMIC);
568 if (status) { 553 if (status) {
569 ERROR (dev, "kill %s: resubmit %d bytes --> %d\n", 554 ERROR(dev, "kill %s: resubmit %d bytes --> %d\n",
570 ep->name, req->length, status); 555 ep->name, req->length, status);
571 usb_ep_set_halt (ep); 556 usb_ep_set_halt(ep);
572 /* FIXME recover later ... somehow */ 557 /* FIXME recover later ... somehow */
573 } 558 }
574} 559}
@@ -578,24 +563,24 @@ static struct usb_request *source_sink_start_ep(struct usb_ep *ep)
578 struct usb_request *req; 563 struct usb_request *req;
579 int status; 564 int status;
580 565
581 req = alloc_ep_req (ep, buflen); 566 req = alloc_ep_req(ep, buflen);
582 if (!req) 567 if (!req)
583 return NULL; 568 return NULL;
584 569
585 memset (req->buf, 0, req->length); 570 memset(req->buf, 0, req->length);
586 req->complete = source_sink_complete; 571 req->complete = source_sink_complete;
587 572
588 if (strcmp (ep->name, EP_IN_NAME) == 0) 573 if (strcmp(ep->name, EP_IN_NAME) == 0)
589 reinit_write_data(ep, req); 574 reinit_write_data(ep, req);
590 else 575 else
591 memset (req->buf, 0x55, req->length); 576 memset(req->buf, 0x55, req->length);
592 577
593 status = usb_ep_queue(ep, req, GFP_ATOMIC); 578 status = usb_ep_queue(ep, req, GFP_ATOMIC);
594 if (status) { 579 if (status) {
595 struct zero_dev *dev = ep->driver_data; 580 struct zero_dev *dev = ep->driver_data;
596 581
597 ERROR (dev, "start %s --> %d\n", ep->name, status); 582 ERROR(dev, "start %s --> %d\n", ep->name, status);
598 free_ep_req (ep, req); 583 free_ep_req(ep, req);
599 req = NULL; 584 req = NULL;
600 } 585 }
601 586
@@ -608,34 +593,34 @@ static int set_source_sink_config(struct zero_dev *dev)
608 struct usb_ep *ep; 593 struct usb_ep *ep;
609 struct usb_gadget *gadget = dev->gadget; 594 struct usb_gadget *gadget = dev->gadget;
610 595
611 gadget_for_each_ep (ep, gadget) { 596 gadget_for_each_ep(ep, gadget) {
612 const struct usb_endpoint_descriptor *d; 597 const struct usb_endpoint_descriptor *d;
613 598
614 /* one endpoint writes (sources) zeroes in (to the host) */ 599 /* one endpoint writes (sources) zeroes in (to the host) */
615 if (strcmp (ep->name, EP_IN_NAME) == 0) { 600 if (strcmp(ep->name, EP_IN_NAME) == 0) {
616 d = ep_desc (gadget, &hs_source_desc, &fs_source_desc); 601 d = ep_desc(gadget, &hs_source_desc, &fs_source_desc);
617 result = usb_ep_enable (ep, d); 602 result = usb_ep_enable(ep, d);
618 if (result == 0) { 603 if (result == 0) {
619 ep->driver_data = dev; 604 ep->driver_data = dev;
620 if (source_sink_start_ep(ep) != NULL) { 605 if (source_sink_start_ep(ep) != NULL) {
621 dev->in_ep = ep; 606 dev->in_ep = ep;
622 continue; 607 continue;
623 } 608 }
624 usb_ep_disable (ep); 609 usb_ep_disable(ep);
625 result = -EIO; 610 result = -EIO;
626 } 611 }
627 612
628 /* one endpoint reads (sinks) anything out (from the host) */ 613 /* one endpoint reads (sinks) anything out (from the host) */
629 } else if (strcmp (ep->name, EP_OUT_NAME) == 0) { 614 } else if (strcmp(ep->name, EP_OUT_NAME) == 0) {
630 d = ep_desc (gadget, &hs_sink_desc, &fs_sink_desc); 615 d = ep_desc(gadget, &hs_sink_desc, &fs_sink_desc);
631 result = usb_ep_enable (ep, d); 616 result = usb_ep_enable(ep, d);
632 if (result == 0) { 617 if (result == 0) {
633 ep->driver_data = dev; 618 ep->driver_data = dev;
634 if (source_sink_start_ep(ep) != NULL) { 619 if (source_sink_start_ep(ep) != NULL) {
635 dev->out_ep = ep; 620 dev->out_ep = ep;
636 continue; 621 continue;
637 } 622 }
638 usb_ep_disable (ep); 623 usb_ep_disable(ep);
639 result = -EIO; 624 result = -EIO;
640 } 625 }
641 626
@@ -644,11 +629,11 @@ static int set_source_sink_config(struct zero_dev *dev)
644 continue; 629 continue;
645 630
646 /* stop on error */ 631 /* stop on error */
647 ERROR (dev, "can't start %s, result %d\n", ep->name, result); 632 ERROR(dev, "can't start %s, result %d\n", ep->name, result);
648 break; 633 break;
649 } 634 }
650 if (result == 0) 635 if (result == 0)
651 DBG (dev, "buflen %d\n", buflen); 636 DBG(dev, "buflen %d\n", buflen);
652 637
653 /* caller is responsible for cleanup on error */ 638 /* caller is responsible for cleanup on error */
654 return result; 639 return result;
@@ -656,7 +641,7 @@ static int set_source_sink_config(struct zero_dev *dev)
656 641
657/*-------------------------------------------------------------------------*/ 642/*-------------------------------------------------------------------------*/
658 643
659static void loopback_complete (struct usb_ep *ep, struct usb_request *req) 644static void loopback_complete(struct usb_ep *ep, struct usb_request *req)
660{ 645{
661 struct zero_dev *dev = ep->driver_data; 646 struct zero_dev *dev = ep->driver_data;
662 int status = req->status; 647 int status = req->status;
@@ -668,19 +653,19 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
668 /* loop this OUT packet back IN to the host */ 653 /* loop this OUT packet back IN to the host */
669 req->zero = (req->actual < req->length); 654 req->zero = (req->actual < req->length);
670 req->length = req->actual; 655 req->length = req->actual;
671 status = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC); 656 status = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
672 if (status == 0) 657 if (status == 0)
673 return; 658 return;
674 659
675 /* "should never get here" */ 660 /* "should never get here" */
676 ERROR (dev, "can't loop %s to %s: %d\n", 661 ERROR(dev, "can't loop %s to %s: %d\n",
677 ep->name, dev->in_ep->name, 662 ep->name, dev->in_ep->name,
678 status); 663 status);
679 } 664 }
680 665
681 /* queue the buffer for some later OUT packet */ 666 /* queue the buffer for some later OUT packet */
682 req->length = buflen; 667 req->length = buflen;
683 status = usb_ep_queue (dev->out_ep, req, GFP_ATOMIC); 668 status = usb_ep_queue(dev->out_ep, req, GFP_ATOMIC);
684 if (status == 0) 669 if (status == 0)
685 return; 670 return;
686 671
@@ -688,7 +673,7 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
688 /* FALLTHROUGH */ 673 /* FALLTHROUGH */
689 674
690 default: 675 default:
691 ERROR (dev, "%s loop complete --> %d, %d/%d\n", ep->name, 676 ERROR(dev, "%s loop complete --> %d, %d/%d\n", ep->name,
692 status, req->actual, req->length); 677 status, req->actual, req->length);
693 /* FALLTHROUGH */ 678 /* FALLTHROUGH */
694 679
@@ -700,7 +685,7 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
700 case -ECONNABORTED: /* hardware forced ep reset */ 685 case -ECONNABORTED: /* hardware forced ep reset */
701 case -ECONNRESET: /* request dequeued */ 686 case -ECONNRESET: /* request dequeued */
702 case -ESHUTDOWN: /* disconnect from host */ 687 case -ESHUTDOWN: /* disconnect from host */
703 free_ep_req (ep, req); 688 free_ep_req(ep, req);
704 return; 689 return;
705 } 690 }
706} 691}
@@ -711,13 +696,13 @@ static int set_loopback_config(struct zero_dev *dev)
711 struct usb_ep *ep; 696 struct usb_ep *ep;
712 struct usb_gadget *gadget = dev->gadget; 697 struct usb_gadget *gadget = dev->gadget;
713 698
714 gadget_for_each_ep (ep, gadget) { 699 gadget_for_each_ep(ep, gadget) {
715 const struct usb_endpoint_descriptor *d; 700 const struct usb_endpoint_descriptor *d;
716 701
717 /* one endpoint writes data back IN to the host */ 702 /* one endpoint writes data back IN to the host */
718 if (strcmp (ep->name, EP_IN_NAME) == 0) { 703 if (strcmp(ep->name, EP_IN_NAME) == 0) {
719 d = ep_desc (gadget, &hs_source_desc, &fs_source_desc); 704 d = ep_desc(gadget, &hs_source_desc, &fs_source_desc);
720 result = usb_ep_enable (ep, d); 705 result = usb_ep_enable(ep, d);
721 if (result == 0) { 706 if (result == 0) {
722 ep->driver_data = dev; 707 ep->driver_data = dev;
723 dev->in_ep = ep; 708 dev->in_ep = ep;
@@ -725,9 +710,9 @@ static int set_loopback_config(struct zero_dev *dev)
725 } 710 }
726 711
727 /* one endpoint just reads OUT packets */ 712 /* one endpoint just reads OUT packets */
728 } else if (strcmp (ep->name, EP_OUT_NAME) == 0) { 713 } else if (strcmp(ep->name, EP_OUT_NAME) == 0) {
729 d = ep_desc (gadget, &hs_sink_desc, &fs_sink_desc); 714 d = ep_desc(gadget, &hs_sink_desc, &fs_sink_desc);
730 result = usb_ep_enable (ep, d); 715 result = usb_ep_enable(ep, d);
731 if (result == 0) { 716 if (result == 0) {
732 ep->driver_data = dev; 717 ep->driver_data = dev;
733 dev->out_ep = ep; 718 dev->out_ep = ep;
@@ -739,7 +724,7 @@ static int set_loopback_config(struct zero_dev *dev)
739 continue; 724 continue;
740 725
741 /* stop on error */ 726 /* stop on error */
742 ERROR (dev, "can't enable %s, result %d\n", ep->name, result); 727 ERROR(dev, "can't enable %s, result %d\n", ep->name, result);
743 break; 728 break;
744 } 729 }
745 730
@@ -753,19 +738,19 @@ static int set_loopback_config(struct zero_dev *dev)
753 738
754 ep = dev->out_ep; 739 ep = dev->out_ep;
755 for (i = 0; i < qlen && result == 0; i++) { 740 for (i = 0; i < qlen && result == 0; i++) {
756 req = alloc_ep_req (ep, buflen); 741 req = alloc_ep_req(ep, buflen);
757 if (req) { 742 if (req) {
758 req->complete = loopback_complete; 743 req->complete = loopback_complete;
759 result = usb_ep_queue (ep, req, GFP_ATOMIC); 744 result = usb_ep_queue(ep, req, GFP_ATOMIC);
760 if (result) 745 if (result)
761 DBG (dev, "%s queue req --> %d\n", 746 DBG(dev, "%s queue req --> %d\n",
762 ep->name, result); 747 ep->name, result);
763 } else 748 } else
764 result = -ENOMEM; 749 result = -ENOMEM;
765 } 750 }
766 } 751 }
767 if (result == 0) 752 if (result == 0)
768 DBG (dev, "qlen %d, buflen %d\n", qlen, buflen); 753 DBG(dev, "qlen %d, buflen %d\n", qlen, buflen);
769 754
770 /* caller is responsible for cleanup on error */ 755 /* caller is responsible for cleanup on error */
771 return result; 756 return result;
@@ -773,26 +758,26 @@ static int set_loopback_config(struct zero_dev *dev)
773 758
774/*-------------------------------------------------------------------------*/ 759/*-------------------------------------------------------------------------*/
775 760
776static void zero_reset_config (struct zero_dev *dev) 761static void zero_reset_config(struct zero_dev *dev)
777{ 762{
778 if (dev->config == 0) 763 if (dev->config == 0)
779 return; 764 return;
780 765
781 DBG (dev, "reset config\n"); 766 DBG(dev, "reset config\n");
782 767
783 /* just disable endpoints, forcing completion of pending i/o. 768 /* just disable endpoints, forcing completion of pending i/o.
784 * all our completion handlers free their requests in this case. 769 * all our completion handlers free their requests in this case.
785 */ 770 */
786 if (dev->in_ep) { 771 if (dev->in_ep) {
787 usb_ep_disable (dev->in_ep); 772 usb_ep_disable(dev->in_ep);
788 dev->in_ep = NULL; 773 dev->in_ep = NULL;
789 } 774 }
790 if (dev->out_ep) { 775 if (dev->out_ep) {
791 usb_ep_disable (dev->out_ep); 776 usb_ep_disable(dev->out_ep);
792 dev->out_ep = NULL; 777 dev->out_ep = NULL;
793 } 778 }
794 dev->config = 0; 779 dev->config = 0;
795 del_timer (&dev->resume); 780 del_timer(&dev->resume);
796} 781}
797 782
798/* change our operational config. this code must agree with the code 783/* change our operational config. this code must agree with the code
@@ -813,12 +798,12 @@ static int zero_set_config(struct zero_dev *dev, unsigned number)
813 if (number == dev->config) 798 if (number == dev->config)
814 return 0; 799 return 0;
815 800
816 if (gadget_is_sa1100 (gadget) && dev->config) { 801 if (gadget_is_sa1100(gadget) && dev->config) {
817 /* tx fifo is full, but we can't clear it...*/ 802 /* tx fifo is full, but we can't clear it...*/
818 ERROR(dev, "can't change configurations\n"); 803 ERROR(dev, "can't change configurations\n");
819 return -ESPIPE; 804 return -ESPIPE;
820 } 805 }
821 zero_reset_config (dev); 806 zero_reset_config(dev);
822 807
823 switch (number) { 808 switch (number) {
824 case CONFIG_SOURCE_SINK: 809 case CONFIG_SOURCE_SINK:
@@ -837,7 +822,7 @@ static int zero_set_config(struct zero_dev *dev, unsigned number)
837 if (!result && (!dev->in_ep || !dev->out_ep)) 822 if (!result && (!dev->in_ep || !dev->out_ep))
838 result = -ENODEV; 823 result = -ENODEV;
839 if (result) 824 if (result)
840 zero_reset_config (dev); 825 zero_reset_config(dev);
841 else { 826 else {
842 char *speed; 827 char *speed;
843 828
@@ -849,7 +834,7 @@ static int zero_set_config(struct zero_dev *dev, unsigned number)
849 } 834 }
850 835
851 dev->config = number; 836 dev->config = number;
852 INFO (dev, "%s speed config #%d: %s\n", speed, number, 837 INFO(dev, "%s speed config #%d: %s\n", speed, number,
853 (number == CONFIG_SOURCE_SINK) 838 (number == CONFIG_SOURCE_SINK)
854 ? source_sink : loopback); 839 ? source_sink : loopback);
855 } 840 }
@@ -858,10 +843,10 @@ static int zero_set_config(struct zero_dev *dev, unsigned number)
858 843
859/*-------------------------------------------------------------------------*/ 844/*-------------------------------------------------------------------------*/
860 845
861static void zero_setup_complete (struct usb_ep *ep, struct usb_request *req) 846static void zero_setup_complete(struct usb_ep *ep, struct usb_request *req)
862{ 847{
863 if (req->status || req->actual != req->length) 848 if (req->status || req->actual != req->length)
864 DBG ((struct zero_dev *) ep->driver_data, 849 DBG((struct zero_dev *) ep->driver_data,
865 "setup complete --> %d, %d/%d\n", 850 "setup complete --> %d, %d/%d\n",
866 req->status, req->actual, req->length); 851 req->status, req->actual, req->length);
867} 852}
@@ -874,9 +859,9 @@ static void zero_setup_complete (struct usb_ep *ep, struct usb_request *req)
874 * the work is in config-specific setup. 859 * the work is in config-specific setup.
875 */ 860 */
876static int 861static int
877zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) 862zero_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
878{ 863{
879 struct zero_dev *dev = get_gadget_data (gadget); 864 struct zero_dev *dev = get_gadget_data(gadget);
880 struct usb_request *req = dev->req; 865 struct usb_request *req = dev->req;
881 int value = -EOPNOTSUPP; 866 int value = -EOPNOTSUPP;
882 u16 w_index = le16_to_cpu(ctrl->wIndex); 867 u16 w_index = le16_to_cpu(ctrl->wIndex);
@@ -895,14 +880,14 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
895 switch (w_value >> 8) { 880 switch (w_value >> 8) {
896 881
897 case USB_DT_DEVICE: 882 case USB_DT_DEVICE:
898 value = min (w_length, (u16) sizeof device_desc); 883 value = min(w_length, (u16) sizeof device_desc);
899 memcpy (req->buf, &device_desc, value); 884 memcpy(req->buf, &device_desc, value);
900 break; 885 break;
901 case USB_DT_DEVICE_QUALIFIER: 886 case USB_DT_DEVICE_QUALIFIER:
902 if (!gadget_is_dualspeed(gadget)) 887 if (!gadget_is_dualspeed(gadget))
903 break; 888 break;
904 value = min (w_length, (u16) sizeof dev_qualifier); 889 value = min(w_length, (u16) sizeof dev_qualifier);
905 memcpy (req->buf, &dev_qualifier, value); 890 memcpy(req->buf, &dev_qualifier, value);
906 break; 891 break;
907 892
908 case USB_DT_OTHER_SPEED_CONFIG: 893 case USB_DT_OTHER_SPEED_CONFIG:
@@ -910,11 +895,11 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
910 break; 895 break;
911 // FALLTHROUGH 896 // FALLTHROUGH
912 case USB_DT_CONFIG: 897 case USB_DT_CONFIG:
913 value = config_buf (gadget, req->buf, 898 value = config_buf(gadget, req->buf,
914 w_value >> 8, 899 w_value >> 8,
915 w_value & 0xff); 900 w_value & 0xff);
916 if (value >= 0) 901 if (value >= 0)
917 value = min (w_length, (u16) value); 902 value = min(w_length, (u16) value);
918 break; 903 break;
919 904
920 case USB_DT_STRING: 905 case USB_DT_STRING:
@@ -923,10 +908,10 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
923 * add string tables for other languages, using 908 * add string tables for other languages, using
924 * any UTF-8 characters 909 * any UTF-8 characters
925 */ 910 */
926 value = usb_gadget_get_string (&stringtab, 911 value = usb_gadget_get_string(&stringtab,
927 w_value & 0xff, req->buf); 912 w_value & 0xff, req->buf);
928 if (value >= 0) 913 if (value >= 0)
929 value = min (w_length, (u16) value); 914 value = min(w_length, (u16) value);
930 break; 915 break;
931 } 916 }
932 break; 917 break;
@@ -936,20 +921,20 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
936 if (ctrl->bRequestType != 0) 921 if (ctrl->bRequestType != 0)
937 goto unknown; 922 goto unknown;
938 if (gadget->a_hnp_support) 923 if (gadget->a_hnp_support)
939 DBG (dev, "HNP available\n"); 924 DBG(dev, "HNP available\n");
940 else if (gadget->a_alt_hnp_support) 925 else if (gadget->a_alt_hnp_support)
941 DBG (dev, "HNP needs a different root port\n"); 926 DBG(dev, "HNP needs a different root port\n");
942 else 927 else
943 VDBG (dev, "HNP inactive\n"); 928 VDBG(dev, "HNP inactive\n");
944 spin_lock (&dev->lock); 929 spin_lock(&dev->lock);
945 value = zero_set_config(dev, w_value); 930 value = zero_set_config(dev, w_value);
946 spin_unlock (&dev->lock); 931 spin_unlock(&dev->lock);
947 break; 932 break;
948 case USB_REQ_GET_CONFIGURATION: 933 case USB_REQ_GET_CONFIGURATION:
949 if (ctrl->bRequestType != USB_DIR_IN) 934 if (ctrl->bRequestType != USB_DIR_IN)
950 goto unknown; 935 goto unknown;
951 *(u8 *)req->buf = dev->config; 936 *(u8 *)req->buf = dev->config;
952 value = min (w_length, (u16) 1); 937 value = min(w_length, (u16) 1);
953 break; 938 break;
954 939
955 /* until we add altsetting support, or other interfaces, 940 /* until we add altsetting support, or other interfaces,
@@ -959,7 +944,7 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
959 case USB_REQ_SET_INTERFACE: 944 case USB_REQ_SET_INTERFACE:
960 if (ctrl->bRequestType != USB_RECIP_INTERFACE) 945 if (ctrl->bRequestType != USB_RECIP_INTERFACE)
961 goto unknown; 946 goto unknown;
962 spin_lock (&dev->lock); 947 spin_lock(&dev->lock);
963 if (dev->config && w_index == 0 && w_value == 0) { 948 if (dev->config && w_index == 0 && w_value == 0) {
964 u8 config = dev->config; 949 u8 config = dev->config;
965 950
@@ -970,11 +955,11 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
970 * if we had more than one interface we couldn't 955 * if we had more than one interface we couldn't
971 * use this "reset the config" shortcut. 956 * use this "reset the config" shortcut.
972 */ 957 */
973 zero_reset_config (dev); 958 zero_reset_config(dev);
974 zero_set_config(dev, config); 959 zero_set_config(dev, config);
975 value = 0; 960 value = 0;
976 } 961 }
977 spin_unlock (&dev->lock); 962 spin_unlock(&dev->lock);
978 break; 963 break;
979 case USB_REQ_GET_INTERFACE: 964 case USB_REQ_GET_INTERFACE:
980 if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) 965 if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
@@ -986,7 +971,7 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
986 break; 971 break;
987 } 972 }
988 *(u8 *)req->buf = 0; 973 *(u8 *)req->buf = 0;
989 value = min (w_length, (u16) 1); 974 value = min(w_length, (u16) 1);
990 break; 975 break;
991 976
992 /* 977 /*
@@ -1018,7 +1003,7 @@ zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1018 1003
1019 default: 1004 default:
1020unknown: 1005unknown:
1021 VDBG (dev, 1006 VDBG(dev,
1022 "unknown control req%02x.%02x v%04x i%04x l%d\n", 1007 "unknown control req%02x.%02x v%04x i%04x l%d\n",
1023 ctrl->bRequestType, ctrl->bRequest, 1008 ctrl->bRequestType, ctrl->bRequest,
1024 w_value, w_index, w_length); 1009 w_value, w_index, w_length);
@@ -1028,11 +1013,11 @@ unknown:
1028 if (value >= 0) { 1013 if (value >= 0) {
1029 req->length = value; 1014 req->length = value;
1030 req->zero = value < w_length; 1015 req->zero = value < w_length;
1031 value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); 1016 value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
1032 if (value < 0) { 1017 if (value < 0) {
1033 DBG (dev, "ep_queue --> %d\n", value); 1018 DBG(dev, "ep_queue --> %d\n", value);
1034 req->status = 0; 1019 req->status = 0;
1035 zero_setup_complete (gadget->ep0, req); 1020 zero_setup_complete(gadget->ep0, req);
1036 } 1021 }
1037 } 1022 }
1038 1023
@@ -1040,28 +1025,26 @@ unknown:
1040 return value; 1025 return value;
1041} 1026}
1042 1027
1043static void 1028static void zero_disconnect(struct usb_gadget *gadget)
1044zero_disconnect (struct usb_gadget *gadget)
1045{ 1029{
1046 struct zero_dev *dev = get_gadget_data (gadget); 1030 struct zero_dev *dev = get_gadget_data(gadget);
1047 unsigned long flags; 1031 unsigned long flags;
1048 1032
1049 spin_lock_irqsave (&dev->lock, flags); 1033 spin_lock_irqsave(&dev->lock, flags);
1050 zero_reset_config (dev); 1034 zero_reset_config(dev);
1051 1035
1052 /* a more significant application might have some non-usb 1036 /* a more significant application might have some non-usb
1053 * activities to quiesce here, saving resources like power 1037 * activities to quiesce here, saving resources like power
1054 * or pushing the notification up a network stack. 1038 * or pushing the notification up a network stack.
1055 */ 1039 */
1056 spin_unlock_irqrestore (&dev->lock, flags); 1040 spin_unlock_irqrestore(&dev->lock, flags);
1057 1041
1058 /* next we may get setup() calls to enumerate new connections; 1042 /* next we may get setup() calls to enumerate new connections;
1059 * or an unbind() during shutdown (including removing module). 1043 * or an unbind() during shutdown (including removing module).
1060 */ 1044 */
1061} 1045}
1062 1046
1063static void 1047static void zero_autoresume(unsigned long _dev)
1064zero_autoresume (unsigned long _dev)
1065{ 1048{
1066 struct zero_dev *dev = (struct zero_dev *) _dev; 1049 struct zero_dev *dev = (struct zero_dev *) _dev;
1067 int status; 1050 int status;
@@ -1070,32 +1053,30 @@ zero_autoresume (unsigned long _dev)
1070 * more significant than just a timer firing... 1053 * more significant than just a timer firing...
1071 */ 1054 */
1072 if (dev->gadget->speed != USB_SPEED_UNKNOWN) { 1055 if (dev->gadget->speed != USB_SPEED_UNKNOWN) {
1073 status = usb_gadget_wakeup (dev->gadget); 1056 status = usb_gadget_wakeup(dev->gadget);
1074 DBG (dev, "wakeup --> %d\n", status); 1057 DBG(dev, "wakeup --> %d\n", status);
1075 } 1058 }
1076} 1059}
1077 1060
1078/*-------------------------------------------------------------------------*/ 1061/*-------------------------------------------------------------------------*/
1079 1062
1080static void /* __init_or_exit */ 1063static void zero_unbind(struct usb_gadget *gadget)
1081zero_unbind (struct usb_gadget *gadget)
1082{ 1064{
1083 struct zero_dev *dev = get_gadget_data (gadget); 1065 struct zero_dev *dev = get_gadget_data(gadget);
1084 1066
1085 DBG (dev, "unbind\n"); 1067 DBG(dev, "unbind\n");
1086 1068
1087 /* we've already been disconnected ... no i/o is active */ 1069 /* we've already been disconnected ... no i/o is active */
1088 if (dev->req) { 1070 if (dev->req) {
1089 dev->req->length = USB_BUFSIZ; 1071 dev->req->length = USB_BUFSIZ;
1090 free_ep_req (gadget->ep0, dev->req); 1072 free_ep_req(gadget->ep0, dev->req);
1091 } 1073 }
1092 del_timer_sync (&dev->resume); 1074 del_timer_sync(&dev->resume);
1093 kfree (dev); 1075 kfree(dev);
1094 set_gadget_data (gadget, NULL); 1076 set_gadget_data(gadget, NULL);
1095} 1077}
1096 1078
1097static int __init 1079static int __init zero_bind(struct usb_gadget *gadget)
1098zero_bind (struct usb_gadget *gadget)
1099{ 1080{
1100 struct zero_dev *dev; 1081 struct zero_dev *dev;
1101 struct usb_ep *ep; 1082 struct usb_ep *ep;
@@ -1111,8 +1092,8 @@ zero_bind (struct usb_gadget *gadget)
1111 * autoconfigure on any sane usb controller driver, 1092 * autoconfigure on any sane usb controller driver,
1112 * but there may also be important quirks to address. 1093 * but there may also be important quirks to address.
1113 */ 1094 */
1114 usb_ep_autoconfig_reset (gadget); 1095 usb_ep_autoconfig_reset(gadget);
1115 ep = usb_ep_autoconfig (gadget, &fs_source_desc); 1096 ep = usb_ep_autoconfig(gadget, &fs_source_desc);
1116 if (!ep) { 1097 if (!ep) {
1117autoconf_fail: 1098autoconf_fail:
1118 pr_err("%s: can't autoconfigure on %s\n", 1099 pr_err("%s: can't autoconfigure on %s\n",
@@ -1122,15 +1103,15 @@ autoconf_fail:
1122 EP_IN_NAME = ep->name; 1103 EP_IN_NAME = ep->name;
1123 ep->driver_data = ep; /* claim */ 1104 ep->driver_data = ep; /* claim */
1124 1105
1125 ep = usb_ep_autoconfig (gadget, &fs_sink_desc); 1106 ep = usb_ep_autoconfig(gadget, &fs_sink_desc);
1126 if (!ep) 1107 if (!ep)
1127 goto autoconf_fail; 1108 goto autoconf_fail;
1128 EP_OUT_NAME = ep->name; 1109 EP_OUT_NAME = ep->name;
1129 ep->driver_data = ep; /* claim */ 1110 ep->driver_data = ep; /* claim */
1130 1111
1131 gcnum = usb_gadget_controller_number (gadget); 1112 gcnum = usb_gadget_controller_number(gadget);
1132 if (gcnum >= 0) 1113 if (gcnum >= 0)
1133 device_desc.bcdDevice = cpu_to_le16 (0x0200 + gcnum); 1114 device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
1134 else { 1115 else {
1135 /* gadget zero is so simple (for now, no altsettings) that 1116 /* gadget zero is so simple (for now, no altsettings) that
1136 * it SHOULD NOT have problems with bulk-capable hardware. 1117 * it SHOULD NOT have problems with bulk-capable hardware.
@@ -1141,7 +1122,7 @@ autoconf_fail:
1141 */ 1122 */
1142 pr_warning("%s: controller '%s' not recognized\n", 1123 pr_warning("%s: controller '%s' not recognized\n",
1143 shortname, gadget->name); 1124 shortname, gadget->name);
1144 device_desc.bcdDevice = __constant_cpu_to_le16 (0x9999); 1125 device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
1145 } 1126 }
1146 1127
1147 1128
@@ -1149,12 +1130,16 @@ autoconf_fail:
1149 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1130 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1150 if (!dev) 1131 if (!dev)
1151 return -ENOMEM; 1132 return -ENOMEM;
1152 spin_lock_init (&dev->lock); 1133 spin_lock_init(&dev->lock);
1153 dev->gadget = gadget; 1134 dev->gadget = gadget;
1154 set_gadget_data (gadget, dev); 1135 set_gadget_data(gadget, dev);
1136
1137 init_timer(&dev->resume);
1138 dev->resume.function = zero_autoresume;
1139 dev->resume.data = (unsigned long) dev;
1155 1140
1156 /* preallocate control response and buffer */ 1141 /* preallocate control response and buffer */
1157 dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL); 1142 dev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
1158 if (!dev->req) 1143 if (!dev->req)
1159 goto enomem; 1144 goto enomem;
1160 dev->req->buf = kmalloc(USB_BUFSIZ, GFP_KERNEL); 1145 dev->req->buf = kmalloc(USB_BUFSIZ, GFP_KERNEL);
@@ -1182,11 +1167,8 @@ autoconf_fail:
1182 loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 1167 loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1183 } 1168 }
1184 1169
1185 usb_gadget_set_selfpowered (gadget); 1170 usb_gadget_set_selfpowered(gadget);
1186 1171
1187 init_timer (&dev->resume);
1188 dev->resume.function = zero_autoresume;
1189 dev->resume.data = (unsigned long) dev;
1190 if (autoresume) { 1172 if (autoresume) {
1191 source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 1173 source_sink_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
1192 loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 1174 loopback_config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
@@ -1194,45 +1176,43 @@ autoconf_fail:
1194 1176
1195 gadget->ep0->driver_data = dev; 1177 gadget->ep0->driver_data = dev;
1196 1178
1197 INFO (dev, "%s, version: " DRIVER_VERSION "\n", longname); 1179 INFO(dev, "%s, version: " DRIVER_VERSION "\n", longname);
1198 INFO (dev, "using %s, OUT %s IN %s\n", gadget->name, 1180 INFO(dev, "using %s, OUT %s IN %s\n", gadget->name,
1199 EP_OUT_NAME, EP_IN_NAME); 1181 EP_OUT_NAME, EP_IN_NAME);
1200 1182
1201 snprintf (manufacturer, sizeof manufacturer, "%s %s with %s", 1183 snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
1202 init_utsname()->sysname, init_utsname()->release, 1184 init_utsname()->sysname, init_utsname()->release,
1203 gadget->name); 1185 gadget->name);
1204 1186
1205 return 0; 1187 return 0;
1206 1188
1207enomem: 1189enomem:
1208 zero_unbind (gadget); 1190 zero_unbind(gadget);
1209 return -ENOMEM; 1191 return -ENOMEM;
1210} 1192}
1211 1193
1212/*-------------------------------------------------------------------------*/ 1194/*-------------------------------------------------------------------------*/
1213 1195
1214static void 1196static void zero_suspend(struct usb_gadget *gadget)
1215zero_suspend (struct usb_gadget *gadget)
1216{ 1197{
1217 struct zero_dev *dev = get_gadget_data (gadget); 1198 struct zero_dev *dev = get_gadget_data(gadget);
1218 1199
1219 if (gadget->speed == USB_SPEED_UNKNOWN) 1200 if (gadget->speed == USB_SPEED_UNKNOWN)
1220 return; 1201 return;
1221 1202
1222 if (autoresume) { 1203 if (autoresume) {
1223 mod_timer (&dev->resume, jiffies + (HZ * autoresume)); 1204 mod_timer(&dev->resume, jiffies + (HZ * autoresume));
1224 DBG (dev, "suspend, wakeup in %d seconds\n", autoresume); 1205 DBG(dev, "suspend, wakeup in %d seconds\n", autoresume);
1225 } else 1206 } else
1226 DBG (dev, "suspend\n"); 1207 DBG(dev, "suspend\n");
1227} 1208}
1228 1209
1229static void 1210static void zero_resume(struct usb_gadget *gadget)
1230zero_resume (struct usb_gadget *gadget)
1231{ 1211{
1232 struct zero_dev *dev = get_gadget_data (gadget); 1212 struct zero_dev *dev = get_gadget_data(gadget);
1233 1213
1234 DBG (dev, "resume\n"); 1214 DBG(dev, "resume\n");
1235 del_timer (&dev->resume); 1215 del_timer(&dev->resume);
1236} 1216}
1237 1217
1238 1218
@@ -1264,15 +1244,15 @@ MODULE_AUTHOR("David Brownell");
1264MODULE_LICENSE("GPL"); 1244MODULE_LICENSE("GPL");
1265 1245
1266 1246
1267static int __init init (void) 1247static int __init init(void)
1268{ 1248{
1269 return usb_gadget_register_driver (&zero_driver); 1249 return usb_gadget_register_driver(&zero_driver);
1270} 1250}
1271module_init (init); 1251module_init(init);
1272 1252
1273static void __exit cleanup (void) 1253static void __exit cleanup(void)
1274{ 1254{
1275 usb_gadget_unregister_driver (&zero_driver); 1255 usb_gadget_unregister_driver(&zero_driver);
1276} 1256}
1277module_exit (cleanup); 1257module_exit(cleanup);
1278 1258
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 0b87480dd713..33b467a8352d 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -4,6 +4,19 @@
4comment "USB Host Controller Drivers" 4comment "USB Host Controller Drivers"
5 depends on USB 5 depends on USB
6 6
7config USB_C67X00_HCD
8 tristate "Cypress C67x00 HCD support"
9 depends on USB
10 help
11 The Cypress C67x00 (EZ-Host/EZ-OTG) chips are dual-role
12 host/peripheral/OTG USB controllers.
13
14 Enable this option to support this chip in host controller mode.
15 If unsure, say N.
16
17 To compile this driver as a module, choose M here: the
18 module will be called c67x00.
19
7config USB_EHCI_HCD 20config USB_EHCI_HCD
8 tristate "EHCI HCD (USB 2.0) support" 21 tristate "EHCI HCD (USB 2.0) support"
9 depends on USB && USB_ARCH_HAS_EHCI 22 depends on USB && USB_ARCH_HAS_EHCI
@@ -95,6 +108,32 @@ config USB_ISP116X_HCD
95 To compile this driver as a module, choose M here: the 108 To compile this driver as a module, choose M here: the
96 module will be called isp116x-hcd. 109 module will be called isp116x-hcd.
97 110
111config USB_ISP1760_HCD
112 tristate "ISP 1760 HCD support"
113 depends on USB && EXPERIMENTAL
114 ---help---
115 The ISP1760 chip is a USB 2.0 host controller.
116
117 This driver does not support isochronous transfers or OTG.
118
119 To compile this driver as a module, choose M here: the
120 module will be called isp1760-hcd.
121
122config USB_ISP1760_PCI
123 bool "Support for the PCI bus"
124 depends on USB_ISP1760_HCD && PCI
125 ---help---
126 Enables support for the device present on the PCI bus.
127 This should only be required if you happen to have the eval kit from
128 NXP and you are going to test it.
129
130config USB_ISP1760_OF
131 bool "Support for the OF platform bus"
132 depends on USB_ISP1760_HCD && OF
133 ---help---
134 Enables support for the device present on the PowerPC
135 OpenFirmware platform bus.
136
98config USB_OHCI_HCD 137config USB_OHCI_HCD
99 tristate "OHCI HCD support" 138 tristate "OHCI HCD support"
100 depends on USB && USB_ARCH_HAS_OHCI 139 depends on USB && USB_ARCH_HAS_OHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index bb8e9d44f371..f1edda2dcfde 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -6,6 +6,8 @@ ifeq ($(CONFIG_USB_DEBUG),y)
6 EXTRA_CFLAGS += -DDEBUG 6 EXTRA_CFLAGS += -DDEBUG
7endif 7endif
8 8
9isp1760-objs := isp1760-hcd.o isp1760-if.o
10
9obj-$(CONFIG_PCI) += pci-quirks.o 11obj-$(CONFIG_PCI) += pci-quirks.o
10 12
11obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o 13obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
@@ -16,4 +18,4 @@ obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
16obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o 18obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
17obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o 19obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
18obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o 20obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
19 21obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index f13d1029aeb2..382587c4457c 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -770,7 +770,7 @@ static int ehci_hub_control (
770 if (status & ~0xffff) /* only if wPortChange is interesting */ 770 if (status & ~0xffff) /* only if wPortChange is interesting */
771#endif 771#endif
772 dbg_port (ehci, "GetStatus", wIndex + 1, temp); 772 dbg_port (ehci, "GetStatus", wIndex + 1, temp);
773 put_unaligned(cpu_to_le32 (status), (__le32 *) buf); 773 put_unaligned_le32(status, buf);
774 break; 774 break;
775 case SetHubFeature: 775 case SetHubFeature:
776 switch (wValue) { 776 switch (wValue) {
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
new file mode 100644
index 000000000000..4ba96c1e060c
--- /dev/null
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -0,0 +1,2231 @@
1/*
2 * Driver for the NXP ISP1760 chip
3 *
 4 * However, the code might contain some bugs. What definitely does not work:
5 * - ISO
6 * - OTG
 7 * The interrupt line is configured as active low, level.
8 *
9 * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
10 *
11 */
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/list.h>
16#include <linux/usb.h>
17#include <linux/debugfs.h>
18#include <linux/uaccess.h>
19#include <linux/io.h>
20#include <asm/unaligned.h>
21
22#include "../core/hcd.h"
23#include "isp1760-hcd.h"
24
25static struct kmem_cache *qtd_cachep;
26static struct kmem_cache *qh_cachep;
27
28struct isp1760_hcd {
29 u32 hcs_params;
30 spinlock_t lock;
31 struct inter_packet_info atl_ints[32];
32 struct inter_packet_info int_ints[32];
33 struct memory_chunk memory_pool[BLOCKS];
34
35 /* periodic schedule support */
36#define DEFAULT_I_TDPS 1024
37 unsigned periodic_size;
38 unsigned i_thresh;
39 unsigned long reset_done;
40 unsigned long next_statechange;
41};
42
43static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
44{
45 return (struct isp1760_hcd *) (hcd->hcd_priv);
46}
47static inline struct usb_hcd *priv_to_hcd(struct isp1760_hcd *priv)
48{
49 return container_of((void *) priv, struct usb_hcd, hcd_priv);
50}
51
52/* Section 2.2 Host Controller Capability Registers */
53#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
54#define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */
55#define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */
56#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
57#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
58#define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
59#define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
60
61/* Section 2.3 Host Controller Operational Registers */
62#define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */
63#define CMD_RESET (1<<1) /* reset HC not bus */
64#define CMD_RUN (1<<0) /* start/stop HC */
65#define STS_PCD (1<<2) /* port change detect */
66#define FLAG_CF (1<<0) /* true: we'll support "high speed" */
67
68#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
69#define PORT_POWER (1<<12) /* true: has power (see PPC) */
70#define PORT_USB11(x) (((x) & (3 << 10)) == (1 << 10)) /* USB 1.1 device */
71#define PORT_RESET (1<<8) /* reset port */
72#define PORT_SUSPEND (1<<7) /* suspend port */
73#define PORT_RESUME (1<<6) /* resume it */
74#define PORT_PE (1<<2) /* port enable */
75#define PORT_CSC (1<<1) /* connect status change */
76#define PORT_CONNECT (1<<0) /* device connected */
77#define PORT_RWC_BITS (PORT_CSC)
78
79struct isp1760_qtd {
80 struct isp1760_qtd *hw_next;
81 u8 packet_type;
82 u8 toggle;
83
84 void *data_buffer;
85 /* the rest is HCD-private */
86 struct list_head qtd_list;
87 struct urb *urb;
88 size_t length;
89
90 /* isp special*/
91 u32 status;
92#define URB_COMPLETE_NOTIFY (1 << 0)
93#define URB_ENQUEUED (1 << 1)
94#define URB_TYPE_ATL (1 << 2)
95#define URB_TYPE_INT (1 << 3)
96};
97
98struct isp1760_qh {
99 /* first part defined by EHCI spec */
100 struct list_head qtd_list;
101 struct isp1760_hcd *priv;
102
103 /* periodic schedule info */
104 unsigned short period; /* polling interval */
105 struct usb_device *dev;
106
107 u32 toggle;
108 u32 ping;
109};
110
111#define ehci_port_speed(priv, portsc) (1 << USB_PORT_FEAT_HIGHSPEED)
112
113static unsigned int isp1760_readl(__u32 __iomem *regs)
114{
115 return readl(regs);
116}
117
118static void isp1760_writel(const unsigned int val, __u32 __iomem *regs)
119{
120 writel(val, regs);
121}
122
123/*
 124 * The next two functions copy data to/from the device via MMIO. memcpy_{to|from}io()
 125 * doesn't quite work here because some platforms have to enforce strict 32-bit accesses.
126 */
127static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
128 __u32 __iomem *dst, u32 offset, u32 len)
129{
130 struct usb_hcd *hcd = priv_to_hcd(priv);
131 u32 val;
132 u8 *buff8;
133
134 if (!src) {
135 printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
136 return;
137 }
138 isp1760_writel(offset, hcd->regs + HC_MEMORY_REG);
139 /* XXX
 140 * 90nsec delay; the spec describes how this could be avoided.
141 */
142 mdelay(1);
143
144 while (len >= 4) {
145 *src = __raw_readl(dst);
146 len -= 4;
147 src++;
148 dst++;
149 }
150
151 if (!len)
152 return;
153
 154 /* in case we have 3, 2 or 1 bytes left. The dst buffer may not be
 155 * fully allocated.
156 */
157 val = isp1760_readl(dst);
158
159 buff8 = (u8 *)src;
160 while (len) {
161
162 *buff8 = val;
163 val >>= 8;
164 len--;
165 buff8++;
166 }
167}
168
169static void priv_write_copy(const struct isp1760_hcd *priv, const u32 *src,
170 __u32 __iomem *dst, u32 len)
171{
172 while (len >= 4) {
173 __raw_writel(*src, dst);
174 len -= 4;
175 src++;
176 dst++;
177 }
178
179 if (!len)
180 return;
 181 /* in case we have 3, 2 or 1 bytes left. The buffer is allocated and the
 182 * extra bytes should not be read by the HW.
183 */
184
185 __raw_writel(*src, dst);
186}
187
 188/* memory management of the 60kB on the chip from 0x1000 to 0xffff */
189static void init_memory(struct isp1760_hcd *priv)
190{
191 int i;
192 u32 payload;
193
194 payload = 0x1000;
195 for (i = 0; i < BLOCK_1_NUM; i++) {
196 priv->memory_pool[i].start = payload;
197 priv->memory_pool[i].size = BLOCK_1_SIZE;
198 priv->memory_pool[i].free = 1;
199 payload += priv->memory_pool[i].size;
200 }
201
202
203 for (i = BLOCK_1_NUM; i < BLOCK_1_NUM + BLOCK_2_NUM; i++) {
204 priv->memory_pool[i].start = payload;
205 priv->memory_pool[i].size = BLOCK_2_SIZE;
206 priv->memory_pool[i].free = 1;
207 payload += priv->memory_pool[i].size;
208 }
209
210
211 for (i = BLOCK_1_NUM + BLOCK_2_NUM; i < BLOCKS; i++) {
212 priv->memory_pool[i].start = payload;
213 priv->memory_pool[i].size = BLOCK_3_SIZE;
214 priv->memory_pool[i].free = 1;
215 payload += priv->memory_pool[i].size;
216 }
217
218 BUG_ON(payload - priv->memory_pool[i - 1].size > PAYLOAD_SIZE);
219}
220
221static u32 alloc_mem(struct isp1760_hcd *priv, u32 size)
222{
223 int i;
224
225 if (!size)
226 return ISP1760_NULL_POINTER;
227
228 for (i = 0; i < BLOCKS; i++) {
229 if (priv->memory_pool[i].size >= size &&
230 priv->memory_pool[i].free) {
231
232 priv->memory_pool[i].free = 0;
233 return priv->memory_pool[i].start;
234 }
235 }
236
237 printk(KERN_ERR "ISP1760 MEM: can not allocate %d bytes of memory\n",
238 size);
239 printk(KERN_ERR "Current memory map:\n");
240 for (i = 0; i < BLOCKS; i++) {
241 printk(KERN_ERR "Pool %2d size %4d status: %d\n",
242 i, priv->memory_pool[i].size,
243 priv->memory_pool[i].free);
244 }
 245 /* XXX maybe returning -ENOMEM would be possible here instead */
246 BUG();
247 return 0;
248}
249
250static void free_mem(struct isp1760_hcd *priv, u32 mem)
251{
252 int i;
253
254 if (mem == ISP1760_NULL_POINTER)
255 return;
256
257 for (i = 0; i < BLOCKS; i++) {
258 if (priv->memory_pool[i].start == mem) {
259
260 BUG_ON(priv->memory_pool[i].free);
261
262 priv->memory_pool[i].free = 1;
263 return ;
264 }
265 }
266
267 printk(KERN_ERR "Trying to free not-here-allocated memory :%08x\n",
268 mem);
269 BUG();
270}
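
The alloc_mem()/free_mem() pair above is a first-fit allocator over three tiers of fixed-size blocks carved out of the chip's on-board payload memory starting at 0x1000. As a rough, stand-alone model of that policy only (the block counts and sizes below are invented placeholders; the driver's real BLOCK_* constants live in isp1760-hcd.h, which is not part of this hunk):

    /* Stand-alone, user-space model of the ISP1760 payload allocator:
     * first fit over three tiers of fixed-size blocks.  Counts and sizes
     * here are illustrative only. */
    #include <stdio.h>

    #define NBLOCKS 8

    struct chunk {
    	unsigned int start;
    	unsigned int size;
    	int free;
    };

    static struct chunk pool[NBLOCKS];

    static void pool_init(void)
    {
    	/* three tiers: small, medium, large (placeholder sizes) */
    	static const unsigned int sizes[NBLOCKS] = {
    		256, 256, 256, 1024, 1024, 4096, 4096, 8192
    	};
    	unsigned int addr = 0x1000;	/* payload area begins at 0x1000 */
    	int i;

    	for (i = 0; i < NBLOCKS; i++) {
    		pool[i].start = addr;
    		pool[i].size = sizes[i];
    		pool[i].free = 1;
    		addr += sizes[i];
    	}
    }

    static unsigned int pool_alloc(unsigned int size)
    {
    	int i;

    	/* first fit: take the first free block that is large enough */
    	for (i = 0; i < NBLOCKS; i++) {
    		if (pool[i].free && pool[i].size >= size) {
    			pool[i].free = 0;
    			return pool[i].start;
    		}
    	}
    	return 0;	/* the driver BUG()s in this case instead */
    }

    static void pool_free(unsigned int start)
    {
    	int i;

    	for (i = 0; i < NBLOCKS; i++) {
    		if (pool[i].start == start) {
    			pool[i].free = 1;
    			return;
    		}
    	}
    }

    int main(void)
    {
    	unsigned int a, b;

    	pool_init();
    	a = pool_alloc(100);	/* fits the first small block */
    	b = pool_alloc(3000);	/* skips ahead to the first 4096-byte block */
    	printf("a=%#x b=%#x\n", a, b);
    	pool_free(a);
    	pool_free(b);
    	return 0;
    }

With fixed blocks and no splitting or coalescing, both alloc and free stay a simple linear scan over the block table.
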
271
272static void isp1760_init_regs(struct usb_hcd *hcd)
273{
274 isp1760_writel(0, hcd->regs + HC_BUFFER_STATUS_REG);
275 isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
276 HC_ATL_PTD_SKIPMAP_REG);
277 isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
278 HC_INT_PTD_SKIPMAP_REG);
279 isp1760_writel(NO_TRANSFER_ACTIVE, hcd->regs +
280 HC_ISO_PTD_SKIPMAP_REG);
281
282 isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
283 HC_ATL_PTD_DONEMAP_REG);
284 isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
285 HC_INT_PTD_DONEMAP_REG);
286 isp1760_writel(~NO_TRANSFER_ACTIVE, hcd->regs +
287 HC_ISO_PTD_DONEMAP_REG);
288}
289
290static int handshake(struct isp1760_hcd *priv, void __iomem *ptr,
291 u32 mask, u32 done, int usec)
292{
293 u32 result;
294
295 do {
296 result = isp1760_readl(ptr);
297 if (result == ~0)
298 return -ENODEV;
299 result &= mask;
300 if (result == done)
301 return 0;
302 udelay(1);
303 usec--;
304 } while (usec > 0);
305 return -ETIMEDOUT;
306}
307
308/* reset a non-running (STS_HALT == 1) controller */
309static int ehci_reset(struct isp1760_hcd *priv)
310{
311 int retval;
312 struct usb_hcd *hcd = priv_to_hcd(priv);
313 u32 command = isp1760_readl(hcd->regs + HC_USBCMD);
314
315 command |= CMD_RESET;
316 isp1760_writel(command, hcd->regs + HC_USBCMD);
317 hcd->state = HC_STATE_HALT;
318 priv->next_statechange = jiffies;
319 retval = handshake(priv, hcd->regs + HC_USBCMD,
320 CMD_RESET, 0, 250 * 1000);
321 return retval;
322}
323
324static void qh_destroy(struct isp1760_qh *qh)
325{
326 BUG_ON(!list_empty(&qh->qtd_list));
327 kmem_cache_free(qh_cachep, qh);
328}
329
330static struct isp1760_qh *isp1760_qh_alloc(struct isp1760_hcd *priv,
331 gfp_t flags)
332{
333 struct isp1760_qh *qh;
334
335 qh = kmem_cache_zalloc(qh_cachep, flags);
336 if (!qh)
337 return qh;
338
339 INIT_LIST_HEAD(&qh->qtd_list);
340 qh->priv = priv;
341 return qh;
342}
343
344/* magic numbers that can affect system performance */
345#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
346#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
347#define EHCI_TUNE_RL_TT 0
348#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
349#define EHCI_TUNE_MULT_TT 1
350#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
351
352/* one-time init, only for memory state */
353static int priv_init(struct usb_hcd *hcd)
354{
355 struct isp1760_hcd *priv = hcd_to_priv(hcd);
356 u32 hcc_params;
357
358 spin_lock_init(&priv->lock);
359
360 /*
361 * hw default: 1K periodic list heads, one per frame.
362 * periodic_size can shrink by USBCMD update if hcc_params allows.
363 */
364 priv->periodic_size = DEFAULT_I_TDPS;
365
366 /* controllers may cache some of the periodic schedule ... */
367 hcc_params = isp1760_readl(hcd->regs + HC_HCCPARAMS);
368 /* full frame cache */
369 if (HCC_ISOC_CACHE(hcc_params))
370 priv->i_thresh = 8;
371 else /* N microframes cached */
372 priv->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
373
374 return 0;
375}
376
377static int isp1760_hc_setup(struct usb_hcd *hcd)
378{
379 struct isp1760_hcd *priv = hcd_to_priv(hcd);
380 int result;
381 u32 scratch;
382
383 isp1760_writel(0xdeadbabe, hcd->regs + HC_SCRATCH_REG);
384 scratch = isp1760_readl(hcd->regs + HC_SCRATCH_REG);
385 if (scratch != 0xdeadbabe) {
386 printk(KERN_ERR "ISP1760: Scratch test failed.\n");
387 return -ENODEV;
388 }
389
390 /* pre reset */
391 isp1760_init_regs(hcd);
392
393 /* reset */
394 isp1760_writel(SW_RESET_RESET_ALL, hcd->regs + HC_RESET_REG);
395 mdelay(100);
396
397 isp1760_writel(SW_RESET_RESET_HC, hcd->regs + HC_RESET_REG);
398 mdelay(100);
399
400 result = ehci_reset(priv);
401 if (result)
402 return result;
403
404 /* Step 11 passed */
405
406 isp1760_writel(INTERRUPT_ENABLE_MASK, hcd->regs + HC_INTERRUPT_REG);
407 isp1760_writel(INTERRUPT_ENABLE_MASK, hcd->regs + HC_INTERRUPT_ENABLE);
408
409 /* ATL reset */
410 scratch = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
411 isp1760_writel(scratch | ALL_ATX_RESET, hcd->regs + HC_HW_MODE_CTRL);
412 mdelay(10);
413 isp1760_writel(scratch, hcd->regs + HC_HW_MODE_CTRL);
414
415 isp1760_writel(PORT1_POWER | PORT1_INIT2, hcd->regs + HC_PORT1_CTRL);
416 mdelay(10);
417
418 priv->hcs_params = isp1760_readl(hcd->regs + HC_HCSPARAMS);
419
420 return priv_init(hcd);
421}
422
423static void isp1760_init_maps(struct usb_hcd *hcd)
424{
 425 /* set last maps: for ISO it's only 1, else a 32 TD bitmap */
426 isp1760_writel(0x80000000, hcd->regs + HC_ATL_PTD_LASTPTD_REG);
427 isp1760_writel(0x80000000, hcd->regs + HC_INT_PTD_LASTPTD_REG);
428 isp1760_writel(0x00000001, hcd->regs + HC_ISO_PTD_LASTPTD_REG);
429}
430
431static void isp1760_enable_interrupts(struct usb_hcd *hcd)
432{
433 isp1760_writel(0, hcd->regs + HC_ATL_IRQ_MASK_AND_REG);
434 isp1760_writel(0, hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
435 isp1760_writel(0, hcd->regs + HC_INT_IRQ_MASK_AND_REG);
436 isp1760_writel(0, hcd->regs + HC_INT_IRQ_MASK_OR_REG);
437 isp1760_writel(0, hcd->regs + HC_ISO_IRQ_MASK_AND_REG);
438 isp1760_writel(0xffffffff, hcd->regs + HC_ISO_IRQ_MASK_OR_REG);
439 /* step 23 passed */
440}
441
442static int isp1760_run(struct usb_hcd *hcd)
443{
444 struct isp1760_hcd *priv = hcd_to_priv(hcd);
445 int retval;
446 u32 temp;
447 u32 command;
448 u32 chipid;
449
450 hcd->uses_new_polling = 1;
451 hcd->poll_rh = 0;
452
453 hcd->state = HC_STATE_RUNNING;
454 isp1760_enable_interrupts(hcd);
455 temp = isp1760_readl(hcd->regs + HC_HW_MODE_CTRL);
456 temp |= FINAL_HW_CONFIG;
457 isp1760_writel(temp, hcd->regs + HC_HW_MODE_CTRL);
458
459 command = isp1760_readl(hcd->regs + HC_USBCMD);
460 command &= ~(CMD_LRESET|CMD_RESET);
461 command |= CMD_RUN;
462 isp1760_writel(command, hcd->regs + HC_USBCMD);
463
464 retval = handshake(priv, hcd->regs + HC_USBCMD, CMD_RUN, CMD_RUN,
465 250 * 1000);
466 if (retval)
467 return retval;
468
469 /*
470 * XXX
471 * Spec says to write FLAG_CF as last config action, priv code grabs
472 * the semaphore while doing so.
473 */
474 down_write(&ehci_cf_port_reset_rwsem);
475 isp1760_writel(FLAG_CF, hcd->regs + HC_CONFIGFLAG);
476
477 retval = handshake(priv, hcd->regs + HC_CONFIGFLAG, FLAG_CF, FLAG_CF,
478 250 * 1000);
479 up_write(&ehci_cf_port_reset_rwsem);
480 if (retval)
481 return retval;
482
483 chipid = isp1760_readl(hcd->regs + HC_CHIP_ID_REG);
484 isp1760_info(priv, "USB ISP %04x HW rev. %d started\n", chipid & 0xffff,
485 chipid >> 16);
486
487 /* PTD Register Init Part 2, Step 28 */
488 /* enable INTs */
489 isp1760_init_maps(hcd);
490
491 /* GRR this is run-once init(), being done every time the HC starts.
 492 * So long as they're part of class devices, we can't do it in init()
493 * since the class device isn't created that early.
494 */
495 return 0;
496}
497
498static u32 base_to_chip(u32 base)
499{
500 return ((base - 0x400) >> 3);
501}
502
503static void transform_into_atl(struct isp1760_hcd *priv, struct isp1760_qh *qh,
504 struct isp1760_qtd *qtd, struct urb *urb,
505 u32 payload, struct ptd *ptd)
506{
507 u32 dw0;
508 u32 dw1;
509 u32 dw2;
510 u32 dw3;
511 u32 maxpacket;
512 u32 multi;
513 u32 pid_code;
514 u32 rl = RL_COUNTER;
515 u32 nak = NAK_COUNTER;
516
517 /* according to 3.6.2, max packet len can not be > 0x400 */
518 maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
519 multi = 1 + ((maxpacket >> 11) & 0x3);
520 maxpacket &= 0x7ff;
521
522 /* DW0 */
523 dw0 = PTD_VALID;
524 dw0 |= PTD_LENGTH(qtd->length);
525 dw0 |= PTD_MAXPACKET(maxpacket);
526 dw0 |= PTD_ENDPOINT(usb_pipeendpoint(urb->pipe));
527 dw1 = usb_pipeendpoint(urb->pipe) >> 1;
528
529 /* DW1 */
530 dw1 |= PTD_DEVICE_ADDR(usb_pipedevice(urb->pipe));
531
532 pid_code = qtd->packet_type;
533 dw1 |= PTD_PID_TOKEN(pid_code);
534
535 if (usb_pipebulk(urb->pipe))
536 dw1 |= PTD_TRANS_BULK;
537 else if (usb_pipeint(urb->pipe))
538 dw1 |= PTD_TRANS_INT;
539
540 if (urb->dev->speed != USB_SPEED_HIGH) {
541 /* split transaction */
542
543 dw1 |= PTD_TRANS_SPLIT;
544 if (urb->dev->speed == USB_SPEED_LOW)
545 dw1 |= PTD_SE_USB_LOSPEED;
546
547 dw1 |= PTD_PORT_NUM(urb->dev->ttport);
548 dw1 |= PTD_HUB_NUM(urb->dev->tt->hub->devnum);
549
550 /* SE bit for Split INT transfers */
551 if (usb_pipeint(urb->pipe) &&
552 (urb->dev->speed == USB_SPEED_LOW))
553 dw1 |= 2 << 16;
554
555 dw3 = 0;
556 rl = 0;
557 nak = 0;
558 } else {
559 dw0 |= PTD_MULTI(multi);
560 if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe))
561 dw3 = qh->ping;
562 else
563 dw3 = 0;
564 }
565 /* DW2 */
566 dw2 = 0;
567 dw2 |= PTD_DATA_START_ADDR(base_to_chip(payload));
568 dw2 |= PTD_RL_CNT(rl);
569 dw3 |= PTD_NAC_CNT(nak);
570
571 /* DW3 */
572 if (usb_pipecontrol(urb->pipe))
573 dw3 |= PTD_DATA_TOGGLE(qtd->toggle);
574 else
575 dw3 |= qh->toggle;
576
577
578 dw3 |= PTD_ACTIVE;
579 /* Cerr */
580 dw3 |= PTD_CERR(ERR_COUNTER);
581
582 memset(ptd, 0, sizeof(*ptd));
583
584 ptd->dw0 = cpu_to_le32(dw0);
585 ptd->dw1 = cpu_to_le32(dw1);
586 ptd->dw2 = cpu_to_le32(dw2);
587 ptd->dw3 = cpu_to_le32(dw3);
588}
589
590static void transform_add_int(struct isp1760_hcd *priv, struct isp1760_qh *qh,
591 struct isp1760_qtd *qtd, struct urb *urb,
592 u32 payload, struct ptd *ptd)
593{
594 u32 maxpacket;
595 u32 multi;
596 u32 numberofusofs;
597 u32 i;
598 u32 usofmask, usof;
599 u32 period;
600
601 maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
602 multi = 1 + ((maxpacket >> 11) & 0x3);
603 maxpacket &= 0x7ff;
604 /* length of the data per uframe */
605 maxpacket = multi * maxpacket;
606
607 numberofusofs = urb->transfer_buffer_length / maxpacket;
608 if (urb->transfer_buffer_length % maxpacket)
609 numberofusofs += 1;
610
611 usofmask = 1;
612 usof = 0;
613 for (i = 0; i < numberofusofs; i++) {
614 usof |= usofmask;
615 usofmask <<= 1;
616 }
617
618 if (urb->dev->speed != USB_SPEED_HIGH) {
619 /* split */
620 ptd->dw5 = __constant_cpu_to_le32(0x1c);
621
622 if (qh->period >= 32)
623 period = qh->period / 2;
624 else
625 period = qh->period;
626
627 } else {
628
629 if (qh->period >= 8)
630 period = qh->period/8;
631 else
632 period = qh->period;
633
634 if (period >= 32)
635 period = 16;
636
637 if (qh->period >= 8) {
638 /* millisecond period */
639 period = (period << 3);
640 } else {
 641 /* usof based transfers */
642 /* minimum 4 usofs */
643 usof = 0x11;
644 }
645 }
646
647 ptd->dw2 |= cpu_to_le32(period);
648 ptd->dw4 = cpu_to_le32(usof);
649}
650
651static void transform_into_int(struct isp1760_hcd *priv, struct isp1760_qh *qh,
652 struct isp1760_qtd *qtd, struct urb *urb,
653 u32 payload, struct ptd *ptd)
654{
655 transform_into_atl(priv, qh, qtd, urb, payload, ptd);
656 transform_add_int(priv, qh, qtd, urb, payload, ptd);
657}
658
659static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len,
660 u32 token)
661{
662 int count;
663
664 qtd->data_buffer = databuffer;
665 qtd->packet_type = GET_QTD_TOKEN_TYPE(token);
666 qtd->toggle = GET_DATA_TOGGLE(token);
667
668 if (len > HC_ATL_PL_SIZE)
669 count = HC_ATL_PL_SIZE;
670 else
671 count = len;
672
673 qtd->length = count;
674 return count;
675}
676
677static int check_error(struct ptd *ptd)
678{
679 int error = 0;
680 u32 dw3;
681
682 dw3 = le32_to_cpu(ptd->dw3);
683 if (dw3 & DW3_HALT_BIT)
684 error = -EPIPE;
685
686 if (dw3 & DW3_ERROR_BIT) {
687 printk(KERN_ERR "error bit is set in DW3\n");
688 error = -EPIPE;
689 }
690
691 if (dw3 & DW3_QTD_ACTIVE) {
692 printk(KERN_ERR "transfer active bit is set DW3\n");
693 printk(KERN_ERR "nak counter: %d, rl: %d\n", (dw3 >> 19) & 0xf,
694 (le32_to_cpu(ptd->dw2) >> 25) & 0xf);
695 }
696
697 return error;
698}
699
700static void check_int_err_status(u32 dw4)
701{
702 u32 i;
703
704 dw4 >>= 8;
705
706 for (i = 0; i < 8; i++) {
707 switch (dw4 & 0x7) {
708 case INT_UNDERRUN:
709 printk(KERN_ERR "ERROR: under run , %d\n", i);
710 break;
711
712 case INT_EXACT:
713 printk(KERN_ERR "ERROR: transaction error, %d\n", i);
714 break;
715
716 case INT_BABBLE:
717 printk(KERN_ERR "ERROR: babble error, %d\n", i);
718 break;
719 }
720 dw4 >>= 3;
721 }
722}
723
724static void enqueue_one_qtd(struct isp1760_qtd *qtd, struct isp1760_hcd *priv,
725 u32 payload)
726{
727 u32 token;
728 struct usb_hcd *hcd = priv_to_hcd(priv);
729
730 token = qtd->packet_type;
731
732 if (qtd->length && (qtd->length <= HC_ATL_PL_SIZE)) {
733 switch (token) {
734 case IN_PID:
735 break;
736 case OUT_PID:
737 case SETUP_PID:
738 priv_write_copy(priv, qtd->data_buffer,
739 hcd->regs + payload,
740 qtd->length);
741 }
742 }
743}
744
745static void enqueue_one_atl_qtd(u32 atl_regs, u32 payload,
746 struct isp1760_hcd *priv, struct isp1760_qh *qh,
747 struct urb *urb, u32 slot, struct isp1760_qtd *qtd)
748{
749 struct ptd ptd;
750 struct usb_hcd *hcd = priv_to_hcd(priv);
751
752 transform_into_atl(priv, qh, qtd, urb, payload, &ptd);
753 priv_write_copy(priv, (u32 *)&ptd, hcd->regs + atl_regs, sizeof(ptd));
754 enqueue_one_qtd(qtd, priv, payload);
755
756 priv->atl_ints[slot].urb = urb;
757 priv->atl_ints[slot].qh = qh;
758 priv->atl_ints[slot].qtd = qtd;
759 priv->atl_ints[slot].data_buffer = qtd->data_buffer;
760 priv->atl_ints[slot].payload = payload;
761 qtd->status |= URB_ENQUEUED | URB_TYPE_ATL;
762 qtd->status |= slot << 16;
763}
764
765static void enqueue_one_int_qtd(u32 int_regs, u32 payload,
766 struct isp1760_hcd *priv, struct isp1760_qh *qh,
767 struct urb *urb, u32 slot, struct isp1760_qtd *qtd)
768{
769 struct ptd ptd;
770 struct usb_hcd *hcd = priv_to_hcd(priv);
771
772 transform_into_int(priv, qh, qtd, urb, payload, &ptd);
773 priv_write_copy(priv, (u32 *)&ptd, hcd->regs + int_regs, sizeof(ptd));
774 enqueue_one_qtd(qtd, priv, payload);
775
776 priv->int_ints[slot].urb = urb;
777 priv->int_ints[slot].qh = qh;
778 priv->int_ints[slot].qtd = qtd;
779 priv->int_ints[slot].data_buffer = qtd->data_buffer;
780 priv->int_ints[slot].payload = payload;
781 qtd->status |= URB_ENQUEUED | URB_TYPE_INT;
782 qtd->status |= slot << 16;
783}
784
785void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
786 struct isp1760_qtd *qtd)
787{
788 struct isp1760_hcd *priv = hcd_to_priv(hcd);
789 u32 skip_map, or_map;
790 u32 queue_entry;
791 u32 slot;
792 u32 atl_regs, payload;
793 u32 buffstatus;
794
795 skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
796
797 BUG_ON(!skip_map);
798 slot = __ffs(skip_map);
799 queue_entry = 1 << slot;
800
801 atl_regs = ATL_REGS_OFFSET + slot * sizeof(struct ptd);
802
803 payload = alloc_mem(priv, qtd->length);
804
805 enqueue_one_atl_qtd(atl_regs, payload, priv, qh, qtd->urb, slot, qtd);
806
807 or_map = isp1760_readl(hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
808 or_map |= queue_entry;
809 isp1760_writel(or_map, hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
810
811 skip_map &= ~queue_entry;
812 isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
813
814 buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG);
815 buffstatus |= ATL_BUFFER;
816 isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG);
817}
818
819void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
820 struct isp1760_qtd *qtd)
821{
822 struct isp1760_hcd *priv = hcd_to_priv(hcd);
823 u32 skip_map, or_map;
824 u32 queue_entry;
825 u32 slot;
826 u32 int_regs, payload;
827 u32 buffstatus;
828
829 skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
830
831 BUG_ON(!skip_map);
832 slot = __ffs(skip_map);
833 queue_entry = 1 << slot;
834
835 int_regs = INT_REGS_OFFSET + slot * sizeof(struct ptd);
836
837 payload = alloc_mem(priv, qtd->length);
838
839 enqueue_one_int_qtd(int_regs, payload, priv, qh, qtd->urb, slot, qtd);
840
841 or_map = isp1760_readl(hcd->regs + HC_INT_IRQ_MASK_OR_REG);
842 or_map |= queue_entry;
843 isp1760_writel(or_map, hcd->regs + HC_INT_IRQ_MASK_OR_REG);
844
845 skip_map &= ~queue_entry;
846 isp1760_writel(skip_map, hcd->regs + HC_INT_PTD_SKIPMAP_REG);
847
848 buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG);
849 buffstatus |= INT_BUFFER;
850 isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG);
851}
852
853static void isp1760_urb_done(struct isp1760_hcd *priv, struct urb *urb, int status)
854__releases(priv->lock)
855__acquires(priv->lock)
856{
857 if (!urb->unlinked) {
858 if (status == -EINPROGRESS)
859 status = 0;
860 }
861
862 /* complete() can reenter this HCD */
863 usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
864 spin_unlock(&priv->lock);
865 usb_hcd_giveback_urb(priv_to_hcd(priv), urb, status);
866 spin_lock(&priv->lock);
867}
868
869static void isp1760_qtd_free(struct isp1760_qtd *qtd)
870{
871 kmem_cache_free(qtd_cachep, qtd);
872}
873
874static struct isp1760_qtd *clean_this_qtd(struct isp1760_qtd *qtd)
875{
876 struct isp1760_qtd *tmp_qtd;
877
878 tmp_qtd = qtd->hw_next;
879 list_del(&qtd->qtd_list);
880 isp1760_qtd_free(qtd);
881 return tmp_qtd;
882}
883
884/*
885 * Remove this QTD from the QH list and free its memory. If this QTD
 886 * isn't the last one, then also remove its successor(s).
 887 * Returns the QTD which is part of a new URB and should be enqueued.
888 */
889static struct isp1760_qtd *clean_up_qtdlist(struct isp1760_qtd *qtd)
890{
891 struct isp1760_qtd *tmp_qtd;
892 int last_one;
893
894 do {
895 tmp_qtd = qtd->hw_next;
896 last_one = qtd->status & URB_COMPLETE_NOTIFY;
897 list_del(&qtd->qtd_list);
898 isp1760_qtd_free(qtd);
899 qtd = tmp_qtd;
900 } while (!last_one && qtd);
901
902 return qtd;
903}
904
905static void do_atl_int(struct usb_hcd *usb_hcd)
906{
907 struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
908 u32 done_map, skip_map;
909 struct ptd ptd;
910 struct urb *urb = NULL;
911 u32 atl_regs_base;
912 u32 atl_regs;
913 u32 queue_entry;
914 u32 payload;
915 u32 length;
916 u32 or_map;
917 u32 status = -EINVAL;
918 int error;
919 struct isp1760_qtd *qtd;
920 struct isp1760_qh *qh;
921 u32 rl;
922 u32 nakcount;
923
924 done_map = isp1760_readl(usb_hcd->regs +
925 HC_ATL_PTD_DONEMAP_REG);
926 skip_map = isp1760_readl(usb_hcd->regs +
927 HC_ATL_PTD_SKIPMAP_REG);
928
929 or_map = isp1760_readl(usb_hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
930 or_map &= ~done_map;
931 isp1760_writel(or_map, usb_hcd->regs + HC_ATL_IRQ_MASK_OR_REG);
932
933 atl_regs_base = ATL_REGS_OFFSET;
934 while (done_map) {
935 u32 dw1;
936 u32 dw2;
937 u32 dw3;
938
939 status = 0;
940
941 queue_entry = __ffs(done_map);
942 done_map &= ~(1 << queue_entry);
943 skip_map |= 1 << queue_entry;
944
945 atl_regs = atl_regs_base + queue_entry * sizeof(struct ptd);
946
947 urb = priv->atl_ints[queue_entry].urb;
948 qtd = priv->atl_ints[queue_entry].qtd;
949 qh = priv->atl_ints[queue_entry].qh;
950 payload = priv->atl_ints[queue_entry].payload;
951
952 if (!qh) {
953 printk(KERN_ERR "qh is 0\n");
954 continue;
955 }
956 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs,
957 atl_regs, sizeof(ptd));
958
959 dw1 = le32_to_cpu(ptd.dw1);
960 dw2 = le32_to_cpu(ptd.dw2);
961 dw3 = le32_to_cpu(ptd.dw3);
962 rl = (dw2 >> 25) & 0x0f;
963 nakcount = (dw3 >> 19) & 0xf;
964
965 /* Transfer Error, *but* active and no HALT -> reload */
966 if ((dw3 & DW3_ERROR_BIT) && (dw3 & DW3_QTD_ACTIVE) &&
967 !(dw3 & DW3_HALT_BIT)) {
968
969 /* according to ppriv code, we have to
 970 * reload this one if transferred bytes != requested bytes
 971 * else act like everything went smoothly...
972 * XXX This just doesn't feel right and hasn't
973 * triggered so far.
974 */
975
976 length = PTD_XFERRED_LENGTH(dw3);
977 printk(KERN_ERR "Should reload now.... transfered %d "
978 "of %zu\n", length, qtd->length);
979 BUG();
980 }
981
982 if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) {
983 u32 buffstatus;
984
985 /* XXX
 986 * NAKs are handled in HW by the chip, usually when the
 987 * device is not able to send data fast enough.
 988 * This has not triggered for a long time now.
989 */
990 printk(KERN_ERR "Reloading ptd %p/%p... qh %p readed: "
991 "%d of %d done: %08x cur: %08x\n", qtd,
992 urb, qh, PTD_XFERRED_LENGTH(dw3),
993 qtd->length, done_map,
994 (1 << queue_entry));
995
996 /* RL counter = ERR counter */
997 dw3 &= ~(0xf << 19);
998 dw3 |= rl << 19;
999 dw3 &= ~(3 << (55 - 32));
1000 dw3 |= ERR_COUNTER << (55 - 32);
1001
1002 /*
 1003 * There is no need to write the skip map back because it
 1004 * is unchanged. Just make sure that this entry is
1005 * unskipped once it gets written to the HW.
1006 */
1007 skip_map &= ~(1 << queue_entry);
1008 or_map = isp1760_readl(usb_hcd->regs +
1009 HC_ATL_IRQ_MASK_OR_REG);
1010 or_map |= 1 << queue_entry;
1011 isp1760_writel(or_map, usb_hcd->regs +
1012 HC_ATL_IRQ_MASK_OR_REG);
1013
1014 ptd.dw3 = cpu_to_le32(dw3);
1015 priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs +
1016 atl_regs, sizeof(ptd));
1017
1018 ptd.dw0 |= __constant_cpu_to_le32(PTD_VALID);
1019 priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs +
1020 atl_regs, sizeof(ptd));
1021
1022 buffstatus = isp1760_readl(usb_hcd->regs +
1023 HC_BUFFER_STATUS_REG);
1024 buffstatus |= ATL_BUFFER;
1025 isp1760_writel(buffstatus, usb_hcd->regs +
1026 HC_BUFFER_STATUS_REG);
1027 continue;
1028 }
1029
1030 error = check_error(&ptd);
1031 if (error) {
1032 status = error;
1033 priv->atl_ints[queue_entry].qh->toggle = 0;
1034 priv->atl_ints[queue_entry].qh->ping = 0;
1035 urb->status = -EPIPE;
1036
1037#if 0
1038 printk(KERN_ERR "Error in %s().\n", __func__);
1039 printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x "
1040 "dw3: %08x dw4: %08x dw5: %08x dw6: "
1041 "%08x dw7: %08x\n",
1042 ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3,
1043 ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
1044#endif
1045 } else {
1046 if (usb_pipetype(urb->pipe) == PIPE_BULK) {
1047 priv->atl_ints[queue_entry].qh->toggle = dw3 &
1048 (1 << 25);
1049 priv->atl_ints[queue_entry].qh->ping = dw3 &
1050 (1 << 26);
1051 }
1052 }
1053
1054 length = PTD_XFERRED_LENGTH(dw3);
1055 if (length) {
1056 switch (DW1_GET_PID(dw1)) {
1057 case IN_PID:
1058 priv_read_copy(priv,
1059 priv->atl_ints[queue_entry].data_buffer,
1060 usb_hcd->regs + payload, payload,
1061 length);
1062
1063 case OUT_PID:
1064
1065 urb->actual_length += length;
1066
1067 case SETUP_PID:
1068 break;
1069 }
1070 }
1071
1072 priv->atl_ints[queue_entry].data_buffer = NULL;
1073 priv->atl_ints[queue_entry].urb = NULL;
1074 priv->atl_ints[queue_entry].qtd = NULL;
1075 priv->atl_ints[queue_entry].qh = NULL;
1076
1077 free_mem(priv, payload);
1078
1079 isp1760_writel(skip_map, usb_hcd->regs +
1080 HC_ATL_PTD_SKIPMAP_REG);
1081
1082 if (urb->status == -EPIPE) {
1083 /* HALT was received */
1084
1085 qtd = clean_up_qtdlist(qtd);
1086 isp1760_urb_done(priv, urb, urb->status);
1087
1088 } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
1089 /* short BULK received */
1090
1091 printk(KERN_ERR "short bulk, %d instead %d\n", length,
1092 qtd->length);
1093 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
1094 urb->status = -EREMOTEIO;
1095 printk(KERN_ERR "not okey\n");
1096 }
1097
1098 if (urb->status == -EINPROGRESS)
1099 urb->status = 0;
1100
1101 qtd = clean_up_qtdlist(qtd);
1102
1103 isp1760_urb_done(priv, urb, urb->status);
1104
1105 } else if (qtd->status & URB_COMPLETE_NOTIFY) {
1106 /* that was the last qtd of that URB */
1107
1108 if (urb->status == -EINPROGRESS)
1109 urb->status = 0;
1110
1111 qtd = clean_this_qtd(qtd);
1112 isp1760_urb_done(priv, urb, urb->status);
1113
1114 } else {
1115 /* next QTD of this URB */
1116
1117 qtd = clean_this_qtd(qtd);
1118 BUG_ON(!qtd);
1119 }
1120
1121 if (qtd)
1122 enqueue_an_ATL_packet(usb_hcd, qh, qtd);
1123
1124 skip_map = isp1760_readl(usb_hcd->regs +
1125 HC_ATL_PTD_SKIPMAP_REG);
1126 }
1127}
1128
1129static void do_intl_int(struct usb_hcd *usb_hcd)
1130{
1131 struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
1132 u32 done_map, skip_map;
1133 struct ptd ptd;
1134 struct urb *urb = NULL;
1135 u32 int_regs;
1136 u32 int_regs_base;
1137 u32 payload;
1138 u32 length;
1139 u32 or_map;
1140 int error;
1141 u32 queue_entry;
1142 struct isp1760_qtd *qtd;
1143 struct isp1760_qh *qh;
1144
1145 done_map = isp1760_readl(usb_hcd->regs +
1146 HC_INT_PTD_DONEMAP_REG);
1147 skip_map = isp1760_readl(usb_hcd->regs +
1148 HC_INT_PTD_SKIPMAP_REG);
1149
1150 or_map = isp1760_readl(usb_hcd->regs + HC_INT_IRQ_MASK_OR_REG);
1151 or_map &= ~done_map;
1152 isp1760_writel(or_map, usb_hcd->regs + HC_INT_IRQ_MASK_OR_REG);
1153
1154 int_regs_base = INT_REGS_OFFSET;
1155
1156 while (done_map) {
1157 u32 dw1;
1158 u32 dw3;
1159
1160 queue_entry = __ffs(done_map);
1161 done_map &= ~(1 << queue_entry);
1162 skip_map |= 1 << queue_entry;
1163
1164 int_regs = int_regs_base + queue_entry * sizeof(struct ptd);
1165 urb = priv->int_ints[queue_entry].urb;
1166 qtd = priv->int_ints[queue_entry].qtd;
1167 qh = priv->int_ints[queue_entry].qh;
1168 payload = priv->int_ints[queue_entry].payload;
1169
1170 if (!qh) {
1171 printk(KERN_ERR "(INT) qh is 0\n");
1172 continue;
1173 }
1174
1175 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs,
1176 int_regs, sizeof(ptd));
1177 dw1 = le32_to_cpu(ptd.dw1);
1178 dw3 = le32_to_cpu(ptd.dw3);
1179 check_int_err_status(le32_to_cpu(ptd.dw4));
1180
1181 error = check_error(&ptd);
1182 if (error) {
1183#if 0
1184 printk(KERN_ERR "Error in %s().\n", __func__);
1185 printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x "
1186 "dw3: %08x dw4: %08x dw5: %08x dw6: "
1187 "%08x dw7: %08x\n",
1188 ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3,
1189 ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7);
1190#endif
1191 urb->status = -EPIPE;
1192 priv->int_ints[queue_entry].qh->toggle = 0;
1193 priv->int_ints[queue_entry].qh->ping = 0;
1194
1195 } else {
1196 priv->int_ints[queue_entry].qh->toggle =
1197 dw3 & (1 << 25);
1198 priv->int_ints[queue_entry].qh->ping = dw3 & (1 << 26);
1199 }
1200
1201 if (urb->dev->speed != USB_SPEED_HIGH)
1202 length = PTD_XFERRED_LENGTH_LO(dw3);
1203 else
1204 length = PTD_XFERRED_LENGTH(dw3);
1205
1206 if (length) {
1207 switch (DW1_GET_PID(dw1)) {
1208 case IN_PID:
1209 priv_read_copy(priv,
1210 priv->int_ints[queue_entry].data_buffer,
1211 usb_hcd->regs + payload , payload,
1212 length);
1213 case OUT_PID:
1214
1215 urb->actual_length += length;
1216
1217 case SETUP_PID:
1218 break;
1219 }
1220 }
1221
1222 priv->int_ints[queue_entry].data_buffer = NULL;
1223 priv->int_ints[queue_entry].urb = NULL;
1224 priv->int_ints[queue_entry].qtd = NULL;
1225 priv->int_ints[queue_entry].qh = NULL;
1226
1227 isp1760_writel(skip_map, usb_hcd->regs +
1228 HC_INT_PTD_SKIPMAP_REG);
1229 free_mem(priv, payload);
1230
1231 if (urb->status == -EPIPE) {
1232 /* HALT received */
1233
1234 qtd = clean_up_qtdlist(qtd);
1235 isp1760_urb_done(priv, urb, urb->status);
1236
1237 } else if (qtd->status & URB_COMPLETE_NOTIFY) {
1238
1239 if (urb->status == -EINPROGRESS)
1240 urb->status = 0;
1241
1242 qtd = clean_this_qtd(qtd);
1243 isp1760_urb_done(priv, urb, urb->status);
1244
1245 } else {
1246 /* next QTD of this URB */
1247
1248 qtd = clean_this_qtd(qtd);
1249 BUG_ON(!qtd);
1250 }
1251
1252 if (qtd)
1253 enqueue_an_INT_packet(usb_hcd, qh, qtd);
1254
1255 skip_map = isp1760_readl(usb_hcd->regs +
1256 HC_INT_PTD_SKIPMAP_REG);
1257 }
1258}
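
do_atl_int() and do_intl_int() above share the same servicing pattern: read the PTD done map, walk it lowest-set-bit first via __ffs(), and mark every serviced slot in the skip map so the controller leaves it alone. Below is a tiny stand-alone sketch of just that bitmap walk, with the compiler builtin __builtin_ctz() standing in for the kernel's __ffs() and a printf() in place of the real slot handling:

    /* Stand-alone sketch of the done-map walk used by do_atl_int()/do_intl_int(). */
    #include <stdio.h>

    int main(void)
    {
    	unsigned int done_map = 0x00008402;	/* pretend slots 1, 10 and 15 completed */
    	unsigned int skip_map = ~done_map;	/* completed slots are currently unskipped */

    	while (done_map) {
    		unsigned int slot = __builtin_ctz(done_map);	/* lowest set bit */

    		done_map &= ~(1u << slot);
    		skip_map |= 1u << slot;	/* tell the HC to skip this slot again */

    		/* ... read back the PTD, complete or requeue the URB, etc. ... */
    		printf("servicing slot %u, skip_map now %#010x\n", slot, skip_map);
    	}
    	return 0;
    }
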
1259
1260#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
1261static struct isp1760_qh *qh_make(struct isp1760_hcd *priv, struct urb *urb,
1262 gfp_t flags)
1263{
1264 struct isp1760_qh *qh;
1265 int is_input, type;
1266
1267 qh = isp1760_qh_alloc(priv, flags);
1268 if (!qh)
1269 return qh;
1270
1271 /*
1272 * init endpoint/device data for this QH
1273 */
1274 is_input = usb_pipein(urb->pipe);
1275 type = usb_pipetype(urb->pipe);
1276
1277 if (type == PIPE_INTERRUPT) {
1278
1279 if (urb->dev->speed == USB_SPEED_HIGH) {
1280
1281 qh->period = urb->interval >> 3;
1282 if (qh->period == 0 && urb->interval != 1) {
1283 /* NOTE interval 2 or 4 uframes could work.
1284 * But interval 1 scheduling is simpler, and
1285 * includes high bandwidth.
1286 */
1287 printk(KERN_ERR "intr period %d uframes, NYET!",
1288 urb->interval);
1289 qh_destroy(qh);
1290 return NULL;
1291 }
1292 } else {
1293 qh->period = urb->interval;
1294 }
1295 }
1296
1297 /* support for tt scheduling, and access to toggles */
1298 qh->dev = urb->dev;
1299
1300 if (!usb_pipecontrol(urb->pipe))
1301 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input,
1302 1);
1303 return qh;
1304}
1305
1306/*
1307 * For control/bulk/interrupt, return QH with these TDs appended.
1308 * Allocates and initializes the QH if necessary.
1309 * Returns null if it can't allocate a QH it needs to.
1310 * If the QH has TDs (urbs) already, that's great.
1311 */
1312static struct isp1760_qh *qh_append_tds(struct isp1760_hcd *priv,
1313 struct urb *urb, struct list_head *qtd_list, int epnum,
1314 void **ptr)
1315{
1316 struct isp1760_qh *qh;
1317 struct isp1760_qtd *qtd;
1318 struct isp1760_qtd *prev_qtd;
1319
1320 qh = (struct isp1760_qh *)*ptr;
1321 if (!qh) {
1322 /* can't sleep here, we have priv->lock... */
1323 qh = qh_make(priv, urb, GFP_ATOMIC);
1324 if (!qh)
1325 return qh;
1326 *ptr = qh;
1327 }
1328
1329 qtd = list_entry(qtd_list->next, struct isp1760_qtd,
1330 qtd_list);
1331 if (!list_empty(&qh->qtd_list))
1332 prev_qtd = list_entry(qh->qtd_list.prev,
1333 struct isp1760_qtd, qtd_list);
1334 else
1335 prev_qtd = NULL;
1336
1337 list_splice(qtd_list, qh->qtd_list.prev);
1338 if (prev_qtd) {
1339 BUG_ON(prev_qtd->hw_next);
1340 prev_qtd->hw_next = qtd;
1341 }
1342
1343 urb->hcpriv = qh;
1344 return qh;
1345}
1346
1347static void qtd_list_free(struct isp1760_hcd *priv, struct urb *urb,
1348 struct list_head *qtd_list)
1349{
1350 struct list_head *entry, *temp;
1351
1352 list_for_each_safe(entry, temp, qtd_list) {
1353 struct isp1760_qtd *qtd;
1354
1355 qtd = list_entry(entry, struct isp1760_qtd, qtd_list);
1356 list_del(&qtd->qtd_list);
1357 isp1760_qtd_free(qtd);
1358 }
1359}
1360
1361static int isp1760_prepare_enqueue(struct isp1760_hcd *priv, struct urb *urb,
1362 struct list_head *qtd_list, gfp_t mem_flags, packet_enqueue *p)
1363{
1364 struct isp1760_qtd *qtd;
1365 int epnum;
1366 unsigned long flags;
1367 struct isp1760_qh *qh = NULL;
1368 int rc;
1369 int qh_busy;
1370
1371 qtd = list_entry(qtd_list->next, struct isp1760_qtd, qtd_list);
1372 epnum = urb->ep->desc.bEndpointAddress;
1373
1374 spin_lock_irqsave(&priv->lock, flags);
1375 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &priv_to_hcd(priv)->flags)) {
1376 rc = -ESHUTDOWN;
1377 goto done;
1378 }
1379 rc = usb_hcd_link_urb_to_ep(priv_to_hcd(priv), urb);
1380 if (rc)
1381 goto done;
1382
1383 qh = urb->ep->hcpriv;
1384 if (qh)
1385 qh_busy = !list_empty(&qh->qtd_list);
1386 else
1387 qh_busy = 0;
1388
1389 qh = qh_append_tds(priv, urb, qtd_list, epnum, &urb->ep->hcpriv);
1390 if (!qh) {
1391 usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
1392 rc = -ENOMEM;
1393 goto done;
1394 }
1395
1396 if (!qh_busy)
1397 p(priv_to_hcd(priv), qh, qtd);
1398
1399done:
1400 spin_unlock_irqrestore(&priv->lock, flags);
1401 if (!qh)
1402 qtd_list_free(priv, urb, qtd_list);
1403 return rc;
1404}
1405
1406static struct isp1760_qtd *isp1760_qtd_alloc(struct isp1760_hcd *priv,
1407 gfp_t flags)
1408{
1409 struct isp1760_qtd *qtd;
1410
1411 qtd = kmem_cache_zalloc(qtd_cachep, flags);
1412 if (qtd)
1413 INIT_LIST_HEAD(&qtd->qtd_list);
1414
1415 return qtd;
1416}
1417
1418/*
1419 * create a list of filled qtds for this URB; won't link into qh.
1420 */
1421static struct list_head *qh_urb_transaction(struct isp1760_hcd *priv,
1422 struct urb *urb, struct list_head *head, gfp_t flags)
1423{
1424 struct isp1760_qtd *qtd, *qtd_prev;
1425 void *buf;
1426 int len, maxpacket;
1427 int is_input;
1428 u32 token;
1429
1430 /*
1431 * URBs map to sequences of QTDs: one logical transaction
1432 */
1433 qtd = isp1760_qtd_alloc(priv, flags);
1434 if (!qtd)
1435 return NULL;
1436
1437 list_add_tail(&qtd->qtd_list, head);
1438 qtd->urb = urb;
1439 urb->status = -EINPROGRESS;
1440
1441 token = 0;
1442 /* for split transactions, SplitXState initialized to zero */
1443
1444 len = urb->transfer_buffer_length;
1445 is_input = usb_pipein(urb->pipe);
1446 if (usb_pipecontrol(urb->pipe)) {
1447 /* SETUP pid */
1448 qtd_fill(qtd, urb->setup_packet,
1449 sizeof(struct usb_ctrlrequest),
1450 token | SETUP_PID);
1451
1452 /* ... and always at least one more pid */
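		/* (SETUP is always DATA0, so the stage that follows it starts
		 * on DATA1 -- hence the toggle flip below)
		 */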
1453 token ^= DATA_TOGGLE;
1454 qtd_prev = qtd;
1455 qtd = isp1760_qtd_alloc(priv, flags);
1456 if (!qtd)
1457 goto cleanup;
1458 qtd->urb = urb;
1459 qtd_prev->hw_next = qtd;
1460 list_add_tail(&qtd->qtd_list, head);
1461
1462 /* for zero length DATA stages, STATUS is always IN */
1463 if (len == 0)
1464 token |= IN_PID;
1465 }
1466
1467 /*
1468 * data transfer stage: buffer setup
1469 */
1470 buf = urb->transfer_buffer;
1471
1472 if (is_input)
1473 token |= IN_PID;
1474 else
1475 token |= OUT_PID;
1476
1477 maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
1478
1479 /*
1480 * buffer gets wrapped in one or more qtds;
1481 * last one may be "short" (including zero len)
1482 * and may serve as a control status ack
1483 */
1484 for (;;) {
1485 int this_qtd_len;
1486
1487 if (!buf && len) {
1488			/* XXX This looks like a usb storage / SCSI bug */
1489 printk(KERN_ERR "buf is null, dma is %08lx len is %d\n",
1490 (long unsigned)urb->transfer_dma, len);
1491 WARN_ON(1);
1492 }
1493
1494 this_qtd_len = qtd_fill(qtd, buf, len, token);
1495 len -= this_qtd_len;
1496 buf += this_qtd_len;
1497
1498 /* qh makes control packets use qtd toggle; maybe switch it */
1499 if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
1500 token ^= DATA_TOGGLE;
1501
1502 if (len <= 0)
1503 break;
1504
1505 qtd_prev = qtd;
1506 qtd = isp1760_qtd_alloc(priv, flags);
1507 if (!qtd)
1508 goto cleanup;
1509 qtd->urb = urb;
1510 qtd_prev->hw_next = qtd;
1511 list_add_tail(&qtd->qtd_list, head);
1512 }
1513
1514 /*
1515 * control requests may need a terminating data "status" ack;
1516 * bulk ones may need a terminating short packet (zero length).
1517 */
1518 if (urb->transfer_buffer_length != 0) {
1519 int one_more = 0;
1520
1521 if (usb_pipecontrol(urb->pipe)) {
1522 one_more = 1;
1523 /* "in" <--> "out" */
1524 token ^= IN_PID;
1525 /* force DATA1 */
1526 token |= DATA_TOGGLE;
1527 } else if (usb_pipebulk(urb->pipe)
1528 && (urb->transfer_flags & URB_ZERO_PACKET)
1529 && !(urb->transfer_buffer_length % maxpacket)) {
1530 one_more = 1;
1531 }
1532 if (one_more) {
1533 qtd_prev = qtd;
1534 qtd = isp1760_qtd_alloc(priv, flags);
1535 if (!qtd)
1536 goto cleanup;
1537 qtd->urb = urb;
1538 qtd_prev->hw_next = qtd;
1539 list_add_tail(&qtd->qtd_list, head);
1540
1541 /* never any data in such packets */
1542 qtd_fill(qtd, NULL, 0, token);
1543 }
1544 }
1545
1546 qtd->status = URB_COMPLETE_NOTIFY;
1547 return head;
1548
1549cleanup:
1550 qtd_list_free(priv, urb, head);
1551 return NULL;
1552}
1553
1554static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1555 gfp_t mem_flags)
1556{
1557 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1558 struct list_head qtd_list;
1559 packet_enqueue *pe;
1560
1561 INIT_LIST_HEAD(&qtd_list);
1562
1563 switch (usb_pipetype(urb->pipe)) {
1564 case PIPE_CONTROL:
1565 case PIPE_BULK:
1566
1567 if (!qh_urb_transaction(priv, urb, &qtd_list, mem_flags))
1568 return -ENOMEM;
1569 pe = enqueue_an_ATL_packet;
1570 break;
1571
1572 case PIPE_INTERRUPT:
1573 if (!qh_urb_transaction(priv, urb, &qtd_list, mem_flags))
1574 return -ENOMEM;
1575 pe = enqueue_an_INT_packet;
1576 break;
1577
1578 case PIPE_ISOCHRONOUS:
1579 printk(KERN_ERR "PIPE_ISOCHRONOUS ain't supported\n");
1580 default:
1581 return -EPIPE;
1582 }
1583
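	/* XXX the return value of isp1760_prepare_enqueue() is ignored here,
	 * so submission errors (-ESHUTDOWN, -ENOMEM, link failures) are not
	 * reported back to the USB core.
	 */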
1584 isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
1585 return 0;
1586}
1587
1588static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1589 int status)
1590{
1591 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1592 struct inter_packet_info *ints;
1593 u32 i;
1594 u32 reg_base, or_reg, skip_reg;
1595	unsigned long flags;
1596 struct ptd ptd;
1597
1598 switch (usb_pipetype(urb->pipe)) {
1599 case PIPE_ISOCHRONOUS:
1600 return -EPIPE;
1601 break;
1602
1603 case PIPE_INTERRUPT:
1604 ints = priv->int_ints;
1605 reg_base = INT_REGS_OFFSET;
1606 or_reg = HC_INT_IRQ_MASK_OR_REG;
1607 skip_reg = HC_INT_PTD_SKIPMAP_REG;
1608 break;
1609
1610 default:
1611 ints = priv->atl_ints;
1612 reg_base = ATL_REGS_OFFSET;
1613 or_reg = HC_ATL_IRQ_MASK_OR_REG;
1614 skip_reg = HC_ATL_PTD_SKIPMAP_REG;
1615 break;
1616 }
1617
1618 memset(&ptd, 0, sizeof(ptd));
1619 spin_lock_irqsave(&priv->lock, flags);
1620
1621 for (i = 0; i < 32; i++) {
1622 if (ints->urb == urb) {
1623 u32 skip_map;
1624 u32 or_map;
1625 struct isp1760_qtd *qtd;
1626
1627 skip_map = isp1760_readl(hcd->regs + skip_reg);
1628 skip_map |= 1 << i;
1629 isp1760_writel(skip_map, hcd->regs + skip_reg);
1630
1631 or_map = isp1760_readl(hcd->regs + or_reg);
1632 or_map &= ~(1 << i);
1633 isp1760_writel(or_map, hcd->regs + or_reg);
1634
1635 priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
1636 + i * sizeof(ptd), sizeof(ptd));
1637 qtd = ints->qtd;
1638
1639 clean_up_qtdlist(qtd);
1640
1641 free_mem(priv, ints->payload);
1642
1643 ints->urb = NULL;
1644 ints->qh = NULL;
1645 ints->qtd = NULL;
1646 ints->data_buffer = NULL;
1647 ints->payload = 0;
1648
1649 isp1760_urb_done(priv, urb, status);
1650 break;
1651 }
1652 ints++;
1653 }
1654
1655 spin_unlock_irqrestore(&priv->lock, flags);
1656 return 0;
1657}
1658
1659static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd)
1660{
1661 struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
1662 u32 imask;
1663 irqreturn_t irqret = IRQ_NONE;
1664
1665 spin_lock(&priv->lock);
1666
1667 if (!(usb_hcd->state & HC_STATE_RUNNING))
1668 goto leave;
1669
1670 imask = isp1760_readl(usb_hcd->regs + HC_INTERRUPT_REG);
1671 if (unlikely(!imask))
1672 goto leave;
1673
1674 isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG);
1675 if (imask & HC_ATL_INT)
1676 do_atl_int(usb_hcd);
1677
1678 if (imask & HC_INTL_INT)
1679 do_intl_int(usb_hcd);
1680
1681 irqret = IRQ_HANDLED;
1682leave:
1683 spin_unlock(&priv->lock);
1684 return irqret;
1685}
1686
1687static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
1688{
1689 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1690 u32 temp, status = 0;
1691 u32 mask;
1692 int retval = 1;
1693 unsigned long flags;
1694
1695 /* if !USB_SUSPEND, root hub timers won't get shut down ... */
1696 if (!HC_IS_RUNNING(hcd->state))
1697 return 0;
1698
1699 /* init status to no-changes */
1700 buf[0] = 0;
1701 mask = PORT_CSC;
1702
1703 spin_lock_irqsave(&priv->lock, flags);
1704 temp = isp1760_readl(hcd->regs + HC_PORTSC1);
1705
1706 if (temp & PORT_OWNER) {
1707 if (temp & PORT_CSC) {
1708 temp &= ~PORT_CSC;
1709 isp1760_writel(temp, hcd->regs + HC_PORTSC1);
1710 goto done;
1711 }
1712 }
1713
1714 /*
1715 * Return status information even for ports with OWNER set.
1716 * Otherwise khubd wouldn't see the disconnect event when a
1717 * high-speed device is switched over to the companion
1718 * controller by the user.
1719 */
1720
1721 if ((temp & mask) != 0
1722 || ((temp & PORT_RESUME) != 0
1723 && time_after_eq(jiffies,
1724 priv->reset_done))) {
1725 buf [0] |= 1 << (0 + 1);
1726 status = STS_PCD;
1727 }
1728 /* FIXME autosuspend idle root hubs */
1729done:
1730 spin_unlock_irqrestore(&priv->lock, flags);
1731 return status ? retval : 0;
1732}
1733
1734static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
1735 struct usb_hub_descriptor *desc)
1736{
1737 int ports = HCS_N_PORTS(priv->hcs_params);
1738 u16 temp;
1739
1740 desc->bDescriptorType = 0x29;
1741	/* ehci 1.0, 2.3.9 says 20ms max */
1742 desc->bPwrOn2PwrGood = 10;
1743 desc->bHubContrCurrent = 0;
1744
1745 desc->bNbrPorts = ports;
1746 temp = 1 + (ports / 8);
1747 desc->bDescLength = 7 + 2 * temp;
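	/* (7 bytes of fixed fields plus two 'temp'-byte bitmaps, filled in
	 * just below: ports-removable and the legacy PortPwrCtrlMask)
	 */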
1748
1749 /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
1750 memset(&desc->bitmap[0], 0, temp);
1751 memset(&desc->bitmap[temp], 0xff, temp);
1752
1753 /* per-port overcurrent reporting */
1754 temp = 0x0008;
1755 if (HCS_PPC(priv->hcs_params))
1756 /* per-port power control */
1757 temp |= 0x0001;
1758 else
1759 /* no power switching */
1760 temp |= 0x0002;
1761 desc->wHubCharacteristics = cpu_to_le16(temp);
1762}
1763
1764#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
1765
1766static int check_reset_complete(struct isp1760_hcd *priv, int index,
1767 u32 __iomem *status_reg, int port_status)
1768{
1769 if (!(port_status & PORT_CONNECT))
1770 return port_status;
1771
1772 /* if reset finished and it's still not enabled -- handoff */
1773 if (!(port_status & PORT_PE)) {
1774
1775 printk(KERN_ERR "port %d full speed --> companion\n",
1776 index + 1);
1777
1778 port_status |= PORT_OWNER;
1779 port_status &= ~PORT_RWC_BITS;
1780 isp1760_writel(port_status, status_reg);
1781
1782 } else
1783 printk(KERN_ERR "port %d high speed\n", index + 1);
1784
1785 return port_status;
1786}
1787
1788static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
1789 u16 wValue, u16 wIndex, char *buf, u16 wLength)
1790{
1791 struct isp1760_hcd *priv = hcd_to_priv(hcd);
1792 int ports = HCS_N_PORTS(priv->hcs_params);
1793 u32 __iomem *status_reg = hcd->regs + HC_PORTSC1;
1794 u32 temp, status;
1795 unsigned long flags;
1796 int retval = 0;
1797 unsigned selector;
1798
1799 /*
1800 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
1801 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
1802 * (track current state ourselves) ... blink for diagnostics,
1803 * power, "this is the one", etc. EHCI spec supports this.
1804 */
1805
1806 spin_lock_irqsave(&priv->lock, flags);
1807 switch (typeReq) {
1808 case ClearHubFeature:
1809 switch (wValue) {
1810 case C_HUB_LOCAL_POWER:
1811 case C_HUB_OVER_CURRENT:
1812 /* no hub-wide feature/status flags */
1813 break;
1814 default:
1815 goto error;
1816 }
1817 break;
1818 case ClearPortFeature:
1819 if (!wIndex || wIndex > ports)
1820 goto error;
1821 wIndex--;
1822 temp = isp1760_readl(status_reg);
1823
1824 /*
1825 * Even if OWNER is set, so the port is owned by the
1826 * companion controller, khubd needs to be able to clear
1827 * the port-change status bits (especially
1828 * USB_PORT_FEAT_C_CONNECTION).
1829 */
1830
1831 switch (wValue) {
1832 case USB_PORT_FEAT_ENABLE:
1833 isp1760_writel(temp & ~PORT_PE, status_reg);
1834 break;
1835 case USB_PORT_FEAT_C_ENABLE:
1836 /* XXX error? */
1837 break;
1838 case USB_PORT_FEAT_SUSPEND:
1839 if (temp & PORT_RESET)
1840 goto error;
1841
1842 if (temp & PORT_SUSPEND) {
1843 if ((temp & PORT_PE) == 0)
1844 goto error;
1845 /* resume signaling for 20 msec */
1846 temp &= ~(PORT_RWC_BITS);
1847 isp1760_writel(temp | PORT_RESUME,
1848 status_reg);
1849 priv->reset_done = jiffies +
1850 msecs_to_jiffies(20);
1851 }
1852 break;
1853 case USB_PORT_FEAT_C_SUSPEND:
1854 /* we auto-clear this feature */
1855 break;
1856 case USB_PORT_FEAT_POWER:
1857 if (HCS_PPC(priv->hcs_params))
1858 isp1760_writel(temp & ~PORT_POWER, status_reg);
1859 break;
1860 case USB_PORT_FEAT_C_CONNECTION:
1861 isp1760_writel(temp | PORT_CSC,
1862 status_reg);
1863 break;
1864 case USB_PORT_FEAT_C_OVER_CURRENT:
1865 /* XXX error ?*/
1866 break;
1867 case USB_PORT_FEAT_C_RESET:
1868 /* GetPortStatus clears reset */
1869 break;
1870 default:
1871 goto error;
1872 }
1873 isp1760_readl(hcd->regs + HC_USBCMD);
1874 break;
1875 case GetHubDescriptor:
1876 isp1760_hub_descriptor(priv, (struct usb_hub_descriptor *)
1877 buf);
1878 break;
1879 case GetHubStatus:
1880 /* no hub-wide feature/status flags */
1881 memset(buf, 0, 4);
1882 break;
1883 case GetPortStatus:
1884 if (!wIndex || wIndex > ports)
1885 goto error;
1886 wIndex--;
1887 status = 0;
1888 temp = isp1760_readl(status_reg);
1889
1890 /* wPortChange bits */
1891 if (temp & PORT_CSC)
1892 status |= 1 << USB_PORT_FEAT_C_CONNECTION;
1893
1894
1895 /* whoever resumes must GetPortStatus to complete it!! */
1896 if (temp & PORT_RESUME) {
1897 printk(KERN_ERR "Port resume should be skipped.\n");
1898
1899 /* Remote Wakeup received? */
1900 if (!priv->reset_done) {
1901 /* resume signaling for 20 msec */
1902 priv->reset_done = jiffies
1903 + msecs_to_jiffies(20);
1904 /* check the port again */
1905 mod_timer(&priv_to_hcd(priv)->rh_timer,
1906 priv->reset_done);
1907 }
1908
1909 /* resume completed? */
1910 else if (time_after_eq(jiffies,
1911 priv->reset_done)) {
1912 status |= 1 << USB_PORT_FEAT_C_SUSPEND;
1913 priv->reset_done = 0;
1914
1915 /* stop resume signaling */
1916 temp = isp1760_readl(status_reg);
1917 isp1760_writel(
1918 temp & ~(PORT_RWC_BITS | PORT_RESUME),
1919 status_reg);
1920 retval = handshake(priv, status_reg,
1921 PORT_RESUME, 0, 2000 /* 2msec */);
1922 if (retval != 0) {
1923 isp1760_err(priv,
1924 "port %d resume error %d\n",
1925 wIndex + 1, retval);
1926 goto error;
1927 }
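				/* also clear the line-status bits (PORTSC bits 11:10) */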
1928 temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
1929 }
1930 }
1931
1932 /* whoever resets must GetPortStatus to complete it!! */
1933 if ((temp & PORT_RESET)
1934 && time_after_eq(jiffies,
1935 priv->reset_done)) {
1936 status |= 1 << USB_PORT_FEAT_C_RESET;
1937 priv->reset_done = 0;
1938
1939 /* force reset to complete */
1940 isp1760_writel(temp & ~PORT_RESET,
1941 status_reg);
1942 /* REVISIT: some hardware needs 550+ usec to clear
1943 * this bit; seems too long to spin routinely...
1944 */
1945 retval = handshake(priv, status_reg,
1946 PORT_RESET, 0, 750);
1947 if (retval != 0) {
1948 isp1760_err(priv, "port %d reset error %d\n",
1949 wIndex + 1, retval);
1950 goto error;
1951 }
1952
1953 /* see what we found out */
1954 temp = check_reset_complete(priv, wIndex, status_reg,
1955 isp1760_readl(status_reg));
1956 }
1957 /*
1958 * Even if OWNER is set, there's no harm letting khubd
1959 * see the wPortStatus values (they should all be 0 except
1960 * for PORT_POWER anyway).
1961 */
1962
1963 if (temp & PORT_OWNER)
1964 printk(KERN_ERR "Warning: PORT_OWNER is set\n");
1965
1966 if (temp & PORT_CONNECT) {
1967 status |= 1 << USB_PORT_FEAT_CONNECTION;
1968 /* status may be from integrated TT */
1969 status |= ehci_port_speed(priv, temp);
1970 }
1971 if (temp & PORT_PE)
1972 status |= 1 << USB_PORT_FEAT_ENABLE;
1973 if (temp & (PORT_SUSPEND|PORT_RESUME))
1974 status |= 1 << USB_PORT_FEAT_SUSPEND;
1975 if (temp & PORT_RESET)
1976 status |= 1 << USB_PORT_FEAT_RESET;
1977 if (temp & PORT_POWER)
1978 status |= 1 << USB_PORT_FEAT_POWER;
1979
1980 put_unaligned(cpu_to_le32(status), (__le32 *) buf);
1981 break;
1982 case SetHubFeature:
1983 switch (wValue) {
1984 case C_HUB_LOCAL_POWER:
1985 case C_HUB_OVER_CURRENT:
1986 /* no hub-wide feature/status flags */
1987 break;
1988 default:
1989 goto error;
1990 }
1991 break;
1992 case SetPortFeature:
1993 selector = wIndex >> 8;
1994 wIndex &= 0xff;
1995 if (!wIndex || wIndex > ports)
1996 goto error;
1997 wIndex--;
1998 temp = isp1760_readl(status_reg);
1999 if (temp & PORT_OWNER)
2000 break;
2001
2002/* temp &= ~PORT_RWC_BITS; */
2003 switch (wValue) {
2004 case USB_PORT_FEAT_ENABLE:
2005 isp1760_writel(temp | PORT_PE, status_reg);
2006 break;
2007
2008 case USB_PORT_FEAT_SUSPEND:
2009 if ((temp & PORT_PE) == 0
2010 || (temp & PORT_RESET) != 0)
2011 goto error;
2012
2013 isp1760_writel(temp | PORT_SUSPEND, status_reg);
2014 break;
2015 case USB_PORT_FEAT_POWER:
2016 if (HCS_PPC(priv->hcs_params))
2017 isp1760_writel(temp | PORT_POWER,
2018 status_reg);
2019 break;
2020 case USB_PORT_FEAT_RESET:
2021 if (temp & PORT_RESUME)
2022 goto error;
2023 /* line status bits may report this as low speed,
2024 * which can be fine if this root hub has a
2025 * transaction translator built in.
2026 */
2027 if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
2028 && PORT_USB11(temp)) {
2029 temp |= PORT_OWNER;
2030 } else {
2031 temp |= PORT_RESET;
2032 temp &= ~PORT_PE;
2033
2034 /*
2035 * caller must wait, then call GetPortStatus
2036 * usb 2.0 spec says 50 ms resets on root
2037 */
2038 priv->reset_done = jiffies +
2039 msecs_to_jiffies(50);
2040 }
2041 isp1760_writel(temp, status_reg);
2042 break;
2043 default:
2044 goto error;
2045 }
2046 isp1760_readl(hcd->regs + HC_USBCMD);
2047 break;
2048
2049 default:
2050error:
2051 /* "stall" on error */
2052 retval = -EPIPE;
2053 }
2054 spin_unlock_irqrestore(&priv->lock, flags);
2055 return retval;
2056}
2057
2058static void isp1760_endpoint_disable(struct usb_hcd *usb_hcd,
2059 struct usb_host_endpoint *ep)
2060{
2061 struct isp1760_hcd *priv = hcd_to_priv(usb_hcd);
2062 struct isp1760_qh *qh;
2063 struct isp1760_qtd *qtd;
2064	unsigned long flags;
2065
2066 spin_lock_irqsave(&priv->lock, flags);
2067 qh = ep->hcpriv;
2068 if (!qh)
2069 goto out;
2070
2071 ep->hcpriv = NULL;
2072 do {
2073		/* more than one entry might get removed */
2074 if (list_empty(&qh->qtd_list))
2075 break;
2076
2077 qtd = list_first_entry(&qh->qtd_list, struct isp1760_qtd,
2078 qtd_list);
2079
2080 if (qtd->status & URB_ENQUEUED) {
2081
2082 spin_unlock_irqrestore(&priv->lock, flags);
2083 isp1760_urb_dequeue(usb_hcd, qtd->urb, -ECONNRESET);
2084 spin_lock_irqsave(&priv->lock, flags);
2085 } else {
2086 struct urb *urb;
2087
2088 urb = qtd->urb;
2089 clean_up_qtdlist(qtd);
2090 isp1760_urb_done(priv, urb, -ECONNRESET);
2091 }
2092 } while (1);
2093
2094 qh_destroy(qh);
2095	/* remove requests and leak them.
2096	 * ATL requests finish quickly, INT could take a while...
2097	 * The latter should be removed.
2098	 */
2099out:
2100 spin_unlock_irqrestore(&priv->lock, flags);
2101}
2102
2103static int isp1760_get_frame(struct usb_hcd *hcd)
2104{
2105 struct isp1760_hcd *priv = hcd_to_priv(hcd);
2106 u32 fr;
2107
2108 fr = isp1760_readl(hcd->regs + HC_FRINDEX);
2109 return (fr >> 3) % priv->periodic_size;
2110}
2111
2112static void isp1760_stop(struct usb_hcd *hcd)
2113{
2114 struct isp1760_hcd *priv = hcd_to_priv(hcd);
2115
2116 isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1,
2117 NULL, 0);
2118 mdelay(20);
2119
2120 spin_lock_irq(&priv->lock);
2121 ehci_reset(priv);
2122 /* Disable IRQ */
2123 isp1760_writel(HW_DATA_BUS_32BIT, hcd->regs + HC_HW_MODE_CTRL);
2124 spin_unlock_irq(&priv->lock);
2125
2126 isp1760_writel(0, hcd->regs + HC_CONFIGFLAG);
2127}
2128
2129static void isp1760_shutdown(struct usb_hcd *hcd)
2130{
2131 u32 command;
2132
2133 isp1760_stop(hcd);
2134 isp1760_writel(HW_DATA_BUS_32BIT, hcd->regs + HC_HW_MODE_CTRL);
2135
2136 command = isp1760_readl(hcd->regs + HC_USBCMD);
2137 command &= ~CMD_RUN;
2138 isp1760_writel(command, hcd->regs + HC_USBCMD);
2139}
2140
2141static const struct hc_driver isp1760_hc_driver = {
2142 .description = "isp1760-hcd",
2143 .product_desc = "NXP ISP1760 USB Host Controller",
2144 .hcd_priv_size = sizeof(struct isp1760_hcd),
2145 .irq = isp1760_irq,
2146 .flags = HCD_MEMORY | HCD_USB2,
2147 .reset = isp1760_hc_setup,
2148 .start = isp1760_run,
2149 .stop = isp1760_stop,
2150 .shutdown = isp1760_shutdown,
2151 .urb_enqueue = isp1760_urb_enqueue,
2152 .urb_dequeue = isp1760_urb_dequeue,
2153 .endpoint_disable = isp1760_endpoint_disable,
2154 .get_frame_number = isp1760_get_frame,
2155 .hub_status_data = isp1760_hub_status_data,
2156 .hub_control = isp1760_hub_control,
2157};
2158
2159int __init init_kmem_once(void)
2160{
2161 qtd_cachep = kmem_cache_create("isp1760_qtd",
2162 sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
2163 SLAB_MEM_SPREAD, NULL);
2164
2165 if (!qtd_cachep)
2166 return -ENOMEM;
2167
2168 qh_cachep = kmem_cache_create("isp1760_qh", sizeof(struct isp1760_qh),
2169 0, SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
2170
2171 if (!qh_cachep) {
2172 kmem_cache_destroy(qtd_cachep);
2173 return -ENOMEM;
2174 }
2175
2176 return 0;
2177}
2178
2179void deinit_kmem_cache(void)
2180{
2181 kmem_cache_destroy(qtd_cachep);
2182 kmem_cache_destroy(qh_cachep);
2183}
2184
2185struct usb_hcd *isp1760_register(u64 res_start, u64 res_len, int irq,
2186 u64 irqflags, struct device *dev, const char *busname)
2187{
2188 struct usb_hcd *hcd;
2189 struct isp1760_hcd *priv;
2190 int ret;
2191
2192 if (usb_disabled())
2193 return ERR_PTR(-ENODEV);
2194
2195 /* prevent usb-core allocating DMA pages */
2196 dev->dma_mask = NULL;
2197
2198 hcd = usb_create_hcd(&isp1760_hc_driver, dev, dev->bus_id);
2199 if (!hcd)
2200 return ERR_PTR(-ENOMEM);
2201
2202 priv = hcd_to_priv(hcd);
2203 init_memory(priv);
2204 hcd->regs = ioremap(res_start, res_len);
2205 if (!hcd->regs) {
2206 ret = -EIO;
2207 goto err_put;
2208 }
2209
2210 ret = usb_add_hcd(hcd, irq, irqflags);
2211 if (ret)
2212 goto err_unmap;
2213
2214 hcd->irq = irq;
2215 hcd->rsrc_start = res_start;
2216 hcd->rsrc_len = res_len;
2217
2218 return hcd;
2219
2220err_unmap:
2221 iounmap(hcd->regs);
2222
2223err_put:
2224 usb_put_hcd(hcd);
2225
2226 return ERR_PTR(ret);
2227}
2228
2229MODULE_DESCRIPTION("Driver for the ISP1760 USB-controller from NXP");
2230MODULE_AUTHOR("Sebastian Siewior <bigeasy@linuxtronix.de>");
2231MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
new file mode 100644
index 000000000000..3d86d0f6b147
--- /dev/null
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -0,0 +1,206 @@
1#ifndef _ISP1760_HCD_H_
2#define _ISP1760_HCD_H_
3
4/* exports for the bus glue code (isp1760-if.c) */
5struct usb_hcd *isp1760_register(u64 res_start, u64 res_len, int irq,
6 u64 irqflags, struct device *dev, const char *busname);
7int init_kmem_once(void);
8void deinit_kmem_cache(void);
9
10/* EHCI capability registers */
11#define HC_CAPLENGTH 0x00
12#define HC_HCSPARAMS 0x04
13#define HC_HCCPARAMS 0x08
14
15/* EHCI operational registers */
16#define HC_USBCMD 0x20
17#define HC_USBSTS 0x24
18#define HC_FRINDEX 0x2c
19#define HC_CONFIGFLAG 0x60
20#define HC_PORTSC1 0x64
21#define HC_ISO_PTD_DONEMAP_REG 0x130
22#define HC_ISO_PTD_SKIPMAP_REG 0x134
23#define HC_ISO_PTD_LASTPTD_REG 0x138
24#define HC_INT_PTD_DONEMAP_REG 0x140
25#define HC_INT_PTD_SKIPMAP_REG 0x144
26#define HC_INT_PTD_LASTPTD_REG 0x148
27#define HC_ATL_PTD_DONEMAP_REG 0x150
28#define HC_ATL_PTD_SKIPMAP_REG 0x154
29#define HC_ATL_PTD_LASTPTD_REG 0x158
30
31/* Configuration Register */
32#define HC_HW_MODE_CTRL 0x300
33#define ALL_ATX_RESET (1 << 31)
34#define HW_DATA_BUS_32BIT (1 << 8)
35#define HW_DACK_POL_HIGH (1 << 6)
36#define HW_DREQ_POL_HIGH (1 << 5)
37#define HW_INTR_HIGH_ACT (1 << 2)
38#define HW_INTR_EDGE_TRIG (1 << 1)
39#define HW_GLOBAL_INTR_EN (1 << 0)
40
41#define HC_CHIP_ID_REG 0x304
42#define HC_SCRATCH_REG 0x308
43
44#define HC_RESET_REG 0x30c
45#define SW_RESET_RESET_HC (1 << 1)
46#define SW_RESET_RESET_ALL (1 << 0)
47
48#define HC_BUFFER_STATUS_REG 0x334
49#define ATL_BUFFER 0x1
50#define INT_BUFFER 0x2
51#define ISO_BUFFER 0x4
52#define BUFFER_MAP 0x7
53
54#define HC_MEMORY_REG 0x33c
55#define HC_PORT1_CTRL 0x374
56#define PORT1_POWER (3 << 3)
57#define PORT1_INIT1 (1 << 7)
58#define PORT1_INIT2 (1 << 23)
59
60/* Interrupt Register */
61#define HC_INTERRUPT_REG 0x310
62
63#define HC_INTERRUPT_ENABLE 0x314
64#define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT)
65#define FINAL_HW_CONFIG (HW_GLOBAL_INTR_EN | HW_DATA_BUS_32BIT)
66
67#define HC_ISO_INT (1 << 9)
68#define HC_ATL_INT (1 << 8)
69#define HC_INTL_INT (1 << 7)
70#define HC_EOT_INT (1 << 3)
71#define HC_SOT_INT (1 << 1)
72
73#define HC_ISO_IRQ_MASK_OR_REG 0x318
74#define HC_INT_IRQ_MASK_OR_REG 0x31C
75#define HC_ATL_IRQ_MASK_OR_REG 0x320
76#define HC_ISO_IRQ_MASK_AND_REG 0x324
77#define HC_INT_IRQ_MASK_AND_REG 0x328
78#define HC_ATL_IRQ_MASK_AND_REG 0x32C
79
80/* Register sets */
81#define HC_BEGIN_OF_ATL 0x0c00
82#define HC_BEGIN_OF_INT 0x0800
83#define HC_BEGIN_OF_ISO 0x0400
84#define HC_BEGIN_OF_PAYLOAD 0x1000
85
86/* urb state*/
87#define DELETE_URB (0x0008)
88#define NO_TRANSFER_ACTIVE (0xffffffff)
89
90#define ATL_REGS_OFFSET (0xc00)
91#define INT_REGS_OFFSET (0x800)
92
93/* Philips Transfer Descriptor (PTD) */
94struct ptd {
95 __le32 dw0;
96 __le32 dw1;
97 __le32 dw2;
98 __le32 dw3;
99 __le32 dw4;
100 __le32 dw5;
101 __le32 dw6;
102 __le32 dw7;
103};
104
105struct inter_packet_info {
106 void *data_buffer;
107 u32 payload;
108#define PTD_FIRE_NEXT (1 << 0)
109#define PTD_URB_FINISHED (1 << 1)
110 struct urb *urb;
111 struct isp1760_qh *qh;
112 struct isp1760_qtd *qtd;
113};
114
115
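/* Enqueue callback: invoked with priv->lock held (see
 * isp1760_prepare_enqueue()) to hand a freshly prepared qh/qtd pair to the
 * chip; the ATL and INT paths provide different implementations.
 */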
116typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
117 struct isp1760_qtd *qtd);
118
119#define isp1760_info(priv, fmt, args...) \
120 dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
121
122#define isp1760_err(priv, fmt, args...) \
123 dev_err(priv_to_hcd(priv)->self.controller, fmt, ##args)
124
125/* chip memory management */
126struct memory_chunk {
127 unsigned int start;
128 unsigned int size;
129 unsigned int free;
130};
131
132/*
133 * 60kB divided into:
134 * - 32 blocks @ 256 bytes
135 * - 20 blocks @ 1024 bytes
136 * - 4 blocks @ 8192 bytes
137 */
138
139#define BLOCK_1_NUM 32
140#define BLOCK_2_NUM 20
141#define BLOCK_3_NUM 4
142
143#define BLOCK_1_SIZE 256
144#define BLOCK_2_SIZE 1024
145#define BLOCK_3_SIZE 8192
146#define BLOCKS (BLOCK_1_NUM + BLOCK_2_NUM + BLOCK_3_NUM)
147#define PAYLOAD_SIZE 0xf000
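/* note: 32 * 256 + 20 * 1024 + 4 * 8192 = 61440 bytes = 0xf000 = PAYLOAD_SIZE */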
148
149/* I saw some reloads if the pointer was negative */
150#define ISP1760_NULL_POINTER (0x400)
151
152/* ATL */
153/* DW0 */
154#define PTD_VALID 1
155#define PTD_LENGTH(x) (((u32) x) << 3)
156#define PTD_MAXPACKET(x) (((u32) x) << 18)
157#define PTD_MULTI(x) (((u32) x) << 29)
158#define PTD_ENDPOINT(x) (((u32) x) << 31)
159/* DW1 */
160#define PTD_DEVICE_ADDR(x) (((u32) x) << 3)
161#define PTD_PID_TOKEN(x) (((u32) x) << 10)
162#define PTD_TRANS_BULK ((u32) 2 << 12)
163#define PTD_TRANS_INT ((u32) 3 << 12)
164#define PTD_TRANS_SPLIT ((u32) 1 << 14)
165#define PTD_SE_USB_LOSPEED ((u32) 2 << 16)
166#define PTD_PORT_NUM(x) (((u32) x) << 18)
167#define PTD_HUB_NUM(x) (((u32) x) << 25)
168#define PTD_PING(x) (((u32) x) << 26)
169/* DW2 */
170#define PTD_RL_CNT(x) (((u32) x) << 25)
171#define PTD_DATA_START_ADDR(x) (((u32) x) << 8)
172#define BASE_ADDR 0x1000
173/* DW3 */
174#define PTD_CERR(x) (((u32) x) << 23)
175#define PTD_NAC_CNT(x) (((u32) x) << 19)
176#define PTD_ACTIVE ((u32) 1 << 31)
177#define PTD_DATA_TOGGLE(x) (((u32) x) << 25)
178
179#define DW3_HALT_BIT (1 << 30)
180#define DW3_ERROR_BIT (1 << 28)
181#define DW3_QTD_ACTIVE (1 << 31)
182
183#define INT_UNDERRUN (1 << 2)
184#define INT_BABBLE (1 << 1)
185#define INT_EXACT (1 << 0)
186
187#define DW1_GET_PID(x) (((x) >> 10) & 0x3)
188#define PTD_XFERRED_LENGTH(x) ((x) & 0x7fff)
189#define PTD_XFERRED_LENGTH_LO(x) ((x) & 0x7ff)
190
191#define SETUP_PID (2)
192#define IN_PID (1)
193#define OUT_PID (0)
194#define GET_QTD_TOKEN_TYPE(x) ((x) & 0x3)
195
196#define DATA_TOGGLE (1 << 31)
197#define GET_DATA_TOGGLE(x) ((x) >> 31)
198
199/* Errata 1 */
200#define RL_COUNTER (0)
201#define NAK_COUNTER (0)
202#define ERR_COUNTER (2)
203
204#define HC_ATL_PL_SIZE (8192)
205
206#endif
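A minimal, hypothetical sketch of how the payload area described by the BLOCK_* constants and struct memory_chunk above could be laid out. The driver's real init_memory()/alloc_mem()/free_mem() helpers are not part of the hunks shown here, so the function below is purely illustrative; offsets are taken relative to the start of the payload region (HC_BEGIN_OF_PAYLOAD).

/* Hypothetical illustration only -- not the driver's actual code; it relies
 * on struct memory_chunk and the BLOCK_* / PAYLOAD_SIZE definitions above.
 */
static void sketch_init_chunks(struct memory_chunk chunks[BLOCKS])
{
	static const unsigned int num[]  = { BLOCK_1_NUM, BLOCK_2_NUM, BLOCK_3_NUM };
	static const unsigned int size[] = { BLOCK_1_SIZE, BLOCK_2_SIZE, BLOCK_3_SIZE };
	unsigned int offset = 0;	/* relative to HC_BEGIN_OF_PAYLOAD */
	unsigned int g, i, n = 0;

	for (g = 0; g < 3; g++) {
		for (i = 0; i < num[g]; i++, n++) {
			chunks[n].start = offset;
			chunks[n].size = size[g];
			chunks[n].free = 1;
			offset += size[g];
		}
	}
	/* offset now equals PAYLOAD_SIZE (0xf000) */
}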
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
new file mode 100644
index 000000000000..73fb2a38f1e4
--- /dev/null
+++ b/drivers/usb/host/isp1760-if.c
@@ -0,0 +1,298 @@
1/*
2 * Glue code for the ISP1760 driver and bus
3 * Currently there is support for
4 * - OpenFirmware
5 * - PCI
6 *
7 * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
8 *
9 */
10
11#include <linux/usb.h>
12#include <linux/io.h>
13
14#include "../core/hcd.h"
15#include "isp1760-hcd.h"
16
17#ifdef CONFIG_USB_ISP1760_OF
18#include <linux/of.h>
19#include <linux/of_platform.h>
20#endif
21
22#ifdef CONFIG_USB_ISP1760_PCI
23#include <linux/pci.h>
24#endif
25
26#ifdef CONFIG_USB_ISP1760_OF
27static int of_isp1760_probe(struct of_device *dev,
28 const struct of_device_id *match)
29{
30 struct usb_hcd *hcd;
31 struct device_node *dp = dev->node;
32 struct resource *res;
33 struct resource memory;
34 struct of_irq oirq;
35 int virq;
36 u64 res_len;
37 int ret;
38
39 ret = of_address_to_resource(dp, 0, &memory);
40 if (ret)
41 return -ENXIO;
42
43 res = request_mem_region(memory.start, memory.end - memory.start + 1,
44 dev->dev.bus_id);
45 if (!res)
46 return -EBUSY;
47
48 res_len = memory.end - memory.start + 1;
49
50 if (of_irq_map_one(dp, 0, &oirq)) {
51 ret = -ENODEV;
52 goto release_reg;
53 }
54
55 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
56 oirq.size);
57
58 hcd = isp1760_register(memory.start, res_len, virq,
59 IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev->dev.bus_id);
60 if (IS_ERR(hcd)) {
61 ret = PTR_ERR(hcd);
62 goto release_reg;
63 }
64
65 dev_set_drvdata(&dev->dev, hcd);
66 return ret;
67
68release_reg:
69 release_mem_region(memory.start, memory.end - memory.start + 1);
70 return ret;
71}
72
73static int of_isp1760_remove(struct of_device *dev)
74{
75 struct usb_hcd *hcd = dev_get_drvdata(&dev->dev);
76
77 dev_set_drvdata(&dev->dev, NULL);
78
79 usb_remove_hcd(hcd);
80 iounmap(hcd->regs);
81 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
82 usb_put_hcd(hcd);
83 return 0;
84}
85
86static struct of_device_id of_isp1760_match[] = {
87 {
88 .compatible = "nxp,usb-isp1760",
89 },
90 { },
91};
92MODULE_DEVICE_TABLE(of, of_isp1760_match);
93
94static struct of_platform_driver isp1760_of_driver = {
95 .name = "nxp-isp1760",
96 .match_table = of_isp1760_match,
97 .probe = of_isp1760_probe,
98 .remove = of_isp1760_remove,
99};
100#endif
101
102#ifdef CONFIG_USB_ISP1760_PCI
103static u32 nxp_pci_io_base;
104static u32 iolength;
105static u32 pci_mem_phy0;
106static u32 length;
107static u8 *chip_addr;
108static u8 *iobase;
109
110static int __devinit isp1761_pci_probe(struct pci_dev *dev,
111 const struct pci_device_id *id)
112{
113 u8 latency, limit;
114 __u32 reg_data;
115 int retry_count;
116 int length;
117 int status = 1;
118 struct usb_hcd *hcd;
119
120 if (usb_disabled())
121 return -ENODEV;
122
123 if (pci_enable_device(dev) < 0)
124 return -ENODEV;
125
126 if (!dev->irq)
127 return -ENODEV;
128
129	/* Grab the PLX PCI memory-mapped port start address we need */
130 nxp_pci_io_base = pci_resource_start(dev, 0);
131 iolength = pci_resource_len(dev, 0);
132
133 if (!request_mem_region(nxp_pci_io_base, iolength, "ISP1761 IO MEM")) {
134 printk(KERN_ERR "request region #1\n");
135 return -EBUSY;
136 }
137
138 iobase = ioremap_nocache(nxp_pci_io_base, iolength);
139 if (!iobase) {
140 printk(KERN_ERR "ioremap #1\n");
141 release_mem_region(nxp_pci_io_base, iolength);
142 return -ENOMEM;
143 }
144 /* Grab the PLX PCI shared memory of the ISP 1761 we need */
145 pci_mem_phy0 = pci_resource_start(dev, 3);
146 length = pci_resource_len(dev, 3);
147
148 if (length < 0xffff) {
149 printk(KERN_ERR "memory length for this resource is less than "
150 "required\n");
151 release_mem_region(nxp_pci_io_base, iolength);
152 iounmap(iobase);
153 return -ENOMEM;
154 }
155
156 if (!request_mem_region(pci_mem_phy0, length, "ISP-PCI")) {
157 printk(KERN_ERR "host controller already in use\n");
158 release_mem_region(nxp_pci_io_base, iolength);
159 iounmap(iobase);
160 return -EBUSY;
161 }
162
163 /* bad pci latencies can contribute to overruns */
164 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &latency);
165 if (latency) {
166 pci_read_config_byte(dev, PCI_MAX_LAT, &limit);
167 if (limit && limit < latency)
168 pci_write_config_byte(dev, PCI_LATENCY_TIMER, limit);
169 }
170
171	/* Check whether we can access the Scratch Register of the
172	 * Host Controller. The initial PCI access is retried until
173	 * local init for the PCI bridge is completed.
174	 */
175 retry_count = 20;
176 reg_data = 0;
177 while ((reg_data != 0xFACE) && retry_count) {
178		/* by default the host is in 16-bit mode, so
179		 * I/O operations at this stage must be 16 bit
180		 */
181 writel(0xface, chip_addr + HC_SCRATCH_REG);
182 udelay(100);
183 reg_data = readl(chip_addr + HC_SCRATCH_REG);
184 retry_count--;
185 }
186
187	/* Host Controller presence is detected by writing to the scratch
188	 * register and reading back to check whether the contents match.
189	 */
190 if (reg_data != 0xFACE) {
191 err("scratch register mismatch %x", reg_data);
192 goto clean;
193 }
194
195 pci_set_master(dev);
196
197 status = readl(iobase + 0x68);
198 status |= 0x900;
199 writel(status, iobase + 0x68);
200
201 dev->dev.dma_mask = NULL;
202 hcd = isp1760_register(pci_mem_phy0, length, dev->irq,
203 IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev->dev.bus_id);
204 pci_set_drvdata(dev, hcd);
205 if (!hcd)
206 return 0;
207clean:
208 status = -ENODEV;
209 iounmap(iobase);
210 release_mem_region(pci_mem_phy0, length);
211 release_mem_region(nxp_pci_io_base, iolength);
212 return status;
213}
214static void isp1761_pci_remove(struct pci_dev *dev)
215{
216 struct usb_hcd *hcd;
217
218 hcd = pci_get_drvdata(dev);
219
220 usb_remove_hcd(hcd);
221 iounmap(hcd->regs);
222 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
223 usb_put_hcd(hcd);
224
225 pci_disable_device(dev);
226
227 iounmap(iobase);
228 iounmap(chip_addr);
229
230 release_mem_region(nxp_pci_io_base, iolength);
231 release_mem_region(pci_mem_phy0, length);
232}
233
234static void isp1761_pci_shutdown(struct pci_dev *dev)
235{
236	printk(KERN_ERR "isp1761_pci_shutdown\n");
237}
238
239static const struct pci_device_id isp1760_plx [] = { {
240	/* match the PLX PCI bridge that fronts the ISP1760 (bridge class, not an EHCI controller) */
241 PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_OTHER << 8) | (0x06 << 16)), ~0),
242 .driver_data = 0,
243},
244{ /* end: all zeroes */ }
245};
246MODULE_DEVICE_TABLE(pci, isp1760_plx);
247
248static struct pci_driver isp1761_pci_driver = {
249 .name = "isp1760",
250 .id_table = isp1760_plx,
251 .probe = isp1761_pci_probe,
252 .remove = isp1761_pci_remove,
253 .shutdown = isp1761_pci_shutdown,
254};
255#endif
256
257static int __init isp1760_init(void)
258{
259 int ret;
260
261 init_kmem_once();
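	/* XXX init_kmem_once() can return -ENOMEM; its result should
	 * probably be checked before the bus drivers are registered.
	 */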
262
263#ifdef CONFIG_USB_ISP1760_OF
264 ret = of_register_platform_driver(&isp1760_of_driver);
265 if (ret) {
266 deinit_kmem_cache();
267 return ret;
268 }
269#endif
270#ifdef CONFIG_USB_ISP1760_PCI
271 ret = pci_register_driver(&isp1761_pci_driver);
272 if (ret)
273 goto unreg_of;
274#endif
275 return ret;
276
277#ifdef CONFIG_USB_ISP1760_PCI
278unreg_of:
279#endif
280#ifdef CONFIG_USB_ISP1760_OF
281 of_unregister_platform_driver(&isp1760_of_driver);
282#endif
283 deinit_kmem_cache();
284 return ret;
285}
286module_init(isp1760_init);
287
288static void __exit isp1760_exit(void)
289{
290#ifdef CONFIG_USB_ISP1760_OF
291 of_unregister_platform_driver(&isp1760_of_driver);
292#endif
293#ifdef CONFIG_USB_ISP1760_PCI
294 pci_unregister_driver(&isp1761_pci_driver);
295#endif
296 deinit_kmem_cache();
297}
298module_exit(isp1760_exit);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 5be3bb3e6a9d..79a78029f896 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -613,7 +613,7 @@ static void start_hnp(struct ohci_hcd *ohci);
 static inline int root_port_reset (struct ohci_hcd *ohci, unsigned port)
 {
 	__hc32 __iomem *portstat = &ohci->regs->roothub.portstatus [port];
-	u32	temp;
+	u32	temp = 0;
 	u16	now = ohci_readl(ohci, &ohci->regs->fmnumber);
 	u16	reset_done = now + PORT_RESET_MSEC;
 	int	limit_1 = DIV_ROUND_UP(PORT_RESET_MSEC, PORT_RESET_HW_MSEC);
@@ -736,14 +736,14 @@ static int ohci_hub_control (
 		break;
 	case GetHubStatus:
 		temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE);
-		put_unaligned(cpu_to_le32 (temp), (__le32 *) buf);
+		put_unaligned_le32(temp, buf);
 		break;
 	case GetPortStatus:
 		if (!wIndex || wIndex > ports)
 			goto error;
 		wIndex--;
 		temp = roothub_portstatus (ohci, wIndex);
-		put_unaligned(cpu_to_le32 (temp), (__le32 *) buf);
+		put_unaligned_le32(temp, buf);
 
 #ifndef	OHCI_VERBOSE_DEBUG
 	if (*(u16*)(buf+2))	/* only if wPortChange is interesting */
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 3fd7a0c12078..426575247b23 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1506,15 +1506,7 @@ static const char proc_filename[] = "driver/sl811h";
 
 static void create_debug_file(struct sl811 *sl811)
 {
-	struct proc_dir_entry *pde;
-
-	pde = create_proc_entry(proc_filename, 0, NULL);
-	if (pde == NULL)
-		return;
-
-	pde->proc_fops = &proc_ops;
-	pde->data = sl811;
-	sl811->pde = pde;
+	sl811->pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, sl811);
 }
 
 static void remove_debug_file(struct sl811 *sl811)
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index d3e0d8aa3980..3a7bfe7a8874 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -234,7 +234,7 @@ static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
 	return 0;
 }
 
-static int remote_wakeup_is_broken(struct uhci_hcd *uhci)
+static int global_suspend_mode_is_broken(struct uhci_hcd *uhci)
 {
 	int port;
 	const char *sys_info;
@@ -261,27 +261,60 @@ __releases(uhci->lock)
 __acquires(uhci->lock)
 {
 	int auto_stop;
-	int int_enable, egsm_enable;
+	int int_enable, egsm_enable, wakeup_enable;
 	struct usb_device *rhdev = uhci_to_hcd(uhci)->self.root_hub;
 
 	auto_stop = (new_state == UHCI_RH_AUTO_STOPPED);
 	dev_dbg(&rhdev->dev, "%s%s\n", __func__,
 			(auto_stop ? " (auto-stop)" : ""));
 
-	/* Enable resume-detect interrupts if they work.
-	 * Then enter Global Suspend mode if _it_ works, still configured.
-	 */
-	egsm_enable = USBCMD_EGSM;
-	uhci->working_RD = 1;
-	int_enable = USBINTR_RESUME;
-	if (remote_wakeup_is_broken(uhci))
-		egsm_enable = 0;
-	if (resume_detect_interrupts_are_broken(uhci) || !egsm_enable ||
-#ifdef CONFIG_PM
-			(!auto_stop && !rhdev->do_remote_wakeup) ||
-#endif
-			(auto_stop && !device_may_wakeup(&rhdev->dev)))
-		uhci->working_RD = int_enable = 0;
+	/* Start off by assuming Resume-Detect interrupts and EGSM work
+	 * and that remote wakeups should be enabled.
+	 */
+	egsm_enable = USBCMD_EGSM;
+	uhci->RD_enable = 1;
+	int_enable = USBINTR_RESUME;
+	wakeup_enable = 1;
+
+	/* In auto-stop mode wakeups must always be detected, but
+	 * Resume-Detect interrupts may be prohibited.  (In the absence
+	 * of CONFIG_PM, they are always disallowed.)
+	 */
+	if (auto_stop) {
+		if (!device_may_wakeup(&rhdev->dev))
+			int_enable = 0;
+
+	/* In bus-suspend mode wakeups may be disabled, but if they are
+	 * allowed then so are Resume-Detect interrupts.
+	 */
+	} else {
+#ifdef CONFIG_PM
+		if (!rhdev->do_remote_wakeup)
+			wakeup_enable = 0;
+#endif
+	}
+
+	/* EGSM causes the root hub to echo a 'K' signal (resume) out any
+	 * port which requests a remote wakeup.  According to the USB spec,
+	 * every hub is supposed to do this.  But if we are ignoring
+	 * remote-wakeup requests anyway then there's no point to it.
+	 * We also shouldn't enable EGSM if it's broken.
+	 */
+	if (!wakeup_enable || global_suspend_mode_is_broken(uhci))
+		egsm_enable = 0;
+
+	/* If we're ignoring wakeup events then there's no reason to
+	 * enable Resume-Detect interrupts.  We also shouldn't enable
+	 * them if they are broken or disallowed.
+	 *
+	 * This logic may lead us to enabling RD but not EGSM.  The UHCI
+	 * spec foolishly says that RD works only when EGSM is on, but
+	 * there's no harm in enabling it anyway -- perhaps some chips
+	 * will implement it!
+	 */
+	if (!wakeup_enable || resume_detect_interrupts_are_broken(uhci) ||
+			!int_enable)
+		uhci->RD_enable = int_enable = 0;
 
 	outw(int_enable, uhci->io_addr + USBINTR);
 	outw(egsm_enable | USBCMD_CF, uhci->io_addr + USBCMD);
@@ -308,7 +341,11 @@ __acquires(uhci->lock)
 
 	uhci->rh_state = new_state;
 	uhci->is_stopped = UHCI_IS_STOPPED;
-	uhci_to_hcd(uhci)->poll_rh = !int_enable;
+
+	/* If interrupts don't work and remote wakeup is enabled then
+	 * the suspended root hub needs to be polled.
+	 */
+	uhci_to_hcd(uhci)->poll_rh = (!int_enable && wakeup_enable);
 
 	uhci_scan_schedule(uhci);
 	uhci_fsbr_off(uhci);
@@ -344,9 +381,12 @@ __acquires(uhci->lock)
 	 * for 20 ms.
 	 */
 	if (uhci->rh_state == UHCI_RH_SUSPENDED) {
+		unsigned egsm;
+
+		/* Keep EGSM on if it was set before */
+		egsm = inw(uhci->io_addr + USBCMD) & USBCMD_EGSM;
 		uhci->rh_state = UHCI_RH_RESUMING;
-		outw(USBCMD_FGR | USBCMD_EGSM | USBCMD_CF,
-				uhci->io_addr + USBCMD);
+		outw(USBCMD_FGR | USBCMD_CF | egsm, uhci->io_addr + USBCMD);
 		spin_unlock_irq(&uhci->lock);
 		msleep(20);
 		spin_lock_irq(&uhci->lock);
@@ -801,8 +841,10 @@ static int uhci_pci_resume(struct usb_hcd *hcd)
 
 	spin_unlock_irq(&uhci->lock);
 
-	if (!uhci->working_RD) {
-		/* Suspended root hub needs to be polled */
+	/* If interrupts don't work and remote wakeup is enabled then
+	 * the suspended root hub needs to be polled.
+	 */
+	if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup) {
 		hcd->poll_rh = 1;
 		usb_hcd_poll_rh_status(hcd);
 	}
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 340d6ed3e6e9..7d01c5677f92 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -400,8 +400,9 @@ struct uhci_hcd {
 	unsigned int scan_in_progress:1;	/* Schedule scan is running */
 	unsigned int need_rescan:1;		/* Redo the schedule scan */
 	unsigned int dead:1;			/* Controller has died */
-	unsigned int working_RD:1;		/* Suspended root hub doesn't
-						   need to be polled */
+	unsigned int RD_enable:1;		/* Suspended root hub with
+						   Resume-Detect interrupts
+						   enabled */
 	unsigned int is_initialized:1;		/* Data structure is usable */
 	unsigned int fsbr_is_on:1;		/* FSBR is turned on */
 	unsigned int fsbr_is_wanted:1;		/* Does any URB want FSBR? */
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 11580e81e2c6..7aafd53fbcab 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -148,7 +148,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in
 
 /* Structure to hold all of our device specific stuff */
 struct ld_usb {
-	struct semaphore	sem;		/* locks this structure */
+	struct mutex		mutex;		/* locks this structure */
 	struct usb_interface*	intf;		/* save off the usb interface pointer */
 
 	int			open_count;	/* number of times this port has been opened */
@@ -319,7 +319,7 @@ static int ld_usb_open(struct inode *inode, struct file *file)
 		return -ENODEV;
 
 	/* lock this device */
-	if (down_interruptible(&dev->sem))
+	if (mutex_lock_interruptible(&dev->mutex))
 		return -ERESTARTSYS;
 
 	/* allow opening only once */
@@ -358,7 +358,7 @@ static int ld_usb_open(struct inode *inode, struct file *file)
 	file->private_data = dev;
 
 unlock_exit:
-	up(&dev->sem);
+	mutex_unlock(&dev->mutex);
 
 	return retval;
 }
@@ -378,7 +378,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
 		goto exit;
 	}
 
-	if (down_interruptible(&dev->sem)) {
+	if (mutex_lock_interruptible(&dev->mutex)) {
 		retval = -ERESTARTSYS;
 		goto exit;
 	}
@@ -389,7 +389,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
 	}
 	if (dev->intf == NULL) {
 		/* the device was unplugged before the file was released */
-		up(&dev->sem);
+		mutex_unlock(&dev->mutex);
 		/* unlock here as ld_usb_delete frees dev */
 		ld_usb_delete(dev);
 		goto exit;
@@ -402,7 +402,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
 	dev->open_count = 0;
 
 unlock_exit:
-	up(&dev->sem);
+	mutex_unlock(&dev->mutex);
 
 exit:
 	return retval;
@@ -448,7 +448,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 		goto exit;
 
 	/* lock this object */
-	if (down_interruptible(&dev->sem)) {
+	if (mutex_lock_interruptible(&dev->mutex)) {
 		retval = -ERESTARTSYS;
 		goto exit;
 	}
@@ -505,7 +505,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 
 unlock_exit:
 	/* unlock the device */
-	up(&dev->sem);
+	mutex_unlock(&dev->mutex);
 
 exit:
 	return retval;
@@ -528,7 +528,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
 		goto exit;
 
 	/* lock this object */
-	if (down_interruptible(&dev->sem)) {
+	if (mutex_lock_interruptible(&dev->mutex)) {
 		retval = -ERESTARTSYS;
 		goto exit;
 	}
@@ -602,7 +602,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
 
 unlock_exit:
 	/* unlock the device */
-	up(&dev->sem);
+	mutex_unlock(&dev->mutex);
 
 exit:
 	return retval;
@@ -651,7 +651,7 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
 		dev_err(&intf->dev, "Out of memory\n");
 		goto exit;
 	}
-	init_MUTEX(&dev->sem);
+	mutex_init(&dev->mutex);
 	spin_lock_init(&dev->rbsl);
 	dev->intf = intf;
 	init_waitqueue_head(&dev->read_wait);
@@ -765,15 +765,15 @@ static void ld_usb_disconnect(struct usb_interface *intf)
 	/* give back our minor */
 	usb_deregister_dev(intf, &ld_usb_class);
 
-	down(&dev->sem);
+	mutex_lock(&dev->mutex);
 
 	/* if the device is not opened, then we clean up right now */
 	if (!dev->open_count) {
-		up(&dev->sem);
+		mutex_unlock(&dev->mutex);
 		ld_usb_delete(dev);
 	} else {
 		dev->intf = NULL;
-		up(&dev->sem);
+		mutex_unlock(&dev->mutex);
 	}
 
 	dev_info(&intf->dev, "LD USB Device #%d now disconnected\n",
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index a51983854ca0..742be3c35947 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -79,30 +79,10 @@ static struct usb_device *testdev_to_usbdev (struct usbtest_dev *test)
79/* set up all urbs so they can be used with either bulk or interrupt */ 79/* set up all urbs so they can be used with either bulk or interrupt */
80#define INTERRUPT_RATE 1 /* msec/transfer */ 80#define INTERRUPT_RATE 1 /* msec/transfer */
81 81
82#define xprintk(tdev,level,fmt,args...) \ 82#define ERROR(tdev, fmt, args...) \
83 dev_printk(level , &(tdev)->intf->dev , fmt , ## args) 83 dev_err(&(tdev)->intf->dev , fmt , ## args)
84 84#define WARN(tdev, fmt, args...) \
85#ifdef DEBUG 85 dev_warn(&(tdev)->intf->dev , fmt , ## args)
86#define DBG(dev,fmt,args...) \
87 xprintk(dev , KERN_DEBUG , fmt , ## args)
88#else
89#define DBG(dev,fmt,args...) \
90 do { } while (0)
91#endif /* DEBUG */
92
93#ifdef VERBOSE
94#define VDBG DBG
95#else
96#define VDBG(dev,fmt,args...) \
97 do { } while (0)
98#endif /* VERBOSE */
99
100#define ERROR(dev,fmt,args...) \
101 xprintk(dev , KERN_ERR , fmt , ## args)
102#define WARN(dev,fmt,args...) \
103 xprintk(dev , KERN_WARNING , fmt , ## args)
104#define INFO(dev,fmt,args...) \
105 xprintk(dev , KERN_INFO , fmt , ## args)
106 86
107/*-------------------------------------------------------------------------*/ 87/*-------------------------------------------------------------------------*/
108 88
@@ -236,7 +216,7 @@ static struct urb *simple_alloc_urb (
236 216
237static unsigned pattern = 0; 217static unsigned pattern = 0;
238module_param (pattern, uint, S_IRUGO); 218module_param (pattern, uint, S_IRUGO);
239// MODULE_PARM_DESC (pattern, "i/o pattern (0 == zeroes)"); 219MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
240 220
241static inline void simple_fill_buf (struct urb *urb) 221static inline void simple_fill_buf (struct urb *urb)
242{ 222{
@@ -257,7 +237,7 @@ static inline void simple_fill_buf (struct urb *urb)
257 } 237 }
258} 238}
259 239
260static inline int simple_check_buf (struct urb *urb) 240static inline int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
261{ 241{
262 unsigned i; 242 unsigned i;
263 u8 expected; 243 u8 expected;
@@ -285,7 +265,7 @@ static inline int simple_check_buf (struct urb *urb)
285 } 265 }
286 if (*buf == expected) 266 if (*buf == expected)
287 continue; 267 continue;
288 dbg ("buf[%d] = %d (not %d)", i, *buf, expected); 268 ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
289 return -EINVAL; 269 return -EINVAL;
290 } 270 }
291 return 0; 271 return 0;
@@ -299,6 +279,7 @@ static void simple_free_urb (struct urb *urb)
299} 279}
300 280
301static int simple_io ( 281static int simple_io (
282 struct usbtest_dev *tdev,
302 struct urb *urb, 283 struct urb *urb,
303 int iterations, 284 int iterations,
304 int vary, 285 int vary,
@@ -324,7 +305,7 @@ static int simple_io (
324 retval = urb->status; 305 retval = urb->status;
325 urb->dev = udev; 306 urb->dev = udev;
326 if (retval == 0 && usb_pipein (urb->pipe)) 307 if (retval == 0 && usb_pipein (urb->pipe))
327 retval = simple_check_buf (urb); 308 retval = simple_check_buf(tdev, urb);
328 309
329 if (vary) { 310 if (vary) {
330 int len = urb->transfer_buffer_length; 311 int len = urb->transfer_buffer_length;
@@ -341,7 +322,7 @@ static int simple_io (
341 urb->transfer_buffer_length = max; 322 urb->transfer_buffer_length = max;
342 323
343 if (expected != retval) 324 if (expected != retval)
344 dev_dbg (&udev->dev, 325 dev_err(&udev->dev,
345 "%s failed, iterations left %d, status %d (not %d)\n", 326 "%s failed, iterations left %d, status %d (not %d)\n",
346 label, iterations, retval, expected); 327 label, iterations, retval, expected);
347 return retval; 328 return retval;
@@ -357,7 +338,7 @@ static int simple_io (
357static void free_sglist (struct scatterlist *sg, int nents) 338static void free_sglist (struct scatterlist *sg, int nents)
358{ 339{
359 unsigned i; 340 unsigned i;
360 341
361 if (!sg) 342 if (!sg)
362 return; 343 return;
363 for (i = 0; i < nents; i++) { 344 for (i = 0; i < nents; i++) {
@@ -415,7 +396,7 @@ alloc_sglist (int nents, int max, int vary)
415} 396}
416 397
417static int perform_sglist ( 398static int perform_sglist (
418 struct usb_device *udev, 399 struct usbtest_dev *tdev,
419 unsigned iterations, 400 unsigned iterations,
420 int pipe, 401 int pipe,
421 struct usb_sg_request *req, 402 struct usb_sg_request *req,
@@ -423,6 +404,7 @@ static int perform_sglist (
423 int nents 404 int nents
424) 405)
425{ 406{
407 struct usb_device *udev = testdev_to_usbdev(tdev);
426 int retval = 0; 408 int retval = 0;
427 409
428 while (retval == 0 && iterations-- > 0) { 410 while (retval == 0 && iterations-- > 0) {
@@ -431,7 +413,7 @@ static int perform_sglist (
431 ? (INTERRUPT_RATE << 3) 413 ? (INTERRUPT_RATE << 3)
432 : INTERRUPT_RATE, 414 : INTERRUPT_RATE,
433 sg, nents, 0, GFP_KERNEL); 415 sg, nents, 0, GFP_KERNEL);
434 416
435 if (retval) 417 if (retval)
436 break; 418 break;
437 usb_sg_wait (req); 419 usb_sg_wait (req);
@@ -446,7 +428,8 @@ static int perform_sglist (
446 // failure if retval is as we expected ... 428 // failure if retval is as we expected ...
447 429
448 if (retval) 430 if (retval)
449 dbg ("perform_sglist failed, iterations left %d, status %d", 431 ERROR(tdev, "perform_sglist failed, "
432 "iterations left %d, status %d\n",
450 iterations, retval); 433 iterations, retval);
451 return retval; 434 return retval;
452} 435}
@@ -505,28 +488,28 @@ static int set_altsetting (struct usbtest_dev *dev, int alternate)
505 alternate); 488 alternate);
506} 489}
507 490
508static int is_good_config (char *buf, int len) 491static int is_good_config(struct usbtest_dev *tdev, int len)
509{ 492{
510 struct usb_config_descriptor *config; 493 struct usb_config_descriptor *config;
511 494
512 if (len < sizeof *config) 495 if (len < sizeof *config)
513 return 0; 496 return 0;
514 config = (struct usb_config_descriptor *) buf; 497 config = (struct usb_config_descriptor *) tdev->buf;
515 498
516 switch (config->bDescriptorType) { 499 switch (config->bDescriptorType) {
517 case USB_DT_CONFIG: 500 case USB_DT_CONFIG:
518 case USB_DT_OTHER_SPEED_CONFIG: 501 case USB_DT_OTHER_SPEED_CONFIG:
519 if (config->bLength != 9) { 502 if (config->bLength != 9) {
520 dbg ("bogus config descriptor length"); 503 ERROR(tdev, "bogus config descriptor length\n");
521 return 0; 504 return 0;
522 } 505 }
523 /* this bit 'must be 1' but often isn't */ 506 /* this bit 'must be 1' but often isn't */
524 if (!realworld && !(config->bmAttributes & 0x80)) { 507 if (!realworld && !(config->bmAttributes & 0x80)) {
525 dbg ("high bit of config attributes not set"); 508 ERROR(tdev, "high bit of config attributes not set\n");
526 return 0; 509 return 0;
527 } 510 }
528 if (config->bmAttributes & 0x1f) { /* reserved == 0 */ 511 if (config->bmAttributes & 0x1f) { /* reserved == 0 */
529 dbg ("reserved config bits set"); 512 ERROR(tdev, "reserved config bits set\n");
530 return 0; 513 return 0;
531 } 514 }
532 break; 515 break;
@@ -538,7 +521,7 @@ static int is_good_config (char *buf, int len)
538 return 1; 521 return 1;
539 if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */ 522 if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
540 return 1; 523 return 1;
541 dbg ("bogus config descriptor read size"); 524 ERROR(tdev, "bogus config descriptor read size\n");
542 return 0; 525 return 0;
543} 526}
544 527
@@ -571,7 +554,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
571 /* 9.2.3 constrains the range here */ 554 /* 9.2.3 constrains the range here */
572 alt = iface->altsetting [i].desc.bAlternateSetting; 555 alt = iface->altsetting [i].desc.bAlternateSetting;
573 if (alt < 0 || alt >= iface->num_altsetting) { 556 if (alt < 0 || alt >= iface->num_altsetting) {
574 dev_dbg (&iface->dev, 557 dev_err(&iface->dev,
575 "invalid alt [%d].bAltSetting = %d\n", 558 "invalid alt [%d].bAltSetting = %d\n",
576 i, alt); 559 i, alt);
577 } 560 }
@@ -583,7 +566,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
583 /* [9.4.10] set_interface */ 566 /* [9.4.10] set_interface */
584 retval = set_altsetting (dev, alt); 567 retval = set_altsetting (dev, alt);
585 if (retval) { 568 if (retval) {
586 dev_dbg (&iface->dev, "can't set_interface = %d, %d\n", 569 dev_err(&iface->dev, "can't set_interface = %d, %d\n",
587 alt, retval); 570 alt, retval);
588 return retval; 571 return retval;
589 } 572 }
@@ -591,7 +574,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
591 /* [9.4.4] get_interface always works */ 574 /* [9.4.4] get_interface always works */
592 retval = get_altsetting (dev); 575 retval = get_altsetting (dev);
593 if (retval != alt) { 576 if (retval != alt) {
594 dev_dbg (&iface->dev, "get alt should be %d, was %d\n", 577 dev_err(&iface->dev, "get alt should be %d, was %d\n",
595 alt, retval); 578 alt, retval);
596 return (retval < 0) ? retval : -EDOM; 579 return (retval < 0) ? retval : -EDOM;
597 } 580 }
@@ -611,7 +594,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
611 USB_DIR_IN | USB_RECIP_DEVICE, 594 USB_DIR_IN | USB_RECIP_DEVICE,
612 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT); 595 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
613 if (retval != 1 || dev->buf [0] != expected) { 596 if (retval != 1 || dev->buf [0] != expected) {
614 dev_dbg (&iface->dev, "get config --> %d %d (1 %d)\n", 597 dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
615 retval, dev->buf[0], expected); 598 retval, dev->buf[0], expected);
616 return (retval < 0) ? retval : -EDOM; 599 return (retval < 0) ? retval : -EDOM;
617 } 600 }
@@ -621,7 +604,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
621 retval = usb_get_descriptor (udev, USB_DT_DEVICE, 0, 604 retval = usb_get_descriptor (udev, USB_DT_DEVICE, 0,
622 dev->buf, sizeof udev->descriptor); 605 dev->buf, sizeof udev->descriptor);
623 if (retval != sizeof udev->descriptor) { 606 if (retval != sizeof udev->descriptor) {
624 dev_dbg (&iface->dev, "dev descriptor --> %d\n", retval); 607 dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
625 return (retval < 0) ? retval : -EDOM; 608 return (retval < 0) ? retval : -EDOM;
626 } 609 }
627 610
@@ -629,8 +612,8 @@ static int ch9_postconfig (struct usbtest_dev *dev)
629 for (i = 0; i < udev->descriptor.bNumConfigurations; i++) { 612 for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
630 retval = usb_get_descriptor (udev, USB_DT_CONFIG, i, 613 retval = usb_get_descriptor (udev, USB_DT_CONFIG, i,
631 dev->buf, TBUF_SIZE); 614 dev->buf, TBUF_SIZE);
632 if (!is_good_config (dev->buf, retval)) { 615 if (!is_good_config(dev, retval)) {
633 dev_dbg (&iface->dev, 616 dev_err(&iface->dev,
634 "config [%d] descriptor --> %d\n", 617 "config [%d] descriptor --> %d\n",
635 i, retval); 618 i, retval);
636 return (retval < 0) ? retval : -EDOM; 619 return (retval < 0) ? retval : -EDOM;
@@ -650,14 +633,14 @@ static int ch9_postconfig (struct usbtest_dev *dev)
650 sizeof (struct usb_qualifier_descriptor)); 633 sizeof (struct usb_qualifier_descriptor));
651 if (retval == -EPIPE) { 634 if (retval == -EPIPE) {
652 if (udev->speed == USB_SPEED_HIGH) { 635 if (udev->speed == USB_SPEED_HIGH) {
653 dev_dbg (&iface->dev, 636 dev_err(&iface->dev,
654 "hs dev qualifier --> %d\n", 637 "hs dev qualifier --> %d\n",
655 retval); 638 retval);
656 return (retval < 0) ? retval : -EDOM; 639 return (retval < 0) ? retval : -EDOM;
657 } 640 }
658 /* usb2.0 but not high-speed capable; fine */ 641 /* usb2.0 but not high-speed capable; fine */
659 } else if (retval != sizeof (struct usb_qualifier_descriptor)) { 642 } else if (retval != sizeof (struct usb_qualifier_descriptor)) {
660 dev_dbg (&iface->dev, "dev qualifier --> %d\n", retval); 643 dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
661 return (retval < 0) ? retval : -EDOM; 644 return (retval < 0) ? retval : -EDOM;
662 } else 645 } else
663 d = (struct usb_qualifier_descriptor *) dev->buf; 646 d = (struct usb_qualifier_descriptor *) dev->buf;
@@ -669,8 +652,8 @@ static int ch9_postconfig (struct usbtest_dev *dev)
669 retval = usb_get_descriptor (udev, 652 retval = usb_get_descriptor (udev,
670 USB_DT_OTHER_SPEED_CONFIG, i, 653 USB_DT_OTHER_SPEED_CONFIG, i,
671 dev->buf, TBUF_SIZE); 654 dev->buf, TBUF_SIZE);
672 if (!is_good_config (dev->buf, retval)) { 655 if (!is_good_config(dev, retval)) {
673 dev_dbg (&iface->dev, 656 dev_err(&iface->dev,
674 "other speed config --> %d\n", 657 "other speed config --> %d\n",
675 retval); 658 retval);
676 return (retval < 0) ? retval : -EDOM; 659 return (retval < 0) ? retval : -EDOM;
@@ -683,7 +666,7 @@ static int ch9_postconfig (struct usbtest_dev *dev)
683 /* [9.4.5] get_status always works */ 666 /* [9.4.5] get_status always works */
684 retval = usb_get_status (udev, USB_RECIP_DEVICE, 0, dev->buf); 667 retval = usb_get_status (udev, USB_RECIP_DEVICE, 0, dev->buf);
685 if (retval != 2) { 668 if (retval != 2) {
686 dev_dbg (&iface->dev, "get dev status --> %d\n", retval); 669 dev_err(&iface->dev, "get dev status --> %d\n", retval);
687 return (retval < 0) ? retval : -EDOM; 670 return (retval < 0) ? retval : -EDOM;
688 } 671 }
689 672
@@ -693,11 +676,11 @@ static int ch9_postconfig (struct usbtest_dev *dev)
693 retval = usb_get_status (udev, USB_RECIP_INTERFACE, 676 retval = usb_get_status (udev, USB_RECIP_INTERFACE,
694 iface->altsetting [0].desc.bInterfaceNumber, dev->buf); 677 iface->altsetting [0].desc.bInterfaceNumber, dev->buf);
695 if (retval != 2) { 678 if (retval != 2) {
696 dev_dbg (&iface->dev, "get interface status --> %d\n", retval); 679 dev_err(&iface->dev, "get interface status --> %d\n", retval);
697 return (retval < 0) ? retval : -EDOM; 680 return (retval < 0) ? retval : -EDOM;
698 } 681 }
699 // FIXME get status for each endpoint in the interface 682 // FIXME get status for each endpoint in the interface
700 683
701 return 0; 684 return 0;
702} 685}
703 686
@@ -752,8 +735,9 @@ static void ctrl_complete (struct urb *urb)
752 */ 735 */
753 if (subcase->number > 0) { 736 if (subcase->number > 0) {
754 if ((subcase->number - ctx->last) != 1) { 737 if ((subcase->number - ctx->last) != 1) {
755 dbg ("subcase %d completed out of order, last %d", 738 ERROR(ctx->dev,
756 subcase->number, ctx->last); 739 "subcase %d completed out of order, last %d\n",
740 subcase->number, ctx->last);
757 status = -EDOM; 741 status = -EDOM;
758 ctx->last = subcase->number; 742 ctx->last = subcase->number;
759 goto error; 743 goto error;
@@ -777,7 +761,7 @@ static void ctrl_complete (struct urb *urb)
777 else if (subcase->number == 12 && status == -EPIPE) 761 else if (subcase->number == 12 && status == -EPIPE)
778 status = 0; 762 status = 0;
779 else 763 else
780 dbg ("subtest %d error, status %d", 764 ERROR(ctx->dev, "subtest %d error, status %d\n",
781 subcase->number, status); 765 subcase->number, status);
782 } 766 }
783 767
@@ -788,9 +772,12 @@ error:
788 int i; 772 int i;
789 773
790 ctx->status = status; 774 ctx->status = status;
791 info ("control queue %02x.%02x, err %d, %d left", 775 ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
776 "%d left, subcase %d, len %d/%d\n",
792 reqp->bRequestType, reqp->bRequest, 777 reqp->bRequestType, reqp->bRequest,
793 status, ctx->count); 778 status, ctx->count, subcase->number,
779 urb->actual_length,
780 urb->transfer_buffer_length);
794 781
795 /* FIXME this "unlink everything" exit route should 782 /* FIXME this "unlink everything" exit route should
796 * be a separate test case. 783 * be a separate test case.
@@ -799,7 +786,8 @@ error:
799 /* unlink whatever's still pending */ 786 /* unlink whatever's still pending */
800 for (i = 1; i < ctx->param->sglen; i++) { 787 for (i = 1; i < ctx->param->sglen; i++) {
801 struct urb *u = ctx->urb [ 788 struct urb *u = ctx->urb [
802 (i + subcase->number) % ctx->param->sglen]; 789 (i + subcase->number)
790 % ctx->param->sglen];
803 791
804 if (u == urb || !u->dev) 792 if (u == urb || !u->dev)
805 continue; 793 continue;
@@ -812,7 +800,8 @@ error:
812 case -EIDRM: 800 case -EIDRM:
813 continue; 801 continue;
814 default: 802 default:
815 dbg ("urb unlink --> %d", status); 803 ERROR(ctx->dev, "urb unlink --> %d\n",
804 status);
816 } 805 }
817 } 806 }
818 status = ctx->status; 807 status = ctx->status;
@@ -822,14 +811,15 @@ error:
822 /* resubmit if we need to, else mark this as done */ 811 /* resubmit if we need to, else mark this as done */
823 if ((status == 0) && (ctx->pending < ctx->count)) { 812 if ((status == 0) && (ctx->pending < ctx->count)) {
824 if ((status = usb_submit_urb (urb, GFP_ATOMIC)) != 0) { 813 if ((status = usb_submit_urb (urb, GFP_ATOMIC)) != 0) {
825 dbg ("can't resubmit ctrl %02x.%02x, err %d", 814 ERROR(ctx->dev,
815 "can't resubmit ctrl %02x.%02x, err %d\n",
826 reqp->bRequestType, reqp->bRequest, status); 816 reqp->bRequestType, reqp->bRequest, status);
827 urb->dev = NULL; 817 urb->dev = NULL;
828 } else 818 } else
829 ctx->pending++; 819 ctx->pending++;
830 } else 820 } else
831 urb->dev = NULL; 821 urb->dev = NULL;
832 822
833 /* signal completion when nothing's queued */ 823 /* signal completion when nothing's queued */
834 if (ctx->pending == 0) 824 if (ctx->pending == 0)
835 complete (&ctx->complete); 825 complete (&ctx->complete);
@@ -918,11 +908,11 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
918 req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8); 908 req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
919 // interface == 0 909 // interface == 0
920 len = sizeof (struct usb_interface_descriptor); 910 len = sizeof (struct usb_interface_descriptor);
921 expected = EPIPE; 911 expected = -EPIPE;
922 break; 912 break;
923 // NOTE: two consecutive stalls in the queue here. 913 // NOTE: two consecutive stalls in the queue here.
924 // that tests fault recovery a bit more aggressively. 914 // that tests fault recovery a bit more aggressively.
925 case 8: // clear endpoint halt (USUALLY STALLS) 915 case 8: // clear endpoint halt (MAY STALL)
926 req.bRequest = USB_REQ_CLEAR_FEATURE; 916 req.bRequest = USB_REQ_CLEAR_FEATURE;
927 req.bRequestType = USB_RECIP_ENDPOINT; 917 req.bRequestType = USB_RECIP_ENDPOINT;
928 // wValue 0 == ep halt 918 // wValue 0 == ep halt
@@ -965,7 +955,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
965 break; 955 break;
966 case 14: // short read; try to fill the last packet 956 case 14: // short read; try to fill the last packet
967 req.wValue = cpu_to_le16 ((USB_DT_DEVICE << 8) | 0); 957 req.wValue = cpu_to_le16 ((USB_DT_DEVICE << 8) | 0);
968 // device descriptor size == 18 bytes 958 /* device descriptor size == 18 bytes */
969 len = udev->descriptor.bMaxPacketSize0; 959 len = udev->descriptor.bMaxPacketSize0;
970 switch (len) { 960 switch (len) {
971 case 8: len = 24; break; 961 case 8: len = 24; break;
@@ -974,7 +964,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
974 expected = -EREMOTEIO; 964 expected = -EREMOTEIO;
975 break; 965 break;
976 default: 966 default:
977 err ("bogus number of ctrl queue testcases!"); 967 ERROR(dev, "bogus number of ctrl queue testcases!\n");
978 context.status = -EINVAL; 968 context.status = -EINVAL;
979 goto cleanup; 969 goto cleanup;
980 } 970 }
@@ -1003,7 +993,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
1003 for (i = 0; i < param->sglen; i++) { 993 for (i = 0; i < param->sglen; i++) {
1004 context.status = usb_submit_urb (urb [i], GFP_ATOMIC); 994 context.status = usb_submit_urb (urb [i], GFP_ATOMIC);
1005 if (context.status != 0) { 995 if (context.status != 0) {
1006 dbg ("can't submit urb[%d], status %d", 996 ERROR(dev, "can't submit urb[%d], status %d\n",
1007 i, context.status); 997 i, context.status);
1008 context.count = context.pending; 998 context.count = context.pending;
1009 break; 999 break;
@@ -1070,7 +1060,7 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
1070 * due to errors, or is just NAKing requests. 1060 * due to errors, or is just NAKing requests.
1071 */ 1061 */
1072 if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) { 1062 if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) {
1073 dev_dbg (&dev->intf->dev, "submit fail %d\n", retval); 1063 dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1074 return retval; 1064 return retval;
1075 } 1065 }
1076 1066
@@ -1087,13 +1077,13 @@ retry:
1087 * "normal" drivers would prevent resubmission, but 1077 * "normal" drivers would prevent resubmission, but
1088 * since we're testing unlink paths, we can't. 1078 * since we're testing unlink paths, we can't.
1089 */ 1079 */
1090 dev_dbg (&dev->intf->dev, "unlink retry\n"); 1080 ERROR(dev, "unlink retry\n");
1091 goto retry; 1081 goto retry;
1092 } 1082 }
1093 } else 1083 } else
1094 usb_kill_urb (urb); 1084 usb_kill_urb (urb);
1095 if (!(retval == 0 || retval == -EINPROGRESS)) { 1085 if (!(retval == 0 || retval == -EINPROGRESS)) {
1096 dev_dbg (&dev->intf->dev, "unlink fail %d\n", retval); 1086 dev_err(&dev->intf->dev, "unlink fail %d\n", retval);
1097 return retval; 1087 return retval;
1098 } 1088 }
1099 1089
@@ -1121,7 +1111,7 @@ static int unlink_simple (struct usbtest_dev *dev, int pipe, int len)
1121 1111
1122/*-------------------------------------------------------------------------*/ 1112/*-------------------------------------------------------------------------*/
1123 1113
1124static int verify_not_halted (int ep, struct urb *urb) 1114static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1125{ 1115{
1126 int retval; 1116 int retval;
1127 u16 status; 1117 u16 status;
@@ -1129,20 +1119,21 @@ static int verify_not_halted (int ep, struct urb *urb)
1129 /* shouldn't look or act halted */ 1119 /* shouldn't look or act halted */
1130 retval = usb_get_status (urb->dev, USB_RECIP_ENDPOINT, ep, &status); 1120 retval = usb_get_status (urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1131 if (retval < 0) { 1121 if (retval < 0) {
1132 dbg ("ep %02x couldn't get no-halt status, %d", ep, retval); 1122 ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1123 ep, retval);
1133 return retval; 1124 return retval;
1134 } 1125 }
1135 if (status != 0) { 1126 if (status != 0) {
1136 dbg ("ep %02x bogus status: %04x != 0", ep, status); 1127 ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1137 return -EINVAL; 1128 return -EINVAL;
1138 } 1129 }
1139 retval = simple_io (urb, 1, 0, 0, __func__); 1130 retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1140 if (retval != 0) 1131 if (retval != 0)
1141 return -EINVAL; 1132 return -EINVAL;
1142 return 0; 1133 return 0;
1143} 1134}
1144 1135
1145static int verify_halted (int ep, struct urb *urb) 1136static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1146{ 1137{
1147 int retval; 1138 int retval;
1148 u16 status; 1139 u16 status;
@@ -1150,29 +1141,30 @@ static int verify_halted (int ep, struct urb *urb)
1150 /* should look and act halted */ 1141 /* should look and act halted */
1151 retval = usb_get_status (urb->dev, USB_RECIP_ENDPOINT, ep, &status); 1142 retval = usb_get_status (urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1152 if (retval < 0) { 1143 if (retval < 0) {
1153 dbg ("ep %02x couldn't get halt status, %d", ep, retval); 1144 ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1145 ep, retval);
1154 return retval; 1146 return retval;
1155 } 1147 }
1156 le16_to_cpus(&status); 1148 le16_to_cpus(&status);
1157 if (status != 1) { 1149 if (status != 1) {
1158 dbg ("ep %02x bogus status: %04x != 1", ep, status); 1150 ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1159 return -EINVAL; 1151 return -EINVAL;
1160 } 1152 }
1161 retval = simple_io (urb, 1, 0, -EPIPE, __func__); 1153 retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1162 if (retval != -EPIPE) 1154 if (retval != -EPIPE)
1163 return -EINVAL; 1155 return -EINVAL;
1164 retval = simple_io (urb, 1, 0, -EPIPE, "verify_still_halted"); 1156 retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1165 if (retval != -EPIPE) 1157 if (retval != -EPIPE)
1166 return -EINVAL; 1158 return -EINVAL;
1167 return 0; 1159 return 0;
1168} 1160}
1169 1161
1170static int test_halt (int ep, struct urb *urb) 1162static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1171{ 1163{
1172 int retval; 1164 int retval;
1173 1165
1174 /* shouldn't look or act halted now */ 1166 /* shouldn't look or act halted now */
1175 retval = verify_not_halted (ep, urb); 1167 retval = verify_not_halted(tdev, ep, urb);
1176 if (retval < 0) 1168 if (retval < 0)
1177 return retval; 1169 return retval;
1178 1170
@@ -1182,20 +1174,20 @@ static int test_halt (int ep, struct urb *urb)
1182 USB_ENDPOINT_HALT, ep, 1174 USB_ENDPOINT_HALT, ep,
1183 NULL, 0, USB_CTRL_SET_TIMEOUT); 1175 NULL, 0, USB_CTRL_SET_TIMEOUT);
1184 if (retval < 0) { 1176 if (retval < 0) {
1185 dbg ("ep %02x couldn't set halt, %d", ep, retval); 1177 ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1186 return retval; 1178 return retval;
1187 } 1179 }
1188 retval = verify_halted (ep, urb); 1180 retval = verify_halted(tdev, ep, urb);
1189 if (retval < 0) 1181 if (retval < 0)
1190 return retval; 1182 return retval;
1191 1183
1192 /* clear halt (tests API + protocol), verify it worked */ 1184 /* clear halt (tests API + protocol), verify it worked */
1193 retval = usb_clear_halt (urb->dev, urb->pipe); 1185 retval = usb_clear_halt (urb->dev, urb->pipe);
1194 if (retval < 0) { 1186 if (retval < 0) {
1195 dbg ("ep %02x couldn't clear halt, %d", ep, retval); 1187 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1196 return retval; 1188 return retval;
1197 } 1189 }
1198 retval = verify_not_halted (ep, urb); 1190 retval = verify_not_halted(tdev, ep, urb);
1199 if (retval < 0) 1191 if (retval < 0)
1200 return retval; 1192 return retval;
1201 1193
@@ -1217,7 +1209,7 @@ static int halt_simple (struct usbtest_dev *dev)
1217 if (dev->in_pipe) { 1209 if (dev->in_pipe) {
1218 ep = usb_pipeendpoint (dev->in_pipe) | USB_DIR_IN; 1210 ep = usb_pipeendpoint (dev->in_pipe) | USB_DIR_IN;
1219 urb->pipe = dev->in_pipe; 1211 urb->pipe = dev->in_pipe;
1220 retval = test_halt (ep, urb); 1212 retval = test_halt(dev, ep, urb);
1221 if (retval < 0) 1213 if (retval < 0)
1222 goto done; 1214 goto done;
1223 } 1215 }
@@ -1225,7 +1217,7 @@ static int halt_simple (struct usbtest_dev *dev)
1225 if (dev->out_pipe) { 1217 if (dev->out_pipe) {
1226 ep = usb_pipeendpoint (dev->out_pipe); 1218 ep = usb_pipeendpoint (dev->out_pipe);
1227 urb->pipe = dev->out_pipe; 1219 urb->pipe = dev->out_pipe;
1228 retval = test_halt (ep, urb); 1220 retval = test_halt(dev, ep, urb);
1229 } 1221 }
1230done: 1222done:
1231 simple_free_urb (urb); 1223 simple_free_urb (urb);
@@ -1275,7 +1267,7 @@ static int ctrl_out (struct usbtest_dev *dev,
1275 if (retval != len) { 1267 if (retval != len) {
1276 what = "write"; 1268 what = "write";
1277 if (retval >= 0) { 1269 if (retval >= 0) {
1278 INFO(dev, "ctrl_out, wlen %d (expected %d)\n", 1270 ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1279 retval, len); 1271 retval, len);
1280 retval = -EBADMSG; 1272 retval = -EBADMSG;
1281 } 1273 }
@@ -1289,7 +1281,7 @@ static int ctrl_out (struct usbtest_dev *dev,
1289 if (retval != len) { 1281 if (retval != len) {
1290 what = "read"; 1282 what = "read";
1291 if (retval >= 0) { 1283 if (retval >= 0) {
1292 INFO(dev, "ctrl_out, rlen %d (expected %d)\n", 1284 ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1293 retval, len); 1285 retval, len);
1294 retval = -EBADMSG; 1286 retval = -EBADMSG;
1295 } 1287 }
@@ -1299,7 +1291,7 @@ static int ctrl_out (struct usbtest_dev *dev,
1299 /* fail if we can't verify */ 1291 /* fail if we can't verify */
1300 for (j = 0; j < len; j++) { 1292 for (j = 0; j < len; j++) {
1301 if (buf [j] != (u8) (i + j)) { 1293 if (buf [j] != (u8) (i + j)) {
1302 INFO (dev, "ctrl_out, byte %d is %d not %d\n", 1294 ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1303 j, buf [j], (u8) i + j); 1295 j, buf [j], (u8) i + j);
1304 retval = -EBADMSG; 1296 retval = -EBADMSG;
1305 break; 1297 break;
@@ -1321,7 +1313,7 @@ static int ctrl_out (struct usbtest_dev *dev,
1321 } 1313 }
1322 1314
1323 if (retval < 0) 1315 if (retval < 0)
1324 INFO (dev, "ctrl_out %s failed, code %d, count %d\n", 1316 ERROR (dev, "ctrl_out %s failed, code %d, count %d\n",
1325 what, retval, i); 1317 what, retval, i);
1326 1318
1327 kfree (buf); 1319 kfree (buf);
@@ -1366,7 +1358,7 @@ static void iso_callback (struct urb *urb)
1366 case 0: 1358 case 0:
1367 goto done; 1359 goto done;
1368 default: 1360 default:
1369 dev_dbg (&ctx->dev->intf->dev, 1361 dev_err(&ctx->dev->intf->dev,
1370 "iso resubmit err %d\n", 1362 "iso resubmit err %d\n",
1371 status); 1363 status);
1372 /* FALLTHROUGH */ 1364 /* FALLTHROUGH */
@@ -1381,7 +1373,7 @@ static void iso_callback (struct urb *urb)
1381 ctx->pending--; 1373 ctx->pending--;
1382 if (ctx->pending == 0) { 1374 if (ctx->pending == 0) {
1383 if (ctx->errors) 1375 if (ctx->errors)
1384 dev_dbg (&ctx->dev->intf->dev, 1376 dev_err(&ctx->dev->intf->dev,
1385 "iso test, %lu errors out of %lu\n", 1377 "iso test, %lu errors out of %lu\n",
1386 ctx->errors, ctx->packet_count); 1378 ctx->errors, ctx->packet_count);
1387 complete (&ctx->done); 1379 complete (&ctx->done);
@@ -1458,7 +1450,7 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
1458 1450
1459 memset (urbs, 0, sizeof urbs); 1451 memset (urbs, 0, sizeof urbs);
1460 udev = testdev_to_usbdev (dev); 1452 udev = testdev_to_usbdev (dev);
1461 dev_dbg (&dev->intf->dev, 1453 dev_info(&dev->intf->dev,
1462 "... iso period %d %sframes, wMaxPacket %04x\n", 1454 "... iso period %d %sframes, wMaxPacket %04x\n",
1463 1 << (desc->bInterval - 1), 1455 1 << (desc->bInterval - 1),
1464 (udev->speed == USB_SPEED_HIGH) ? "micro" : "", 1456 (udev->speed == USB_SPEED_HIGH) ? "micro" : "",
@@ -1475,7 +1467,7 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
1475 urbs [i]->context = &context; 1467 urbs [i]->context = &context;
1476 } 1468 }
1477 packets *= param->iterations; 1469 packets *= param->iterations;
1478 dev_dbg (&dev->intf->dev, 1470 dev_info(&dev->intf->dev,
1479 "... total %lu msec (%lu packets)\n", 1471 "... total %lu msec (%lu packets)\n",
1480 (packets * (1 << (desc->bInterval - 1))) 1472 (packets * (1 << (desc->bInterval - 1)))
1481 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1), 1473 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
@@ -1537,6 +1529,13 @@ fail:
1537 * except indirectly by consuming USB bandwidth and CPU resources for test 1529 * except indirectly by consuming USB bandwidth and CPU resources for test
1538 * threads and request completion. But the only way to know that for sure 1530 * threads and request completion. But the only way to know that for sure
1539 * is to test when HC queues are in use by many devices. 1531 * is to test when HC queues are in use by many devices.
1532 *
1533 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
1534 * it locks out usbcore in certain code paths. Notably, if you disconnect
 1535 * the device-under-test, khubd will block forever waiting for the
1536 * ioctl to complete ... so that usb_disconnect() can abort the pending
1537 * urbs and then call usbtest_disconnect(). To abort a test, you're best
1538 * off just killing the userspace task and waiting for it to exit.
1540 */ 1539 */
1541 1540
1542static int 1541static int
@@ -1575,7 +1574,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1575 * altsettings; force a default so most tests don't need to check. 1574 * altsettings; force a default so most tests don't need to check.
1576 */ 1575 */
1577 if (dev->info->alt >= 0) { 1576 if (dev->info->alt >= 0) {
1578 int res; 1577 int res;
1579 1578
1580 if (intf->altsetting->desc.bInterfaceNumber) { 1579 if (intf->altsetting->desc.bInterfaceNumber) {
1581 mutex_unlock(&dev->lock); 1580 mutex_unlock(&dev->lock);
@@ -1604,7 +1603,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1604 switch (param->test_num) { 1603 switch (param->test_num) {
1605 1604
1606 case 0: 1605 case 0:
1607 dev_dbg (&intf->dev, "TEST 0: NOP\n"); 1606 dev_info(&intf->dev, "TEST 0: NOP\n");
1608 retval = 0; 1607 retval = 0;
1609 break; 1608 break;
1610 1609
@@ -1612,7 +1611,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1612 case 1: 1611 case 1:
1613 if (dev->out_pipe == 0) 1612 if (dev->out_pipe == 0)
1614 break; 1613 break;
1615 dev_dbg (&intf->dev, 1614 dev_info(&intf->dev,
1616 "TEST 1: write %d bytes %u times\n", 1615 "TEST 1: write %d bytes %u times\n",
1617 param->length, param->iterations); 1616 param->length, param->iterations);
1618 urb = simple_alloc_urb (udev, dev->out_pipe, param->length); 1617 urb = simple_alloc_urb (udev, dev->out_pipe, param->length);
@@ -1621,13 +1620,13 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1621 break; 1620 break;
1622 } 1621 }
1623 // FIRMWARE: bulk sink (maybe accepts short writes) 1622 // FIRMWARE: bulk sink (maybe accepts short writes)
1624 retval = simple_io (urb, param->iterations, 0, 0, "test1"); 1623 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
1625 simple_free_urb (urb); 1624 simple_free_urb (urb);
1626 break; 1625 break;
1627 case 2: 1626 case 2:
1628 if (dev->in_pipe == 0) 1627 if (dev->in_pipe == 0)
1629 break; 1628 break;
1630 dev_dbg (&intf->dev, 1629 dev_info(&intf->dev,
1631 "TEST 2: read %d bytes %u times\n", 1630 "TEST 2: read %d bytes %u times\n",
1632 param->length, param->iterations); 1631 param->length, param->iterations);
1633 urb = simple_alloc_urb (udev, dev->in_pipe, param->length); 1632 urb = simple_alloc_urb (udev, dev->in_pipe, param->length);
@@ -1636,13 +1635,13 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1636 break; 1635 break;
1637 } 1636 }
1638 // FIRMWARE: bulk source (maybe generates short writes) 1637 // FIRMWARE: bulk source (maybe generates short writes)
1639 retval = simple_io (urb, param->iterations, 0, 0, "test2"); 1638 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
1640 simple_free_urb (urb); 1639 simple_free_urb (urb);
1641 break; 1640 break;
1642 case 3: 1641 case 3:
1643 if (dev->out_pipe == 0 || param->vary == 0) 1642 if (dev->out_pipe == 0 || param->vary == 0)
1644 break; 1643 break;
1645 dev_dbg (&intf->dev, 1644 dev_info(&intf->dev,
1646 "TEST 3: write/%d 0..%d bytes %u times\n", 1645 "TEST 3: write/%d 0..%d bytes %u times\n",
1647 param->vary, param->length, param->iterations); 1646 param->vary, param->length, param->iterations);
1648 urb = simple_alloc_urb (udev, dev->out_pipe, param->length); 1647 urb = simple_alloc_urb (udev, dev->out_pipe, param->length);
@@ -1651,14 +1650,14 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1651 break; 1650 break;
1652 } 1651 }
1653 // FIRMWARE: bulk sink (maybe accepts short writes) 1652 // FIRMWARE: bulk sink (maybe accepts short writes)
1654 retval = simple_io (urb, param->iterations, param->vary, 1653 retval = simple_io(dev, urb, param->iterations, param->vary,
1655 0, "test3"); 1654 0, "test3");
1656 simple_free_urb (urb); 1655 simple_free_urb (urb);
1657 break; 1656 break;
1658 case 4: 1657 case 4:
1659 if (dev->in_pipe == 0 || param->vary == 0) 1658 if (dev->in_pipe == 0 || param->vary == 0)
1660 break; 1659 break;
1661 dev_dbg (&intf->dev, 1660 dev_info(&intf->dev,
1662 "TEST 4: read/%d 0..%d bytes %u times\n", 1661 "TEST 4: read/%d 0..%d bytes %u times\n",
1663 param->vary, param->length, param->iterations); 1662 param->vary, param->length, param->iterations);
1664 urb = simple_alloc_urb (udev, dev->in_pipe, param->length); 1663 urb = simple_alloc_urb (udev, dev->in_pipe, param->length);
@@ -1667,7 +1666,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1667 break; 1666 break;
1668 } 1667 }
1669 // FIRMWARE: bulk source (maybe generates short writes) 1668 // FIRMWARE: bulk source (maybe generates short writes)
1670 retval = simple_io (urb, param->iterations, param->vary, 1669 retval = simple_io(dev, urb, param->iterations, param->vary,
1671 0, "test4"); 1670 0, "test4");
1672 simple_free_urb (urb); 1671 simple_free_urb (urb);
1673 break; 1672 break;
@@ -1676,7 +1675,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1676 case 5: 1675 case 5:
1677 if (dev->out_pipe == 0 || param->sglen == 0) 1676 if (dev->out_pipe == 0 || param->sglen == 0)
1678 break; 1677 break;
1679 dev_dbg (&intf->dev, 1678 dev_info(&intf->dev,
1680 "TEST 5: write %d sglists %d entries of %d bytes\n", 1679 "TEST 5: write %d sglists %d entries of %d bytes\n",
1681 param->iterations, 1680 param->iterations,
1682 param->sglen, param->length); 1681 param->sglen, param->length);
@@ -1686,7 +1685,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1686 break; 1685 break;
1687 } 1686 }
1688 // FIRMWARE: bulk sink (maybe accepts short writes) 1687 // FIRMWARE: bulk sink (maybe accepts short writes)
1689 retval = perform_sglist (udev, param->iterations, dev->out_pipe, 1688 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1690 &req, sg, param->sglen); 1689 &req, sg, param->sglen);
1691 free_sglist (sg, param->sglen); 1690 free_sglist (sg, param->sglen);
1692 break; 1691 break;
@@ -1694,7 +1693,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1694 case 6: 1693 case 6:
1695 if (dev->in_pipe == 0 || param->sglen == 0) 1694 if (dev->in_pipe == 0 || param->sglen == 0)
1696 break; 1695 break;
1697 dev_dbg (&intf->dev, 1696 dev_info(&intf->dev,
1698 "TEST 6: read %d sglists %d entries of %d bytes\n", 1697 "TEST 6: read %d sglists %d entries of %d bytes\n",
1699 param->iterations, 1698 param->iterations,
1700 param->sglen, param->length); 1699 param->sglen, param->length);
@@ -1704,14 +1703,14 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1704 break; 1703 break;
1705 } 1704 }
1706 // FIRMWARE: bulk source (maybe generates short writes) 1705 // FIRMWARE: bulk source (maybe generates short writes)
1707 retval = perform_sglist (udev, param->iterations, dev->in_pipe, 1706 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1708 &req, sg, param->sglen); 1707 &req, sg, param->sglen);
1709 free_sglist (sg, param->sglen); 1708 free_sglist (sg, param->sglen);
1710 break; 1709 break;
1711 case 7: 1710 case 7:
1712 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0) 1711 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
1713 break; 1712 break;
1714 dev_dbg (&intf->dev, 1713 dev_info(&intf->dev,
1715 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n", 1714 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
1716 param->vary, param->iterations, 1715 param->vary, param->iterations,
1717 param->sglen, param->length); 1716 param->sglen, param->length);
@@ -1721,14 +1720,14 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1721 break; 1720 break;
1722 } 1721 }
1723 // FIRMWARE: bulk sink (maybe accepts short writes) 1722 // FIRMWARE: bulk sink (maybe accepts short writes)
1724 retval = perform_sglist (udev, param->iterations, dev->out_pipe, 1723 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1725 &req, sg, param->sglen); 1724 &req, sg, param->sglen);
1726 free_sglist (sg, param->sglen); 1725 free_sglist (sg, param->sglen);
1727 break; 1726 break;
1728 case 8: 1727 case 8:
1729 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0) 1728 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
1730 break; 1729 break;
1731 dev_dbg (&intf->dev, 1730 dev_info(&intf->dev,
1732 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n", 1731 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
1733 param->vary, param->iterations, 1732 param->vary, param->iterations,
1734 param->sglen, param->length); 1733 param->sglen, param->length);
@@ -1738,7 +1737,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1738 break; 1737 break;
1739 } 1738 }
1740 // FIRMWARE: bulk source (maybe generates short writes) 1739 // FIRMWARE: bulk source (maybe generates short writes)
1741 retval = perform_sglist (udev, param->iterations, dev->in_pipe, 1740 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1742 &req, sg, param->sglen); 1741 &req, sg, param->sglen);
1743 free_sglist (sg, param->sglen); 1742 free_sglist (sg, param->sglen);
1744 break; 1743 break;
@@ -1746,13 +1745,14 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1746 /* non-queued sanity tests for control (chapter 9 subset) */ 1745 /* non-queued sanity tests for control (chapter 9 subset) */
1747 case 9: 1746 case 9:
1748 retval = 0; 1747 retval = 0;
1749 dev_dbg (&intf->dev, 1748 dev_info(&intf->dev,
1750 "TEST 9: ch9 (subset) control tests, %d times\n", 1749 "TEST 9: ch9 (subset) control tests, %d times\n",
1751 param->iterations); 1750 param->iterations);
1752 for (i = param->iterations; retval == 0 && i--; /* NOP */) 1751 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1753 retval = ch9_postconfig (dev); 1752 retval = ch9_postconfig (dev);
1754 if (retval) 1753 if (retval)
1755 dbg ("ch9 subset failed, iterations left %d", i); 1754 dev_err(&intf->dev, "ch9 subset failed, "
1755 "iterations left %d\n", i);
1756 break; 1756 break;
1757 1757
1758 /* queued control messaging */ 1758 /* queued control messaging */
@@ -1760,7 +1760,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1760 if (param->sglen == 0) 1760 if (param->sglen == 0)
1761 break; 1761 break;
1762 retval = 0; 1762 retval = 0;
1763 dev_dbg (&intf->dev, 1763 dev_info(&intf->dev,
1764 "TEST 10: queue %d control calls, %d times\n", 1764 "TEST 10: queue %d control calls, %d times\n",
1765 param->sglen, 1765 param->sglen,
1766 param->iterations); 1766 param->iterations);
@@ -1772,26 +1772,26 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1772 if (dev->in_pipe == 0 || !param->length) 1772 if (dev->in_pipe == 0 || !param->length)
1773 break; 1773 break;
1774 retval = 0; 1774 retval = 0;
1775 dev_dbg (&intf->dev, "TEST 11: unlink %d reads of %d\n", 1775 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
1776 param->iterations, param->length); 1776 param->iterations, param->length);
1777 for (i = param->iterations; retval == 0 && i--; /* NOP */) 1777 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1778 retval = unlink_simple (dev, dev->in_pipe, 1778 retval = unlink_simple (dev, dev->in_pipe,
1779 param->length); 1779 param->length);
1780 if (retval) 1780 if (retval)
1781 dev_dbg (&intf->dev, "unlink reads failed %d, " 1781 dev_err(&intf->dev, "unlink reads failed %d, "
1782 "iterations left %d\n", retval, i); 1782 "iterations left %d\n", retval, i);
1783 break; 1783 break;
1784 case 12: 1784 case 12:
1785 if (dev->out_pipe == 0 || !param->length) 1785 if (dev->out_pipe == 0 || !param->length)
1786 break; 1786 break;
1787 retval = 0; 1787 retval = 0;
1788 dev_dbg (&intf->dev, "TEST 12: unlink %d writes of %d\n", 1788 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
1789 param->iterations, param->length); 1789 param->iterations, param->length);
1790 for (i = param->iterations; retval == 0 && i--; /* NOP */) 1790 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1791 retval = unlink_simple (dev, dev->out_pipe, 1791 retval = unlink_simple (dev, dev->out_pipe,
1792 param->length); 1792 param->length);
1793 if (retval) 1793 if (retval)
1794 dev_dbg (&intf->dev, "unlink writes failed %d, " 1794 dev_err(&intf->dev, "unlink writes failed %d, "
1795 "iterations left %d\n", retval, i); 1795 "iterations left %d\n", retval, i);
1796 break; 1796 break;
1797 1797
@@ -1800,24 +1800,24 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1800 if (dev->out_pipe == 0 && dev->in_pipe == 0) 1800 if (dev->out_pipe == 0 && dev->in_pipe == 0)
1801 break; 1801 break;
1802 retval = 0; 1802 retval = 0;
1803 dev_dbg (&intf->dev, "TEST 13: set/clear %d halts\n", 1803 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
1804 param->iterations); 1804 param->iterations);
1805 for (i = param->iterations; retval == 0 && i--; /* NOP */) 1805 for (i = param->iterations; retval == 0 && i--; /* NOP */)
1806 retval = halt_simple (dev); 1806 retval = halt_simple (dev);
1807 1807
1808 if (retval) 1808 if (retval)
1809 DBG (dev, "halts failed, iterations left %d\n", i); 1809 ERROR(dev, "halts failed, iterations left %d\n", i);
1810 break; 1810 break;
1811 1811
1812 /* control write tests */ 1812 /* control write tests */
1813 case 14: 1813 case 14:
1814 if (!dev->info->ctrl_out) 1814 if (!dev->info->ctrl_out)
1815 break; 1815 break;
1816 dev_dbg (&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n", 1816 dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
1817 param->iterations, 1817 param->iterations,
1818 realworld ? 1 : 0, param->length, 1818 realworld ? 1 : 0, param->length,
1819 param->vary); 1819 param->vary);
1820 retval = ctrl_out (dev, param->iterations, 1820 retval = ctrl_out(dev, param->iterations,
1821 param->length, param->vary); 1821 param->length, param->vary);
1822 break; 1822 break;
1823 1823
@@ -1825,7 +1825,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1825 case 15: 1825 case 15:
1826 if (dev->out_iso_pipe == 0 || param->sglen == 0) 1826 if (dev->out_iso_pipe == 0 || param->sglen == 0)
1827 break; 1827 break;
1828 dev_dbg (&intf->dev, 1828 dev_info(&intf->dev,
1829 "TEST 15: write %d iso, %d entries of %d bytes\n", 1829 "TEST 15: write %d iso, %d entries of %d bytes\n",
1830 param->iterations, 1830 param->iterations,
1831 param->sglen, param->length); 1831 param->sglen, param->length);
@@ -1838,7 +1838,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1838 case 16: 1838 case 16:
1839 if (dev->in_iso_pipe == 0 || param->sglen == 0) 1839 if (dev->in_iso_pipe == 0 || param->sglen == 0)
1840 break; 1840 break;
1841 dev_dbg (&intf->dev, 1841 dev_info(&intf->dev,
1842 "TEST 16: read %d iso, %d entries of %d bytes\n", 1842 "TEST 16: read %d iso, %d entries of %d bytes\n",
1843 param->iterations, 1843 param->iterations,
1844 param->sglen, param->length); 1844 param->sglen, param->length);
@@ -1898,7 +1898,8 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
1898 return -ENODEV; 1898 return -ENODEV;
1899 if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product) 1899 if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
1900 return -ENODEV; 1900 return -ENODEV;
1901 dbg ("matched module params, vend=0x%04x prod=0x%04x", 1901 dev_info(&intf->dev, "matched module params, "
1902 "vend=0x%04x prod=0x%04x\n",
1902 le16_to_cpu(udev->descriptor.idVendor), 1903 le16_to_cpu(udev->descriptor.idVendor),
1903 le16_to_cpu(udev->descriptor.idProduct)); 1904 le16_to_cpu(udev->descriptor.idProduct));
1904 } 1905 }
@@ -1940,7 +1941,8 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
1940 1941
1941 status = get_endpoints (dev, intf); 1942 status = get_endpoints (dev, intf);
1942 if (status < 0) { 1943 if (status < 0) {
1943 dbg ("couldn't get endpoints, %d\n", status); 1944 WARN(dev, "couldn't get endpoints, %d\n",
1945 status);
1944 return status; 1946 return status;
1945 } 1947 }
1946 /* may find bulk or ISO pipes */ 1948 /* may find bulk or ISO pipes */
@@ -2082,21 +2084,9 @@ static struct usbtest_info generic_info = {
2082}; 2084};
2083#endif 2085#endif
2084 2086
2085// FIXME remove this
2086static struct usbtest_info hact_info = {
2087 .name = "FX2/hact",
2088 //.ep_in = 6,
2089 .ep_out = 2,
2090 .alt = -1,
2091};
2092
2093 2087
2094static struct usb_device_id id_table [] = { 2088static struct usb_device_id id_table [] = {
2095 2089
2096 { USB_DEVICE (0x0547, 0x1002),
2097 .driver_info = (unsigned long) &hact_info,
2098 },
2099
2100 /*-------------------------------------------------------------*/ 2090 /*-------------------------------------------------------------*/
2101 2091
2102 /* EZ-USB devices which download firmware to replace (or in our 2092 /* EZ-USB devices which download firmware to replace (or in our
@@ -2185,7 +2175,7 @@ static int __init usbtest_init (void)
2185{ 2175{
2186#ifdef GENERIC 2176#ifdef GENERIC
2187 if (vendor) 2177 if (vendor)
2188 dbg ("params: vend=0x%04x prod=0x%04x", vendor, product); 2178 pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2189#endif 2179#endif
2190 return usb_register (&usbtest_driver); 2180 return usb_register (&usbtest_driver);
2191} 2181}
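
Most of the usbtest.c hunks above follow one pattern: context-free dbg()/info() calls are replaced by messages tied to the interface under test, either directly through dev_err()/dev_info() or through the driver's ERROR()/WARN() wrappers that take a struct usbtest_dev. The wrapper definitions sit earlier in usbtest.c and are not part of these hunks; the lines below are only a sketch of what such wrappers typically look like, not the committed definitions.

/* illustrative sketch -- the real macros are defined near the top of
 * usbtest.c and may differ in detail */
#define ERROR(tdev, fmt, args...) \
	dev_err(&(tdev)->intf->dev, fmt, ## args)
#define WARN(tdev, fmt, args...) \
	dev_warn(&(tdev)->intf->dev, fmt, ## args)
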
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 9b1bb347dc2d..db6f97a93c02 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -147,7 +147,7 @@ static void serial_buf_free(struct circ_buf *cb)
147 */ 147 */
148static int serial_buf_data_avail(struct circ_buf *cb) 148static int serial_buf_data_avail(struct circ_buf *cb)
149{ 149{
150 return CIRC_CNT(cb->head,cb->tail,AIRCABLE_BUF_SIZE); 150 return CIRC_CNT(cb->head, cb->tail, AIRCABLE_BUF_SIZE);
151} 151}
152 152
153/* 153/*
@@ -171,7 +171,7 @@ static int serial_buf_put(struct circ_buf *cb, const char *buf, int count)
171 cb->head = (cb->head + c) & (AIRCABLE_BUF_SIZE-1); 171 cb->head = (cb->head + c) & (AIRCABLE_BUF_SIZE-1);
172 buf += c; 172 buf += c;
173 count -= c; 173 count -= c;
174 ret= c; 174 ret = c;
175 } 175 }
176 return ret; 176 return ret;
177} 177}
@@ -197,7 +197,7 @@ static int serial_buf_get(struct circ_buf *cb, char *buf, int count)
197 cb->tail = (cb->tail + c) & (AIRCABLE_BUF_SIZE-1); 197 cb->tail = (cb->tail + c) & (AIRCABLE_BUF_SIZE-1);
198 buf += c; 198 buf += c;
199 count -= c; 199 count -= c;
200 ret= c; 200 ret = c;
201 } 201 }
202 return ret; 202 return ret;
203} 203}
@@ -208,7 +208,7 @@ static void aircable_send(struct usb_serial_port *port)
208{ 208{
209 int count, result; 209 int count, result;
210 struct aircable_private *priv = usb_get_serial_port_data(port); 210 struct aircable_private *priv = usb_get_serial_port_data(port);
211 unsigned char* buf; 211 unsigned char *buf;
212 __le16 *dbuf; 212 __le16 *dbuf;
213 dbg("%s - port %d", __func__, port->number); 213 dbg("%s - port %d", __func__, port->number);
214 if (port->write_urb_busy) 214 if (port->write_urb_busy)
@@ -229,7 +229,8 @@ static void aircable_send(struct usb_serial_port *port)
229 buf[1] = TX_HEADER_1; 229 buf[1] = TX_HEADER_1;
230 dbuf = (__le16 *)&buf[2]; 230 dbuf = (__le16 *)&buf[2];
231 *dbuf = cpu_to_le16((u16)count); 231 *dbuf = cpu_to_le16((u16)count);
232 serial_buf_get(priv->tx_buf,buf + HCI_HEADER_LENGTH, MAX_HCI_FRAMESIZE); 232 serial_buf_get(priv->tx_buf, buf + HCI_HEADER_LENGTH,
233 MAX_HCI_FRAMESIZE);
233 234
234 memcpy(port->write_urb->transfer_buffer, buf, 235 memcpy(port->write_urb->transfer_buffer, buf,
235 count + HCI_HEADER_LENGTH); 236 count + HCI_HEADER_LENGTH);
@@ -261,7 +262,7 @@ static void aircable_read(struct work_struct *work)
261 struct tty_struct *tty; 262 struct tty_struct *tty;
262 unsigned char *data; 263 unsigned char *data;
263 int count; 264 int count;
264 if (priv->rx_flags & THROTTLED){ 265 if (priv->rx_flags & THROTTLED) {
265 if (priv->rx_flags & ACTUALLY_THROTTLED) 266 if (priv->rx_flags & ACTUALLY_THROTTLED)
266 schedule_work(&priv->rx_work); 267 schedule_work(&priv->rx_work);
267 return; 268 return;
@@ -282,10 +283,10 @@ static void aircable_read(struct work_struct *work)
282 count = min(64, serial_buf_data_avail(priv->rx_buf)); 283 count = min(64, serial_buf_data_avail(priv->rx_buf));
283 284
284 if (count <= 0) 285 if (count <= 0)
285 return; //We have finished sending everything. 286 return; /* We have finished sending everything. */
286 287
287 tty_prepare_flip_string(tty, &data, count); 288 tty_prepare_flip_string(tty, &data, count);
288 if (!data){ 289 if (!data) {
289 err("%s- kzalloc(%d) failed.", __func__, count); 290 err("%s- kzalloc(%d) failed.", __func__, count);
290 return; 291 return;
291 } 292 }
@@ -304,9 +305,10 @@ static void aircable_read(struct work_struct *work)
304static int aircable_probe(struct usb_serial *serial, 305static int aircable_probe(struct usb_serial *serial,
305 const struct usb_device_id *id) 306 const struct usb_device_id *id)
306{ 307{
307 struct usb_host_interface *iface_desc = serial->interface->cur_altsetting; 308 struct usb_host_interface *iface_desc = serial->interface->
309 cur_altsetting;
308 struct usb_endpoint_descriptor *endpoint; 310 struct usb_endpoint_descriptor *endpoint;
309 int num_bulk_out=0; 311 int num_bulk_out = 0;
310 int i; 312 int i;
311 313
312 for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { 314 for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
@@ -325,13 +327,13 @@ static int aircable_probe(struct usb_serial *serial,
325 return 0; 327 return 0;
326} 328}
327 329
328static int aircable_attach (struct usb_serial *serial) 330static int aircable_attach(struct usb_serial *serial)
329{ 331{
330 struct usb_serial_port *port = serial->port[0]; 332 struct usb_serial_port *port = serial->port[0];
331 struct aircable_private *priv; 333 struct aircable_private *priv;
332 334
333 priv = kzalloc(sizeof(struct aircable_private), GFP_KERNEL); 335 priv = kzalloc(sizeof(struct aircable_private), GFP_KERNEL);
334 if (!priv){ 336 if (!priv) {
335 err("%s- kmalloc(%Zd) failed.", __func__, 337 err("%s- kmalloc(%Zd) failed.", __func__,
336 sizeof(struct aircable_private)); 338 sizeof(struct aircable_private));
337 return -ENOMEM; 339 return -ENOMEM;
@@ -392,7 +394,7 @@ static int aircable_write(struct usb_serial_port *port,
392 394
393 usb_serial_debug_data(debug, &port->dev, __func__, count, source); 395 usb_serial_debug_data(debug, &port->dev, __func__, count, source);
394 396
395 if (!count){ 397 if (!count) {
396 dbg("%s - write request of 0 bytes", __func__); 398 dbg("%s - write request of 0 bytes", __func__);
397 return count; 399 return count;
398 } 400 }
@@ -418,31 +420,31 @@ static void aircable_write_bulk_callback(struct urb *urb)
418 420
419 /* This has been taken from cypress_m8.c cypress_write_int_callback */ 421 /* This has been taken from cypress_m8.c cypress_write_int_callback */
420 switch (status) { 422 switch (status) {
421 case 0: 423 case 0:
422 /* success */ 424 /* success */
423 break; 425 break;
424 case -ECONNRESET: 426 case -ECONNRESET:
425 case -ENOENT: 427 case -ENOENT:
426 case -ESHUTDOWN: 428 case -ESHUTDOWN:
427 /* this urb is terminated, clean up */ 429 /* this urb is terminated, clean up */
428 dbg("%s - urb shutting down with status: %d", 430 dbg("%s - urb shutting down with status: %d",
429 __func__, status); 431 __func__, status);
430 port->write_urb_busy = 0; 432 port->write_urb_busy = 0;
433 return;
434 default:
435 /* error in the urb, so we have to resubmit it */
436 dbg("%s - Overflow in write", __func__);
437 dbg("%s - nonzero write bulk status received: %d",
438 __func__, status);
439 port->write_urb->transfer_buffer_length = 1;
440 port->write_urb->dev = port->serial->dev;
441 result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
442 if (result)
443 dev_err(&urb->dev->dev,
444 "%s - failed resubmitting write urb, error %d\n",
445 __func__, result);
446 else
431 return; 447 return;
432 default:
433 /* error in the urb, so we have to resubmit it */
434 dbg("%s - Overflow in write", __func__);
435 dbg("%s - nonzero write bulk status received: %d",
436 __func__, status);
437 port->write_urb->transfer_buffer_length = 1;
438 port->write_urb->dev = port->serial->dev;
439 result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
440 if (result)
441 dev_err(&urb->dev->dev,
442 "%s - failed resubmitting write urb, error %d\n",
443 __func__, result);
444 else
445 return;
446 } 448 }
447 449
448 port->write_urb_busy = 0; 450 port->write_urb_busy = 0;
@@ -472,11 +474,11 @@ static void aircable_read_bulk_callback(struct urb *urb)
472 dbg("%s - caught -EPROTO, resubmitting the urb", 474 dbg("%s - caught -EPROTO, resubmitting the urb",
473 __func__); 475 __func__);
474 usb_fill_bulk_urb(port->read_urb, port->serial->dev, 476 usb_fill_bulk_urb(port->read_urb, port->serial->dev,
475 usb_rcvbulkpipe(port->serial->dev, 477 usb_rcvbulkpipe(port->serial->dev,
476 port->bulk_in_endpointAddress), 478 port->bulk_in_endpointAddress),
477 port->read_urb->transfer_buffer, 479 port->read_urb->transfer_buffer,
478 port->read_urb->transfer_buffer_length, 480 port->read_urb->transfer_buffer_length,
479 aircable_read_bulk_callback, port); 481 aircable_read_bulk_callback, port);
480 482
481 result = usb_submit_urb(urb, GFP_ATOMIC); 483 result = usb_submit_urb(urb, GFP_ATOMIC);
482 if (result) 484 if (result)
@@ -490,7 +492,7 @@ static void aircable_read_bulk_callback(struct urb *urb)
490 } 492 }
491 493
492 usb_serial_debug_data(debug, &port->dev, __func__, 494 usb_serial_debug_data(debug, &port->dev, __func__,
493 urb->actual_length,urb->transfer_buffer); 495 urb->actual_length, urb->transfer_buffer);
494 496
495 tty = port->tty; 497 tty = port->tty;
496 if (tty && urb->actual_length) { 498 if (tty && urb->actual_length) {
@@ -507,9 +509,9 @@ static void aircable_read_bulk_callback(struct urb *urb)
507 no_packages = urb->actual_length / (HCI_COMPLETE_FRAME); 509 no_packages = urb->actual_length / (HCI_COMPLETE_FRAME);
508 510
509 if (urb->actual_length % HCI_COMPLETE_FRAME != 0) 511 if (urb->actual_length % HCI_COMPLETE_FRAME != 0)
510 no_packages+=1; 512 no_packages++;
511 513
512 for (i = 0; i < no_packages ;i++) { 514 for (i = 0; i < no_packages; i++) {
513 if (remaining > (HCI_COMPLETE_FRAME)) 515 if (remaining > (HCI_COMPLETE_FRAME))
514 package_length = HCI_COMPLETE_FRAME; 516 package_length = HCI_COMPLETE_FRAME;
515 else 517 else
@@ -529,7 +531,7 @@ static void aircable_read_bulk_callback(struct urb *urb)
529 if (port->open_count) { 531 if (port->open_count) {
530 usb_fill_bulk_urb(port->read_urb, port->serial->dev, 532 usb_fill_bulk_urb(port->read_urb, port->serial->dev,
531 usb_rcvbulkpipe(port->serial->dev, 533 usb_rcvbulkpipe(port->serial->dev,
532 port->bulk_in_endpointAddress), 534 port->bulk_in_endpointAddress),
533 port->read_urb->transfer_buffer, 535 port->read_urb->transfer_buffer,
534 port->read_urb->transfer_buffer_length, 536 port->read_urb->transfer_buffer_length,
535 aircable_read_bulk_callback, port); 537 aircable_read_bulk_callback, port);
@@ -602,7 +604,7 @@ static struct usb_serial_driver aircable_device = {
602 .unthrottle = aircable_unthrottle, 604 .unthrottle = aircable_unthrottle,
603}; 605};
604 606
605static int __init aircable_init (void) 607static int __init aircable_init(void)
606{ 608{
607 int retval; 609 int retval;
608 retval = usb_serial_register(&aircable_device); 610 retval = usb_serial_register(&aircable_device);
@@ -619,7 +621,7 @@ failed_usb_register:
619 return retval; 621 return retval;
620} 622}
621 623
622static void __exit aircable_exit (void) 624static void __exit aircable_exit(void)
623{ 625{
624 usb_deregister(&aircable_driver); 626 usb_deregister(&aircable_driver);
625 usb_serial_deregister(&aircable_device); 627 usb_serial_deregister(&aircable_device);
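
The serial_buf_* helpers touched above are thin wrappers around the circular-buffer macros from <linux/circ_buf.h>: CIRC_CNT(head, tail, size) returns the number of bytes queued, and it relies on the buffer size being a power of two, which is why the indices above are masked with AIRCABLE_BUF_SIZE-1. A minimal userspace sketch of the same arithmetic, for illustration only:

#include <stdio.h>

/* same arithmetic as the kernel's CIRC_CNT(): bytes available to read
 * in a ring buffer whose size is a power of two */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
	unsigned int head = 5, tail = 300, size = 1024;	/* head has wrapped */

	printf("%u bytes queued\n", CIRC_CNT(head, tail, size));	/* prints 729 */
	return 0;
}
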
diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c
index 725b6b94c274..0798c14ce787 100644
--- a/drivers/usb/serial/airprime.c
+++ b/drivers/usb/serial/airprime.c
@@ -68,8 +68,9 @@ static int airprime_send_setup(struct usb_serial_port *port)
68 val |= 0x02; 68 val |= 0x02;
69 69
70 return usb_control_msg(serial->dev, 70 return usb_control_msg(serial->dev,
71 usb_rcvctrlpipe(serial->dev, 0), 71 usb_rcvctrlpipe(serial->dev, 0),
72 0x22,0x21,val,0,NULL,0,USB_CTRL_SET_TIMEOUT); 72 0x22, 0x21, val, 0, NULL, 0,
73 USB_CTRL_SET_TIMEOUT);
73 } 74 }
74 75
75 return 0; 76 return 0;
@@ -90,17 +91,19 @@ static void airprime_read_bulk_callback(struct urb *urb)
90 __func__, status); 91 __func__, status);
91 return; 92 return;
92 } 93 }
93 usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length, data); 94 usb_serial_debug_data(debug, &port->dev, __func__,
95 urb->actual_length, data);
94 96
95 tty = port->tty; 97 tty = port->tty;
96 if (tty && urb->actual_length) { 98 if (tty && urb->actual_length) {
97 tty_insert_flip_string (tty, data, urb->actual_length); 99 tty_insert_flip_string(tty, data, urb->actual_length);
98 tty_flip_buffer_push (tty); 100 tty_flip_buffer_push(tty);
99 } 101 }
100 102
101 result = usb_submit_urb (urb, GFP_ATOMIC); 103 result = usb_submit_urb(urb, GFP_ATOMIC);
102 if (result) 104 if (result)
103 dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", 105 dev_err(&port->dev,
106 "%s - failed resubmitting read urb, error %d\n",
104 __func__, result); 107 __func__, result);
105 return; 108 return;
106} 109}
@@ -115,7 +118,7 @@ static void airprime_write_bulk_callback(struct urb *urb)
115 dbg("%s - port %d", __func__, port->number); 118 dbg("%s - port %d", __func__, port->number);
116 119
117 /* free up the transfer buffer, as usb_free_urb() does not do this */ 120 /* free up the transfer buffer, as usb_free_urb() does not do this */
118 kfree (urb->transfer_buffer); 121 kfree(urb->transfer_buffer);
119 122
120 if (status) 123 if (status)
121 dbg("%s - nonzero write bulk status received: %d", 124 dbg("%s - nonzero write bulk status received: %d",
@@ -171,7 +174,7 @@ static int airprime_open(struct usb_serial_port *port, struct file *filp)
171 } 174 }
172 usb_fill_bulk_urb(urb, serial->dev, 175 usb_fill_bulk_urb(urb, serial->dev,
173 usb_rcvbulkpipe(serial->dev, 176 usb_rcvbulkpipe(serial->dev,
174 port->bulk_out_endpointAddress), 177 port->bulk_out_endpointAddress),
175 buffer, buffer_size, 178 buffer, buffer_size,
176 airprime_read_bulk_callback, port); 179 airprime_read_bulk_callback, port);
177 result = usb_submit_urb(urb, GFP_KERNEL); 180 result = usb_submit_urb(urb, GFP_KERNEL);
@@ -183,7 +186,8 @@ static int airprime_open(struct usb_serial_port *port, struct file *filp)
183 __func__, i, port->number, result); 186 __func__, i, port->number, result);
184 goto errout; 187 goto errout;
185 } 188 }
186 /* remember this urb so we can kill it when the port is closed */ 189 /* remember this urb so we can kill it when the
190 port is closed */
187 priv->read_urbp[i] = urb; 191 priv->read_urbp[i] = urb;
188 } 192 }
189 193
@@ -192,22 +196,22 @@ static int airprime_open(struct usb_serial_port *port, struct file *filp)
192 goto out; 196 goto out;
193 197
194 errout: 198 errout:
195 /* some error happened, cancel any submitted urbs and clean up anything that 199 /* some error happened, cancel any submitted urbs and clean up
196 got allocated successfully */ 200 anything that got allocated successfully */
197 201
198 while (i-- != 0) { 202 while (i-- != 0) {
199 urb = priv->read_urbp[i]; 203 urb = priv->read_urbp[i];
200 buffer = urb->transfer_buffer; 204 buffer = urb->transfer_buffer;
201 usb_kill_urb (urb); 205 usb_kill_urb(urb);
202 usb_free_urb (urb); 206 usb_free_urb(urb);
203 kfree (buffer); 207 kfree(buffer);
204 } 208 }
205 209
206 out: 210 out:
207 return result; 211 return result;
208} 212}
209 213
210static void airprime_close(struct usb_serial_port *port, struct file * filp) 214static void airprime_close(struct usb_serial_port *port, struct file *filp)
211{ 215{
212 struct airprime_private *priv = usb_get_serial_port_data(port); 216 struct airprime_private *priv = usb_get_serial_port_data(port);
213 int i; 217 int i;
@@ -220,16 +224,16 @@ static void airprime_close(struct usb_serial_port *port, struct file * filp)
220 mutex_lock(&port->serial->disc_mutex); 224 mutex_lock(&port->serial->disc_mutex);
221 if (!port->serial->disconnected) 225 if (!port->serial->disconnected)
222 airprime_send_setup(port); 226 airprime_send_setup(port);
223 mutex_lock(&port->serial->disc_mutex); 227 mutex_unlock(&port->serial->disc_mutex);
224 228
225 for (i = 0; i < NUM_READ_URBS; ++i) { 229 for (i = 0; i < NUM_READ_URBS; ++i) {
226 usb_kill_urb (priv->read_urbp[i]); 230 usb_kill_urb(priv->read_urbp[i]);
227 kfree (priv->read_urbp[i]->transfer_buffer); 231 kfree(priv->read_urbp[i]->transfer_buffer);
228 usb_free_urb (priv->read_urbp[i]); 232 usb_free_urb(priv->read_urbp[i]);
229 } 233 }
230 234
231 /* free up private structure */ 235 /* free up private structure */
232 kfree (priv); 236 kfree(priv);
233 usb_set_serial_port_data(port, NULL); 237 usb_set_serial_port_data(port, NULL);
234} 238}
235 239
@@ -259,10 +263,10 @@ static int airprime_write(struct usb_serial_port *port,
259 urb = usb_alloc_urb(0, GFP_ATOMIC); 263 urb = usb_alloc_urb(0, GFP_ATOMIC);
260 if (!urb) { 264 if (!urb) {
261 dev_err(&port->dev, "no more free urbs\n"); 265 dev_err(&port->dev, "no more free urbs\n");
262 kfree (buffer); 266 kfree(buffer);
263 return -ENOMEM; 267 return -ENOMEM;
264 } 268 }
265 memcpy (buffer, buf, count); 269 memcpy(buffer, buf, count);
266 270
267 usb_serial_debug_data(debug, &port->dev, __func__, count, buffer); 271 usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);
268 272
@@ -279,7 +283,7 @@ static int airprime_write(struct usb_serial_port *port,
279 "%s - usb_submit_urb(write bulk) failed with status = %d\n", 283 "%s - usb_submit_urb(write bulk) failed with status = %d\n",
280 __func__, status); 284 __func__, status);
281 count = status; 285 count = status;
282 kfree (buffer); 286 kfree(buffer);
283 } else { 287 } else {
284 spin_lock_irqsave(&priv->lock, flags); 288 spin_lock_irqsave(&priv->lock, flags);
285 ++priv->outstanding_urbs; 289 ++priv->outstanding_urbs;
@@ -287,7 +291,7 @@ static int airprime_write(struct usb_serial_port *port,
287 } 291 }
288 /* we are done with this urb, so let the host driver 292 /* we are done with this urb, so let the host driver
289 * really free it when it is finished with it */ 293 * really free it when it is finished with it */
290 usb_free_urb (urb); 294 usb_free_urb(urb);
291 return count; 295 return count;
292} 296}
293 297
@@ -315,8 +319,10 @@ static int __init airprime_init(void)
315{ 319{
316 int retval; 320 int retval;
317 321
318 airprime_device.num_ports = 322 airprime_device.num_ports = endpoints;
319 (endpoints > 0 && endpoints <= MAX_BULK_EPS) ? endpoints : NUM_BULK_EPS; 323 if (endpoints < 0 || endpoints >= MAX_BULK_EPS)
324 airprime_device.num_ports = NUM_BULK_EPS;
325
320 retval = usb_serial_register(&airprime_device); 326 retval = usb_serial_register(&airprime_device);
321 if (retval) 327 if (retval)
322 return retval; 328 return retval;
@@ -341,6 +347,7 @@ MODULE_LICENSE("GPL");
341module_param(debug, bool, S_IRUGO | S_IWUSR); 347module_param(debug, bool, S_IRUGO | S_IWUSR);
342MODULE_PARM_DESC(debug, "Debug enabled"); 348MODULE_PARM_DESC(debug, "Debug enabled");
343module_param(buffer_size, int, 0); 349module_param(buffer_size, int, 0);
344MODULE_PARM_DESC(buffer_size, "Size of the transfer buffers in bytes (default 4096)"); 350MODULE_PARM_DESC(buffer_size,
351 "Size of the transfer buffers in bytes (default 4096)");
345module_param(endpoints, int, 0); 352module_param(endpoints, int, 0);
346MODULE_PARM_DESC(endpoints, "Number of bulk EPs to configure (default 3)"); 353MODULE_PARM_DESC(endpoints, "Number of bulk EPs to configure (default 3)");
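The airprime_open() error path shown above unwinds with a reverse `while (i-- != 0)` loop, releasing only the URBs and buffers that were actually set up before the failure, newest first. The following is a standalone userspace sketch of that unwind idiom only; it is not part of the patch, and the function name and buffer count are purely illustrative.

#include <stdio.h>
#include <stdlib.h>

#define NUM_BUFFERS 3

/* Allocate every buffer, or free the ones already allocated and fail. */
static int setup_all(char *bufs[NUM_BUFFERS], size_t size)
{
        int i;

        for (i = 0; i < NUM_BUFFERS; i++) {
                bufs[i] = malloc(size);
                if (!bufs[i])
                        goto errout;
        }
        return 0;

errout:
        /* unwind: release whatever was set up, newest first */
        while (i-- != 0) {
                free(bufs[i]);
                bufs[i] = NULL;
        }
        return -1;
}

int main(void)
{
        char *bufs[NUM_BUFFERS];
        int i;

        if (setup_all(bufs, 4096) == 0) {
                printf("all %d buffers allocated\n", NUM_BUFFERS);
                for (i = 0; i < NUM_BUFFERS; i++)
                        free(bufs[i]);
        }
        return 0;
}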
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 599ab2e548a7..77895c8f8f31 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -24,7 +24,7 @@
24#include <linux/usb.h> 24#include <linux/usb.h>
25#include <linux/usb/serial.h> 25#include <linux/usb/serial.h>
26#include <linux/serial.h> 26#include <linux/serial.h>
27#include <asm/uaccess.h> 27#include <linux/uaccess.h>
28 28
29 29
30static int debug; 30static int debug;
@@ -246,29 +246,29 @@ static void ark3116_set_termios(struct usb_serial_port *port,
246 baud = tty_get_baud_rate(port->tty); 246 baud = tty_get_baud_rate(port->tty);
247 247
248 switch (baud) { 248 switch (baud) {
249 case 75: 249 case 75:
250 case 150: 250 case 150:
251 case 300: 251 case 300:
252 case 600: 252 case 600:
253 case 1200: 253 case 1200:
254 case 1800: 254 case 1800:
255 case 2400: 255 case 2400:
256 case 4800: 256 case 4800:
257 case 9600: 257 case 9600:
258 case 19200: 258 case 19200:
259 case 38400: 259 case 38400:
260 case 57600: 260 case 57600:
261 case 115200: 261 case 115200:
262 case 230400: 262 case 230400:
263 case 460800: 263 case 460800:
264 /* Report the resulting rate back to the caller */ 264 /* Report the resulting rate back to the caller */
265 tty_encode_baud_rate(port->tty, baud, baud); 265 tty_encode_baud_rate(port->tty, baud, baud);
266 break; 266 break;
267 /* set 9600 as default (if given baudrate is invalid for example) */ 267 /* set 9600 as default (if given baudrate is invalid for example) */
268 default: 268 default:
269 tty_encode_baud_rate(port->tty, 9600, 9600); 269 tty_encode_baud_rate(port->tty, 9600, 9600);
270 case 0: 270 case 0:
271 baud = 9600; 271 baud = 9600;
272 } 272 }
273 273
274 /* 274 /*
@@ -380,19 +380,19 @@ static int ark3116_ioctl(struct usb_serial_port *port, struct file *file,
380 switch (cmd) { 380 switch (cmd) {
381 case TIOCGSERIAL: 381 case TIOCGSERIAL:
382 /* XXX: Some of these values are probably wrong. */ 382 /* XXX: Some of these values are probably wrong. */
383 memset(&serstruct, 0, sizeof (serstruct)); 383 memset(&serstruct, 0, sizeof(serstruct));
384 serstruct.type = PORT_16654; 384 serstruct.type = PORT_16654;
385 serstruct.line = port->serial->minor; 385 serstruct.line = port->serial->minor;
386 serstruct.port = port->number; 386 serstruct.port = port->number;
387 serstruct.custom_divisor = 0; 387 serstruct.custom_divisor = 0;
388 serstruct.baud_base = 460800; 388 serstruct.baud_base = 460800;
389 389
390 if (copy_to_user(user_arg, &serstruct, sizeof (serstruct))) 390 if (copy_to_user(user_arg, &serstruct, sizeof(serstruct)))
391 return -EFAULT; 391 return -EFAULT;
392 392
393 return 0; 393 return 0;
394 case TIOCSSERIAL: 394 case TIOCSSERIAL:
395 if (copy_from_user(&serstruct, user_arg, sizeof (serstruct))) 395 if (copy_from_user(&serstruct, user_arg, sizeof(serstruct)))
396 return -EFAULT; 396 return -EFAULT;
397 return 0; 397 return 0;
398 default: 398 default:
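The ark3116_set_termios() hunk above whitelists the supported baud rates in a switch whose default label deliberately sits just before case 0, so both a zero rate and an unsupported rate end up forced to 9600. A standalone sketch of that pattern follows; it is not driver code, and the function name is made up.

#include <stdio.h>

static int ark_pick_baud(int requested)
{
        switch (requested) {
        case 75:     case 150:    case 300:    case 600:
        case 1200:   case 1800:   case 2400:   case 4800:
        case 9600:   case 19200:  case 38400:  case 57600:
        case 115200: case 230400: case 460800:
                return requested;       /* supported as-is */
        default:                        /* unsupported rate */
        case 0:                         /* no rate given */
                return 9600;            /* sane fallback */
        }
}

int main(void)
{
        printf("%d %d %d\n", ark_pick_baud(115200), ark_pick_baud(0),
               ark_pick_baud(12345));   /* prints: 115200 9600 9600 */
        return 0;
}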
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index d947d955bceb..ba28fdc9ccd2 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -130,7 +130,7 @@ static int ch341_get_status(struct usb_device *dev)
130 return -ENOMEM; 130 return -ENOMEM;
131 131
132 r = ch341_control_in(dev, 0x95, 0x0706, 0, buffer, size); 132 r = ch341_control_in(dev, 0x95, 0x0706, 0, buffer, size);
133 if ( r < 0) 133 if (r < 0)
134 goto out; 134 goto out;
135 135
136 /* Not having the datasheet for the CH341, we ignore the bytes returned 136 /* Not having the datasheet for the CH341, we ignore the bytes returned
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index d17d1645714f..04a56f300ea6 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1421,8 +1421,7 @@ static void digi_close(struct usb_serial_port *port, struct file *filp)
1421 tty_wait_until_sent(tty, DIGI_CLOSE_TIMEOUT); 1421 tty_wait_until_sent(tty, DIGI_CLOSE_TIMEOUT);
1422 1422
1423 /* flush driver and line discipline buffers */ 1423 /* flush driver and line discipline buffers */
1424 if (tty->driver->flush_buffer) 1424 tty_driver_flush_buffer(tty);
1425 tty->driver->flush_buffer(tty);
1426 tty_ldisc_flush(tty); 1425 tty_ldisc_flush(tty);
1427 1426
1428 if (port->serial->dev) { 1427 if (port->serial->dev) {
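The digi_close() hunk above (and the whiteheat_close() hunk further down) replaces the open-coded `if (tty->driver->flush_buffer) tty->driver->flush_buffer(tty)` with the tty_driver_flush_buffer() helper, so the check for the optional method lives in one place rather than in every caller. A minimal standalone model of that kind of wrapper, with entirely made-up names, looks like this:

#include <stdio.h>
#include <stddef.h>

struct driver_ops {
        void (*flush_buffer)(void *priv);       /* optional method */
};

struct fake_dev {
        struct driver_ops *ops;
        void *priv;
};

/* One helper owns the "is the optional method implemented?" check. */
static void dev_flush_buffer(struct fake_dev *dev)
{
        if (dev->ops && dev->ops->flush_buffer)
                dev->ops->flush_buffer(dev->priv);
}

static void my_flush(void *priv)
{
        printf("flushed %p\n", priv);
}

int main(void)
{
        struct driver_ops with = { .flush_buffer = my_flush };
        struct driver_ops without = { .flush_buffer = NULL };
        struct fake_dev a = { .ops = &with }, b = { .ops = &without };

        dev_flush_buffer(&a);   /* calls my_flush() */
        dev_flush_buffer(&b);   /* silently does nothing */
        return 0;
}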
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index c7329f43d9c9..5b349ece7247 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -133,6 +133,14 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
133static struct usb_device_id id_table_combined [] = { 133static struct usb_device_id id_table_combined [] = {
134 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 134 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
135 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 135 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
136 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
137 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
138 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
139 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_3_PID) },
140 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_4_PID) },
141 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) },
142 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) },
143 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) },
136 { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) }, 144 { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) },
137 { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, 145 { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) },
138 { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, 146 { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) },
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 6da539ede0ee..504edf8c3a3f 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -40,6 +40,17 @@
40/* AlphaMicro Components AMC-232USB01 device */ 40/* AlphaMicro Components AMC-232USB01 device */
41#define FTDI_AMC232_PID 0xFF00 /* Product Id */ 41#define FTDI_AMC232_PID 0xFF00 /* Product Id */
42 42
43/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
44/* the VID is the standard ftdi vid (FTDI_VID) */
45#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
46#define FTDI_SCS_DEVICE_1_PID 0xD011 /* SCS Tracker / DSP TNC */
47#define FTDI_SCS_DEVICE_2_PID 0xD012
48#define FTDI_SCS_DEVICE_3_PID 0xD013
49#define FTDI_SCS_DEVICE_4_PID 0xD014
50#define FTDI_SCS_DEVICE_5_PID 0xD015
51#define FTDI_SCS_DEVICE_6_PID 0xD016
52#define FTDI_SCS_DEVICE_7_PID 0xD017
53
43/* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */ 54/* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */
44#define FTDI_ACTZWAVE_PID 0xF2D0 55#define FTDI_ACTZWAVE_PID 0xF2D0
45 56
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 6bcb82d3911a..78f2f6db494d 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1713,7 +1713,7 @@ static int mos7840_tiocmset(struct usb_serial_port *port, struct file *file,
1713{ 1713{
1714 struct moschip_port *mos7840_port; 1714 struct moschip_port *mos7840_port;
1715 unsigned int mcr; 1715 unsigned int mcr;
1716 unsigned int status; 1716 int status;
1717 1717
1718 dbg("%s - port %d", __func__, port->number); 1718 dbg("%s - port %d", __func__, port->number);
1719 1719
@@ -1740,11 +1740,10 @@ static int mos7840_tiocmset(struct usb_serial_port *port, struct file *file,
1740 1740
1741 mos7840_port->shadowMCR = mcr; 1741 mos7840_port->shadowMCR = mcr;
1742 1742
1743 status = 0;
1744 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr); 1743 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
1745 if (status < 0) { 1744 if (status < 0) {
1746 dbg("setting MODEM_CONTROL_REGISTER Failed\n"); 1745 dbg("setting MODEM_CONTROL_REGISTER Failed\n");
1747 return -1; 1746 return status;
1748 } 1747 }
1749 1748
1750 return 0; 1749 return 0;
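The mos7840_tiocmset() hunk above makes `status` a signed int and returns it directly, so the caller sees the real negative error code from mos7840_set_uart_reg() instead of a flat -1. A standalone sketch of that propagation pattern, with a made-up register helper rather than the driver's:

#include <stdio.h>
#include <errno.h>

/* Pretend register write: returns 0 or a negative errno-style code. */
static int write_reg(unsigned int reg, unsigned int val)
{
        (void)val;
        if (reg > 0x0f)
                return -EINVAL;
        return 0;
}

static int set_modem_control(unsigned int mcr)
{
        int status = write_reg(0x04, mcr);

        if (status < 0)
                return status;          /* hand back -EINVAL, -EIO, ... */
        return 0;
}

int main(void)
{
        printf("%d\n", set_modem_control(0x0b));        /* prints 0 */
        return 0;
}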
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index a9934a3f9845..0cb0d77dc429 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -296,16 +296,14 @@ static int serial_write (struct tty_struct * tty, const unsigned char *buf, int
296 struct usb_serial_port *port = tty->driver_data; 296 struct usb_serial_port *port = tty->driver_data;
297 int retval = -ENODEV; 297 int retval = -ENODEV;
298 298
299 if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED) 299 if (port->serial->dev->state == USB_STATE_NOTATTACHED)
300 goto exit; 300 goto exit;
301 301
302 dbg("%s - port %d, %d byte(s)", __func__, port->number, count); 302 dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
303 303
304 if (!port->open_count) { 304 /* open_count is managed under the mutex lock for the tty so cannot
305 retval = -EINVAL; 305 drop to zero until after the last close completes */
306 dbg("%s - port not opened", __func__); 306 WARN_ON(!port->open_count);
307 goto exit;
308 }
309 307
310 /* pass on to the driver specific version of this function */ 308 /* pass on to the driver specific version of this function */
311 retval = port->serial->type->write(port, buf, count); 309 retval = port->serial->type->write(port, buf, count);
@@ -317,61 +315,28 @@ exit:
317static int serial_write_room (struct tty_struct *tty) 315static int serial_write_room (struct tty_struct *tty)
318{ 316{
319 struct usb_serial_port *port = tty->driver_data; 317 struct usb_serial_port *port = tty->driver_data;
320 int retval = -ENODEV;
321
322 if (!port)
323 goto exit;
324
325 dbg("%s - port %d", __func__, port->number); 318 dbg("%s - port %d", __func__, port->number);
326 319 WARN_ON(!port->open_count);
327 if (!port->open_count) {
328 dbg("%s - port not open", __func__);
329 goto exit;
330 }
331
332 /* pass on to the driver specific version of this function */ 320 /* pass on to the driver specific version of this function */
333 retval = port->serial->type->write_room(port); 321 return port->serial->type->write_room(port);
334
335exit:
336 return retval;
337} 322}
338 323
339static int serial_chars_in_buffer (struct tty_struct *tty) 324static int serial_chars_in_buffer (struct tty_struct *tty)
340{ 325{
341 struct usb_serial_port *port = tty->driver_data; 326 struct usb_serial_port *port = tty->driver_data;
342 int retval = -ENODEV;
343
344 if (!port)
345 goto exit;
346
347 dbg("%s = port %d", __func__, port->number); 327 dbg("%s = port %d", __func__, port->number);
348 328
349 if (!port->open_count) { 329 WARN_ON(!port->open_count);
350 dbg("%s - port not open", __func__);
351 goto exit;
352 }
353
354 /* pass on to the driver specific version of this function */ 330 /* pass on to the driver specific version of this function */
355 retval = port->serial->type->chars_in_buffer(port); 331 return port->serial->type->chars_in_buffer(port);
356
357exit:
358 return retval;
359} 332}
360 333
361static void serial_throttle (struct tty_struct * tty) 334static void serial_throttle (struct tty_struct * tty)
362{ 335{
363 struct usb_serial_port *port = tty->driver_data; 336 struct usb_serial_port *port = tty->driver_data;
364
365 if (!port)
366 return;
367
368 dbg("%s - port %d", __func__, port->number); 337 dbg("%s - port %d", __func__, port->number);
369 338
370 if (!port->open_count) { 339 WARN_ON(!port->open_count);
371 dbg ("%s - port not open", __func__);
372 return;
373 }
374
375 /* pass on to the driver specific version of this function */ 340 /* pass on to the driver specific version of this function */
376 if (port->serial->type->throttle) 341 if (port->serial->type->throttle)
377 port->serial->type->throttle(port); 342 port->serial->type->throttle(port);
@@ -380,17 +345,9 @@ static void serial_throttle (struct tty_struct * tty)
380static void serial_unthrottle (struct tty_struct * tty) 345static void serial_unthrottle (struct tty_struct * tty)
381{ 346{
382 struct usb_serial_port *port = tty->driver_data; 347 struct usb_serial_port *port = tty->driver_data;
383
384 if (!port)
385 return;
386
387 dbg("%s - port %d", __func__, port->number); 348 dbg("%s - port %d", __func__, port->number);
388 349
389 if (!port->open_count) { 350 WARN_ON(!port->open_count);
390 dbg("%s - port not open", __func__);
391 return;
392 }
393
394 /* pass on to the driver specific version of this function */ 351 /* pass on to the driver specific version of this function */
395 if (port->serial->type->unthrottle) 352 if (port->serial->type->unthrottle)
396 port->serial->type->unthrottle(port); 353 port->serial->type->unthrottle(port);
@@ -401,42 +358,27 @@ static int serial_ioctl (struct tty_struct *tty, struct file * file, unsigned in
401 struct usb_serial_port *port = tty->driver_data; 358 struct usb_serial_port *port = tty->driver_data;
402 int retval = -ENODEV; 359 int retval = -ENODEV;
403 360
404 lock_kernel();
405 if (!port)
406 goto exit;
407
408 dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd); 361 dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd);
409 362
410 /* Caution - port->open_count is BKL protected */ 363 WARN_ON(!port->open_count);
411 if (!port->open_count) {
412 dbg ("%s - port not open", __func__);
413 goto exit;
414 }
415 364
416 /* pass on to the driver specific version of this function if it is available */ 365 /* pass on to the driver specific version of this function if it is available */
417 if (port->serial->type->ioctl) 366 if (port->serial->type->ioctl) {
367 lock_kernel();
418 retval = port->serial->type->ioctl(port, file, cmd, arg); 368 retval = port->serial->type->ioctl(port, file, cmd, arg);
369 unlock_kernel();
370 }
419 else 371 else
420 retval = -ENOIOCTLCMD; 372 retval = -ENOIOCTLCMD;
421exit:
422 unlock_kernel();
423 return retval; 373 return retval;
424} 374}
425 375
426static void serial_set_termios (struct tty_struct *tty, struct ktermios * old) 376static void serial_set_termios (struct tty_struct *tty, struct ktermios * old)
427{ 377{
428 struct usb_serial_port *port = tty->driver_data; 378 struct usb_serial_port *port = tty->driver_data;
429
430 if (!port)
431 return;
432
433 dbg("%s - port %d", __func__, port->number); 379 dbg("%s - port %d", __func__, port->number);
434 380
435 if (!port->open_count) { 381 WARN_ON(!port->open_count);
436 dbg("%s - port not open", __func__);
437 return;
438 }
439
440 /* pass on to the driver specific version of this function if it is available */ 382 /* pass on to the driver specific version of this function if it is available */
441 if (port->serial->type->set_termios) 383 if (port->serial->type->set_termios)
442 port->serial->type->set_termios(port, old); 384 port->serial->type->set_termios(port, old);
@@ -448,24 +390,15 @@ static void serial_break (struct tty_struct *tty, int break_state)
448{ 390{
449 struct usb_serial_port *port = tty->driver_data; 391 struct usb_serial_port *port = tty->driver_data;
450 392
451 lock_kernel();
452 if (!port) {
453 unlock_kernel();
454 return;
455 }
456
457 dbg("%s - port %d", __func__, port->number); 393 dbg("%s - port %d", __func__, port->number);
458 394
459 if (!port->open_count) { 395 WARN_ON(!port->open_count);
460 dbg("%s - port not open", __func__);
461 unlock_kernel();
462 return;
463 }
464
465 /* pass on to the driver specific version of this function if it is available */ 396 /* pass on to the driver specific version of this function if it is available */
466 if (port->serial->type->break_ctl) 397 if (port->serial->type->break_ctl) {
398 lock_kernel();
467 port->serial->type->break_ctl(port, break_state); 399 port->serial->type->break_ctl(port, break_state);
468 unlock_kernel(); 400 unlock_kernel();
401 }
469} 402}
470 403
471static int serial_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data) 404static int serial_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data)
@@ -519,19 +452,11 @@ static int serial_tiocmget (struct tty_struct *tty, struct file *file)
519{ 452{
520 struct usb_serial_port *port = tty->driver_data; 453 struct usb_serial_port *port = tty->driver_data;
521 454
522 if (!port)
523 return -ENODEV;
524
525 dbg("%s - port %d", __func__, port->number); 455 dbg("%s - port %d", __func__, port->number);
526 456
527 if (!port->open_count) { 457 WARN_ON(!port->open_count);
528 dbg("%s - port not open", __func__);
529 return -ENODEV;
530 }
531
532 if (port->serial->type->tiocmget) 458 if (port->serial->type->tiocmget)
533 return port->serial->type->tiocmget(port, file); 459 return port->serial->type->tiocmget(port, file);
534
535 return -EINVAL; 460 return -EINVAL;
536} 461}
537 462
@@ -540,19 +465,11 @@ static int serial_tiocmset (struct tty_struct *tty, struct file *file,
540{ 465{
541 struct usb_serial_port *port = tty->driver_data; 466 struct usb_serial_port *port = tty->driver_data;
542 467
543 if (!port)
544 return -ENODEV;
545
546 dbg("%s - port %d", __func__, port->number); 468 dbg("%s - port %d", __func__, port->number);
547 469
548 if (!port->open_count) { 470 WARN_ON(!port->open_count);
549 dbg("%s - port not open", __func__);
550 return -ENODEV;
551 }
552
553 if (port->serial->type->tiocmset) 471 if (port->serial->type->tiocmset)
554 return port->serial->type->tiocmset(port, file, set, clear); 472 return port->serial->type->tiocmset(port, file, set, clear);
555
556 return -EINVAL; 473 return -EINVAL;
557} 474}
558 475
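The usb-serial.c hunks above drop the per-entry-point `!port` and `!port->open_count` bail-outs in favour of WARN_ON(), on the stated reasoning that the tty layer keeps open_count nonzero while these calls are in flight, and they narrow lock_kernel()/unlock_kernel() to just the optional ioctl and break_ctl hooks. The following standalone sketch only models that "assert the invariant, lock only around the optional hook" shape, using a pthread mutex as a stand-in for the BKL and entirely made-up types:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

struct port {
        int open_count;
        int (*ioctl)(struct port *p, unsigned int cmd); /* optional hook */
};

static int fake_serial_ioctl(struct port *p, unsigned int cmd)
{
        int retval = -1;        /* stand-in for "no ioctl handler" */

        /* the caller is expected to guarantee the port is still open */
        assert(p->open_count > 0);

        if (p->ioctl) {
                pthread_mutex_lock(&big_lock);  /* lock only the hook */
                retval = p->ioctl(p, cmd);
                pthread_mutex_unlock(&big_lock);
        }
        return retval;
}

static int dummy_ioctl(struct port *p, unsigned int cmd)
{
        printf("cmd %#x on port %p\n", cmd, (void *)p);
        return 0;
}

int main(void)
{
        struct port p = { .open_count = 1, .ioctl = dummy_ioctl };

        return fake_serial_ioctl(&p, 0x5401) ? 1 : 0;
}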
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index e96bf8663ffc..f07e8a4c1f3d 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -673,15 +673,13 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp)
673 } 673 }
674*/ 674*/
675 675
676 if (port->tty->driver->flush_buffer) 676 tty_driver_flush_buffer(port->tty);
677 port->tty->driver->flush_buffer(port->tty);
678 tty_ldisc_flush(port->tty); 677 tty_ldisc_flush(port->tty);
679 678
680 firm_report_tx_done(port); 679 firm_report_tx_done(port);
681 680
682 firm_close(port); 681 firm_close(port);
683 682
684printk(KERN_ERR"Before processing rx_urbs_submitted.\n");
685 /* shutdown our bulk reads and writes */ 683 /* shutdown our bulk reads and writes */
686 mutex_lock(&info->deathwarrant); 684 mutex_lock(&info->deathwarrant);
687 spin_lock_irq(&info->lock); 685 spin_lock_irq(&info->lock);
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 0f6d234d699b..3d9249632ae1 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -123,7 +123,8 @@ config USB_STORAGE_ALAUDA
123 123
124config USB_STORAGE_ONETOUCH 124config USB_STORAGE_ONETOUCH
125 bool "Support OneTouch Button on Maxtor Hard Drives" 125 bool "Support OneTouch Button on Maxtor Hard Drives"
126 depends on USB_STORAGE && INPUT_EVDEV 126 depends on USB_STORAGE
127 depends on INPUT=y || INPUT=USB_STORAGE
127 help 128 help
128 Say Y here to include additional code to support the Maxtor OneTouch 129 Say Y here to include additional code to support the Maxtor OneTouch
129 USB hard drive's onetouch button. 130 USB hard drive's onetouch button.
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index d88824b3511c..898e67d30e56 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -46,7 +46,7 @@ void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
46 } 46 }
47 47
48 memcpy(save_cmnd, srb->cmnd, sizeof(save_cmnd)); 48 memcpy(save_cmnd, srb->cmnd, sizeof(save_cmnd));
49 memset(srb->cmnd, 0, sizeof(srb->cmnd)); 49 memset(srb->cmnd, 0, MAX_COMMAND_SIZE);
50 50
51 /* check if we support the command */ 51 /* check if we support the command */
52 if (save_cmnd[1] >> 5) /* MULTIPLE_COUNT */ 52 if (save_cmnd[1] >> 5) /* MULTIPLE_COUNT */
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 971d13dd5e65..3addcd8f827b 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -292,6 +292,7 @@ struct isd200_info {
292 292
293 /* maximum number of LUNs supported */ 293 /* maximum number of LUNs supported */
294 unsigned char MaxLUNs; 294 unsigned char MaxLUNs;
295 unsigned char cmnd[BLK_MAX_CDB];
295 struct scsi_cmnd srb; 296 struct scsi_cmnd srb;
296 struct scatterlist sg; 297 struct scatterlist sg;
297}; 298};
@@ -450,6 +451,7 @@ static int isd200_action( struct us_data *us, int action,
450 451
451 memset(&ata, 0, sizeof(ata)); 452 memset(&ata, 0, sizeof(ata));
452 memset(&srb_dev, 0, sizeof(srb_dev)); 453 memset(&srb_dev, 0, sizeof(srb_dev));
454 srb->cmnd = info->cmnd;
453 srb->device = &srb_dev; 455 srb->device = &srb_dev;
454 ++srb->serial_number; 456 ++srb->serial_number;
455 457
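The cypress_atacb.c and isd200.c hunks above appear to track a SCSI-layer change in which the command bytes are reached through a pointer, with the driver supplying its own cmnd[] storage; once that member is a pointer, `sizeof(srb->cmnd)` measures the pointer rather than the buffer, so an explicit length macro has to be used instead. A standalone illustration follows; MAX_COMMAND_SIZE here is a local stand-in value, not the kernel definition.

#include <stdio.h>
#include <string.h>

#define MAX_COMMAND_SIZE 16

struct cmd_with_array {
        unsigned char cmnd[MAX_COMMAND_SIZE];
};

struct cmd_with_pointer {
        unsigned char *cmnd;            /* caller supplies the buffer */
};

int main(void)
{
        unsigned char buf[MAX_COMMAND_SIZE];
        struct cmd_with_array a;
        struct cmd_with_pointer p = { .cmnd = buf };

        memset(a.cmnd, 0, sizeof(a.cmnd));      /* 16 bytes: correct */
        memset(p.cmnd, 0, sizeof(p.cmnd));      /* only 4 or 8 bytes! */
        memset(p.cmnd, 0, MAX_COMMAND_SIZE);    /* what the patch does */

        printf("sizeof(a.cmnd)=%zu sizeof(p.cmnd)=%zu\n",
               sizeof(a.cmnd), sizeof(p.cmnd));
        return 0;
}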
diff --git a/drivers/usb/storage/libusual.c b/drivers/usb/storage/libusual.c
index a28d49122e7a..d617e8ae6b00 100644
--- a/drivers/usb/storage/libusual.c
+++ b/drivers/usb/storage/libusual.c
@@ -135,7 +135,7 @@ static int usu_probe(struct usb_interface *intf,
135 stat[type].fls |= USU_MOD_FL_THREAD; 135 stat[type].fls |= USU_MOD_FL_THREAD;
136 spin_unlock_irqrestore(&usu_lock, flags); 136 spin_unlock_irqrestore(&usu_lock, flags);
137 137
138 task = kthread_run(usu_probe_thread, (void*)type, "libusual_%d", type); 138 task = kthread_run(usu_probe_thread, (void*)type, "libusual_%ld", type);
139 if (IS_ERR(task)) { 139 if (IS_ERR(task)) {
140 rc = PTR_ERR(task); 140 rc = PTR_ERR(task);
141 printk(KERN_WARNING "libusual: " 141 printk(KERN_WARNING "libusual: "
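The libusual.c hunk above only changes a format specifier: the value passed for the thread name is evidently a long-sized integer, so "%d" is the wrong conversion and "%ld" matches. The general rule, in a trivial standalone form:

#include <stdio.h>

int main(void)
{
        long type = 3;

        /* printf("libusual_%d\n", type);  -- mismatched, -Wformat warns */
        printf("libusual_%ld\n", type);    /* specifier matches a long */
        return 0;
}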
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index dfd42fe9e5f0..98b89ea9e312 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -38,7 +38,7 @@
38#include "onetouch.h" 38#include "onetouch.h"
39#include "debug.h" 39#include "debug.h"
40 40
41void onetouch_release_input(void *onetouch_); 41static void onetouch_release_input(void *onetouch_);
42 42
43struct usb_onetouch { 43struct usb_onetouch {
44 char name[128]; 44 char name[128];
@@ -223,7 +223,7 @@ int onetouch_connect_input(struct us_data *ss)
223 return error; 223 return error;
224} 224}
225 225
226void onetouch_release_input(void *onetouch_) 226static void onetouch_release_input(void *onetouch_)
227{ 227{
228 struct usb_onetouch *onetouch = (struct usb_onetouch *) onetouch_; 228 struct usb_onetouch *onetouch = (struct usb_onetouch *) onetouch_;
229 229
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 732bf52a775e..a0ed889230aa 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -44,7 +44,8 @@
44 * running with this patch. 44 * running with this patch.
45 * Send your submission to either Phil Dibowitz <phil@ipom.com> or 45 * Send your submission to either Phil Dibowitz <phil@ipom.com> or
46 * Alan Stern <stern@rowland.harvard.edu>, and don't forget to CC: the 46 * Alan Stern <stern@rowland.harvard.edu>, and don't forget to CC: the
47 * USB development list <linux-usb-devel@lists.sourceforge.net>. 47 * USB development list <linux-usb@vger.kernel.org> and the USB storage list
48 * <usb-storage@lists.one-eyed-alien.net>
48 */ 49 */
49 50
50/* patch submitted by Vivian Bregier <Vivian.Bregier@imag.fr> 51/* patch submitted by Vivian Bregier <Vivian.Bregier@imag.fr>
@@ -557,6 +558,13 @@ UNUSUAL_DEV( 0x04e6, 0x1010, 0x0000, 0x9999,
557 US_FL_SINGLE_LUN), 558 US_FL_SINGLE_LUN),
558#endif 559#endif
559 560
561/* Reported by Dmitry Khlystov <adminimus@gmail.com> */
562UNUSUAL_DEV( 0x04e8, 0x507c, 0x0220, 0x0220,
563 "Samsung",
564 "YP-U3",
565 US_SC_DEVICE, US_PR_DEVICE, NULL,
566 US_FL_MAX_SECTORS_64),
567
560/* Reported by Bob Sass <rls@vectordb.com> -- only rev 1.33 tested */ 568/* Reported by Bob Sass <rls@vectordb.com> -- only rev 1.33 tested */
561UNUSUAL_DEV( 0x050d, 0x0115, 0x0133, 0x0133, 569UNUSUAL_DEV( 0x050d, 0x0115, 0x0133, 0x0133,
562 "Belkin", 570 "Belkin",
@@ -1200,6 +1208,17 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110,
1200 US_SC_DEVICE, US_PR_DEVICE, NULL, 1208 US_SC_DEVICE, US_PR_DEVICE, NULL,
1201 US_FL_BULK32), 1209 US_FL_BULK32),
1202 1210
1211/* Andrew Lunn <andrew@lunn.ch>
1212 * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
1213 * on LUN 4.
1214 * Note: Vend:Prod clash with "Ltd Maxell WS30 Slim Digital Camera"
1215*/
1216UNUSUAL_DEV( 0x0851, 0x1543, 0x0200, 0x0200,
1217 "PanDigital",
1218 "Photo Frame",
1219 US_SC_DEVICE, US_PR_DEVICE, NULL,
1220 US_FL_NOT_LOCKABLE),
1221
1203/* Submitted by Jan De Luyck <lkml@kcore.org> */ 1222/* Submitted by Jan De Luyck <lkml@kcore.org> */
1204UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000, 1223UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000,
1205 "CITIZEN", 1224 "CITIZEN",
@@ -1342,6 +1361,13 @@ UNUSUAL_DEV( 0x0d96, 0x410a, 0x0001, 0xffff,
1342 US_SC_DEVICE, US_PR_DEVICE, NULL, 1361 US_SC_DEVICE, US_PR_DEVICE, NULL,
1343 US_FL_FIX_INQUIRY), 1362 US_FL_FIX_INQUIRY),
1344 1363
1364/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
1365UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
1366 "INTOVA",
1367 "Pixtreme",
1368 US_SC_DEVICE, US_PR_DEVICE, NULL,
1369 US_FL_FIX_CAPACITY ),
1370
1345/* 1371/*
1346 * Entry for Jenoptik JD 5200z3 1372 * Entry for Jenoptik JD 5200z3
1347 * 1373 *
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index a856effad3bd..e268aacb773a 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -539,7 +539,8 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id)
539 " has %s in unusual_devs.h (kernel" 539 " has %s in unusual_devs.h (kernel"
540 " %s)\n" 540 " %s)\n"
541 " Please send a copy of this message to " 541 " Please send a copy of this message to "
542 "<linux-usb-devel@lists.sourceforge.net>\n", 542 "<linux-usb@vger.kernel.org> and "
543 "<usb-storage@lists.one-eyed-alien.net>\n",
543 le16_to_cpu(ddesc->idVendor), 544 le16_to_cpu(ddesc->idVendor),
544 le16_to_cpu(ddesc->idProduct), 545 le16_to_cpu(ddesc->idProduct),
545 le16_to_cpu(ddesc->bcdDevice), 546 le16_to_cpu(ddesc->bcdDevice),
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index a576dc261732..bb1dadaa4a23 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1774,6 +1774,11 @@ config FB_PXA
1774 1774
1775 If unsure, say N. 1775 If unsure, say N.
1776 1776
1777config FB_PXA_SMARTPANEL
1778 bool "PXA Smartpanel LCD support"
1779 default n
1780 depends on FB_PXA
1781
1777config FB_PXA_PARAMETERS 1782config FB_PXA_PARAMETERS
1778 bool "PXA LCD command line parameters" 1783 bool "PXA LCD command line parameters"
1779 default n 1784 default n
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 17b5267f44d7..9f8a389dc7ae 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -381,7 +381,7 @@ int __init clps711xfb_init(void)
381 381
382 /* Register the /proc entries. */ 382 /* Register the /proc entries. */
383 clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444, 383 clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444,
384 &proc_root); 384 NULL);
385 if (clps7111fb_backlight_proc_entry == NULL) { 385 if (clps7111fb_backlight_proc_entry == NULL) {
386 printk("Couldn't create the /proc entry for the backlight.\n"); 386 printk("Couldn't create the /proc entry for the backlight.\n");
387 return -EINVAL; 387 return -EINVAL;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 8eda7b60df8f..ad31983b43eb 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1881,7 +1881,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1881 scr_memsetw((unsigned short *) (vc->vc_origin + 1881 scr_memsetw((unsigned short *) (vc->vc_origin +
1882 vc->vc_size_row * 1882 vc->vc_size_row *
1883 (b - count)), 1883 (b - count)),
1884 vc->vc_video_erase_char, 1884 vc->vc_scrl_erase_char,
1885 vc->vc_size_row * count); 1885 vc->vc_size_row * count);
1886 return 1; 1886 return 1;
1887 break; 1887 break;
@@ -1953,7 +1953,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1953 scr_memsetw((unsigned short *) (vc->vc_origin + 1953 scr_memsetw((unsigned short *) (vc->vc_origin +
1954 vc->vc_size_row * 1954 vc->vc_size_row *
1955 (b - count)), 1955 (b - count)),
1956 vc->vc_video_erase_char, 1956 vc->vc_scrl_erase_char,
1957 vc->vc_size_row * count); 1957 vc->vc_size_row * count);
1958 return 1; 1958 return 1;
1959 } 1959 }
@@ -1972,7 +1972,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1972 scr_memsetw((unsigned short *) (vc->vc_origin + 1972 scr_memsetw((unsigned short *) (vc->vc_origin +
1973 vc->vc_size_row * 1973 vc->vc_size_row *
1974 t), 1974 t),
1975 vc->vc_video_erase_char, 1975 vc->vc_scrl_erase_char,
1976 vc->vc_size_row * count); 1976 vc->vc_size_row * count);
1977 return 1; 1977 return 1;
1978 break; 1978 break;
@@ -2042,7 +2042,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
2042 scr_memsetw((unsigned short *) (vc->vc_origin + 2042 scr_memsetw((unsigned short *) (vc->vc_origin +
2043 vc->vc_size_row * 2043 vc->vc_size_row *
2044 t), 2044 t),
2045 vc->vc_video_erase_char, 2045 vc->vc_scrl_erase_char,
2046 vc->vc_size_row * count); 2046 vc->vc_size_row * count);
2047 return 1; 2047 return 1;
2048 } 2048 }
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index bd8d995fe25d..38a296bbdfc9 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -531,7 +531,7 @@ static void mdacon_cursor(struct vc_data *c, int mode)
531 531
532static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines) 532static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
533{ 533{
534 u16 eattr = mda_convert_attr(c->vc_video_erase_char); 534 u16 eattr = mda_convert_attr(c->vc_scrl_erase_char);
535 535
536 if (!lines) 536 if (!lines)
537 return 0; 537 return 0;
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 67a682d6cc7b..a11cc2fdd4cd 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -170,12 +170,12 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
170 switch (dir) { 170 switch (dir) {
171 case SM_UP: 171 case SM_UP:
172 sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols); 172 sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols);
173 sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_video_erase_char); 173 sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_scrl_erase_char);
174 break; 174 break;
175 175
176 case SM_DOWN: 176 case SM_DOWN:
177 sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols); 177 sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols);
178 sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_video_erase_char); 178 sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_scrl_erase_char);
179 break; 179 break;
180 } 180 }
181 181
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 6df29a62d720..bd1f57b259d9 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1350,7 +1350,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
1350 } else 1350 } else
1351 c->vc_origin += delta; 1351 c->vc_origin += delta;
1352 scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size - 1352 scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size -
1353 delta), c->vc_video_erase_char, 1353 delta), c->vc_scrl_erase_char,
1354 delta); 1354 delta);
1355 } else { 1355 } else {
1356 if (oldo - delta < vga_vram_base) { 1356 if (oldo - delta < vga_vram_base) {
@@ -1363,7 +1363,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
1363 } else 1363 } else
1364 c->vc_origin -= delta; 1364 c->vc_origin -= delta;
1365 c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; 1365 c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
1366 scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, 1366 scr_memsetw((u16 *) (c->vc_origin), c->vc_scrl_erase_char,
1367 delta); 1367 delta);
1368 } 1368 }
1369 c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; 1369 c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c
index aaa3e538e5da..5b5f072fc1a8 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/matrox/matroxfb_misc.c
@@ -522,8 +522,6 @@ static void parse_bios(unsigned char __iomem* vbios, struct matrox_bios* bd) {
522#endif 522#endif
523} 523}
524 524
525#define get_u16(x) (le16_to_cpu(get_unaligned((__u16*)(x))))
526#define get_u32(x) (le32_to_cpu(get_unaligned((__u32*)(x))))
527static int parse_pins1(WPMINFO const struct matrox_bios* bd) { 525static int parse_pins1(WPMINFO const struct matrox_bios* bd) {
528 unsigned int maxdac; 526 unsigned int maxdac;
529 527
@@ -532,11 +530,12 @@ static int parse_pins1(WPMINFO const struct matrox_bios* bd) {
532 case 1: maxdac = 220000; break; 530 case 1: maxdac = 220000; break;
533 default: maxdac = 240000; break; 531 default: maxdac = 240000; break;
534 } 532 }
535 if (get_u16(bd->pins + 24)) { 533 if (get_unaligned_le16(bd->pins + 24)) {
536 maxdac = get_u16(bd->pins + 24) * 10; 534 maxdac = get_unaligned_le16(bd->pins + 24) * 10;
537 } 535 }
538 MINFO->limits.pixel.vcomax = maxdac; 536 MINFO->limits.pixel.vcomax = maxdac;
539 MINFO->values.pll.system = get_u16(bd->pins + 28) ? get_u16(bd->pins + 28) * 10 : 50000; 537 MINFO->values.pll.system = get_unaligned_le16(bd->pins + 28) ?
538 get_unaligned_le16(bd->pins + 28) * 10 : 50000;
540 /* ignore 4MB, 8MB, module clocks */ 539 /* ignore 4MB, 8MB, module clocks */
541 MINFO->features.pll.ref_freq = 14318; 540 MINFO->features.pll.ref_freq = 14318;
542 MINFO->values.reg.mctlwtst = 0x00030101; 541 MINFO->values.reg.mctlwtst = 0x00030101;
@@ -575,7 +574,8 @@ static void default_pins2(WPMINFO2) {
575static int parse_pins3(WPMINFO const struct matrox_bios* bd) { 574static int parse_pins3(WPMINFO const struct matrox_bios* bd) {
576 MINFO->limits.pixel.vcomax = 575 MINFO->limits.pixel.vcomax =
577 MINFO->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000); 576 MINFO->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000);
578 MINFO->values.reg.mctlwtst = get_u32(bd->pins + 48) == 0xFFFFFFFF ? 0x01250A21 : get_u32(bd->pins + 48); 577 MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ?
578 0x01250A21 : get_unaligned_le32(bd->pins + 48);
579 /* memory config */ 579 /* memory config */
580 MINFO->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) | 580 MINFO->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) |
581 ((bd->pins[57] << 22) & 0x00C00000) | 581 ((bd->pins[57] << 22) & 0x00C00000) |
@@ -601,7 +601,7 @@ static void default_pins3(WPMINFO2) {
601static int parse_pins4(WPMINFO const struct matrox_bios* bd) { 601static int parse_pins4(WPMINFO const struct matrox_bios* bd) {
602 MINFO->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000; 602 MINFO->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000;
603 MINFO->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? MINFO->limits.pixel.vcomax : bd->pins[ 38] * 4000; 603 MINFO->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? MINFO->limits.pixel.vcomax : bd->pins[ 38] * 4000;
604 MINFO->values.reg.mctlwtst = get_u32(bd->pins + 71); 604 MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 71);
605 MINFO->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) | 605 MINFO->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) |
606 ((bd->pins[87] << 22) & 0x00C00000) | 606 ((bd->pins[87] << 22) & 0x00C00000) |
607 ((bd->pins[86] << 1) & 0x000001E0) | 607 ((bd->pins[86] << 1) & 0x000001E0) |
@@ -609,7 +609,7 @@ static int parse_pins4(WPMINFO const struct matrox_bios* bd) {
609 MINFO->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) | 609 MINFO->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) |
610 ((bd->pins[53] << 22) & 0x10000000) | 610 ((bd->pins[53] << 22) & 0x10000000) |
611 ((bd->pins[53] << 7) & 0x00001C00); 611 ((bd->pins[53] << 7) & 0x00001C00);
612 MINFO->values.reg.opt3 = get_u32(bd->pins + 67); 612 MINFO->values.reg.opt3 = get_unaligned_le32(bd->pins + 67);
613 MINFO->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000; 613 MINFO->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000;
614 MINFO->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000; 614 MINFO->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000;
615 return 0; 615 return 0;
@@ -640,12 +640,12 @@ static int parse_pins5(WPMINFO const struct matrox_bios* bd) {
640 MINFO->limits.video.vcomin = (bd->pins[122] == 0xFF) ? MINFO->limits.system.vcomin : bd->pins[122] * mult; 640 MINFO->limits.video.vcomin = (bd->pins[122] == 0xFF) ? MINFO->limits.system.vcomin : bd->pins[122] * mult;
641 MINFO->values.pll.system = 641 MINFO->values.pll.system =
642 MINFO->values.pll.video = (bd->pins[ 92] == 0xFF) ? 284000 : bd->pins[ 92] * 4000; 642 MINFO->values.pll.video = (bd->pins[ 92] == 0xFF) ? 284000 : bd->pins[ 92] * 4000;
643 MINFO->values.reg.opt = get_u32(bd->pins+ 48); 643 MINFO->values.reg.opt = get_unaligned_le32(bd->pins + 48);
644 MINFO->values.reg.opt2 = get_u32(bd->pins+ 52); 644 MINFO->values.reg.opt2 = get_unaligned_le32(bd->pins + 52);
645 MINFO->values.reg.opt3 = get_u32(bd->pins+ 94); 645 MINFO->values.reg.opt3 = get_unaligned_le32(bd->pins + 94);
646 MINFO->values.reg.mctlwtst = get_u32(bd->pins+ 98); 646 MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 98);
647 MINFO->values.reg.memmisc = get_u32(bd->pins+102); 647 MINFO->values.reg.memmisc = get_unaligned_le32(bd->pins + 102);
648 MINFO->values.reg.memrdbk = get_u32(bd->pins+106); 648 MINFO->values.reg.memrdbk = get_unaligned_le32(bd->pins + 106);
649 MINFO->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000; 649 MINFO->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000;
650 MINFO->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20; 650 MINFO->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20;
651 MINFO->values.memory.dll = (bd->pins[115] & 0x02) != 0; 651 MINFO->values.memory.dll = (bd->pins[115] & 0x02) != 0;
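The matroxfb_misc.c hunks above (and the metronomefb.c ones just below) drop driver-local byte-access macros in favour of the kernel's get_unaligned_le16()/get_unaligned_le32() helpers, which read a little-endian value from a possibly unaligned address regardless of host endianness. The following is only a portable userspace model of those semantics, under renamed functions; it is not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

static uint16_t model_get_unaligned_le16(const void *p)
{
        const uint8_t *b = p;

        return (uint16_t)(b[0] | (b[1] << 8));
}

static uint32_t model_get_unaligned_le32(const void *p)
{
        const uint8_t *b = p;

        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
        /* five bytes of fake BIOS data; the reads start at an odd offset */
        const uint8_t pins[5] = { 0x00, 0x34, 0x12, 0xcd, 0xab };

        printf("%#x %#x\n",
               (unsigned int)model_get_unaligned_le16(pins + 1),
               (unsigned int)model_get_unaligned_le32(pins + 1));
        /* prints: 0x1234 0xabcd1234 */
        return 0;
}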
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index 249791286367..cc4c038a1b3f 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -206,8 +206,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t,
206 } 206 }
207 207
208 /* check waveform mode table address checksum */ 208 /* check waveform mode table address checksum */
209 wmta = le32_to_cpu(get_unaligned((__le32 *) wfm_hdr->wmta)); 209 wmta = get_unaligned_le32(wfm_hdr->wmta) & 0x00FFFFFF;
210 wmta &= 0x00FFFFFF;
211 cksum_idx = wmta + m*4 + 3; 210 cksum_idx = wmta + m*4 + 3;
212 if (cksum_idx > size) 211 if (cksum_idx > size)
213 return -EINVAL; 212 return -EINVAL;
@@ -219,8 +218,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t,
219 } 218 }
220 219
221 /* check waveform temperature table address checksum */ 220 /* check waveform temperature table address checksum */
222 tta = le32_to_cpu(get_unaligned((int *) (mem + wmta + m*4))); 221 tta = get_unaligned_le32(mem + wmta + m * 4) & 0x00FFFFFF;
223 tta &= 0x00FFFFFF;
224 cksum_idx = tta + trn*4 + 3; 222 cksum_idx = tta + trn*4 + 3;
225 if (cksum_idx > size) 223 if (cksum_idx > size)
226 return -EINVAL; 224 return -EINVAL;
@@ -233,8 +231,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t,
233 231
234 /* here we do the real work of putting the waveform into the 232 /* here we do the real work of putting the waveform into the
235 metromem buffer. this does runlength decoding of the waveform */ 233 metromem buffer. this does runlength decoding of the waveform */
236 wfm_idx = le32_to_cpu(get_unaligned((__le32 *) (mem + tta + trn*4))); 234 wfm_idx = get_unaligned_le32(mem + tta + trn * 4) & 0x00FFFFFF;
237 wfm_idx &= 0x00FFFFFF;
238 owfm_idx = wfm_idx; 235 owfm_idx = wfm_idx;
239 if (wfm_idx > size) 236 if (wfm_idx > size)
240 return -EINVAL; 237 return -EINVAL;
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 757651954e6c..3ab6e3d973a1 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -39,6 +39,9 @@
39#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
40#include <linux/clk.h> 40#include <linux/clk.h>
41#include <linux/err.h> 41#include <linux/err.h>
42#include <linux/completion.h>
43#include <linux/kthread.h>
44#include <linux/freezer.h>
42 45
43#include <asm/hardware.h> 46#include <asm/hardware.h>
44#include <asm/io.h> 47#include <asm/io.h>
@@ -57,19 +60,31 @@
57#include "pxafb.h" 60#include "pxafb.h"
58 61
59/* Bits which should not be set in machine configuration structures */ 62/* Bits which should not be set in machine configuration structures */
60#define LCCR0_INVALID_CONFIG_MASK (LCCR0_OUM|LCCR0_BM|LCCR0_QDM|LCCR0_DIS|LCCR0_EFM|LCCR0_IUM|LCCR0_SFM|LCCR0_LDM|LCCR0_ENB) 63#define LCCR0_INVALID_CONFIG_MASK (LCCR0_OUM | LCCR0_BM | LCCR0_QDM |\
61#define LCCR3_INVALID_CONFIG_MASK (LCCR3_HSP|LCCR3_VSP|LCCR3_PCD|LCCR3_BPP) 64 LCCR0_DIS | LCCR0_EFM | LCCR0_IUM |\
65 LCCR0_SFM | LCCR0_LDM | LCCR0_ENB)
66
67#define LCCR3_INVALID_CONFIG_MASK (LCCR3_HSP | LCCR3_VSP |\
68 LCCR3_PCD | LCCR3_BPP)
62 69
63static void (*pxafb_backlight_power)(int); 70static void (*pxafb_backlight_power)(int);
64static void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *); 71static void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *);
65 72
66static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *); 73static int pxafb_activate_var(struct fb_var_screeninfo *var,
74 struct pxafb_info *);
67static void set_ctrlr_state(struct pxafb_info *fbi, u_int state); 75static void set_ctrlr_state(struct pxafb_info *fbi, u_int state);
68 76
69#ifdef CONFIG_FB_PXA_PARAMETERS 77static inline unsigned long
70#define PXAFB_OPTIONS_SIZE 256 78lcd_readl(struct pxafb_info *fbi, unsigned int off)
71static char g_options[PXAFB_OPTIONS_SIZE] __devinitdata = ""; 79{
72#endif 80 return __raw_readl(fbi->mmio_base + off);
81}
82
83static inline void
84lcd_writel(struct pxafb_info *fbi, unsigned int off, unsigned long val)
85{
86 __raw_writel(val, fbi->mmio_base + off);
87}
73 88
74static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state) 89static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state)
75{ 90{
@@ -79,10 +94,12 @@ static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state)
79 /* 94 /*
80 * We need to handle two requests being made at the same time. 95 * We need to handle two requests being made at the same time.
81 * There are two important cases: 96 * There are two important cases:
82 * 1. When we are changing VT (C_REENABLE) while unblanking (C_ENABLE) 97 * 1. When we are changing VT (C_REENABLE) while unblanking
83 * We must perform the unblanking, which will do our REENABLE for us. 98 * (C_ENABLE) We must perform the unblanking, which will
84 * 2. When we are blanking, but immediately unblank before we have 99 * do our REENABLE for us.
85 * blanked. We do the "REENABLE" thing here as well, just to be sure. 100 * 2. When we are blanking, but immediately unblank before
101 * we have blanked. We do the "REENABLE" thing here as
102 * well, just to be sure.
86 */ 103 */
87 if (fbi->task_state == C_ENABLE && state == C_REENABLE) 104 if (fbi->task_state == C_ENABLE && state == C_REENABLE)
88 state = (u_int) -1; 105 state = (u_int) -1;
@@ -129,13 +146,13 @@ pxafb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue,
129 val = ((red << 8) & 0x00f80000); 146 val = ((red << 8) & 0x00f80000);
130 val |= ((green >> 0) & 0x0000fc00); 147 val |= ((green >> 0) & 0x0000fc00);
131 val |= ((blue >> 8) & 0x000000f8); 148 val |= ((blue >> 8) & 0x000000f8);
132 ((u32*)(fbi->palette_cpu))[regno] = val; 149 ((u32 *)(fbi->palette_cpu))[regno] = val;
133 break; 150 break;
134 case LCCR4_PAL_FOR_2: 151 case LCCR4_PAL_FOR_2:
135 val = ((red << 8) & 0x00fc0000); 152 val = ((red << 8) & 0x00fc0000);
136 val |= ((green >> 0) & 0x0000fc00); 153 val |= ((green >> 0) & 0x0000fc00);
137 val |= ((blue >> 8) & 0x000000fc); 154 val |= ((blue >> 8) & 0x000000fc);
138 ((u32*)(fbi->palette_cpu))[regno] = val; 155 ((u32 *)(fbi->palette_cpu))[regno] = val;
139 break; 156 break;
140 } 157 }
141 158
@@ -203,15 +220,15 @@ pxafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
203 */ 220 */
204static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var) 221static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var)
205{ 222{
206 int ret = 0; 223 int ret = 0;
207 switch (var->bits_per_pixel) { 224 switch (var->bits_per_pixel) {
208 case 1: ret = LCCR3_1BPP; break; 225 case 1: ret = LCCR3_1BPP; break;
209 case 2: ret = LCCR3_2BPP; break; 226 case 2: ret = LCCR3_2BPP; break;
210 case 4: ret = LCCR3_4BPP; break; 227 case 4: ret = LCCR3_4BPP; break;
211 case 8: ret = LCCR3_8BPP; break; 228 case 8: ret = LCCR3_8BPP; break;
212 case 16: ret = LCCR3_16BPP; break; 229 case 16: ret = LCCR3_16BPP; break;
213 } 230 }
214 return ret; 231 return ret;
215} 232}
216 233
217#ifdef CONFIG_CPU_FREQ 234#ifdef CONFIG_CPU_FREQ
@@ -223,31 +240,32 @@ static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var)
223 */ 240 */
224static unsigned int pxafb_display_dma_period(struct fb_var_screeninfo *var) 241static unsigned int pxafb_display_dma_period(struct fb_var_screeninfo *var)
225{ 242{
226 /* 243 /*
227 * Period = pixclock * bits_per_byte * bytes_per_transfer 244 * Period = pixclock * bits_per_byte * bytes_per_transfer
228 * / memory_bits_per_pixel; 245 * / memory_bits_per_pixel;
229 */ 246 */
230 return var->pixclock * 8 * 16 / var->bits_per_pixel; 247 return var->pixclock * 8 * 16 / var->bits_per_pixel;
231} 248}
232
233extern unsigned int get_clk_frequency_khz(int info);
234#endif 249#endif
235 250
236/* 251/*
237 * Select the smallest mode that allows the desired resolution to be 252 * Select the smallest mode that allows the desired resolution to be
238 * displayed. If desired parameters can be rounded up. 253 * displayed. If desired parameters can be rounded up.
239 */ 254 */
240static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach, struct fb_var_screeninfo *var) 255static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach,
256 struct fb_var_screeninfo *var)
241{ 257{
242 struct pxafb_mode_info *mode = NULL; 258 struct pxafb_mode_info *mode = NULL;
243 struct pxafb_mode_info *modelist = mach->modes; 259 struct pxafb_mode_info *modelist = mach->modes;
244 unsigned int best_x = 0xffffffff, best_y = 0xffffffff; 260 unsigned int best_x = 0xffffffff, best_y = 0xffffffff;
245 unsigned int i; 261 unsigned int i;
246 262
247 for (i = 0 ; i < mach->num_modes ; i++) { 263 for (i = 0; i < mach->num_modes; i++) {
248 if (modelist[i].xres >= var->xres && modelist[i].yres >= var->yres && 264 if (modelist[i].xres >= var->xres &&
249 modelist[i].xres < best_x && modelist[i].yres < best_y && 265 modelist[i].yres >= var->yres &&
250 modelist[i].bpp >= var->bits_per_pixel ) { 266 modelist[i].xres < best_x &&
267 modelist[i].yres < best_y &&
268 modelist[i].bpp >= var->bits_per_pixel) {
251 best_x = modelist[i].xres; 269 best_x = modelist[i].xres;
252 best_y = modelist[i].yres; 270 best_y = modelist[i].yres;
253 mode = &modelist[i]; 271 mode = &modelist[i];
@@ -257,7 +275,8 @@ static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach, struc
257 return mode; 275 return mode;
258} 276}
259 277
260static void pxafb_setmode(struct fb_var_screeninfo *var, struct pxafb_mode_info *mode) 278static void pxafb_setmode(struct fb_var_screeninfo *var,
279 struct pxafb_mode_info *mode)
261{ 280{
262 var->xres = mode->xres; 281 var->xres = mode->xres;
263 var->yres = mode->yres; 282 var->yres = mode->yres;
@@ -315,19 +334,20 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
315 var->yres_virtual = 334 var->yres_virtual =
316 max(var->yres_virtual, var->yres); 335 max(var->yres_virtual, var->yres);
317 336
318 /* 337 /*
319 * Setup the RGB parameters for this display. 338 * Setup the RGB parameters for this display.
320 * 339 *
321 * The pixel packing format is described on page 7-11 of the 340 * The pixel packing format is described on page 7-11 of the
322 * PXA2XX Developer's Manual. 341 * PXA2XX Developer's Manual.
323 */ 342 */
324 if (var->bits_per_pixel == 16) { 343 if (var->bits_per_pixel == 16) {
325 var->red.offset = 11; var->red.length = 5; 344 var->red.offset = 11; var->red.length = 5;
326 var->green.offset = 5; var->green.length = 6; 345 var->green.offset = 5; var->green.length = 6;
327 var->blue.offset = 0; var->blue.length = 5; 346 var->blue.offset = 0; var->blue.length = 5;
328 var->transp.offset = var->transp.length = 0; 347 var->transp.offset = var->transp.length = 0;
329 } else { 348 } else {
330 var->red.offset = var->green.offset = var->blue.offset = var->transp.offset = 0; 349 var->red.offset = var->green.offset = 0;
350 var->blue.offset = var->transp.offset = 0;
331 var->red.length = 8; 351 var->red.length = 8;
332 var->green.length = 8; 352 var->green.length = 8;
333 var->blue.length = 8; 353 var->blue.length = 8;
@@ -345,8 +365,7 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
345 365
346static inline void pxafb_set_truecolor(u_int is_true_color) 366static inline void pxafb_set_truecolor(u_int is_true_color)
347{ 367{
348 pr_debug("pxafb: true_color = %d\n", is_true_color); 368 /* do your machine-specific setup if needed */
349 // do your machine-specific setup if needed
350} 369}
351 370
352/* 371/*
@@ -357,9 +376,6 @@ static int pxafb_set_par(struct fb_info *info)
357{ 376{
358 struct pxafb_info *fbi = (struct pxafb_info *)info; 377 struct pxafb_info *fbi = (struct pxafb_info *)info;
359 struct fb_var_screeninfo *var = &info->var; 378 struct fb_var_screeninfo *var = &info->var;
360 unsigned long palette_mem_size;
361
362 pr_debug("pxafb: set_par\n");
363 379
364 if (var->bits_per_pixel == 16) 380 if (var->bits_per_pixel == 16)
365 fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; 381 fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
@@ -379,17 +395,10 @@ static int pxafb_set_par(struct fb_info *info)
379 if (var->bits_per_pixel == 16) 395 if (var->bits_per_pixel == 16)
380 fbi->palette_size = 0; 396 fbi->palette_size = 0;
381 else 397 else
382 fbi->palette_size = var->bits_per_pixel == 1 ? 4 : 1 << var->bits_per_pixel; 398 fbi->palette_size = var->bits_per_pixel == 1 ?
383 399 4 : 1 << var->bits_per_pixel;
384 if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0)
385 palette_mem_size = fbi->palette_size * sizeof(u16);
386 else
387 palette_mem_size = fbi->palette_size * sizeof(u32);
388
389 pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size);
390 400
391 fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); 401 fbi->palette_cpu = (u16 *)&fbi->dma_buff->palette[0];
392 fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
393 402
394 /* 403 /*
395 * Set (any) board control register to handle new color depth 404 * Set (any) board control register to handle new color depth
@@ -407,36 +416,6 @@ static int pxafb_set_par(struct fb_info *info)
407} 416}
408 417
409/* 418/*
410 * Formal definition of the VESA spec:
411 * On
412 * This refers to the state of the display when it is in full operation
413 * Stand-By
414 * This defines an optional operating state of minimal power reduction with
415 * the shortest recovery time
416 * Suspend
417 * This refers to a level of power management in which substantial power
418 * reduction is achieved by the display. The display can have a longer
419 * recovery time from this state than from the Stand-by state
420 * Off
421 * This indicates that the display is consuming the lowest level of power
422 * and is non-operational. Recovery from this state may optionally require
423 * the user to manually power on the monitor
424 *
425 * Now, the fbdev driver adds an additional state, (blank), where they
426 * turn off the video (maybe by colormap tricks), but don't mess with the
427 * video itself: think of it semantically between on and Stand-By.
428 *
429 * So here's what we should do in our fbdev blank routine:
430 *
431 * VESA_NO_BLANKING (mode 0) Video on, front/back light on
432 * VESA_VSYNC_SUSPEND (mode 1) Video on, front/back light off
433 * VESA_HSYNC_SUSPEND (mode 2) Video on, front/back light off
434 * VESA_POWERDOWN (mode 3) Video off, front/back light off
435 *
436 * This will match the matrox implementation.
437 */
438
439/*
440 * pxafb_blank(): 419 * pxafb_blank():
441 * Blank the display by setting all palette values to zero. Note, the 420 * Blank the display by setting all palette values to zero. Note, the
442 * 16 bpp mode does not really use the palette, so this will not 421 * 16 bpp mode does not really use the palette, so this will not
@@ -447,8 +426,6 @@ static int pxafb_blank(int blank, struct fb_info *info)
447 struct pxafb_info *fbi = (struct pxafb_info *)info; 426 struct pxafb_info *fbi = (struct pxafb_info *)info;
448 int i; 427 int i;
449 428
450 pr_debug("pxafb: blank=%d\n", blank);
451
452 switch (blank) { 429 switch (blank) {
453 case FB_BLANK_POWERDOWN: 430 case FB_BLANK_POWERDOWN:
454 case FB_BLANK_VSYNC_SUSPEND: 431 case FB_BLANK_VSYNC_SUSPEND:
@@ -460,11 +437,11 @@ static int pxafb_blank(int blank, struct fb_info *info)
460 pxafb_setpalettereg(i, 0, 0, 0, 0, info); 437 pxafb_setpalettereg(i, 0, 0, 0, 0, info);
461 438
462 pxafb_schedule_work(fbi, C_DISABLE); 439 pxafb_schedule_work(fbi, C_DISABLE);
463 //TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); 440 /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */
464 break; 441 break;
465 442
466 case FB_BLANK_UNBLANK: 443 case FB_BLANK_UNBLANK:
467 //TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); 444 /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */
468 if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR || 445 if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR ||
469 fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) 446 fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
470 fb_set_cmap(&fbi->fb.cmap, info); 447 fb_set_cmap(&fbi->fb.cmap, info);
@@ -480,7 +457,7 @@ static int pxafb_mmap(struct fb_info *info,
480 unsigned long off = vma->vm_pgoff << PAGE_SHIFT; 457 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
481 458
482 if (off < info->fix.smem_len) { 459 if (off < info->fix.smem_len) {
483 vma->vm_pgoff += 1; 460 vma->vm_pgoff += fbi->video_offset / PAGE_SIZE;
484 return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu, 461 return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu,
485 fbi->map_dma, fbi->map_size); 462 fbi->map_dma, fbi->map_size);
486 } 463 }
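
The hunk above depends on video_offset, the page-aligned size of the new DMA buffer that now sits in front of the visible frame buffer inside the writecombine allocation (see pxafb_map_video_memory further down). A minimal userspace sketch of the page-offset arithmetic; the 105632-byte buffer size is assumed purely for illustration:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long dma_buff_size = 105632;   /* assumed sizeof(struct pxafb_dma_buff) */
        unsigned long video_offset  = PAGE_ALIGN(dma_buff_size);
        unsigned long vm_pgoff      = 0;        /* user mapped offset 0 of the fb */

        vm_pgoff += video_offset / PAGE_SIZE;   /* skip the DMA buffer pages */
        printf("video_offset=%lu bytes, mapping starts at page %lu\n",
               video_offset, vm_pgoff);
        return 0;
}
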
@@ -529,7 +506,8 @@ static struct fb_ops pxafb_ops = {
529 * 506 *
530 * Factoring the 10^4 and 10^-12 out gives 10^-8 == 1 / 100000000 as used below. 507 * Factoring the 10^4 and 10^-12 out gives 10^-8 == 1 / 100000000 as used below.
531 */ 508 */
532static inline unsigned int get_pcd(struct pxafb_info *fbi, unsigned int pixclock) 509static inline unsigned int get_pcd(struct pxafb_info *fbi,
510 unsigned int pixclock)
533{ 511{
534 unsigned long long pcd; 512 unsigned long long pcd;
535 513
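
A worked example of the divider arithmetic described in the comment above -- a standalone sketch, not the driver's get_pcd(), which may apply further adjustments (e.g. for double pixel clock). The 104 MHz LCD clock and 96153 ps pixel period are assumed values:

#include <stdio.h>

int main(void)
{
        unsigned long long lcd_clk  = 104000000ULL;     /* Hz, assumed */
        unsigned long long pixclock = 96153ULL;         /* ps per pixel, assumed */

        /* pcd ~= lcd_clk * pixclock * 1e-12, factored as
         * (lcd_clk / 10^4) * pixclock / 10^8 to stay in integer range */
        unsigned long long pcd = (lcd_clk / 10000) * pixclock / 100000000ULL;

        printf("pcd = %llu\n", pcd);    /* prints 9: ~104 MHz / 10.4 MHz, truncated */
        return 0;
}
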
@@ -555,7 +533,7 @@ static inline void set_hsync_time(struct pxafb_info *fbi, unsigned int pcd)
555 unsigned long htime; 533 unsigned long htime;
556 534
557 if ((pcd == 0) || (fbi->fb.var.hsync_len == 0)) { 535 if ((pcd == 0) || (fbi->fb.var.hsync_len == 0)) {
558 fbi->hsync_time=0; 536 fbi->hsync_time = 0;
559 return; 537 return;
560 } 538 }
561 539
@@ -576,71 +554,231 @@ unsigned long pxafb_get_hsync_time(struct device *dev)
576} 554}
577EXPORT_SYMBOL(pxafb_get_hsync_time); 555EXPORT_SYMBOL(pxafb_get_hsync_time);
578 556
579/* 557static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal,
580 * pxafb_activate_var(): 558 unsigned int offset, size_t size)
581 * Configures LCD Controller based on entries in var parameter. Settings are
582 * only written to the controller if changes were made.
583 */
584static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *fbi)
585{ 559{
586 struct pxafb_lcd_reg new_regs; 560 struct pxafb_dma_descriptor *dma_desc, *pal_desc;
587 u_long flags; 561 unsigned int dma_desc_off, pal_desc_off;
588 u_int lines_per_panel, pcd = get_pcd(fbi, var->pixclock);
589 562
590 pr_debug("pxafb: Configuring PXA LCD\n"); 563 if (dma < 0 || dma >= DMA_MAX)
564 return -EINVAL;
591 565
592 pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n", 566 dma_desc = &fbi->dma_buff->dma_desc[dma];
593 var->xres, var->hsync_len, 567 dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[dma]);
594 var->left_margin, var->right_margin);
595 pr_debug("var: yres=%d vslen=%d um=%d bm=%d\n",
596 var->yres, var->vsync_len,
597 var->upper_margin, var->lower_margin);
598 pr_debug("var: pixclock=%d pcd=%d\n", var->pixclock, pcd);
599 568
600#if DEBUG_VAR 569 dma_desc->fsadr = fbi->screen_dma + offset;
601 if (var->xres < 16 || var->xres > 1024) 570 dma_desc->fidr = 0;
602 printk(KERN_ERR "%s: invalid xres %d\n", 571 dma_desc->ldcmd = size;
603 fbi->fb.fix.id, var->xres); 572
604 switch(var->bits_per_pixel) { 573 if (pal < 0 || pal >= PAL_MAX) {
605 case 1: 574 dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off;
606 case 2: 575 fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off;
607 case 4: 576 } else {
608 case 8: 577 pal_desc = &fbi->dma_buff->pal_desc[dma];
609 case 16: 578 pal_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[pal]);
610 break; 579
611 default: 580 pal_desc->fsadr = fbi->dma_buff_phys + pal * PALETTE_SIZE;
612 printk(KERN_ERR "%s: invalid bit depth %d\n", 581 pal_desc->fidr = 0;
613 fbi->fb.fix.id, var->bits_per_pixel); 582
614 break; 583 if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0)
584 pal_desc->ldcmd = fbi->palette_size * sizeof(u16);
585 else
586 pal_desc->ldcmd = fbi->palette_size * sizeof(u32);
587
588 pal_desc->ldcmd |= LDCMD_PAL;
589
590 /* flip back and forth between palette and frame buffer */
591 pal_desc->fdadr = fbi->dma_buff_phys + dma_desc_off;
592 dma_desc->fdadr = fbi->dma_buff_phys + pal_desc_off;
593 fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off;
615 } 594 }
616 if (var->hsync_len < 1 || var->hsync_len > 64)
617 printk(KERN_ERR "%s: invalid hsync_len %d\n",
618 fbi->fb.fix.id, var->hsync_len);
619 if (var->left_margin < 1 || var->left_margin > 255)
620 printk(KERN_ERR "%s: invalid left_margin %d\n",
621 fbi->fb.fix.id, var->left_margin);
622 if (var->right_margin < 1 || var->right_margin > 255)
623 printk(KERN_ERR "%s: invalid right_margin %d\n",
624 fbi->fb.fix.id, var->right_margin);
625 if (var->yres < 1 || var->yres > 1024)
626 printk(KERN_ERR "%s: invalid yres %d\n",
627 fbi->fb.fix.id, var->yres);
628 if (var->vsync_len < 1 || var->vsync_len > 64)
629 printk(KERN_ERR "%s: invalid vsync_len %d\n",
630 fbi->fb.fix.id, var->vsync_len);
631 if (var->upper_margin < 0 || var->upper_margin > 255)
632 printk(KERN_ERR "%s: invalid upper_margin %d\n",
633 fbi->fb.fix.id, var->upper_margin);
634 if (var->lower_margin < 0 || var->lower_margin > 255)
635 printk(KERN_ERR "%s: invalid lower_margin %d\n",
636 fbi->fb.fix.id, var->lower_margin);
637#endif
638 595
639 new_regs.lccr0 = fbi->lccr0 | 596 return 0;
640 (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | 597}
641 LCCR0_QDM | LCCR0_BM | LCCR0_OUM); 598
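
A host-side sketch of the chain setup_frame_dma() builds when a palette is in use: the palette descriptor's FDADR points at the frame descriptor and the frame descriptor's FDADR points back, so the controller alternately reloads palette and frame data. The struct below is a one-entry stand-in, not the real pxafb_dma_buff, and the physical base address is invented:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct desc {
        uint32_t fdadr;         /* physical address of the next descriptor */
        uint32_t fsadr;         /* physical address of the data to fetch */
        uint32_t fidr;
        uint32_t ldcmd;
};

struct dma_buff {               /* simplified stand-in layout */
        struct desc pal_desc[1];
        struct desc dma_desc[1];
};

int main(void)
{
        struct dma_buff buff;
        uint32_t dma_buff_phys = 0xa0000000u;   /* assumed physical base */
        uint32_t pal_off = offsetof(struct dma_buff, pal_desc[0]);
        uint32_t dma_off = offsetof(struct dma_buff, dma_desc[0]);

        buff.pal_desc[0].fdadr = dma_buff_phys + dma_off;  /* palette -> frame */
        buff.dma_desc[0].fdadr = dma_buff_phys + pal_off;  /* frame -> palette */

        printf("pal_desc @ 0x%08x chains to 0x%08x\n",
               dma_buff_phys + pal_off, buff.pal_desc[0].fdadr);
        printf("dma_desc @ 0x%08x chains to 0x%08x\n",
               dma_buff_phys + dma_off, buff.dma_desc[0].fdadr);
        return 0;
}
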
599#ifdef CONFIG_FB_PXA_SMARTPANEL
600static int setup_smart_dma(struct pxafb_info *fbi)
601{
602 struct pxafb_dma_descriptor *dma_desc;
603 unsigned long dma_desc_off, cmd_buff_off;
604
605 dma_desc = &fbi->dma_buff->dma_desc[DMA_CMD];
606 dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[DMA_CMD]);
607 cmd_buff_off = offsetof(struct pxafb_dma_buff, cmd_buff);
608
609 dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off;
610 dma_desc->fsadr = fbi->dma_buff_phys + cmd_buff_off;
611 dma_desc->fidr = 0;
612 dma_desc->ldcmd = fbi->n_smart_cmds * sizeof(uint16_t);
613
614 fbi->fdadr[DMA_CMD] = dma_desc->fdadr;
615 return 0;
616}
617
618int pxafb_smart_flush(struct fb_info *info)
619{
620 struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb);
621 uint32_t prsr;
622 int ret = 0;
623
624 /* disable controller until all registers are set up */
625 lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB);
626
627 /* 1. make it an even number of commands to align on 32-bit boundary
628 * 2. add the interrupt command to the end of the chain so we can
629 * keep track of the end of the transfer
630 */
631
632 while (fbi->n_smart_cmds & 1)
633 fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_NOOP;
634
635 fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_INTERRUPT;
636 fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_WAIT_FOR_VSYNC;
637 setup_smart_dma(fbi);
638
639 /* continue to execute next command */
640 prsr = lcd_readl(fbi, PRSR) | PRSR_ST_OK | PRSR_CON_NT;
641 lcd_writel(fbi, PRSR, prsr);
642
643 /* stop the processor in case it executed "wait for sync" cmd */
644 lcd_writel(fbi, CMDCR, 0x0001);
645
646 /* don't send interrupts for fifo underruns on channel 6 */
647 lcd_writel(fbi, LCCR5, LCCR5_IUM(6));
648
649 lcd_writel(fbi, LCCR1, fbi->reg_lccr1);
650 lcd_writel(fbi, LCCR2, fbi->reg_lccr2);
651 lcd_writel(fbi, LCCR3, fbi->reg_lccr3);
652 lcd_writel(fbi, FDADR0, fbi->fdadr[0]);
653 lcd_writel(fbi, FDADR6, fbi->fdadr[6]);
654
655 /* begin sending */
656 lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB);
657
658 if (wait_for_completion_timeout(&fbi->command_done, HZ/2) == 0) {
659 pr_warning("%s: timeout waiting for command done\n",
660 __func__);
661 ret = -ETIMEDOUT;
662 }
663
664 /* quick disable */
665 prsr = lcd_readl(fbi, PRSR) & ~(PRSR_ST_OK | PRSR_CON_NT);
666 lcd_writel(fbi, PRSR, prsr);
667 lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB);
668 lcd_writel(fbi, FDADR6, 0);
669 fbi->n_smart_cmds = 0;
670 return ret;
671}
672
673int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds)
674{
675 int i;
676 struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb);
677
678 /* leave 2 commands for INTERRUPT and WAIT_FOR_SYNC */
679 for (i = 0; i < n_cmds; i++) {
680 if (fbi->n_smart_cmds == CMD_BUFF_SIZE - 8)
681 pxafb_smart_flush(info);
682
683 fbi->smart_cmds[fbi->n_smart_cmds++] = *cmds++;
684 }
685
686 return 0;
687}
688
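
A standalone simulation of the command bookkeeping shared by pxafb_smart_queue() and pxafb_smart_flush() above: queue an odd number of words, pad to an even count so the chain stays 32-bit aligned, then append the interrupt and wait-for-vsync markers that terminate the transfer. The small buffer and the three marker values are placeholders for CMD_BUFF_SIZE and the SMART_CMD_* macros, which are defined outside this diff:

#include <stdio.h>

#define NOOP            0x0000  /* placeholder for SMART_CMD_NOOP */
#define INTERRUPT       0x0001  /* placeholder for SMART_CMD_INTERRUPT */
#define WAIT_VSYNC      0x0002  /* placeholder for SMART_CMD_WAIT_FOR_VSYNC */

int main(void)
{
        unsigned short cmds[16];
        unsigned int n = 0;

        /* queue three command words (odd on purpose) */
        cmds[n++] = 0x0100;
        cmds[n++] = 0x0101;
        cmds[n++] = 0x0102;

        /* flush: pad to an even count, then terminate the chain */
        while (n & 1)
                cmds[n++] = NOOP;
        cmds[n++] = INTERRUPT;
        cmds[n++] = WAIT_VSYNC;

        printf("%u words queued for DMA (%u bytes)\n", n,
               n * (unsigned int)sizeof(cmds[0]));
        return 0;
}
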
689static unsigned int __smart_timing(unsigned time_ns, unsigned long lcd_clk)
690{
691 unsigned int t = (time_ns * (lcd_clk / 1000000) / 1000);
692 return (t == 0) ? 1 : t;
693}
694
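
A standalone check of the conversion above, assuming a 52 MHz LCD clock (52 cycles per microsecond): 100 ns rounds down to 5 cycles, and very short times are clamped to 1 so a register field never ends up zero:

#include <stdio.h>

static unsigned int smart_timing(unsigned int time_ns, unsigned long lcd_clk)
{
        unsigned int t = time_ns * (lcd_clk / 1000000) / 1000;
        return (t == 0) ? 1 : t;
}

int main(void)
{
        printf("%u\n", smart_timing(100, 52000000));    /* 5 */
        printf("%u\n", smart_timing(10, 52000000));     /* 0 -> clamped to 1 */
        return 0;
}
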
695static void setup_smart_timing(struct pxafb_info *fbi,
696 struct fb_var_screeninfo *var)
697{
698 struct pxafb_mach_info *inf = fbi->dev->platform_data;
699 struct pxafb_mode_info *mode = &inf->modes[0];
700 unsigned long lclk = clk_get_rate(fbi->clk);
701 unsigned t1, t2, t3, t4;
702
703 t1 = max(mode->a0csrd_set_hld, mode->a0cswr_set_hld);
704 t2 = max(mode->rd_pulse_width, mode->wr_pulse_width);
705 t3 = mode->op_hold_time;
706 t4 = mode->cmd_inh_time;
707
708 fbi->reg_lccr1 =
709 LCCR1_DisWdth(var->xres) |
710 LCCR1_BegLnDel(__smart_timing(t1, lclk)) |
711 LCCR1_EndLnDel(__smart_timing(t2, lclk)) |
712 LCCR1_HorSnchWdth(__smart_timing(t3, lclk));
713
714 fbi->reg_lccr2 = LCCR2_DisHght(var->yres);
715 fbi->reg_lccr3 = LCCR3_PixClkDiv(__smart_timing(t4, lclk));
716
717 /* FIXME: make this configurable */
718 fbi->reg_cmdcr = 1;
719}
720
721static int pxafb_smart_thread(void *arg)
722{
723 struct pxafb_info *fbi = arg;
724 struct pxafb_mach_info *inf = fbi->dev->platform_data;
725
726 if (!fbi || !inf->smart_update) {
727 pr_err("%s: not properly initialized, thread terminated\n",
728 __func__);
729 return -EINVAL;
730 }
642 731
643 new_regs.lccr1 = 732 pr_debug("%s(): task starting\n", __func__);
733
734 set_freezable();
735 while (!kthread_should_stop()) {
736
737 if (try_to_freeze())
738 continue;
739
740 if (fbi->state == C_ENABLE) {
741 inf->smart_update(&fbi->fb);
742 complete(&fbi->refresh_done);
743 }
744
745 set_current_state(TASK_INTERRUPTIBLE);
746 schedule_timeout(30 * HZ / 1000);
747 }
748
749 pr_debug("%s(): task ending\n", __func__);
750 return 0;
751}
752
753static int pxafb_smart_init(struct pxafb_info *fbi)
754{
755 fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi,
756 "lcd_refresh");
757 if (IS_ERR(fbi->smart_thread)) {
758 printk(KERN_ERR "%s: unable to create kernel thread\n",
759 __func__);
760 return PTR_ERR(fbi->smart_thread);
761 }
762 return 0;
763}
764#else
765int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds)
766{
767 return 0;
768}
769
770int pxafb_smart_flush(struct fb_info *info)
771{
772 return 0;
773}
 774#endif /* CONFIG_FB_PXA_SMARTPANEL */
775
776static void setup_parallel_timing(struct pxafb_info *fbi,
777 struct fb_var_screeninfo *var)
778{
779 unsigned int lines_per_panel, pcd = get_pcd(fbi, var->pixclock);
780
781 fbi->reg_lccr1 =
644 LCCR1_DisWdth(var->xres) + 782 LCCR1_DisWdth(var->xres) +
645 LCCR1_HorSnchWdth(var->hsync_len) + 783 LCCR1_HorSnchWdth(var->hsync_len) +
646 LCCR1_BegLnDel(var->left_margin) + 784 LCCR1_BegLnDel(var->left_margin) +
@@ -654,110 +792,118 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
654 if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual) 792 if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual)
655 lines_per_panel /= 2; 793 lines_per_panel /= 2;
656 794
657 new_regs.lccr2 = 795 fbi->reg_lccr2 =
658 LCCR2_DisHght(lines_per_panel) + 796 LCCR2_DisHght(lines_per_panel) +
659 LCCR2_VrtSnchWdth(var->vsync_len) + 797 LCCR2_VrtSnchWdth(var->vsync_len) +
660 LCCR2_BegFrmDel(var->upper_margin) + 798 LCCR2_BegFrmDel(var->upper_margin) +
661 LCCR2_EndFrmDel(var->lower_margin); 799 LCCR2_EndFrmDel(var->lower_margin);
662 800
663 new_regs.lccr3 = fbi->lccr3 | 801 fbi->reg_lccr3 = fbi->lccr3 |
664 pxafb_bpp_to_lccr3(var) | 802 (var->sync & FB_SYNC_HOR_HIGH_ACT ?
665 (var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) | 803 LCCR3_HorSnchH : LCCR3_HorSnchL) |
666 (var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL); 804 (var->sync & FB_SYNC_VERT_HIGH_ACT ?
805 LCCR3_VrtSnchH : LCCR3_VrtSnchL);
806
807 if (pcd) {
808 fbi->reg_lccr3 |= LCCR3_PixClkDiv(pcd);
809 set_hsync_time(fbi, pcd);
810 }
811}
667 812
668 if (pcd) 813/*
669 new_regs.lccr3 |= LCCR3_PixClkDiv(pcd); 814 * pxafb_activate_var():
815 * Configures LCD Controller based on entries in var parameter.
816 * Settings are only written to the controller if changes were made.
817 */
818static int pxafb_activate_var(struct fb_var_screeninfo *var,
819 struct pxafb_info *fbi)
820{
821 u_long flags;
822 size_t nbytes;
670 823
671 pr_debug("nlccr0 = 0x%08x\n", new_regs.lccr0); 824#if DEBUG_VAR
672 pr_debug("nlccr1 = 0x%08x\n", new_regs.lccr1); 825 if (!(fbi->lccr0 & LCCR0_LCDT)) {
673 pr_debug("nlccr2 = 0x%08x\n", new_regs.lccr2); 826 if (var->xres < 16 || var->xres > 1024)
674 pr_debug("nlccr3 = 0x%08x\n", new_regs.lccr3); 827 printk(KERN_ERR "%s: invalid xres %d\n",
828 fbi->fb.fix.id, var->xres);
829 switch (var->bits_per_pixel) {
830 case 1:
831 case 2:
832 case 4:
833 case 8:
834 case 16:
835 break;
836 default:
837 printk(KERN_ERR "%s: invalid bit depth %d\n",
838 fbi->fb.fix.id, var->bits_per_pixel);
839 break;
840 }
675 841
842 if (var->hsync_len < 1 || var->hsync_len > 64)
843 printk(KERN_ERR "%s: invalid hsync_len %d\n",
844 fbi->fb.fix.id, var->hsync_len);
845 if (var->left_margin < 1 || var->left_margin > 255)
846 printk(KERN_ERR "%s: invalid left_margin %d\n",
847 fbi->fb.fix.id, var->left_margin);
848 if (var->right_margin < 1 || var->right_margin > 255)
849 printk(KERN_ERR "%s: invalid right_margin %d\n",
850 fbi->fb.fix.id, var->right_margin);
851 if (var->yres < 1 || var->yres > 1024)
852 printk(KERN_ERR "%s: invalid yres %d\n",
853 fbi->fb.fix.id, var->yres);
854 if (var->vsync_len < 1 || var->vsync_len > 64)
855 printk(KERN_ERR "%s: invalid vsync_len %d\n",
856 fbi->fb.fix.id, var->vsync_len);
857 if (var->upper_margin < 0 || var->upper_margin > 255)
858 printk(KERN_ERR "%s: invalid upper_margin %d\n",
859 fbi->fb.fix.id, var->upper_margin);
860 if (var->lower_margin < 0 || var->lower_margin > 255)
861 printk(KERN_ERR "%s: invalid lower_margin %d\n",
862 fbi->fb.fix.id, var->lower_margin);
863 }
864#endif
676 /* Update shadow copy atomically */ 865 /* Update shadow copy atomically */
677 local_irq_save(flags); 866 local_irq_save(flags);
678 867
679 /* setup dma descriptors */ 868#ifdef CONFIG_FB_PXA_SMARTPANEL
680 fbi->dmadesc_fblow_cpu = (struct pxafb_dma_descriptor *)((unsigned int)fbi->palette_cpu - 3*16); 869 if (fbi->lccr0 & LCCR0_LCDT)
681 fbi->dmadesc_fbhigh_cpu = (struct pxafb_dma_descriptor *)((unsigned int)fbi->palette_cpu - 2*16); 870 setup_smart_timing(fbi, var);
682 fbi->dmadesc_palette_cpu = (struct pxafb_dma_descriptor *)((unsigned int)fbi->palette_cpu - 1*16); 871 else
683 872#endif
684 fbi->dmadesc_fblow_dma = fbi->palette_dma - 3*16; 873 setup_parallel_timing(fbi, var);
685 fbi->dmadesc_fbhigh_dma = fbi->palette_dma - 2*16;
686 fbi->dmadesc_palette_dma = fbi->palette_dma - 1*16;
687
688#define BYTES_PER_PANEL (lines_per_panel * fbi->fb.fix.line_length)
689
690 /* populate descriptors */
691 fbi->dmadesc_fblow_cpu->fdadr = fbi->dmadesc_fblow_dma;
692 fbi->dmadesc_fblow_cpu->fsadr = fbi->screen_dma + BYTES_PER_PANEL;
693 fbi->dmadesc_fblow_cpu->fidr = 0;
694 fbi->dmadesc_fblow_cpu->ldcmd = BYTES_PER_PANEL;
695 874
696 fbi->fdadr1 = fbi->dmadesc_fblow_dma; /* only used in dual-panel mode */ 875 fbi->reg_lccr0 = fbi->lccr0 |
876 (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM |
877 LCCR0_QDM | LCCR0_BM | LCCR0_OUM);
697 878
698 fbi->dmadesc_fbhigh_cpu->fsadr = fbi->screen_dma; 879 fbi->reg_lccr3 |= pxafb_bpp_to_lccr3(var);
699 fbi->dmadesc_fbhigh_cpu->fidr = 0;
700 fbi->dmadesc_fbhigh_cpu->ldcmd = BYTES_PER_PANEL;
701 880
702 fbi->dmadesc_palette_cpu->fsadr = fbi->palette_dma; 881 nbytes = var->yres * fbi->fb.fix.line_length;
703 fbi->dmadesc_palette_cpu->fidr = 0;
704 if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0)
705 fbi->dmadesc_palette_cpu->ldcmd = fbi->palette_size *
706 sizeof(u16);
707 else
708 fbi->dmadesc_palette_cpu->ldcmd = fbi->palette_size *
709 sizeof(u32);
710 fbi->dmadesc_palette_cpu->ldcmd |= LDCMD_PAL;
711 882
712 if (var->bits_per_pixel == 16) { 883 if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual) {
713 /* palette shouldn't be loaded in true-color mode */ 884 nbytes = nbytes / 2;
714 fbi->dmadesc_fbhigh_cpu->fdadr = fbi->dmadesc_fbhigh_dma; 885 setup_frame_dma(fbi, DMA_LOWER, PAL_NONE, nbytes, nbytes);
715 fbi->fdadr0 = fbi->dmadesc_fbhigh_dma; /* no pal just fbhigh */
716 /* init it to something, even though we won't be using it */
717 fbi->dmadesc_palette_cpu->fdadr = fbi->dmadesc_palette_dma;
718 } else {
719 fbi->dmadesc_palette_cpu->fdadr = fbi->dmadesc_fbhigh_dma;
720 fbi->dmadesc_fbhigh_cpu->fdadr = fbi->dmadesc_palette_dma;
721 fbi->fdadr0 = fbi->dmadesc_palette_dma; /* flips back and forth between pal and fbhigh */
722 } 886 }
723 887
724#if 0 888 if ((var->bits_per_pixel >= 16) || (fbi->lccr0 & LCCR0_LCDT))
725 pr_debug("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu); 889 setup_frame_dma(fbi, DMA_BASE, PAL_NONE, 0, nbytes);
726 pr_debug("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu); 890 else
727 pr_debug("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu); 891 setup_frame_dma(fbi, DMA_BASE, PAL_BASE, 0, nbytes);
728 pr_debug("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma);
729 pr_debug("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma);
730 pr_debug("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma);
731
732 pr_debug("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr);
733 pr_debug("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr);
734 pr_debug("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr);
735
736 pr_debug("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr);
737 pr_debug("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr);
738 pr_debug("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr);
739
740 pr_debug("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd);
741 pr_debug("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd);
742 pr_debug("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd);
743#endif
744 892
745 fbi->reg_lccr0 = new_regs.lccr0; 893 fbi->reg_lccr4 = lcd_readl(fbi, LCCR4) & ~LCCR4_PAL_FOR_MASK;
746 fbi->reg_lccr1 = new_regs.lccr1;
747 fbi->reg_lccr2 = new_regs.lccr2;
748 fbi->reg_lccr3 = new_regs.lccr3;
749 fbi->reg_lccr4 = LCCR4 & (~LCCR4_PAL_FOR_MASK);
750 fbi->reg_lccr4 |= (fbi->lccr4 & LCCR4_PAL_FOR_MASK); 894 fbi->reg_lccr4 |= (fbi->lccr4 & LCCR4_PAL_FOR_MASK);
751 set_hsync_time(fbi, pcd);
752 local_irq_restore(flags); 895 local_irq_restore(flags);
753 896
754 /* 897 /*
755 * Only update the registers if the controller is enabled 898 * Only update the registers if the controller is enabled
756 * and something has changed. 899 * and something has changed.
757 */ 900 */
758 if ((LCCR0 != fbi->reg_lccr0) || (LCCR1 != fbi->reg_lccr1) || 901 if ((lcd_readl(fbi, LCCR0) != fbi->reg_lccr0) ||
759 (LCCR2 != fbi->reg_lccr2) || (LCCR3 != fbi->reg_lccr3) || 902 (lcd_readl(fbi, LCCR1) != fbi->reg_lccr1) ||
760 (FDADR0 != fbi->fdadr0) || (FDADR1 != fbi->fdadr1)) 903 (lcd_readl(fbi, LCCR2) != fbi->reg_lccr2) ||
904 (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) ||
905 (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) ||
906 (lcd_readl(fbi, FDADR1) != fbi->fdadr[1]))
761 pxafb_schedule_work(fbi, C_REENABLE); 907 pxafb_schedule_work(fbi, C_REENABLE);
762 908
763 return 0; 909 return 0;
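
A worked example of the dual-scan split performed just above (mode numbers assumed): at 640x480 and 16 bpp the line length is 1280 bytes, so each panel half transfers 307200 bytes, with the lower half starting halfway into the frame:

#include <stdio.h>

int main(void)
{
        unsigned int yres = 480, line_length = 640 * 2;  /* assumed mode */
        unsigned long nbytes = (unsigned long)yres * line_length;

        nbytes /= 2;                    /* dual panel: two half frames */
        printf("upper: offset 0,      %lu bytes\n", nbytes);
        printf("lower: offset %lu, %lu bytes\n", nbytes, nbytes);
        return 0;
}
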
@@ -773,8 +919,8 @@ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on)
773{ 919{
774 pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff"); 920 pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff");
775 921
776 if (pxafb_backlight_power) 922 if (pxafb_backlight_power)
777 pxafb_backlight_power(on); 923 pxafb_backlight_power(on);
778} 924}
779 925
780static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) 926static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
@@ -788,11 +934,11 @@ static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
788static void pxafb_setup_gpio(struct pxafb_info *fbi) 934static void pxafb_setup_gpio(struct pxafb_info *fbi)
789{ 935{
790 int gpio, ldd_bits; 936 int gpio, ldd_bits;
791 unsigned int lccr0 = fbi->lccr0; 937 unsigned int lccr0 = fbi->lccr0;
792 938
793 /* 939 /*
794 * setup is based on type of panel supported 940 * setup is based on type of panel supported
795 */ 941 */
796 942
797 /* 4 bit interface */ 943 /* 4 bit interface */
798 if ((lccr0 & LCCR0_CMS) == LCCR0_Mono && 944 if ((lccr0 & LCCR0_CMS) == LCCR0_Mono &&
@@ -801,21 +947,25 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
801 ldd_bits = 4; 947 ldd_bits = 4;
802 948
803 /* 8 bit interface */ 949 /* 8 bit interface */
804 else if (((lccr0 & LCCR0_CMS) == LCCR0_Mono && 950 else if (((lccr0 & LCCR0_CMS) == LCCR0_Mono &&
805 ((lccr0 & LCCR0_SDS) == LCCR0_Dual || (lccr0 & LCCR0_DPD) == LCCR0_8PixMono)) || 951 ((lccr0 & LCCR0_SDS) == LCCR0_Dual ||
806 ((lccr0 & LCCR0_CMS) == LCCR0_Color && 952 (lccr0 & LCCR0_DPD) == LCCR0_8PixMono)) ||
807 (lccr0 & LCCR0_PAS) == LCCR0_Pas && (lccr0 & LCCR0_SDS) == LCCR0_Sngl)) 953 ((lccr0 & LCCR0_CMS) == LCCR0_Color &&
954 (lccr0 & LCCR0_PAS) == LCCR0_Pas &&
955 (lccr0 & LCCR0_SDS) == LCCR0_Sngl))
808 ldd_bits = 8; 956 ldd_bits = 8;
809 957
810 /* 16 bit interface */ 958 /* 16 bit interface */
811 else if ((lccr0 & LCCR0_CMS) == LCCR0_Color && 959 else if ((lccr0 & LCCR0_CMS) == LCCR0_Color &&
812 ((lccr0 & LCCR0_SDS) == LCCR0_Dual || (lccr0 & LCCR0_PAS) == LCCR0_Act)) 960 ((lccr0 & LCCR0_SDS) == LCCR0_Dual ||
961 (lccr0 & LCCR0_PAS) == LCCR0_Act))
813 ldd_bits = 16; 962 ldd_bits = 16;
814 963
815 else { 964 else {
816 printk(KERN_ERR "pxafb_setup_gpio: unable to determine bits per pixel\n"); 965 printk(KERN_ERR "pxafb_setup_gpio: unable to determine "
966 "bits per pixel\n");
817 return; 967 return;
818 } 968 }
819 969
820 for (gpio = 58; ldd_bits; gpio++, ldd_bits--) 970 for (gpio = 58; ldd_bits; gpio++, ldd_bits--)
821 pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT); 971 pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT);
@@ -828,8 +978,8 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
828static void pxafb_enable_controller(struct pxafb_info *fbi) 978static void pxafb_enable_controller(struct pxafb_info *fbi)
829{ 979{
830 pr_debug("pxafb: Enabling LCD controller\n"); 980 pr_debug("pxafb: Enabling LCD controller\n");
831 pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0); 981 pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr[0]);
832 pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1); 982 pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr[1]);
833 pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0); 983 pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0);
834 pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1); 984 pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1);
835 pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2); 985 pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2);
@@ -838,40 +988,40 @@ static void pxafb_enable_controller(struct pxafb_info *fbi)
838 /* enable LCD controller clock */ 988 /* enable LCD controller clock */
839 clk_enable(fbi->clk); 989 clk_enable(fbi->clk);
840 990
991 if (fbi->lccr0 & LCCR0_LCDT)
992 return;
993
841 /* Sequence from 11.7.10 */ 994 /* Sequence from 11.7.10 */
842 LCCR3 = fbi->reg_lccr3; 995 lcd_writel(fbi, LCCR3, fbi->reg_lccr3);
843 LCCR2 = fbi->reg_lccr2; 996 lcd_writel(fbi, LCCR2, fbi->reg_lccr2);
844 LCCR1 = fbi->reg_lccr1; 997 lcd_writel(fbi, LCCR1, fbi->reg_lccr1);
845 LCCR0 = fbi->reg_lccr0 & ~LCCR0_ENB; 998 lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB);
846 999
847 FDADR0 = fbi->fdadr0; 1000 lcd_writel(fbi, FDADR0, fbi->fdadr[0]);
848 FDADR1 = fbi->fdadr1; 1001 lcd_writel(fbi, FDADR1, fbi->fdadr[1]);
849 LCCR0 |= LCCR0_ENB; 1002 lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB);
850
851 pr_debug("FDADR0 0x%08x\n", (unsigned int) FDADR0);
852 pr_debug("FDADR1 0x%08x\n", (unsigned int) FDADR1);
853 pr_debug("LCCR0 0x%08x\n", (unsigned int) LCCR0);
854 pr_debug("LCCR1 0x%08x\n", (unsigned int) LCCR1);
855 pr_debug("LCCR2 0x%08x\n", (unsigned int) LCCR2);
856 pr_debug("LCCR3 0x%08x\n", (unsigned int) LCCR3);
857 pr_debug("LCCR4 0x%08x\n", (unsigned int) LCCR4);
858} 1003}
859 1004
860static void pxafb_disable_controller(struct pxafb_info *fbi) 1005static void pxafb_disable_controller(struct pxafb_info *fbi)
861{ 1006{
862 DECLARE_WAITQUEUE(wait, current); 1007 uint32_t lccr0;
863 1008
864 pr_debug("pxafb: disabling LCD controller\n"); 1009#ifdef CONFIG_FB_PXA_SMARTPANEL
1010 if (fbi->lccr0 & LCCR0_LCDT) {
1011 wait_for_completion_timeout(&fbi->refresh_done,
1012 200 * HZ / 1000);
1013 return;
1014 }
1015#endif
865 1016
866 set_current_state(TASK_UNINTERRUPTIBLE); 1017 /* Clear LCD Status Register */
867 add_wait_queue(&fbi->ctrlr_wait, &wait); 1018 lcd_writel(fbi, LCSR, 0xffffffff);
868 1019
869 LCSR = 0xffffffff; /* Clear LCD Status Register */ 1020 lccr0 = lcd_readl(fbi, LCCR0) & ~LCCR0_LDM;
870 LCCR0 &= ~LCCR0_LDM; /* Enable LCD Disable Done Interrupt */ 1021 lcd_writel(fbi, LCCR0, lccr0);
871 LCCR0 |= LCCR0_DIS; /* Disable LCD Controller */ 1022 lcd_writel(fbi, LCCR0, lccr0 | LCCR0_DIS);
872 1023
873 schedule_timeout(200 * HZ / 1000); 1024 wait_for_completion_timeout(&fbi->disable_done, 200 * HZ / 1000);
874 remove_wait_queue(&fbi->ctrlr_wait, &wait);
875 1025
876 /* disable LCD controller clock */ 1026 /* disable LCD controller clock */
877 clk_disable(fbi->clk); 1027 clk_disable(fbi->clk);
@@ -883,14 +1033,20 @@ static void pxafb_disable_controller(struct pxafb_info *fbi)
883static irqreturn_t pxafb_handle_irq(int irq, void *dev_id) 1033static irqreturn_t pxafb_handle_irq(int irq, void *dev_id)
884{ 1034{
885 struct pxafb_info *fbi = dev_id; 1035 struct pxafb_info *fbi = dev_id;
886 unsigned int lcsr = LCSR; 1036 unsigned int lccr0, lcsr = lcd_readl(fbi, LCSR);
887 1037
888 if (lcsr & LCSR_LDD) { 1038 if (lcsr & LCSR_LDD) {
889 LCCR0 |= LCCR0_LDM; 1039 lccr0 = lcd_readl(fbi, LCCR0);
890 wake_up(&fbi->ctrlr_wait); 1040 lcd_writel(fbi, LCCR0, lccr0 | LCCR0_LDM);
1041 complete(&fbi->disable_done);
891 } 1042 }
892 1043
893 LCSR = lcsr; 1044#ifdef CONFIG_FB_PXA_SMARTPANEL
1045 if (lcsr & LCSR_CMD_INT)
1046 complete(&fbi->command_done);
1047#endif
1048
1049 lcd_writel(fbi, LCSR, lcsr);
894 return IRQ_HANDLED; 1050 return IRQ_HANDLED;
895} 1051}
896 1052
@@ -921,7 +1077,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
921 */ 1077 */
922 if (old_state != C_DISABLE && old_state != C_DISABLE_PM) { 1078 if (old_state != C_DISABLE && old_state != C_DISABLE_PM) {
923 fbi->state = state; 1079 fbi->state = state;
924 //TODO __pxafb_lcd_power(fbi, 0); 1080 /* TODO __pxafb_lcd_power(fbi, 0); */
925 pxafb_disable_controller(fbi); 1081 pxafb_disable_controller(fbi);
926 } 1082 }
927 break; 1083 break;
@@ -948,7 +1104,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
948 if (old_state == C_DISABLE_CLKCHANGE) { 1104 if (old_state == C_DISABLE_CLKCHANGE) {
949 fbi->state = C_ENABLE; 1105 fbi->state = C_ENABLE;
950 pxafb_enable_controller(fbi); 1106 pxafb_enable_controller(fbi);
951 //TODO __pxafb_lcd_power(fbi, 1); 1107 /* TODO __pxafb_lcd_power(fbi, 1); */
952 } 1108 }
953 break; 1109 break;
954 1110
@@ -1019,7 +1175,7 @@ static int
1019pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) 1175pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
1020{ 1176{
1021 struct pxafb_info *fbi = TO_INF(nb, freq_transition); 1177 struct pxafb_info *fbi = TO_INF(nb, freq_transition);
1022 //TODO struct cpufreq_freqs *f = data; 1178 /* TODO struct cpufreq_freqs *f = data; */
1023 u_int pcd; 1179 u_int pcd;
1024 1180
1025 switch (val) { 1181 switch (val) {
@@ -1030,7 +1186,8 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
1030 case CPUFREQ_POSTCHANGE: 1186 case CPUFREQ_POSTCHANGE:
1031 pcd = get_pcd(fbi, fbi->fb.var.pixclock); 1187 pcd = get_pcd(fbi, fbi->fb.var.pixclock);
1032 set_hsync_time(fbi, pcd); 1188 set_hsync_time(fbi, pcd);
1033 fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd); 1189 fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) |
1190 LCCR3_PixClkDiv(pcd);
1034 set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE); 1191 set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE);
1035 break; 1192 break;
1036 } 1193 }
@@ -1050,18 +1207,8 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
1050 pr_debug("min dma period: %d ps, " 1207 pr_debug("min dma period: %d ps, "
1051 "new clock %d kHz\n", pxafb_display_dma_period(var), 1208 "new clock %d kHz\n", pxafb_display_dma_period(var),
1052 policy->max); 1209 policy->max);
1053 // TODO: fill in min/max values 1210 /* TODO: fill in min/max values */
1054 break;
1055#if 0
1056 case CPUFREQ_NOTIFY:
1057 printk(KERN_ERR "%s: got CPUFREQ_NOTIFY\n", __FUNCTION__);
1058 do {} while(0);
1059 /* todo: panic if min/max values aren't fulfilled
1060 * [can't really happen unless there's a bug in the
1061 * CPU policy verification process *
1062 */
1063 break; 1211 break;
1064#endif
1065 } 1212 }
1066 return 0; 1213 return 0;
1067} 1214}
@@ -1102,21 +1249,21 @@ static int pxafb_resume(struct platform_device *dev)
1102 */ 1249 */
1103static int __init pxafb_map_video_memory(struct pxafb_info *fbi) 1250static int __init pxafb_map_video_memory(struct pxafb_info *fbi)
1104{ 1251{
1105 u_long palette_mem_size;
1106
1107 /* 1252 /*
1108 * We reserve one page for the palette, plus the size 1253 * We reserve one page for the palette, plus the size
1109 * of the framebuffer. 1254 * of the framebuffer.
1110 */ 1255 */
1111 fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE); 1256 fbi->video_offset = PAGE_ALIGN(sizeof(struct pxafb_dma_buff));
1257 fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + fbi->video_offset);
1112 fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size, 1258 fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size,
1113 &fbi->map_dma, GFP_KERNEL); 1259 &fbi->map_dma, GFP_KERNEL);
1114 1260
1115 if (fbi->map_cpu) { 1261 if (fbi->map_cpu) {
1116 /* prevent initial garbage on screen */ 1262 /* prevent initial garbage on screen */
1117 memset(fbi->map_cpu, 0, fbi->map_size); 1263 memset(fbi->map_cpu, 0, fbi->map_size);
1118 fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE; 1264 fbi->fb.screen_base = fbi->map_cpu + fbi->video_offset;
1119 fbi->screen_dma = fbi->map_dma + PAGE_SIZE; 1265 fbi->screen_dma = fbi->map_dma + fbi->video_offset;
1266
1120 /* 1267 /*
1121 * FIXME: this is actually the wrong thing to place in 1268 * FIXME: this is actually the wrong thing to place in
1122 * smem_start. But fbdev suffers from the problem that 1269 * smem_start. But fbdev suffers from the problem that
@@ -1126,27 +1273,86 @@ static int __init pxafb_map_video_memory(struct pxafb_info *fbi)
1126 fbi->fb.fix.smem_start = fbi->screen_dma; 1273 fbi->fb.fix.smem_start = fbi->screen_dma;
1127 fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16; 1274 fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16;
1128 1275
1129 if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0) 1276 fbi->dma_buff = (void *) fbi->map_cpu;
1130 palette_mem_size = fbi->palette_size * sizeof(u16); 1277 fbi->dma_buff_phys = fbi->map_dma;
1131 else 1278 fbi->palette_cpu = (u16 *) fbi->dma_buff->palette;
1132 palette_mem_size = fbi->palette_size * sizeof(u32);
1133 1279
1134 pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size); 1280#ifdef CONFIG_FB_PXA_SMARTPANEL
1135 1281 fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff;
1136 fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); 1282 fbi->n_smart_cmds = 0;
1137 fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size; 1283#endif
1138 } 1284 }
1139 1285
1140 return fbi->map_cpu ? 0 : -ENOMEM; 1286 return fbi->map_cpu ? 0 : -ENOMEM;
1141} 1287}
1142 1288
1289static void pxafb_decode_mode_info(struct pxafb_info *fbi,
1290 struct pxafb_mode_info *modes,
1291 unsigned int num_modes)
1292{
1293 unsigned int i, smemlen;
1294
1295 pxafb_setmode(&fbi->fb.var, &modes[0]);
1296
1297 for (i = 0; i < num_modes; i++) {
1298 smemlen = modes[i].xres * modes[i].yres * modes[i].bpp / 8;
1299 if (smemlen > fbi->fb.fix.smem_len)
1300 fbi->fb.fix.smem_len = smemlen;
1301 }
1302}
1303
1304static int pxafb_decode_mach_info(struct pxafb_info *fbi,
1305 struct pxafb_mach_info *inf)
1306{
1307 unsigned int lcd_conn = inf->lcd_conn;
1308
1309 fbi->cmap_inverse = inf->cmap_inverse;
1310 fbi->cmap_static = inf->cmap_static;
1311
1312 switch (lcd_conn & 0xf) {
1313 case LCD_TYPE_MONO_STN:
1314 fbi->lccr0 = LCCR0_CMS;
1315 break;
1316 case LCD_TYPE_MONO_DSTN:
1317 fbi->lccr0 = LCCR0_CMS | LCCR0_SDS;
1318 break;
1319 case LCD_TYPE_COLOR_STN:
1320 fbi->lccr0 = 0;
1321 break;
1322 case LCD_TYPE_COLOR_DSTN:
1323 fbi->lccr0 = LCCR0_SDS;
1324 break;
1325 case LCD_TYPE_COLOR_TFT:
1326 fbi->lccr0 = LCCR0_PAS;
1327 break;
1328 case LCD_TYPE_SMART_PANEL:
1329 fbi->lccr0 = LCCR0_LCDT | LCCR0_PAS;
1330 break;
1331 default:
1332 /* fall back to backward compatibility way */
1333 fbi->lccr0 = inf->lccr0;
1334 fbi->lccr3 = inf->lccr3;
1335 fbi->lccr4 = inf->lccr4;
1336 return -EINVAL;
1337 }
1338
1339 if (lcd_conn == LCD_MONO_STN_8BPP)
1340 fbi->lccr0 |= LCCR0_DPD;
1341
1342 fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff);
1343 fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0;
1344 fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0;
1345
1346 pxafb_decode_mode_info(fbi, inf->modes, inf->num_modes);
1347 return 0;
1348}
1349
1143static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev) 1350static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev)
1144{ 1351{
1145 struct pxafb_info *fbi; 1352 struct pxafb_info *fbi;
1146 void *addr; 1353 void *addr;
1147 struct pxafb_mach_info *inf = dev->platform_data; 1354 struct pxafb_mach_info *inf = dev->platform_data;
1148 struct pxafb_mode_info *mode = inf->modes; 1355 struct pxafb_mode_info *mode = inf->modes;
1149 int i, smemlen;
1150 1356
1151 /* Alloc the pxafb_info and pseudo_palette in one step */ 1357 /* Alloc the pxafb_info and pseudo_palette in one step */
1152 fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL); 1358 fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL);
@@ -1186,187 +1392,233 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev)
1186 addr = addr + sizeof(struct pxafb_info); 1392 addr = addr + sizeof(struct pxafb_info);
1187 fbi->fb.pseudo_palette = addr; 1393 fbi->fb.pseudo_palette = addr;
1188 1394
1189 pxafb_setmode(&fbi->fb.var, mode); 1395 fbi->state = C_STARTUP;
1396 fbi->task_state = (u_char)-1;
1190 1397
1191 fbi->cmap_inverse = inf->cmap_inverse; 1398 pxafb_decode_mach_info(fbi, inf);
1192 fbi->cmap_static = inf->cmap_static;
1193
1194 fbi->lccr0 = inf->lccr0;
1195 fbi->lccr3 = inf->lccr3;
1196 fbi->lccr4 = inf->lccr4;
1197 fbi->state = C_STARTUP;
1198 fbi->task_state = (u_char)-1;
1199
1200 for (i = 0; i < inf->num_modes; i++) {
1201 smemlen = mode[i].xres * mode[i].yres * mode[i].bpp / 8;
1202 if (smemlen > fbi->fb.fix.smem_len)
1203 fbi->fb.fix.smem_len = smemlen;
1204 }
1205 1399
1206 init_waitqueue_head(&fbi->ctrlr_wait); 1400 init_waitqueue_head(&fbi->ctrlr_wait);
1207 INIT_WORK(&fbi->task, pxafb_task); 1401 INIT_WORK(&fbi->task, pxafb_task);
1208 init_MUTEX(&fbi->ctrlr_sem); 1402 init_MUTEX(&fbi->ctrlr_sem);
1403 init_completion(&fbi->disable_done);
1404#ifdef CONFIG_FB_PXA_SMARTPANEL
1405 init_completion(&fbi->command_done);
1406 init_completion(&fbi->refresh_done);
1407#endif
1209 1408
1210 return fbi; 1409 return fbi;
1211} 1410}
1212 1411
1213#ifdef CONFIG_FB_PXA_PARAMETERS 1412#ifdef CONFIG_FB_PXA_PARAMETERS
1214static int __init pxafb_parse_options(struct device *dev, char *options) 1413static int __init parse_opt_mode(struct device *dev, const char *this_opt)
1414{
1415 struct pxafb_mach_info *inf = dev->platform_data;
1416
1417 const char *name = this_opt+5;
1418 unsigned int namelen = strlen(name);
1419 int res_specified = 0, bpp_specified = 0;
1420 unsigned int xres = 0, yres = 0, bpp = 0;
1421 int yres_specified = 0;
1422 int i;
1423 for (i = namelen-1; i >= 0; i--) {
1424 switch (name[i]) {
1425 case '-':
1426 namelen = i;
1427 if (!bpp_specified && !yres_specified) {
1428 bpp = simple_strtoul(&name[i+1], NULL, 0);
1429 bpp_specified = 1;
1430 } else
1431 goto done;
1432 break;
1433 case 'x':
1434 if (!yres_specified) {
1435 yres = simple_strtoul(&name[i+1], NULL, 0);
1436 yres_specified = 1;
1437 } else
1438 goto done;
1439 break;
1440 case '0' ... '9':
1441 break;
1442 default:
1443 goto done;
1444 }
1445 }
1446 if (i < 0 && yres_specified) {
1447 xres = simple_strtoul(name, NULL, 0);
1448 res_specified = 1;
1449 }
1450done:
1451 if (res_specified) {
1452 dev_info(dev, "overriding resolution: %dx%d\n", xres, yres);
1453 inf->modes[0].xres = xres; inf->modes[0].yres = yres;
1454 }
1455 if (bpp_specified)
1456 switch (bpp) {
1457 case 1:
1458 case 2:
1459 case 4:
1460 case 8:
1461 case 16:
1462 inf->modes[0].bpp = bpp;
1463 dev_info(dev, "overriding bit depth: %d\n", bpp);
1464 break;
1465 default:
1466 dev_err(dev, "Depth %d is not valid\n", bpp);
1467 return -EINVAL;
1468 }
1469 return 0;
1470}
1471
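
A standalone illustration of the right-to-left scan used by parse_opt_mode() above: "640x480-16" yields xres=640, yres=480, bpp=16. This is a simplified re-implementation for demonstration (it drops the namelen trimming and early exits), not the driver code itself:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *name = "640x480-16";        /* as passed after "mode:" */
        int namelen = (int)strlen(name);
        unsigned int xres = 0, yres = 0, bpp = 0;
        int yres_specified = 0, bpp_specified = 0;
        int i;

        for (i = namelen - 1; i >= 0; i--) {
                switch (name[i]) {
                case '-':
                        if (!bpp_specified && !yres_specified) {
                                bpp = strtoul(&name[i + 1], NULL, 0);
                                bpp_specified = 1;
                        }
                        break;
                case 'x':
                        if (!yres_specified) {
                                yres = strtoul(&name[i + 1], NULL, 0);
                                yres_specified = 1;
                        }
                        break;
                default:
                        break;
                }
        }
        if (yres_specified)
                xres = strtoul(name, NULL, 0);

        printf("xres=%u yres=%u bpp=%u\n", xres, yres, bpp);
        return 0;
}

Combined with the comma-separated options handled by parse_opt() below, a boot argument along the lines of video=pxafb:mode:640x480-16,active would exercise this path; the exact syntax is described in Documentation/fb/pxafb.txt.
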
1472static int __init parse_opt(struct device *dev, char *this_opt)
1215{ 1473{
1216 struct pxafb_mach_info *inf = dev->platform_data; 1474 struct pxafb_mach_info *inf = dev->platform_data;
1475 struct pxafb_mode_info *mode = &inf->modes[0];
1476 char s[64];
1477
1478 s[0] = '\0';
1479
1480 if (!strncmp(this_opt, "mode:", 5)) {
1481 return parse_opt_mode(dev, this_opt);
1482 } else if (!strncmp(this_opt, "pixclock:", 9)) {
1483 mode->pixclock = simple_strtoul(this_opt+9, NULL, 0);
1484 sprintf(s, "pixclock: %ld\n", mode->pixclock);
1485 } else if (!strncmp(this_opt, "left:", 5)) {
1486 mode->left_margin = simple_strtoul(this_opt+5, NULL, 0);
1487 sprintf(s, "left: %u\n", mode->left_margin);
1488 } else if (!strncmp(this_opt, "right:", 6)) {
1489 mode->right_margin = simple_strtoul(this_opt+6, NULL, 0);
1490 sprintf(s, "right: %u\n", mode->right_margin);
1491 } else if (!strncmp(this_opt, "upper:", 6)) {
1492 mode->upper_margin = simple_strtoul(this_opt+6, NULL, 0);
1493 sprintf(s, "upper: %u\n", mode->upper_margin);
1494 } else if (!strncmp(this_opt, "lower:", 6)) {
1495 mode->lower_margin = simple_strtoul(this_opt+6, NULL, 0);
1496 sprintf(s, "lower: %u\n", mode->lower_margin);
1497 } else if (!strncmp(this_opt, "hsynclen:", 9)) {
1498 mode->hsync_len = simple_strtoul(this_opt+9, NULL, 0);
1499 sprintf(s, "hsynclen: %u\n", mode->hsync_len);
1500 } else if (!strncmp(this_opt, "vsynclen:", 9)) {
1501 mode->vsync_len = simple_strtoul(this_opt+9, NULL, 0);
1502 sprintf(s, "vsynclen: %u\n", mode->vsync_len);
1503 } else if (!strncmp(this_opt, "hsync:", 6)) {
1504 if (simple_strtoul(this_opt+6, NULL, 0) == 0) {
1505 sprintf(s, "hsync: Active Low\n");
1506 mode->sync &= ~FB_SYNC_HOR_HIGH_ACT;
1507 } else {
1508 sprintf(s, "hsync: Active High\n");
1509 mode->sync |= FB_SYNC_HOR_HIGH_ACT;
1510 }
1511 } else if (!strncmp(this_opt, "vsync:", 6)) {
1512 if (simple_strtoul(this_opt+6, NULL, 0) == 0) {
1513 sprintf(s, "vsync: Active Low\n");
1514 mode->sync &= ~FB_SYNC_VERT_HIGH_ACT;
1515 } else {
1516 sprintf(s, "vsync: Active High\n");
1517 mode->sync |= FB_SYNC_VERT_HIGH_ACT;
1518 }
1519 } else if (!strncmp(this_opt, "dpc:", 4)) {
1520 if (simple_strtoul(this_opt+4, NULL, 0) == 0) {
1521 sprintf(s, "double pixel clock: false\n");
1522 inf->lccr3 &= ~LCCR3_DPC;
1523 } else {
1524 sprintf(s, "double pixel clock: true\n");
1525 inf->lccr3 |= LCCR3_DPC;
1526 }
1527 } else if (!strncmp(this_opt, "outputen:", 9)) {
1528 if (simple_strtoul(this_opt+9, NULL, 0) == 0) {
1529 sprintf(s, "output enable: active low\n");
1530 inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnL;
1531 } else {
1532 sprintf(s, "output enable: active high\n");
1533 inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnH;
1534 }
1535 } else if (!strncmp(this_opt, "pixclockpol:", 12)) {
1536 if (simple_strtoul(this_opt+12, NULL, 0) == 0) {
1537 sprintf(s, "pixel clock polarity: falling edge\n");
1538 inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixFlEdg;
1539 } else {
1540 sprintf(s, "pixel clock polarity: rising edge\n");
1541 inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixRsEdg;
1542 }
1543 } else if (!strncmp(this_opt, "color", 5)) {
1544 inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Color;
1545 } else if (!strncmp(this_opt, "mono", 4)) {
1546 inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Mono;
1547 } else if (!strncmp(this_opt, "active", 6)) {
1548 inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Act;
1549 } else if (!strncmp(this_opt, "passive", 7)) {
1550 inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Pas;
1551 } else if (!strncmp(this_opt, "single", 6)) {
1552 inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Sngl;
1553 } else if (!strncmp(this_opt, "dual", 4)) {
1554 inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Dual;
1555 } else if (!strncmp(this_opt, "4pix", 4)) {
1556 inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_4PixMono;
1557 } else if (!strncmp(this_opt, "8pix", 4)) {
1558 inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_8PixMono;
1559 } else {
1560 dev_err(dev, "unknown option: %s\n", this_opt);
1561 return -EINVAL;
1562 }
1563
1564 if (s[0] != '\0')
1565 dev_info(dev, "override %s", s);
1566
1567 return 0;
1568}
1569
1570static int __init pxafb_parse_options(struct device *dev, char *options)
1571{
1217 char *this_opt; 1572 char *this_opt;
1573 int ret;
1218 1574
1219 if (!options || !*options) 1575 if (!options || !*options)
1220 return 0; 1576 return 0;
1221 1577
1222 dev_dbg(dev, "options are \"%s\"\n", options ? options : "null"); 1578 dev_dbg(dev, "options are \"%s\"\n", options ? options : "null");
1223 1579
1224 /* could be made table driven or similar?... */ 1580 /* could be made table driven or similar?... */
1225 while ((this_opt = strsep(&options, ",")) != NULL) { 1581 while ((this_opt = strsep(&options, ",")) != NULL) {
1226 if (!strncmp(this_opt, "mode:", 5)) { 1582 ret = parse_opt(dev, this_opt);
1227 const char *name = this_opt+5; 1583 if (ret)
1228 unsigned int namelen = strlen(name); 1584 return ret;
1229 int res_specified = 0, bpp_specified = 0; 1585 }
1230 unsigned int xres = 0, yres = 0, bpp = 0; 1586 return 0;
1231 int yres_specified = 0; 1587}
1232 int i; 1588
1233 for (i = namelen-1; i >= 0; i--) { 1589static char g_options[256] __devinitdata = "";
1234 switch (name[i]) {
1235 case '-':
1236 namelen = i;
1237 if (!bpp_specified && !yres_specified) {
1238 bpp = simple_strtoul(&name[i+1], NULL, 0);
1239 bpp_specified = 1;
1240 } else
1241 goto done;
1242 break;
1243 case 'x':
1244 if (!yres_specified) {
1245 yres = simple_strtoul(&name[i+1], NULL, 0);
1246 yres_specified = 1;
1247 } else
1248 goto done;
1249 break;
1250 case '0' ... '9':
1251 break;
1252 default:
1253 goto done;
1254 }
1255 }
1256 if (i < 0 && yres_specified) {
1257 xres = simple_strtoul(name, NULL, 0);
1258 res_specified = 1;
1259 }
1260 done:
1261 if (res_specified) {
1262 dev_info(dev, "overriding resolution: %dx%d\n", xres, yres);
1263 inf->modes[0].xres = xres; inf->modes[0].yres = yres;
1264 }
1265 if (bpp_specified)
1266 switch (bpp) {
1267 case 1:
1268 case 2:
1269 case 4:
1270 case 8:
1271 case 16:
1272 inf->modes[0].bpp = bpp;
1273 dev_info(dev, "overriding bit depth: %d\n", bpp);
1274 break;
1275 default:
1276 dev_err(dev, "Depth %d is not valid\n", bpp);
1277 }
1278 } else if (!strncmp(this_opt, "pixclock:", 9)) {
1279 inf->modes[0].pixclock = simple_strtoul(this_opt+9, NULL, 0);
1280 dev_info(dev, "override pixclock: %ld\n", inf->modes[0].pixclock);
1281 } else if (!strncmp(this_opt, "left:", 5)) {
1282 inf->modes[0].left_margin = simple_strtoul(this_opt+5, NULL, 0);
1283 dev_info(dev, "override left: %u\n", inf->modes[0].left_margin);
1284 } else if (!strncmp(this_opt, "right:", 6)) {
1285 inf->modes[0].right_margin = simple_strtoul(this_opt+6, NULL, 0);
1286 dev_info(dev, "override right: %u\n", inf->modes[0].right_margin);
1287 } else if (!strncmp(this_opt, "upper:", 6)) {
1288 inf->modes[0].upper_margin = simple_strtoul(this_opt+6, NULL, 0);
1289 dev_info(dev, "override upper: %u\n", inf->modes[0].upper_margin);
1290 } else if (!strncmp(this_opt, "lower:", 6)) {
1291 inf->modes[0].lower_margin = simple_strtoul(this_opt+6, NULL, 0);
1292 dev_info(dev, "override lower: %u\n", inf->modes[0].lower_margin);
1293 } else if (!strncmp(this_opt, "hsynclen:", 9)) {
1294 inf->modes[0].hsync_len = simple_strtoul(this_opt+9, NULL, 0);
1295 dev_info(dev, "override hsynclen: %u\n", inf->modes[0].hsync_len);
1296 } else if (!strncmp(this_opt, "vsynclen:", 9)) {
1297 inf->modes[0].vsync_len = simple_strtoul(this_opt+9, NULL, 0);
1298 dev_info(dev, "override vsynclen: %u\n", inf->modes[0].vsync_len);
1299 } else if (!strncmp(this_opt, "hsync:", 6)) {
1300 if (simple_strtoul(this_opt+6, NULL, 0) == 0) {
1301 dev_info(dev, "override hsync: Active Low\n");
1302 inf->modes[0].sync &= ~FB_SYNC_HOR_HIGH_ACT;
1303 } else {
1304 dev_info(dev, "override hsync: Active High\n");
1305 inf->modes[0].sync |= FB_SYNC_HOR_HIGH_ACT;
1306 }
1307 } else if (!strncmp(this_opt, "vsync:", 6)) {
1308 if (simple_strtoul(this_opt+6, NULL, 0) == 0) {
1309 dev_info(dev, "override vsync: Active Low\n");
1310 inf->modes[0].sync &= ~FB_SYNC_VERT_HIGH_ACT;
1311 } else {
1312 dev_info(dev, "override vsync: Active High\n");
1313 inf->modes[0].sync |= FB_SYNC_VERT_HIGH_ACT;
1314 }
1315 } else if (!strncmp(this_opt, "dpc:", 4)) {
1316 if (simple_strtoul(this_opt+4, NULL, 0) == 0) {
1317 dev_info(dev, "override double pixel clock: false\n");
1318 inf->lccr3 &= ~LCCR3_DPC;
1319 } else {
1320 dev_info(dev, "override double pixel clock: true\n");
1321 inf->lccr3 |= LCCR3_DPC;
1322 }
1323 } else if (!strncmp(this_opt, "outputen:", 9)) {
1324 if (simple_strtoul(this_opt+9, NULL, 0) == 0) {
1325 dev_info(dev, "override output enable: active low\n");
1326 inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnL;
1327 } else {
1328 dev_info(dev, "override output enable: active high\n");
1329 inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnH;
1330 }
1331 } else if (!strncmp(this_opt, "pixclockpol:", 12)) {
1332 if (simple_strtoul(this_opt+12, NULL, 0) == 0) {
1333 dev_info(dev, "override pixel clock polarity: falling edge\n");
1334 inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixFlEdg;
1335 } else {
1336 dev_info(dev, "override pixel clock polarity: rising edge\n");
1337 inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixRsEdg;
1338 }
1339 } else if (!strncmp(this_opt, "color", 5)) {
1340 inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Color;
1341 } else if (!strncmp(this_opt, "mono", 4)) {
1342 inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Mono;
1343 } else if (!strncmp(this_opt, "active", 6)) {
1344 inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Act;
1345 } else if (!strncmp(this_opt, "passive", 7)) {
1346 inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Pas;
1347 } else if (!strncmp(this_opt, "single", 6)) {
1348 inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Sngl;
1349 } else if (!strncmp(this_opt, "dual", 4)) {
1350 inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Dual;
1351 } else if (!strncmp(this_opt, "4pix", 4)) {
1352 inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_4PixMono;
1353 } else if (!strncmp(this_opt, "8pix", 4)) {
1354 inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_8PixMono;
1355 } else {
1356 dev_err(dev, "unknown option: %s\n", this_opt);
1357 return -EINVAL;
1358 }
1359 }
1360 return 0;
1361 1590
1591#ifndef CONFIG_MODULES
1592static int __devinit pxafb_setup_options(void)
1593{
1594 char *options = NULL;
1595
1596 if (fb_get_options("pxafb", &options))
1597 return -ENODEV;
1598
1599 if (options)
1600 strlcpy(g_options, options, sizeof(g_options));
1601
1602 return 0;
1362} 1603}
1604#else
1605#define pxafb_setup_options() (0)
1606
1607module_param_string(options, g_options, sizeof(g_options), 0);
1608MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
1609#endif
1610
1611#else
1612#define pxafb_parse_options(...) (0)
1613#define pxafb_setup_options() (0)
1363#endif 1614#endif
1364 1615
1365static int __init pxafb_probe(struct platform_device *dev) 1616static int __init pxafb_probe(struct platform_device *dev)
1366{ 1617{
1367 struct pxafb_info *fbi; 1618 struct pxafb_info *fbi;
1368 struct pxafb_mach_info *inf; 1619 struct pxafb_mach_info *inf;
1369 int ret; 1620 struct resource *r;
1621 int irq, ret;
1370 1622
1371 dev_dbg(&dev->dev, "pxafb_probe\n"); 1623 dev_dbg(&dev->dev, "pxafb_probe\n");
1372 1624
@@ -1376,38 +1628,45 @@ static int __init pxafb_probe(struct platform_device *dev)
1376 if (!inf) 1628 if (!inf)
1377 goto failed; 1629 goto failed;
1378 1630
1379#ifdef CONFIG_FB_PXA_PARAMETERS
1380 ret = pxafb_parse_options(&dev->dev, g_options); 1631 ret = pxafb_parse_options(&dev->dev, g_options);
1381 if (ret < 0) 1632 if (ret < 0)
1382 goto failed; 1633 goto failed;
1383#endif
1384 1634
1385#ifdef DEBUG_VAR 1635#ifdef DEBUG_VAR
1386 /* Check for various illegal bit-combinations. Currently only 1636 /* Check for various illegal bit-combinations. Currently only
1387 * a warning is given. */ 1637 * a warning is given. */
1388 1638
1389 if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK) 1639 if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK)
1390 dev_warn(&dev->dev, "machine LCCR0 setting contains illegal bits: %08x\n", 1640 dev_warn(&dev->dev, "machine LCCR0 setting contains "
1391 inf->lccr0 & LCCR0_INVALID_CONFIG_MASK); 1641 "illegal bits: %08x\n",
1392 if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK) 1642 inf->lccr0 & LCCR0_INVALID_CONFIG_MASK);
1393 dev_warn(&dev->dev, "machine LCCR3 setting contains illegal bits: %08x\n", 1643 if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK)
1394 inf->lccr3 & LCCR3_INVALID_CONFIG_MASK); 1644 dev_warn(&dev->dev, "machine LCCR3 setting contains "
1395 if (inf->lccr0 & LCCR0_DPD && 1645 "illegal bits: %08x\n",
1646 inf->lccr3 & LCCR3_INVALID_CONFIG_MASK);
1647 if (inf->lccr0 & LCCR0_DPD &&
1396 ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas || 1648 ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas ||
1397 (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl || 1649 (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl ||
1398 (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono)) 1650 (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono))
1399 dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is only valid in passive mono" 1651 dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is "
1400 " single panel mode\n"); 1652 "only valid in passive mono"
1401 if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act && 1653 " single panel mode\n");
1654 if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act &&
1402 (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual) 1655 (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual)
1403 dev_warn(&dev->dev, "Dual panel only valid in passive mode\n"); 1656 dev_warn(&dev->dev, "Dual panel only valid in passive mode\n");
1404 if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas && 1657 if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas &&
1405 (inf->modes->upper_margin || inf->modes->lower_margin)) 1658 (inf->modes->upper_margin || inf->modes->lower_margin))
1406 dev_warn(&dev->dev, "Upper and lower margins must be 0 in passive mode\n"); 1659 dev_warn(&dev->dev, "Upper and lower margins must be 0 in "
1660 "passive mode\n");
1407#endif 1661#endif
1408 1662
1409 dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n",inf->modes->xres, inf->modes->yres, inf->modes->bpp); 1663 dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n",
1410 if (inf->modes->xres == 0 || inf->modes->yres == 0 || inf->modes->bpp == 0) { 1664 inf->modes->xres,
1665 inf->modes->yres,
1666 inf->modes->bpp);
1667 if (inf->modes->xres == 0 ||
1668 inf->modes->yres == 0 ||
1669 inf->modes->bpp == 0) {
1411 dev_err(&dev->dev, "Invalid resolution or bit depth\n"); 1670 dev_err(&dev->dev, "Invalid resolution or bit depth\n");
1412 ret = -EINVAL; 1671 ret = -EINVAL;
1413 goto failed; 1672 goto failed;
@@ -1416,26 +1675,62 @@ static int __init pxafb_probe(struct platform_device *dev)
1416 pxafb_lcd_power = inf->pxafb_lcd_power; 1675 pxafb_lcd_power = inf->pxafb_lcd_power;
1417 fbi = pxafb_init_fbinfo(&dev->dev); 1676 fbi = pxafb_init_fbinfo(&dev->dev);
1418 if (!fbi) { 1677 if (!fbi) {
1678 /* only reason for pxafb_init_fbinfo to fail is kmalloc */
1419 dev_err(&dev->dev, "Failed to initialize framebuffer device\n"); 1679 dev_err(&dev->dev, "Failed to initialize framebuffer device\n");
1420 ret = -ENOMEM; // only reason for pxafb_init_fbinfo to fail is kmalloc 1680 ret = -ENOMEM;
1421 goto failed; 1681 goto failed;
1422 } 1682 }
1423 1683
1684 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
1685 if (r == NULL) {
1686 dev_err(&dev->dev, "no I/O memory resource defined\n");
1687 ret = -ENODEV;
1688 goto failed;
1689 }
1690
1691 r = request_mem_region(r->start, r->end - r->start + 1, dev->name);
1692 if (r == NULL) {
1693 dev_err(&dev->dev, "failed to request I/O memory\n");
1694 ret = -EBUSY;
1695 goto failed;
1696 }
1697
1698 fbi->mmio_base = ioremap(r->start, r->end - r->start + 1);
1699 if (fbi->mmio_base == NULL) {
1700 dev_err(&dev->dev, "failed to map I/O memory\n");
1701 ret = -EBUSY;
1702 goto failed_free_res;
1703 }
1704
1424 /* Initialize video memory */ 1705 /* Initialize video memory */
1425 ret = pxafb_map_video_memory(fbi); 1706 ret = pxafb_map_video_memory(fbi);
1426 if (ret) { 1707 if (ret) {
1427 dev_err(&dev->dev, "Failed to allocate video RAM: %d\n", ret); 1708 dev_err(&dev->dev, "Failed to allocate video RAM: %d\n", ret);
1428 ret = -ENOMEM; 1709 ret = -ENOMEM;
1429 goto failed; 1710 goto failed_free_io;
1711 }
1712
1713 irq = platform_get_irq(dev, 0);
1714 if (irq < 0) {
1715 dev_err(&dev->dev, "no IRQ defined\n");
1716 ret = -ENODEV;
1717 goto failed_free_mem;
1430 } 1718 }
1431 1719
1432 ret = request_irq(IRQ_LCD, pxafb_handle_irq, IRQF_DISABLED, "LCD", fbi); 1720 ret = request_irq(irq, pxafb_handle_irq, IRQF_DISABLED, "LCD", fbi);
1433 if (ret) { 1721 if (ret) {
1434 dev_err(&dev->dev, "request_irq failed: %d\n", ret); 1722 dev_err(&dev->dev, "request_irq failed: %d\n", ret);
1435 ret = -EBUSY; 1723 ret = -EBUSY;
1436 goto failed; 1724 goto failed_free_mem;
1437 } 1725 }
1438 1726
1727#ifdef CONFIG_FB_PXA_SMARTPANEL
1728 ret = pxafb_smart_init(fbi);
1729 if (ret) {
1730 dev_err(&dev->dev, "failed to initialize smartpanel\n");
1731 goto failed_free_irq;
1732 }
1733#endif
1439 /* 1734 /*
1440 * This makes sure that our colour bitfield 1735 * This makes sure that our colour bitfield
1441 * descriptors are correctly initialised. 1736 * descriptors are correctly initialised.
@@ -1447,19 +1742,18 @@ static int __init pxafb_probe(struct platform_device *dev)
1447 1742
1448 ret = register_framebuffer(&fbi->fb); 1743 ret = register_framebuffer(&fbi->fb);
1449 if (ret < 0) { 1744 if (ret < 0) {
1450 dev_err(&dev->dev, "Failed to register framebuffer device: %d\n", ret); 1745 dev_err(&dev->dev,
1451 goto failed; 1746 "Failed to register framebuffer device: %d\n", ret);
1747 goto failed_free_irq;
1452 } 1748 }
1453 1749
1454#ifdef CONFIG_PM
1455 // TODO
1456#endif
1457
1458#ifdef CONFIG_CPU_FREQ 1750#ifdef CONFIG_CPU_FREQ
1459 fbi->freq_transition.notifier_call = pxafb_freq_transition; 1751 fbi->freq_transition.notifier_call = pxafb_freq_transition;
1460 fbi->freq_policy.notifier_call = pxafb_freq_policy; 1752 fbi->freq_policy.notifier_call = pxafb_freq_policy;
1461 cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); 1753 cpufreq_register_notifier(&fbi->freq_transition,
1462 cpufreq_register_notifier(&fbi->freq_policy, CPUFREQ_POLICY_NOTIFIER); 1754 CPUFREQ_TRANSITION_NOTIFIER);
1755 cpufreq_register_notifier(&fbi->freq_policy,
1756 CPUFREQ_POLICY_NOTIFIER);
1463#endif 1757#endif
1464 1758
1465 /* 1759 /*
@@ -1469,6 +1763,15 @@ static int __init pxafb_probe(struct platform_device *dev)
1469 1763
1470 return 0; 1764 return 0;
1471 1765
1766failed_free_irq:
1767 free_irq(irq, fbi);
1768failed_free_res:
1769 release_mem_region(r->start, r->end - r->start + 1);
1770failed_free_io:
1771 iounmap(fbi->mmio_base);
1772failed_free_mem:
1773 dma_free_writecombine(&dev->dev, fbi->map_size,
1774 fbi->map_cpu, fbi->map_dma);
1472failed: 1775failed:
1473 platform_set_drvdata(dev, NULL); 1776 platform_set_drvdata(dev, NULL);
1474 kfree(fbi); 1777 kfree(fbi);
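
The shape of the error handling added to pxafb_probe() above, reduced to a runnable toy: resources are acquired in order and the failure labels fall through to release them (here in strict reverse order; the stand-in step names only echo the real calls):

#include <stdio.h>

static int step(const char *what, int ok)
{
        printf("%s: %s\n", what, ok ? "ok" : "failed");
        return ok ? 0 : -1;
}

int probe(int fail_at)
{
        if (step("request_mem_region", fail_at != 1))
                goto failed;
        if (step("ioremap", fail_at != 2))
                goto failed_free_res;
        if (step("map video memory", fail_at != 3))
                goto failed_free_io;
        if (step("request_irq", fail_at != 4))
                goto failed_free_mem;
        return 0;

failed_free_mem:
        printf("undo: free video memory\n");
failed_free_io:
        printf("undo: iounmap\n");
failed_free_res:
        printf("undo: release_mem_region\n");
failed:
        printf("undo: free driver state\n");
        return -1;
}

int main(void)
{
        return probe(3) ? 1 : 0;        /* simulate a video memory failure */
}
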
@@ -1477,40 +1780,18 @@ failed:
1477 1780
1478static struct platform_driver pxafb_driver = { 1781static struct platform_driver pxafb_driver = {
1479 .probe = pxafb_probe, 1782 .probe = pxafb_probe,
1480#ifdef CONFIG_PM
1481 .suspend = pxafb_suspend, 1783 .suspend = pxafb_suspend,
1482 .resume = pxafb_resume, 1784 .resume = pxafb_resume,
1483#endif
1484 .driver = { 1785 .driver = {
1485 .name = "pxa2xx-fb", 1786 .name = "pxa2xx-fb",
1486 }, 1787 },
1487}; 1788};
1488 1789
1489#ifndef MODULE
1490static int __devinit pxafb_setup(char *options)
1491{
1492# ifdef CONFIG_FB_PXA_PARAMETERS
1493 if (options)
1494 strlcpy(g_options, options, sizeof(g_options));
1495# endif
1496 return 0;
1497}
1498#else
1499# ifdef CONFIG_FB_PXA_PARAMETERS
1500module_param_string(options, g_options, sizeof(g_options), 0);
1501MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
1502# endif
1503#endif
1504
1505static int __devinit pxafb_init(void) 1790static int __devinit pxafb_init(void)
1506{ 1791{
1507#ifndef MODULE 1792 if (pxafb_setup_options())
1508 char *option = NULL; 1793 return -EINVAL;
1509 1794
1510 if (fb_get_options("pxafb", &option))
1511 return -ENODEV;
1512 pxafb_setup(option);
1513#endif
1514 return platform_driver_register(&pxafb_driver); 1795 return platform_driver_register(&pxafb_driver);
1515} 1796}
1516 1797
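[Editor's note] The #ifndef MODULE / module_param_string() maze around the old pxafb_setup() is removed, and pxafb_init() now calls a single pxafb_setup_options() helper defined earlier in the patch, outside the lines shown here. Assembled purely from the removed code above, a plausible shape for that helper (not necessarily the patch's exact code) would be:

#ifndef MODULE
static int __devinit pxafb_setup_options(void)
{
	char *options = NULL;

	if (fb_get_options("pxafb", &options))
		return -ENODEV;

#ifdef CONFIG_FB_PXA_PARAMETERS
	if (options)
		strlcpy(g_options, options, sizeof(g_options));
#endif
	return 0;
}
#else
#define pxafb_setup_options()	(0)

#ifdef CONFIG_FB_PXA_PARAMETERS
module_param_string(options, g_options, sizeof(g_options), 0);
MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
#endif
#endif
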
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h
index d920b8a14c35..8238dc826429 100644
--- a/drivers/video/pxafb.h
+++ b/drivers/video/pxafb.h
@@ -21,14 +21,6 @@
21 * for more details. 21 * for more details.
22 */ 22 */
23 23
24/* Shadows for LCD controller registers */
25struct pxafb_lcd_reg {
26 unsigned int lccr0;
27 unsigned int lccr1;
28 unsigned int lccr2;
29 unsigned int lccr3;
30};
31
32/* PXA LCD DMA descriptor */ 24/* PXA LCD DMA descriptor */
33struct pxafb_dma_descriptor { 25struct pxafb_dma_descriptor {
34 unsigned int fdadr; 26 unsigned int fdadr;
@@ -37,11 +29,49 @@ struct pxafb_dma_descriptor {
37 unsigned int ldcmd; 29 unsigned int ldcmd;
38}; 30};
39 31
32enum {
33 PAL_NONE = -1,
34 PAL_BASE = 0,
35 PAL_OV1 = 1,
36 PAL_OV2 = 2,
37 PAL_MAX,
38};
39
40enum {
41 DMA_BASE = 0,
42 DMA_UPPER = 0,
43 DMA_LOWER = 1,
44 DMA_OV1 = 1,
45 DMA_OV2_Y = 2,
46 DMA_OV2_Cb = 3,
47 DMA_OV2_Cr = 4,
48 DMA_CURSOR = 5,
49 DMA_CMD = 6,
50 DMA_MAX,
51};
52
53/* maximum palette size - 256 entries, each 4 bytes long */
54#define PALETTE_SIZE (256 * 4)
55#define CMD_BUFF_SIZE (1024 * 50)
56
57struct pxafb_dma_buff {
58 unsigned char palette[PAL_MAX * PALETTE_SIZE];
59 uint16_t cmd_buff[CMD_BUFF_SIZE];
60 struct pxafb_dma_descriptor pal_desc[PAL_MAX];
61 struct pxafb_dma_descriptor dma_desc[DMA_MAX];
62};
63
40struct pxafb_info { 64struct pxafb_info {
41 struct fb_info fb; 65 struct fb_info fb;
42 struct device *dev; 66 struct device *dev;
43 struct clk *clk; 67 struct clk *clk;
44 68
69 void __iomem *mmio_base;
70
71 struct pxafb_dma_buff *dma_buff;
72 dma_addr_t dma_buff_phys;
73 dma_addr_t fdadr[DMA_MAX];
74
45 /* 75 /*
46 * These are the addresses we mapped 76 * These are the addresses we mapped
47 * the framebuffer memory region to. 77 * the framebuffer memory region to.
@@ -55,19 +85,8 @@ struct pxafb_info {
55 u_char * screen_cpu; /* virtual address of frame buffer */ 85 u_char * screen_cpu; /* virtual address of frame buffer */
56 dma_addr_t screen_dma; /* physical address of frame buffer */ 86 dma_addr_t screen_dma; /* physical address of frame buffer */
57 u16 * palette_cpu; /* virtual address of palette memory */ 87 u16 * palette_cpu; /* virtual address of palette memory */
58 dma_addr_t palette_dma; /* physical address of palette memory */
59 u_int palette_size; 88 u_int palette_size;
60 89 ssize_t video_offset;
61 /* DMA descriptors */
62 struct pxafb_dma_descriptor * dmadesc_fblow_cpu;
63 dma_addr_t dmadesc_fblow_dma;
64 struct pxafb_dma_descriptor * dmadesc_fbhigh_cpu;
65 dma_addr_t dmadesc_fbhigh_dma;
66 struct pxafb_dma_descriptor * dmadesc_palette_cpu;
67 dma_addr_t dmadesc_palette_dma;
68
69 dma_addr_t fdadr0;
70 dma_addr_t fdadr1;
71 90
72 u_int lccr0; 91 u_int lccr0;
73 u_int lccr3; 92 u_int lccr3;
@@ -81,6 +100,7 @@ struct pxafb_info {
81 u_int reg_lccr2; 100 u_int reg_lccr2;
82 u_int reg_lccr3; 101 u_int reg_lccr3;
83 u_int reg_lccr4; 102 u_int reg_lccr4;
103 u_int reg_cmdcr;
84 104
85 unsigned long hsync_time; 105 unsigned long hsync_time;
86 106
@@ -90,6 +110,16 @@ struct pxafb_info {
90 wait_queue_head_t ctrlr_wait; 110 wait_queue_head_t ctrlr_wait;
91 struct work_struct task; 111 struct work_struct task;
92 112
113 struct completion disable_done;
114
115#ifdef CONFIG_FB_PXA_SMARTPANEL
116 uint16_t *smart_cmds;
117 size_t n_smart_cmds;
118 struct completion command_done;
119 struct completion refresh_done;
120 struct task_struct *smart_thread;
121#endif
122
93#ifdef CONFIG_CPU_FREQ 123#ifdef CONFIG_CPU_FREQ
94 struct notifier_block freq_transition; 124 struct notifier_block freq_transition;
95 struct notifier_block freq_policy; 125 struct notifier_block freq_policy;
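[Editor's note] The header replaces the per-purpose descriptor pointers (dmadesc_fblow/fbhigh/palette, fdadr0/fdadr1) with one pxafb_dma_buff aggregate indexed by the PAL_* and DMA_* enums, so every DMA branch address can be derived from the single dma_buff_phys base. An illustrative derivation, assuming the pxafb.h definitions above; the helper name is made up and the patch's real setup code is outside this diff:

/* Bus address of the i-th DMA descriptor inside the shared dma_buff. */
static inline dma_addr_t pxafb_dma_desc_phys(struct pxafb_info *fbi,
					     unsigned int branch)
{
	return fbi->dma_buff_phys +
	       offsetof(struct pxafb_dma_buff, dma_desc) +
	       branch * sizeof(struct pxafb_dma_descriptor);
}

/* e.g.: fbi->fdadr[DMA_BASE] = pxafb_dma_desc_phys(fbi, DMA_BASE); */
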
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b535483bc556..13866789b356 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -80,19 +80,51 @@ static void add_status(struct virtio_device *dev, unsigned status)
80 dev->config->set_status(dev, dev->config->get_status(dev) | status); 80 dev->config->set_status(dev, dev->config->get_status(dev) | status);
81} 81}
82 82
83void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
84 unsigned int fbit)
85{
86 unsigned int i;
87 struct virtio_driver *drv = container_of(vdev->dev.driver,
88 struct virtio_driver, driver);
89
90 for (i = 0; i < drv->feature_table_size; i++)
91 if (drv->feature_table[i] == fbit)
92 return;
93 BUG();
94}
95EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
96
83static int virtio_dev_probe(struct device *_d) 97static int virtio_dev_probe(struct device *_d)
84{ 98{
85 int err; 99 int err, i;
86 struct virtio_device *dev = container_of(_d,struct virtio_device,dev); 100 struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
87 struct virtio_driver *drv = container_of(dev->dev.driver, 101 struct virtio_driver *drv = container_of(dev->dev.driver,
88 struct virtio_driver, driver); 102 struct virtio_driver, driver);
103 u32 device_features;
89 104
105 /* We have a driver! */
90 add_status(dev, VIRTIO_CONFIG_S_DRIVER); 106 add_status(dev, VIRTIO_CONFIG_S_DRIVER);
107
108 /* Figure out what features the device supports. */
109 device_features = dev->config->get_features(dev);
110
111 /* Features supported by both device and driver into dev->features. */
112 memset(dev->features, 0, sizeof(dev->features));
113 for (i = 0; i < drv->feature_table_size; i++) {
114 unsigned int f = drv->feature_table[i];
115 BUG_ON(f >= 32);
116 if (device_features & (1 << f))
117 set_bit(f, dev->features);
118 }
119
91 err = drv->probe(dev); 120 err = drv->probe(dev);
92 if (err) 121 if (err)
93 add_status(dev, VIRTIO_CONFIG_S_FAILED); 122 add_status(dev, VIRTIO_CONFIG_S_FAILED);
94 else 123 else {
95 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); 124 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
125 /* They should never have set feature bits beyond 32 */
126 dev->config->set_features(dev, dev->features[0]);
127 }
96 return err; 128 return err;
97} 129}
98 130
@@ -114,6 +146,8 @@ static int virtio_dev_remove(struct device *_d)
114 146
115int register_virtio_driver(struct virtio_driver *driver) 147int register_virtio_driver(struct virtio_driver *driver)
116{ 148{
149 /* Catch this early. */
150 BUG_ON(driver->feature_table_size && !driver->feature_table);
117 driver->driver.bus = &virtio_bus; 151 driver->driver.bus = &virtio_bus;
118 driver->driver.probe = virtio_dev_probe; 152 driver->driver.probe = virtio_dev_probe;
119 driver->driver.remove = virtio_dev_remove; 153 driver->driver.remove = virtio_dev_remove;
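[Editor's note] virtio_dev_probe() now computes the negotiated feature set as the bit-by-bit intersection of what the device offers and what the driver's feature_table lists. A reduced, userspace-style illustration of that intersection logic (plain C, hypothetical bit values, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Bits a hypothetical device offers: 0, 2 and 5. */
	uint32_t device_features = (1u << 0) | (1u << 2) | (1u << 5);
	/* Bits a hypothetical driver understands. */
	unsigned int driver_table[] = { 0, 1, 5 };
	uint32_t negotiated = 0;
	unsigned int i;

	for (i = 0; i < sizeof(driver_table) / sizeof(driver_table[0]); i++)
		if (device_features & (1u << driver_table[i]))
			negotiated |= 1u << driver_table[i];

	printf("negotiated features: 0x%x\n", negotiated);	/* prints 0x21 */
	return 0;
}
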
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0b3efc31ee6d..bfef604160d1 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -155,9 +155,9 @@ static void virtballoon_changed(struct virtio_device *vdev)
155static inline s64 towards_target(struct virtio_balloon *vb) 155static inline s64 towards_target(struct virtio_balloon *vb)
156{ 156{
157 u32 v; 157 u32 v;
158 __virtio_config_val(vb->vdev, 158 vb->vdev->config->get(vb->vdev,
159 offsetof(struct virtio_balloon_config, num_pages), 159 offsetof(struct virtio_balloon_config, num_pages),
160 &v); 160 &v, sizeof(v));
161 return v - vb->num_pages; 161 return v - vb->num_pages;
162} 162}
163 163
@@ -227,7 +227,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
227 } 227 }
228 228
229 vb->tell_host_first 229 vb->tell_host_first
230 = vdev->config->feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); 230 = virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
231 231
232 return 0; 232 return 0;
233 233
@@ -259,7 +259,11 @@ static void virtballoon_remove(struct virtio_device *vdev)
259 kfree(vb); 259 kfree(vb);
260} 260}
261 261
262static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
263
262static struct virtio_driver virtio_balloon = { 264static struct virtio_driver virtio_balloon = {
265 .feature_table = features,
266 .feature_table_size = ARRAY_SIZE(features),
263 .driver.name = KBUILD_MODNAME, 267 .driver.name = KBUILD_MODNAME,
264 .driver.owner = THIS_MODULE, 268 .driver.owner = THIS_MODULE,
265 .id_table = id_table, 269 .id_table = id_table,
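[Editor's note] With config->feature() gone, a driver advertises the bits it understands statically via feature_table and queries the negotiated result with virtio_has_feature(), as the balloon conversion above shows. A minimal skeleton following that shape; the foo_* names are placeholders, while the virtio structures and helpers are the ones this patch series introduces:

#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/virtio_config.h>

static struct virtio_device_id foo_id_table[] = {
	{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

/* Feature bits this driver understands; the core intersects them with
 * whatever the device offers before probe() is called. */
static unsigned int foo_features[] = {
	VIRTIO_BALLOON_F_MUST_TELL_HOST,
};

static int foo_probe(struct virtio_device *vdev)
{
	/* Negotiation already happened: just test the resulting bit. */
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST))
		dev_info(&vdev->dev, "host wants to be told first\n");
	return 0;
}

static void foo_remove(struct virtio_device *vdev)
{
}

static struct virtio_driver foo_driver = {
	.feature_table		= foo_features,
	.feature_table_size	= ARRAY_SIZE(foo_features),
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= foo_id_table,
	.probe			= foo_probe,
	.remove			= foo_remove,
};

static int __init foo_init(void)
{
	return register_virtio_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	unregister_virtio_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
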
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index c0df924766a7..27e9fc9117cd 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -87,23 +87,22 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
87 return container_of(vdev, struct virtio_pci_device, vdev); 87 return container_of(vdev, struct virtio_pci_device, vdev);
88} 88}
89 89
90/* virtio config->feature() implementation */ 90/* virtio config->get_features() implementation */
91static bool vp_feature(struct virtio_device *vdev, unsigned bit) 91static u32 vp_get_features(struct virtio_device *vdev)
92{
93 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
94
95 /* When someone needs more than 32 feature bits, we'll need to
96 * steal a bit to indicate that the rest are somewhere else. */
97 return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
98}
99
100/* virtio config->set_features() implementation */
101static void vp_set_features(struct virtio_device *vdev, u32 features)
92{ 102{
93 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 103 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
94 u32 mask;
95
96 /* Since this function is supposed to have the side effect of
97 * enabling a queried feature, we simulate that by doing a read
98 * from the host feature bitmask and then writing to the guest
99 * feature bitmask */
100 mask = ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
101 if (mask & (1 << bit)) {
102 mask |= (1 << bit);
103 iowrite32(mask, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
104 }
105 104
106 return !!(mask & (1 << bit)); 105 iowrite32(features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
107} 106}
108 107
109/* virtio config->get() implementation */ 108/* virtio config->get() implementation */
@@ -145,14 +144,14 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
145 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 144 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
146 /* We should never be setting status to 0. */ 145 /* We should never be setting status to 0. */
147 BUG_ON(status == 0); 146 BUG_ON(status == 0);
148 return iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); 147 iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
149} 148}
150 149
151static void vp_reset(struct virtio_device *vdev) 150static void vp_reset(struct virtio_device *vdev)
152{ 151{
153 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 152 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
154 /* 0 status means a reset. */ 153 /* 0 status means a reset. */
155 return iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); 154 iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
156} 155}
157 156
158/* the notify function used when creating a virt queue */ 157/* the notify function used when creating a virt queue */
@@ -293,7 +292,6 @@ static void vp_del_vq(struct virtqueue *vq)
293} 292}
294 293
295static struct virtio_config_ops virtio_pci_config_ops = { 294static struct virtio_config_ops virtio_pci_config_ops = {
296 .feature = vp_feature,
297 .get = vp_get, 295 .get = vp_get,
298 .set = vp_set, 296 .set = vp_set,
299 .get_status = vp_get_status, 297 .get_status = vp_get_status,
@@ -301,6 +299,8 @@ static struct virtio_config_ops virtio_pci_config_ops = {
301 .reset = vp_reset, 299 .reset = vp_reset,
302 .find_vq = vp_find_vq, 300 .find_vq = vp_find_vq,
303 .del_vq = vp_del_vq, 301 .del_vq = vp_del_vq,
302 .get_features = vp_get_features,
303 .set_features = vp_set_features,
304}; 304};
305 305
306/* the PCI probing function */ 306/* the PCI probing function */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c2fa5c630813..937a49d6772c 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -184,6 +184,11 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
184 184
185 START_USE(vq); 185 START_USE(vq);
186 186
187 if (unlikely(vq->broken)) {
188 END_USE(vq);
189 return NULL;
190 }
191
187 if (!more_used(vq)) { 192 if (!more_used(vq)) {
188 pr_debug("No more buffers in queue\n"); 193 pr_debug("No more buffers in queue\n");
189 END_USE(vq); 194 END_USE(vq);
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h
index fe6bdf43380f..e6ab7cf08f88 100644
--- a/drivers/w1/w1_log.h
+++ b/drivers/w1/w1_log.h
@@ -30,7 +30,7 @@
30# define assert(expr) \ 30# define assert(expr) \
31 if(unlikely(!(expr))) { \ 31 if(unlikely(!(expr))) { \
32 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ 32 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
33 #expr,__FILE__,__FUNCTION__,__LINE__); \ 33 #expr, __FILE__, __func__, __LINE__); \
34 } 34 }
35#endif 35#endif
36 36
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 2ce4cebc31d9..099b6fb5b5cb 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -13,6 +13,7 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/zorro.h> 14#include <linux/zorro.h>
15#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/smp_lock.h> 18#include <linux/smp_lock.h>
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
@@ -76,36 +77,58 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *
76} 77}
77 78
78static const struct file_operations proc_bus_zorro_operations = { 79static const struct file_operations proc_bus_zorro_operations = {
80 .owner = THIS_MODULE,
79 .llseek = proc_bus_zorro_lseek, 81 .llseek = proc_bus_zorro_lseek,
80 .read = proc_bus_zorro_read, 82 .read = proc_bus_zorro_read,
81}; 83};
82 84
83static int 85static void * zorro_seq_start(struct seq_file *m, loff_t *pos)
84get_zorro_dev_info(char *buf, char **start, off_t pos, int count)
85{ 86{
86 u_int slot; 87 return (*pos < zorro_num_autocon) ? pos : NULL;
87 off_t at = 0; 88}
88 int len, cnt; 89
89 90static void * zorro_seq_next(struct seq_file *m, void *v, loff_t *pos)
90 for (slot = cnt = 0; slot < zorro_num_autocon && count > cnt; slot++) { 91{
91 struct zorro_dev *z = &zorro_autocon[slot]; 92 (*pos)++;
92 len = sprintf(buf, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, 93 return (*pos < zorro_num_autocon) ? pos : NULL;
93 z->id, (unsigned long)zorro_resource_start(z), 94}
94 (unsigned long)zorro_resource_len(z), 95
95 z->rom.er_Type); 96static void zorro_seq_stop(struct seq_file *m, void *v)
96 at += len; 97{
97 if (at >= pos) { 98}
98 if (!*start) { 99
99 *start = buf + (pos - (at - len)); 100static int zorro_seq_show(struct seq_file *m, void *v)
100 cnt = at - pos; 101{
101 } else 102 u_int slot = *(loff_t *)v;
102 cnt += len; 103 struct zorro_dev *z = &zorro_autocon[slot];
103 buf += len; 104
104 } 105 seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id,
105 } 106 (unsigned long)zorro_resource_start(z),
106 return (count > cnt) ? cnt : count; 107 (unsigned long)zorro_resource_len(z),
108 z->rom.er_Type);
109 return 0;
110}
111
112static const struct seq_operations zorro_devices_seq_ops = {
113 .start = zorro_seq_start,
114 .next = zorro_seq_next,
115 .stop = zorro_seq_stop,
116 .show = zorro_seq_show,
117};
118
119static int zorro_devices_proc_open(struct inode *inode, struct file *file)
120{
121 return seq_open(file, &zorro_devices_seq_ops);
107} 122}
108 123
124static const struct file_operations zorro_devices_proc_fops = {
125 .owner = THIS_MODULE,
126 .open = zorro_devices_proc_open,
127 .read = seq_read,
128 .llseek = seq_lseek,
129 .release = seq_release,
130};
131
109static struct proc_dir_entry *proc_bus_zorro_dir; 132static struct proc_dir_entry *proc_bus_zorro_dir;
110 133
111static int __init zorro_proc_attach_device(u_int slot) 134static int __init zorro_proc_attach_device(u_int slot)
@@ -114,11 +137,11 @@ static int __init zorro_proc_attach_device(u_int slot)
114 char name[4]; 137 char name[4];
115 138
116 sprintf(name, "%02x", slot); 139 sprintf(name, "%02x", slot);
117 entry = create_proc_entry(name, 0, proc_bus_zorro_dir); 140 entry = proc_create_data(name, 0, proc_bus_zorro_dir,
141 &proc_bus_zorro_operations,
142 &zorro_autocon[slot]);
118 if (!entry) 143 if (!entry)
119 return -ENOMEM; 144 return -ENOMEM;
120 entry->proc_fops = &proc_bus_zorro_operations;
121 entry->data = &zorro_autocon[slot];
122 entry->size = sizeof(struct zorro_dev); 145 entry->size = sizeof(struct zorro_dev);
123 return 0; 146 return 0;
124} 147}
@@ -128,9 +151,9 @@ static int __init zorro_proc_init(void)
128 u_int slot; 151 u_int slot;
129 152
130 if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { 153 if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) {
131 proc_bus_zorro_dir = proc_mkdir("zorro", proc_bus); 154 proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL);
132 create_proc_info_entry("devices", 0, proc_bus_zorro_dir, 155 proc_create("devices", 0, proc_bus_zorro_dir,
133 get_zorro_dev_info); 156 &zorro_devices_proc_fops);
134 for (slot = 0; slot < zorro_num_autocon; slot++) 157 for (slot = 0; slot < zorro_num_autocon; slot++)
135 zorro_proc_attach_device(slot); 158 zorro_proc_attach_device(slot);
136 } 159 }
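[Editor's note] The /proc/bus/zorro/devices entry is converted from the old get_info-style callback to the seq_file iterator interface, registered with proc_create(). A stripped-down sketch of the same pattern for a hypothetical fixed-size table; the items_* names are illustrative, only the seq_file/proc_create plumbing matches what the patch uses:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static const char *items[] = { "alpha", "beta", "gamma" };

static void *items_seq_start(struct seq_file *m, loff_t *pos)
{
	return (*pos < ARRAY_SIZE(items)) ? pos : NULL;
}

static void *items_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos < ARRAY_SIZE(items)) ? pos : NULL;
}

static void items_seq_stop(struct seq_file *m, void *v)
{
}

static int items_seq_show(struct seq_file *m, void *v)
{
	loff_t i = *(loff_t *)v;	/* iterator hands back the index */

	seq_printf(m, "%02d\t%s\n", (int)i, items[i]);
	return 0;
}

static const struct seq_operations items_seq_ops = {
	.start	= items_seq_start,
	.next	= items_seq_next,
	.stop	= items_seq_stop,
	.show	= items_seq_show,
};

static int items_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &items_seq_ops);
}

static const struct file_operations items_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= items_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init items_proc_init(void)
{
	if (!proc_create("items", 0, NULL, &items_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit items_proc_exit(void)
{
	remove_proc_entry("items", NULL);
}

module_init(items_proc_init);
module_exit(items_proc_exit);
MODULE_LICENSE("GPL");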