author	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-07-05 13:13:03 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-07-05 13:13:03 -0400
commit	5e66dd6d66ffe758b39b6dcadf2330753ee1159b (patch)
tree	a72cdcff4448e4af9425cc213ddf56ab23e697fe
parent	026477c1141b67e98e3bd8bdedb7d4b88a3ecd09 (diff)
parent	ca78f6baca863afe2e6a244a0fe94b3a70211d46 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
-rw-r--r--  Documentation/DocBook/mtdnand.tmpl | 11
-rw-r--r--  Documentation/irqflags-tracing.txt | 57
-rw-r--r--  Documentation/kernel-parameters.txt | 9
-rw-r--r--  Documentation/lockdep-design.txt | 197
-rw-r--r--  Documentation/networking/ipvs-sysctl.txt | 143
-rw-r--r--  Documentation/powerpc/booting-without-of.txt | 4
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas | 16
-rw-r--r--  Documentation/sysctl/vm.txt | 14
-rw-r--r--  MAINTAINERS | 10
-rw-r--r--  Makefile | 50
-rw-r--r--  arch/alpha/boot/bootp.c | 2
-rw-r--r--  arch/alpha/boot/bootpz.c | 2
-rw-r--r--  arch/alpha/boot/main.c | 2
-rw-r--r--  arch/alpha/kernel/process.c | 2
-rw-r--r--  arch/arm/kernel/ecard.c | 2
-rw-r--r--  arch/arm/kernel/setup.c | 2
-rw-r--r--  arch/arm/mach-footbridge/isa-irq.c | 19
-rw-r--r--  arch/arm/mm/ioremap.c | 5
-rw-r--r--  arch/arm/mm/proc-arm1020.S | 2
-rw-r--r--  arch/arm/mm/proc-arm1020e.S | 2
-rw-r--r--  arch/arm/mm/proc-arm1022.S | 2
-rw-r--r--  arch/arm/mm/proc-arm1026.S | 2
-rw-r--r--  arch/arm/mm/proc-arm925.S | 3
-rw-r--r--  arch/frv/kernel/setup.c | 2
-rw-r--r--  arch/i386/Kconfig | 8
-rw-r--r--  arch/i386/Kconfig.debug | 13
-rw-r--r--  arch/i386/boot/setup.S | 2
-rw-r--r--  arch/i386/kernel/Makefile | 1
-rw-r--r--  arch/i386/kernel/alternative.c | 10
-rw-r--r--  arch/i386/kernel/cpuid.c | 6
-rw-r--r--  arch/i386/kernel/entry.S | 36
-rw-r--r--  arch/i386/kernel/irq.c | 6
-rw-r--r--  arch/i386/kernel/nmi.c | 2
-rw-r--r--  arch/i386/kernel/stacktrace.c | 98
-rw-r--r--  arch/i386/kernel/traps.c | 39
-rw-r--r--  arch/ia64/kernel/acpi-ext.c | 2
-rw-r--r--  arch/ia64/kernel/acpi.c | 12
-rw-r--r--  arch/ia64/kernel/mca.c | 10
-rw-r--r--  arch/ia64/kernel/smpboot.c | 2
-rw-r--r--  arch/ia64/mm/discontig.c | 16
-rw-r--r--  arch/mips/kernel/entry.S | 2
-rw-r--r--  arch/mips/kernel/mips-mt.c | 6
-rw-r--r--  arch/powerpc/configs/chrp32_defconfig | 1378
-rw-r--r--  arch/powerpc/configs/mpc834x_itx_defconfig | 1336
-rw-r--r--  arch/powerpc/kernel/btext.c | 20
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 9
-rw-r--r--  arch/powerpc/kernel/irq.c | 656
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c | 57
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 10
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 37
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 33
-rw-r--r--  arch/powerpc/kernel/prom.c | 454
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 20
-rw-r--r--  arch/powerpc/kernel/prom_parse.c | 443
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 17
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 2
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 17
-rw-r--r--  arch/powerpc/kernel/vio.c | 12
-rw-r--r--  arch/powerpc/platforms/83xx/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/83xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/83xx/mpc834x_itx.c | 156
-rw-r--r--  arch/powerpc/platforms/83xx/mpc834x_itx.h | 23
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 419
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.h | 19
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 22
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c | 394
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 119
-rw-r--r--  arch/powerpc/platforms/chrp/pci.c | 11
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c | 103
-rw-r--r--  arch/powerpc/platforms/chrp/smp.c | 1
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 105
-rw-r--r--  arch/powerpc/platforms/iseries/irq.h | 2
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 8
-rw-r--r--  arch/powerpc/platforms/maple/pci.c | 17
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 94
-rw-r--r--  arch/powerpc/platforms/powermac/bootx_init.c | 35
-rw-r--r--  arch/powerpc/platforms/powermac/low_i2c.c | 9
-rw-r--r--  arch/powerpc/platforms/powermac/nvram.c | 5
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 68
-rw-r--r--  arch/powerpc/platforms/powermac/pfunc_base.c | 13
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 422
-rw-r--r--  arch/powerpc/platforms/powermac/pmac.h | 2
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 82
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 244
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 32
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 718
-rw-r--r--  arch/powerpc/platforms/pseries/xics.h | 17
-rw-r--r--  arch/powerpc/sysdev/Makefile | 5
-rw-r--r--  arch/powerpc/sysdev/i8259.c | 163
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 482
-rw-r--r--  arch/ppc/syslib/Makefile | 2
-rw-r--r--  arch/ppc/syslib/btext.c | 2
-rw-r--r--  arch/ppc/syslib/i8259.c | 212
-rw-r--r--  arch/s390/Kconfig | 8
-rw-r--r--  arch/s390/Kconfig.debug | 4
-rw-r--r--  arch/s390/Makefile | 5
-rw-r--r--  arch/s390/kernel/Makefile | 1
-rw-r--r--  arch/s390/kernel/entry.S | 29
-rw-r--r--  arch/s390/kernel/entry64.S | 21
-rw-r--r--  arch/s390/kernel/irq.c | 8
-rw-r--r--  arch/s390/kernel/process.c | 1
-rw-r--r--  arch/s390/kernel/stacktrace.c | 90
-rw-r--r--  arch/um/kernel/tt/process_kern.c | 2
-rw-r--r--  arch/um/kernel/um_arch.c | 2
-rw-r--r--  arch/x86_64/Kconfig | 8
-rw-r--r--  arch/x86_64/Kconfig.debug | 4
-rw-r--r--  arch/x86_64/boot/setup.S | 2
-rw-r--r--  arch/x86_64/ia32/ia32entry.S | 19
-rw-r--r--  arch/x86_64/kernel/Makefile | 1
-rw-r--r--  arch/x86_64/kernel/entry.S | 188
-rw-r--r--  arch/x86_64/kernel/head64.c | 5
-rw-r--r--  arch/x86_64/kernel/irq.c | 4
-rw-r--r--  arch/x86_64/kernel/nmi.c | 2
-rw-r--r--  arch/x86_64/kernel/process.c | 2
-rw-r--r--  arch/x86_64/kernel/smpboot.c | 2
-rw-r--r--  arch/x86_64/kernel/stacktrace.c | 221
-rw-r--r--  arch/x86_64/kernel/traps.c | 129
-rw-r--r--  arch/x86_64/lib/thunk.S | 5
-rw-r--r--  arch/x86_64/mm/fault.c | 1
-rw-r--r--  block/ll_rw_blk.c | 2
-rw-r--r--  drivers/acpi/Kconfig | 12
-rw-r--r--  drivers/acpi/Makefile | 2
-rw-r--r--  drivers/acpi/ac.c | 32
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 18
-rw-r--r--  drivers/acpi/asus_acpi.c | 335
-rw-r--r--  drivers/acpi/battery.c | 46
-rw-r--r--  drivers/acpi/button.c | 10
-rw-r--r--  drivers/acpi/cm_sbs.c | 131
-rw-r--r--  drivers/acpi/container.c | 2
-rw-r--r--  drivers/acpi/fan.c | 10
-rw-r--r--  drivers/acpi/glue.c | 8
-rw-r--r--  drivers/acpi/i2c_ec.c | 406
-rw-r--r--  drivers/acpi/i2c_ec.h | 23
-rw-r--r--  drivers/acpi/namespace/nsxfeval.c | 2
-rw-r--r--  drivers/acpi/numa.c | 4
-rw-r--r--  drivers/acpi/osl.c | 9
-rw-r--r--  drivers/acpi/pci_link.c | 15
-rw-r--r--  drivers/acpi/pci_root.c | 20
-rw-r--r--  drivers/acpi/power.c | 22
-rw-r--r--  drivers/acpi/processor_idle.c | 2
-rw-r--r--  drivers/acpi/processor_perflib.c | 6
-rw-r--r--  drivers/acpi/sbs.c | 1766
-rw-r--r--  drivers/acpi/scan.c | 4
-rw-r--r--  drivers/acpi/system.c | 4
-rw-r--r--  drivers/acpi/thermal.c | 57
-rw-r--r--  drivers/acpi/utilities/utalloc.c | 4
-rw-r--r--  drivers/acpi/utilities/utcache.c | 2
-rw-r--r--  drivers/acpi/utils.c | 4
-rw-r--r--  drivers/acpi/video.c | 76
-rw-r--r--  drivers/atm/ambassador.c | 3
-rw-r--r--  drivers/atm/idt77252.c | 3
-rw-r--r--  drivers/block/floppy.c | 42
-rw-r--r--  drivers/block/swim3.c | 230
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 1
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 1
-rw-r--r--  drivers/bluetooth/btuart_cs.c | 1
-rw-r--r--  drivers/bluetooth/dtl1_cs.c | 1
-rw-r--r--  drivers/bluetooth/hci_usb.c | 80
-rw-r--r--  drivers/bluetooth/hci_usb.h | 1
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 1
-rw-r--r--  drivers/char/agp/frontend.c | 2
-rw-r--r--  drivers/char/applicom.c | 2
-rw-r--r--  drivers/char/cs5535_gpio.c | 2
-rw-r--r--  drivers/char/ds1286.c | 2
-rw-r--r--  drivers/char/ds1302.c | 2
-rw-r--r--  drivers/char/ds1620.c | 2
-rw-r--r--  drivers/char/dsp56k.c | 2
-rw-r--r--  drivers/char/dtlk.c | 2
-rw-r--r--  drivers/char/efirtc.c | 2
-rw-r--r--  drivers/char/ftape/zftape/zftape-init.c | 2
-rw-r--r--  drivers/char/genrtc.c | 2
-rw-r--r--  drivers/char/hpet.c | 2
-rw-r--r--  drivers/char/hvsi.c | 7
-rw-r--r--  drivers/char/hw_random/core.c | 2
-rw-r--r--  drivers/char/i8k.c | 2
-rw-r--r--  drivers/char/ip2/ip2main.c | 2
-rw-r--r--  drivers/char/ip27-rtc.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 2
-rw-r--r--  drivers/char/istallion.c | 2
-rw-r--r--  drivers/char/ite_gpio.c | 2
-rw-r--r--  drivers/char/lcd.c | 2
-rw-r--r--  drivers/char/lp.c | 2
-rw-r--r--  drivers/char/mem.c | 18
-rw-r--r--  drivers/char/misc.c | 4
-rw-r--r--  drivers/char/mmtimer.c | 2
-rw-r--r--  drivers/char/mwave/mwavedd.c | 2
-rw-r--r--  drivers/char/nvram.c | 2
-rw-r--r--  drivers/char/nwbutton.c | 2
-rw-r--r--  drivers/char/nwflash.c | 2
-rw-r--r--  drivers/char/pc8736x_gpio.c | 2
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 2
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 2
-rw-r--r--  drivers/char/ppdev.c | 2
-rw-r--r--  drivers/char/random.c | 6
-rw-r--r--  drivers/char/raw.c | 6
-rw-r--r--  drivers/char/rio/rio_linux.c | 2
-rw-r--r--  drivers/char/rtc.c | 4
-rw-r--r--  drivers/char/scx200_gpio.c | 2
-rw-r--r--  drivers/char/snsc.c | 2
-rw-r--r--  drivers/char/sonypi.c | 2
-rw-r--r--  drivers/char/stallion.c | 2
-rw-r--r--  drivers/char/sx.c | 2
-rw-r--r--  drivers/char/sysrq.c | 5
-rw-r--r--  drivers/char/tb0219.c | 2
-rw-r--r--  drivers/char/tipar.c | 2
-rw-r--r--  drivers/char/tlclk.c | 2
-rw-r--r--  drivers/char/toshiba.c | 2
-rw-r--r--  drivers/char/tpm/tpm_atmel.c | 2
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 2
-rw-r--r--  drivers/char/tpm/tpm_nsc.c | 2
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 2
-rw-r--r--  drivers/char/tty_io.c | 10
-rw-r--r--  drivers/char/vc_screen.c | 2
-rw-r--r--  drivers/char/viotape.c | 2
-rw-r--r--  drivers/char/vr41xx_giu.c | 2
-rw-r--r--  drivers/char/vt.c | 1
-rw-r--r--  drivers/char/watchdog/acquirewdt.c | 2
-rw-r--r--  drivers/char/watchdog/advantechwdt.c | 2
-rw-r--r--  drivers/char/watchdog/alim1535_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/alim7101_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/at91_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/booke_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/cpu5wdt.c | 2
-rw-r--r--  drivers/char/watchdog/ep93xx_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/eurotechwdt.c | 2
-rw-r--r--  drivers/char/watchdog/i6300esb.c | 2
-rw-r--r--  drivers/char/watchdog/i8xx_tco.c | 2
-rw-r--r--  drivers/char/watchdog/ib700wdt.c | 2
-rw-r--r--  drivers/char/watchdog/ibmasr.c | 2
-rw-r--r--  drivers/char/watchdog/indydog.c | 2
-rw-r--r--  drivers/char/watchdog/ixp2000_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/ixp4xx_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/machzwd.c | 2
-rw-r--r--  drivers/char/watchdog/mixcomwd.c | 2
-rw-r--r--  drivers/char/watchdog/mpc83xx_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/mpc8xx_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/mpcore_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/mv64x60_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/pcwd.c | 4
-rw-r--r--  drivers/char/watchdog/pcwd_pci.c | 4
-rw-r--r--  drivers/char/watchdog/pcwd_usb.c | 4
-rw-r--r--  drivers/char/watchdog/s3c2410_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/sa1100_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/sbc60xxwdt.c | 2
-rw-r--r--  drivers/char/watchdog/sbc8360.c | 2
-rw-r--r--  drivers/char/watchdog/sbc_epx_c3.c | 2
-rw-r--r--  drivers/char/watchdog/sc1200wdt.c | 2
-rw-r--r--  drivers/char/watchdog/sc520_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/scx200_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/shwdt.c | 2
-rw-r--r--  drivers/char/watchdog/softdog.c | 2
-rw-r--r--  drivers/char/watchdog/w83627hf_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/w83877f_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/w83977f_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/wafer5823wdt.c | 2
-rw-r--r--  drivers/char/watchdog/wdrtas.c | 4
-rw-r--r--  drivers/char/watchdog/wdt.c | 4
-rw-r--r--  drivers/char/watchdog/wdt285.c | 2
-rw-r--r--  drivers/char/watchdog/wdt977.c | 2
-rw-r--r--  drivers/char/watchdog/wdt_pci.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 260
-rw-r--r--  drivers/dma/dmaengine.c | 20
-rw-r--r--  drivers/dma/ioatdma.c | 10
-rw-r--r--  drivers/dma/ioatdma_registers.h | 2
-rw-r--r--  drivers/dma/iovlock.c | 2
-rw-r--r--  drivers/ide/ide-floppy.c | 2
-rw-r--r--  drivers/ide/ide-io.c | 8
-rw-r--r--  drivers/ide/ide-taskfile.c | 2
-rw-r--r--  drivers/ieee1394/hosts.c | 10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 5
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 138
-rw-r--r--  drivers/input/serio/i8042-sparcio.h | 2
-rw-r--r--  drivers/input/serio/libps2.c | 2
-rw-r--r--  drivers/macintosh/macio-adb.c | 19
-rw-r--r--  drivers/macintosh/macio_asic.c | 152
-rw-r--r--  drivers/macintosh/smu.c | 6
-rw-r--r--  drivers/macintosh/via-cuda.c | 24
-rw-r--r--  drivers/macintosh/via-pmu.c | 33
-rw-r--r--  drivers/md/md.c | 6
-rw-r--r--  drivers/message/fusion/Makefile | 5
-rw-r--r--  drivers/message/fusion/lsi/fc_log.h | 89
-rw-r--r--  drivers/message/fusion/lsi/mpi.h | 5
-rw-r--r--  drivers/message/fusion/lsi/mpi_cnfg.h | 158
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt | 76
-rw-r--r--  drivers/message/fusion/lsi/mpi_init.h | 4
-rw-r--r--  drivers/message/fusion/lsi/mpi_ioc.h | 154
-rw-r--r--  drivers/message/fusion/lsi/mpi_log_sas.h | 82
-rw-r--r--  drivers/message/fusion/lsi/mpi_sas.h | 13
-rw-r--r--  drivers/message/fusion/lsi/mpi_targ.h | 5
-rw-r--r--  drivers/message/fusion/mptbase.c | 75
-rw-r--r--  drivers/message/fusion/mptbase.h | 19
-rw-r--r--  drivers/message/fusion/mptfc.c | 16
-rw-r--r--  drivers/message/fusion/mptsas.c | 996
-rw-r--r--  drivers/message/fusion/mptspi.c | 4
-rw-r--r--  drivers/mmc/mmc.c | 2
-rw-r--r--  drivers/mtd/devices/doc2000.c | 179
-rw-r--r--  drivers/mtd/devices/doc2001.c | 179
-rw-r--r--  drivers/mtd/devices/doc2001plus.c | 164
-rw-r--r--  drivers/mtd/nand/nand_base.c | 16
-rw-r--r--  drivers/mtd/nand/nand_ecc.c | 3
-rw-r--r--  drivers/mtd/nand/sharpsl.c | 7
-rw-r--r--  drivers/net/3c59x.c | 4
-rw-r--r--  drivers/net/8390.c | 4
-rw-r--r--  drivers/net/forcedeth.c | 28
-rw-r--r--  drivers/net/mace.c | 4
-rw-r--r--  drivers/net/tg3.c | 33
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 10
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c | 11
-rw-r--r--  drivers/s390/char/sclp.c | 10
-rw-r--r--  drivers/s390/cio/cio.c | 2
-rw-r--r--  drivers/s390/net/qeth_main.c | 6
-rw-r--r--  drivers/s390/s390mach.c | 3
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 9
-rw-r--r--  drivers/scsi/53c700.c | 56
-rw-r--r--  drivers/scsi/53c700.h | 34
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 26
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.h | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 24
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 62
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 11
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_proc.c | 19
-rw-r--r--  drivers/scsi/atp870u.c | 157
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 64
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 5
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 152
-rw-r--r--  drivers/scsi/libata-core.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 187
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 20
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 32
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 21
-rw-r--r--  drivers/scsi/nsp32.c | 12
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 2
-rw-r--r--  drivers/scsi/qla1280.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 120
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 925
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 151
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 19
-rw-r--r--  drivers/scsi/qla2xxx/qla_devtbl.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 143
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 24
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 280
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 125
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 1223
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 22
-rw-r--r--  drivers/scsi/scsi_lib.c | 124
-rw-r--r--  drivers/scsi/scsi_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_sas_internal.h | 10
-rw-r--r--  drivers/scsi/scsi_scan.c | 1
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 42
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 658
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 371
-rw-r--r--  drivers/scsi/scsicam.c | 3
-rw-r--r--  drivers/scsi/sd.c | 169
-rw-r--r--  drivers/scsi/sg.c | 10
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/scsi/st.c | 1
-rw-r--r--  drivers/scsi/st.h | 1
-rw-r--r--  drivers/serial/8250_pci.c | 20
-rw-r--r--  drivers/serial/8250_pnp.c | 2
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 3
-rw-r--r--  drivers/serial/pmac_zilog.c | 6
-rw-r--r--  drivers/serial/serial_core.c | 17
-rw-r--r--  drivers/spi/spi.c | 2
-rw-r--r--  drivers/usb/core/inode.c | 4
-rw-r--r--  drivers/usb/gadget/lh7a40x_udc.c | 2
-rw-r--r--  drivers/video/Kconfig | 15
-rw-r--r--  drivers/video/Makefile | 2
-rw-r--r--  drivers/video/offb.c | 284
-rw-r--r--  drivers/video/pnx4008/Makefile | 7
-rw-r--r--  drivers/video/pnx4008/dum.h | 211
-rw-r--r--  drivers/video/pnx4008/fbcommon.h | 43
-rw-r--r--  drivers/video/pnx4008/pnxrgbfb.c | 213
-rw-r--r--  drivers/video/pnx4008/sdum.c | 872
-rw-r--r--  drivers/video/pnx4008/sdum.h | 139
-rw-r--r--  fs/binfmt_elf.c | 15
-rw-r--r--  fs/block_dev.c | 102
-rw-r--r--  fs/dcache.c | 6
-rw-r--r--  fs/direct-io.c | 6
-rw-r--r--  fs/eventpoll.c | 4
-rw-r--r--  fs/ext2/super.c | 2
-rw-r--r--  fs/ext3/super.c | 2
-rw-r--r--  fs/jffs2/acl.c | 4
-rw-r--r--  fs/jffs2/acl.h | 4
-rw-r--r--  fs/jffs2/malloc.c | 2
-rw-r--r--  fs/jffs2/nodelist.h | 2
-rw-r--r--  fs/jffs2/readinode.c | 1
-rw-r--r--  fs/jffs2/scan.c | 4
-rw-r--r--  fs/jffs2/xattr.c | 45
-rw-r--r--  fs/namei.c | 20
-rw-r--r--  fs/ntfs/inode.c | 33
-rw-r--r--  fs/ntfs/super.c | 31
-rw-r--r--  fs/proc/task_nommu.c | 2
-rw-r--r--  fs/reiserfs/super.c | 2
-rw-r--r--  fs/super.c | 11
-rw-r--r--  fs/ufs/super.c | 2
-rw-r--r--  include/Kbuild | 2
-rw-r--r--  include/acpi/acmacros.h | 2
-rw-r--r--  include/acpi/acpiosxf.h | 2
-rw-r--r--  include/asm-alpha/Kbuild | 5
-rw-r--r--  include/asm-alpha/rwsem.h | 14
-rw-r--r--  include/asm-arm/Kbuild | 1
-rw-r--r--  include/asm-arm26/Kbuild | 1
-rw-r--r--  include/asm-cris/Kbuild | 1
-rw-r--r--  include/asm-frv/Kbuild | 1
-rw-r--r--  include/asm-generic/Kbuild | 3
-rw-r--r--  include/asm-generic/Kbuild.asm | 11
-rw-r--r--  include/asm-generic/cputime.h | 2
-rw-r--r--  include/asm-generic/mutex-null.h | 15
-rw-r--r--  include/asm-generic/percpu.h | 2
-rw-r--r--  include/asm-h8300/Kbuild | 1
-rw-r--r--  include/asm-i386/Kbuild | 5
-rw-r--r--  include/asm-i386/irqflags.h | 127
-rw-r--r--  include/asm-i386/rwsem.h | 42
-rw-r--r--  include/asm-i386/spinlock.h | 12
-rw-r--r--  include/asm-i386/system.h | 20
-rw-r--r--  include/asm-ia64/Kbuild | 7
-rw-r--r--  include/asm-ia64/irq.h | 2
-rw-r--r--  include/asm-ia64/percpu.h | 1
-rw-r--r--  include/asm-ia64/rwsem.h | 18
-rw-r--r--  include/asm-ia64/thread_info.h | 2
-rw-r--r--  include/asm-m32r/Kbuild | 1
-rw-r--r--  include/asm-m32r/system.h | 2
-rw-r--r--  include/asm-m68k/Kbuild | 1
-rw-r--r--  include/asm-m68knommu/Kbuild | 1
-rw-r--r--  include/asm-mips/Kbuild | 1
-rw-r--r--  include/asm-parisc/Kbuild | 1
-rw-r--r--  include/asm-powerpc/Kbuild | 10
-rw-r--r--  include/asm-powerpc/i8259.h | 8
-rw-r--r--  include/asm-powerpc/irq.h | 358
-rw-r--r--  include/asm-powerpc/irqflags.h | 31
-rw-r--r--  include/asm-powerpc/machdep.h | 2
-rw-r--r--  include/asm-powerpc/mpic.h | 67
-rw-r--r--  include/asm-powerpc/percpu.h | 1
-rw-r--r--  include/asm-powerpc/prom.h | 98
-rw-r--r--  include/asm-powerpc/rwsem.h | 18
-rw-r--r--  include/asm-powerpc/spu.h | 1
-rw-r--r--  include/asm-s390/Kbuild | 4
-rw-r--r--  include/asm-s390/irqflags.h | 50
-rw-r--r--  include/asm-s390/percpu.h | 1
-rw-r--r--  include/asm-s390/rwsem.h | 31
-rw-r--r--  include/asm-s390/semaphore.h | 3
-rw-r--r--  include/asm-s390/system.h | 32
-rw-r--r--  include/asm-sh/Kbuild | 1
-rw-r--r--  include/asm-sh/rwsem.h | 18
-rw-r--r--  include/asm-sh/system.h | 2
-rw-r--r--  include/asm-sh64/Kbuild | 1
-rw-r--r--  include/asm-sparc/Kbuild | 6
-rw-r--r--  include/asm-sparc64/Kbuild | 10
-rw-r--r--  include/asm-sparc64/percpu.h | 1
-rw-r--r--  include/asm-um/Kbuild | 1
-rw-r--r--  include/asm-v850/Kbuild | 1
-rw-r--r--  include/asm-x86_64/Kbuild | 11
-rw-r--r--  include/asm-x86_64/irqflags.h | 141
-rw-r--r--  include/asm-x86_64/kdebug.h | 2
-rw-r--r--  include/asm-x86_64/percpu.h | 2
-rw-r--r--  include/asm-x86_64/system.h | 38
-rw-r--r--  include/asm-xtensa/Kbuild | 1
-rw-r--r--  include/asm-xtensa/rwsem.h | 18
-rw-r--r--  include/linux/Kbuild | 63
-rw-r--r--  include/linux/byteorder/Kbuild | 2
-rw-r--r--  include/linux/completion.h | 12
-rw-r--r--  include/linux/dcache.h | 12
-rw-r--r--  include/linux/debug_locks.h | 69
-rw-r--r--  include/linux/dmaengine.h | 43
-rw-r--r--  include/linux/dvb/Kbuild | 2
-rw-r--r--  include/linux/fs.h | 38
-rw-r--r--  include/linux/hardirq.h | 27
-rw-r--r--  include/linux/hdlc/Kbuild | 1
-rw-r--r--  include/linux/hrtimer.h | 1
-rw-r--r--  include/linux/ide.h | 2
-rw-r--r--  include/linux/idr.h | 2
-rw-r--r--  include/linux/init_task.h | 15
-rw-r--r--  include/linux/interrupt.h | 77
-rw-r--r--  include/linux/ioport.h | 1
-rw-r--r--  include/linux/irqflags.h | 96
-rw-r--r--  include/linux/isdn/Kbuild | 1
-rw-r--r--  include/linux/kallsyms.h | 23
-rw-r--r--  include/linux/lockdep.h | 353
-rw-r--r--  include/linux/mm.h | 8
-rw-r--r--  include/linux/mmzone.h | 6
-rw-r--r--  include/linux/module.h | 6
-rw-r--r--  include/linux/mtd/bbm.h | 35
-rw-r--r--  include/linux/mtd/mtd.h | 4
-rw-r--r--  include/linux/mtd/nand.h | 16
-rw-r--r--  include/linux/mtd/onenand.h | 77
-rw-r--r--  include/linux/mutex-debug.h | 18
-rw-r--r--  include/linux/mutex.h | 37
-rw-r--r--  include/linux/netfilter/Kbuild | 11
-rw-r--r--  include/linux/netfilter_arp/Kbuild | 2
-rw-r--r--  include/linux/netfilter_bridge/Kbuild | 4
-rw-r--r--  include/linux/netfilter_ipv4/Kbuild | 21
-rw-r--r--  include/linux/netfilter_ipv6/Kbuild | 6
-rw-r--r--  include/linux/nfsd/Kbuild | 2
-rw-r--r--  include/linux/notifier.h | 2
-rw-r--r--  include/linux/poison.h | 5
-rw-r--r--  include/linux/raid/Kbuild | 1
-rw-r--r--  include/linux/rtmutex.h | 10
-rw-r--r--  include/linux/rwsem-spinlock.h | 27
-rw-r--r--  include/linux/rwsem.h | 83
-rw-r--r--  include/linux/sched.h | 86
-rw-r--r--  include/linux/seqlock.h | 12
-rw-r--r--  include/linux/serial_core.h | 3
-rw-r--r--  include/linux/skbuff.h | 3
-rw-r--r--  include/linux/spinlock.h | 63
-rw-r--r--  include/linux/spinlock_api_smp.h | 2
-rw-r--r--  include/linux/spinlock_api_up.h | 1
-rw-r--r--  include/linux/spinlock_types.h | 47
-rw-r--r--  include/linux/spinlock_types_up.h | 9
-rw-r--r--  include/linux/spinlock_up.h | 1
-rw-r--r--  include/linux/stacktrace.h | 20
-rw-r--r--  include/linux/sunrpc/Kbuild | 1
-rw-r--r--  include/linux/swap.h | 1
-rw-r--r--  include/linux/sysctl.h | 2
-rw-r--r--  include/linux/tc_act/Kbuild | 1
-rw-r--r--  include/linux/tc_ematch/Kbuild | 1
-rw-r--r--  include/linux/vermagic.h | 2
-rw-r--r--  include/linux/wait.h | 8
-rw-r--r--  include/linux/workqueue.h | 2
-rw-r--r--  include/mtd/Kbuild | 2
-rw-r--r--  include/mtd/mtd-abi.h | 2
-rw-r--r--  include/net/af_unix.h | 3
-rw-r--r--  include/net/ax25.h | 24
-rw-r--r--  include/net/bluetooth/bluetooth.h | 2
-rw-r--r--  include/net/bluetooth/hci.h | 73
-rw-r--r--  include/net/bluetooth/hci_core.h | 59
-rw-r--r--  include/net/sock.h | 19
-rw-r--r--  include/rdma/Kbuild | 1
-rw-r--r--  include/scsi/Kbuild | 2
-rw-r--r--  include/scsi/iscsi_if.h | 24
-rw-r--r--  include/scsi/libiscsi.h | 15
-rw-r--r--  include/scsi/scsi_cmnd.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 6
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 48
-rw-r--r--  include/scsi/scsi_transport_sas.h | 37
-rw-r--r--  include/sound/Kbuild | 2
-rw-r--r--  include/video/Kbuild | 1
-rw-r--r--  init/main.c | 31
-rw-r--r--  init/version.c | 1
-rw-r--r--  kernel/Makefile | 8
-rw-r--r--  kernel/capability.c | 8
-rw-r--r--  kernel/exit.c | 40
-rw-r--r--  kernel/fork.c | 51
-rw-r--r--  kernel/futex.c | 28
-rw-r--r--  kernel/hrtimer.c | 6
-rw-r--r--  kernel/irq/chip.c | 5
-rw-r--r--  kernel/irq/handle.c | 18
-rw-r--r--  kernel/irq/manage.c | 6
-rw-r--r--  kernel/kmod.c | 2
-rw-r--r--  kernel/lockdep.c | 2702
-rw-r--r--  kernel/lockdep_internals.h | 78
-rw-r--r--  kernel/lockdep_proc.c | 345
-rw-r--r--  kernel/module.c | 26
-rw-r--r--  kernel/mutex-debug.c | 399
-rw-r--r--  kernel/mutex-debug.h | 94
-rw-r--r--  kernel/mutex.c | 74
-rw-r--r--  kernel/mutex.h | 19
-rw-r--r--  kernel/pid.c | 6
-rw-r--r--  kernel/printk.c | 23
-rw-r--r--  kernel/ptrace.c | 6
-rw-r--r--  kernel/rcupdate.c | 4
-rw-r--r--  kernel/rtmutex-debug.c | 307
-rw-r--r--  kernel/rtmutex-debug.h | 8
-rw-r--r--  kernel/rtmutex-tester.c | 4
-rw-r--r--  kernel/rtmutex.c | 57
-rw-r--r--  kernel/rtmutex.h | 3
-rw-r--r--  kernel/rwsem.c | 147
-rw-r--r--  kernel/sched.c | 748
-rw-r--r--  kernel/softirq.c | 141
-rw-r--r--  kernel/spinlock.c | 79
-rw-r--r--  kernel/stacktrace.c | 24
-rw-r--r--  kernel/stop_machine.c | 17
-rw-r--r--  kernel/sysctl.c | 11
-rw-r--r--  kernel/timer.c | 13
-rw-r--r--  kernel/wait.c | 4
-rw-r--r--  kernel/workqueue.c | 59
-rw-r--r--  lib/Kconfig.debug | 127
-rw-r--r--  lib/Makefile | 3
-rw-r--r--  lib/debug_locks.c | 45
-rw-r--r--  lib/kernel_lock.c | 7
-rw-r--r--  lib/locking-selftest-hardirq.h | 9
-rw-r--r--  lib/locking-selftest-mutex.h | 11
-rw-r--r--  lib/locking-selftest-rlock-hardirq.h | 2
-rw-r--r--  lib/locking-selftest-rlock-softirq.h | 2
-rw-r--r--  lib/locking-selftest-rlock.h | 14
-rw-r--r--  lib/locking-selftest-rsem.h | 14
-rw-r--r--  lib/locking-selftest-softirq.h | 9
-rw-r--r--  lib/locking-selftest-spin-hardirq.h | 2
-rw-r--r--  lib/locking-selftest-spin-softirq.h | 2
-rw-r--r--  lib/locking-selftest-spin.h | 11
-rw-r--r--  lib/locking-selftest-wlock-hardirq.h | 2
-rw-r--r--  lib/locking-selftest-wlock-softirq.h | 2
-rw-r--r--  lib/locking-selftest-wlock.h | 14
-rw-r--r--  lib/locking-selftest-wsem.h | 14
-rw-r--r--  lib/locking-selftest.c | 1216
-rw-r--r--  lib/rwsem-spinlock.c | 66
-rw-r--r--  lib/rwsem.c | 51
-rw-r--r--  lib/spinlock_debug.c | 98
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  mm/mremap.c | 2
-rw-r--r--  mm/oom_kill.c | 8
-rw-r--r--  mm/page_alloc.c | 22
-rw-r--r--  mm/slab.c | 59
-rw-r--r--  mm/swap_state.c | 2
-rw-r--r--  mm/vmalloc.c | 2
-rw-r--r--  mm/vmscan.c | 27
-rw-r--r--  net/8021q/vlan.c | 11
-rw-r--r--  net/atm/clip.c | 3
-rw-r--r--  net/ax25/ax25_ip.c | 23
-rw-r--r--  net/ax25/ax25_route.c | 49
-rw-r--r--  net/bluetooth/af_bluetooth.c | 20
-rw-r--r--  net/bluetooth/hci_conn.c | 100
-rw-r--r--  net/bluetooth/hci_core.c | 38
-rw-r--r--  net/bluetooth/hci_event.c | 206
-rw-r--r--  net/bluetooth/hci_sysfs.c | 211
-rw-r--r--  net/bluetooth/l2cap.c | 361
-rw-r--r--  net/bluetooth/rfcomm/core.c | 27
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 4
-rw-r--r--  net/bluetooth/sco.c | 4
-rw-r--r--  net/bridge/br_netlink.c | 3
-rw-r--r--  net/core/skbuff.c | 7
-rw-r--r--  net/core/sock.c | 111
-rw-r--r--  net/ipv4/af_inet.c | 14
-rw-r--r--  net/ipv4/route.c | 26
-rw-r--r--  net/ipv4/tcp.c | 13
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 11
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 3
-rw-r--r--  net/netlink/af_netlink.c | 8
-rw-r--r--  net/netrom/af_netrom.c | 4
-rw-r--r--  net/rose/af_rose.c | 7
-rw-r--r--  net/rose/rose_dev.c | 5
-rw-r--r--  net/sunrpc/rpc_pipe.c | 8
-rw-r--r--  net/tipc/core.h | 5
-rw-r--r--  net/tipc/link.c | 2
-rw-r--r--  net/unix/af_unix.c | 14
-rw-r--r--  scripts/Makefile.headersinst | 158
-rwxr-xr-x  scripts/checkversion.pl | 7
-rwxr-xr-x  scripts/hdrcheck.sh | 8
-rw-r--r--  sound/aoa/core/snd-aoa-gpio-feature.c | 7
-rw-r--r--  sound/aoa/soundbus/i2sbus/i2sbus-core.c | 7
-rw-r--r--  sound/core/seq/seq_device.c | 6
-rw-r--r--  sound/core/seq/seq_ports.c | 4
-rw-r--r--  sound/oss/dmasound/dmasound_awacs.c | 16
-rw-r--r--  sound/pci/cs46xx/dsp_spos_scb_lib.c | 5
-rw-r--r--  sound/ppc/pmac.c | 33
-rw-r--r--  sound/ppc/tumbler.c | 8
-rw-r--r--  sound/sparc/amd7930.c | 4
-rw-r--r--  sound/sparc/cs4231.c | 2
-rw-r--r--  sound/sparc/dbri.c | 2
662 files changed, 27110 insertions, 9374 deletions
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 999afe1ca8cb..a8c8cce50633 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -109,7 +109,7 @@
 for most of the implementations. These functions can be replaced by the
 board driver if neccecary. Those functions are called via pointers in the
 NAND chip description structure. The board driver can set the functions which
-should be replaced by board dependend functions before calling nand_scan().
+should be replaced by board dependent functions before calling nand_scan().
 If the function pointer is NULL on entry to nand_scan() then the pointer
 is set to the default function which is suitable for the detected chip type.
 </para></listitem>
@@ -133,7 +133,7 @@
 [REPLACEABLE]</para><para>
 Replaceable members hold hardware related functions which can be
 provided by the board driver. The board driver can set the functions which
-should be replaced by board dependend functions before calling nand_scan().
+should be replaced by board dependent functions before calling nand_scan().
 If the function pointer is NULL on entry to nand_scan() then the pointer
 is set to the default function which is suitable for the detected chip type.
 </para></listitem>
@@ -156,9 +156,8 @@
 <title>Basic board driver</title>
 <para>
 For most boards it will be sufficient to provide just the
-basic functions and fill out some really board dependend
+basic functions and fill out some really board dependent
 members in the nand chip description structure.
-See drivers/mtd/nand/skeleton for reference.
 </para>
 <sect1>
 <title>Basic defines</title>
@@ -1295,7 +1294,9 @@ in this page</entry>
 </para>
 !Idrivers/mtd/nand/nand_base.c
 !Idrivers/mtd/nand/nand_bbt.c
-!Idrivers/mtd/nand/nand_ecc.c
+<!-- No internal functions for kernel-doc:
+X!Idrivers/mtd/nand/nand_ecc.c
+-->
 </chapter>
 
 <chapter id="credits">
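The pattern the mtdnand text above describes - a board driver filling in only the board-dependent members of struct nand_chip and leaving the rest NULL for nand_scan() to default - looks roughly like the C sketch below. The board name, I/O address and callback body are hypothetical, and the member names assume the older hwcontrol-style nand_chip layout:

static struct mtd_info myboard_mtd;	/* "myboard" is a made-up example */
static struct nand_chip myboard_chip;

/* The only truly board-dependent piece here: driving the control lines. */
static void myboard_hwcontrol(struct mtd_info *mtd, int cmd)
{
	/* poke board-specific CLE/ALE/nCE registers here */
}

static int __init myboard_nand_init(void)
{
	myboard_chip.IO_ADDR_R = (void __iomem *)0xff000000; /* hypothetical */
	myboard_chip.IO_ADDR_W = (void __iomem *)0xff000000;
	myboard_chip.hwcontrol = myboard_hwcontrol;
	myboard_chip.chip_delay = 20;	/* ready delay in us, chip dependent */
	/* ecc/cmdfunc/read_byte etc. stay NULL: nand_scan() fills in defaults */

	myboard_mtd.priv = &myboard_chip;
	return nand_scan(&myboard_mtd, 1);	/* probe for one chip */
}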
diff --git a/Documentation/irqflags-tracing.txt b/Documentation/irqflags-tracing.txt
new file mode 100644
index 000000000000..6a444877ee0b
--- /dev/null
+++ b/Documentation/irqflags-tracing.txt
@@ -0,0 +1,57 @@
+IRQ-flags state tracing
+
+started by Ingo Molnar <mingo@redhat.com>
+
+the "irq-flags tracing" feature "traces" hardirq and softirq state, in
+that it gives interested subsystems an opportunity to be notified of
+every hardirqs-off/hardirqs-on, softirqs-off/softirqs-on event that
+happens in the kernel.
+
+CONFIG_TRACE_IRQFLAGS_SUPPORT is needed for CONFIG_PROVE_SPIN_LOCKING
+and CONFIG_PROVE_RW_LOCKING to be offered by the generic lock debugging
+code. Otherwise only CONFIG_PROVE_MUTEX_LOCKING and
+CONFIG_PROVE_RWSEM_LOCKING will be offered on an architecture - these
+are locking APIs that are not used in IRQ context. (the one exception
+for rwsems is worked around)
+
+architecture support for this is certainly not in the "trivial"
+category, because lots of lowlevel assembly code deal with irq-flags
+state changes. But an architecture can be irq-flags-tracing enabled in a
+rather straightforward and risk-free manner.
+
+Architectures that want to support this need to do a couple of
+code-organizational changes first:
+
+- move their irq-flags manipulation code from their asm/system.h header
+  to asm/irqflags.h
+
+- rename local_irq_disable()/etc to raw_local_irq_disable()/etc. so that
+  the linux/irqflags.h code can inject callbacks and can construct the
+  real local_irq_disable()/etc APIs.
+
+- add and enable TRACE_IRQFLAGS_SUPPORT in their arch level Kconfig file
+
+and then a couple of functional changes are needed as well to implement
+irq-flags-tracing support:
+
+- in lowlevel entry code add (build-conditional) calls to the
+  trace_hardirqs_off()/trace_hardirqs_on() functions. The lock validator
+  closely guards whether the 'real' irq-flags matches the 'virtual'
+  irq-flags state, and complains loudly (and turns itself off) if the
+  two do not match. Usually most of the time for arch support for
+  irq-flags-tracing is spent in this state: look at the lockdep
+  complaint, try to figure out the assembly code we did not cover yet,
+  fix and repeat. Once the system has booted up and works without a
+  lockdep complaint in the irq-flags-tracing functions arch support is
+  complete.
+- if the architecture has non-maskable interrupts then those need to be
+  excluded from the irq-tracing [and lock validation] mechanism via
+  lockdep_off()/lockdep_on().
+
+in general there is no risk from having an incomplete irq-flags-tracing
+implementation in an architecture: lockdep will detect that and will
+turn itself off. I.e. the lock validator will still be reliable. There
+should be no crashes due to irq-tracing bugs. (except if the assembly
+changes break other code by modifying conditions or registers that
+shouldnt be)
+
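The renaming step called out in the new document is what makes the tracing hooks injectable: once an architecture provides only raw_local_irq_*(), generic code can wrap them. A minimal sketch of the idea (simplified; the real linux/irqflags.h macros added by this merge cover more cases, such as local_irq_save/restore):

#ifdef CONFIG_TRACE_IRQFLAGS

extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);

/* Note the ordering: record "off" after actually disabling, and
 * record "on" before actually re-enabling, so the traced state
 * never claims irqs are on while they are really off. */
#define local_irq_disable() \
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

#else /* !CONFIG_TRACE_IRQFLAGS */

#define local_irq_disable()	raw_local_irq_disable()
#define local_irq_enable()	raw_local_irq_enable()

#endif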
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 86e9282d1c20..149f62ba14a5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -435,6 +435,15 @@ running once the system is up.
 
 	debug		[KNL] Enable kernel debugging (events log level).
 
+	debug_locks_verbose=
+			[KNL] verbose self-tests
+			Format=<0|1>
+			Print debugging info while doing the locking API
+			self-tests.
+			We default to 0 (no extra messages), setting it to
+			1 will print _a lot_ more information - normally
+			only useful to kernel developers.
+
 	decnet=		[HW,NET]
 			Format: <area>[,<node>]
 			See also Documentation/networking/decnet.txt.
diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
new file mode 100644
index 000000000000..00d93605bfd3
--- /dev/null
+++ b/Documentation/lockdep-design.txt
@@ -0,0 +1,197 @@
+Runtime locking correctness validator
+=====================================
+
+started by Ingo Molnar <mingo@redhat.com>
+additions by Arjan van de Ven <arjan@linux.intel.com>
+
+Lock-class
+----------
+
+The basic object the validator operates upon is a 'class' of locks.
+
+A class of locks is a group of locks that are logically the same with
+respect to locking rules, even if the locks may have multiple (possibly
+tens of thousands of) instantiations. For example a lock in the inode
+struct is one class, while each inode has its own instantiation of that
+lock class.
+
+The validator tracks the 'state' of lock-classes, and it tracks
+dependencies between different lock-classes. The validator maintains a
+rolling proof that the state and the dependencies are correct.
+
+Unlike an lock instantiation, the lock-class itself never goes away: when
+a lock-class is used for the first time after bootup it gets registered,
+and all subsequent uses of that lock-class will be attached to this
+lock-class.
+
+State
+-----
+
+The validator tracks lock-class usage history into 5 separate state bits:
+
+- 'ever held in hardirq context'                    [ == hardirq-safe   ]
+- 'ever held in softirq context'                    [ == softirq-safe   ]
+- 'ever held with hardirqs enabled'                 [ == hardirq-unsafe ]
+- 'ever held with softirqs and hardirqs enabled'    [ == softirq-unsafe ]
+
+- 'ever used'                                       [ == !unused        ]
+
+Single-lock state rules:
+------------------------
+
+A softirq-unsafe lock-class is automatically hardirq-unsafe as well. The
+following states are exclusive, and only one of them is allowed to be
+set for any lock-class:
+
+ <hardirq-safe> and <hardirq-unsafe>
+ <softirq-safe> and <softirq-unsafe>
+
+The validator detects and reports lock usage that violate these
+single-lock state rules.
+
+Multi-lock dependency rules:
+----------------------------
+
+The same lock-class must not be acquired twice, because this could lead
+to lock recursion deadlocks.
+
+Furthermore, two locks may not be taken in different order:
+
+ <L1> -> <L2>
+ <L2> -> <L1>
+
+because this could lead to lock inversion deadlocks. (The validator
+finds such dependencies in arbitrary complexity, i.e. there can be any
+other locking sequence between the acquire-lock operations, the
+validator will still track all dependencies between locks.)
+
+Furthermore, the following usage based lock dependencies are not allowed
+between any two lock-classes:
+
+ <hardirq-safe> -> <hardirq-unsafe>
+ <softirq-safe> -> <softirq-unsafe>
+
+The first rule comes from the fact the a hardirq-safe lock could be
+taken by a hardirq context, interrupting a hardirq-unsafe lock - and
+thus could result in a lock inversion deadlock. Likewise, a softirq-safe
+lock could be taken by an softirq context, interrupting a softirq-unsafe
+lock.
+
+The above rules are enforced for any locking sequence that occurs in the
+kernel: when acquiring a new lock, the validator checks whether there is
+any rule violation between the new lock and any of the held locks.
+
+When a lock-class changes its state, the following aspects of the above
+dependency rules are enforced:
+
+- if a new hardirq-safe lock is discovered, we check whether it
+  took any hardirq-unsafe lock in the past.
+
+- if a new softirq-safe lock is discovered, we check whether it took
+  any softirq-unsafe lock in the past.
+
+- if a new hardirq-unsafe lock is discovered, we check whether any
+  hardirq-safe lock took it in the past.
+
+- if a new softirq-unsafe lock is discovered, we check whether any
+  softirq-safe lock took it in the past.
+
+(Again, we do these checks too on the basis that an interrupt context
+could interrupt _any_ of the irq-unsafe or hardirq-unsafe locks, which
+could lead to a lock inversion deadlock - even if that lock scenario did
+not trigger in practice yet.)
+
+Exception: Nested data dependencies leading to nested locking
+-------------------------------------------------------------
+
+There are a few cases where the Linux kernel acquires more than one
+instance of the same lock-class. Such cases typically happen when there
+is some sort of hierarchy within objects of the same type. In these
+cases there is an inherent "natural" ordering between the two objects
+(defined by the properties of the hierarchy), and the kernel grabs the
+locks in this fixed order on each of the objects.
+
+An example of such an object hieararchy that results in "nested locking"
+is that of a "whole disk" block-dev object and a "partition" block-dev
+object; the partition is "part of" the whole device and as long as one
+always takes the whole disk lock as a higher lock than the partition
+lock, the lock ordering is fully correct. The validator does not
+automatically detect this natural ordering, as the locking rule behind
+the ordering is not static.
+
+In order to teach the validator about this correct usage model, new
+versions of the various locking primitives were added that allow you to
+specify a "nesting level". An example call, for the block device mutex,
+looks like this:
+
+enum bdev_bd_mutex_lock_class
+{
+       BD_MUTEX_NORMAL,
+       BD_MUTEX_WHOLE,
+       BD_MUTEX_PARTITION
+};
+
+ mutex_lock_nested(&bdev->bd_contains->bd_mutex, BD_MUTEX_PARTITION);
+
+In this case the locking is done on a bdev object that is known to be a
+partition.
+
+The validator treats a lock that is taken in such a nested fasion as a
+separate (sub)class for the purposes of validation.
+
+Note: When changing code to use the _nested() primitives, be careful and
+check really thoroughly that the hiearchy is correctly mapped; otherwise
+you can get false positives or false negatives.
+
+Proof of 100% correctness:
+--------------------------
+
+The validator achieves perfect, mathematical 'closure' (proof of locking
+correctness) in the sense that for every simple, standalone single-task
+locking sequence that occured at least once during the lifetime of the
+kernel, the validator proves it with a 100% certainty that no
+combination and timing of these locking sequences can cause any class of
+lock related deadlock. [*]
+
+I.e. complex multi-CPU and multi-task locking scenarios do not have to
+occur in practice to prove a deadlock: only the simple 'component'
+locking chains have to occur at least once (anytime, in any
+task/context) for the validator to be able to prove correctness. (For
+example, complex deadlocks that would normally need more than 3 CPUs and
+a very unlikely constellation of tasks, irq-contexts and timings to
+occur, can be detected on a plain, lightly loaded single-CPU system as
+well!)
+
+This radically decreases the complexity of locking related QA of the
+kernel: what has to be done during QA is to trigger as many "simple"
+single-task locking dependencies in the kernel as possible, at least
+once, to prove locking correctness - instead of having to trigger every
+possible combination of locking interaction between CPUs, combined with
+every possible hardirq and softirq nesting scenario (which is impossible
+to do in practice).
+
+[*] assuming that the validator itself is 100% correct, and no other
+    part of the system corrupts the state of the validator in any way.
+    We also assume that all NMI/SMM paths [which could interrupt
+    even hardirq-disabled codepaths] are correct and do not interfere
+    with the validator. We also assume that the 64-bit 'chain hash'
+    value is unique for every lock-chain in the system. Also, lock
+    recursion must not be higher than 20.
+
+Performance:
+------------
+
+The above rules require _massive_ amounts of runtime checking. If we did
+that for every lock taken and for every irqs-enable event, it would
+render the system practically unusably slow. The complexity of checking
+is O(N^2), so even with just a few hundred lock-classes we'd have to do
+tens of thousands of checks for every event.
+
+This problem is solved by checking any given 'locking scenario' (unique
+sequence of locks taken after each other) only once. A simple stack of
+held locks is maintained, and a lightweight 64-bit hash value is
+calculated, which hash is unique for every lock chain. The hash value,
+when the chain is validated for the first time, is then put into a hash
+table, which hash-table can be checked in a lockfree manner. If the
+locking chain occurs again later on, the hash table tells us that we
+dont have to validate the chain again.
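One way to picture the chain-hash caching the Performance section describes: hash the sequence of held lock-class ids, and consult a table before doing the expensive checks. This is only an illustration of the idea, not kernel/lockdep.c's actual data structures or hash function:

#include <stdint.h>
#include <stdbool.h>

#define CHAIN_BUCKETS	(1 << 16)

static uint64_t chain_seen[CHAIN_BUCKETS];	/* 0 means empty slot */

/* FNV-1a over the held-lock class ids; stands in for lockdep's hash. */
static uint64_t chain_hash(const uint32_t *class_ids, int depth)
{
	uint64_t h = 14695981039346656037ULL;	/* FNV-1a offset basis */
	int i;

	for (i = 0; i < depth; i++) {
		h ^= class_ids[i];
		h *= 1099511628211ULL;		/* FNV-1a prime */
	}
	return h ? h : 1;			/* keep 0 reserved for "empty" */
}

/* true: this exact chain was validated before, skip the O(N^2) work */
static bool chain_already_validated(const uint32_t *class_ids, int depth)
{
	uint64_t h = chain_hash(class_ids, depth);
	unsigned int b = (unsigned int)(h & (CHAIN_BUCKETS - 1));

	if (chain_seen[b] == h)
		return true;
	chain_seen[b] = h;	/* remember it; caller runs the full checks once */
	return false;
}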
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
new file mode 100644
index 000000000000..4ccdbca03811
--- /dev/null
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -0,0 +1,143 @@
+/proc/sys/net/ipv4/vs/* Variables:
+
+am_droprate - INTEGER
+        default 10
+
+        It sets the always mode drop rate, which is used in the mode 3
+        of the drop_rate defense.
+
+amemthresh - INTEGER
+        default 1024
+
+        It sets the available memory threshold (in pages), which is
+        used in the automatic modes of defense. When there is no
+        enough available memory, the respective strategy will be
+        enabled and the variable is automatically set to 2, otherwise
+        the strategy is disabled and the variable is set to 1.
+
+cache_bypass - BOOLEAN
+        0 - disabled (default)
+        not 0 - enabled
+
+        If it is enabled, forward packets to the original destination
+        directly when no cache server is available and destination
+        address is not local (iph->daddr is RTN_UNICAST). It is mostly
+        used in transparent web cache cluster.
+
+debug_level - INTEGER
+        0          - transmission error messages (default)
+        1          - non-fatal error messages
+        2          - configuration
+        3          - destination trash
+        4          - drop entry
+        5          - service lookup
+        6          - scheduling
+        7          - connection new/expire, lookup and synchronization
+        8          - state transition
+        9          - binding destination, template checks and applications
+        10         - IPVS packet transmission
+        11         - IPVS packet handling (ip_vs_in/ip_vs_out)
+        12 or more - packet traversal
+
+        Only available when IPVS is compiled with the CONFIG_IPVS_DEBUG
+
+        Higher debugging levels include the messages for lower debugging
+        levels, so setting debug level 2, includes level 0, 1 and 2
+        messages. Thus, logging becomes more and more verbose the higher
+        the level.
+
+drop_entry - INTEGER
+        0  - disabled (default)
+
+        The drop_entry defense is to randomly drop entries in the
+        connection hash table, just in order to collect back some
+        memory for new connections. In the current code, the
+        drop_entry procedure can be activated every second, then it
+        randomly scans 1/32 of the whole and drops entries that are in
+        the SYN-RECV/SYNACK state, which should be effective against
+        syn-flooding attack.
+
+        The valid values of drop_entry are from 0 to 3, where 0 means
+        that this strategy is always disabled, 1 and 2 mean automatic
+        modes (when there is no enough available memory, the strategy
+        is enabled and the variable is automatically set to 2,
+        otherwise the strategy is disabled and the variable is set to
+        1), and 3 means that that the strategy is always enabled.
+
+drop_packet - INTEGER
+        0  - disabled (default)
+
+        The drop_packet defense is designed to drop 1/rate packets
+        before forwarding them to real servers. If the rate is 1, then
+        drop all the incoming packets.
+
+        The value definition is the same as that of the drop_entry. In
+        the automatic mode, the rate is determined by the follow
+        formula: rate = amemthresh / (amemthresh - available_memory)
+        when available memory is less than the available memory
+        threshold. When the mode 3 is set, the always mode drop rate
+        is controlled by the /proc/sys/net/ipv4/vs/am_droprate.
+
+expire_nodest_conn - BOOLEAN
+        0 - disabled (default)
+        not 0 - enabled
+
+        The default value is 0, the load balancer will silently drop
+        packets when its destination server is not available. It may
+        be useful, when user-space monitoring program deletes the
+        destination server (because of server overload or wrong
+        detection) and add back the server later, and the connections
+        to the server can continue.
+
+        If this feature is enabled, the load balancer will expire the
+        connection immediately when a packet arrives and its
+        destination server is not available, then the client program
+        will be notified that the connection is closed. This is
+        equivalent to the feature some people requires to flush
+        connections when its destination is not available.
+
+expire_quiescent_template - BOOLEAN
+        0 - disabled (default)
+        not 0 - enabled
+
+        When set to a non-zero value, the load balancer will expire
+        persistent templates when the destination server is quiescent.
+        This may be useful, when a user makes a destination server
+        quiescent by setting its weight to 0 and it is desired that
+        subsequent otherwise persistent connections are sent to a
+        different destination server. By default new persistent
+        connections are allowed to quiescent destination servers.
+
+        If this feature is enabled, the load balancer will expire the
+        persistence template if it is to be used to schedule a new
+        connection and the destination server is quiescent.
+
+nat_icmp_send - BOOLEAN
+        0 - disabled (default)
+        not 0 - enabled
+
+        It controls sending icmp error messages (ICMP_DEST_UNREACH)
+        for VS/NAT when the load balancer receives packets from real
+        servers but the connection entries don't exist.
+
+secure_tcp - INTEGER
+        0  - disabled (default)
+
+        The secure_tcp defense is to use a more complicated state
+        transition table and some possible short timeouts of each
+        state. In the VS/NAT, it delays the entering the ESTABLISHED
+        until the real server starts to send data and ACK packet
+        (after 3-way handshake).
+
+        The value definition is the same as that of drop_entry or
+        drop_packet.
+
+sync_threshold - INTEGER
+        default 3
+
+        It sets synchronization threshold, which is the minimum number
+        of incoming packets that a connection needs to receive before
+        the connection will be synchronized. A connection will be
+        synchronized, every time the number of its incoming packets
+        modulus 50 equals the threshold. The range of the threshold is
+        from 0 to 49.
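The drop_packet formula in the new document is easy to misread; as a C sketch, the automatic mode amounts to the following (illustrative only - the names and packet counter are not the in-kernel implementation):

/* rate = amemthresh / (amemthresh - available_memory); drop 1-in-rate */
static int ip_vs_sketch_should_drop(int amemthresh, int available_memory,
				    unsigned int *pkt_counter)
{
	int rate;

	if (available_memory >= amemthresh)
		return 0;		/* no memory pressure: keep everything */

	rate = amemthresh / (amemthresh - available_memory);
	if (rate <= 1)
		return 1;		/* rate 1 means drop all incoming packets */

	return (++*pkt_counter % rate) == 0;	/* drop one of every "rate" */
}

As available memory falls toward zero the computed rate falls toward 1, so the dropped fraction grows under increasing memory pressure.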
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 217e51768b87..3c62e66e1fcc 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1436,9 +1436,9 @@ platforms are moved over to use the flattened-device-tree model.
 			interrupts = <1d 3>;
 			interrupt-parent = <40000>;
 			num-channels = <4>;
-			channel-fifo-len = <24>;
+			channel-fifo-len = <18>;
 			exec-units-mask = <000000fe>;
-			descriptor-types-mask = <073f1127>;
+			descriptor-types-mask = <012b0ebf>;
 		};
 
 
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 0a85a7e8120e..d9e5960dafd5 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,4 +1,20 @@
 
+1 Release Date    : Sun May 14 22:49:52 PDT 2006 - Sumant Patro <Sumant.Patro@lsil.com>
+2 Current Version : 00.00.03.01
+3 Older Version   : 00.00.02.04
+
+i.	Added support for ZCR controller.
+
+	New device id 0x413 added.
+
+ii.	Bug fix : Disable controller interrupt before firing INIT cmd to FW.
+
+	Interrupt is enabled after required initialization is over.
+	This is done to ensure that driver is ready to handle interrupts when
+	it is generated by the controller.
+
+	-Sumant Patro <Sumant.Patro@lsil.com>
+
 1 Release Date    : Wed Feb 03 14:31:44 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
 2 Current Version : 00.00.02.04
 3 Older Version   : 00.00.02.04
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 86754eb390da..7cee90223d3a 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -28,6 +28,7 @@ Currently, these files are in /proc/sys/vm:
 - block_dump
 - drop-caches
 - zone_reclaim_mode
+- min_unmapped_ratio
 - panic_on_oom
 
 ==============================================================
@@ -168,6 +169,19 @@ in all nodes of the system.
 
 =============================================================
 
+min_unmapped_ratio:
+
+This is available only on NUMA kernels.
+
+A percentage of the file backed pages in each zone. Zone reclaim will only
+occur if more than this percentage of pages are file backed and unmapped.
+This is to insure that a minimal amount of local pages is still available for
+file I/O even if the node is overallocated.
+
+The default is 1 percent.
+
+=============================================================
+
 panic_on_oom
 
 This enables or disables panic on out-of-memory feature. If this is set to 1,
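Read as code, the new min_unmapped_ratio gate amounts to a per-zone comparison like the sketch below (struct and helper names are illustrative, not the mm/vmscan.c internals):

struct zone_counts {
	unsigned long present_pages;	/* pages managed by the zone */
	unsigned long file_pages;	/* file-backed pages */
	unsigned long file_mapped;	/* the mapped subset of those */
};

/* min_unmapped_ratio is a percentage, default 1 */
static int zone_reclaim_worthwhile(const struct zone_counts *z,
				   int min_unmapped_ratio)
{
	unsigned long unmapped = z->file_pages - z->file_mapped;
	unsigned long floor = z->present_pages * min_unmapped_ratio / 100;

	/* only reclaim when enough unmapped page cache remains to matter */
	return unmapped > floor;
}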
diff --git a/MAINTAINERS b/MAINTAINERS
index 42be131139c8..5f76a4f5cd4b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -861,6 +861,8 @@ S: Maintained
 DOCBOOK FOR DOCUMENTATION
 P:	Martin Waitz
 M:	tali@admingilde.org
+P:	Randy Dunlap
+M:	rdunlap@xenotime.net
 T:	git http://tali.admingilde.org/git/linux-docbook.git
 S:	Maintained
 
@@ -2298,6 +2300,14 @@ M: promise@pnd-pc.demon.co.uk
 W:	http://www.pnd-pc.demon.co.uk/promise/
 S:	Maintained
 
+PVRUSB2 VIDEO4LINUX DRIVER
+P:	Mike Isely
+M:	isely@pobox.com
+L:	pvrusb2@isely.net
+L:	video4linux-list@redhat.com
+W:	http://www.isely.net/pvrusb2/
+S:	Maintained
+
 PXA2xx SUPPORT
 P:	Nicolas Pitre
 M:	nico@cam.org
diff --git a/Makefile b/Makefile
index 4dcf25d43fa6..11a850cffd3d 100644
--- a/Makefile
+++ b/Makefile
@@ -309,6 +309,9 @@ CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE)
 
 CFLAGS          := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                    -fno-strict-aliasing -fno-common
+# Force gcc to behave correct even for buggy distributions
+CFLAGS          += $(call cc-option, -fno-stack-protector-all \
+                                     -fno-stack-protector)
 AFLAGS          := -D__ASSEMBLY__
 
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
@@ -809,8 +812,8 @@ endif
 # prepare2 creates a makefile if using a separate output directory
 prepare2: prepare3 outputmakefile
 
-prepare1: prepare2 include/linux/version.h include/asm \
-          include/config/auto.conf
+prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
+          include/asm include/config/auto.conf
 ifneq ($(KBUILD_MODULES),)
 	$(Q)mkdir -p $(MODVERDIR)
 	$(Q)rm -f $(MODVERDIR)/*
@@ -845,21 +848,26 @@ include/asm:
 # needs to be updated, so this check is forced on all builds
 
 uts_len := 64
+define filechk_utsrelease.h
+	if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \
+	  echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2;   \
+	  exit 1;                                                        \
+	fi;                                                              \
+	(echo \#define UTS_RELEASE \"$(KERNELRELEASE)\";)
+endef
 
 define filechk_version.h
-	if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \
-	  echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \
-	  exit 1; \
-	fi; \
-	(echo \#define UTS_RELEASE \"$(KERNELRELEASE)\"; \
-	echo \#define LINUX_VERSION_CODE `expr $(VERSION) \\* 65536 + $(PATCHLEVEL) \\* 256 + $(SUBLEVEL)`; \
-	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'; \
-	)
+	(echo \#define LINUX_VERSION_CODE $(shell                        \
+	expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL));  \
+	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
 endef
 
-include/linux/version.h: $(srctree)/Makefile include/config/kernel.release FORCE
+include/linux/version.h: $(srctree)/Makefile FORCE
 	$(call filechk,version.h)
 
+include/linux/utsrelease.h: include/config/kernel.release FORCE
+	$(call filechk,utsrelease.h)
+
 # ---------------------------------------------------------------------------
 
 PHONY += depend dep
@@ -867,6 +875,21 @@ depend dep:
 	@echo '*** Warning: make $@ is unnecessary now.'
 
 # ---------------------------------------------------------------------------
+# Kernel headers
+INSTALL_HDR_PATH=$(MODLIB)/abi
+export INSTALL_HDR_PATH
+
+PHONY += headers_install
+headers_install: include/linux/version.h
+	$(Q)unifdef -Ux /dev/null
+	$(Q)rm -rf $(INSTALL_HDR_PATH)/include
+	$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.headersinst obj=include
+
+PHONY += headers_check
+headers_check: headers_install
+	$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.headersinst obj=include HDRCHECK=1
+
+# ---------------------------------------------------------------------------
 # Modules
 
 ifdef CONFIG_MODULES
@@ -952,7 +975,8 @@ CLEAN_FILES += vmlinux System.map \
 # Directories & files removed with 'make mrproper'
 MRPROPER_DIRS  += include/config include2
 MRPROPER_FILES += .config .config.old include/asm .version .old_version \
                   include/linux/autoconf.h include/linux/version.h \
+                  include/linux/utsrelease.h \
                   Module.symvers tags TAGS cscope*
 
 # clean - Delete most, but leave enough to build external modules
@@ -1039,6 +1063,8 @@ help:
1039 @echo ' cscope - Generate cscope index' 1063 @echo ' cscope - Generate cscope index'
1040 @echo ' kernelrelease - Output the release version string' 1064 @echo ' kernelrelease - Output the release version string'
1041 @echo ' kernelversion - Output the version stored in Makefile' 1065 @echo ' kernelversion - Output the version stored in Makefile'
1066 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'
1067 @echo ' (default: /lib/modules/$$VERSION/abi)'
1042 @echo '' 1068 @echo ''
1043 @echo 'Static analysers' 1069 @echo 'Static analysers'
1044 @echo ' checkstack - Generate a list of stack hogs' 1070 @echo ' checkstack - Generate a list of stack hogs'
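The Makefile hunks above split UTS_RELEASE out of include/linux/version.h into a new include/linux/utsrelease.h, so version.h now carries only LINUX_VERSION_CODE and KERNEL_VERSION and no longer depends on include/config/kernel.release; the per-file changes further down switch boot and setup code over to the new header. A minimal sketch of how the two generated macros are consumed, with illustrative version numbers (2.6.18) rather than values taken from this tree:

#include <stdio.h>

/* What the filechk_version.h rule would emit for a 2.6.18 source tree: */
#define LINUX_VERSION_CODE 132626	/* 2*65536 + 6*256 + 18 */
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	/* The usual compile-time feature check in out-of-tree code: */
	if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18))
		printf("built against 2.6.18 or newer headers\n");
	return 0;
}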
diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c
index ec53c28e33de..3af21c789339 100644
--- a/arch/alpha/boot/bootp.c
+++ b/arch/alpha/boot/bootp.c
@@ -9,7 +9,7 @@
9 */ 9 */
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/version.h> 12#include <linux/utsrelease.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14 14
15#include <asm/system.h> 15#include <asm/system.h>
diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c
index a6657f2cf9bd..4307bde80a35 100644
--- a/arch/alpha/boot/bootpz.c
+++ b/arch/alpha/boot/bootpz.c
@@ -11,7 +11,7 @@
11 */ 11 */
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <linux/version.h> 14#include <linux/utsrelease.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16 16
17#include <asm/system.h> 17#include <asm/system.h>
diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c
index 78c9b0b6eea7..90ed55b662a8 100644
--- a/arch/alpha/boot/main.c
+++ b/arch/alpha/boot/main.c
@@ -7,7 +7,7 @@
7 */ 7 */
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/string.h> 9#include <linux/string.h>
10#include <linux/version.h> 10#include <linux/utsrelease.h>
11#include <linux/mm.h> 11#include <linux/mm.h>
12 12
13#include <asm/system.h> 13#include <asm/system.h>
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 01c8c8b23337..41ebf51a107a 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -474,7 +474,7 @@ out:
474 */ 474 */
475 475
476unsigned long 476unsigned long
477thread_saved_pc(task_t *t) 477thread_saved_pc(struct task_struct *t)
478{ 478{
479 unsigned long base = (unsigned long)task_stack_page(t); 479 unsigned long base = (unsigned long)task_stack_page(t);
480 unsigned long fp, sp = task_thread_info(t)->pcb.ksp; 480 unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index ab4ad9562eee..b9a74a741d00 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -883,7 +883,7 @@ static ssize_t ecard_show_resources(struct device *dev, struct device_attribute
883 int i; 883 int i;
884 884
885 for (i = 0; i < ECARD_NUM_RESOURCES; i++) 885 for (i = 0; i < ECARD_NUM_RESOURCES; i++)
886 str += sprintf(str, "%08lx %08lx %08lx\n", 886 str += sprintf(str, "%08x %08x %08lx\n",
887 ec->resource[i].start, 887 ec->resource[i].start,
888 ec->resource[i].end, 888 ec->resource[i].end,
889 ec->resource[i].flags); 889 ec->resource[i].flags);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7d6a516c0b9f..ed1c4d62d999 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -344,7 +344,7 @@ static void __init setup_processor(void)
344 cpu_cache = *list->cache; 344 cpu_cache = *list->cache;
345#endif 345#endif
346 346
347 printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08x\n", 347 printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
348 cpu_name, processor_id, (int)processor_id & 15, 348 cpu_name, processor_id, (int)processor_id & 15,
349 proc_arch[cpu_architecture()], cr_alignment); 349 proc_arch[cpu_architecture()], cr_alignment);
350 350
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c
index e1c43b331d64..87448c2d6baa 100644
--- a/arch/arm/mach-footbridge/isa-irq.c
+++ b/arch/arm/mach-footbridge/isa-irq.c
@@ -98,9 +98,22 @@ isa_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
98 desc_handle_irq(isa_irq, desc, regs); 98 desc_handle_irq(isa_irq, desc, regs);
99} 99}
100 100
101static struct irqaction irq_cascade = { .handler = no_action, .name = "cascade", }; 101static struct irqaction irq_cascade = {
102static struct resource pic1_resource = { "pic1", 0x20, 0x3f }; 102 .handler = no_action,
103static struct resource pic2_resource = { "pic2", 0xa0, 0xbf }; 103 .name = "cascade",
104};
105
106static struct resource pic1_resource = {
107 .name = "pic1",
108 .start = 0x20,
109 .end = 0x3f,
110};
111
112static struct resource pic2_resource = {
113 .name = "pic2",
114 .start = 0xa0,
115 .end = 0xbf,
116};
104 117
105void __init isa_init_irq(unsigned int host_irq) 118void __init isa_init_irq(unsigned int host_irq)
106{ 119{
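The isa-irq.c hunk above converts positional struct initializers to C99 designated initializers. A short sketch of why, using a stand-in type (this is not the kernel's struct resource): positional initialization binds values to fields by order, so reordering or inserting a field silently corrupts the data, while designated initializers stay correct.

#include <stdio.h>

struct res {			/* stand-in type for illustration only */
	const char *name;
	unsigned long start, end;
};

/* Positional: breaks silently if the fields of struct res are reordered. */
static struct res pic1_pos = { "pic1", 0x20, 0x3f };

/* Designated (C99): each value is tied to a named field. */
static struct res pic1_des = {
	.name	= "pic1",
	.start	= 0x20,
	.end	= 0x3f,
};

int main(void)
{
	printf("%s: %#lx-%#lx\n", pic1_pos.name, pic1_pos.start, pic1_pos.end);
	printf("%s: %#lx-%#lx\n", pic1_des.name, pic1_des.start, pic1_des.end);
	return 0;
}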
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 7eac87f05180..dba7dddfe57d 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -303,7 +303,6 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
303 int err; 303 int err;
304 unsigned long addr; 304 unsigned long addr;
305 struct vm_struct * area; 305 struct vm_struct * area;
306 unsigned int cr = get_cr();
307 306
308 /* 307 /*
309 * High mappings must be supersection aligned 308 * High mappings must be supersection aligned
@@ -317,7 +316,7 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
317 addr = (unsigned long)area->addr; 316 addr = (unsigned long)area->addr;
318 317
319#ifndef CONFIG_SMP 318#ifndef CONFIG_SMP
320 if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (cr & CR_XP)) || 319 if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
321 cpu_is_xsc3()) && 320 cpu_is_xsc3()) &&
322 !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) { 321 !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
323 area->flags |= VM_ARM_SECTION_MAPPING; 322 area->flags |= VM_ARM_SECTION_MAPPING;
@@ -369,6 +368,7 @@ void __iounmap(void __iomem *addr)
369 368
370 addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr); 369 addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
371 370
371#ifndef CONFIG_SMP
372 /* 372 /*
373 * If this is a section based mapping we need to handle it 373 * If this is a section based mapping we need to handle it
374 * specially as the VM subsystem does not know how to handle 374 * specially as the VM subsystem does not know how to handle
@@ -390,6 +390,7 @@ void __iounmap(void __iomem *addr)
390 } 390 }
391 } 391 }
392 write_unlock(&vmlist_lock); 392 write_unlock(&vmlist_lock);
393#endif
393 394
394 if (!section_mapping) 395 if (!section_mapping)
395 vunmap(addr); 396 vunmap(addr);
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 700297ae4a55..1d8316f3cecf 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -34,6 +34,8 @@
34#include <asm/procinfo.h> 34#include <asm/procinfo.h>
35#include <asm/ptrace.h> 35#include <asm/ptrace.h>
36 36
37#include "proc-macros.S"
38
37/* 39/*
38 * This is the maximum size of an area which will be invalidated 40 * This is the maximum size of an area which will be invalidated
39 * using the single invalidate entry instructions. Anything larger 41 * using the single invalidate entry instructions. Anything larger
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 0c33a5ed5a61..89b1d6d3d7c0 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -34,6 +34,8 @@
34#include <asm/procinfo.h> 34#include <asm/procinfo.h>
35#include <asm/ptrace.h> 35#include <asm/ptrace.h>
36 36
37#include "proc-macros.S"
38
37/* 39/*
38 * This is the maximum size of an area which will be invalidated 40 * This is the maximum size of an area which will be invalidated
39 * using the single invalidate entry instructions. Anything larger 41 * using the single invalidate entry instructions. Anything larger
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 566a55653072..a089528e6bce 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -23,6 +23,8 @@
23#include <asm/procinfo.h> 23#include <asm/procinfo.h>
24#include <asm/ptrace.h> 24#include <asm/ptrace.h>
25 25
26#include "proc-macros.S"
27
26/* 28/*
27 * This is the maximum size of an area which will be invalidated 29 * This is the maximum size of an area which will be invalidated
28 * using the single invalidate entry instructions. Anything larger 30 * using the single invalidate entry instructions. Anything larger
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 6ea76321d0df..d6d84d92c7c7 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -23,6 +23,8 @@
23#include <asm/procinfo.h> 23#include <asm/procinfo.h>
24#include <asm/ptrace.h> 24#include <asm/ptrace.h>
25 25
26#include "proc-macros.S"
27
26/* 28/*
27 * This is the maximum size of an area which will be invalidated 29 * This is the maximum size of an area which will be invalidated
28 * using the single invalidate entry instructions. Anything larger 30 * using the single invalidate entry instructions. Anything larger
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index ad15f8503d51..8d9a9f93b011 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -454,7 +454,8 @@ __arm925_setup:
454 mcr p15, 7, r0, c15, c0, 0 454 mcr p15, 7, r0, c15, c0, 0
455#endif 455#endif
456 456
457 adr r5, {r5, r6} 457 adr r5, arm925_crval
458 ldmia r5, {r5, r6}
458 mrc p15, 0, r0, c1, c0 @ get control register v4 459 mrc p15, 0, r0, c1, c0 @ get control register v4
459 bic r0, r0, r5 460 bic r0, r0, r5
460 orr r0, r0, r6 461 orr r0, r0, r6
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index 5db3d4eff909..af08ccd4ed6e 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13#include <linux/version.h> 13#include <linux/utsrelease.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 27d8dddbaa47..daa75ce4b777 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -18,6 +18,14 @@ config GENERIC_TIME
18 bool 18 bool
19 default y 19 default y
20 20
21config LOCKDEP_SUPPORT
22 bool
23 default y
24
25config STACKTRACE_SUPPORT
26 bool
27 default y
28
21config SEMAPHORE_SLEEPERS 29config SEMAPHORE_SLEEPERS
22 bool 30 bool
23 default y 31 default y
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index c92191b1fb67..b31c0802e1cc 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -1,5 +1,9 @@
1menu "Kernel hacking" 1menu "Kernel hacking"
2 2
3config TRACE_IRQFLAGS_SUPPORT
4 bool
5 default y
6
3source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
4 8
5config EARLY_PRINTK 9config EARLY_PRINTK
@@ -31,15 +35,6 @@ config DEBUG_STACK_USAGE
31 35
32 This option will slow down process creation somewhat. 36 This option will slow down process creation somewhat.
33 37
34config STACK_BACKTRACE_COLS
35 int "Stack backtraces per line" if DEBUG_KERNEL
36 range 1 3
37 default 2
38 help
39 Selects how many stack backtrace entries per line to display.
40
41 This can save screen space when displaying traces.
42
43comment "Page alloc debug is incompatible with Software Suspend on i386" 38comment "Page alloc debug is incompatible with Software Suspend on i386"
44 depends on DEBUG_KERNEL && SOFTWARE_SUSPEND 39 depends on DEBUG_KERNEL && SOFTWARE_SUSPEND
45 40
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S
index 0a5a3be6d69c..d2b684cd620a 100644
--- a/arch/i386/boot/setup.S
+++ b/arch/i386/boot/setup.S
@@ -47,7 +47,7 @@
47 */ 47 */
48 48
49#include <asm/segment.h> 49#include <asm/segment.h>
50#include <linux/version.h> 50#include <linux/utsrelease.h>
51#include <linux/compile.h> 51#include <linux/compile.h>
52#include <asm/boot.h> 52#include <asm/boot.h>
53#include <asm/e820.h> 53#include <asm/e820.h>
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index cbc1184e9473..1b452a1665c4 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -9,6 +9,7 @@ obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
9 pci-dma.o i386_ksyms.o i387.o bootflag.o \ 9 pci-dma.o i386_ksyms.o i387.o bootflag.o \
10 quirks.o i8237.o topology.o alternative.o i8253.o tsc.o 10 quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
11 11
12obj-$(CONFIG_STACKTRACE) += stacktrace.o
12obj-y += cpu/ 13obj-y += cpu/
13obj-y += acpi/ 14obj-y += acpi/
14obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o 15obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 7b421b3a053e..28ab80649764 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -303,6 +303,16 @@ void alternatives_smp_switch(int smp)
303 struct smp_alt_module *mod; 303 struct smp_alt_module *mod;
304 unsigned long flags; 304 unsigned long flags;
305 305
306#ifdef CONFIG_LOCKDEP
307 /*
308 * A not-yet-fixed binutils section-handling bug prevents
309 * alternatives replacement from working reliably, so turn
310 * it off:
311 */
312 printk("lockdep: not fixing up alternatives.\n");
313 return;
314#endif
315
306 if (no_replacement || smp_alt_once) 316 if (no_replacement || smp_alt_once)
307 return; 317 return;
308 BUG_ON(!smp && (num_online_cpus() > 1)); 318 BUG_ON(!smp && (num_online_cpus() > 1));
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index a8d3ecdc3897..fde8bea85cee 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -167,6 +167,7 @@ static int cpuid_class_device_create(int i)
167 return err; 167 return err;
168} 168}
169 169
170#ifdef CONFIG_HOTPLUG_CPU
170static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 171static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
171{ 172{
172 unsigned int cpu = (unsigned long)hcpu; 173 unsigned int cpu = (unsigned long)hcpu;
@@ -186,6 +187,7 @@ static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
186{ 187{
187 .notifier_call = cpuid_class_cpu_callback, 188 .notifier_call = cpuid_class_cpu_callback,
188}; 189};
190#endif /* !CONFIG_HOTPLUG_CPU */
189 191
190static int __init cpuid_init(void) 192static int __init cpuid_init(void)
191{ 193{
@@ -208,7 +210,7 @@ static int __init cpuid_init(void)
208 if (err != 0) 210 if (err != 0)
209 goto out_class; 211 goto out_class;
210 } 212 }
211 register_cpu_notifier(&cpuid_class_cpu_notifier); 213 register_hotcpu_notifier(&cpuid_class_cpu_notifier);
212 214
213 err = 0; 215 err = 0;
214 goto out; 216 goto out;
@@ -233,7 +235,7 @@ static void __exit cpuid_exit(void)
233 class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); 235 class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
234 class_destroy(cpuid_class); 236 class_destroy(cpuid_class);
235 unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); 237 unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
236 unregister_cpu_notifier(&cpuid_class_cpu_notifier); 238 unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
237} 239}
238 240
239module_init(cpuid_init); 241module_init(cpuid_init);
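The cpuid.c hunks above move the CPU notifier under #ifdef CONFIG_HOTPLUG_CPU and switch to the register_hotcpu_notifier()/unregister_hotcpu_notifier() wrappers, which compile away when CPU hotplug is disabled. A hedged sketch of the same pattern, with a hypothetical my_cpu_callback (the CPU_ONLINE/CPU_DEAD cases mirror the usual hotplug handling, not this driver's exact logic):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#ifdef CONFIG_HOTPLUG_CPU
static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		printk(KERN_DEBUG "setting up state for CPU %u\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_DEBUG "tearing down state for CPU %u\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
	.notifier_call = my_cpu_callback,
};
#endif /* CONFIG_HOTPLUG_CPU */

static int __init my_driver_init(void)
{
	/* A no-op when CONFIG_HOTPLUG_CPU is not set: */
	register_hotcpu_notifier(&my_cpu_notifier);
	return 0;
}
module_init(my_driver_init);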
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 787190c45fdb..d9a260f2efb4 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -42,6 +42,7 @@
42 42
43#include <linux/linkage.h> 43#include <linux/linkage.h>
44#include <asm/thread_info.h> 44#include <asm/thread_info.h>
45#include <asm/irqflags.h>
45#include <asm/errno.h> 46#include <asm/errno.h>
46#include <asm/segment.h> 47#include <asm/segment.h>
47#include <asm/smp.h> 48#include <asm/smp.h>
@@ -76,12 +77,21 @@ NT_MASK = 0x00004000
76VM_MASK = 0x00020000 77VM_MASK = 0x00020000
77 78
78#ifdef CONFIG_PREEMPT 79#ifdef CONFIG_PREEMPT
79#define preempt_stop cli 80#define preempt_stop cli; TRACE_IRQS_OFF
80#else 81#else
81#define preempt_stop 82#define preempt_stop
82#define resume_kernel restore_nocheck 83#define resume_kernel restore_nocheck
83#endif 84#endif
84 85
86.macro TRACE_IRQS_IRET
87#ifdef CONFIG_TRACE_IRQFLAGS
88 testl $IF_MASK,EFLAGS(%esp) # interrupts off?
89 jz 1f
90 TRACE_IRQS_ON
911:
92#endif
93.endm
94
85#ifdef CONFIG_VM86 95#ifdef CONFIG_VM86
86#define resume_userspace_sig check_userspace 96#define resume_userspace_sig check_userspace
87#else 97#else
@@ -257,6 +267,10 @@ ENTRY(sysenter_entry)
257 CFI_REGISTER esp, ebp 267 CFI_REGISTER esp, ebp
258 movl TSS_sysenter_esp0(%esp),%esp 268 movl TSS_sysenter_esp0(%esp),%esp
259sysenter_past_esp: 269sysenter_past_esp:
270 /*
271 * No need to follow this irqs on/off section: the syscall
272 * disabled irqs and here we enable it straight after entry:
273 */
260 sti 274 sti
261 pushl $(__USER_DS) 275 pushl $(__USER_DS)
262 CFI_ADJUST_CFA_OFFSET 4 276 CFI_ADJUST_CFA_OFFSET 4
@@ -303,6 +317,7 @@ sysenter_past_esp:
303 call *sys_call_table(,%eax,4) 317 call *sys_call_table(,%eax,4)
304 movl %eax,EAX(%esp) 318 movl %eax,EAX(%esp)
305 cli 319 cli
320 TRACE_IRQS_OFF
306 movl TI_flags(%ebp), %ecx 321 movl TI_flags(%ebp), %ecx
307 testw $_TIF_ALLWORK_MASK, %cx 322 testw $_TIF_ALLWORK_MASK, %cx
308 jne syscall_exit_work 323 jne syscall_exit_work
@@ -310,6 +325,7 @@ sysenter_past_esp:
310 movl EIP(%esp), %edx 325 movl EIP(%esp), %edx
311 movl OLDESP(%esp), %ecx 326 movl OLDESP(%esp), %ecx
312 xorl %ebp,%ebp 327 xorl %ebp,%ebp
328 TRACE_IRQS_ON
313 sti 329 sti
314 sysexit 330 sysexit
315 CFI_ENDPROC 331 CFI_ENDPROC
@@ -339,6 +355,7 @@ syscall_exit:
339 cli # make sure we don't miss an interrupt 355 cli # make sure we don't miss an interrupt
340 # setting need_resched or sigpending 356 # setting need_resched or sigpending
341 # between sampling and the iret 357 # between sampling and the iret
358 TRACE_IRQS_OFF
342 movl TI_flags(%ebp), %ecx 359 movl TI_flags(%ebp), %ecx
343 testw $_TIF_ALLWORK_MASK, %cx # current->work 360 testw $_TIF_ALLWORK_MASK, %cx # current->work
344 jne syscall_exit_work 361 jne syscall_exit_work
@@ -355,12 +372,15 @@ restore_all:
355 CFI_REMEMBER_STATE 372 CFI_REMEMBER_STATE
356 je ldt_ss # returning to user-space with LDT SS 373 je ldt_ss # returning to user-space with LDT SS
357restore_nocheck: 374restore_nocheck:
375 TRACE_IRQS_IRET
376restore_nocheck_notrace:
358 RESTORE_REGS 377 RESTORE_REGS
359 addl $4, %esp 378 addl $4, %esp
360 CFI_ADJUST_CFA_OFFSET -4 379 CFI_ADJUST_CFA_OFFSET -4
3611: iret 3801: iret
362.section .fixup,"ax" 381.section .fixup,"ax"
363iret_exc: 382iret_exc:
383 TRACE_IRQS_ON
364 sti 384 sti
365 pushl $0 # no error code 385 pushl $0 # no error code
366 pushl $do_iret_error 386 pushl $do_iret_error
@@ -386,11 +406,13 @@ ldt_ss:
386 subl $8, %esp # reserve space for switch16 pointer 406 subl $8, %esp # reserve space for switch16 pointer
387 CFI_ADJUST_CFA_OFFSET 8 407 CFI_ADJUST_CFA_OFFSET 8
388 cli 408 cli
409 TRACE_IRQS_OFF
389 movl %esp, %eax 410 movl %esp, %eax
390 /* Set up the 16bit stack frame with switch32 pointer on top, 411 /* Set up the 16bit stack frame with switch32 pointer on top,
391 * and a switch16 pointer on top of the current frame. */ 412 * and a switch16 pointer on top of the current frame. */
392 call setup_x86_bogus_stack 413 call setup_x86_bogus_stack
393 CFI_ADJUST_CFA_OFFSET -8 # frame has moved 414 CFI_ADJUST_CFA_OFFSET -8 # frame has moved
415 TRACE_IRQS_IRET
394 RESTORE_REGS 416 RESTORE_REGS
395 lss 20+4(%esp), %esp # switch to 16bit stack 417 lss 20+4(%esp), %esp # switch to 16bit stack
3961: iret 4181: iret
@@ -411,6 +433,7 @@ work_resched:
411 cli # make sure we don't miss an interrupt 433 cli # make sure we don't miss an interrupt
412 # setting need_resched or sigpending 434 # setting need_resched or sigpending
413 # between sampling and the iret 435 # between sampling and the iret
436 TRACE_IRQS_OFF
414 movl TI_flags(%ebp), %ecx 437 movl TI_flags(%ebp), %ecx
415 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other 438 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
416 # than syscall tracing? 439 # than syscall tracing?
@@ -462,6 +485,7 @@ syscall_trace_entry:
462syscall_exit_work: 485syscall_exit_work:
463 testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl 486 testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
464 jz work_pending 487 jz work_pending
488 TRACE_IRQS_ON
465 sti # could let do_syscall_trace() call 489 sti # could let do_syscall_trace() call
466 # schedule() instead 490 # schedule() instead
467 movl %esp, %eax 491 movl %esp, %eax
@@ -535,9 +559,14 @@ ENTRY(irq_entries_start)
535vector=vector+1 559vector=vector+1
536.endr 560.endr
537 561
562/*
563 * The CPU automatically disables interrupts when executing an IRQ vector,
564 * so IRQ-flags tracing has to follow that:
565 */
538 ALIGN 566 ALIGN
539common_interrupt: 567common_interrupt:
540 SAVE_ALL 568 SAVE_ALL
569 TRACE_IRQS_OFF
541 movl %esp,%eax 570 movl %esp,%eax
542 call do_IRQ 571 call do_IRQ
543 jmp ret_from_intr 572 jmp ret_from_intr
@@ -549,9 +578,10 @@ ENTRY(name) \
549 pushl $~(nr); \ 578 pushl $~(nr); \
550 CFI_ADJUST_CFA_OFFSET 4; \ 579 CFI_ADJUST_CFA_OFFSET 4; \
551 SAVE_ALL; \ 580 SAVE_ALL; \
581 TRACE_IRQS_OFF \
552 movl %esp,%eax; \ 582 movl %esp,%eax; \
553 call smp_/**/name; \ 583 call smp_/**/name; \
554 jmp ret_from_intr; \ 584 jmp ret_from_intr; \
555 CFI_ENDPROC 585 CFI_ENDPROC
556 586
557/* The include is where all of the SMP etc. interrupts come from */ 587/* The include is where all of the SMP etc. interrupts come from */
@@ -726,7 +756,7 @@ nmi_stack_correct:
726 xorl %edx,%edx # zero error code 756 xorl %edx,%edx # zero error code
727 movl %esp,%eax # pt_regs pointer 757 movl %esp,%eax # pt_regs pointer
728 call do_nmi 758 call do_nmi
729 jmp restore_all 759 jmp restore_nocheck_notrace
730 CFI_ENDPROC 760 CFI_ENDPROC
731 761
732nmi_stack_fixup: 762nmi_stack_fixup:
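The entry.S changes above thread TRACE_IRQS_OFF/TRACE_IRQS_ON annotations through every path that executes cli/sti directly, so that lockdep's software copy of the interrupt-enable state never diverges from the hardware flag; TRACE_IRQS_IRET covers iret, which re-enables interrupts by restoring EFLAGS without a matching sti. A conceptual C sketch of the same pairing (trace_hardirqs_on/off are the real lockdep hooks; the traced_* wrappers and raw_* calls are illustrative stand-ins for the assembly):

/* Disable: flip the hardware flag first, then inform lockdep
 * (matches "cli; TRACE_IRQS_OFF" in the entry code above). */
static inline void traced_irq_disable(void)
{
	raw_local_irq_disable();
	trace_hardirqs_off();
}

/* Enable: inform lockdep first, then flip the hardware flag
 * (matches "TRACE_IRQS_ON; sti"). */
static inline void traced_irq_enable(void)
{
	trace_hardirqs_on();
	raw_local_irq_enable();
}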
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 16b491703967..6cb529f60dcc 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -166,7 +166,7 @@ void irq_ctx_init(int cpu)
166 irqctx->tinfo.task = NULL; 166 irqctx->tinfo.task = NULL;
167 irqctx->tinfo.exec_domain = NULL; 167 irqctx->tinfo.exec_domain = NULL;
168 irqctx->tinfo.cpu = cpu; 168 irqctx->tinfo.cpu = cpu;
169 irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET; 169 irqctx->tinfo.preempt_count = 0;
170 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); 170 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
171 171
172 softirq_ctx[cpu] = irqctx; 172 softirq_ctx[cpu] = irqctx;
@@ -211,6 +211,10 @@ asmlinkage void do_softirq(void)
211 : "0"(isp) 211 : "0"(isp)
212 : "memory", "cc", "edx", "ecx", "eax" 212 : "memory", "cc", "edx", "ecx", "eax"
213 ); 213 );
214 /*
215 * Shouldn't happen; we returned above if in_interrupt():
216 */
217 WARN_ON_ONCE(softirq_count());
214 } 218 }
215 219
216 local_irq_restore(flags); 220 local_irq_restore(flags);
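The do_softirq() hunk above asserts an invariant rather than handling a case: having bailed out earlier when in_interrupt(), softirq_count() must be zero after the stack switch, and WARN_ON_ONCE() flags a violation exactly once instead of flooding the log. A sketch of the once-only idea, assuming GNU C statement expressions (the real macro lives in the kernel's bug headers; MY_WARN_ON_ONCE is a hypothetical name):

#define MY_WARN_ON_ONCE(condition) ({			\
	static int __warned;				\
	int __ret = !!(condition);			\
							\
	/* evaluated every time, warns only on first hit */	\
	if (unlikely(__ret && !__warned)) {		\
		__warned = 1;				\
		WARN_ON(1);				\
	}						\
	__ret;						\
})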
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index a76e93146585..2dd928a84645 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -107,7 +107,7 @@ int nmi_active;
107static __init void nmi_cpu_busy(void *data) 107static __init void nmi_cpu_busy(void *data)
108{ 108{
109 volatile int *endflag = data; 109 volatile int *endflag = data;
110 local_irq_enable(); 110 local_irq_enable_in_hardirq();
111 /* Intentionally don't use cpu_relax here. This is 111 /* Intentionally don't use cpu_relax here. This is
112 to make sure that the performance counter really ticks, 112 to make sure that the performance counter really ticks,
113 even if there is a simulator or similar that catches the 113 even if there is a simulator or similar that catches the
diff --git a/arch/i386/kernel/stacktrace.c b/arch/i386/kernel/stacktrace.c
new file mode 100644
index 000000000000..e62a037ab399
--- /dev/null
+++ b/arch/i386/kernel/stacktrace.c
@@ -0,0 +1,98 @@
1/*
2 * arch/i386/kernel/stacktrace.c
3 *
4 * Stack trace management functions
5 *
6 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 */
8#include <linux/sched.h>
9#include <linux/stacktrace.h>
10
11static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
12{
13 return p > (void *)tinfo &&
14 p < (void *)tinfo + THREAD_SIZE - 3;
15}
16
17/*
18 * Save stack-backtrace addresses into a stack_trace buffer:
19 */
20static inline unsigned long
21save_context_stack(struct stack_trace *trace, unsigned int skip,
22 struct thread_info *tinfo, unsigned long *stack,
23 unsigned long ebp)
24{
25 unsigned long addr;
26
27#ifdef CONFIG_FRAME_POINTER
28 while (valid_stack_ptr(tinfo, (void *)ebp)) {
29 addr = *(unsigned long *)(ebp + 4);
30 if (!skip)
31 trace->entries[trace->nr_entries++] = addr;
32 else
33 skip--;
34 if (trace->nr_entries >= trace->max_entries)
35 break;
36 /*
37 * break out of recursive entries (such as
38 * end_of_stack_stop_unwind_function):
39 */
40 if (ebp == *(unsigned long *)ebp)
41 break;
42
43 ebp = *(unsigned long *)ebp;
44 }
45#else
46 while (valid_stack_ptr(tinfo, stack)) {
47 addr = *stack++;
48 if (__kernel_text_address(addr)) {
49 if (!skip)
50 trace->entries[trace->nr_entries++] = addr;
51 else
52 skip--;
53 if (trace->nr_entries >= trace->max_entries)
54 break;
55 }
56 }
57#endif
58
59 return ebp;
60}
61
62/*
63 * Save stack-backtrace addresses into a stack_trace buffer.
64 * If all_contexts is set, all contexts (hardirq, softirq and process)
65 * are saved. If not set, only the current context is saved.
66 */
67void save_stack_trace(struct stack_trace *trace,
68 struct task_struct *task, int all_contexts,
69 unsigned int skip)
70{
71 unsigned long ebp;
72 unsigned long *stack = &ebp;
73
74 WARN_ON(trace->nr_entries || !trace->max_entries);
75
76 if (!task || task == current) {
77 /* Grab ebp right from our regs: */
78 asm ("movl %%ebp, %0" : "=r" (ebp));
79 } else {
80 /* ebp is the last reg pushed by switch_to(): */
81 ebp = *(unsigned long *) task->thread.esp;
82 }
83
84 while (1) {
85 struct thread_info *context = (struct thread_info *)
86 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
87
88 ebp = save_context_stack(trace, skip, context, stack, ebp);
89 stack = (unsigned long *)context->previous_esp;
90 if (!all_contexts || !stack ||
91 trace->nr_entries >= trace->max_entries)
92 break;
93 trace->entries[trace->nr_entries++] = ULONG_MAX;
94 if (trace->nr_entries >= trace->max_entries)
95 break;
96 }
97}
98
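The new stacktrace.c above walks frames via EBP when CONFIG_FRAME_POINTER is set and falls back to scanning the stack for kernel text addresses otherwise; ULONG_MAX entries mark context boundaries when all_contexts is requested. A usage sketch for the interface it introduces (MY_DEPTH and dump_current_stack are illustrative names):

#include <linux/stacktrace.h>
#include <linux/kallsyms.h>

#define MY_DEPTH 16	/* illustrative buffer size */

static void dump_current_stack(void)
{
	unsigned long entries[MY_DEPTH];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= MY_DEPTH,
		.entries	= entries,
	};
	unsigned int i;

	/* current task, current context only, skip no entries */
	save_stack_trace(&trace, NULL, 0, 0);

	for (i = 0; i < trace.nr_entries; i++)
		print_symbol("%s\n", trace.entries[i]);
}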
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index e8c6086b2aa1..2bf8b55b91f8 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -115,28 +115,13 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
115} 115}
116 116
117/* 117/*
118 * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line. 118 * Print one address/symbol entry per line.
119 */ 119 */
120static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl, 120static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
121 int printed)
122{ 121{
123 if (!printed)
124 printk(log_lvl);
125
126#if CONFIG_STACK_BACKTRACE_COLS == 1
127 printk(" [<%08lx>] ", addr); 122 printk(" [<%08lx>] ", addr);
128#else
129 printk(" <%08lx> ", addr);
130#endif
131 print_symbol("%s", addr);
132 123
133 printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS; 124 print_symbol("%s\n", addr);
134 if (printed)
135 printk(" ");
136 else
137 printk("\n");
138
139 return printed;
140} 125}
141 126
142static inline unsigned long print_context_stack(struct thread_info *tinfo, 127static inline unsigned long print_context_stack(struct thread_info *tinfo,
@@ -144,12 +129,11 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
144 char *log_lvl) 129 char *log_lvl)
145{ 130{
146 unsigned long addr; 131 unsigned long addr;
147 int printed = 0; /* nr of entries already printed on current line */
148 132
149#ifdef CONFIG_FRAME_POINTER 133#ifdef CONFIG_FRAME_POINTER
150 while (valid_stack_ptr(tinfo, (void *)ebp)) { 134 while (valid_stack_ptr(tinfo, (void *)ebp)) {
151 addr = *(unsigned long *)(ebp + 4); 135 addr = *(unsigned long *)(ebp + 4);
152 printed = print_addr_and_symbol(addr, log_lvl, printed); 136 print_addr_and_symbol(addr, log_lvl);
153 /* 137 /*
154 * break out of recursive entries (such as 138 * break out of recursive entries (such as
155 * end_of_stack_stop_unwind_function): 139 * end_of_stack_stop_unwind_function):
@@ -162,28 +146,23 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
162 while (valid_stack_ptr(tinfo, stack)) { 146 while (valid_stack_ptr(tinfo, stack)) {
163 addr = *stack++; 147 addr = *stack++;
164 if (__kernel_text_address(addr)) 148 if (__kernel_text_address(addr))
165 printed = print_addr_and_symbol(addr, log_lvl, printed); 149 print_addr_and_symbol(addr, log_lvl);
166 } 150 }
167#endif 151#endif
168 if (printed)
169 printk("\n");
170
171 return ebp; 152 return ebp;
172} 153}
173 154
174static asmlinkage int show_trace_unwind(struct unwind_frame_info *info, void *log_lvl) 155static asmlinkage int
156show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
175{ 157{
176 int n = 0; 158 int n = 0;
177 int printed = 0; /* nr of entries already printed on current line */
178 159
179 while (unwind(info) == 0 && UNW_PC(info)) { 160 while (unwind(info) == 0 && UNW_PC(info)) {
180 ++n; 161 n++;
181 printed = print_addr_and_symbol(UNW_PC(info), log_lvl, printed); 162 print_addr_and_symbol(UNW_PC(info), log_lvl);
182 if (arch_unw_user_mode(info)) 163 if (arch_unw_user_mode(info))
183 break; 164 break;
184 } 165 }
185 if (printed)
186 printk("\n");
187 return n; 166 return n;
188} 167}
189 168
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 2a1ef742e223..b7515bc808a8 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -50,7 +50,7 @@ static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
50 memcpy(length, vendor->byte_data + 8, sizeof(*length)); 50 memcpy(length, vendor->byte_data + 8, sizeof(*length));
51 51
52 exit: 52 exit:
53 acpi_os_free(buffer.pointer); 53 kfree(buffer.pointer);
54 return status; 54 return status;
55} 55}
56 56
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index ccdef199d915..99761b81db44 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -856,7 +856,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
856 obj = buffer.pointer; 856 obj = buffer.pointer;
857 if (obj->type != ACPI_TYPE_BUFFER || 857 if (obj->type != ACPI_TYPE_BUFFER ||
858 obj->buffer.length < sizeof(*lsapic)) { 858 obj->buffer.length < sizeof(*lsapic)) {
859 acpi_os_free(buffer.pointer); 859 kfree(buffer.pointer);
860 return -EINVAL; 860 return -EINVAL;
861 } 861 }
862 862
@@ -864,13 +864,13 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
864 864
865 if ((lsapic->header.type != ACPI_MADT_LSAPIC) || 865 if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
866 (!lsapic->flags.enabled)) { 866 (!lsapic->flags.enabled)) {
867 acpi_os_free(buffer.pointer); 867 kfree(buffer.pointer);
868 return -EINVAL; 868 return -EINVAL;
869 } 869 }
870 870
871 physid = ((lsapic->id << 8) | (lsapic->eid)); 871 physid = ((lsapic->id << 8) | (lsapic->eid));
872 872
873 acpi_os_free(buffer.pointer); 873 kfree(buffer.pointer);
874 buffer.length = ACPI_ALLOCATE_BUFFER; 874 buffer.length = ACPI_ALLOCATE_BUFFER;
875 buffer.pointer = NULL; 875 buffer.pointer = NULL;
876 876
@@ -934,20 +934,20 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
934 obj = buffer.pointer; 934 obj = buffer.pointer;
935 if (obj->type != ACPI_TYPE_BUFFER || 935 if (obj->type != ACPI_TYPE_BUFFER ||
936 obj->buffer.length < sizeof(*iosapic)) { 936 obj->buffer.length < sizeof(*iosapic)) {
937 acpi_os_free(buffer.pointer); 937 kfree(buffer.pointer);
938 return AE_OK; 938 return AE_OK;
939 } 939 }
940 940
941 iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer; 941 iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;
942 942
943 if (iosapic->header.type != ACPI_MADT_IOSAPIC) { 943 if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
944 acpi_os_free(buffer.pointer); 944 kfree(buffer.pointer);
945 return AE_OK; 945 return AE_OK;
946 } 946 }
947 947
948 gsi_base = iosapic->global_irq_base; 948 gsi_base = iosapic->global_irq_base;
949 949
950 acpi_os_free(buffer.pointer); 950 kfree(buffer.pointer);
951 951
952 /* 952 /*
953 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell 953 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
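The acpi.c and acpi-ext.c hunks above replace acpi_os_free() with plain kfree(): buffers returned with ACPI_ALLOCATE_BUFFER come from the kernel allocator, so the wrapper added nothing. A hedged sketch of the evaluate-then-free pattern these call sites follow ("_MAT" is assumed from acpi_map_lsapic()'s context, and read_mat_object is a hypothetical helper):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>

static int read_mat_object(acpi_handle handle)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return -ENODEV;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER) {
		kfree(buffer.pointer);		/* was acpi_os_free() */
		return -EINVAL;
	}

	/* ... consume obj->buffer.pointer / obj->buffer.length ... */

	kfree(buffer.pointer);
	return 0;
}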
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index eb8e8dc5ac8e..2fbe4536fe18 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@ copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
678 */ 678 */
679 679
680static void 680static void
681ia64_mca_modify_comm(const task_t *previous_current) 681ia64_mca_modify_comm(const struct task_struct *previous_current)
682{ 682{
683 char *p, comm[sizeof(current->comm)]; 683 char *p, comm[sizeof(current->comm)];
684 if (previous_current->pid) 684 if (previous_current->pid)
@@ -709,7 +709,7 @@ ia64_mca_modify_comm(const task_t *previous_current)
709 * that we can do backtrace on the MCA/INIT handler code itself. 709 * that we can do backtrace on the MCA/INIT handler code itself.
710 */ 710 */
711 711
712static task_t * 712static struct task_struct *
713ia64_mca_modify_original_stack(struct pt_regs *regs, 713ia64_mca_modify_original_stack(struct pt_regs *regs,
714 const struct switch_stack *sw, 714 const struct switch_stack *sw,
715 struct ia64_sal_os_state *sos, 715 struct ia64_sal_os_state *sos,
@@ -719,7 +719,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
719 ia64_va va; 719 ia64_va va;
720 extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ 720 extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */
721 const pal_min_state_area_t *ms = sos->pal_min_state; 721 const pal_min_state_area_t *ms = sos->pal_min_state;
722 task_t *previous_current; 722 struct task_struct *previous_current;
723 struct pt_regs *old_regs; 723 struct pt_regs *old_regs;
724 struct switch_stack *old_sw; 724 struct switch_stack *old_sw;
725 unsigned size = sizeof(struct pt_regs) + 725 unsigned size = sizeof(struct pt_regs) +
@@ -1023,7 +1023,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1023 pal_processor_state_info_t *psp = (pal_processor_state_info_t *) 1023 pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
1024 &sos->proc_state_param; 1024 &sos->proc_state_param;
1025 int recover, cpu = smp_processor_id(); 1025 int recover, cpu = smp_processor_id();
1026 task_t *previous_current; 1026 struct task_struct *previous_current;
1027 struct ia64_mca_notify_die nd = 1027 struct ia64_mca_notify_die nd =
1028 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1028 { .sos = sos, .monarch_cpu = &monarch_cpu };
1029 1029
@@ -1352,7 +1352,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1352{ 1352{
1353 static atomic_t slaves; 1353 static atomic_t slaves;
1354 static atomic_t monarchs; 1354 static atomic_t monarchs;
1355 task_t *previous_current; 1355 struct task_struct *previous_current;
1356 int cpu = smp_processor_id(); 1356 int cpu = smp_processor_id();
1357 struct ia64_mca_notify_die nd = 1357 struct ia64_mca_notify_die nd =
1358 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1358 { .sos = sos, .monarch_cpu = &monarch_cpu };
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index e1960979be29..6203ed4ec8cf 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -124,7 +124,7 @@ extern void __devinit calibrate_delay (void);
124extern void start_ap (void); 124extern void start_ap (void);
125extern unsigned long ia64_iobase; 125extern unsigned long ia64_iobase;
126 126
127task_t *task_for_booting_cpu; 127struct task_struct *task_for_booting_cpu;
128 128
129/* 129/*
130 * State for each CPU 130 * State for each CPU
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 525b082eb661..99bd9e30db96 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -313,9 +313,19 @@ static void __meminit scatter_node_data(void)
313 pg_data_t **dst; 313 pg_data_t **dst;
314 int node; 314 int node;
315 315
316 for_each_online_node(node) { 316 /*
317 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs; 317 * for_each_online_node() can't be used here.
318 memcpy(dst, pgdat_list, sizeof(pgdat_list)); 318 * node_online_map is not set for hot-added nodes at this time,
319 * because we are halfway through initialization of the new node's
320 * structures. If for_each_online_node() is used, a new node's
321 * pg_data_ptrs will not be initialized. Instead of using it,
322 * pgdat_list[] is checked.
323 */
324 for_each_node(node) {
325 if (pgdat_list[node]) {
326 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
327 memcpy(dst, pgdat_list, sizeof(pgdat_list));
328 }
319 } 329 }
320} 330}
321 331
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index ecfd637d702a..01e7fa86aa43 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -65,7 +65,7 @@ need_resched:
65#endif 65#endif
66 66
67FEXPORT(ret_from_fork) 67FEXPORT(ret_from_fork)
68 jal schedule_tail # a0 = task_t *prev 68 jal schedule_tail # a0 = struct task_struct *prev
69 69
70FEXPORT(syscall_exit) 70FEXPORT(syscall_exit)
71 local_irq_disable # make sure need_resched and 71 local_irq_disable # make sure need_resched and
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 02237a685ec7..4dcc39f42951 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -47,7 +47,7 @@ unsigned long mt_fpemul_threshold = 0;
47 * used in sys_sched_set/getaffinity() in kernel/sched.c, so 47 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
48 * cloned here. 48 * cloned here.
49 */ 49 */
50static inline task_t *find_process_by_pid(pid_t pid) 50static inline struct task_struct *find_process_by_pid(pid_t pid)
51{ 51{
52 return pid ? find_task_by_pid(pid) : current; 52 return pid ? find_task_by_pid(pid) : current;
53} 53}
@@ -62,7 +62,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
62 cpumask_t new_mask; 62 cpumask_t new_mask;
63 cpumask_t effective_mask; 63 cpumask_t effective_mask;
64 int retval; 64 int retval;
65 task_t *p; 65 struct task_struct *p;
66 66
67 if (len < sizeof(new_mask)) 67 if (len < sizeof(new_mask))
68 return -EINVAL; 68 return -EINVAL;
@@ -127,7 +127,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
127 unsigned int real_len; 127 unsigned int real_len;
128 cpumask_t mask; 128 cpumask_t mask;
129 int retval; 129 int retval;
130 task_t *p; 130 struct task_struct *p;
131 131
132 real_len = sizeof(mask); 132 real_len = sizeof(mask);
133 if (len < real_len) 133 if (len < real_len)
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
new file mode 100644
index 000000000000..0fa010a63a8e
--- /dev/null
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -0,0 +1,1378 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17
4# Mon Jul 3 12:08:41 2006
5#
6# CONFIG_PPC64 is not set
7CONFIG_PPC32=y
8CONFIG_PPC_MERGE=y
9CONFIG_MMU=y
10CONFIG_GENERIC_HARDIRQS=y
11CONFIG_RWSEM_XCHGADD_ALGORITHM=y
12CONFIG_GENERIC_HWEIGHT=y
13CONFIG_GENERIC_CALIBRATE_DELAY=y
14CONFIG_GENERIC_FIND_NEXT_BIT=y
15CONFIG_PPC=y
16CONFIG_EARLY_PRINTK=y
17CONFIG_GENERIC_NVRAM=y
18CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
19CONFIG_ARCH_MAY_HAVE_PC_FDC=y
20CONFIG_PPC_OF=y
21CONFIG_PPC_UDBG_16550=y
22CONFIG_GENERIC_TBSYNC=y
23# CONFIG_DEFAULT_UIMAGE is not set
24
25#
26# Processor support
27#
28CONFIG_CLASSIC32=y
29# CONFIG_PPC_52xx is not set
30# CONFIG_PPC_82xx is not set
31# CONFIG_PPC_83xx is not set
32# CONFIG_PPC_85xx is not set
33# CONFIG_PPC_86xx is not set
34# CONFIG_40x is not set
35# CONFIG_44x is not set
36# CONFIG_8xx is not set
37# CONFIG_E200 is not set
38CONFIG_6xx=y
39CONFIG_PPC_FPU=y
40# CONFIG_ALTIVEC is not set
41CONFIG_PPC_STD_MMU=y
42CONFIG_PPC_STD_MMU_32=y
43CONFIG_SMP=y
44CONFIG_NR_CPUS=4
45
46#
47# Code maturity level options
48#
49CONFIG_EXPERIMENTAL=y
50CONFIG_LOCK_KERNEL=y
51CONFIG_INIT_ENV_ARG_LIMIT=32
52
53#
54# General setup
55#
56CONFIG_LOCALVERSION=""
57# CONFIG_LOCALVERSION_AUTO is not set
58CONFIG_SWAP=y
59CONFIG_SYSVIPC=y
60CONFIG_POSIX_MQUEUE=y
61# CONFIG_BSD_PROCESS_ACCT is not set
62CONFIG_SYSCTL=y
63# CONFIG_AUDIT is not set
64CONFIG_IKCONFIG=y
65CONFIG_IKCONFIG_PROC=y
66# CONFIG_CPUSETS is not set
67# CONFIG_RELAY is not set
68CONFIG_INITRAMFS_SOURCE=""
69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
70# CONFIG_EMBEDDED is not set
71CONFIG_KALLSYMS=y
72# CONFIG_KALLSYMS_ALL is not set
73# CONFIG_KALLSYMS_EXTRA_PASS is not set
74CONFIG_HOTPLUG=y
75CONFIG_PRINTK=y
76CONFIG_BUG=y
77CONFIG_ELF_CORE=y
78CONFIG_BASE_FULL=y
79CONFIG_FUTEX=y
80CONFIG_EPOLL=y
81CONFIG_SHMEM=y
82CONFIG_SLAB=y
83# CONFIG_TINY_SHMEM is not set
84CONFIG_BASE_SMALL=0
85# CONFIG_SLOB is not set
86
87#
88# Loadable module support
89#
90CONFIG_MODULES=y
91CONFIG_MODULE_UNLOAD=y
92CONFIG_MODULE_FORCE_UNLOAD=y
93# CONFIG_MODVERSIONS is not set
94# CONFIG_MODULE_SRCVERSION_ALL is not set
95CONFIG_KMOD=y
96CONFIG_STOP_MACHINE=y
97
98#
99# Block layer
100#
101CONFIG_LBD=y
102# CONFIG_BLK_DEV_IO_TRACE is not set
103# CONFIG_LSF is not set
104
105#
106# IO Schedulers
107#
108CONFIG_IOSCHED_NOOP=y
109CONFIG_IOSCHED_AS=y
110CONFIG_IOSCHED_DEADLINE=y
111CONFIG_IOSCHED_CFQ=y
112CONFIG_DEFAULT_AS=y
113# CONFIG_DEFAULT_DEADLINE is not set
114# CONFIG_DEFAULT_CFQ is not set
115# CONFIG_DEFAULT_NOOP is not set
116CONFIG_DEFAULT_IOSCHED="anticipatory"
117
118#
119# Platform support
120#
121CONFIG_PPC_MULTIPLATFORM=y
122# CONFIG_PPC_ISERIES is not set
123# CONFIG_EMBEDDED6xx is not set
124# CONFIG_APUS is not set
125CONFIG_PPC_CHRP=y
126# CONFIG_PPC_PMAC is not set
127# CONFIG_PPC_CELL is not set
128# CONFIG_PPC_CELL_NATIVE is not set
129CONFIG_MPIC=y
130CONFIG_PPC_RTAS=y
131# CONFIG_RTAS_ERROR_LOGGING is not set
132CONFIG_RTAS_PROC=y
133# CONFIG_MMIO_NVRAM is not set
134CONFIG_PPC_MPC106=y
135# CONFIG_PPC_970_NAP is not set
136# CONFIG_CPU_FREQ is not set
137# CONFIG_TAU is not set
138# CONFIG_WANT_EARLY_SERIAL is not set
139
140#
141# Kernel options
142#
143CONFIG_HIGHMEM=y
144# CONFIG_HZ_100 is not set
145CONFIG_HZ_250=y
146# CONFIG_HZ_1000 is not set
147CONFIG_HZ=250
148CONFIG_PREEMPT_NONE=y
149# CONFIG_PREEMPT_VOLUNTARY is not set
150# CONFIG_PREEMPT is not set
151CONFIG_PREEMPT_BKL=y
152CONFIG_BINFMT_ELF=y
153CONFIG_BINFMT_MISC=y
154# CONFIG_KEXEC is not set
155CONFIG_IRQ_ALL_CPUS=y
156CONFIG_ARCH_FLATMEM_ENABLE=y
157CONFIG_SELECT_MEMORY_MODEL=y
158CONFIG_FLATMEM_MANUAL=y
159# CONFIG_DISCONTIGMEM_MANUAL is not set
160# CONFIG_SPARSEMEM_MANUAL is not set
161CONFIG_FLATMEM=y
162CONFIG_FLAT_NODE_MEM_MAP=y
163# CONFIG_SPARSEMEM_STATIC is not set
164CONFIG_SPLIT_PTLOCK_CPUS=4
165CONFIG_PROC_DEVICETREE=y
166# CONFIG_CMDLINE_BOOL is not set
167# CONFIG_PM is not set
168CONFIG_SECCOMP=y
169CONFIG_ISA_DMA_API=y
170
171#
172# Bus options
173#
174CONFIG_ISA=y
175CONFIG_GENERIC_ISA_DMA=y
176CONFIG_PPC_I8259=y
177CONFIG_PPC_INDIRECT_PCI=y
178CONFIG_PCI=y
179CONFIG_PCI_DOMAINS=y
180# CONFIG_PCIEPORTBUS is not set
181# CONFIG_PCI_DEBUG is not set
182
183#
184# PCCARD (PCMCIA/CardBus) support
185#
186# CONFIG_PCCARD is not set
187
188#
189# PCI Hotplug Support
190#
191# CONFIG_HOTPLUG_PCI is not set
192
193#
194# Advanced setup
195#
196# CONFIG_ADVANCED_OPTIONS is not set
197
198#
199# Default settings for advanced configuration options are used
200#
201CONFIG_HIGHMEM_START=0xfe000000
202CONFIG_LOWMEM_SIZE=0x30000000
203CONFIG_KERNEL_START=0xc0000000
204CONFIG_TASK_SIZE=0x80000000
205CONFIG_BOOT_LOAD=0x00800000
206
207#
208# Networking
209#
210CONFIG_NET=y
211
212#
213# Networking options
214#
215# CONFIG_NETDEBUG is not set
216CONFIG_PACKET=y
217# CONFIG_PACKET_MMAP is not set
218CONFIG_UNIX=y
219# CONFIG_NET_KEY is not set
220CONFIG_INET=y
221CONFIG_IP_MULTICAST=y
222# CONFIG_IP_ADVANCED_ROUTER is not set
223CONFIG_IP_FIB_HASH=y
224# CONFIG_IP_PNP is not set
225# CONFIG_NET_IPIP is not set
226# CONFIG_NET_IPGRE is not set
227# CONFIG_IP_MROUTE is not set
228# CONFIG_ARPD is not set
229CONFIG_SYN_COOKIES=y
230# CONFIG_INET_AH is not set
231# CONFIG_INET_ESP is not set
232# CONFIG_INET_IPCOMP is not set
233# CONFIG_INET_XFRM_TUNNEL is not set
234# CONFIG_INET_TUNNEL is not set
235# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
236# CONFIG_INET_XFRM_MODE_TUNNEL is not set
237CONFIG_INET_DIAG=y
238CONFIG_INET_TCP_DIAG=y
239# CONFIG_TCP_CONG_ADVANCED is not set
240CONFIG_TCP_CONG_BIC=y
241
242#
243# IP: Virtual Server Configuration
244#
245# CONFIG_IP_VS is not set
246# CONFIG_IPV6 is not set
247# CONFIG_INET6_XFRM_TUNNEL is not set
248# CONFIG_INET6_TUNNEL is not set
249# CONFIG_NETWORK_SECMARK is not set
250CONFIG_NETFILTER=y
251# CONFIG_NETFILTER_DEBUG is not set
252
253#
254# Core Netfilter Configuration
255#
256# CONFIG_NETFILTER_NETLINK is not set
257# CONFIG_NETFILTER_XTABLES is not set
258
259#
260# IP: Netfilter Configuration
261#
262CONFIG_IP_NF_CONNTRACK=m
263# CONFIG_IP_NF_CT_ACCT is not set
264# CONFIG_IP_NF_CONNTRACK_MARK is not set
265# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
266# CONFIG_IP_NF_CT_PROTO_SCTP is not set
267CONFIG_IP_NF_FTP=m
268CONFIG_IP_NF_IRC=m
269# CONFIG_IP_NF_NETBIOS_NS is not set
270CONFIG_IP_NF_TFTP=m
271CONFIG_IP_NF_AMANDA=m
272# CONFIG_IP_NF_PPTP is not set
273# CONFIG_IP_NF_H323 is not set
274# CONFIG_IP_NF_SIP is not set
275# CONFIG_IP_NF_QUEUE is not set
276
277#
278# DCCP Configuration (EXPERIMENTAL)
279#
280# CONFIG_IP_DCCP is not set
281
282#
283# SCTP Configuration (EXPERIMENTAL)
284#
285# CONFIG_IP_SCTP is not set
286
287#
288# TIPC Configuration (EXPERIMENTAL)
289#
290# CONFIG_TIPC is not set
291# CONFIG_ATM is not set
292# CONFIG_BRIDGE is not set
293# CONFIG_VLAN_8021Q is not set
294# CONFIG_DECNET is not set
295# CONFIG_LLC2 is not set
296# CONFIG_IPX is not set
297# CONFIG_ATALK is not set
298# CONFIG_X25 is not set
299# CONFIG_LAPB is not set
300# CONFIG_NET_DIVERT is not set
301# CONFIG_ECONET is not set
302# CONFIG_WAN_ROUTER is not set
303
304#
305# QoS and/or fair queueing
306#
307# CONFIG_NET_SCHED is not set
308
309#
310# Network testing
311#
312# CONFIG_NET_PKTGEN is not set
313# CONFIG_HAMRADIO is not set
314# CONFIG_IRDA is not set
315# CONFIG_BT is not set
316# CONFIG_IEEE80211 is not set
317
318#
319# Device Drivers
320#
321
322#
323# Generic Driver Options
324#
325# CONFIG_STANDALONE is not set
326CONFIG_PREVENT_FIRMWARE_BUILD=y
327# CONFIG_FW_LOADER is not set
328# CONFIG_DEBUG_DRIVER is not set
329# CONFIG_SYS_HYPERVISOR is not set
330
331#
332# Connector - unified userspace <-> kernelspace linker
333#
334# CONFIG_CONNECTOR is not set
335
336#
337# Memory Technology Devices (MTD)
338#
339# CONFIG_MTD is not set
340
341#
342# Parallel port support
343#
344# CONFIG_PARPORT is not set
345
346#
347# Plug and Play support
348#
349# CONFIG_PNP is not set
350
351#
352# Block devices
353#
354CONFIG_BLK_DEV_FD=y
355# CONFIG_BLK_DEV_XD is not set
356# CONFIG_BLK_CPQ_DA is not set
357# CONFIG_BLK_CPQ_CISS_DA is not set
358# CONFIG_BLK_DEV_DAC960 is not set
359# CONFIG_BLK_DEV_UMEM is not set
360# CONFIG_BLK_DEV_COW_COMMON is not set
361CONFIG_BLK_DEV_LOOP=y
362# CONFIG_BLK_DEV_CRYPTOLOOP is not set
363# CONFIG_BLK_DEV_NBD is not set
364# CONFIG_BLK_DEV_SX8 is not set
365# CONFIG_BLK_DEV_UB is not set
366CONFIG_BLK_DEV_RAM=y
367CONFIG_BLK_DEV_RAM_COUNT=16
368CONFIG_BLK_DEV_RAM_SIZE=4096
369CONFIG_BLK_DEV_INITRD=y
370# CONFIG_CDROM_PKTCDVD is not set
371# CONFIG_ATA_OVER_ETH is not set
372
373#
374# ATA/ATAPI/MFM/RLL support
375#
376CONFIG_IDE=y
377CONFIG_BLK_DEV_IDE=y
378
379#
380# Please see Documentation/ide.txt for help/info on IDE drives
381#
382# CONFIG_BLK_DEV_IDE_SATA is not set
383CONFIG_BLK_DEV_IDEDISK=y
384CONFIG_IDEDISK_MULTI_MODE=y
385CONFIG_BLK_DEV_IDECD=y
386# CONFIG_BLK_DEV_IDETAPE is not set
387# CONFIG_BLK_DEV_IDEFLOPPY is not set
388# CONFIG_BLK_DEV_IDESCSI is not set
389# CONFIG_IDE_TASK_IOCTL is not set
390
391#
392# IDE chipset support/bugfixes
393#
394CONFIG_IDE_GENERIC=y
395CONFIG_BLK_DEV_IDEPCI=y
396CONFIG_IDEPCI_SHARE_IRQ=y
397# CONFIG_BLK_DEV_OFFBOARD is not set
398CONFIG_BLK_DEV_GENERIC=y
399# CONFIG_BLK_DEV_OPTI621 is not set
400CONFIG_BLK_DEV_SL82C105=y
401CONFIG_BLK_DEV_IDEDMA_PCI=y
402# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
403CONFIG_IDEDMA_PCI_AUTO=y
404# CONFIG_IDEDMA_ONLYDISK is not set
405# CONFIG_BLK_DEV_AEC62XX is not set
406# CONFIG_BLK_DEV_ALI15X3 is not set
407# CONFIG_BLK_DEV_AMD74XX is not set
408# CONFIG_BLK_DEV_CMD64X is not set
409# CONFIG_BLK_DEV_TRIFLEX is not set
410# CONFIG_BLK_DEV_CY82C693 is not set
411# CONFIG_BLK_DEV_CS5520 is not set
412# CONFIG_BLK_DEV_CS5530 is not set
413# CONFIG_BLK_DEV_HPT34X is not set
414# CONFIG_BLK_DEV_HPT366 is not set
415# CONFIG_BLK_DEV_SC1200 is not set
416# CONFIG_BLK_DEV_PIIX is not set
417# CONFIG_BLK_DEV_IT821X is not set
418# CONFIG_BLK_DEV_NS87415 is not set
419# CONFIG_BLK_DEV_PDC202XX_OLD is not set
420# CONFIG_BLK_DEV_PDC202XX_NEW is not set
421# CONFIG_BLK_DEV_SVWKS is not set
422# CONFIG_BLK_DEV_SIIMAGE is not set
423# CONFIG_BLK_DEV_SLC90E66 is not set
424# CONFIG_BLK_DEV_TRM290 is not set
425CONFIG_BLK_DEV_VIA82CXXX=y
426# CONFIG_IDE_ARM is not set
427# CONFIG_IDE_CHIPSETS is not set
428CONFIG_BLK_DEV_IDEDMA=y
429# CONFIG_IDEDMA_IVB is not set
430CONFIG_IDEDMA_AUTO=y
431# CONFIG_BLK_DEV_HD is not set
432
433#
434# SCSI device support
435#
436# CONFIG_RAID_ATTRS is not set
437CONFIG_SCSI=y
438CONFIG_SCSI_PROC_FS=y
439
440#
441# SCSI support type (disk, tape, CD-ROM)
442#
443CONFIG_BLK_DEV_SD=y
444CONFIG_CHR_DEV_ST=y
445# CONFIG_CHR_DEV_OSST is not set
446CONFIG_BLK_DEV_SR=y
447CONFIG_BLK_DEV_SR_VENDOR=y
448CONFIG_CHR_DEV_SG=y
449# CONFIG_CHR_DEV_SCH is not set
450
451#
452# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
453#
454# CONFIG_SCSI_MULTI_LUN is not set
455CONFIG_SCSI_CONSTANTS=y
456# CONFIG_SCSI_LOGGING is not set
457
458#
459# SCSI Transport Attributes
460#
461CONFIG_SCSI_SPI_ATTRS=y
462# CONFIG_SCSI_FC_ATTRS is not set
463# CONFIG_SCSI_ISCSI_ATTRS is not set
464# CONFIG_SCSI_SAS_ATTRS is not set
465
466#
467# SCSI low-level drivers
468#
469# CONFIG_ISCSI_TCP is not set
470# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
471# CONFIG_SCSI_3W_9XXX is not set
472# CONFIG_SCSI_7000FASST is not set
473# CONFIG_SCSI_ACARD is not set
474# CONFIG_SCSI_AHA152X is not set
475# CONFIG_SCSI_AHA1542 is not set
476# CONFIG_SCSI_AACRAID is not set
477# CONFIG_SCSI_AIC7XXX is not set
478# CONFIG_SCSI_AIC7XXX_OLD is not set
479# CONFIG_SCSI_AIC79XX is not set
480# CONFIG_SCSI_DPT_I2O is not set
481# CONFIG_SCSI_IN2000 is not set
482# CONFIG_MEGARAID_NEWGEN is not set
483# CONFIG_MEGARAID_LEGACY is not set
484# CONFIG_MEGARAID_SAS is not set
485# CONFIG_SCSI_SATA is not set
486# CONFIG_SCSI_HPTIOP is not set
487# CONFIG_SCSI_BUSLOGIC is not set
488# CONFIG_SCSI_DMX3191D is not set
489# CONFIG_SCSI_DTC3280 is not set
490# CONFIG_SCSI_EATA is not set
491# CONFIG_SCSI_FUTURE_DOMAIN is not set
492# CONFIG_SCSI_GDTH is not set
493# CONFIG_SCSI_GENERIC_NCR5380 is not set
494# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
495# CONFIG_SCSI_IPS is not set
496# CONFIG_SCSI_INITIO is not set
497# CONFIG_SCSI_INIA100 is not set
498# CONFIG_SCSI_NCR53C406A is not set
499CONFIG_SCSI_SYM53C8XX_2=y
500CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
501CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
502CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
503CONFIG_SCSI_SYM53C8XX_MMIO=y
504# CONFIG_SCSI_IPR is not set
505# CONFIG_SCSI_PAS16 is not set
506# CONFIG_SCSI_PSI240I is not set
507# CONFIG_SCSI_QLOGIC_FAS is not set
508# CONFIG_SCSI_QLOGIC_1280 is not set
509# CONFIG_SCSI_QLA_FC is not set
510# CONFIG_SCSI_LPFC is not set
511# CONFIG_SCSI_SYM53C416 is not set
512# CONFIG_SCSI_DC395x is not set
513# CONFIG_SCSI_DC390T is not set
514# CONFIG_SCSI_T128 is not set
515# CONFIG_SCSI_U14_34F is not set
516# CONFIG_SCSI_NSP32 is not set
517# CONFIG_SCSI_DEBUG is not set
518
519#
520# Old CD-ROM drivers (not SCSI, not IDE)
521#
522# CONFIG_CD_NO_IDESCSI is not set
523
524#
525# Multi-device support (RAID and LVM)
526#
527# CONFIG_MD is not set
528
529#
530# Fusion MPT device support
531#
532# CONFIG_FUSION is not set
533# CONFIG_FUSION_SPI is not set
534# CONFIG_FUSION_FC is not set
535# CONFIG_FUSION_SAS is not set
536
537#
538# IEEE 1394 (FireWire) support
539#
540# CONFIG_IEEE1394 is not set
541
542#
543# I2O device support
544#
545# CONFIG_I2O is not set
546
547#
548# Macintosh device drivers
549#
550# CONFIG_WINDFARM is not set
551
552#
553# Network device support
554#
555CONFIG_NETDEVICES=y
556# CONFIG_DUMMY is not set
557# CONFIG_BONDING is not set
558# CONFIG_EQUALIZER is not set
559# CONFIG_TUN is not set
560
561#
562# ARCnet devices
563#
564# CONFIG_ARCNET is not set
565
566#
567# PHY device support
568#
569# CONFIG_PHYLIB is not set
570
571#
572# Ethernet (10 or 100Mbit)
573#
574CONFIG_NET_ETHERNET=y
575CONFIG_MII=y
576# CONFIG_HAPPYMEAL is not set
577# CONFIG_SUNGEM is not set
578# CONFIG_CASSINI is not set
579# CONFIG_NET_VENDOR_3COM is not set
580# CONFIG_LANCE is not set
581# CONFIG_NET_VENDOR_SMC is not set
582# CONFIG_NET_VENDOR_RACAL is not set
583
584#
585# Tulip family network device support
586#
587CONFIG_NET_TULIP=y
588# CONFIG_DE2104X is not set
589# CONFIG_TULIP is not set
590CONFIG_DE4X5=y
591# CONFIG_WINBOND_840 is not set
592# CONFIG_DM9102 is not set
593# CONFIG_ULI526X is not set
594# CONFIG_AT1700 is not set
595# CONFIG_DEPCA is not set
596# CONFIG_HP100 is not set
597# CONFIG_NET_ISA is not set
598CONFIG_NET_PCI=y
599CONFIG_PCNET32=y
600# CONFIG_AMD8111_ETH is not set
601# CONFIG_ADAPTEC_STARFIRE is not set
602# CONFIG_AC3200 is not set
603# CONFIG_APRICOT is not set
604# CONFIG_B44 is not set
605# CONFIG_FORCEDETH is not set
606# CONFIG_CS89x0 is not set
607# CONFIG_DGRS is not set
608# CONFIG_EEPRO100 is not set
609# CONFIG_E100 is not set
610# CONFIG_FEALNX is not set
611# CONFIG_NATSEMI is not set
612# CONFIG_NE2K_PCI is not set
613CONFIG_8139CP=y
614CONFIG_8139TOO=y
615# CONFIG_8139TOO_PIO is not set
616# CONFIG_8139TOO_TUNE_TWISTER is not set
617# CONFIG_8139TOO_8129 is not set
618# CONFIG_8139_OLD_RX_RESET is not set
619# CONFIG_SIS900 is not set
620# CONFIG_EPIC100 is not set
621# CONFIG_SUNDANCE is not set
622# CONFIG_TLAN is not set
623CONFIG_VIA_RHINE=y
624# CONFIG_VIA_RHINE_MMIO is not set
625
626#
627# Ethernet (1000 Mbit)
628#
629# CONFIG_ACENIC is not set
630# CONFIG_DL2K is not set
631# CONFIG_E1000 is not set
632# CONFIG_NS83820 is not set
633# CONFIG_HAMACHI is not set
634# CONFIG_YELLOWFIN is not set
635# CONFIG_R8169 is not set
636# CONFIG_SIS190 is not set
637# CONFIG_SKGE is not set
638# CONFIG_SKY2 is not set
639# CONFIG_SK98LIN is not set
640# CONFIG_VIA_VELOCITY is not set
641# CONFIG_TIGON3 is not set
642# CONFIG_BNX2 is not set
643CONFIG_MV643XX_ETH=y
644# CONFIG_MV643XX_ETH_0 is not set
645# CONFIG_MV643XX_ETH_1 is not set
646# CONFIG_MV643XX_ETH_2 is not set
647
648#
649# Ethernet (10000 Mbit)
650#
651# CONFIG_CHELSIO_T1 is not set
652# CONFIG_IXGB is not set
653# CONFIG_S2IO is not set
654# CONFIG_MYRI10GE is not set
655
656#
657# Token Ring devices
658#
659# CONFIG_TR is not set
660
661#
662# Wireless LAN (non-hamradio)
663#
664# CONFIG_NET_RADIO is not set
665
666#
667# Wan interfaces
668#
669# CONFIG_WAN is not set
670# CONFIG_FDDI is not set
671# CONFIG_HIPPI is not set
672CONFIG_PPP=m
673CONFIG_PPP_MULTILINK=y
674CONFIG_PPP_FILTER=y
675CONFIG_PPP_ASYNC=m
676CONFIG_PPP_SYNC_TTY=m
677CONFIG_PPP_DEFLATE=m
678CONFIG_PPP_BSDCOMP=m
679CONFIG_PPP_MPPE=m
680CONFIG_PPPOE=m
681# CONFIG_SLIP is not set
682# CONFIG_NET_FC is not set
683# CONFIG_SHAPER is not set
684# CONFIG_NETCONSOLE is not set
685# CONFIG_NETPOLL is not set
686# CONFIG_NET_POLL_CONTROLLER is not set
687
688#
689# ISDN subsystem
690#
691# CONFIG_ISDN is not set
692
693#
694# Telephony Support
695#
696# CONFIG_PHONE is not set
697
698#
699# Input device support
700#
701CONFIG_INPUT=y
702
703#
704# Userland interfaces
705#
706CONFIG_INPUT_MOUSEDEV=y
707CONFIG_INPUT_MOUSEDEV_PSAUX=y
708CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
709CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
710# CONFIG_INPUT_JOYDEV is not set
711# CONFIG_INPUT_TSDEV is not set
712CONFIG_INPUT_EVDEV=y
713# CONFIG_INPUT_EVBUG is not set
714
715#
716# Input Device Drivers
717#
718CONFIG_INPUT_KEYBOARD=y
719CONFIG_KEYBOARD_ATKBD=y
720# CONFIG_KEYBOARD_SUNKBD is not set
721# CONFIG_KEYBOARD_LKKBD is not set
722# CONFIG_KEYBOARD_XTKBD is not set
723# CONFIG_KEYBOARD_NEWTON is not set
724CONFIG_INPUT_MOUSE=y
725CONFIG_MOUSE_PS2=y
726# CONFIG_MOUSE_SERIAL is not set
727# CONFIG_MOUSE_INPORT is not set
728# CONFIG_MOUSE_LOGIBM is not set
729# CONFIG_MOUSE_PC110PAD is not set
730# CONFIG_MOUSE_VSXXXAA is not set
731# CONFIG_INPUT_JOYSTICK is not set
732# CONFIG_INPUT_TOUCHSCREEN is not set
733CONFIG_INPUT_MISC=y
734# CONFIG_INPUT_PCSPKR is not set
735CONFIG_INPUT_UINPUT=y
736
737#
738# Hardware I/O ports
739#
740CONFIG_SERIO=y
741CONFIG_SERIO_I8042=y
742CONFIG_SERIO_SERPORT=y
743# CONFIG_SERIO_PCIPS2 is not set
744CONFIG_SERIO_LIBPS2=y
745# CONFIG_SERIO_RAW is not set
746# CONFIG_GAMEPORT is not set
747
748#
749# Character devices
750#
751CONFIG_VT=y
752CONFIG_VT_CONSOLE=y
753CONFIG_HW_CONSOLE=y
754# CONFIG_SERIAL_NONSTANDARD is not set
755
756#
757# Serial drivers
758#
759CONFIG_SERIAL_8250=y
760CONFIG_SERIAL_8250_CONSOLE=y
761CONFIG_SERIAL_8250_PCI=y
762CONFIG_SERIAL_8250_NR_UARTS=4
763CONFIG_SERIAL_8250_RUNTIME_UARTS=4
764# CONFIG_SERIAL_8250_EXTENDED is not set
765
766#
767# Non-8250 serial port support
768#
769CONFIG_SERIAL_CORE=y
770CONFIG_SERIAL_CORE_CONSOLE=y
771# CONFIG_SERIAL_JSM is not set
772CONFIG_UNIX98_PTYS=y
773CONFIG_LEGACY_PTYS=y
774CONFIG_LEGACY_PTY_COUNT=256
775# CONFIG_HVC_RTAS is not set
776
777#
778# IPMI
779#
780# CONFIG_IPMI_HANDLER is not set
781
782#
783# Watchdog Cards
784#
785# CONFIG_WATCHDOG is not set
786CONFIG_NVRAM=y
787CONFIG_GEN_RTC=y
788# CONFIG_GEN_RTC_X is not set
789# CONFIG_DTLK is not set
790# CONFIG_R3964 is not set
791# CONFIG_APPLICOM is not set
792
793#
794# Ftape, the floppy tape device driver
795#
796# CONFIG_AGP is not set
797# CONFIG_DRM is not set
798# CONFIG_RAW_DRIVER is not set
799
800#
801# TPM devices
802#
803# CONFIG_TCG_TPM is not set
804# CONFIG_TELCLOCK is not set
805
806#
807# I2C support
808#
809CONFIG_I2C=y
810# CONFIG_I2C_CHARDEV is not set
811
812#
813# I2C Algorithms
814#
815CONFIG_I2C_ALGOBIT=y
816# CONFIG_I2C_ALGOPCF is not set
817# CONFIG_I2C_ALGOPCA is not set
818
819#
820# I2C Hardware Bus support
821#
822# CONFIG_I2C_ALI1535 is not set
823# CONFIG_I2C_ALI1563 is not set
824# CONFIG_I2C_ALI15X3 is not set
825# CONFIG_I2C_AMD756 is not set
826# CONFIG_I2C_AMD8111 is not set
827# CONFIG_I2C_HYDRA is not set
828# CONFIG_I2C_I801 is not set
829# CONFIG_I2C_I810 is not set
830# CONFIG_I2C_PIIX4 is not set
831# CONFIG_I2C_MPC is not set
832# CONFIG_I2C_NFORCE2 is not set
833# CONFIG_I2C_OCORES is not set
834# CONFIG_I2C_PARPORT_LIGHT is not set
835# CONFIG_I2C_PROSAVAGE is not set
836# CONFIG_I2C_SAVAGE4 is not set
837# CONFIG_I2C_SIS5595 is not set
838# CONFIG_I2C_SIS630 is not set
839# CONFIG_I2C_SIS96X is not set
840# CONFIG_I2C_STUB is not set
841# CONFIG_I2C_VIA is not set
842# CONFIG_I2C_VIAPRO is not set
843# CONFIG_I2C_VOODOO3 is not set
844# CONFIG_I2C_PCA_ISA is not set
845
846#
847# Miscellaneous I2C Chip support
848#
849# CONFIG_SENSORS_DS1337 is not set
850# CONFIG_SENSORS_DS1374 is not set
851# CONFIG_SENSORS_EEPROM is not set
852# CONFIG_SENSORS_PCF8574 is not set
853# CONFIG_SENSORS_PCA9539 is not set
854# CONFIG_SENSORS_PCF8591 is not set
855# CONFIG_SENSORS_M41T00 is not set
856# CONFIG_SENSORS_MAX6875 is not set
857# CONFIG_I2C_DEBUG_CORE is not set
858# CONFIG_I2C_DEBUG_ALGO is not set
859# CONFIG_I2C_DEBUG_BUS is not set
860# CONFIG_I2C_DEBUG_CHIP is not set
861
862#
863# SPI support
864#
865# CONFIG_SPI is not set
866# CONFIG_SPI_MASTER is not set
867
868#
869# Dallas's 1-wire bus
870#
871
872#
873# Hardware Monitoring support
874#
875# CONFIG_HWMON is not set
876# CONFIG_HWMON_VID is not set
877
878#
879# Misc devices
880#
881
882#
883# Multimedia devices
884#
885# CONFIG_VIDEO_DEV is not set
886CONFIG_VIDEO_V4L2=y
887
888#
889# Digital Video Broadcasting Devices
890#
891# CONFIG_DVB is not set
892# CONFIG_USB_DABUSB is not set
893
894#
895# Graphics support
896#
897CONFIG_FB=y
898CONFIG_FB_CFB_FILLRECT=y
899CONFIG_FB_CFB_COPYAREA=y
900CONFIG_FB_CFB_IMAGEBLIT=y
901CONFIG_FB_MACMODES=y
902CONFIG_FB_FIRMWARE_EDID=y
903# CONFIG_FB_BACKLIGHT is not set
904CONFIG_FB_MODE_HELPERS=y
905CONFIG_FB_TILEBLITTING=y
906# CONFIG_FB_CIRRUS is not set
907# CONFIG_FB_PM2 is not set
908# CONFIG_FB_CYBER2000 is not set
909CONFIG_FB_OF=y
910# CONFIG_FB_CT65550 is not set
911# CONFIG_FB_ASILIANT is not set
912# CONFIG_FB_IMSTT is not set
913# CONFIG_FB_VGA16 is not set
914# CONFIG_FB_S1D13XXX is not set
915# CONFIG_FB_NVIDIA is not set
916# CONFIG_FB_RIVA is not set
917CONFIG_FB_MATROX=y
918CONFIG_FB_MATROX_MILLENIUM=y
919CONFIG_FB_MATROX_MYSTIQUE=y
920CONFIG_FB_MATROX_G=y
921# CONFIG_FB_MATROX_I2C is not set
922# CONFIG_FB_MATROX_MULTIHEAD is not set
923CONFIG_FB_RADEON=y
924CONFIG_FB_RADEON_I2C=y
925# CONFIG_FB_RADEON_DEBUG is not set
926# CONFIG_FB_ATY128 is not set
927CONFIG_FB_ATY=y
928CONFIG_FB_ATY_CT=y
929# CONFIG_FB_ATY_GENERIC_LCD is not set
930CONFIG_FB_ATY_GX=y
931# CONFIG_FB_SAVAGE is not set
932# CONFIG_FB_SIS is not set
933# CONFIG_FB_NEOMAGIC is not set
934# CONFIG_FB_KYRO is not set
935CONFIG_FB_3DFX=y
936# CONFIG_FB_3DFX_ACCEL is not set
937# CONFIG_FB_VOODOO1 is not set
938# CONFIG_FB_TRIDENT is not set
939# CONFIG_FB_VIRTUAL is not set
940
941#
942# Console display driver support
943#
944CONFIG_VGA_CONSOLE=y
945# CONFIG_VGACON_SOFT_SCROLLBACK is not set
946# CONFIG_MDA_CONSOLE is not set
947CONFIG_DUMMY_CONSOLE=y
948CONFIG_FRAMEBUFFER_CONSOLE=y
949# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
950# CONFIG_FONTS is not set
951CONFIG_FONT_8x8=y
952CONFIG_FONT_8x16=y
953
954#
955# Logo configuration
956#
957CONFIG_LOGO=y
958CONFIG_LOGO_LINUX_MONO=y
959CONFIG_LOGO_LINUX_VGA16=y
960CONFIG_LOGO_LINUX_CLUT224=y
961# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
962
963#
964# Sound
965#
966# CONFIG_SOUND is not set
967
968#
969# USB support
970#
971CONFIG_USB_ARCH_HAS_HCD=y
972CONFIG_USB_ARCH_HAS_OHCI=y
973CONFIG_USB_ARCH_HAS_EHCI=y
974CONFIG_USB=y
975# CONFIG_USB_DEBUG is not set
976
977#
978# Miscellaneous USB options
979#
980CONFIG_USB_DEVICEFS=y
981# CONFIG_USB_BANDWIDTH is not set
982# CONFIG_USB_DYNAMIC_MINORS is not set
983# CONFIG_USB_OTG is not set
984
985#
986# USB Host Controller Drivers
987#
988CONFIG_USB_EHCI_HCD=m
989# CONFIG_USB_EHCI_SPLIT_ISO is not set
990# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
991# CONFIG_USB_EHCI_TT_NEWSCHED is not set
992# CONFIG_USB_ISP116X_HCD is not set
993CONFIG_USB_OHCI_HCD=y
994# CONFIG_USB_OHCI_BIG_ENDIAN is not set
995CONFIG_USB_OHCI_LITTLE_ENDIAN=y
996CONFIG_USB_UHCI_HCD=y
997# CONFIG_USB_SL811_HCD is not set
998
999#
1000# USB Device Class drivers
1001#
1002# CONFIG_USB_ACM is not set
1003# CONFIG_USB_PRINTER is not set
1004
1005#
1006# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
1007#
1008
1009#
1010# may also be needed; see USB_STORAGE Help for more information
1011#
1012CONFIG_USB_STORAGE=m
1013# CONFIG_USB_STORAGE_DEBUG is not set
1014# CONFIG_USB_STORAGE_DATAFAB is not set
1015# CONFIG_USB_STORAGE_FREECOM is not set
1016# CONFIG_USB_STORAGE_ISD200 is not set
1017# CONFIG_USB_STORAGE_DPCM is not set
1018# CONFIG_USB_STORAGE_USBAT is not set
1019# CONFIG_USB_STORAGE_SDDR09 is not set
1020# CONFIG_USB_STORAGE_SDDR55 is not set
1021# CONFIG_USB_STORAGE_JUMPSHOT is not set
1022# CONFIG_USB_STORAGE_ALAUDA is not set
1023# CONFIG_USB_STORAGE_ONETOUCH is not set
1024# CONFIG_USB_LIBUSUAL is not set
1025
1026#
1027# USB Input Devices
1028#
1029CONFIG_USB_HID=y
1030CONFIG_USB_HIDINPUT=y
1031# CONFIG_USB_HIDINPUT_POWERBOOK is not set
1032# CONFIG_HID_FF is not set
1033# CONFIG_USB_HIDDEV is not set
1034# CONFIG_USB_AIPTEK is not set
1035# CONFIG_USB_WACOM is not set
1036# CONFIG_USB_ACECAD is not set
1037# CONFIG_USB_KBTAB is not set
1038# CONFIG_USB_POWERMATE is not set
1039# CONFIG_USB_TOUCHSCREEN is not set
1040# CONFIG_USB_YEALINK is not set
1041# CONFIG_USB_XPAD is not set
1042# CONFIG_USB_ATI_REMOTE is not set
1043# CONFIG_USB_ATI_REMOTE2 is not set
1044# CONFIG_USB_KEYSPAN_REMOTE is not set
1045# CONFIG_USB_APPLETOUCH is not set
1046
1047#
1048# USB Imaging devices
1049#
1050# CONFIG_USB_MDC800 is not set
1051# CONFIG_USB_MICROTEK is not set
1052
1053#
1054# USB Network Adapters
1055#
1056# CONFIG_USB_CATC is not set
1057# CONFIG_USB_KAWETH is not set
1058# CONFIG_USB_PEGASUS is not set
1059# CONFIG_USB_RTL8150 is not set
1060# CONFIG_USB_USBNET is not set
1061CONFIG_USB_MON=y
1062
1063#
1064# USB port drivers
1065#
1066
1067#
1068# USB Serial Converter support
1069#
1070# CONFIG_USB_SERIAL is not set
1071
1072#
1073# USB Miscellaneous drivers
1074#
1075# CONFIG_USB_EMI62 is not set
1076# CONFIG_USB_EMI26 is not set
1077# CONFIG_USB_AUERSWALD is not set
1078# CONFIG_USB_RIO500 is not set
1079# CONFIG_USB_LEGOTOWER is not set
1080# CONFIG_USB_LCD is not set
1081# CONFIG_USB_LED is not set
1082# CONFIG_USB_CY7C63 is not set
1083# CONFIG_USB_CYTHERM is not set
1084# CONFIG_USB_PHIDGETKIT is not set
1085# CONFIG_USB_PHIDGETSERVO is not set
1086# CONFIG_USB_IDMOUSE is not set
1087# CONFIG_USB_APPLEDISPLAY is not set
1088# CONFIG_USB_SISUSBVGA is not set
1089# CONFIG_USB_LD is not set
1090# CONFIG_USB_TEST is not set
1091
1092#
1093# USB DSL modem support
1094#
1095
1096#
1097# USB Gadget Support
1098#
1099# CONFIG_USB_GADGET is not set
1100
1101#
1102# MMC/SD Card support
1103#
1104# CONFIG_MMC is not set
1105
1106#
1107# LED devices
1108#
1109# CONFIG_NEW_LEDS is not set
1110
1111#
1112# LED drivers
1113#
1114
1115#
1116# LED Triggers
1117#
1118
1119#
1120# InfiniBand support
1121#
1122# CONFIG_INFINIBAND is not set
1123
1124#
1125# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1126#
1127
1128#
1129# Real Time Clock
1130#
1131# CONFIG_RTC_CLASS is not set
1132
1133#
1134# DMA Engine support
1135#
1136# CONFIG_DMA_ENGINE is not set
1137
1138#
1139# DMA Clients
1140#
1141
1142#
1143# DMA Devices
1144#
1145
1146#
1147# File systems
1148#
1149CONFIG_EXT2_FS=y
1150# CONFIG_EXT2_FS_XATTR is not set
1151# CONFIG_EXT2_FS_XIP is not set
1152CONFIG_EXT3_FS=y
1153CONFIG_EXT3_FS_XATTR=y
1154# CONFIG_EXT3_FS_POSIX_ACL is not set
1155# CONFIG_EXT3_FS_SECURITY is not set
1156CONFIG_JBD=y
1157# CONFIG_JBD_DEBUG is not set
1158CONFIG_FS_MBCACHE=y
1159# CONFIG_REISERFS_FS is not set
1160# CONFIG_JFS_FS is not set
1161# CONFIG_FS_POSIX_ACL is not set
1162# CONFIG_XFS_FS is not set
1163# CONFIG_OCFS2_FS is not set
1164# CONFIG_MINIX_FS is not set
1165# CONFIG_ROMFS_FS is not set
1166CONFIG_INOTIFY=y
1167CONFIG_INOTIFY_USER=y
1168# CONFIG_QUOTA is not set
1169CONFIG_DNOTIFY=y
1170# CONFIG_AUTOFS_FS is not set
1171# CONFIG_AUTOFS4_FS is not set
1172# CONFIG_FUSE_FS is not set
1173
1174#
1175# CD-ROM/DVD Filesystems
1176#
1177CONFIG_ISO9660_FS=y
1178# CONFIG_JOLIET is not set
1179# CONFIG_ZISOFS is not set
1180# CONFIG_UDF_FS is not set
1181
1182#
1183# DOS/FAT/NT Filesystems
1184#
1185CONFIG_FAT_FS=m
1186CONFIG_MSDOS_FS=m
1187CONFIG_VFAT_FS=m
1188CONFIG_FAT_DEFAULT_CODEPAGE=437
1189CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1190# CONFIG_NTFS_FS is not set
1191
1192#
1193# Pseudo filesystems
1194#
1195CONFIG_PROC_FS=y
1196CONFIG_PROC_KCORE=y
1197CONFIG_SYSFS=y
1198CONFIG_TMPFS=y
1199# CONFIG_HUGETLB_PAGE is not set
1200CONFIG_RAMFS=y
1201# CONFIG_CONFIGFS_FS is not set
1202
1203#
1204# Miscellaneous filesystems
1205#
1206# CONFIG_ADFS_FS is not set
1207# CONFIG_AFFS_FS is not set
1208# CONFIG_HFS_FS is not set
1209# CONFIG_HFSPLUS_FS is not set
1210# CONFIG_BEFS_FS is not set
1211# CONFIG_BFS_FS is not set
1212# CONFIG_EFS_FS is not set
1213# CONFIG_CRAMFS is not set
1214# CONFIG_VXFS_FS is not set
1215# CONFIG_HPFS_FS is not set
1216# CONFIG_QNX4FS_FS is not set
1217# CONFIG_SYSV_FS is not set
1218# CONFIG_UFS_FS is not set
1219
1220#
1221# Network File Systems
1222#
1223# CONFIG_NFS_FS is not set
1224# CONFIG_NFSD is not set
1225# CONFIG_SMB_FS is not set
1226# CONFIG_CIFS is not set
1227# CONFIG_NCP_FS is not set
1228# CONFIG_CODA_FS is not set
1229# CONFIG_AFS_FS is not set
1230# CONFIG_9P_FS is not set
1231
1232#
1233# Partition Types
1234#
1235CONFIG_PARTITION_ADVANCED=y
1236# CONFIG_ACORN_PARTITION is not set
1237# CONFIG_OSF_PARTITION is not set
1238# CONFIG_AMIGA_PARTITION is not set
1239# CONFIG_ATARI_PARTITION is not set
1240CONFIG_MAC_PARTITION=y
1241CONFIG_MSDOS_PARTITION=y
1242# CONFIG_BSD_DISKLABEL is not set
1243# CONFIG_MINIX_SUBPARTITION is not set
1244# CONFIG_SOLARIS_X86_PARTITION is not set
1245# CONFIG_UNIXWARE_DISKLABEL is not set
1246# CONFIG_LDM_PARTITION is not set
1247# CONFIG_SGI_PARTITION is not set
1248# CONFIG_ULTRIX_PARTITION is not set
1249# CONFIG_SUN_PARTITION is not set
1250# CONFIG_KARMA_PARTITION is not set
1251# CONFIG_EFI_PARTITION is not set
1252
1253#
1254# Native Language Support
1255#
1256CONFIG_NLS=y
1257CONFIG_NLS_DEFAULT="iso8859-1"
1258# CONFIG_NLS_CODEPAGE_437 is not set
1259# CONFIG_NLS_CODEPAGE_737 is not set
1260# CONFIG_NLS_CODEPAGE_775 is not set
1261# CONFIG_NLS_CODEPAGE_850 is not set
1262# CONFIG_NLS_CODEPAGE_852 is not set
1263# CONFIG_NLS_CODEPAGE_855 is not set
1264# CONFIG_NLS_CODEPAGE_857 is not set
1265# CONFIG_NLS_CODEPAGE_860 is not set
1266# CONFIG_NLS_CODEPAGE_861 is not set
1267# CONFIG_NLS_CODEPAGE_862 is not set
1268# CONFIG_NLS_CODEPAGE_863 is not set
1269# CONFIG_NLS_CODEPAGE_864 is not set
1270# CONFIG_NLS_CODEPAGE_865 is not set
1271# CONFIG_NLS_CODEPAGE_866 is not set
1272# CONFIG_NLS_CODEPAGE_869 is not set
1273# CONFIG_NLS_CODEPAGE_936 is not set
1274# CONFIG_NLS_CODEPAGE_950 is not set
1275# CONFIG_NLS_CODEPAGE_932 is not set
1276# CONFIG_NLS_CODEPAGE_949 is not set
1277# CONFIG_NLS_CODEPAGE_874 is not set
1278# CONFIG_NLS_ISO8859_8 is not set
1279# CONFIG_NLS_CODEPAGE_1250 is not set
1280# CONFIG_NLS_CODEPAGE_1251 is not set
1281CONFIG_NLS_ASCII=y
1282CONFIG_NLS_ISO8859_1=m
1283# CONFIG_NLS_ISO8859_2 is not set
1284# CONFIG_NLS_ISO8859_3 is not set
1285# CONFIG_NLS_ISO8859_4 is not set
1286# CONFIG_NLS_ISO8859_5 is not set
1287# CONFIG_NLS_ISO8859_6 is not set
1288# CONFIG_NLS_ISO8859_7 is not set
1289# CONFIG_NLS_ISO8859_9 is not set
1290# CONFIG_NLS_ISO8859_13 is not set
1291# CONFIG_NLS_ISO8859_14 is not set
1292# CONFIG_NLS_ISO8859_15 is not set
1293# CONFIG_NLS_KOI8_R is not set
1294# CONFIG_NLS_KOI8_U is not set
1295# CONFIG_NLS_UTF8 is not set
1296
1297#
1298# Library routines
1299#
1300CONFIG_CRC_CCITT=m
1301# CONFIG_CRC16 is not set
1302CONFIG_CRC32=y
1303# CONFIG_LIBCRC32C is not set
1304CONFIG_ZLIB_INFLATE=m
1305CONFIG_ZLIB_DEFLATE=m
1306CONFIG_TEXTSEARCH=y
1307CONFIG_TEXTSEARCH_KMP=m
1308
1309#
1310# Instrumentation Support
1311#
1312# CONFIG_PROFILING is not set
1313
1314#
1315# Kernel hacking
1316#
1317# CONFIG_PRINTK_TIME is not set
1318CONFIG_MAGIC_SYSRQ=y
1319CONFIG_DEBUG_KERNEL=y
1320CONFIG_LOG_BUF_SHIFT=15
1321CONFIG_DETECT_SOFTLOCKUP=y
1322# CONFIG_SCHEDSTATS is not set
1323# CONFIG_DEBUG_SLAB is not set
1324CONFIG_DEBUG_MUTEXES=y
1325# CONFIG_DEBUG_SPINLOCK is not set
1326CONFIG_DEBUG_SPINLOCK_SLEEP=y
1327# CONFIG_DEBUG_KOBJECT is not set
1328# CONFIG_DEBUG_HIGHMEM is not set
1329# CONFIG_DEBUG_INFO is not set
1330# CONFIG_DEBUG_FS is not set
1331# CONFIG_DEBUG_VM is not set
1332CONFIG_FORCED_INLINING=y
1333# CONFIG_RCU_TORTURE_TEST is not set
1334CONFIG_DEBUGGER=y
1335CONFIG_XMON=y
1336CONFIG_XMON_DEFAULT=y
1337# CONFIG_BDI_SWITCH is not set
1338# CONFIG_BOOTX_TEXT is not set
1339# CONFIG_PPC_EARLY_DEBUG is not set
1340
1341#
1342# Security options
1343#
1344# CONFIG_KEYS is not set
1345# CONFIG_SECURITY is not set
1346
1347#
1348# Cryptographic options
1349#
1350CONFIG_CRYPTO=y
1351# CONFIG_CRYPTO_HMAC is not set
1352# CONFIG_CRYPTO_NULL is not set
1353# CONFIG_CRYPTO_MD4 is not set
1354# CONFIG_CRYPTO_MD5 is not set
1355CONFIG_CRYPTO_SHA1=m
1356# CONFIG_CRYPTO_SHA256 is not set
1357# CONFIG_CRYPTO_SHA512 is not set
1358# CONFIG_CRYPTO_WP512 is not set
1359# CONFIG_CRYPTO_TGR192 is not set
1360# CONFIG_CRYPTO_DES is not set
1361# CONFIG_CRYPTO_BLOWFISH is not set
1362# CONFIG_CRYPTO_TWOFISH is not set
1363# CONFIG_CRYPTO_SERPENT is not set
1364# CONFIG_CRYPTO_AES is not set
1365# CONFIG_CRYPTO_CAST5 is not set
1366# CONFIG_CRYPTO_CAST6 is not set
1367# CONFIG_CRYPTO_TEA is not set
1368CONFIG_CRYPTO_ARC4=m
1369# CONFIG_CRYPTO_KHAZAD is not set
1370# CONFIG_CRYPTO_ANUBIS is not set
1371# CONFIG_CRYPTO_DEFLATE is not set
1372# CONFIG_CRYPTO_MICHAEL_MIC is not set
1373# CONFIG_CRYPTO_CRC32C is not set
1374# CONFIG_CRYPTO_TEST is not set
1375
1376#
1377# Hardware crypto devices
1378#
diff --git a/arch/powerpc/configs/mpc834x_itx_defconfig b/arch/powerpc/configs/mpc834x_itx_defconfig
new file mode 100644
index 000000000000..fc2d9789adc8
--- /dev/null
+++ b/arch/powerpc/configs/mpc834x_itx_defconfig
@@ -0,0 +1,1336 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17
4# Fri Jun 30 17:53:25 2006
5#
6# CONFIG_PPC64 is not set
7CONFIG_PPC32=y
8CONFIG_PPC_MERGE=y
9CONFIG_MMU=y
10CONFIG_GENERIC_HARDIRQS=y
11CONFIG_IRQ_PER_CPU=y
12CONFIG_RWSEM_XCHGADD_ALGORITHM=y
13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_CALIBRATE_DELAY=y
15CONFIG_GENERIC_FIND_NEXT_BIT=y
16CONFIG_PPC=y
17CONFIG_EARLY_PRINTK=y
18CONFIG_GENERIC_NVRAM=y
19CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
20CONFIG_ARCH_MAY_HAVE_PC_FDC=y
21CONFIG_PPC_OF=y
22CONFIG_PPC_UDBG_16550=y
23# CONFIG_GENERIC_TBSYNC is not set
24CONFIG_DEFAULT_UIMAGE=y
25
26#
27# Processor support
28#
29# CONFIG_CLASSIC32 is not set
30# CONFIG_PPC_52xx is not set
31# CONFIG_PPC_82xx is not set
32CONFIG_PPC_83xx=y
33# CONFIG_PPC_85xx is not set
34# CONFIG_PPC_86xx is not set
35# CONFIG_40x is not set
36# CONFIG_44x is not set
37# CONFIG_8xx is not set
38# CONFIG_E200 is not set
39CONFIG_6xx=y
40CONFIG_83xx=y
41CONFIG_PPC_FPU=y
42CONFIG_PPC_STD_MMU=y
43CONFIG_PPC_STD_MMU_32=y
44# CONFIG_SMP is not set
45CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
46
47#
48# Code maturity level options
49#
50CONFIG_EXPERIMENTAL=y
51CONFIG_BROKEN_ON_SMP=y
52CONFIG_INIT_ENV_ARG_LIMIT=32
53
54#
55# General setup
56#
57CONFIG_LOCALVERSION=""
58CONFIG_LOCALVERSION_AUTO=y
59CONFIG_SWAP=y
60CONFIG_SYSVIPC=y
61# CONFIG_POSIX_MQUEUE is not set
62# CONFIG_BSD_PROCESS_ACCT is not set
63CONFIG_SYSCTL=y
64# CONFIG_AUDIT is not set
65# CONFIG_IKCONFIG is not set
66# CONFIG_RELAY is not set
67CONFIG_INITRAMFS_SOURCE=""
68# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
69CONFIG_EMBEDDED=y
70# CONFIG_KALLSYMS is not set
71CONFIG_HOTPLUG=y
72CONFIG_PRINTK=y
73CONFIG_BUG=y
74CONFIG_ELF_CORE=y
75CONFIG_BASE_FULL=y
76CONFIG_RT_MUTEXES=y
77CONFIG_FUTEX=y
78# CONFIG_EPOLL is not set
79CONFIG_SHMEM=y
80CONFIG_SLAB=y
81# CONFIG_TINY_SHMEM is not set
82CONFIG_BASE_SMALL=0
83# CONFIG_SLOB is not set
84
85#
86# Loadable module support
87#
88CONFIG_MODULES=y
89CONFIG_MODULE_UNLOAD=y
90# CONFIG_MODULE_FORCE_UNLOAD is not set
91# CONFIG_MODVERSIONS is not set
92# CONFIG_MODULE_SRCVERSION_ALL is not set
93# CONFIG_KMOD is not set
94
95#
96# Block layer
97#
98# CONFIG_LBD is not set
99# CONFIG_BLK_DEV_IO_TRACE is not set
100# CONFIG_LSF is not set
101
102#
103# IO Schedulers
104#
105CONFIG_IOSCHED_NOOP=y
106CONFIG_IOSCHED_AS=y
107CONFIG_IOSCHED_DEADLINE=y
108CONFIG_IOSCHED_CFQ=y
109CONFIG_DEFAULT_AS=y
110# CONFIG_DEFAULT_DEADLINE is not set
111# CONFIG_DEFAULT_CFQ is not set
112# CONFIG_DEFAULT_NOOP is not set
113CONFIG_DEFAULT_IOSCHED="anticipatory"
114CONFIG_PPC_GEN550=y
115# CONFIG_WANT_EARLY_SERIAL is not set
116
117#
118# Platform support
119#
120# CONFIG_MPC834x_SYS is not set
121CONFIG_MPC834x_ITX=y
122CONFIG_MPC834x=y
123
124#
125# Kernel options
126#
127# CONFIG_HIGHMEM is not set
128# CONFIG_HZ_100 is not set
129CONFIG_HZ_250=y
130# CONFIG_HZ_1000 is not set
131CONFIG_HZ=250
132CONFIG_PREEMPT_NONE=y
133# CONFIG_PREEMPT_VOLUNTARY is not set
134# CONFIG_PREEMPT is not set
135CONFIG_BINFMT_ELF=y
136# CONFIG_BINFMT_MISC is not set
137CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
138CONFIG_ARCH_FLATMEM_ENABLE=y
139CONFIG_SELECT_MEMORY_MODEL=y
140CONFIG_FLATMEM_MANUAL=y
141# CONFIG_DISCONTIGMEM_MANUAL is not set
142# CONFIG_SPARSEMEM_MANUAL is not set
143CONFIG_FLATMEM=y
144CONFIG_FLAT_NODE_MEM_MAP=y
145# CONFIG_SPARSEMEM_STATIC is not set
146CONFIG_SPLIT_PTLOCK_CPUS=4
147# CONFIG_RESOURCES_64BIT is not set
148CONFIG_PROC_DEVICETREE=y
149# CONFIG_CMDLINE_BOOL is not set
150# CONFIG_PM is not set
151# CONFIG_SOFTWARE_SUSPEND is not set
152CONFIG_SECCOMP=y
153CONFIG_ISA_DMA_API=y
154
155#
156# Bus options
157#
158CONFIG_GENERIC_ISA_DMA=y
159# CONFIG_PPC_I8259 is not set
160CONFIG_PPC_INDIRECT_PCI=y
161CONFIG_FSL_SOC=y
162CONFIG_PCI=y
163CONFIG_PCI_DOMAINS=y
164# CONFIG_PCIEPORTBUS is not set
165# CONFIG_PCI_DEBUG is not set
166
167#
168# PCCARD (PCMCIA/CardBus) support
169#
170# CONFIG_PCCARD is not set
171
172#
173# PCI Hotplug Support
174#
175# CONFIG_HOTPLUG_PCI is not set
176
177#
178# Advanced setup
179#
180# CONFIG_ADVANCED_OPTIONS is not set
181
182#
183# Default settings for advanced configuration options are used
184#
185CONFIG_HIGHMEM_START=0xfe000000
186CONFIG_LOWMEM_SIZE=0x30000000
187CONFIG_KERNEL_START=0xc0000000
188CONFIG_TASK_SIZE=0x80000000
189CONFIG_BOOT_LOAD=0x00800000
190
191#
192# Networking
193#
194CONFIG_NET=y
195
196#
197# Networking options
198#
199# CONFIG_NETDEBUG is not set
200CONFIG_PACKET=y
201# CONFIG_PACKET_MMAP is not set
202CONFIG_UNIX=y
203CONFIG_XFRM=y
204# CONFIG_XFRM_USER is not set
205# CONFIG_NET_KEY is not set
206CONFIG_INET=y
207CONFIG_IP_MULTICAST=y
208# CONFIG_IP_ADVANCED_ROUTER is not set
209CONFIG_IP_FIB_HASH=y
210CONFIG_IP_PNP=y
211CONFIG_IP_PNP_DHCP=y
212CONFIG_IP_PNP_BOOTP=y
213# CONFIG_IP_PNP_RARP is not set
214# CONFIG_NET_IPIP is not set
215# CONFIG_NET_IPGRE is not set
216# CONFIG_IP_MROUTE is not set
217# CONFIG_ARPD is not set
218CONFIG_SYN_COOKIES=y
219# CONFIG_INET_AH is not set
220# CONFIG_INET_ESP is not set
221# CONFIG_INET_IPCOMP is not set
222# CONFIG_INET_XFRM_TUNNEL is not set
223# CONFIG_INET_TUNNEL is not set
224CONFIG_INET_XFRM_MODE_TRANSPORT=y
225CONFIG_INET_XFRM_MODE_TUNNEL=y
226CONFIG_INET_DIAG=y
227CONFIG_INET_TCP_DIAG=y
228# CONFIG_TCP_CONG_ADVANCED is not set
229CONFIG_TCP_CONG_BIC=y
230# CONFIG_IPV6 is not set
231# CONFIG_INET6_XFRM_TUNNEL is not set
232# CONFIG_INET6_TUNNEL is not set
233# CONFIG_NETWORK_SECMARK is not set
234# CONFIG_NETFILTER is not set
235
236#
237# DCCP Configuration (EXPERIMENTAL)
238#
239# CONFIG_IP_DCCP is not set
240
241#
242# SCTP Configuration (EXPERIMENTAL)
243#
244# CONFIG_IP_SCTP is not set
245
246#
247# TIPC Configuration (EXPERIMENTAL)
248#
249# CONFIG_TIPC is not set
250# CONFIG_ATM is not set
251# CONFIG_BRIDGE is not set
252# CONFIG_VLAN_8021Q is not set
253# CONFIG_DECNET is not set
254# CONFIG_LLC2 is not set
255# CONFIG_IPX is not set
256# CONFIG_ATALK is not set
257# CONFIG_X25 is not set
258# CONFIG_LAPB is not set
259# CONFIG_NET_DIVERT is not set
260# CONFIG_ECONET is not set
261# CONFIG_WAN_ROUTER is not set
262
263#
264# QoS and/or fair queueing
265#
266# CONFIG_NET_SCHED is not set
267
268#
269# Network testing
270#
271# CONFIG_NET_PKTGEN is not set
272# CONFIG_HAMRADIO is not set
273# CONFIG_IRDA is not set
274# CONFIG_BT is not set
275# CONFIG_IEEE80211 is not set
276
277#
278# Device Drivers
279#
280
281#
282# Generic Driver Options
283#
284CONFIG_STANDALONE=y
285CONFIG_PREVENT_FIRMWARE_BUILD=y
286# CONFIG_FW_LOADER is not set
287# CONFIG_DEBUG_DRIVER is not set
288# CONFIG_SYS_HYPERVISOR is not set
289
290#
291# Connector - unified userspace <-> kernelspace linker
292#
293# CONFIG_CONNECTOR is not set
294
295#
296# Memory Technology Devices (MTD)
297#
298CONFIG_MTD=y
299# CONFIG_MTD_DEBUG is not set
300# CONFIG_MTD_CONCAT is not set
301# CONFIG_MTD_PARTITIONS is not set
302
303#
304# User Modules And Translation Layers
305#
306CONFIG_MTD_CHAR=y
307# CONFIG_MTD_BLOCK is not set
308# CONFIG_MTD_BLOCK_RO is not set
309# CONFIG_FTL is not set
310# CONFIG_NFTL is not set
311# CONFIG_INFTL is not set
312# CONFIG_RFD_FTL is not set
313
314#
315# RAM/ROM/Flash chip drivers
316#
317CONFIG_MTD_CFI=y
318# CONFIG_MTD_JEDECPROBE is not set
319CONFIG_MTD_GEN_PROBE=y
320# CONFIG_MTD_CFI_ADV_OPTIONS is not set
321CONFIG_MTD_MAP_BANK_WIDTH_1=y
322CONFIG_MTD_MAP_BANK_WIDTH_2=y
323CONFIG_MTD_MAP_BANK_WIDTH_4=y
324# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
325# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
326# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
327CONFIG_MTD_CFI_I1=y
328CONFIG_MTD_CFI_I2=y
329# CONFIG_MTD_CFI_I4 is not set
330# CONFIG_MTD_CFI_I8 is not set
331# CONFIG_MTD_CFI_INTELEXT is not set
332CONFIG_MTD_CFI_AMDSTD=y
333# CONFIG_MTD_CFI_STAA is not set
334CONFIG_MTD_CFI_UTIL=y
335# CONFIG_MTD_RAM is not set
336# CONFIG_MTD_ROM is not set
337# CONFIG_MTD_ABSENT is not set
338# CONFIG_MTD_OBSOLETE_CHIPS is not set
339
340#
341# Mapping drivers for chip access
342#
343# CONFIG_MTD_COMPLEX_MAPPINGS is not set
344CONFIG_MTD_PHYSMAP=y
345CONFIG_MTD_PHYSMAP_START=0xfe000000
346CONFIG_MTD_PHYSMAP_LEN=0x1000000
347CONFIG_MTD_PHYSMAP_BANKWIDTH=2
348# CONFIG_MTD_PLATRAM is not set
349
350#
351# Self-contained MTD device drivers
352#
353# CONFIG_MTD_PMC551 is not set
354# CONFIG_MTD_DATAFLASH is not set
355# CONFIG_MTD_M25P80 is not set
356# CONFIG_MTD_SLRAM is not set
357# CONFIG_MTD_PHRAM is not set
358# CONFIG_MTD_MTDRAM is not set
359# CONFIG_MTD_BLOCK2MTD is not set
360
361#
362# Disk-On-Chip Device Drivers
363#
364# CONFIG_MTD_DOC2000 is not set
365# CONFIG_MTD_DOC2001 is not set
366# CONFIG_MTD_DOC2001PLUS is not set
367
368#
369# NAND Flash Device Drivers
370#
371# CONFIG_MTD_NAND is not set
372
373#
374# OneNAND Flash Device Drivers
375#
376# CONFIG_MTD_ONENAND is not set
377
378#
379# Parallel port support
380#
381# CONFIG_PARPORT is not set
382
383#
384# Plug and Play support
385#
386
387#
388# Block devices
389#
390# CONFIG_BLK_DEV_FD is not set
391# CONFIG_BLK_CPQ_DA is not set
392# CONFIG_BLK_CPQ_CISS_DA is not set
393# CONFIG_BLK_DEV_DAC960 is not set
394# CONFIG_BLK_DEV_UMEM is not set
395# CONFIG_BLK_DEV_COW_COMMON is not set
396CONFIG_BLK_DEV_LOOP=y
397# CONFIG_BLK_DEV_CRYPTOLOOP is not set
398# CONFIG_BLK_DEV_NBD is not set
399# CONFIG_BLK_DEV_SX8 is not set
400# CONFIG_BLK_DEV_UB is not set
401CONFIG_BLK_DEV_RAM=y
402CONFIG_BLK_DEV_RAM_COUNT=16
403CONFIG_BLK_DEV_RAM_SIZE=32768
404CONFIG_BLK_DEV_INITRD=y
405# CONFIG_CDROM_PKTCDVD is not set
406# CONFIG_ATA_OVER_ETH is not set
407
408#
409# ATA/ATAPI/MFM/RLL support
410#
411CONFIG_IDE=y
412# CONFIG_BLK_DEV_IDE is not set
413# CONFIG_BLK_DEV_HD_ONLY is not set
414# CONFIG_BLK_DEV_HD is not set
415
416#
417# SCSI device support
418#
419# CONFIG_RAID_ATTRS is not set
420CONFIG_SCSI=y
421CONFIG_SCSI_PROC_FS=y
422
423#
424# SCSI support type (disk, tape, CD-ROM)
425#
426CONFIG_BLK_DEV_SD=y
427# CONFIG_CHR_DEV_ST is not set
428# CONFIG_CHR_DEV_OSST is not set
429# CONFIG_BLK_DEV_SR is not set
430CONFIG_CHR_DEV_SG=y
431# CONFIG_CHR_DEV_SCH is not set
432
433#
434# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
435#
436# CONFIG_SCSI_MULTI_LUN is not set
437# CONFIG_SCSI_CONSTANTS is not set
438# CONFIG_SCSI_LOGGING is not set
439
440#
441# SCSI Transport Attributes
442#
443CONFIG_SCSI_SPI_ATTRS=y
444# CONFIG_SCSI_FC_ATTRS is not set
445# CONFIG_SCSI_ISCSI_ATTRS is not set
446# CONFIG_SCSI_SAS_ATTRS is not set
447
448#
449# SCSI low-level drivers
450#
451# CONFIG_ISCSI_TCP is not set
452# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
453# CONFIG_SCSI_3W_9XXX is not set
454# CONFIG_SCSI_ACARD is not set
455# CONFIG_SCSI_AACRAID is not set
456# CONFIG_SCSI_AIC7XXX is not set
457# CONFIG_SCSI_AIC7XXX_OLD is not set
458# CONFIG_SCSI_AIC79XX is not set
459# CONFIG_SCSI_DPT_I2O is not set
460# CONFIG_MEGARAID_NEWGEN is not set
461# CONFIG_MEGARAID_LEGACY is not set
462# CONFIG_MEGARAID_SAS is not set
463CONFIG_SCSI_SATA=y
464# CONFIG_SCSI_SATA_AHCI is not set
465# CONFIG_SCSI_SATA_SVW is not set
466# CONFIG_SCSI_ATA_PIIX is not set
467# CONFIG_SCSI_SATA_MV is not set
468# CONFIG_SCSI_SATA_NV is not set
469# CONFIG_SCSI_PDC_ADMA is not set
470# CONFIG_SCSI_HPTIOP is not set
471# CONFIG_SCSI_SATA_QSTOR is not set
472# CONFIG_SCSI_SATA_PROMISE is not set
473# CONFIG_SCSI_SATA_SX4 is not set
474CONFIG_SCSI_SATA_SIL=y
475# CONFIG_SCSI_SATA_SIL24 is not set
476# CONFIG_SCSI_SATA_SIS is not set
477# CONFIG_SCSI_SATA_ULI is not set
478# CONFIG_SCSI_SATA_VIA is not set
479# CONFIG_SCSI_SATA_VITESSE is not set
480# CONFIG_SCSI_BUSLOGIC is not set
481# CONFIG_SCSI_DMX3191D is not set
482# CONFIG_SCSI_EATA is not set
483# CONFIG_SCSI_FUTURE_DOMAIN is not set
484# CONFIG_SCSI_GDTH is not set
485# CONFIG_SCSI_IPS is not set
486# CONFIG_SCSI_INITIO is not set
487# CONFIG_SCSI_INIA100 is not set
488# CONFIG_SCSI_SYM53C8XX_2 is not set
489# CONFIG_SCSI_IPR is not set
490# CONFIG_SCSI_QLOGIC_1280 is not set
491# CONFIG_SCSI_QLA_FC is not set
492# CONFIG_SCSI_LPFC is not set
493# CONFIG_SCSI_DC395x is not set
494# CONFIG_SCSI_DC390T is not set
495# CONFIG_SCSI_NSP32 is not set
496# CONFIG_SCSI_DEBUG is not set
497
498#
499# Multi-device support (RAID and LVM)
500#
501CONFIG_MD=y
502CONFIG_BLK_DEV_MD=y
503CONFIG_MD_LINEAR=y
504CONFIG_MD_RAID0=y
505CONFIG_MD_RAID1=y
506# CONFIG_MD_RAID10 is not set
507# CONFIG_MD_RAID456 is not set
508# CONFIG_MD_MULTIPATH is not set
509# CONFIG_MD_FAULTY is not set
510# CONFIG_BLK_DEV_DM is not set
511
512#
513# Fusion MPT device support
514#
515# CONFIG_FUSION is not set
516# CONFIG_FUSION_SPI is not set
517# CONFIG_FUSION_FC is not set
518# CONFIG_FUSION_SAS is not set
519
520#
521# IEEE 1394 (FireWire) support
522#
523# CONFIG_IEEE1394 is not set
524
525#
526# I2O device support
527#
528# CONFIG_I2O is not set
529
530#
531# Macintosh device drivers
532#
533# CONFIG_WINDFARM is not set
534
535#
536# Network device support
537#
538CONFIG_NETDEVICES=y
539# CONFIG_DUMMY is not set
540# CONFIG_BONDING is not set
541# CONFIG_EQUALIZER is not set
542# CONFIG_TUN is not set
543
544#
545# ARCnet devices
546#
547# CONFIG_ARCNET is not set
548
549#
550# PHY device support
551#
552CONFIG_PHYLIB=y
553
554#
555# MII PHY device drivers
556#
557# CONFIG_MARVELL_PHY is not set
558# CONFIG_DAVICOM_PHY is not set
559# CONFIG_QSEMI_PHY is not set
560# CONFIG_LXT_PHY is not set
561CONFIG_CICADA_PHY=y
562# CONFIG_VITESSE_PHY is not set
563# CONFIG_SMSC_PHY is not set
564
565#
566# Ethernet (10 or 100Mbit)
567#
568CONFIG_NET_ETHERNET=y
569CONFIG_MII=y
570# CONFIG_HAPPYMEAL is not set
571# CONFIG_SUNGEM is not set
572# CONFIG_CASSINI is not set
573# CONFIG_NET_VENDOR_3COM is not set
574
575#
576# Tulip family network device support
577#
578# CONFIG_NET_TULIP is not set
579# CONFIG_HP100 is not set
580CONFIG_NET_PCI=y
581# CONFIG_PCNET32 is not set
582# CONFIG_AMD8111_ETH is not set
583# CONFIG_ADAPTEC_STARFIRE is not set
584# CONFIG_B44 is not set
585# CONFIG_FORCEDETH is not set
586# CONFIG_DGRS is not set
587# CONFIG_EEPRO100 is not set
588CONFIG_E100=y
589# CONFIG_FEALNX is not set
590# CONFIG_NATSEMI is not set
591# CONFIG_NE2K_PCI is not set
592# CONFIG_8139CP is not set
593# CONFIG_8139TOO is not set
594# CONFIG_SIS900 is not set
595# CONFIG_EPIC100 is not set
596# CONFIG_SUNDANCE is not set
597# CONFIG_TLAN is not set
598# CONFIG_VIA_RHINE is not set
599
600#
601# Ethernet (1000 Mbit)
602#
603# CONFIG_ACENIC is not set
604# CONFIG_DL2K is not set
605# CONFIG_E1000 is not set
606# CONFIG_NS83820 is not set
607# CONFIG_HAMACHI is not set
608# CONFIG_YELLOWFIN is not set
609# CONFIG_R8169 is not set
610# CONFIG_SIS190 is not set
611# CONFIG_SKGE is not set
612# CONFIG_SKY2 is not set
613# CONFIG_SK98LIN is not set
614# CONFIG_VIA_VELOCITY is not set
615# CONFIG_TIGON3 is not set
616# CONFIG_BNX2 is not set
617CONFIG_GIANFAR=y
618CONFIG_GFAR_NAPI=y
619
620#
621# Ethernet (10000 Mbit)
622#
623# CONFIG_CHELSIO_T1 is not set
624# CONFIG_IXGB is not set
625# CONFIG_S2IO is not set
626# CONFIG_MYRI10GE is not set
627
628#
629# Token Ring devices
630#
631# CONFIG_TR is not set
632
633#
634# Wireless LAN (non-hamradio)
635#
636# CONFIG_NET_RADIO is not set
637
638#
639# Wan interfaces
640#
641# CONFIG_WAN is not set
642# CONFIG_FDDI is not set
643# CONFIG_HIPPI is not set
644# CONFIG_PPP is not set
645# CONFIG_SLIP is not set
646# CONFIG_NET_FC is not set
647# CONFIG_SHAPER is not set
648# CONFIG_NETCONSOLE is not set
649# CONFIG_NETPOLL is not set
650# CONFIG_NET_POLL_CONTROLLER is not set
651
652#
653# ISDN subsystem
654#
655# CONFIG_ISDN is not set
656
657#
658# Telephony Support
659#
660# CONFIG_PHONE is not set
661
662#
663# Input device support
664#
665CONFIG_INPUT=y
666
667#
668# Userland interfaces
669#
670# CONFIG_INPUT_MOUSEDEV is not set
671# CONFIG_INPUT_JOYDEV is not set
672# CONFIG_INPUT_TSDEV is not set
673# CONFIG_INPUT_EVDEV is not set
674# CONFIG_INPUT_EVBUG is not set
675
676#
677# Input Device Drivers
678#
679# CONFIG_INPUT_KEYBOARD is not set
680# CONFIG_INPUT_MOUSE is not set
681# CONFIG_INPUT_JOYSTICK is not set
682# CONFIG_INPUT_TOUCHSCREEN is not set
683# CONFIG_INPUT_MISC is not set
684
685#
686# Hardware I/O ports
687#
688# CONFIG_SERIO is not set
689# CONFIG_GAMEPORT is not set
690
691#
692# Character devices
693#
694# CONFIG_VT is not set
695# CONFIG_SERIAL_NONSTANDARD is not set
696
697#
698# Serial drivers
699#
700CONFIG_SERIAL_8250=y
701CONFIG_SERIAL_8250_CONSOLE=y
702CONFIG_SERIAL_8250_PCI=y
703CONFIG_SERIAL_8250_NR_UARTS=4
704CONFIG_SERIAL_8250_RUNTIME_UARTS=4
705# CONFIG_SERIAL_8250_EXTENDED is not set
706
707#
708# Non-8250 serial port support
709#
710CONFIG_SERIAL_CORE=y
711CONFIG_SERIAL_CORE_CONSOLE=y
712# CONFIG_SERIAL_JSM is not set
713CONFIG_UNIX98_PTYS=y
714CONFIG_LEGACY_PTYS=y
715CONFIG_LEGACY_PTY_COUNT=256
716
717#
718# IPMI
719#
720# CONFIG_IPMI_HANDLER is not set
721
722#
723# Watchdog Cards
724#
725CONFIG_WATCHDOG=y
726# CONFIG_WATCHDOG_NOWAYOUT is not set
727
728#
729# Watchdog Device Drivers
730#
731# CONFIG_SOFT_WATCHDOG is not set
732CONFIG_83xx_WDT=y
733
734#
735# PCI-based Watchdog Cards
736#
737# CONFIG_PCIPCWATCHDOG is not set
738# CONFIG_WDTPCI is not set
739
740#
741# USB-based Watchdog Cards
742#
743# CONFIG_USBPCWATCHDOG is not set
744CONFIG_HW_RANDOM=y
745# CONFIG_NVRAM is not set
746# CONFIG_GEN_RTC is not set
747# CONFIG_DTLK is not set
748# CONFIG_R3964 is not set
749# CONFIG_APPLICOM is not set
750
751#
752# Ftape, the floppy tape device driver
753#
754# CONFIG_AGP is not set
755# CONFIG_DRM is not set
756# CONFIG_RAW_DRIVER is not set
757
758#
759# TPM devices
760#
761# CONFIG_TCG_TPM is not set
762# CONFIG_TELCLOCK is not set
763
764#
765# I2C support
766#
767CONFIG_I2C=y
768CONFIG_I2C_CHARDEV=y
769
770#
771# I2C Algorithms
772#
773# CONFIG_I2C_ALGOBIT is not set
774# CONFIG_I2C_ALGOPCF is not set
775# CONFIG_I2C_ALGOPCA is not set
776
777#
778# I2C Hardware Bus support
779#
780# CONFIG_I2C_ALI1535 is not set
781# CONFIG_I2C_ALI1563 is not set
782# CONFIG_I2C_ALI15X3 is not set
783# CONFIG_I2C_AMD756 is not set
784# CONFIG_I2C_AMD8111 is not set
785# CONFIG_I2C_I801 is not set
786# CONFIG_I2C_I810 is not set
787# CONFIG_I2C_PIIX4 is not set
788CONFIG_I2C_MPC=y
789# CONFIG_I2C_NFORCE2 is not set
790# CONFIG_I2C_OCORES is not set
791# CONFIG_I2C_PARPORT_LIGHT is not set
792# CONFIG_I2C_PROSAVAGE is not set
793# CONFIG_I2C_SAVAGE4 is not set
794# CONFIG_I2C_SIS5595 is not set
795# CONFIG_I2C_SIS630 is not set
796# CONFIG_I2C_SIS96X is not set
797# CONFIG_I2C_STUB is not set
798# CONFIG_I2C_VIA is not set
799# CONFIG_I2C_VIAPRO is not set
800# CONFIG_I2C_VOODOO3 is not set
801# CONFIG_I2C_PCA_ISA is not set
802
803#
804# Miscellaneous I2C Chip support
805#
806# CONFIG_SENSORS_DS1337 is not set
807# CONFIG_SENSORS_DS1374 is not set
808# CONFIG_SENSORS_EEPROM is not set
809# CONFIG_SENSORS_PCF8574 is not set
810# CONFIG_SENSORS_PCA9539 is not set
811# CONFIG_SENSORS_PCF8591 is not set
812# CONFIG_SENSORS_M41T00 is not set
813# CONFIG_SENSORS_MAX6875 is not set
814# CONFIG_I2C_DEBUG_CORE is not set
815# CONFIG_I2C_DEBUG_ALGO is not set
816# CONFIG_I2C_DEBUG_BUS is not set
817# CONFIG_I2C_DEBUG_CHIP is not set
818
819#
820# SPI support
821#
822CONFIG_SPI=y
823# CONFIG_SPI_DEBUG is not set
824CONFIG_SPI_MASTER=y
825
826#
827# SPI Master Controller Drivers
828#
829CONFIG_SPI_BITBANG=y
830CONFIG_SPI_MPC83xx=y
831
832#
833# SPI Protocol Masters
834#
835
836#
837# Dallas's 1-wire bus
838#
839
840#
841# Hardware Monitoring support
842#
843CONFIG_HWMON=y
844# CONFIG_HWMON_VID is not set
845# CONFIG_SENSORS_ABITUGURU is not set
846# CONFIG_SENSORS_ADM1021 is not set
847# CONFIG_SENSORS_ADM1025 is not set
848# CONFIG_SENSORS_ADM1026 is not set
849# CONFIG_SENSORS_ADM1031 is not set
850# CONFIG_SENSORS_ADM9240 is not set
851# CONFIG_SENSORS_ASB100 is not set
852# CONFIG_SENSORS_ATXP1 is not set
853# CONFIG_SENSORS_DS1621 is not set
854# CONFIG_SENSORS_F71805F is not set
855# CONFIG_SENSORS_FSCHER is not set
856# CONFIG_SENSORS_FSCPOS is not set
857# CONFIG_SENSORS_GL518SM is not set
858# CONFIG_SENSORS_GL520SM is not set
859# CONFIG_SENSORS_IT87 is not set
860# CONFIG_SENSORS_LM63 is not set
861# CONFIG_SENSORS_LM70 is not set
862# CONFIG_SENSORS_LM75 is not set
863# CONFIG_SENSORS_LM77 is not set
864# CONFIG_SENSORS_LM78 is not set
865# CONFIG_SENSORS_LM80 is not set
866# CONFIG_SENSORS_LM83 is not set
867# CONFIG_SENSORS_LM85 is not set
868# CONFIG_SENSORS_LM87 is not set
869# CONFIG_SENSORS_LM90 is not set
870# CONFIG_SENSORS_LM92 is not set
871# CONFIG_SENSORS_MAX1619 is not set
872# CONFIG_SENSORS_PC87360 is not set
873# CONFIG_SENSORS_SIS5595 is not set
874# CONFIG_SENSORS_SMSC47M1 is not set
875# CONFIG_SENSORS_SMSC47M192 is not set
876# CONFIG_SENSORS_SMSC47B397 is not set
877# CONFIG_SENSORS_VIA686A is not set
878# CONFIG_SENSORS_VT8231 is not set
879# CONFIG_SENSORS_W83781D is not set
880# CONFIG_SENSORS_W83791D is not set
881# CONFIG_SENSORS_W83792D is not set
882# CONFIG_SENSORS_W83L785TS is not set
883# CONFIG_SENSORS_W83627HF is not set
884# CONFIG_SENSORS_W83627EHF is not set
885# CONFIG_HWMON_DEBUG_CHIP is not set
886
887#
888# Misc devices
889#
890
891#
892# Multimedia devices
893#
894# CONFIG_VIDEO_DEV is not set
895CONFIG_VIDEO_V4L2=y
896
897#
898# Digital Video Broadcasting Devices
899#
900# CONFIG_DVB is not set
901# CONFIG_USB_DABUSB is not set
902
903#
904# Graphics support
905#
906CONFIG_FIRMWARE_EDID=y
907# CONFIG_FB is not set
908
909#
910# Sound
911#
912# CONFIG_SOUND is not set
913
914#
915# USB support
916#
917CONFIG_USB_ARCH_HAS_HCD=y
918CONFIG_USB_ARCH_HAS_OHCI=y
919CONFIG_USB_ARCH_HAS_EHCI=y
920CONFIG_USB=y
921# CONFIG_USB_DEBUG is not set
922
923#
924# Miscellaneous USB options
925#
926CONFIG_USB_DEVICEFS=y
927# CONFIG_USB_BANDWIDTH is not set
928# CONFIG_USB_DYNAMIC_MINORS is not set
929# CONFIG_USB_OTG is not set
930
931#
932# USB Host Controller Drivers
933#
934CONFIG_USB_EHCI_HCD=y
935# CONFIG_USB_EHCI_SPLIT_ISO is not set
936# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
937# CONFIG_USB_EHCI_TT_NEWSCHED is not set
938# CONFIG_USB_ISP116X_HCD is not set
939CONFIG_USB_OHCI_HCD=y
940# CONFIG_USB_OHCI_BIG_ENDIAN is not set
941CONFIG_USB_OHCI_LITTLE_ENDIAN=y
942CONFIG_USB_UHCI_HCD=y
943# CONFIG_USB_SL811_HCD is not set
944
945#
946# USB Device Class drivers
947#
948# CONFIG_USB_ACM is not set
949# CONFIG_USB_PRINTER is not set
950
951#
952# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
953#
954
955#
956# may also be needed; see USB_STORAGE Help for more information
957#
958CONFIG_USB_STORAGE=y
959# CONFIG_USB_STORAGE_DEBUG is not set
960# CONFIG_USB_STORAGE_DATAFAB is not set
961# CONFIG_USB_STORAGE_FREECOM is not set
962# CONFIG_USB_STORAGE_DPCM is not set
963# CONFIG_USB_STORAGE_USBAT is not set
964# CONFIG_USB_STORAGE_SDDR09 is not set
965# CONFIG_USB_STORAGE_SDDR55 is not set
966# CONFIG_USB_STORAGE_JUMPSHOT is not set
967# CONFIG_USB_STORAGE_ALAUDA is not set
968# CONFIG_USB_LIBUSUAL is not set
969
970#
971# USB Input Devices
972#
973# CONFIG_USB_HID is not set
974
975#
976# USB HID Boot Protocol drivers
977#
978# CONFIG_USB_KBD is not set
979# CONFIG_USB_MOUSE is not set
980# CONFIG_USB_AIPTEK is not set
981# CONFIG_USB_WACOM is not set
982# CONFIG_USB_ACECAD is not set
983# CONFIG_USB_KBTAB is not set
984# CONFIG_USB_POWERMATE is not set
985# CONFIG_USB_TOUCHSCREEN is not set
986# CONFIG_USB_YEALINK is not set
987# CONFIG_USB_XPAD is not set
988# CONFIG_USB_ATI_REMOTE is not set
989# CONFIG_USB_ATI_REMOTE2 is not set
990# CONFIG_USB_KEYSPAN_REMOTE is not set
991# CONFIG_USB_APPLETOUCH is not set
992
993#
994# USB Imaging devices
995#
996# CONFIG_USB_MDC800 is not set
997# CONFIG_USB_MICROTEK is not set
998
999#
1000# USB Network Adapters
1001#
1002# CONFIG_USB_CATC is not set
1003# CONFIG_USB_KAWETH is not set
1004# CONFIG_USB_PEGASUS is not set
1005# CONFIG_USB_RTL8150 is not set
1006# CONFIG_USB_USBNET is not set
1007CONFIG_USB_MON=y
1008
1009#
1010# USB port drivers
1011#
1012
1013#
1014# USB Serial Converter support
1015#
1016# CONFIG_USB_SERIAL is not set
1017
1018#
1019# USB Miscellaneous drivers
1020#
1021# CONFIG_USB_EMI62 is not set
1022# CONFIG_USB_EMI26 is not set
1023# CONFIG_USB_AUERSWALD is not set
1024# CONFIG_USB_RIO500 is not set
1025# CONFIG_USB_LEGOTOWER is not set
1026# CONFIG_USB_LCD is not set
1027# CONFIG_USB_LED is not set
1028# CONFIG_USB_CY7C63 is not set
1029# CONFIG_USB_CYTHERM is not set
1030# CONFIG_USB_PHIDGETKIT is not set
1031# CONFIG_USB_PHIDGETSERVO is not set
1032# CONFIG_USB_IDMOUSE is not set
1033# CONFIG_USB_APPLEDISPLAY is not set
1034# CONFIG_USB_SISUSBVGA is not set
1035# CONFIG_USB_LD is not set
1036# CONFIG_USB_TEST is not set
1037
1038#
1039# USB DSL modem support
1040#
1041
1042#
1043# USB Gadget Support
1044#
1045CONFIG_USB_GADGET=y
1046# CONFIG_USB_GADGET_DEBUG_FILES is not set
1047CONFIG_USB_GADGET_SELECTED=y
1048CONFIG_USB_GADGET_NET2280=y
1049CONFIG_USB_NET2280=y
1050# CONFIG_USB_GADGET_PXA2XX is not set
1051# CONFIG_USB_GADGET_GOKU is not set
1052# CONFIG_USB_GADGET_LH7A40X is not set
1053# CONFIG_USB_GADGET_OMAP is not set
1054# CONFIG_USB_GADGET_AT91 is not set
1055# CONFIG_USB_GADGET_DUMMY_HCD is not set
1056CONFIG_USB_GADGET_DUALSPEED=y
1057# CONFIG_USB_ZERO is not set
1058CONFIG_USB_ETH=y
1059CONFIG_USB_ETH_RNDIS=y
1060# CONFIG_USB_GADGETFS is not set
1061# CONFIG_USB_FILE_STORAGE is not set
1062# CONFIG_USB_G_SERIAL is not set
1063
1064#
1065# MMC/SD Card support
1066#
1067# CONFIG_MMC is not set
1068
1069#
1070# LED devices
1071#
1072# CONFIG_NEW_LEDS is not set
1073
1074#
1075# LED drivers
1076#
1077
1078#
1079# LED Triggers
1080#
1081
1082#
1083# InfiniBand support
1084#
1085# CONFIG_INFINIBAND is not set
1086
1087#
1088# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1089#
1090
1091#
1092# Real Time Clock
1093#
1094CONFIG_RTC_LIB=y
1095CONFIG_RTC_CLASS=y
1096CONFIG_RTC_HCTOSYS=y
1097CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1098
1099#
1100# RTC interfaces
1101#
1102CONFIG_RTC_INTF_SYSFS=y
1103CONFIG_RTC_INTF_PROC=y
1104CONFIG_RTC_INTF_DEV=y
1105CONFIG_RTC_INTF_DEV_UIE_EMUL=y
1106
1107#
1108# RTC drivers
1109#
1110# CONFIG_RTC_DRV_X1205 is not set
1111CONFIG_RTC_DRV_DS1307=y
1112# CONFIG_RTC_DRV_DS1553 is not set
1113# CONFIG_RTC_DRV_DS1672 is not set
1114# CONFIG_RTC_DRV_DS1742 is not set
1115# CONFIG_RTC_DRV_PCF8563 is not set
1116# CONFIG_RTC_DRV_PCF8583 is not set
1117# CONFIG_RTC_DRV_RS5C348 is not set
1118# CONFIG_RTC_DRV_RS5C372 is not set
1119# CONFIG_RTC_DRV_M48T86 is not set
1120# CONFIG_RTC_DRV_TEST is not set
1121# CONFIG_RTC_DRV_MAX6902 is not set
1122# CONFIG_RTC_DRV_V3020 is not set
1123
1124#
1125# DMA Engine support
1126#
1127CONFIG_DMA_ENGINE=y
1128
1129#
1130# DMA Clients
1131#
1132CONFIG_NET_DMA=y
1133
1134#
1135# DMA Devices
1136#
1137CONFIG_INTEL_IOATDMA=y
1138
1139#
1140# File systems
1141#
1142CONFIG_EXT2_FS=y
1143# CONFIG_EXT2_FS_XATTR is not set
1144# CONFIG_EXT2_FS_XIP is not set
1145CONFIG_EXT3_FS=y
1146CONFIG_EXT3_FS_XATTR=y
1147# CONFIG_EXT3_FS_POSIX_ACL is not set
1148# CONFIG_EXT3_FS_SECURITY is not set
1149CONFIG_JBD=y
1150# CONFIG_JBD_DEBUG is not set
1151CONFIG_FS_MBCACHE=y
1152# CONFIG_REISERFS_FS is not set
1153# CONFIG_JFS_FS is not set
1154# CONFIG_FS_POSIX_ACL is not set
1155# CONFIG_XFS_FS is not set
1156# CONFIG_OCFS2_FS is not set
1157# CONFIG_MINIX_FS is not set
1158# CONFIG_ROMFS_FS is not set
1159CONFIG_INOTIFY=y
1160CONFIG_INOTIFY_USER=y
1161# CONFIG_QUOTA is not set
1162CONFIG_DNOTIFY=y
1163# CONFIG_AUTOFS_FS is not set
1164# CONFIG_AUTOFS4_FS is not set
1165# CONFIG_FUSE_FS is not set
1166
1167#
1168# CD-ROM/DVD Filesystems
1169#
1170# CONFIG_ISO9660_FS is not set
1171# CONFIG_UDF_FS is not set
1172
1173#
1174# DOS/FAT/NT Filesystems
1175#
1176# CONFIG_MSDOS_FS is not set
1177# CONFIG_VFAT_FS is not set
1178# CONFIG_NTFS_FS is not set
1179
1180#
1181# Pseudo filesystems
1182#
1183CONFIG_PROC_FS=y
1184CONFIG_PROC_KCORE=y
1185CONFIG_SYSFS=y
1186CONFIG_TMPFS=y
1187# CONFIG_HUGETLB_PAGE is not set
1188CONFIG_RAMFS=y
1189# CONFIG_CONFIGFS_FS is not set
1190
1191#
1192# Miscellaneous filesystems
1193#
1194# CONFIG_ADFS_FS is not set
1195# CONFIG_AFFS_FS is not set
1196# CONFIG_HFS_FS is not set
1197# CONFIG_HFSPLUS_FS is not set
1198# CONFIG_BEFS_FS is not set
1199# CONFIG_BFS_FS is not set
1200# CONFIG_EFS_FS is not set
1201# CONFIG_JFFS_FS is not set
1202# CONFIG_JFFS2_FS is not set
1203# CONFIG_CRAMFS is not set
1204# CONFIG_VXFS_FS is not set
1205# CONFIG_HPFS_FS is not set
1206# CONFIG_QNX4FS_FS is not set
1207# CONFIG_SYSV_FS is not set
1208# CONFIG_UFS_FS is not set
1209
1210#
1211# Network File Systems
1212#
1213CONFIG_NFS_FS=y
1214CONFIG_NFS_V3=y
1215# CONFIG_NFS_V3_ACL is not set
1216CONFIG_NFS_V4=y
1217# CONFIG_NFS_DIRECTIO is not set
1218# CONFIG_NFSD is not set
1219CONFIG_ROOT_NFS=y
1220CONFIG_LOCKD=y
1221CONFIG_LOCKD_V4=y
1222CONFIG_NFS_COMMON=y
1223CONFIG_SUNRPC=y
1224CONFIG_SUNRPC_GSS=y
1225CONFIG_RPCSEC_GSS_KRB5=y
1226# CONFIG_RPCSEC_GSS_SPKM3 is not set
1227# CONFIG_SMB_FS is not set
1228# CONFIG_CIFS is not set
1229# CONFIG_CIFS_DEBUG2 is not set
1230# CONFIG_NCP_FS is not set
1231# CONFIG_CODA_FS is not set
1232# CONFIG_AFS_FS is not set
1233# CONFIG_9P_FS is not set
1234
1235#
1236# Partition Types
1237#
1238CONFIG_PARTITION_ADVANCED=y
1239# CONFIG_ACORN_PARTITION is not set
1240# CONFIG_OSF_PARTITION is not set
1241# CONFIG_AMIGA_PARTITION is not set
1242# CONFIG_ATARI_PARTITION is not set
1243# CONFIG_MAC_PARTITION is not set
1244# CONFIG_MSDOS_PARTITION is not set
1245# CONFIG_LDM_PARTITION is not set
1246# CONFIG_SGI_PARTITION is not set
1247# CONFIG_ULTRIX_PARTITION is not set
1248# CONFIG_SUN_PARTITION is not set
1249# CONFIG_KARMA_PARTITION is not set
1250# CONFIG_EFI_PARTITION is not set
1251
1252#
1253# Native Language Support
1254#
1255# CONFIG_NLS is not set
1256
1257#
1258# Library routines
1259#
1260# CONFIG_CRC_CCITT is not set
1261# CONFIG_CRC16 is not set
1262CONFIG_CRC32=y
1263# CONFIG_LIBCRC32C is not set
1264CONFIG_PLIST=y
1265
1266#
1267# Instrumentation Support
1268#
1269# CONFIG_PROFILING is not set
1270
1271#
1272# Kernel hacking
1273#
1274CONFIG_PRINTK_TIME=y
1275# CONFIG_MAGIC_SYSRQ is not set
1276# CONFIG_UNUSED_SYMBOLS is not set
1277CONFIG_DEBUG_KERNEL=y
1278CONFIG_LOG_BUF_SHIFT=17
1279CONFIG_DETECT_SOFTLOCKUP=y
1280# CONFIG_SCHEDSTATS is not set
1281# CONFIG_DEBUG_SLAB is not set
1282# CONFIG_DEBUG_MUTEXES is not set
1283# CONFIG_DEBUG_RT_MUTEXES is not set
1284# CONFIG_RT_MUTEX_TESTER is not set
1285# CONFIG_DEBUG_SPINLOCK is not set
1286# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1287# CONFIG_DEBUG_KOBJECT is not set
1288CONFIG_DEBUG_INFO=y
1289# CONFIG_DEBUG_FS is not set
1290# CONFIG_DEBUG_VM is not set
1291CONFIG_FORCED_INLINING=y
1292# CONFIG_RCU_TORTURE_TEST is not set
1293# CONFIG_DEBUGGER is not set
1294# CONFIG_BDI_SWITCH is not set
1295CONFIG_BOOTX_TEXT=y
1296CONFIG_SERIAL_TEXT_DEBUG=y
1297# CONFIG_PPC_EARLY_DEBUG is not set
1298
1299#
1300# Security options
1301#
1302# CONFIG_KEYS is not set
1303# CONFIG_SECURITY is not set
1304
1305#
1306# Cryptographic options
1307#
1308CONFIG_CRYPTO=y
1309# CONFIG_CRYPTO_HMAC is not set
1310# CONFIG_CRYPTO_NULL is not set
1311# CONFIG_CRYPTO_MD4 is not set
1312CONFIG_CRYPTO_MD5=y
1313# CONFIG_CRYPTO_SHA1 is not set
1314# CONFIG_CRYPTO_SHA256 is not set
1315# CONFIG_CRYPTO_SHA512 is not set
1316# CONFIG_CRYPTO_WP512 is not set
1317# CONFIG_CRYPTO_TGR192 is not set
1318CONFIG_CRYPTO_DES=y
1319# CONFIG_CRYPTO_BLOWFISH is not set
1320# CONFIG_CRYPTO_TWOFISH is not set
1321# CONFIG_CRYPTO_SERPENT is not set
1322# CONFIG_CRYPTO_AES is not set
1323# CONFIG_CRYPTO_CAST5 is not set
1324# CONFIG_CRYPTO_CAST6 is not set
1325# CONFIG_CRYPTO_TEA is not set
1326# CONFIG_CRYPTO_ARC4 is not set
1327# CONFIG_CRYPTO_KHAZAD is not set
1328# CONFIG_CRYPTO_ANUBIS is not set
1329# CONFIG_CRYPTO_DEFLATE is not set
1330# CONFIG_CRYPTO_MICHAEL_MIC is not set
1331# CONFIG_CRYPTO_CRC32C is not set
1332# CONFIG_CRYPTO_TEST is not set
1333
1334#
1335# Hardware crypto devices
1336#
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index a6920919d68e..f4e5e14ee2b6 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -111,7 +111,7 @@ void __init btext_setup_display(int width, int height, int depth, int pitch,
 	logicalDisplayBase = (unsigned char *)address;
 	dispDeviceBase = (unsigned char *)address;
 	dispDeviceRowBytes = pitch;
-	dispDeviceDepth = depth;
+	dispDeviceDepth = depth == 15 ? 16 : depth;
 	dispDeviceRect[0] = dispDeviceRect[1] = 0;
 	dispDeviceRect[2] = width;
 	dispDeviceRect[3] = height;
@@ -160,20 +160,28 @@ int btext_initialize(struct device_node *np)
 	unsigned long address = 0;
 	u32 *prop;
 
-	prop = (u32 *)get_property(np, "width", NULL);
+	prop = (u32 *)get_property(np, "linux,bootx-width", NULL);
+	if (prop == NULL)
+		prop = (u32 *)get_property(np, "width", NULL);
 	if (prop == NULL)
 		return -EINVAL;
 	width = *prop;
-	prop = (u32 *)get_property(np, "height", NULL);
+	prop = (u32 *)get_property(np, "linux,bootx-height", NULL);
+	if (prop == NULL)
+		prop = (u32 *)get_property(np, "height", NULL);
 	if (prop == NULL)
 		return -EINVAL;
 	height = *prop;
-	prop = (u32 *)get_property(np, "depth", NULL);
+	prop = (u32 *)get_property(np, "linux,bootx-depth", NULL);
+	if (prop == NULL)
+		prop = (u32 *)get_property(np, "depth", NULL);
 	if (prop == NULL)
 		return -EINVAL;
 	depth = *prop;
 	pitch = width * ((depth + 7) / 8);
-	prop = (u32 *)get_property(np, "linebytes", NULL);
+	prop = (u32 *)get_property(np, "linux,bootx-linebytes", NULL);
+	if (prop == NULL)
+		prop = (u32 *)get_property(np, "linebytes", NULL);
 	if (prop)
 		pitch = *prop;
 	if (pitch == 1)
@@ -194,7 +202,7 @@ int btext_initialize(struct device_node *np)
 	g_max_loc_Y = height / 16;
 	dispDeviceBase = (unsigned char *)address;
 	dispDeviceRowBytes = pitch;
-	dispDeviceDepth = depth;
+	dispDeviceDepth = depth == 15 ? 16 : depth;
 	dispDeviceRect[0] = dispDeviceRect[1] = 0;
 	dispDeviceRect[2] = width;
 	dispDeviceRect[3] = height;
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index e47d40ac6f39..97ddc02a3d42 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -323,13 +323,11 @@ int ibmebus_request_irq(struct ibmebus_dev *dev,
 			unsigned long irq_flags, const char * devname,
 			void *dev_id)
 {
-	unsigned int irq = virt_irq_create_mapping(ist);
+	unsigned int irq = irq_create_mapping(NULL, ist, 0);
 
 	if (irq == NO_IRQ)
 		return -EINVAL;
 
-	irq = irq_offset_up(irq);
-
 	return request_irq(irq, handler,
 			   irq_flags, devname, dev_id);
 }
@@ -337,12 +335,9 @@ EXPORT_SYMBOL(ibmebus_request_irq);
 
 void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id)
 {
-	unsigned int irq = virt_irq_create_mapping(ist);
+	unsigned int irq = irq_find_mapping(NULL, ist);
 
-	irq = irq_offset_up(irq);
 	free_irq(irq, dev_id);
-
-	return;
 }
 EXPORT_SYMBOL(ibmebus_free_irq);
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 525baab45d2d..8cf987809c66 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -29,6 +29,8 @@
  * to reduce code space and undefined function references.
  */
 
+#undef DEBUG
+
 #include <linux/module.h>
 #include <linux/threads.h>
 #include <linux/kernel_stat.h>
@@ -46,7 +48,10 @@
 #include <linux/cpumask.h>
 #include <linux/profile.h>
 #include <linux/bitops.h>
-#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/mutex.h>
+#include <linux/bootmem.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -57,39 +62,38 @@
 #include <asm/prom.h>
 #include <asm/ptrace.h>
 #include <asm/machdep.h>
+#include <asm/udbg.h>
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/paca.h>
 #endif
 
 int __irq_offset_value;
-#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(__irq_offset_value);
-#endif
-
 static int ppc_spurious_interrupts;
 
 #ifdef CONFIG_PPC32
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
+EXPORT_SYMBOL(__irq_offset_value);
+atomic_t ppc_n_lost_interrupts;
 
+#ifndef CONFIG_PPC_MERGE
+#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
 unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-atomic_t ppc_n_lost_interrupts;
+#endif
 
 #ifdef CONFIG_TAU_INT
 extern int tau_initialized;
 extern int tau_interrupts(int);
 #endif
+#endif /* CONFIG_PPC32 */
 
 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
 extern atomic_t ipi_recv;
 extern atomic_t ipi_sent;
 #endif
-#endif /* CONFIG_PPC32 */
 
 #ifdef CONFIG_PPC64
 EXPORT_SYMBOL(irq_desc);
 
 int distribute_irqs = 1;
-u64 ppc64_interrupt_controller;
 #endif /* CONFIG_PPC64 */
 
 int show_interrupts(struct seq_file *p, void *v)
@@ -182,7 +186,7 @@ void fixup_irqs(cpumask_t map)
 
 void do_IRQ(struct pt_regs *regs)
 {
-	int irq;
+	unsigned int irq;
 #ifdef CONFIG_IRQSTACKS
 	struct thread_info *curtp, *irqtp;
 #endif
@@ -213,22 +217,26 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq(regs);
 
-	if (irq >= 0) {
+	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
 #ifdef CONFIG_IRQSTACKS
 		/* Switch to the irq stack to handle this */
 		curtp = current_thread_info();
 		irqtp = hardirq_ctx[smp_processor_id()];
 		if (curtp != irqtp) {
+			struct irq_desc *desc = irq_desc + irq;
+			void *handler = desc->handle_irq;
+			if (handler == NULL)
+				handler = &__do_IRQ;
 			irqtp->task = curtp->task;
 			irqtp->flags = 0;
-			call___do_IRQ(irq, regs, irqtp);
+			call_handle_irq(irq, desc, regs, irqtp, handler);
 			irqtp->task = NULL;
 			if (irqtp->flags)
 				set_bits(irqtp->flags, &curtp->flags);
 		} else
 #endif
-			__do_IRQ(irq, regs);
-	} else if (irq != -2)
+			generic_handle_irq(irq, regs);
+	} else if (irq != NO_IRQ_IGNORE)
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;
 
@@ -245,196 +253,562 @@ void do_IRQ(struct pt_regs *regs)
245 253
246void __init init_IRQ(void) 254void __init init_IRQ(void)
247{ 255{
256 ppc_md.init_IRQ();
248#ifdef CONFIG_PPC64 257#ifdef CONFIG_PPC64
249 static int once = 0; 258 irq_ctx_init();
259#endif
260}
261
262
263#ifdef CONFIG_IRQSTACKS
264struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
265struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
266
267void irq_ctx_init(void)
268{
269 struct thread_info *tp;
270 int i;
271
272 for_each_possible_cpu(i) {
273 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
274 tp = softirq_ctx[i];
275 tp->cpu = i;
276 tp->preempt_count = SOFTIRQ_OFFSET;
277
278 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
279 tp = hardirq_ctx[i];
280 tp->cpu = i;
281 tp->preempt_count = HARDIRQ_OFFSET;
282 }
283}
284
285static inline void do_softirq_onstack(void)
286{
287 struct thread_info *curtp, *irqtp;
288
289 curtp = current_thread_info();
290 irqtp = softirq_ctx[smp_processor_id()];
291 irqtp->task = curtp->task;
292 call_do_softirq(irqtp);
293 irqtp->task = NULL;
294}
250 295
251 if (once) 296#else
297#define do_softirq_onstack() __do_softirq()
298#endif /* CONFIG_IRQSTACKS */
299
300void do_softirq(void)
301{
302 unsigned long flags;
303
304 if (in_interrupt())
252 return; 305 return;
253 306
254 once++; 307 local_irq_save(flags);
255 308
256#endif 309 if (local_softirq_pending())
257 ppc_md.init_IRQ(); 310 do_softirq_onstack();
258#ifdef CONFIG_PPC64 311
259 irq_ctx_init(); 312 local_irq_restore(flags);
260#endif
261} 313}
314EXPORT_SYMBOL(do_softirq);
315
262 316
263#ifdef CONFIG_PPC64
264/* 317/*
265 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers. 318 * IRQ controller and virtual interrupts
266 */ 319 */
267 320
268#define UNDEFINED_IRQ 0xffffffff 321#ifdef CONFIG_PPC_MERGE
269unsigned int virt_irq_to_real_map[NR_IRQS];
270 322
271/* 323static LIST_HEAD(irq_hosts);
272 * Don't use virtual irqs 0, 1, 2 for devices. 324static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
273 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
274 * and 2 is the XICS IPI interrupt.
275 * We limit virtual irqs to __irq_offet_value less than virt_irq_max so
276 * that when we offset them we don't end up with an interrupt
277 * number >= virt_irq_max.
278 */
279#define MIN_VIRT_IRQ 3
280 325
281unsigned int virt_irq_max; 326struct irq_map_entry irq_map[NR_IRQS];
282static unsigned int max_virt_irq; 327static unsigned int irq_virq_count = NR_IRQS;
283static unsigned int nr_virt_irqs; 328static struct irq_host *irq_default_host;
284 329
285void 330struct irq_host *irq_alloc_host(unsigned int revmap_type,
286virt_irq_init(void) 331 unsigned int revmap_arg,
332 struct irq_host_ops *ops,
333 irq_hw_number_t inval_irq)
287{ 334{
288 int i; 335 struct irq_host *host;
336 unsigned int size = sizeof(struct irq_host);
337 unsigned int i;
338 unsigned int *rmap;
339 unsigned long flags;
289 340
290 if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1))) 341 /* Allocate structure and revmap table if using linear mapping */
291 virt_irq_max = NR_IRQS - 1; 342 if (revmap_type == IRQ_HOST_MAP_LINEAR)
292 max_virt_irq = virt_irq_max - __irq_offset_value; 343 size += revmap_arg * sizeof(unsigned int);
293 nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1; 344 if (mem_init_done)
345 host = kzalloc(size, GFP_KERNEL);
346 else {
347 host = alloc_bootmem(size);
348 if (host)
349 memset(host, 0, size);
350 }
351 if (host == NULL)
352 return NULL;
294 353
295 for (i = 0; i < NR_IRQS; i++) 354 /* Fill structure */
296 virt_irq_to_real_map[i] = UNDEFINED_IRQ; 355 host->revmap_type = revmap_type;
356 host->inval_irq = inval_irq;
357 host->ops = ops;
358
359 spin_lock_irqsave(&irq_big_lock, flags);
360
361 /* If it's a legacy controller, check for duplicates and
362 * mark it as allocated (we use irq 0 host pointer for that
363 */
364 if (revmap_type == IRQ_HOST_MAP_LEGACY) {
365 if (irq_map[0].host != NULL) {
366 spin_unlock_irqrestore(&irq_big_lock, flags);
367 /* If we are early boot, we can't free the structure,
368 * too bad...
369 * this will be fixed once slab is made available early
370 * instead of the current cruft
371 */
372 if (mem_init_done)
373 kfree(host);
374 return NULL;
375 }
376 irq_map[0].host = host;
377 }
378
379 list_add(&host->link, &irq_hosts);
380 spin_unlock_irqrestore(&irq_big_lock, flags);
381
382 /* Additional setups per revmap type */
383 switch(revmap_type) {
384 case IRQ_HOST_MAP_LEGACY:
385 /* 0 is always the invalid number for legacy */
386 host->inval_irq = 0;
387 /* setup us as the host for all legacy interrupts */
388 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
389 irq_map[i].hwirq = 0;
390 smp_wmb();
391 irq_map[i].host = host;
392 smp_wmb();
393
394 /* Clear some flags */
395 get_irq_desc(i)->status
396 &= ~(IRQ_NOREQUEST | IRQ_LEVEL);
397
398 /* Legacy flags are left to default at this point,
399 * one can then use irq_create_mapping() to
400 * explicitely change them
401 */
402 ops->map(host, i, i, 0);
403 }
404 break;
405 case IRQ_HOST_MAP_LINEAR:
406 rmap = (unsigned int *)(host + 1);
407 for (i = 0; i < revmap_arg; i++)
408 rmap[i] = IRQ_NONE;
409 host->revmap_data.linear.size = revmap_arg;
410 smp_wmb();
411 host->revmap_data.linear.revmap = rmap;
412 break;
413 default:
414 break;
415 }
416
417 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
418
419 return host;
297} 420}
298 421
299/* Create a mapping for a real_irq if it doesn't already exist. 422struct irq_host *irq_find_host(struct device_node *node)
300 * Return the virtual irq as a convenience.
301 */
302int virt_irq_create_mapping(unsigned int real_irq)
303{ 423{
304 unsigned int virq, first_virq; 424 struct irq_host *h, *found = NULL;
305 static int warned; 425 unsigned long flags;
426
427 /* We might want to match the legacy controller last since
428 * it might potentially be set to match all interrupts in
429 * the absence of a device node. This isn't a problem so far
430 * yet though...
431 */
432 spin_lock_irqsave(&irq_big_lock, flags);
433 list_for_each_entry(h, &irq_hosts, link)
434 if (h->ops->match == NULL || h->ops->match(h, node)) {
435 found = h;
436 break;
437 }
438 spin_unlock_irqrestore(&irq_big_lock, flags);
439 return found;
440}
441EXPORT_SYMBOL_GPL(irq_find_host);
442
443void irq_set_default_host(struct irq_host *host)
444{
445 pr_debug("irq: Default host set to @0x%p\n", host);
446
447 irq_default_host = host;
448}
306 449
307 if (ppc64_interrupt_controller == IC_OPEN_PIC) 450void irq_set_virq_count(unsigned int count)
308 return real_irq; /* no mapping for openpic (for now) */ 451{
452 pr_debug("irq: Trying to set virq count to %d\n", count);
309 453
310 if (ppc64_interrupt_controller == IC_CELL_PIC) 454 BUG_ON(count < NUM_ISA_INTERRUPTS);
311 return real_irq; /* no mapping for iic either */ 455 if (count < NR_IRQS)
456 irq_virq_count = count;
457}
312 458
313 /* don't map interrupts < MIN_VIRT_IRQ */ 459unsigned int irq_create_mapping(struct irq_host *host,
314 if (real_irq < MIN_VIRT_IRQ) { 460 irq_hw_number_t hwirq,
315 virt_irq_to_real_map[real_irq] = real_irq; 461 unsigned int flags)
316 return real_irq; 462{
463 unsigned int virq, hint;
464
465 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx, 0x%x)\n",
466 host, hwirq, flags);
467
468 /* Look for default host if nececssary */
469 if (host == NULL)
470 host = irq_default_host;
471 if (host == NULL) {
472 printk(KERN_WARNING "irq_create_mapping called for"
473 " NULL host, hwirq=%lx\n", hwirq);
474 WARN_ON(1);
475 return NO_IRQ;
317 } 476 }
477 pr_debug("irq: -> using host @%p\n", host);
318 478
319 /* map to a number between MIN_VIRT_IRQ and max_virt_irq */ 479 /* Check if mapping already exist, if it does, call
320 virq = real_irq; 480 * host->ops->map() to update the flags
321 if (virq > max_virt_irq) 481 */
322 virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; 482 virq = irq_find_mapping(host, hwirq);
323 483 if (virq != IRQ_NONE) {
324 /* search for this number or a free slot */ 484 pr_debug("irq: -> existing mapping on virq %d\n", virq);
325 first_virq = virq; 485 host->ops->map(host, virq, hwirq, flags);
326 while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) { 486 return virq;
327 if (virt_irq_to_real_map[virq] == real_irq) 487 }
328 return virq; 488
329 if (++virq > max_virt_irq) 489 /* Get a virtual interrupt number */
330 virq = MIN_VIRT_IRQ; 490 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
331 if (virq == first_virq) 491 /* Handle legacy */
332 goto nospace; /* oops, no free slots */ 492 virq = (unsigned int)hwirq;
493 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
494 return NO_IRQ;
495 return virq;
496 } else {
497 /* Allocate a virtual interrupt number */
498 hint = hwirq % irq_virq_count;
499 virq = irq_alloc_virt(host, 1, hint);
500 if (virq == NO_IRQ) {
501 pr_debug("irq: -> virq allocation failed\n");
502 return NO_IRQ;
503 }
333 } 504 }
505 pr_debug("irq: -> obtained virq %d\n", virq);
334 506
335 virt_irq_to_real_map[virq] = real_irq; 507 /* Clear some flags */
508 get_irq_desc(virq)->status &= ~(IRQ_NOREQUEST | IRQ_LEVEL);
509
510 /* map it */
511 if (host->ops->map(host, virq, hwirq, flags)) {
512 pr_debug("irq: -> mapping failed, freeing\n");
513 irq_free_virt(virq, 1);
514 return NO_IRQ;
515 }
516 smp_wmb();
517 irq_map[virq].hwirq = hwirq;
518 smp_mb();
336 return virq; 519 return virq;
520}
521EXPORT_SYMBOL_GPL(irq_create_mapping);
337 522
338 nospace: 523extern unsigned int irq_create_of_mapping(struct device_node *controller,
339 if (!warned) { 524 u32 *intspec, unsigned int intsize)
340 printk(KERN_CRIT "Interrupt table is full\n"); 525{
341 printk(KERN_CRIT "Increase virt_irq_max (currently %d) " 526 struct irq_host *host;
342 "in your kernel sources and rebuild.\n", virt_irq_max); 527 irq_hw_number_t hwirq;
343 warned = 1; 528 unsigned int flags = IRQ_TYPE_NONE;
529
530 if (controller == NULL)
531 host = irq_default_host;
532 else
533 host = irq_find_host(controller);
534 if (host == NULL)
535 return NO_IRQ;
536
537 /* If host has no translation, then we assume interrupt line */
538 if (host->ops->xlate == NULL)
539 hwirq = intspec[0];
540 else {
541 if (host->ops->xlate(host, controller, intspec, intsize,
542 &hwirq, &flags))
543 return NO_IRQ;
344 } 544 }
345 return NO_IRQ; 545
546 return irq_create_mapping(host, hwirq, flags);
346} 547}
548EXPORT_SYMBOL_GPL(irq_create_of_mapping);
347 549
348/* 550unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
349 * In most cases will get a hit on the very first slot checked in the
350 * virt_irq_to_real_map. Only when there are a large number of
351 * IRQs will this be expensive.
352 */
353unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
354{ 551{
355 unsigned int virq; 552 struct of_irq oirq;
356 unsigned int first_virq;
357 553
358 virq = real_irq; 554 if (of_irq_map_one(dev, index, &oirq))
555 return NO_IRQ;
359 556
360 if (virq > max_virt_irq) 557 return irq_create_of_mapping(oirq.controller, oirq.specifier,
361 virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; 558 oirq.size);
559}
560EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
362 561
363 first_virq = virq; 562void irq_dispose_mapping(unsigned int virq)
563{
564 struct irq_host *host = irq_map[virq].host;
565 irq_hw_number_t hwirq;
566 unsigned long flags;
364 567
365 do { 568 WARN_ON (host == NULL);
366 if (virt_irq_to_real_map[virq] == real_irq) 569 if (host == NULL)
367 return virq; 570 return;
368 571
369 virq++; 572 /* Never unmap legacy interrupts */
573 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
574 return;
370 575
371 if (virq >= max_virt_irq) 576 /* remove chip and handler */
372 virq = 0; 577 set_irq_chip_and_handler(virq, NULL, NULL);
578
579 /* Make sure it's completed */
580 synchronize_irq(virq);
581
582 /* Tell the PIC about it */
583 if (host->ops->unmap)
584 host->ops->unmap(host, virq);
585 smp_mb();
586
587 /* Clear reverse map */
588 hwirq = irq_map[virq].hwirq;
589 switch(host->revmap_type) {
590 case IRQ_HOST_MAP_LINEAR:
591 if (hwirq < host->revmap_data.linear.size)
592 host->revmap_data.linear.revmap[hwirq] = IRQ_NONE;
593 break;
594 case IRQ_HOST_MAP_TREE:
595 /* Check if radix tree allocated yet */
596 if (host->revmap_data.tree.gfp_mask == 0)
597 break;
598 /* XXX radix tree not safe ! remove lock whem it becomes safe
599 * and use some RCU sync to make sure everything is ok before we
600 * can re-use that map entry
601 */
602 spin_lock_irqsave(&irq_big_lock, flags);
603 radix_tree_delete(&host->revmap_data.tree, hwirq);
604 spin_unlock_irqrestore(&irq_big_lock, flags);
605 break;
606 }
373 607
374 } while (first_virq != virq); 608 /* Destroy map */
609 smp_mb();
610 irq_map[virq].hwirq = host->inval_irq;
375 611
376 return NO_IRQ; 612 /* Set some flags */
613 get_irq_desc(virq)->status |= IRQ_NOREQUEST;
377 614
615 /* Free it */
616 irq_free_virt(virq, 1);
378} 617}
379#endif /* CONFIG_PPC64 */ 618EXPORT_SYMBOL_GPL(irq_dispose_mapping);
380 619
381#ifdef CONFIG_IRQSTACKS 620unsigned int irq_find_mapping(struct irq_host *host,
382struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; 621 irq_hw_number_t hwirq)
383struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; 622{
623 unsigned int i;
624 unsigned int hint = hwirq % irq_virq_count;
625
626 /* Look for default host if nececssary */
627 if (host == NULL)
628 host = irq_default_host;
629 if (host == NULL)
630 return NO_IRQ;
631
632 /* legacy -> bail early */
633 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
634 return hwirq;
635
636 /* Slow path does a linear search of the map */
637 if (hint < NUM_ISA_INTERRUPTS)
638 hint = NUM_ISA_INTERRUPTS;
639 i = hint;
640 do {
641 if (irq_map[i].host == host &&
642 irq_map[i].hwirq == hwirq)
643 return i;
644 i++;
645 if (i >= irq_virq_count)
646 i = NUM_ISA_INTERRUPTS;
647 } while(i != hint);
648 return NO_IRQ;
649}
650EXPORT_SYMBOL_GPL(irq_find_mapping);
384 651
385void irq_ctx_init(void) 652
653unsigned int irq_radix_revmap(struct irq_host *host,
654 irq_hw_number_t hwirq)
386{ 655{
387 struct thread_info *tp; 656 struct radix_tree_root *tree;
388 int i; 657 struct irq_map_entry *ptr;
658 unsigned int virq;
659 unsigned long flags;
389 660
390 for_each_possible_cpu(i) { 661 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
391 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
392 tp = softirq_ctx[i];
393 tp->cpu = i;
394 tp->preempt_count = SOFTIRQ_OFFSET;
395 662
396 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); 663 /* Check if the radix tree exist yet. We test the value of
397 tp = hardirq_ctx[i]; 664 * the gfp_mask for that. Sneaky but saves another int in the
398 tp->cpu = i; 665 * structure. If not, we fallback to slow mode
399 tp->preempt_count = HARDIRQ_OFFSET; 666 */
667 tree = &host->revmap_data.tree;
668 if (tree->gfp_mask == 0)
669 return irq_find_mapping(host, hwirq);
670
671 /* XXX Current radix trees are NOT SMP safe !!! Remove that lock
672 * when that is fixed (when Nick's patch gets in
673 */
674 spin_lock_irqsave(&irq_big_lock, flags);
675
676 /* Now try to resolve */
677 ptr = radix_tree_lookup(tree, hwirq);
678 /* Found it, return */
679 if (ptr) {
680 virq = ptr - irq_map;
681 goto bail;
400 } 682 }
683
684 /* If not there, try to insert it */
685 virq = irq_find_mapping(host, hwirq);
686 if (virq != NO_IRQ)
687 radix_tree_insert(tree, virq, &irq_map[virq]);
688 bail:
689 spin_unlock_irqrestore(&irq_big_lock, flags);
690 return virq;
401} 691}
402 692
403static inline void do_softirq_onstack(void) 693unsigned int irq_linear_revmap(struct irq_host *host,
694 irq_hw_number_t hwirq)
404{ 695{
405 struct thread_info *curtp, *irqtp; 696 unsigned int *revmap;
406 697
407 curtp = current_thread_info(); 698 WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
408 irqtp = softirq_ctx[smp_processor_id()]; 699
409 irqtp->task = curtp->task; 700 /* Check revmap bounds */
410 call_do_softirq(irqtp); 701 if (unlikely(hwirq >= host->revmap_data.linear.size))
411 irqtp->task = NULL; 702 return irq_find_mapping(host, hwirq);
703
704 /* Check if revmap was allocated */
705 revmap = host->revmap_data.linear.revmap;
706 if (unlikely(revmap == NULL))
707 return irq_find_mapping(host, hwirq);
708
709 /* Fill up revmap with slow path if no mapping found */
710 if (unlikely(revmap[hwirq] == NO_IRQ))
711 revmap[hwirq] = irq_find_mapping(host, hwirq);
712
713 return revmap[hwirq];
412} 714}
413 715
414#else 716unsigned int irq_alloc_virt(struct irq_host *host,
415#define do_softirq_onstack() __do_softirq() 717 unsigned int count,
416#endif /* CONFIG_IRQSTACKS */ 718 unsigned int hint)
719{
720 unsigned long flags;
721 unsigned int i, j, found = NO_IRQ;
722 unsigned int limit = irq_virq_count - count;
417 723
418void do_softirq(void) 724 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
725 return NO_IRQ;
726
727 spin_lock_irqsave(&irq_big_lock, flags);
728
729 /* Use hint for 1 interrupt if any */
730 if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
731 hint < irq_virq_count && irq_map[hint].host == NULL) {
732 found = hint;
733 goto hint_found;
734 }
735
736 /* Look for count consecutive numbers in the allocatable
737 * (non-legacy) space
738 */
739 for (i = NUM_ISA_INTERRUPTS; i <= limit; ) {
740 for (j = i; j < (i + count); j++)
741 if (irq_map[j].host != NULL) {
742 i = j + 1;
743 continue;
744 }
745 found = i;
746 break;
747 }
748 if (found == NO_IRQ) {
749 spin_unlock_irqrestore(&irq_big_lock, flags);
750 return NO_IRQ;
751 }
752 hint_found:
753 for (i = found; i < (found + count); i++) {
754 irq_map[i].hwirq = host->inval_irq;
755 smp_wmb();
756 irq_map[i].host = host;
757 }
758 spin_unlock_irqrestore(&irq_big_lock, flags);
759 return found;
760}
761
762void irq_free_virt(unsigned int virq, unsigned int count)
419{ 763{
420 unsigned long flags; 764 unsigned long flags;
765 unsigned int i;
421 766
422 if (in_interrupt()) 767 WARN_ON (virq < NUM_ISA_INTERRUPTS);
423 return; 768 WARN_ON (count == 0 || (virq + count) > irq_virq_count);
424 769
425 local_irq_save(flags); 770 spin_lock_irqsave(&irq_big_lock, flags);
771 for (i = virq; i < (virq + count); i++) {
772 struct irq_host *host;
426 773
427 if (local_softirq_pending()) { 774 if (i < NUM_ISA_INTERRUPTS ||
428 account_system_vtime(current); 775 (virq + count) > irq_virq_count)
429 local_bh_disable(); 776 continue;
430 do_softirq_onstack(); 777
431 account_system_vtime(current); 778 host = irq_map[i].host;
432 __local_bh_enable(); 779 irq_map[i].hwirq = host->inval_irq;
780 smp_wmb();
781 irq_map[i].host = NULL;
433 } 782 }
783 spin_unlock_irqrestore(&irq_big_lock, flags);
784}
434 785
435 local_irq_restore(flags); 786void irq_early_init(void)
787{
788 unsigned int i;
789
790 for (i = 0; i < NR_IRQS; i++)
791 get_irq_desc(i)->status |= IRQ_NOREQUEST;
436} 792}
437EXPORT_SYMBOL(do_softirq); 793
794/* We need to create the radix trees late */
795static int irq_late_init(void)
796{
797 struct irq_host *h;
798 unsigned long flags;
799
800 spin_lock_irqsave(&irq_big_lock, flags);
801 list_for_each_entry(h, &irq_hosts, link) {
802 if (h->revmap_type == IRQ_HOST_MAP_TREE)
803 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
804 }
805 spin_unlock_irqrestore(&irq_big_lock, flags);
806
807 return 0;
808}
809arch_initcall(irq_late_init);
810
811#endif /* CONFIG_PPC_MERGE */
438 812
439#ifdef CONFIG_PCI_MSI 813#ifdef CONFIG_PCI_MSI
440int pci_enable_msi(struct pci_dev * pdev) 814int pci_enable_msi(struct pci_dev * pdev)
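
The hunk above is the whole of the new mapping layer, so it is worth sketching how a platform PIC driver would sit on top of it. The snippet below is illustrative only: the my_pic_* names are invented, and the irq_host_ops callback prototypes are inferred from the call sites in irq.c above (ops->map(host, virq, hwirq, flags), ops->match(h, node), ...), not quoted from the new header.

	/* Hypothetical PIC glue for the irq_host API added above */
	static int my_pic_map(struct irq_host *h, unsigned int virq,
			      irq_hw_number_t hw, unsigned int flags)
	{
		/* attach a (made-up) irq_chip and flow handler to the virq */
		set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
		return 0;
	}

	static struct irq_host_ops my_pic_ops = {
		.map	= my_pic_map,	/* .match/.xlate/.unmap are optional */
	};

	static void __init my_pic_init(void)
	{
		/* linear reverse map for 64 hw sources, hwirq 0 = invalid */
		struct irq_host *host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 64,
						       &my_pic_ops, 0);
		BUG_ON(host == NULL);
		irq_set_default_host(host);
	}

irq_create_mapping(host, hwirq, 0) would then allocate a virq, call my_pic_map() for it and record hwirq in irq_map[], while irq_linear_revmap() provides the fast reverse lookup for the interrupt path.
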
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 4cf0b971976b..7e98e778b52f 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -28,6 +28,7 @@ static struct legacy_serial_info {
 	struct device_node *np;
 	unsigned int speed;
 	unsigned int clock;
+	int irq_check_parent;
 	phys_addr_t taddr;
 } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
 static unsigned int legacy_serial_count;
@@ -36,7 +37,7 @@ static int legacy_serial_console = -1;
 static int __init add_legacy_port(struct device_node *np, int want_index,
 				  int iotype, phys_addr_t base,
 				  phys_addr_t taddr, unsigned long irq,
-				  upf_t flags)
+				  upf_t flags, int irq_check_parent)
 {
 	u32 *clk, *spd, clock = BASE_BAUD * 16;
 	int index;
@@ -68,7 +69,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
 	if (legacy_serial_infos[index].np != 0) {
 		/* if we still have some room, move it, else override */
 		if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) {
-			printk(KERN_INFO "Moved legacy port %d -> %d\n",
+			printk(KERN_DEBUG "Moved legacy port %d -> %d\n",
 			       index, legacy_serial_count);
 			legacy_serial_ports[legacy_serial_count] =
 				legacy_serial_ports[index];
@@ -76,7 +77,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
 				legacy_serial_infos[index];
 			legacy_serial_count++;
 		} else {
-			printk(KERN_INFO "Replacing legacy port %d\n", index);
+			printk(KERN_DEBUG "Replacing legacy port %d\n", index);
 		}
 	}
 
@@ -95,10 +96,11 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
 	legacy_serial_infos[index].np = of_node_get(np);
 	legacy_serial_infos[index].clock = clock;
 	legacy_serial_infos[index].speed = spd ? *spd : 0;
+	legacy_serial_infos[index].irq_check_parent = irq_check_parent;
 
-	printk(KERN_INFO "Found legacy serial port %d for %s\n",
+	printk(KERN_DEBUG "Found legacy serial port %d for %s\n",
 	       index, np->full_name);
-	printk(KERN_INFO "  %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
+	printk(KERN_DEBUG "  %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
 	       (iotype == UPIO_PORT) ? "port" : "mem",
 	       (unsigned long long)base, (unsigned long long)taddr, irq,
 	       legacy_serial_ports[index].uartclk,
@@ -126,11 +128,13 @@ static int __init add_legacy_soc_port(struct device_node *np,
 		return -1;
 
 	addr = of_translate_address(soc_dev, addrp);
+	if (addr == OF_BAD_ADDR)
+		return -1;
 
 	/* Add port, irq will be dealt with later. We passed a translated
 	 * IO port value. It will be fixed up later along with the irq
 	 */
-	return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags);
+	return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
 }
 
 static int __init add_legacy_isa_port(struct device_node *np,
@@ -141,6 +145,8 @@ static int __init add_legacy_isa_port(struct device_node *np,
 	int index = -1;
 	phys_addr_t taddr;
 
+	DBG(" -> add_legacy_isa_port(%s)\n", np->full_name);
+
 	/* Get the ISA port number */
 	reg = (u32 *)get_property(np, "reg", NULL);
 	if (reg == NULL)
@@ -161,9 +167,12 @@ static int __init add_legacy_isa_port(struct device_node *np,
 
 	/* Translate ISA address */
 	taddr = of_translate_address(np, reg);
+	if (taddr == OF_BAD_ADDR)
+		return -1;
 
 	/* Add port, irq will be dealt with later */
-	return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr, NO_IRQ, UPF_BOOT_AUTOCONF);
+	return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr,
+			       NO_IRQ, UPF_BOOT_AUTOCONF, 0);
 
 }
 
@@ -176,6 +185,8 @@ static int __init add_legacy_pci_port(struct device_node *np,
 	unsigned int flags;
 	int iotype, index = -1, lindex = 0;
 
+	DBG(" -> add_legacy_pci_port(%s)\n", np->full_name);
+
 	/* We only support ports that have a clock frequency properly
 	 * encoded in the device-tree (that is have an fcode). Anything
 	 * else can't be used that early and will be normally probed by
@@ -194,6 +205,8 @@ static int __init add_legacy_pci_port(struct device_node *np,
 	/* We only support BAR 0 for now */
 	iotype = (flags & IORESOURCE_MEM) ? UPIO_MEM : UPIO_PORT;
 	addr = of_translate_address(pci_dev, addrp);
+	if (addr == OF_BAD_ADDR)
+		return -1;
 
 	/* Set the IO base to the same as the translated address for MMIO,
 	 * or to the domain local IO base for PIO (it will be fixed up later)
@@ -231,7 +244,8 @@ static int __init add_legacy_pci_port(struct device_node *np,
 	/* Add port, irq will be dealt with later. We passed a translated
 	 * IO port value. It will be fixed up later along with the irq
 	 */
-	return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, UPF_BOOT_AUTOCONF);
+	return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
+			       UPF_BOOT_AUTOCONF, np != pci_dev);
 }
 #endif
 
@@ -362,27 +376,22 @@ static void __init fixup_port_irq(int index,
 				  struct device_node *np,
 				  struct plat_serial8250_port *port)
 {
+	unsigned int virq;
+
 	DBG("fixup_port_irq(%d)\n", index);
 
-	/* Check for interrupts in that node */
-	if (np->n_intrs > 0) {
-		port->irq = np->intrs[0].line;
-		DBG(" port %d (%s), irq=%d\n",
-		    index, np->full_name, port->irq);
-		return;
+	virq = irq_of_parse_and_map(np, 0);
+	if (virq == NO_IRQ && legacy_serial_infos[index].irq_check_parent) {
+		np = of_get_parent(np);
+		if (np == NULL)
+			return;
+		virq = irq_of_parse_and_map(np, 0);
+		of_node_put(np);
 	}
-
-	/* Check for interrupts in the parent */
-	np = of_get_parent(np);
-	if (np == NULL)
+	if (virq == NO_IRQ)
 		return;
 
-	if (np->n_intrs > 0) {
-		port->irq = np->intrs[0].line;
-		DBG(" port %d (%s), irq=%d\n",
-		    index, np->full_name, port->irq);
-	}
-	of_node_put(np);
+	port->irq = virq;
 }
 
 static void __init fixup_port_pio(int index,
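
fixup_port_irq() above is the canonical consumer of the new calls: it resolves the node's interrupt specifier straight to a virq instead of reading the removed np->intrs[] array. The same pattern in a hypothetical driver probe looks roughly like this (my_isr, my_dev and the error codes are placeholders; only irq_of_parse_and_map()/irq_dispose_mapping() come from this patch):

	unsigned int virq;

	virq = irq_of_parse_and_map(np, 0);
	if (virq == NO_IRQ)
		return -ENODEV;
	if (request_irq(virq, my_isr, 0, "my_dev", my_dev)) {
		irq_dispose_mapping(virq);	/* give the virq back */
		return -EBUSY;
	}
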
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 0c3c70d115c6..bfb407fc1aa1 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -51,12 +51,14 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call___do_IRQ)
+_GLOBAL(call_handle_irq)
+	ld	r8,0(r7)
 	mflr	r0
 	std	r0,16(r1)
-	stdu	r1,THREAD_SIZE-112(r5)
-	mr	r1,r5
-	bl	.__do_IRQ
+	mtctr	r8
+	stdu	r1,THREAD_SIZE-112(r6)
+	mr	r1,r6
+	bctrl
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
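
The rewritten stub mirrors the do_IRQ() change in irq.c above: rather than a hard-wired bl to .__do_IRQ, the chosen flow handler arrives as the fifth argument (r7), ld r8,0(r7) dereferences the ppc64 function descriptor to get its entry point, and bctrl makes the indirect call after the stack switch to the thread_info now passed in r6. Its C-level shape, inferred from the call site rather than quoted from a header, is roughly:

	/* inferred prototype; handler is __do_IRQ or a desc->handle_irq
	 * flow handler, invoked on the hardirq stack irqtp */
	void call_handle_irq(int irq, struct irq_desc *desc,
			     struct pt_regs *regs,
			     struct thread_info *irqtp, void *handler);
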
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 1333335c474e..898dae8ab6d9 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -1404,6 +1404,43 @@ pcibios_update_irq(struct pci_dev *dev, int irq)
 	/* XXX FIXME - update OF device tree node interrupt property */
 }
 
+#ifdef CONFIG_PPC_MERGE
+/* XXX This is a copy of the ppc64 version. This is temporary until we start
+ * merging the 2 PCI layers
+ */
+/*
+ * Reads the interrupt pin to determine if interrupt is used by the card.
+ * If the interrupt is used, then gets the interrupt line from the
+ * openfirmware and sets it in the pci_dev and pci_config line.
+ */
+int pci_read_irq_line(struct pci_dev *pci_dev)
+{
+	struct of_irq oirq;
+	unsigned int virq;
+
+	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
+
+	if (of_irq_map_pci(pci_dev, &oirq)) {
+		DBG(" -> failed !\n");
+		return -1;
+	}
+
+	DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
+	    oirq.size, oirq.specifier[0], oirq.controller->full_name);
+
+	virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size);
+	if (virq == NO_IRQ) {
+		DBG(" -> failed to map !\n");
+		return -1;
+	}
+	pci_dev->irq = virq;
+	pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
+
+	return 0;
+}
+EXPORT_SYMBOL(pci_read_irq_line);
+#endif /* CONFIG_PPC_MERGE */
+
 int pcibios_enable_device(struct pci_dev *dev, int mask)
 {
 	u16 cmd, old_cmd;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index bea8451fb57b..efc0b5559ee0 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -398,12 +398,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 	} else {
 		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
 		dev->rom_base_reg = PCI_ROM_ADDRESS;
+		/* Maybe do a default OF mapping here */
 		dev->irq = NO_IRQ;
-		if (node->n_intrs > 0) {
-			dev->irq = node->intrs[0].line;
-			pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
-					      dev->irq);
-		}
 	}
 
 	pci_parse_of_addrs(node, dev);
@@ -1288,23 +1284,26 @@ EXPORT_SYMBOL(pcibios_fixup_bus);
 	 */
 int pci_read_irq_line(struct pci_dev *pci_dev)
 {
-	u8 intpin;
-	struct device_node *node;
-
-	pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin);
-	if (intpin == 0)
-		return 0;
+	struct of_irq oirq;
+	unsigned int virq;
 
-	node = pci_device_to_OF_node(pci_dev);
-	if (node == NULL)
-		return -1;
+	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
 
-	if (node->n_intrs == 0)
+	if (of_irq_map_pci(pci_dev, &oirq)) {
+		DBG(" -> failed !\n");
 		return -1;
+	}
 
-	pci_dev->irq = node->intrs[0].line;
+	DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
+	    oirq.size, oirq.specifier[0], oirq.controller->full_name);
 
-	pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
+	virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size);
+	if (virq == NO_IRQ) {
+		DBG(" -> failed to map !\n");
+		return -1;
+	}
+	pci_dev->irq = virq;
+	pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
 
 	return 0;
 }
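
Both PCI layers now resolve interrupts identically; the only driver-visible effect is that pdev->irq holds a virq produced by irq_create_of_mapping() rather than a raw line from node->intrs[]. An illustrative caller (my_isr and the device name are placeholders, not from the patch):

	if (pci_read_irq_line(pdev) == 0 && pdev->irq != NO_IRQ)
		err = request_irq(pdev->irq, my_isr, 0, "my_dev", pdev);
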
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4c524cb52184..a1787ffb6319 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
+#include <linux/irq.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -86,424 +87,6 @@ static DEFINE_RWLOCK(devtree_lock);
 /* export that to outside world */
 struct device_node *of_chosen;
 
-struct device_node *dflt_interrupt_controller;
-int num_interrupt_controllers;
-
-/*
- * Wrapper for allocating memory for various data that needs to be
- * attached to device nodes as they are processed at boot or when
- * added to the device tree later (e.g. DLPAR).  At boot there is
- * already a region reserved so we just increment *mem_start by size;
- * otherwise we call kmalloc.
- */
-static void * prom_alloc(unsigned long size, unsigned long *mem_start)
-{
-	unsigned long tmp;
-
-	if (!mem_start)
-		return kmalloc(size, GFP_KERNEL);
-
-	tmp = *mem_start;
-	*mem_start += size;
-	return (void *)tmp;
-}
-
-/*
- * Find the device_node with a given phandle.
- */
-static struct device_node * find_phandle(phandle ph)
-{
-	struct device_node *np;
-
-	for (np = allnodes; np != 0; np = np->allnext)
-		if (np->linux_phandle == ph)
-			return np;
-	return NULL;
-}
-
-/*
- * Find the interrupt parent of a node.
- */
-static struct device_node * __devinit intr_parent(struct device_node *p)
-{
-	phandle *parp;
-
-	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
-	if (parp == NULL)
-		return p->parent;
-	p = find_phandle(*parp);
-	if (p != NULL)
-		return p;
-	/*
-	 * On a powermac booted with BootX, we don't get to know the
-	 * phandles for any nodes, so find_phandle will return NULL.
-	 * Fortunately these machines only have one interrupt controller
-	 * so there isn't in fact any ambiguity.  -- paulus
-	 */
-	if (num_interrupt_controllers == 1)
-		p = dflt_interrupt_controller;
-	return p;
-}
-
-/*
- * Find out the size of each entry of the interrupts property
- * for a node.
- */
-int __devinit prom_n_intr_cells(struct device_node *np)
-{
-	struct device_node *p;
-	unsigned int *icp;
-
-	for (p = np; (p = intr_parent(p)) != NULL; ) {
-		icp = (unsigned int *)
-			get_property(p, "#interrupt-cells", NULL);
-		if (icp != NULL)
-			return *icp;
-		if (get_property(p, "interrupt-controller", NULL) != NULL
-		    || get_property(p, "interrupt-map", NULL) != NULL) {
-			printk("oops, node %s doesn't have #interrupt-cells\n",
-			       p->full_name);
-			return 1;
-		}
-	}
-#ifdef DEBUG_IRQ
-	printk("prom_n_intr_cells failed for %s\n", np->full_name);
-#endif
-	return 1;
-}
-
-/*
- * Map an interrupt from a device up to the platform interrupt
- * descriptor.
- */
-static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
-				   struct device_node *np, unsigned int *ints,
-				   int nintrc)
-{
-	struct device_node *p, *ipar;
-	unsigned int *imap, *imask, *ip;
-	int i, imaplen, match;
-	int newintrc = 0, newaddrc = 0;
-	unsigned int *reg;
-	int naddrc;
-
-	reg = (unsigned int *) get_property(np, "reg", NULL);
-	naddrc = prom_n_addr_cells(np);
-	p = intr_parent(np);
-	while (p != NULL) {
-		if (get_property(p, "interrupt-controller", NULL) != NULL)
-			/* this node is an interrupt controller, stop here */
-			break;
-		imap = (unsigned int *)
-			get_property(p, "interrupt-map", &imaplen);
-		if (imap == NULL) {
-			p = intr_parent(p);
-			continue;
-		}
-		imask = (unsigned int *)
-			get_property(p, "interrupt-map-mask", NULL);
-		if (imask == NULL) {
-			printk("oops, %s has interrupt-map but no mask\n",
-			       p->full_name);
-			return 0;
-		}
-		imaplen /= sizeof(unsigned int);
-		match = 0;
-		ipar = NULL;
-		while (imaplen > 0 && !match) {
-			/* check the child-interrupt field */
-			match = 1;
-			for (i = 0; i < naddrc && match; ++i)
-				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
-			for (; i < naddrc + nintrc && match; ++i)
-				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
-			imap += naddrc + nintrc;
-			imaplen -= naddrc + nintrc;
-			/* grab the interrupt parent */
-			ipar = find_phandle((phandle) *imap++);
-			--imaplen;
-			if (ipar == NULL && num_interrupt_controllers == 1)
-				/* cope with BootX not giving us phandles */
-				ipar = dflt_interrupt_controller;
-			if (ipar == NULL) {
-				printk("oops, no int parent %x in map of %s\n",
-				       imap[-1], p->full_name);
-				return 0;
-			}
-			/* find the parent's # addr and intr cells */
-			ip = (unsigned int *)
-				get_property(ipar, "#interrupt-cells", NULL);
-			if (ip == NULL) {
-				printk("oops, no #interrupt-cells on %s\n",
-				       ipar->full_name);
-				return 0;
-			}
-			newintrc = *ip;
-			ip = (unsigned int *)
-				get_property(ipar, "#address-cells", NULL);
-			newaddrc = (ip == NULL)? 0: *ip;
-			imap += newaddrc + newintrc;
-			imaplen -= newaddrc + newintrc;
-		}
-		if (imaplen < 0) {
-			printk("oops, error decoding int-map on %s, len=%d\n",
-			       p->full_name, imaplen);
-			return 0;
-		}
-		if (!match) {
-#ifdef DEBUG_IRQ
-			printk("oops, no match in %s int-map for %s\n",
-			       p->full_name, np->full_name);
-#endif
-			return 0;
-		}
-		p = ipar;
-		naddrc = newaddrc;
-		nintrc = newintrc;
-		ints = imap - nintrc;
-		reg = ints - naddrc;
-	}
-	if (p == NULL) {
-#ifdef DEBUG_IRQ
-		printk("hmmm, int tree for %s doesn't have ctrler\n",
-		       np->full_name);
-#endif
-		return 0;
-	}
-	*irq = ints;
-	*ictrler = p;
-	return nintrc;
-}
-
-static unsigned char map_isa_senses[4] = {
-	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
-	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
-	IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
-	IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
-};
-
-static unsigned char map_mpic_senses[4] = {
-	IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
-	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
-	/* 2 seems to be used for the 8259 cascade... */
-	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
-	IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
-};
-
-static int __devinit finish_node_interrupts(struct device_node *np,
-					    unsigned long *mem_start,
-					    int measure_only)
-{
-	unsigned int *ints;
-	int intlen, intrcells, intrcount;
-	int i, j, n, sense;
-	unsigned int *irq, virq;
-	struct device_node *ic;
-	int trace = 0;
-
-	//#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0)
-#define TRACE(fmt...)
-
-	if (!strcmp(np->name, "smu-doorbell"))
-		trace = 1;
-
-	TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n",
-	      num_interrupt_controllers);
-
-	if (num_interrupt_controllers == 0) {
-		/*
-		 * Old machines just have a list of interrupt numbers
-		 * and no interrupt-controller nodes.
-		 */
-		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
-						     &intlen);
-		/* XXX old interpret_pci_props looked in parent too */
-		/* XXX old interpret_macio_props looked for interrupts
-		   before AAPL,interrupts */
-		if (ints == NULL)
-			ints = (unsigned int *) get_property(np, "interrupts",
-							     &intlen);
-		if (ints == NULL)
-			return 0;
-
-		np->n_intrs = intlen / sizeof(unsigned int);
-		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
-				       mem_start);
-		if (!np->intrs)
-			return -ENOMEM;
-		if (measure_only)
-			return 0;
-
-		for (i = 0; i < np->n_intrs; ++i) {
-			np->intrs[i].line = *ints++;
-			np->intrs[i].sense = IRQ_SENSE_LEVEL
-				| IRQ_POLARITY_NEGATIVE;
-		}
-		return 0;
-	}
-
-	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
-	TRACE("ints=%p, intlen=%d\n", ints, intlen);
-	if (ints == NULL)
-		return 0;
-	intrcells = prom_n_intr_cells(np);
-	intlen /= intrcells * sizeof(unsigned int);
-	TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen);
-	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
-	if (!np->intrs)
-		return -ENOMEM;
-
-	if (measure_only)
-		return 0;
-
-	intrcount = 0;
-	for (i = 0; i < intlen; ++i, ints += intrcells) {
-		n = map_interrupt(&irq, &ic, np, ints, intrcells);
-		TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n);
-		if (n <= 0)
-			continue;
-
-		/* don't map IRQ numbers under a cascaded 8259 controller */
-		if (ic && device_is_compatible(ic, "chrp,iic")) {
-			np->intrs[intrcount].line = irq[0];
-			sense = (n > 1)? (irq[1] & 3): 3;
-			np->intrs[intrcount].sense = map_isa_senses[sense];
-		} else {
-			virq = virt_irq_create_mapping(irq[0]);
-			TRACE("virq=%d\n", virq);
-#ifdef CONFIG_PPC64
-			if (virq == NO_IRQ) {
-				printk(KERN_CRIT "Could not allocate interrupt"
-				       " number for %s\n", np->full_name);
-				continue;
-			}
-#endif
-			np->intrs[intrcount].line = irq_offset_up(virq);
-			sense = (n > 1)? (irq[1] & 3): 1;
-
-			/* Apple uses bits in there in a different way, let's
-			 * only keep the real sense bit on macs
-			 */
-			if (machine_is(powermac))
-				sense &= 0x1;
-			np->intrs[intrcount].sense = map_mpic_senses[sense];
-		}
-
-#ifdef CONFIG_PPC64
-		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
-		if (machine_is(powermac) && ic && ic->parent) {
-			char *name = get_property(ic->parent, "name", NULL);
-			if (name && !strcmp(name, "u3"))
-				np->intrs[intrcount].line += 128;
-			else if (!(name && (!strcmp(name, "mac-io") ||
-					    !strcmp(name, "u4"))))
-				/* ignore other cascaded controllers, such as
-				   the k2-sata-root */
-				break;
-		}
-#endif /* CONFIG_PPC64 */
-		if (n > 2) {
-			printk("hmmm, got %d intr cells for %s:", n,
-			       np->full_name);
-			for (j = 0; j < n; ++j)
-				printk(" %d", irq[j]);
-			printk("\n");
-		}
-		++intrcount;
-	}
-	np->n_intrs = intrcount;
-
-	return 0;
-}
-
-static int __devinit finish_node(struct device_node *np,
-				 unsigned long *mem_start,
-				 int measure_only)
-{
-	struct device_node *child;
-	int rc = 0;
-
-	rc = finish_node_interrupts(np, mem_start, measure_only);
-	if (rc)
-		goto out;
-
-	for (child = np->child; child != NULL; child = child->sibling) {
-		rc = finish_node(child, mem_start, measure_only);
-		if (rc)
-			goto out;
-	}
-out:
-	return rc;
-}
-
-static void __init scan_interrupt_controllers(void)
-{
-	struct device_node *np;
-	int n = 0;
-	char *name, *ic;
-	int iclen;
-
-	for (np = allnodes; np != NULL; np = np->allnext) {
-		ic = get_property(np, "interrupt-controller", &iclen);
-		name = get_property(np, "name", NULL);
-		/* checking iclen makes sure we don't get a false
-		   match on /chosen.interrupt_controller */
-		if ((name != NULL
-		     && strcmp(name, "interrupt-controller") == 0)
-		    || (ic != NULL && iclen == 0
-			&& strcmp(name, "AppleKiwi"))) {
-			if (n == 0)
-				dflt_interrupt_controller = np;
-			++n;
-		}
-	}
-	num_interrupt_controllers = n;
-}
-
-/**
- * finish_device_tree is called once things are running normally
- * (i.e. with text and data mapped to the address they were linked at).
- * It traverses the device tree and fills in some of the additional,
- * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt
- * mapping is also initialized at this point.
- */
-void __init finish_device_tree(void)
-{
-	unsigned long start, end, size = 0;
-
-	DBG(" -> finish_device_tree\n");
-
-#ifdef CONFIG_PPC64
-	/* Initialize virtual IRQ map */
-	virt_irq_init();
-#endif
-	scan_interrupt_controllers();
-
-	/*
-	 * Finish device-tree (pre-parsing some properties etc...)
-	 * We do this in 2 passes. One with "measure_only" set, which
-	 * will only measure the amount of memory needed, then we can
-	 * allocate that memory, and call finish_node again. However,
-	 * we must be careful as most routines will fail nowadays when
-	 * prom_alloc() returns 0, so we must make sure our first pass
-	 * doesn't start at 0. We pre-initialize size to 16 for that
-	 * reason and then remove those additional 16 bytes
-	 */
-	size = 16;
-	finish_node(allnodes, &size, 1);
-	size -= 16;
-
-	if (0 == size)
-		end = start = 0;
-	else
-		end = start = (unsigned long)__va(lmb_alloc(size, 128));
-
-	finish_node(allnodes, &end, 0);
-	BUG_ON(end != start + size);
-
-	DBG(" <- finish_device_tree\n");
-}
-
 static inline char *find_flat_dt_string(u32 offset)
 {
 	return ((char *)initial_boot_params) +
@@ -1389,27 +972,6 @@ prom_n_size_cells(struct device_node* np)
 EXPORT_SYMBOL(prom_n_size_cells);
 
 /**
- * Work out the sense (active-low level / active-high edge)
- * of each interrupt from the device tree.
- */
-void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
-{
-	struct device_node *np;
-	int i, j;
-
-	/* default to level-triggered */
-	memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
-
-	for (np = allnodes; np != 0; np = np->allnext) {
-		for (j = 0; j < np->n_intrs; j++) {
-			i = np->intrs[j].line;
-			if (i >= off && i < max)
-				senses[i-off] = np->intrs[j].sense;
-		}
-	}
-}
-
-/**
  * Construct and return a list of the device_nodes with a given name.
  */
 struct device_node *find_devices(const char *name)
@@ -1808,7 +1370,6 @@ static void of_node_release(struct kref *kref)
 			node->deadprops = NULL;
 		}
 	}
-	kfree(node->intrs);
 	kfree(node->full_name);
 	kfree(node->data);
 	kfree(node);
@@ -1881,13 +1442,7 @@ void of_detach_node(const struct device_node *np)
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Fix up the uninitialized fields in a new device node:
- * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
- *
- * A lot of boot-time code is duplicated here, because functions such
- * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
- * slab allocator.
- *
- * This should probably be split up into smaller chunks.
+ * name, type and pci-specific fields
  */
 
 static int of_finish_dynamic_node(struct device_node *node)
@@ -1928,8 +1483,6 @@ static int prom_reconfig_notifier(struct notifier_block *nb,
 	switch (action) {
 	case PSERIES_RECONFIG_ADD:
 		err = of_finish_dynamic_node(node);
-		if (!err)
-			finish_node(node, NULL, 0);
 		if (err < 0) {
 			printk(KERN_ERR "finish_node returned %d\n", err);
 			err = NOTIFY_BAD;
@@ -1975,8 +1528,7 @@ struct property *of_find_property(struct device_node *np, const char *name,
  * Find a property with a given name for a given node
  * and return the value.
  */
-unsigned char *get_property(struct device_node *np, const char *name,
-			    int *lenp)
+void *get_property(struct device_node *np, const char *name, int *lenp)
 {
 	struct property *pp = of_find_property(np,name,lenp);
 	return pp ? pp->value : NULL;
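
A side effect of the get_property() change closing this hunk: with a void * return type, C callers can assign the result to any pointer type directly, so the (u32 *) and (char *) casts seen throughout the old code above become unnecessary. For example (illustrative only):

	u32 *icells = get_property(np, "#interrupt-cells", NULL);
	char *model = get_property(np, "model", NULL);
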
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1e95a9f8cda1..ebd501a59abd 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1990,12 +1990,22 @@ static void __init flatten_device_tree(void)
 static void __init fixup_device_tree_maple(void)
 {
 	phandle isa;
+	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
 	u32 isa_ranges[6];
-
-	isa = call_prom("finddevice", 1, 1, ADDR("/ht@0/isa@4"));
+	char *name;
+
+	name = "/ht@0/isa@4";
+	isa = call_prom("finddevice", 1, 1, ADDR(name));
+	if (!PHANDLE_VALID(isa)) {
+		name = "/ht@0/isa@6";
+		isa = call_prom("finddevice", 1, 1, ADDR(name));
+		rloc = 0x01003000; /* IO space; PCI device = 6 */
+	}
 	if (!PHANDLE_VALID(isa))
 		return;
 
+	if (prom_getproplen(isa, "ranges") != 12)
+		return;
 	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
 		== PROM_ERROR)
 		return;
@@ -2005,15 +2015,15 @@ static void __init fixup_device_tree_maple(void)
 	    isa_ranges[2] != 0x00010000)
 		return;
 
-	prom_printf("fixing up bogus ISA range on Maple...\n");
+	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
 
 	isa_ranges[0] = 0x1;
 	isa_ranges[1] = 0x0;
-	isa_ranges[2] = 0x01002000; /* IO space; PCI device = 4 */
+	isa_ranges[2] = rloc;
 	isa_ranges[3] = 0x0;
 	isa_ranges[4] = 0x0;
 	isa_ranges[5] = 0x00010000;
-	prom_setprop(isa, "/ht@0/isa@4", "ranges",
+	prom_setprop(isa, name, "ranges",
 		isa_ranges, sizeof(isa_ranges));
 }
 #else
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 45df420383cc..21009b1f7869 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -38,14 +38,6 @@ static void of_dump_addr(const char *s, u32 *addr, int na)
38static void of_dump_addr(const char *s, u32 *addr, int na) { } 38static void of_dump_addr(const char *s, u32 *addr, int na) { }
39#endif 39#endif
40 40
41/* Read a big address */
42static inline u64 of_read_addr(u32 *cell, int size)
43{
44 u64 r = 0;
45 while (size--)
46 r = (r << 32) | *(cell++);
47 return r;
48}
49 41
50/* Callbacks for bus specific translators */ 42/* Callbacks for bus specific translators */
51struct of_bus { 43struct of_bus {
@@ -77,9 +69,9 @@ static u64 of_bus_default_map(u32 *addr, u32 *range, int na, int ns, int pna)
77{ 69{
78 u64 cp, s, da; 70 u64 cp, s, da;
79 71
80 cp = of_read_addr(range, na); 72 cp = of_read_number(range, na);
81 s = of_read_addr(range + na + pna, ns); 73 s = of_read_number(range + na + pna, ns);
82 da = of_read_addr(addr, na); 74 da = of_read_number(addr, na);
83 75
84 DBG("OF: default map, cp="PRu64", s="PRu64", da="PRu64"\n", 76 DBG("OF: default map, cp="PRu64", s="PRu64", da="PRu64"\n",
85 cp, s, da); 77 cp, s, da);
@@ -91,7 +83,7 @@ static u64 of_bus_default_map(u32 *addr, u32 *range, int na, int ns, int pna)
91 83
92static int of_bus_default_translate(u32 *addr, u64 offset, int na) 84static int of_bus_default_translate(u32 *addr, u64 offset, int na)
93{ 85{
94 u64 a = of_read_addr(addr, na); 86 u64 a = of_read_number(addr, na);
95 memset(addr, 0, na * 4); 87 memset(addr, 0, na * 4);
96 a += offset; 88 a += offset;
97 if (na > 1) 89 if (na > 1)
@@ -135,9 +127,9 @@ static u64 of_bus_pci_map(u32 *addr, u32 *range, int na, int ns, int pna)
135 return OF_BAD_ADDR; 127 return OF_BAD_ADDR;
136 128
137 /* Read address values, skipping high cell */ 129 /* Read address values, skipping high cell */
138 cp = of_read_addr(range + 1, na - 1); 130 cp = of_read_number(range + 1, na - 1);
139 s = of_read_addr(range + na + pna, ns); 131 s = of_read_number(range + na + pna, ns);
140 da = of_read_addr(addr + 1, na - 1); 132 da = of_read_number(addr + 1, na - 1);
141 133
142 DBG("OF: PCI map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da); 134 DBG("OF: PCI map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da);
143 135
@@ -195,9 +187,9 @@ static u64 of_bus_isa_map(u32 *addr, u32 *range, int na, int ns, int pna)
195 return OF_BAD_ADDR; 187 return OF_BAD_ADDR;
196 188
197 /* Read address values, skipping high cell */ 189 /* Read address values, skipping high cell */
198 cp = of_read_addr(range + 1, na - 1); 190 cp = of_read_number(range + 1, na - 1);
199 s = of_read_addr(range + na + pna, ns); 191 s = of_read_number(range + na + pna, ns);
200 da = of_read_addr(addr + 1, na - 1); 192 da = of_read_number(addr + 1, na - 1);
201 193
202 DBG("OF: ISA map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da); 194 DBG("OF: ISA map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da);
203 195
@@ -295,7 +287,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
295 */ 287 */
296 ranges = (u32 *)get_property(parent, "ranges", &rlen); 288 ranges = (u32 *)get_property(parent, "ranges", &rlen);
297 if (ranges == NULL || rlen == 0) { 289 if (ranges == NULL || rlen == 0) {
298 offset = of_read_addr(addr, na); 290 offset = of_read_number(addr, na);
299 memset(addr, 0, pna * 4); 291 memset(addr, 0, pna * 4);
300 DBG("OF: no ranges, 1:1 translation\n"); 292 DBG("OF: no ranges, 1:1 translation\n");
301 goto finish; 293 goto finish;
@@ -378,7 +370,7 @@ u64 of_translate_address(struct device_node *dev, u32 *in_addr)
378 /* If root, we have finished */ 370 /* If root, we have finished */
379 if (parent == NULL) { 371 if (parent == NULL) {
380 DBG("OF: reached root node\n"); 372 DBG("OF: reached root node\n");
381 result = of_read_addr(addr, na); 373 result = of_read_number(addr, na);
382 break; 374 break;
383 } 375 }
384 376
@@ -442,7 +434,7 @@ u32 *of_get_address(struct device_node *dev, int index, u64 *size,
442 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) 434 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
443 if (i == index) { 435 if (i == index) {
444 if (size) 436 if (size)
445 *size = of_read_addr(prop + na, ns); 437 *size = of_read_number(prop + na, ns);
446 if (flags) 438 if (flags)
447 *flags = bus->get_flags(prop); 439 *flags = bus->get_flags(prop);
448 return prop; 440 return prop;
@@ -484,7 +476,7 @@ u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
484 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) 476 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
485 if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) { 477 if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
486 if (size) 478 if (size)
487 *size = of_read_addr(prop + na, ns); 479 *size = of_read_number(prop + na, ns);
488 if (flags) 480 if (flags)
489 *flags = bus->get_flags(prop); 481 *flags = bus->get_flags(prop);
490 return prop; 482 return prop;
@@ -565,11 +557,414 @@ void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop,
565 prop = get_property(dn, "#address-cells", NULL); 557 prop = get_property(dn, "#address-cells", NULL);
566 558
567 cells = prop ? *(u32 *)prop : prom_n_addr_cells(dn); 559 cells = prop ? *(u32 *)prop : prom_n_addr_cells(dn);
568 *phys = of_read_addr(dma_window, cells); 560 *phys = of_read_number(dma_window, cells);
569 561
570 dma_window += cells; 562 dma_window += cells;
571 563
572 prop = get_property(dn, "ibm,#dma-size-cells", NULL); 564 prop = get_property(dn, "ibm,#dma-size-cells", NULL);
573 cells = prop ? *(u32 *)prop : prom_n_size_cells(dn); 565 cells = prop ? *(u32 *)prop : prom_n_size_cells(dn);
574 *size = of_read_addr(dma_window, cells); 566 *size = of_read_number(dma_window, cells);
567}
568
569/*
570 * Interrupt remapper
571 */
572
573static unsigned int of_irq_workarounds;
574static struct device_node *of_irq_dflt_pic;
575
576static struct device_node *of_irq_find_parent(struct device_node *child)
577{
578 struct device_node *p;
579 phandle *parp;
580
581 if (!of_node_get(child))
582 return NULL;
583
584 do {
585 parp = (phandle *)get_property(child, "interrupt-parent", NULL);
586 if (parp == NULL)
587 p = of_get_parent(child);
588 else {
589 if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
590 p = of_node_get(of_irq_dflt_pic);
591 else
592 p = of_find_node_by_phandle(*parp);
593 }
594 of_node_put(child);
595 child = p;
596 } while (p && get_property(p, "#interrupt-cells", NULL) == NULL);
597
598 return p;
599}
600
601static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
602{
603 return (((pin - 1) + slot) % 4) + 1;
575} 604}
605
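of_irq_pci_swizzle() above is the standard PCI INTx rotation applied when crossing a PCI-to-PCI bridge that has no device node: each slot rotates the INTA..INTD pins by the slot number, pins being 1-based. A self-contained sketch of the arithmetic (illustration only; the helper and main() below are hypothetical):

#include <assert.h>

/* Same formula as of_irq_pci_swizzle(): 1=INTA .. 4=INTD */
static unsigned char swizzle(unsigned char slot, unsigned char pin)
{
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	assert(swizzle(0, 1) == 1);	/* slot 0: INTA stays INTA */
	assert(swizzle(3, 1) == 4);	/* slot 3: INTA appears upstream as INTD */
	assert(swizzle(2, 4) == 2);	/* slot 2: INTD appears upstream as INTB */
	return 0;
}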
606/* This doesn't need to be called if you don't have any special workaround
607 * flags to pass
608 */
609void of_irq_map_init(unsigned int flags)
610{
611 of_irq_workarounds = flags;
612
613 /* OldWorld, don't bother looking at other things */
614 if (flags & OF_IMAP_OLDWORLD_MAC)
615 return;
616
617 /* If we don't have phandles, let's try to locate a default interrupt
 618 * controller (happens when booting with BootX). We take the first match
 619 * here; hopefully that only ever happens on machines with one
620 * controller.
621 */
622 if (flags & OF_IMAP_NO_PHANDLE) {
623 struct device_node *np;
624
625 for(np = NULL; (np = of_find_all_nodes(np)) != NULL;) {
626 if (get_property(np, "interrupt-controller", NULL)
627 == NULL)
628 continue;
629 /* Skip /chosen/interrupt-controller */
630 if (strcmp(np->name, "chosen") == 0)
631 continue;
632 /* It seems like at least one person on this planet wants
633 * to use BootX on a machine with an AppleKiwi controller
634 * which happens to pretend to be an interrupt
635 * controller too.
636 */
637 if (strcmp(np->name, "AppleKiwi") == 0)
638 continue;
639 /* I think we found one ! */
640 of_irq_dflt_pic = np;
641 break;
642 }
643 }
644
645}
646
647int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 *addr,
648 struct of_irq *out_irq)
649{
650 struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
651 u32 *tmp, *imap, *imask;
652 u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
653 int imaplen, match, i;
654
655 ipar = of_node_get(parent);
656
657 /* First get the #interrupt-cells property of the current cursor
658 * that tells us how to interpret the passed-in intspec. If there
659 * is none, we are nice and just walk up the tree
660 */
661 do {
662 tmp = (u32 *)get_property(ipar, "#interrupt-cells", NULL);
663 if (tmp != NULL) {
664 intsize = *tmp;
665 break;
666 }
667 tnode = ipar;
668 ipar = of_irq_find_parent(ipar);
669 of_node_put(tnode);
670 } while (ipar);
671 if (ipar == NULL) {
672 DBG(" -> no parent found !\n");
673 goto fail;
674 }
675
676 DBG("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
677
 678 /* Look for this node's #address-cells. We have to implement the old linux
 679 * trick of looking at the parent here, as some device-trees rely on it
680 */
681 old = of_node_get(ipar);
682 do {
683 tmp = (u32 *)get_property(old, "#address-cells", NULL);
684 tnode = of_get_parent(old);
685 of_node_put(old);
686 old = tnode;
687 } while(old && tmp == NULL);
688 of_node_put(old);
689 old = NULL;
690 addrsize = (tmp == NULL) ? 2 : *tmp;
691
692 DBG(" -> addrsize=%d\n", addrsize);
693
694 /* Now start the actual "proper" walk of the interrupt tree */
695 while (ipar != NULL) {
696 /* Now check if cursor is an interrupt-controller and if it is
697 * then we are done
698 */
699 if (get_property(ipar, "interrupt-controller", NULL) != NULL) {
700 DBG(" -> got it !\n");
701 memcpy(out_irq->specifier, intspec,
702 intsize * sizeof(u32));
703 out_irq->size = intsize;
704 out_irq->controller = ipar;
705 of_node_put(old);
706 return 0;
707 }
708
709 /* Now look for an interrupt-map */
710 imap = (u32 *)get_property(ipar, "interrupt-map", &imaplen);
711 /* No interrupt map, check for an interrupt parent */
712 if (imap == NULL) {
713 DBG(" -> no map, getting parent\n");
714 newpar = of_irq_find_parent(ipar);
715 goto skiplevel;
716 }
717 imaplen /= sizeof(u32);
718
719 /* Look for a mask */
720 imask = (u32 *)get_property(ipar, "interrupt-map-mask", NULL);
721
722 /* If we were passed no "reg" property and we attempt to parse
723 * an interrupt-map, then #address-cells must be 0.
724 * Fail if it's not.
725 */
726 if (addr == NULL && addrsize != 0) {
727 DBG(" -> no reg passed in when needed !\n");
728 goto fail;
729 }
730
731 /* Parse interrupt-map */
732 match = 0;
733 while (imaplen > (addrsize + intsize + 1) && !match) {
734 /* Compare specifiers */
735 match = 1;
736 for (i = 0; i < addrsize && match; ++i) {
737 u32 mask = imask ? imask[i] : 0xffffffffu;
738 match = ((addr[i] ^ imap[i]) & mask) == 0;
739 }
740 for (; i < (addrsize + intsize) && match; ++i) {
741 u32 mask = imask ? imask[i] : 0xffffffffu;
742 match =
743 ((intspec[i-addrsize] ^ imap[i]) & mask) == 0;
744 }
745 imap += addrsize + intsize;
746 imaplen -= addrsize + intsize;
747
748 DBG(" -> match=%d (imaplen=%d)\n", match, imaplen);
749
750 /* Get the interrupt parent */
751 if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
752 newpar = of_node_get(of_irq_dflt_pic);
753 else
754 newpar = of_find_node_by_phandle((phandle)*imap);
755 imap++;
756 --imaplen;
757
758 /* Check if not found */
759 if (newpar == NULL) {
760 DBG(" -> imap parent not found !\n");
761 goto fail;
762 }
763
764 /* Get #interrupt-cells and #address-cells of new
765 * parent
766 */
767 tmp = (u32 *)get_property(newpar, "#interrupt-cells",
768 NULL);
769 if (tmp == NULL) {
770 DBG(" -> parent lacks #interrupt-cells !\n");
771 goto fail;
772 }
773 newintsize = *tmp;
774 tmp = (u32 *)get_property(newpar, "#address-cells",
775 NULL);
776 newaddrsize = (tmp == NULL) ? 0 : *tmp;
777
778 DBG(" -> newintsize=%d, newaddrsize=%d\n",
779 newintsize, newaddrsize);
780
781 /* Check for malformed properties */
782 if (imaplen < (newaddrsize + newintsize))
783 goto fail;
784
785 imap += newaddrsize + newintsize;
786 imaplen -= newaddrsize + newintsize;
787
788 DBG(" -> imaplen=%d\n", imaplen);
789 }
790 if (!match)
791 goto fail;
792
793 of_node_put(old);
794 old = of_node_get(newpar);
795 addrsize = newaddrsize;
796 intsize = newintsize;
797 intspec = imap - intsize;
798 addr = intspec - addrsize;
799
800 skiplevel:
801 /* Iterate again with new parent */
802 DBG(" -> new parent: %s\n", newpar ? newpar->full_name : "<>");
803 of_node_put(ipar);
804 ipar = newpar;
805 newpar = NULL;
806 }
807 fail:
808 of_node_put(ipar);
809 of_node_put(old);
810 of_node_put(newpar);
811
812 return -EINVAL;
813}
814EXPORT_SYMBOL_GPL(of_irq_map_raw);
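The heart of the walk in of_irq_map_raw() is the entry-matching step: a child's (address, interrupt) cells match one interrupt-map entry when they are equal under interrupt-map-mask, and the cell following a matching entry is the parent phandle. A standalone restatement of just that step (names are illustrative, not kernel API):

typedef unsigned int u32;

static int imap_entry_matches(const u32 *addr, const u32 *intspec,
			      const u32 *imap, const u32 *imask,
			      int addrsize, int intsize)
{
	int i;

	/* address cells first, then interrupt specifier cells */
	for (i = 0; i < addrsize; i++) {
		u32 mask = imask ? imask[i] : 0xffffffffu;
		if ((addr[i] ^ imap[i]) & mask)
			return 0;
	}
	for (; i < addrsize + intsize; i++) {
		u32 mask = imask ? imask[i] : 0xffffffffu;
		if ((intspec[i - addrsize] ^ imap[i]) & mask)
			return 0;
	}
	return 1;	/* imap[addrsize + intsize] is the parent phandle */
}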
815
816#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
817static int of_irq_map_oldworld(struct device_node *device, int index,
818 struct of_irq *out_irq)
819{
820 u32 *ints;
821 int intlen;
822
823 /*
824 * Old machines just have a list of interrupt numbers
825 * and no interrupt-controller nodes.
826 */
827 ints = (u32 *) get_property(device, "AAPL,interrupts", &intlen);
828 if (ints == NULL)
829 return -EINVAL;
830 intlen /= sizeof(u32);
831
832 if (index >= intlen)
833 return -EINVAL;
834
835 out_irq->controller = NULL;
836 out_irq->specifier[0] = ints[index];
837 out_irq->size = 1;
838
839 return 0;
840}
841#else /* defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) */
842static int of_irq_map_oldworld(struct device_node *device, int index,
843 struct of_irq *out_irq)
844{
845 return -EINVAL;
846}
847#endif /* !(defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)) */
848
849int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq)
850{
851 struct device_node *p;
852 u32 *intspec, *tmp, intsize, intlen, *addr;
853 int res;
854
855 DBG("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index);
856
857 /* OldWorld mac stuff is "special", handle out of line */
858 if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
859 return of_irq_map_oldworld(device, index, out_irq);
860
861 /* Get the interrupts property */
862 intspec = (u32 *)get_property(device, "interrupts", &intlen);
863 if (intspec == NULL)
864 return -EINVAL;
865 intlen /= sizeof(u32);
866
867 /* Get the reg property (if any) */
868 addr = (u32 *)get_property(device, "reg", NULL);
869
870 /* Look for the interrupt parent. */
871 p = of_irq_find_parent(device);
872 if (p == NULL)
873 return -EINVAL;
874
875 /* Get size of interrupt specifier */
876 tmp = (u32 *)get_property(p, "#interrupt-cells", NULL);
877 if (tmp == NULL) {
878 of_node_put(p);
879 return -EINVAL;
880 }
881 intsize = *tmp;
882
883 /* Check index */
884 if (index * intsize >= intlen)
885 return -EINVAL;
886
887 /* Get new specifier and map it */
888 res = of_irq_map_raw(p, intspec + index * intsize, addr, out_irq);
889 of_node_put(p);
890 return res;
891}
892EXPORT_SYMBOL_GPL(of_irq_map_one);
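A hedged consumer-side sketch of of_irq_map_one(): resolve interrupt 0 of a node into a raw (controller, specifier) pair, then turn that into a Linux virq with irq_create_of_mapping(), as the spider-pic code later in this patch does. The wrapper is hypothetical.

/* Hypothetical helper (sketch): map a node's first interrupt to a virq */
static unsigned int example_map_first_irq(struct device_node *np)
{
	struct of_irq oirq;
	unsigned int virq;

	if (of_irq_map_one(np, 0, &oirq))
		return NO_IRQ;		/* no usable interrupt specifier */

	virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
				     oirq.size);
	of_node_put(oirq.controller);	/* of_irq_map_one holds a reference */
	return virq;
}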
893
894int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
895{
896 struct device_node *dn, *ppnode;
897 struct pci_dev *ppdev;
898 u32 lspec;
899 u32 laddr[3];
900 u8 pin;
901 int rc;
902
 903 /* Check if we have a device node; if yes, fall back to standard OF
904 * parsing
905 */
906 dn = pci_device_to_OF_node(pdev);
907 if (dn)
908 return of_irq_map_one(dn, 0, out_irq);
909
910 /* Ok, we don't, time to have fun. Let's start by building up an
 911 * interrupt spec. We assume #interrupt-cells is 1, which is standard
 912 * for PCI. If yours differs, don't use this routine.
913 */
914 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
915 if (rc != 0)
916 return rc;
917 /* No pin, exit */
918 if (pin == 0)
919 return -ENODEV;
920
921 /* Now we walk up the PCI tree */
922 lspec = pin;
923 for (;;) {
924 /* Get the pci_dev of our parent */
925 ppdev = pdev->bus->self;
926
927 /* Ouch, it's a host bridge... */
928 if (ppdev == NULL) {
929#ifdef CONFIG_PPC64
930 ppnode = pci_bus_to_OF_node(pdev->bus);
931#else
932 struct pci_controller *host;
933 host = pci_bus_to_host(pdev->bus);
934 ppnode = host ? host->arch_data : NULL;
935#endif
936 /* No node for host bridge ? give up */
937 if (ppnode == NULL)
938 return -EINVAL;
939 } else
940 /* We found a P2P bridge, check if it has a node */
941 ppnode = pci_device_to_OF_node(ppdev);
942
943 /* Ok, we have found a parent with a device-node, hand over to
944 * the OF parsing code.
945 * We build a unit address from the linux device to be used for
946 * resolution. Note that we use the linux bus number which may
947 * not match your firmware bus numbering.
948 * Fortunately, in most cases, interrupt-map-mask doesn't include
949 * the bus number as part of the matching.
950 * You should still be careful about that though if you intend
 951 * to rely on this function (i.e. if you ship firmware that doesn't
952 * create device nodes for all PCI devices).
953 */
954 if (ppnode)
955 break;
956
957 /* We can only get here if we hit a P2P bridge with no node,
958 * let's do standard swizzling and try again
959 */
960 lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
961 pdev = ppdev;
962 }
963
964 laddr[0] = (pdev->bus->number << 16)
965 | (pdev->devfn << 8);
966 laddr[1] = laddr[2] = 0;
967 return of_irq_map_raw(ppnode, &lspec, laddr, out_irq);
968}
969EXPORT_SYMBOL_GPL(of_irq_map_pci);
970
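The laddr[] cells built at the end of of_irq_map_pci() follow the Open Firmware PCI binding's phys.hi layout: bus number in bits 23:16, devfn in bits 15:8, register zero. That is the unit address interrupt-map entries for PCI devices are matched against. A one-line restatement (the helper name is illustrative):

/* phys.hi = 00000000 bbbbbbbb dddddfff 00000000 (bus, devfn, reg = 0) */
static inline u32 pci_phys_hi(u8 bus, u8 devfn)
{
	return ((u32)bus << 16) | ((u32)devfn << 8);
}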
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 6eb7e49b394a..cda022657324 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -297,19 +297,9 @@ unsigned long __init find_and_init_phbs(void)
297 struct device_node *node; 297 struct device_node *node;
298 struct pci_controller *phb; 298 struct pci_controller *phb;
299 unsigned int index; 299 unsigned int index;
300 unsigned int root_size_cells = 0;
301 unsigned int *opprop = NULL;
302 struct device_node *root = of_find_node_by_path("/"); 300 struct device_node *root = of_find_node_by_path("/");
303 301
304 if (ppc64_interrupt_controller == IC_OPEN_PIC) {
305 opprop = (unsigned int *)get_property(root,
306 "platform-open-pic", NULL);
307 }
308
309 root_size_cells = prom_n_size_cells(root);
310
311 index = 0; 302 index = 0;
312
313 for (node = of_get_next_child(root, NULL); 303 for (node = of_get_next_child(root, NULL);
314 node != NULL; 304 node != NULL;
315 node = of_get_next_child(root, node)) { 305 node = of_get_next_child(root, node)) {
@@ -324,13 +314,6 @@ unsigned long __init find_and_init_phbs(void)
324 setup_phb(node, phb); 314 setup_phb(node, phb);
325 pci_process_bridge_OF_ranges(phb, node, 0); 315 pci_process_bridge_OF_ranges(phb, node, 0);
326 pci_setup_phb_io(phb, index == 0); 316 pci_setup_phb_io(phb, index == 0);
327#ifdef CONFIG_PPC_PSERIES
328 /* XXX This code need serious fixing ... --BenH */
329 if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
330 int addr = root_size_cells * (index + 2) - 1;
331 mpic_assign_isu(pSeries_mpic, index, opprop[addr]);
332 }
333#endif
334 index++; 317 index++;
335 } 318 }
336 319
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index ba7cd50d820d..e0df2ba1ab9f 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -51,7 +51,6 @@
51 51
52extern void bootx_init(unsigned long r4, unsigned long phys); 52extern void bootx_init(unsigned long r4, unsigned long phys);
53 53
54boot_infos_t *boot_infos;
55struct ide_machdep_calls ppc_ide_md; 54struct ide_machdep_calls ppc_ide_md;
56 55
57int boot_cpuid; 56int boot_cpuid;
@@ -240,7 +239,6 @@ void __init setup_arch(char **cmdline_p)
240 ppc_md.init_early(); 239 ppc_md.init_early();
241 240
242 find_legacy_serial_ports(); 241 find_legacy_serial_ports();
243 finish_device_tree();
244 242
245 smp_setup_cpu_maps(); 243 smp_setup_cpu_maps();
246 244
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ac7276c40685..fd1785e4c9bb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -361,12 +361,15 @@ void __init setup_system(void)
361 361
362 /* 362 /*
363 * Fill the ppc64_caches & systemcfg structures with informations 363 * Fill the ppc64_caches & systemcfg structures with informations
364 * retrieved from the device-tree. Need to be called before 364 * retrieved from the device-tree.
365 * finish_device_tree() since the later requires some of the
366 * informations filled up here to properly parse the interrupt tree.
367 */ 365 */
368 initialize_cache_info(); 366 initialize_cache_info();
369 367
368 /*
369 * Initialize irq remapping subsystem
370 */
371 irq_early_init();
372
370#ifdef CONFIG_PPC_RTAS 373#ifdef CONFIG_PPC_RTAS
371 /* 374 /*
372 * Initialize RTAS if available 375 * Initialize RTAS if available
@@ -394,12 +397,6 @@ void __init setup_system(void)
394 find_legacy_serial_ports(); 397 find_legacy_serial_ports();
395 398
396 /* 399 /*
397 * "Finish" the device-tree, that is do the actual parsing of
398 * some of the properties like the interrupt map
399 */
400 finish_device_tree();
401
402 /*
403 * Initialize xmon 400 * Initialize xmon
404 */ 401 */
405#ifdef CONFIG_XMON_DEFAULT 402#ifdef CONFIG_XMON_DEFAULT
@@ -427,8 +424,6 @@ void __init setup_system(void)
427 424
428 printk("-----------------------------------------------------\n"); 425 printk("-----------------------------------------------------\n");
429 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); 426 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
430 printk("ppc64_interrupt_controller = 0x%ld\n",
431 ppc64_interrupt_controller);
432 printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size()); 427 printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
433 printk("ppc64_caches.dcache_line_size = 0x%x\n", 428 printk("ppc64_caches.dcache_line_size = 0x%x\n",
434 ppc64_caches.dline_size); 429 ppc64_caches.dline_size);
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index cdf5867838a6..fad8580f9081 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -218,7 +218,6 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
218{ 218{
219 struct vio_dev *viodev; 219 struct vio_dev *viodev;
220 unsigned int *unit_address; 220 unsigned int *unit_address;
221 unsigned int *irq_p;
222 221
223 /* we need the 'device_type' property, in order to match with drivers */ 222 /* we need the 'device_type' property, in order to match with drivers */
224 if (of_node->type == NULL) { 223 if (of_node->type == NULL) {
@@ -243,16 +242,7 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
243 242
244 viodev->dev.platform_data = of_node_get(of_node); 243 viodev->dev.platform_data = of_node_get(of_node);
245 244
246 viodev->irq = NO_IRQ; 245 viodev->irq = irq_of_parse_and_map(of_node, 0);
247 irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
248 if (irq_p) {
249 int virq = virt_irq_create_mapping(*irq_p);
250 if (virq == NO_IRQ) {
251 printk(KERN_ERR "Unable to allocate interrupt "
252 "number for %s\n", of_node->full_name);
253 } else
254 viodev->irq = irq_offset_up(virq);
255 }
256 246
257 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); 247 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
258 viodev->name = of_node->name; 248 viodev->name = of_node->name;
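The vio change above shows the driver-facing pattern this series introduces: a single irq_of_parse_and_map() call replaces open-coded "interrupts" parsing plus virt_irq_create_mapping()/irq_offset_up(). A hedged sketch of a driver using it (handler and names are hypothetical):

/* Hypothetical driver fragment (sketch) */
static irqreturn_t example_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static int example_probe(struct device_node *np, void *dev)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (virq == NO_IRQ)
		return -ENODEV;		/* node has no usable interrupt 0 */
	return request_irq(virq, example_handler, IRQF_DISABLED,
			   "example", dev);
}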
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 7675e675dce1..5fe7b7faf45f 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -16,12 +16,21 @@ config MPC834x_SYS
 16 3 PCI slots. The PIB's PCI initialization is the bootloader's 16 3 PCI slots. The PIB's PCI initialization is the bootloader's
 17 responsibility. 17 responsibility.
18 18
19config MPC834x_ITX
20 bool "Freescale MPC834x ITX"
21 select DEFAULT_UIMAGE
22 help
23 This option enables support for the MPC 834x ITX evaluation board.
24
25 Be aware that PCI initialization is the bootloader's
 26 responsibility.
27
19endchoice 28endchoice
20 29
21config MPC834x 30config MPC834x
22 bool 31 bool
23 select PPC_UDBG_16550 32 select PPC_UDBG_16550
24 select PPC_INDIRECT_PCI 33 select PPC_INDIRECT_PCI
25 default y if MPC834x_SYS 34 default y if MPC834x_SYS || MPC834x_ITX
26 35
27endmenu 36endmenu
diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile
index 5c72367441a8..9387a110d28a 100644
--- a/arch/powerpc/platforms/83xx/Makefile
+++ b/arch/powerpc/platforms/83xx/Makefile
@@ -4,3 +4,4 @@
4obj-y := misc.o 4obj-y := misc.o
5obj-$(CONFIG_PCI) += pci.o 5obj-$(CONFIG_PCI) += pci.o
6obj-$(CONFIG_MPC834x_SYS) += mpc834x_sys.o 6obj-$(CONFIG_MPC834x_SYS) += mpc834x_sys.o
7obj-$(CONFIG_MPC834x_ITX) += mpc834x_itx.o
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
new file mode 100644
index 000000000000..b46305645d38
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -0,0 +1,156 @@
1/*
2 * arch/powerpc/platforms/83xx/mpc834x_itx.c
3 *
4 * MPC834x ITX board specific routines
5 *
6 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/config.h>
15#include <linux/stddef.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/reboot.h>
20#include <linux/pci.h>
21#include <linux/kdev_t.h>
22#include <linux/major.h>
23#include <linux/console.h>
24#include <linux/delay.h>
25#include <linux/seq_file.h>
26#include <linux/root_dev.h>
27
28#include <asm/system.h>
29#include <asm/atomic.h>
30#include <asm/time.h>
31#include <asm/io.h>
32#include <asm/machdep.h>
33#include <asm/ipic.h>
34#include <asm/bootinfo.h>
35#include <asm/irq.h>
36#include <asm/prom.h>
37#include <asm/udbg.h>
38#include <sysdev/fsl_soc.h>
39
40#include "mpc83xx.h"
41
42#include <platforms/83xx/mpc834x_sys.h>
43
44#ifndef CONFIG_PCI
45unsigned long isa_io_base = 0;
46unsigned long isa_mem_base = 0;
47#endif
48
49#ifdef CONFIG_PCI
50static int
51mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
52{
53 static char pci_irq_table[][4] =
54 /*
55 * PCI IDSEL/INTPIN->INTLINE
56 * A B C D
57 */
58 {
59 {PIRQB, PIRQC, PIRQD, PIRQA}, /* idsel 0x0e */
60 {PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x0f */
61 {PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x10 */
62 };
63
64 const long min_idsel = 0x0e, max_idsel = 0x10, irqs_per_slot = 4;
65 return PCI_IRQ_TABLE_LOOKUP;
66}
67#endif /* CONFIG_PCI */
68
69/* ************************************************************************
70 *
71 * Setup the architecture
72 *
73 */
74static void __init mpc834x_itx_setup_arch(void)
75{
76 struct device_node *np;
77
78 if (ppc_md.progress)
79 ppc_md.progress("mpc834x_itx_setup_arch()", 0);
80
81 np = of_find_node_by_type(NULL, "cpu");
82 if (np != 0) {
83 unsigned int *fp =
84 (int *)get_property(np, "clock-frequency", NULL);
85 if (fp != 0)
86 loops_per_jiffy = *fp / HZ;
87 else
88 loops_per_jiffy = 50000000 / HZ;
89 of_node_put(np);
90 }
91#ifdef CONFIG_PCI
92 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
93 add_bridge(np);
94
95 ppc_md.pci_swizzle = common_swizzle;
96 ppc_md.pci_map_irq = mpc83xx_map_irq;
97 ppc_md.pci_exclude_device = mpc83xx_exclude_device;
98#endif
99
100#ifdef CONFIG_ROOT_NFS
101 ROOT_DEV = Root_NFS;
102#else
103 ROOT_DEV = Root_HDA1;
104#endif
105}
106
107void __init mpc834x_itx_init_IRQ(void)
108{
109 u8 senses[8] = {
110 0, /* EXT 0 */
111 IRQ_SENSE_LEVEL, /* EXT 1 */
112 IRQ_SENSE_LEVEL, /* EXT 2 */
113 0, /* EXT 3 */
114#ifdef CONFIG_PCI
115 IRQ_SENSE_LEVEL, /* EXT 4 */
116 IRQ_SENSE_LEVEL, /* EXT 5 */
117 IRQ_SENSE_LEVEL, /* EXT 6 */
118 IRQ_SENSE_LEVEL, /* EXT 7 */
119#else
120 0, /* EXT 4 */
121 0, /* EXT 5 */
122 0, /* EXT 6 */
123 0, /* EXT 7 */
124#endif
125 };
126
127 ipic_init(get_immrbase() + 0x00700, 0, 0, senses, 8);
128
129 /* Initialize the default interrupt mapping priorities,
130 * in case the boot rom changed something on us.
131 */
132 ipic_set_default_priority();
133}
134
135/*
136 * Called very early, MMU is off, device-tree isn't unflattened
137 */
138static int __init mpc834x_itx_probe(void)
139{
140 /* We always match for now, eventually we should look at the flat
 141 dev tree to ensure this is the board we are supposed to run on
142 */
143 return 1;
144}
145
146define_machine(mpc834x_itx) {
147 .name = "MPC834x ITX",
148 .probe = mpc834x_itx_probe,
149 .setup_arch = mpc834x_itx_setup_arch,
150 .init_IRQ = mpc834x_itx_init_IRQ,
151 .get_irq = ipic_get_irq,
152 .restart = mpc83xx_restart,
153 .time_init = mpc83xx_time_init,
154 .calibrate_decr = generic_calibrate_decr,
155 .progress = udbg_progress,
156};
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.h b/arch/powerpc/platforms/83xx/mpc834x_itx.h
new file mode 100644
index 000000000000..174ca4ef55f3
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.h
@@ -0,0 +1,23 @@
1/*
2 * arch/powerpc/platforms/83xx/mpc834x_itx.h
3 *
4 * MPC834X ITX common board definitions
5 *
6 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#ifndef __MACH_MPC83XX_ITX_H__
16#define __MACH_MPC83XX_ITX_H__
17
18#define PIRQA MPC83xx_IRQ_EXT4
19#define PIRQB MPC83xx_IRQ_EXT5
20#define PIRQC MPC83xx_IRQ_EXT6
21#define PIRQD MPC83xx_IRQ_EXT7
22
23#endif /* __MACH_MPC83XX_ITX_H__ */
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 22da1335445a..9d5da7896892 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -1,6 +1,9 @@
1/* 1/*
2 * Cell Internal Interrupt Controller 2 * Cell Internal Interrupt Controller
3 * 3 *
4 * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
5 * IBM, Corp.
6 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 7 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 * 8 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com> 9 * Author: Arnd Bergmann <arndb@de.ibm.com>
@@ -25,11 +28,13 @@
25#include <linux/module.h> 28#include <linux/module.h>
26#include <linux/percpu.h> 29#include <linux/percpu.h>
27#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/ioport.h>
28 32
29#include <asm/io.h> 33#include <asm/io.h>
30#include <asm/pgtable.h> 34#include <asm/pgtable.h>
31#include <asm/prom.h> 35#include <asm/prom.h>
32#include <asm/ptrace.h> 36#include <asm/ptrace.h>
37#include <asm/machdep.h>
33 38
34#include "interrupt.h" 39#include "interrupt.h"
35#include "cbe_regs.h" 40#include "cbe_regs.h"
@@ -37,231 +42,65 @@
37struct iic { 42struct iic {
38 struct cbe_iic_thread_regs __iomem *regs; 43 struct cbe_iic_thread_regs __iomem *regs;
39 u8 target_id; 44 u8 target_id;
45 u8 eoi_stack[16];
46 int eoi_ptr;
47 struct irq_host *host;
40}; 48};
41 49
42static DEFINE_PER_CPU(struct iic, iic); 50static DEFINE_PER_CPU(struct iic, iic);
51#define IIC_NODE_COUNT 2
52static struct irq_host *iic_hosts[IIC_NODE_COUNT];
43 53
44void iic_local_enable(void) 54/* Convert between "pending" bits and hw irq number */
55static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
45{ 56{
46 struct iic *iic = &__get_cpu_var(iic); 57 unsigned char unit = bits.source & 0xf;
47 u64 tmp;
48
49 /*
50 * There seems to be a bug that is present in DD2.x CPUs
51 * and still only partially fixed in DD3.1.
52 * This bug causes a value written to the priority register
53 * not to make it there, resulting in a system hang unless we
54 * write it again.
55 * Masking with 0xf0 is done because the Cell BE does not
56 * implement the lower four bits of the interrupt priority,
57 * they always read back as zeroes, although future CPUs
58 * might implement different bits.
59 */
60 do {
61 out_be64(&iic->regs->prio, 0xff);
62 tmp = in_be64(&iic->regs->prio);
63 } while ((tmp & 0xf0) != 0xf0);
64}
65
66void iic_local_disable(void)
67{
68 out_be64(&__get_cpu_var(iic).regs->prio, 0x0);
69}
70 58
71static unsigned int iic_startup(unsigned int irq) 59 if (bits.flags & CBE_IIC_IRQ_IPI)
72{ 60 return IIC_IRQ_IPI0 | (bits.prio >> 4);
73 return 0; 61 else if (bits.class <= 3)
62 return (bits.class << 4) | unit;
63 else
64 return IIC_IRQ_INVALID;
74} 65}
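iic_pending_to_hwnum() packs class and unit into a 6-bit hardware number, with IPIs in a separate range. Two worked examples, checked against the IIC_IRQ_* values this patch adds to interrupt.h further down:

#include <assert.h>

int main(void)
{
	/* class 2, unit 0xb -> (2 << 4) | 0xb == 0x2b == IIC_IRQ_EXT_IOIF1 */
	assert(((2 << 4) | 0xb) == 0x2b);
	/* IPI pending with prio 0xf0 -> IIC_IRQ_IPI0 | (0xf0 >> 4) == 0x4f */
	assert((0x40 | (0xf0 >> 4)) == 0x4f);
	return 0;
}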
75 66
76static void iic_enable(unsigned int irq) 67static void iic_mask(unsigned int irq)
77{ 68{
78 iic_local_enable();
79} 69}
80 70
81static void iic_disable(unsigned int irq) 71static void iic_unmask(unsigned int irq)
82{ 72{
83} 73}
84 74
85static void iic_end(unsigned int irq) 75static void iic_eoi(unsigned int irq)
86{ 76{
87 iic_local_enable(); 77 struct iic *iic = &__get_cpu_var(iic);
78 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
79 BUG_ON(iic->eoi_ptr < 0);
88} 80}
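The eoi_stack added above restores interrupt priority in LIFO order: iic_get_irq() (further down) pushes the priority captured when an interrupt is accepted, and iic_eoi() pops one level, so nested interrupts unwind back to eoi_stack[0] = 0xff, the fully-open base. A minimal userspace model of just that mechanism (assuming those are the only push/pop sites):

#include <assert.h>

static unsigned char stack[16] = { 0xff };	/* base: priority fully open */
static int ptr;

static void accept_irq(unsigned char prio) { stack[++ptr] = prio; }
static unsigned char eoi(void)             { return stack[--ptr]; }

int main(void)
{
	accept_irq(0x40);	/* outer interrupt accepted */
	accept_irq(0x80);	/* nested interrupt accepted */
	assert(eoi() == 0x40);	/* EOI of nested level restores outer prio */
	assert(eoi() == 0xff);	/* EOI of outer level reopens everything */
	return 0;
}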
89 81
90static struct hw_interrupt_type iic_pic = { 82static struct irq_chip iic_chip = {
91 .typename = " CELL-IIC ", 83 .typename = " CELL-IIC ",
92 .startup = iic_startup, 84 .mask = iic_mask,
93 .enable = iic_enable, 85 .unmask = iic_unmask,
94 .disable = iic_disable, 86 .eoi = iic_eoi,
95 .end = iic_end,
96}; 87};
97 88
98static int iic_external_get_irq(struct cbe_iic_pending_bits pending)
99{
100 int irq;
101 unsigned char node, unit;
102
103 node = pending.source >> 4;
104 unit = pending.source & 0xf;
105 irq = -1;
106
107 /*
108 * This mapping is specific to the Cell Broadband
109 * Engine. We might need to get the numbers
110 * from the device tree to support future CPUs.
111 */
112 switch (unit) {
113 case 0x00:
114 case 0x0b:
115 /*
116 * One of these units can be connected
117 * to an external interrupt controller.
118 */
119 if (pending.class != 2)
120 break;
121 irq = IIC_EXT_OFFSET
122 + spider_get_irq(node)
123 + node * IIC_NODE_STRIDE;
124 break;
125 case 0x01 ... 0x04:
126 case 0x07 ... 0x0a:
127 /*
128 * These units are connected to the SPEs
129 */
130 if (pending.class > 2)
131 break;
132 irq = IIC_SPE_OFFSET
133 + pending.class * IIC_CLASS_STRIDE
134 + node * IIC_NODE_STRIDE
135 + unit;
136 break;
137 }
138 if (irq == -1)
139 printk(KERN_WARNING "Unexpected interrupt class %02x, "
140 "source %02x, prio %02x, cpu %02x\n", pending.class,
141 pending.source, pending.prio, smp_processor_id());
142 return irq;
143}
144
145/* Get an IRQ number from the pending state register of the IIC */ 89/* Get an IRQ number from the pending state register of the IIC */
146int iic_get_irq(struct pt_regs *regs) 90static unsigned int iic_get_irq(struct pt_regs *regs)
147{ 91{
148 struct iic *iic; 92 struct cbe_iic_pending_bits pending;
149 int irq; 93 struct iic *iic;
150 struct cbe_iic_pending_bits pending; 94
151 95 iic = &__get_cpu_var(iic);
152 iic = &__get_cpu_var(iic); 96 *(unsigned long *) &pending =
153 *(unsigned long *) &pending = 97 in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
154 in_be64((unsigned long __iomem *) &iic->regs->pending_destr); 98 iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
155 99 BUG_ON(iic->eoi_ptr > 15);
156 irq = -1; 100 if (pending.flags & CBE_IIC_IRQ_VALID)
157 if (pending.flags & CBE_IIC_IRQ_VALID) { 101 return irq_linear_revmap(iic->host,
158 if (pending.flags & CBE_IIC_IRQ_IPI) { 102 iic_pending_to_hwnum(pending));
159 irq = IIC_IPI_OFFSET + (pending.prio >> 4); 103 return NO_IRQ;
160/*
161 if (irq > 0x80)
162 printk(KERN_WARNING "Unexpected IPI prio %02x"
163 "on CPU %02x\n", pending.prio,
164 smp_processor_id());
165*/
166 } else {
167 irq = iic_external_get_irq(pending);
168 }
169 }
170 return irq;
171}
172
173/* hardcoded part to be compatible with older firmware */
174
175static int setup_iic_hardcoded(void)
176{
177 struct device_node *np;
178 int nodeid, cpu;
179 unsigned long regs;
180 struct iic *iic;
181
182 for_each_possible_cpu(cpu) {
183 iic = &per_cpu(iic, cpu);
184 nodeid = cpu/2;
185
186 for (np = of_find_node_by_type(NULL, "cpu");
187 np;
188 np = of_find_node_by_type(np, "cpu")) {
189 if (nodeid == *(int *)get_property(np, "node-id", NULL))
190 break;
191 }
192
193 if (!np) {
194 printk(KERN_WARNING "IIC: CPU %d not found\n", cpu);
195 iic->regs = NULL;
196 iic->target_id = 0xff;
197 return -ENODEV;
198 }
199
200 regs = *(long *)get_property(np, "iic", NULL);
201
202 /* hack until we have decided on the devtree info */
203 regs += 0x400;
204 if (cpu & 1)
205 regs += 0x20;
206
207 printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs);
208 iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs));
209 iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
210 }
211
212 return 0;
213}
214
215static int setup_iic(void)
216{
217 struct device_node *dn;
218 unsigned long *regs;
219 char *compatible;
220 unsigned *np, found = 0;
221 struct iic *iic = NULL;
222
223 for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
224 compatible = (char *)get_property(dn, "compatible", NULL);
225
226 if (!compatible) {
227 printk(KERN_WARNING "no compatible property found !\n");
228 continue;
229 }
230
231 if (strstr(compatible, "IBM,CBEA-Internal-Interrupt-Controller"))
232 regs = (unsigned long *)get_property(dn,"reg", NULL);
233 else
234 continue;
235
236 if (!regs)
237 printk(KERN_WARNING "IIC: no reg property\n");
238
239 np = (unsigned int *)get_property(dn, "ibm,interrupt-server-ranges", NULL);
240
241 if (!np) {
242 printk(KERN_WARNING "IIC: CPU association not found\n");
243 iic->regs = NULL;
244 iic->target_id = 0xff;
245 return -ENODEV;
246 }
247
248 iic = &per_cpu(iic, np[0]);
249 iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs));
250 iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe);
251 printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs);
252
253 iic = &per_cpu(iic, np[1]);
254 iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs));
255 iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe);
256 printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs);
257
258 found++;
259 }
260
261 if (found)
262 return 0;
263 else
264 return -ENODEV;
265} 104}
266 105
267#ifdef CONFIG_SMP 106#ifdef CONFIG_SMP
@@ -269,12 +108,12 @@ static int setup_iic(void)
269/* Use the highest interrupt priorities for IPI */ 108/* Use the highest interrupt priorities for IPI */
270static inline int iic_ipi_to_irq(int ipi) 109static inline int iic_ipi_to_irq(int ipi)
271{ 110{
272 return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi; 111 return IIC_IRQ_IPI0 + IIC_NUM_IPIS - 1 - ipi;
273} 112}
274 113
275static inline int iic_irq_to_ipi(int irq) 114static inline int iic_irq_to_ipi(int irq)
276{ 115{
277 return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET); 116 return IIC_NUM_IPIS - 1 - (irq - IIC_IRQ_IPI0);
278} 117}
279 118
280void iic_setup_cpu(void) 119void iic_setup_cpu(void)
@@ -293,22 +132,51 @@ u8 iic_get_target_id(int cpu)
293} 132}
294EXPORT_SYMBOL_GPL(iic_get_target_id); 133EXPORT_SYMBOL_GPL(iic_get_target_id);
295 134
135struct irq_host *iic_get_irq_host(int node)
136{
137 if (node < 0 || node >= IIC_NODE_COUNT)
138 return NULL;
139 return iic_hosts[node];
140}
141EXPORT_SYMBOL_GPL(iic_get_irq_host);
142
143
296static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 144static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
297{ 145{
298 smp_message_recv(iic_irq_to_ipi(irq), regs); 146 int ipi = (int)(long)dev_id;
147
148 smp_message_recv(ipi, regs);
149
299 return IRQ_HANDLED; 150 return IRQ_HANDLED;
300} 151}
301 152
302static void iic_request_ipi(int ipi, const char *name) 153static void iic_request_ipi(int ipi, const char *name)
303{ 154{
304 int irq; 155 int node, virq;
305 156
306 irq = iic_ipi_to_irq(ipi); 157 for (node = 0; node < IIC_NODE_COUNT; node++) {
307 /* IPIs are marked IRQF_DISABLED as they must run with irqs 158 char *rname;
308 * disabled */ 159 if (iic_hosts[node] == NULL)
309 get_irq_desc(irq)->chip = &iic_pic; 160 continue;
310 get_irq_desc(irq)->status |= IRQ_PER_CPU; 161 virq = irq_create_mapping(iic_hosts[node],
311 request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL); 162 iic_ipi_to_irq(ipi), 0);
163 if (virq == NO_IRQ) {
164 printk(KERN_ERR
165 "iic: failed to map IPI %s on node %d\n",
166 name, node);
167 continue;
168 }
169 rname = kzalloc(strlen(name) + 16, GFP_KERNEL);
170 if (rname)
171 sprintf(rname, "%s node %d", name, node);
172 else
173 rname = (char *)name;
174 if (request_irq(virq, iic_ipi_action, IRQF_DISABLED,
175 rname, (void *)(long)ipi))
176 printk(KERN_ERR
177 "iic: failed to request IPI %s on node %d\n",
178 name, node);
179 }
312} 180}
313 181
314void iic_request_IPIs(void) 182void iic_request_IPIs(void)
@@ -319,34 +187,119 @@ void iic_request_IPIs(void)
319 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); 187 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
320#endif /* CONFIG_DEBUGGER */ 188#endif /* CONFIG_DEBUGGER */
321} 189}
190
322#endif /* CONFIG_SMP */ 191#endif /* CONFIG_SMP */
323 192
324static void iic_setup_spe_handlers(void) 193
194static int iic_host_match(struct irq_host *h, struct device_node *node)
195{
196 return h->host_data != NULL && node == h->host_data;
197}
198
199static int iic_host_map(struct irq_host *h, unsigned int virq,
200 irq_hw_number_t hw, unsigned int flags)
201{
202 if (hw < IIC_IRQ_IPI0)
203 set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq);
204 else
205 set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
206 return 0;
207}
208
209static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
210 u32 *intspec, unsigned int intsize,
211 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
212
213{
214 /* Currently, we don't translate anything. That needs to be fixed as
 215 * we get better-defined device-trees. iic interrupts have to be
 216 * explicitly mapped by whoever needs them
217 */
218 return -ENODEV;
219}
220
221static struct irq_host_ops iic_host_ops = {
222 .match = iic_host_match,
223 .map = iic_host_map,
224 .xlate = iic_host_xlate,
225};
226
227static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
228 struct irq_host *host)
325{ 229{
326 int be, isrc; 230 /* XXX FIXME: should locate the linux CPU number from the HW cpu
231 * number properly. We are lucky for now
232 */
233 struct iic *iic = &per_cpu(iic, hw_cpu);
234
235 iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
236 BUG_ON(iic->regs == NULL);
327 237
328 /* Assume two threads per BE are present */ 238 iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
329 for (be=0; be < num_present_cpus() / 2; be++) { 239 iic->eoi_stack[0] = 0xff;
330 for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) { 240 iic->host = host;
331 int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc; 241 out_be64(&iic->regs->prio, 0);
332 get_irq_desc(irq)->chip = &iic_pic; 242
243 printk(KERN_INFO "IIC for CPU %d at %lx mapped to %p, target id 0x%x\n",
244 hw_cpu, addr, iic->regs, iic->target_id);
245}
246
247static int __init setup_iic(void)
248{
249 struct device_node *dn;
250 struct resource r0, r1;
251 struct irq_host *host;
252 int found = 0;
253 u32 *np;
254
255 for (dn = NULL;
256 (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) {
257 if (!device_is_compatible(dn,
258 "IBM,CBEA-Internal-Interrupt-Controller"))
259 continue;
260 np = (u32 *)get_property(dn, "ibm,interrupt-server-ranges",
261 NULL);
262 if (np == NULL) {
263 printk(KERN_WARNING "IIC: CPU association not found\n");
264 of_node_put(dn);
265 return -ENODEV;
266 }
267 if (of_address_to_resource(dn, 0, &r0) ||
268 of_address_to_resource(dn, 1, &r1)) {
269 printk(KERN_WARNING "IIC: Can't resolve addresses\n");
270 of_node_put(dn);
271 return -ENODEV;
333 } 272 }
273 host = NULL;
274 if (found < IIC_NODE_COUNT) {
275 host = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
276 IIC_SOURCE_COUNT,
277 &iic_host_ops,
278 IIC_IRQ_INVALID);
279 iic_hosts[found] = host;
280 BUG_ON(iic_hosts[found] == NULL);
281 iic_hosts[found]->host_data = of_node_get(dn);
282 found++;
283 }
284 init_one_iic(np[0], r0.start, host);
285 init_one_iic(np[1], r1.start, host);
334 } 286 }
287
288 if (found)
289 return 0;
290 else
291 return -ENODEV;
335} 292}
336 293
337void iic_init_IRQ(void) 294void __init iic_init_IRQ(void)
338{ 295{
339 int cpu, irq_offset; 296 /* Discover and initialize iics */
340 struct iic *iic;
341
342 if (setup_iic() < 0) 297 if (setup_iic() < 0)
343 setup_iic_hardcoded(); 298 panic("IIC: Failed to initialize !\n");
344 299
345 irq_offset = 0; 300 /* Set master interrupt handling function */
346 for_each_possible_cpu(cpu) { 301 ppc_md.get_irq = iic_get_irq;
347 iic = &per_cpu(iic, cpu); 302
348 if (iic->regs) 303 /* Enable on current CPU */
349 out_be64(&iic->regs->prio, 0xff); 304 iic_setup_cpu();
350 }
351 iic_setup_spe_handlers();
352} 305}
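Both controller conversions in this patch share one shape: allocate a linear-revmap irq_host sized to the hardware source count, stash identifying data in host_data, and create virq mappings on demand. A hedged sketch using only calls that appear in this diff (the wrapper is hypothetical):

/* Hypothetical setup fragment (sketch) */
static struct irq_host *example_host;

static int __init example_pic_init(struct device_node *picnode)
{
	example_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
				      &iic_host_ops, IIC_IRQ_INVALID);
	if (example_host == NULL)
		return -ENOMEM;
	example_host->host_data = of_node_get(picnode);

	/* e.g. map the IOIF1 external source (hw number 0x2b) eagerly */
	if (irq_create_mapping(example_host, 0x2b, 0) == NO_IRQ)
		return -ENODEV;
	return 0;
}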
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 799f77d98f96..5560a92ec3ab 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -37,27 +37,24 @@
37 */ 37 */
38 38
39enum { 39enum {
40 IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */ 40 IIC_IRQ_INVALID = 0xff,
41 IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */ 41 IIC_IRQ_MAX = 0x3f,
42 IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */ 42 IIC_IRQ_EXT_IOIF0 = 0x20,
43 IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */ 43 IIC_IRQ_EXT_IOIF1 = 0x2b,
44 IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */ 44 IIC_IRQ_IPI0 = 0x40,
45 IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ 45 IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */
46 IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */ 46 IIC_SOURCE_COUNT = 0x50,
47}; 47};
48 48
49extern void iic_init_IRQ(void); 49extern void iic_init_IRQ(void);
50extern int iic_get_irq(struct pt_regs *regs);
51extern void iic_cause_IPI(int cpu, int mesg); 50extern void iic_cause_IPI(int cpu, int mesg);
52extern void iic_request_IPIs(void); 51extern void iic_request_IPIs(void);
53extern void iic_setup_cpu(void); 52extern void iic_setup_cpu(void);
54extern void iic_local_enable(void);
55extern void iic_local_disable(void);
56 53
57extern u8 iic_get_target_id(int cpu); 54extern u8 iic_get_target_id(int cpu);
55extern struct irq_host *iic_get_irq_host(int node);
58 56
59extern void spider_init_IRQ(void); 57extern void spider_init_IRQ(void);
60extern int spider_get_irq(int node);
61 58
62#endif 59#endif
63#endif /* ASM_CELL_PIC_H */ 60#endif /* ASM_CELL_PIC_H */
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index d8c2a29b3c15..282987d6d4a2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -49,6 +49,7 @@
49#include <asm/irq.h> 49#include <asm/irq.h>
50#include <asm/spu.h> 50#include <asm/spu.h>
51#include <asm/spu_priv1.h> 51#include <asm/spu_priv1.h>
52#include <asm/udbg.h>
52 53
53#include "interrupt.h" 54#include "interrupt.h"
54#include "iommu.h" 55#include "iommu.h"
@@ -79,10 +80,22 @@ static void cell_progress(char *s, unsigned short hex)
79 printk("*** %04x : %s\n", hex, s ? s : ""); 80 printk("*** %04x : %s\n", hex, s ? s : "");
80} 81}
81 82
83static void __init cell_pcibios_fixup(void)
84{
85 struct pci_dev *dev = NULL;
86
87 for_each_pci_dev(dev)
88 pci_read_irq_line(dev);
89}
90
91static void __init cell_init_irq(void)
92{
93 iic_init_IRQ();
94 spider_init_IRQ();
95}
96
82static void __init cell_setup_arch(void) 97static void __init cell_setup_arch(void)
83{ 98{
84 ppc_md.init_IRQ = iic_init_IRQ;
85 ppc_md.get_irq = iic_get_irq;
86#ifdef CONFIG_SPU_BASE 99#ifdef CONFIG_SPU_BASE
87 spu_priv1_ops = &spu_priv1_mmio_ops; 100 spu_priv1_ops = &spu_priv1_mmio_ops;
88#endif 101#endif
@@ -108,7 +121,6 @@ static void __init cell_setup_arch(void)
108 /* Find and initialize PCI host bridges */ 121 /* Find and initialize PCI host bridges */
109 init_pci_config_tokens(); 122 init_pci_config_tokens();
110 find_and_init_phbs(); 123 find_and_init_phbs();
111 spider_init_IRQ();
112 cbe_pervasive_init(); 124 cbe_pervasive_init();
113#ifdef CONFIG_DUMMY_CONSOLE 125#ifdef CONFIG_DUMMY_CONSOLE
114 conswitchp = &dummy_con; 126 conswitchp = &dummy_con;
@@ -126,8 +138,6 @@ static void __init cell_init_early(void)
126 138
127 cell_init_iommu(); 139 cell_init_iommu();
128 140
129 ppc64_interrupt_controller = IC_CELL_PIC;
130
131 DBG(" <- cell_init_early()\n"); 141 DBG(" <- cell_init_early()\n");
132} 142}
133 143
@@ -173,6 +183,8 @@ define_machine(cell) {
173 .calibrate_decr = generic_calibrate_decr, 183 .calibrate_decr = generic_calibrate_decr,
174 .check_legacy_ioport = cell_check_legacy_ioport, 184 .check_legacy_ioport = cell_check_legacy_ioport,
175 .progress = cell_progress, 185 .progress = cell_progress,
186 .init_IRQ = cell_init_irq,
187 .pcibios_fixup = cell_pcibios_fixup,
176#ifdef CONFIG_KEXEC 188#ifdef CONFIG_KEXEC
177 .machine_kexec = default_machine_kexec, 189 .machine_kexec = default_machine_kexec,
178 .machine_kexec_prepare = default_machine_kexec_prepare, 190 .machine_kexec_prepare = default_machine_kexec_prepare,
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 7c3a0b6d34fd..ae7ef88f1a37 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/ioport.h>
25 26
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
27#include <asm/prom.h> 28#include <asm/prom.h>
@@ -56,184 +57,313 @@ enum {
56 REISWAITEN = 0x508, /* Reissue Wait Control*/ 57 REISWAITEN = 0x508, /* Reissue Wait Control*/
57}; 58};
58 59
59static void __iomem *spider_pics[4]; 60#define SPIDER_CHIP_COUNT 4
61#define SPIDER_SRC_COUNT 64
62#define SPIDER_IRQ_INVALID 63
60 63
61static void __iomem *spider_get_pic(int irq) 64struct spider_pic {
62{ 65 struct irq_host *host;
63 int node = irq / IIC_NODE_STRIDE; 66 struct device_node *of_node;
64 irq %= IIC_NODE_STRIDE; 67 void __iomem *regs;
65 68 unsigned int node_id;
66 if (irq >= IIC_EXT_OFFSET && 69};
67 irq < IIC_EXT_OFFSET + IIC_NUM_EXT && 70static struct spider_pic spider_pics[SPIDER_CHIP_COUNT];
68 spider_pics)
69 return spider_pics[node];
70 return NULL;
71}
72 71
73static int spider_get_nr(unsigned int irq) 72static struct spider_pic *spider_virq_to_pic(unsigned int virq)
74{ 73{
75 return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; 74 return irq_map[virq].host->host_data;
76} 75}
77 76
78static void __iomem *spider_get_irq_config(int irq) 77static void __iomem *spider_get_irq_config(struct spider_pic *pic,
78 unsigned int src)
79{ 79{
80 void __iomem *pic; 80 return pic->regs + TIR_CFGA + 8 * src;
81 pic = spider_get_pic(irq);
82 return pic + TIR_CFGA + 8 * spider_get_nr(irq);
83} 81}
84 82
85static void spider_enable_irq(unsigned int irq) 83static void spider_unmask_irq(unsigned int virq)
86{ 84{
87 int nodeid = (irq / IIC_NODE_STRIDE) * 0x10; 85 struct spider_pic *pic = spider_virq_to_pic(virq);
88 void __iomem *cfg = spider_get_irq_config(irq); 86 void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq);
89 irq = spider_get_nr(irq);
90 87
91 out_be32(cfg, (in_be32(cfg) & ~0xf0)| 0x3107000eu | nodeid); 88 /* We use no locking as we should be covered by the descriptor lock
 92 out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq); 89 * for access to individual source configuration registers
90 */
91 out_be32(cfg, in_be32(cfg) | 0x30000000u);
93} 92}
94 93
95static void spider_disable_irq(unsigned int irq) 94static void spider_mask_irq(unsigned int virq)
96{ 95{
97 void __iomem *cfg = spider_get_irq_config(irq); 96 struct spider_pic *pic = spider_virq_to_pic(virq);
98 irq = spider_get_nr(irq); 97 void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq);
99 98
99 /* We use no locking as we should be covered by the descriptor lock
100 * for access to invidual source configuration registers
101 */
100 out_be32(cfg, in_be32(cfg) & ~0x30000000u); 102 out_be32(cfg, in_be32(cfg) & ~0x30000000u);
101} 103}
102 104
103static unsigned int spider_startup_irq(unsigned int irq) 105static void spider_ack_irq(unsigned int virq)
104{ 106{
105 spider_enable_irq(irq); 107 struct spider_pic *pic = spider_virq_to_pic(virq);
106 return 0; 108 unsigned int src = irq_map[virq].hwirq;
107}
108 109
109static void spider_shutdown_irq(unsigned int irq) 110 /* Reset edge detection logic if necessary
110{ 111 */
111 spider_disable_irq(irq); 112 if (get_irq_desc(virq)->status & IRQ_LEVEL)
112} 113 return;
113 114
114static void spider_end_irq(unsigned int irq) 115 /* Only interrupts 47 to 50 can be set to edge */
115{ 116 if (src < 47 || src > 50)
116 spider_enable_irq(irq); 117 return;
117}
118 118
119static void spider_ack_irq(unsigned int irq) 119 /* Perform the clear of the edge logic */
120{ 120 out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf));
121 spider_disable_irq(irq);
122 iic_local_enable();
123} 121}
124 122
125static struct hw_interrupt_type spider_pic = { 123static struct irq_chip spider_pic = {
126 .typename = " SPIDER ", 124 .typename = " SPIDER ",
127 .startup = spider_startup_irq, 125 .unmask = spider_unmask_irq,
128 .shutdown = spider_shutdown_irq, 126 .mask = spider_mask_irq,
129 .enable = spider_enable_irq,
130 .disable = spider_disable_irq,
131 .ack = spider_ack_irq, 127 .ack = spider_ack_irq,
132 .end = spider_end_irq,
133}; 128};
134 129
135int spider_get_irq(int node) 130static int spider_host_match(struct irq_host *h, struct device_node *node)
136{ 131{
137 unsigned long cs; 132 struct spider_pic *pic = h->host_data;
138 void __iomem *regs = spider_pics[node]; 133 return node == pic->of_node;
139
140 cs = in_be32(regs + TIR_CS) >> 24;
141
142 if (cs == 63)
143 return -1;
144 else
145 return cs;
146} 134}
147 135
148/* hardcoded part to be compatible with older firmware */ 136static int spider_host_map(struct irq_host *h, unsigned int virq,
149 137 irq_hw_number_t hw, unsigned int flags)
150void spider_init_IRQ_hardcoded(void)
151{ 138{
152 int node; 139 unsigned int sense = flags & IRQ_TYPE_SENSE_MASK;
153 long spiderpic; 140 struct spider_pic *pic = h->host_data;
154 long pics[] = { 0x24000008000, 0x34000008000 }; 141 void __iomem *cfg = spider_get_irq_config(pic, hw);
155 int n; 142 int level = 0;
156 143 u32 ic;
157 pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__); 144
158 145 /* Note that only level high is supported for most interrupts */
159 for (node = 0; node < num_present_cpus()/2; node++) { 146 if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH &&
160 spiderpic = pics[node]; 147 (hw < 47 || hw > 50))
161 printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic); 148 return -EINVAL;
162 spider_pics[node] = ioremap(spiderpic, 0x800); 149
163 for (n = 0; n < IIC_NUM_EXT; n++) { 150 /* Decode sense type */
164 int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; 151 switch(sense) {
165 get_irq_desc(irq)->chip = &spider_pic; 152 case IRQ_TYPE_EDGE_RISING:
166 } 153 ic = 0x3;
167 154 break;
168 /* do not mask any interrupts because of level */ 155 case IRQ_TYPE_EDGE_FALLING:
169 out_be32(spider_pics[node] + TIR_MSK, 0x0); 156 ic = 0x2;
170 157 break;
171 /* disable edge detection clear */ 158 case IRQ_TYPE_LEVEL_LOW:
172 /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ 159 ic = 0x0;
173 160 level = 1;
174 /* enable interrupt packets to be output */ 161 break;
175 out_be32(spider_pics[node] + TIR_PIEN, 162 case IRQ_TYPE_LEVEL_HIGH:
176 in_be32(spider_pics[node] + TIR_PIEN) | 0x1); 163 case IRQ_TYPE_NONE:
177 164 ic = 0x1;
178 /* Enable the interrupt detection enable bit. Do this last! */ 165 level = 1;
179 out_be32(spider_pics[node] + TIR_DEN, 166 break;
180 in_be32(spider_pics[node] + TIR_DEN) | 0x1); 167 default:
168 return -EINVAL;
181 } 169 }
182}
183 170
184void spider_init_IRQ(void) 171 /* Configure the source. One gross hack that was there before and
185{ 172 * that I've kept around is the priority to the BE which I set to
 186 long spider_reg; 173 * be the same as the interrupt source number. I don't know whether
 187 struct device_node *dn; 174 * that's supposed to make any kind of sense; we'll have to
 188 char *compatible; 175 * decide that, but for now, I'm not changing the behaviour.
189 int n, node = 0; 176 */
177 out_be32(cfg, (ic << 24) | (0x7 << 16) | (pic->node_id << 4) | 0xe);
178 out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff));
179
180 if (level)
181 get_irq_desc(virq)->status |= IRQ_LEVEL;
182 set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq);
183 return 0;
184}
190 185
191 for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { 186static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
192 compatible = (char *)get_property(dn, "compatible", NULL); 187 u32 *intspec, unsigned int intsize,
188 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
193 189
194 if (!compatible) 190{
195 continue; 191 /* Spider interrupts have 2 cells, first is the interrupt source,
192 * second, well, I don't know for sure yet ... We mask the top bits
193 * because old device-trees encode a node number in there
194 */
195 *out_hwirq = intspec[0] & 0x3f;
196 *out_flags = IRQ_TYPE_LEVEL_HIGH;
197 return 0;
198}
196 199
197 if (strstr(compatible, "CBEA,platform-spider-pic")) 200static struct irq_host_ops spider_host_ops = {
198 spider_reg = *(long *)get_property(dn,"reg", NULL); 201 .match = spider_host_match,
199 else if (strstr(compatible, "sti,platform-spider-pic")) { 202 .map = spider_host_map,
200 spider_init_IRQ_hardcoded(); 203 .xlate = spider_host_xlate,
201 return; 204};
202 } else
203 continue;
204 205
205 if (!spider_reg) 206static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc,
206 printk("interrupt controller does not have reg property !\n"); 207 struct pt_regs *regs)
208{
209 struct spider_pic *pic = desc->handler_data;
210 unsigned int cs, virq;
207 211
208 n = prom_n_addr_cells(dn); 212 cs = in_be32(pic->regs + TIR_CS) >> 24;
213 if (cs == SPIDER_IRQ_INVALID)
214 virq = NO_IRQ;
215 else
216 virq = irq_linear_revmap(pic->host, cs);
217 if (virq != NO_IRQ)
218 generic_handle_irq(virq, regs);
219 desc->chip->eoi(irq);
220}
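Given the host ops above, a consumer holding a two-cell spider specifier reaches spider_host_xlate() and then spider_host_map() through irq_create_of_mapping(); a hedged sketch with hypothetical specifier values:

/* Hypothetical consumer fragment (sketch) */
static unsigned int example_map_spider_source(struct spider_pic *pic)
{
	u32 spec[2] = { 46, 0 };	/* hw source 46; second cell unused */

	/* xlate masks the source with 0x3f and forces IRQ_TYPE_LEVEL_HIGH */
	return irq_create_of_mapping(pic->of_node, spec, 2);
}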
209 221
 210 if ( n != 2) 222/* For hooking up the cascade we have a problem. Our device-tree is
 211 printk("reg property with invalid number of elements \n"); 223 * crap and we don't know which BE iic interrupt we are hooked on, at
 224 * least not the "standard" way. We can reconstitute it based on two
 225 * pieces of information though: which BE node we are connected to and whether
226 * we are connected to IOIF0 or IOIF1. Right now, we really only care
227 * about the IBM cell blade and we know that its firmware gives us an
228 * interrupt-map property which is pretty strange.
229 */
230static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
231{
232 unsigned int virq;
233 u32 *imap, *tmp;
234 int imaplen, intsize, unit;
235 struct device_node *iic;
236 struct irq_host *iic_host;
237
 238#if 0 /* Enable this when we have a way to retrieve the node as well */
 239 /* First, we check whether we have a real "interrupts" property in the
 240 * device tree, in case the device-tree is ever fixed
241 */
242 struct of_irq oirq;
243 if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) {
244 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
245 oirq.size);
246 goto bail;
247 }
248#endif
249
250 /* Now do the horrible hacks */
251 tmp = (u32 *)get_property(pic->of_node, "#interrupt-cells", NULL);
252 if (tmp == NULL)
253 return NO_IRQ;
254 intsize = *tmp;
255 imap = (u32 *)get_property(pic->of_node, "interrupt-map", &imaplen);
256 if (imap == NULL || imaplen < (intsize + 1))
257 return NO_IRQ;
258 iic = of_find_node_by_phandle(imap[intsize]);
259 if (iic == NULL)
260 return NO_IRQ;
261 imap += intsize + 1;
262 tmp = (u32 *)get_property(iic, "#interrupt-cells", NULL);
263 if (tmp == NULL)
264 return NO_IRQ;
265 intsize = *tmp;
266 /* Assume unit is last entry of interrupt specifier */
267 unit = imap[intsize - 1];
268 /* Ok, we have a unit, now let's try to get the node */
269 tmp = (u32 *)get_property(iic, "ibm,interrupt-server-ranges", NULL);
270 if (tmp == NULL) {
271 of_node_put(iic);
272 return NO_IRQ;
273 }
274 /* ugly as hell but works for now */
275 pic->node_id = (*tmp) >> 1;
276 of_node_put(iic);
277
278 /* Ok, now let's get cracking. You may ask me why I just didn't match
279 * the iic host from the iic OF node, but that way I'm still compatible
 280 * with really, really old firmwares for which we don't have a node
281 */
282 iic_host = iic_get_irq_host(pic->node_id);
283 if (iic_host == NULL)
284 return NO_IRQ;
285 /* Manufacture an IIC interrupt number of class 2 */
286 virq = irq_create_mapping(iic_host, 0x20 | unit, 0);
287 if (virq == NO_IRQ)
288 printk(KERN_ERR "spider_pic: failed to map cascade !");
289 return virq;
290}
212 291
213 spider_pics[node] = ioremap(spider_reg, 0x800);
214 292
215 printk("SPIDER addr: %lx with %i addr_cells mapped to %p\n", 293static void __init spider_init_one(struct device_node *of_node, int chip,
216 spider_reg, n, spider_pics[node]); 294 unsigned long addr)
295{
296 struct spider_pic *pic = &spider_pics[chip];
297 int i, virq;
298
299 /* Map registers */
300 pic->regs = ioremap(addr, 0x1000);
301 if (pic->regs == NULL)
302 panic("spider_pic: can't map registers !");
303
304 /* Allocate a host */
305 pic->host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, SPIDER_SRC_COUNT,
306 &spider_host_ops, SPIDER_IRQ_INVALID);
307 if (pic->host == NULL)
308 panic("spider_pic: can't allocate irq host !");
309 pic->host->host_data = pic;
310
311 /* Fill out other bits */
312 pic->of_node = of_node_get(of_node);
313
314 /* Go through all sources and disable them */
315 for (i = 0; i < SPIDER_SRC_COUNT; i++) {
316 void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i;
317 out_be32(cfg, in_be32(cfg) & ~0x30000000u);
318 }
217 319
218 for (n = 0; n < IIC_NUM_EXT; n++) { 320 /* do not mask any interrupts because of level */
219 int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; 321 out_be32(pic->regs + TIR_MSK, 0x0);
220 get_irq_desc(irq)->chip = &spider_pic;
221 }
222 322
223 /* do not mask any interrupts because of level */ 323 /* enable interrupt packets to be output */
224 out_be32(spider_pics[node] + TIR_MSK, 0x0); 324 out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1);
225 325
226 /* disable edge detection clear */ 326 /* Hook up the cascade interrupt to the iic and nodeid */
227 /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ 327 virq = spider_find_cascade_and_node(pic);
328 if (virq == NO_IRQ)
329 return;
330 set_irq_data(virq, pic);
331 set_irq_chained_handler(virq, spider_irq_cascade);
228 332
229 /* enable interrupt packets to be output */ 333 printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n",
230 out_be32(spider_pics[node] + TIR_PIEN, 334 pic->node_id, addr, of_node->full_name);
231 in_be32(spider_pics[node] + TIR_PIEN) | 0x1);
232 335
233 /* Enable the interrupt detection enable bit. Do this last! */ 336 /* Enable the interrupt detection enable bit. Do this last! */
234 out_be32(spider_pics[node] + TIR_DEN, 337 out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1);
235 in_be32(spider_pics[node] + TIR_DEN) | 0x1); 338}
236 339
237 node++; 340void __init spider_init_IRQ(void)
341{
342 struct resource r;
343 struct device_node *dn;
344 int chip = 0;
345
346 /* XXX node numbers are totally bogus. We _hope_ we get the device
347 * nodes in the right order here but that's definitely not guaranteed,
348 * we need to get the node from the device tree instead.
349 * There is currently no proper property for it (but our whole
350 * device-tree is bogus anyway) so all we can do is pray or maybe test
351 * the address and deduce the node-id
352 */
353 for (dn = NULL;
354 (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
355 if (device_is_compatible(dn, "CBEA,platform-spider-pic")) {
356 if (of_address_to_resource(dn, 0, &r)) {
357 printk(KERN_WARNING "spider-pic: Failed\n");
358 continue;
359 }
360 } else if (device_is_compatible(dn, "sti,platform-spider-pic")
361 && (chip < 2)) {
362 static long hard_coded_pics[] =
363 { 0x24000008000, 0x34000008000 };
364 r.start = hard_coded_pics[chip];
365 } else
366 continue;
367 spider_init_one(dn, chip++, r.start);
238 } 368 }
239} 369}
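Note on the spider rewrite above: it is one instance of the new two-step powerpc interrupt scheme in this merge, allocate an irq_host for the controller, then create per-source mappings that drivers request. A minimal sketch of that usage, assuming the 2.6.18-era interfaces visible in this patch; the my_pic_* names and the source count are hypothetical, not from the tree:

/* Sketch only: hypothetical controller driven through the irq_host API */
static struct irq_chip my_pic_chip;	/* mask/unmask ops omitted here */

static int my_pic_host_map(struct irq_host *h, unsigned int virq,
			   irq_hw_number_t hw, unsigned int flags)
{
	/* called once per hwirq when a mapping is first created */
	set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops my_pic_host_ops = {
	.map	= my_pic_host_map,
};

static void __init my_pic_init(void)
{
	struct irq_host *host;
	unsigned int virq;

	/* linear reverse map sized for 16 hardware sources */
	host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 16,
			      &my_pic_host_ops, 16);
	BUG_ON(host == NULL);

	/* hardware source 3 becomes a virtual irq usable by request_irq() */
	virq = irq_create_mapping(host, 3, 0);
}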
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 656c1ef5f4ad..5d2313a6c82b 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -264,51 +264,57 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
-static int
-spu_request_irqs(struct spu *spu)
+static int spu_request_irqs(struct spu *spu)
 {
-	int ret;
-	int irq_base;
-
-	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
-
-	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
-	ret = request_irq(irq_base + spu->isrc,
-			  spu_irq_class_0, IRQF_DISABLED, spu->irq_c0, spu);
-	if (ret)
-		goto out;
-
-	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
-	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
-			  spu_irq_class_1, IRQF_DISABLED, spu->irq_c1, spu);
-	if (ret)
-		goto out1;
+	int ret = 0;
 
-	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
-	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
-			  spu_irq_class_2, IRQF_DISABLED, spu->irq_c2, spu);
-	if (ret)
-		goto out2;
-	goto out;
+	if (spu->irqs[0] != NO_IRQ) {
+		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
+			 spu->number);
+		ret = request_irq(spu->irqs[0], spu_irq_class_0,
+				  IRQF_DISABLED,
+				  spu->irq_c0, spu);
+		if (ret)
+			goto bail0;
+	}
+	if (spu->irqs[1] != NO_IRQ) {
+		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
+			 spu->number);
+		ret = request_irq(spu->irqs[1], spu_irq_class_1,
+				  IRQF_DISABLED,
+				  spu->irq_c1, spu);
+		if (ret)
+			goto bail1;
+	}
+	if (spu->irqs[2] != NO_IRQ) {
+		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
+			 spu->number);
+		ret = request_irq(spu->irqs[2], spu_irq_class_2,
+				  IRQF_DISABLED,
+				  spu->irq_c2, spu);
+		if (ret)
+			goto bail2;
+	}
+	return 0;
 
-out2:
-	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
-out1:
-	free_irq(irq_base + spu->isrc, spu);
-out:
+bail2:
+	if (spu->irqs[1] != NO_IRQ)
+		free_irq(spu->irqs[1], spu);
+bail1:
+	if (spu->irqs[0] != NO_IRQ)
+		free_irq(spu->irqs[0], spu);
+bail0:
 	return ret;
 }
 
-static void
-spu_free_irqs(struct spu *spu)
+static void spu_free_irqs(struct spu *spu)
 {
-	int irq_base;
-
-	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
-
-	free_irq(irq_base + spu->isrc, spu);
-	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
-	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
+	if (spu->irqs[0] != NO_IRQ)
+		free_irq(spu->irqs[0], spu);
+	if (spu->irqs[1] != NO_IRQ)
+		free_irq(spu->irqs[1], spu);
+	if (spu->irqs[2] != NO_IRQ)
+		free_irq(spu->irqs[2], spu);
 }
 
 static LIST_HEAD(spu_list);
@@ -559,17 +565,38 @@ static void spu_unmap(struct spu *spu)
 	iounmap((u8 __iomem *)spu->local_store);
 }
 
+/* This function shall be abstracted for HV platforms */
+static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+{
+	struct irq_host *host;
+	unsigned int isrc;
+	u32 *tmp;
+
+	host = iic_get_irq_host(spu->node);
+	if (host == NULL)
+		return -ENODEV;
+
+	/* Get the interrupt source from the device-tree */
+	tmp = (u32 *)get_property(np, "isrc", NULL);
+	if (!tmp)
+		return -ENODEV;
+	spu->isrc = isrc = tmp[0];
+
+	/* Now map interrupts of all 3 classes */
+	spu->irqs[0] = irq_create_mapping(host, 0x00 | isrc, 0);
+	spu->irqs[1] = irq_create_mapping(host, 0x10 | isrc, 0);
+	spu->irqs[2] = irq_create_mapping(host, 0x20 | isrc, 0);
+
+	/* Right now, we only fail if class 2 failed */
+	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
+}
+
 static int __init spu_map_device(struct spu *spu, struct device_node *node)
 {
 	char *prop;
 	int ret;
 
 	ret = -ENODEV;
-	prop = get_property(node, "isrc", NULL);
-	if (!prop)
-		goto out;
-	spu->isrc = *(unsigned int *)prop;
-
 	spu->name = get_property(node, "name", NULL);
 	if (!spu->name)
 		goto out;
@@ -636,7 +663,8 @@ static int spu_create_sysdev(struct spu *spu)
 		return ret;
 	}
 
-	sysdev_create_file(&spu->sysdev, &attr_isrc);
+	if (spu->isrc != 0)
+		sysdev_create_file(&spu->sysdev, &attr_isrc);
 	sysfs_add_device_to_node(&spu->sysdev, spu->nid);
 
 	return 0;
@@ -668,6 +696,9 @@ static int __init create_spu(struct device_node *spe)
 	spu->nid = of_node_to_nid(spe);
 	if (spu->nid == -1)
 		spu->nid = 0;
+	ret = spu_map_interrupts(spu, spe);
+	if (ret)
+		goto out_unmap;
 	spin_lock_init(&spu->register_lock);
 	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
 	spu_mfc_sr1_set(spu, 0x33);
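Context for the spu_base.c hunks: spu_map_interrupts() encodes the interrupt class in the high nibble of the IIC source number (0x00/0x10/0x20 | isrc) and leaves unwired classes as NO_IRQ, which is why request and free both guard every class. A hedged sketch of a helper following that convention; spu_request_one is invented here for illustration and is not part of the patch:

/* Sketch only: one-class variant of the guarded request pattern above */
static int spu_request_one(struct spu *spu, int class,
			   irqreturn_t (*handler)(int, void *, struct pt_regs *),
			   const char *name)
{
	if (spu->irqs[class] == NO_IRQ)
		return 0;	/* class not mapped on this platform */
	return request_irq(spu->irqs[class], handler, IRQF_DISABLED,
			   name, spu);
}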
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 66c253498803..6802cdc3168a 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -18,7 +18,6 @@
 #include <asm/machdep.h>
 #include <asm/sections.h>
 #include <asm/pci-bridge.h>
-#include <asm/open_pic.h>
 #include <asm/grackle.h>
 #include <asm/rtas.h>
 
@@ -161,15 +160,9 @@ void __init
 chrp_pcibios_fixup(void)
 {
 	struct pci_dev *dev = NULL;
-	struct device_node *np;
 
-	/* PCI interrupts are controlled by the OpenPIC */
-	for_each_pci_dev(dev) {
-		np = pci_device_to_OF_node(dev);
-		if ((np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0))
-			dev->irq = np->intrs[0].line;
-		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
-	}
+	for_each_pci_dev(dev)
+		pci_read_irq_line(dev);
 }
 
 #define PRG_CL_RESET_VALID 0x00010000
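The fixup now defers to pci_read_irq_line(), which resolves each device's interrupt from the device-tree and stores the resulting virtual irq in dev->irq, instead of poking PCI_INTERRUPT_LINE by hand. A sketch of the idiom as a platform hook; the function name is illustrative only:

/* Sketch only: generic OF-based irq fixup over a PCI bus walk */
static void __init example_pcibios_fixup(void)
{
	struct pci_dev *dev = NULL;

	for_each_pci_dev(dev)
		pci_read_irq_line(dev);	/* sets dev->irq to a mapped virq */
}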
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 1f1771b212b4..538e337d63e2 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -24,7 +24,7 @@
 #include <linux/reboot.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/version.h>
+#include <linux/utsrelease.h>
 #include <linux/adb.h>
 #include <linux/module.h>
 #include <linux/delay.h>
@@ -59,7 +59,7 @@ void rtas_indicator_progress(char *, unsigned short);
 int _chrp_type;
 EXPORT_SYMBOL(_chrp_type);
 
-struct mpic *chrp_mpic;
+static struct mpic *chrp_mpic;
 
 /* Used for doing CHRP event-scans */
 DEFINE_PER_CPU(struct timer_list, heartbeat_timer);
@@ -315,24 +315,32 @@ chrp_event_scan(unsigned long unused)
 			jiffies + event_scan_interval);
 }
 
+static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc,
+			      struct pt_regs *regs)
+{
+	unsigned int cascade_irq = i8259_irq(regs);
+	if (cascade_irq != NO_IRQ)
+		generic_handle_irq(cascade_irq, regs);
+	desc->chip->eoi(irq);
+}
+
 /*
  * Finds the open-pic node and sets up the mpic driver.
  */
 static void __init chrp_find_openpic(void)
 {
 	struct device_node *np, *root;
-	int len, i, j, irq_count;
+	int len, i, j;
 	int isu_size, idu_size;
 	unsigned int *iranges, *opprop = NULL;
 	int oplen = 0;
 	unsigned long opaddr;
 	int na = 1;
-	unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS];
 
-	np = find_type_devices("open-pic");
+	np = of_find_node_by_type(NULL, "open-pic");
 	if (np == NULL)
 		return;
-	root = find_path_device("/");
+	root = of_find_node_by_path("/");
 	if (root) {
 		opprop = (unsigned int *) get_property
 			(root, "platform-open-pic", &oplen);
@@ -343,19 +351,15 @@ static void __init chrp_find_openpic(void)
 		oplen /= na * sizeof(unsigned int);
 	} else {
 		struct resource r;
-		if (of_address_to_resource(np, 0, &r))
-			return;
+		if (of_address_to_resource(np, 0, &r)) {
+			goto bail;
+		}
 		opaddr = r.start;
 		oplen = 0;
 	}
 
 	printk(KERN_INFO "OpenPIC at %lx\n", opaddr);
 
-	irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4;	/* leave room for IPIs */
-	prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4);
-	/* i8259 cascade is always positive level */
-	init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE;
-
 	iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len);
 	if (iranges == NULL)
 		len = 0;	/* non-distributed mpic */
@@ -382,15 +386,12 @@ static void __init chrp_find_openpic(void)
 	if (len > 1)
 		isu_size = iranges[3];
 
-	chrp_mpic = mpic_alloc(opaddr, MPIC_PRIMARY,
-			isu_size, NUM_ISA_INTERRUPTS, irq_count,
-			NR_IRQS - 4, init_senses, irq_count,
-			" MPIC ");
+	chrp_mpic = mpic_alloc(np, opaddr, MPIC_PRIMARY,
+			       isu_size, 0, " MPIC ");
 	if (chrp_mpic == NULL) {
 		printk(KERN_ERR "Failed to allocate MPIC structure\n");
-		return;
+		goto bail;
 	}
-
 	j = na - 1;
 	for (i = 1; i < len; ++i) {
 		iranges += 2;
@@ -402,7 +403,10 @@ static void __init chrp_find_openpic(void)
 	}
 
 	mpic_init(chrp_mpic);
-	mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
+	ppc_md.get_irq = mpic_get_irq;
+ bail:
+	of_node_put(root);
+	of_node_put(np);
 }
 
 #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
@@ -413,14 +417,34 @@ static struct irqaction xmon_irqaction = {
 };
 #endif
 
-void __init chrp_init_IRQ(void)
+static void __init chrp_find_8259(void)
 {
-	struct device_node *np;
+	struct device_node *np, *pic = NULL;
 	unsigned long chrp_int_ack = 0;
-#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
-	struct device_node *kbd;
-#endif
+	unsigned int cascade_irq;
+
+	/* Look for cascade */
+	for_each_node_by_type(np, "interrupt-controller")
+		if (device_is_compatible(np, "chrp,iic")) {
+			pic = np;
+			break;
+		}
+	/* Ok, 8259 wasn't found. We need to handle the case where
+	 * we have a pegasos that claims to be chrp but doesn't have
+	 * a proper interrupt tree
+	 */
+	if (pic == NULL && chrp_mpic != NULL) {
+		printk(KERN_ERR "i8259: Not found in device-tree"
+		       " assuming no legacy interrupts\n");
+		return;
+	}
 
+	/* Look for intack. In a perfect world, we would look for it on
+	 * the ISA bus that holds the 8259 but heh... Works that way. If
+	 * we ever see a problem, we can try to re-use the pSeries code here.
+	 * Also, Pegasos-type platforms don't have a proper node to start
+	 * from anyway
+	 */
 	for (np = find_devices("pci"); np != NULL; np = np->next) {
 		unsigned int *addrp = (unsigned int *)
 			get_property(np, "8259-interrupt-acknowledge", NULL);
@@ -431,11 +455,29 @@ void __init chrp_init_IRQ(void)
 			break;
 	}
 	if (np == NULL)
-		printk(KERN_ERR "Cannot find PCI interrupt acknowledge address\n");
+		printk(KERN_WARNING "Cannot find PCI interrupt acknowledge"
+		       " address, polling\n");
+
+	i8259_init(pic, chrp_int_ack);
+	if (ppc_md.get_irq == NULL)
+		ppc_md.get_irq = i8259_irq;
+	if (chrp_mpic != NULL) {
+		cascade_irq = irq_of_parse_and_map(pic, 0);
+		if (cascade_irq == NO_IRQ)
+			printk(KERN_ERR "i8259: failed to map cascade irq\n");
+		else
+			set_irq_chained_handler(cascade_irq,
+						chrp_8259_cascade);
+	}
+}
 
+void __init chrp_init_IRQ(void)
+{
+#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
+	struct device_node *kbd;
+#endif
 	chrp_find_openpic();
-
-	i8259_init(chrp_int_ack, 0);
+	chrp_find_8259();
 
 	if (_chrp_type == _CHRP_Pegasos)
 		ppc_md.get_irq = i8259_irq;
@@ -520,10 +562,6 @@ static int __init chrp_probe(void)
 	DMA_MODE_READ = 0x44;
 	DMA_MODE_WRITE = 0x48;
 	isa_io_base = CHRP_ISA_IO_BASE;		/* default value */
-	ppc_do_canonicalize_irqs = 1;
-
-	/* Assume we have an 8259... */
-	__irq_offset_value = NUM_ISA_INTERRUPTS;
 
 	return 1;
 }
@@ -535,7 +573,6 @@ define_machine(chrp) {
 	.init		= chrp_init2,
 	.show_cpuinfo	= chrp_show_cpuinfo,
 	.init_IRQ	= chrp_init_IRQ,
-	.get_irq	= mpic_get_irq,
 	.pcibios_fixup	= chrp_pcibios_fixup,
 	.restart	= rtas_restart,
 	.power_off	= rtas_power_off,
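The chrp_8259_cascade()/set_irq_chained_handler() pair added above is the generic cascade idiom of the new scheme: the parent PIC's flow handler polls the child controller, injects whatever it finds, then EOIs the parent line. Sketched below under the same 2.6.18-era API; my_cascade and pic_node are illustrative names, not from the patch:

/* Sketch only: cascading a child 8259 behind a parent interrupt */
static void my_cascade(unsigned int irq, struct irq_desc *desc,
		       struct pt_regs *regs)
{
	unsigned int child = i8259_irq(regs);	/* poll the child PIC */

	if (child != NO_IRQ)
		generic_handle_irq(child, regs);
	desc->chip->eoi(irq);			/* EOI the parent line */
}

static void __init my_cascade_init(struct device_node *pic_node)
{
	unsigned int cascade = irq_of_parse_and_map(pic_node, 0);

	if (cascade != NO_IRQ)
		set_irq_chained_handler(cascade, my_cascade);
}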
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index c298ca1ea680..1d2307e87c30 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -29,7 +29,6 @@
 #include <asm/smp.h>
 #include <asm/residual.h>
 #include <asm/time.h>
-#include <asm/open_pic.h>
 #include <asm/machdep.h>
 #include <asm/smp.h>
 #include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index f70e820e7304..2275e64f3152 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -162,27 +162,6 @@ static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs)
 		printk(KERN_ERR "pci_event_handler: NULL event received\n");
 }
 
-/*
- * This is called by init_IRQ.  set in ppc_md.init_IRQ by iSeries_setup.c
- * It must be called before the bus walk.
- */
-void __init iSeries_init_IRQ(void)
-{
-	/* Register PCI event handler and open an event path */
-	int ret;
-
-	ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
-			&pci_event_handler);
-	if (ret == 0) {
-		ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
-		if (ret != 0)
-			printk(KERN_ERR "iseries_init_IRQ: open event path "
-					"failed with rc 0x%x\n", ret);
-	} else
-		printk(KERN_ERR "iseries_init_IRQ: register handler "
-				"failed with rc 0x%x\n", ret);
-}
-
 #define REAL_IRQ_TO_SUBBUS(irq)	(((irq) >> 14) & 0xff)
 #define REAL_IRQ_TO_BUS(irq)	((((irq) >> 6) & 0xff) + 1)
 #define REAL_IRQ_TO_IDSEL(irq)	((((irq) >> 3) & 7) + 1)
@@ -196,7 +175,7 @@ static void iseries_enable_IRQ(unsigned int irq)
 {
 	u32 bus, dev_id, function, mask;
 	const u32 sub_bus = 0;
-	unsigned int rirq = virt_irq_to_real_map[irq];
+	unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
 
 	/* The IRQ has already been locked by the caller */
 	bus = REAL_IRQ_TO_BUS(rirq);
@@ -213,7 +192,7 @@ static unsigned int iseries_startup_IRQ(unsigned int irq)
 {
 	u32 bus, dev_id, function, mask;
 	const u32 sub_bus = 0;
-	unsigned int rirq = virt_irq_to_real_map[irq];
+	unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
 
 	bus = REAL_IRQ_TO_BUS(rirq);
 	function = REAL_IRQ_TO_FUNC(rirq);
@@ -254,7 +233,7 @@ static void iseries_shutdown_IRQ(unsigned int irq)
 {
 	u32 bus, dev_id, function, mask;
 	const u32 sub_bus = 0;
-	unsigned int rirq = virt_irq_to_real_map[irq];
+	unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
 
 	/* irq should be locked by the caller */
 	bus = REAL_IRQ_TO_BUS(rirq);
@@ -277,7 +256,7 @@ static void iseries_disable_IRQ(unsigned int irq)
 {
 	u32 bus, dev_id, function, mask;
 	const u32 sub_bus = 0;
-	unsigned int rirq = virt_irq_to_real_map[irq];
+	unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
 
 	/* The IRQ has already been locked by the caller */
 	bus = REAL_IRQ_TO_BUS(rirq);
@@ -291,19 +270,19 @@ static void iseries_disable_IRQ(unsigned int irq)
 
 static void iseries_end_IRQ(unsigned int irq)
 {
-	unsigned int rirq = virt_irq_to_real_map[irq];
+	unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
 
 	HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
 		(REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
 }
 
-static hw_irq_controller iSeries_IRQ_handler = {
-	.typename = "iSeries irq controller",
-	.startup = iseries_startup_IRQ,
-	.shutdown = iseries_shutdown_IRQ,
-	.enable = iseries_enable_IRQ,
-	.disable = iseries_disable_IRQ,
-	.end = iseries_end_IRQ
+static struct irq_chip iseries_pic = {
+	.typename	= "iSeries irq controller",
+	.startup	= iseries_startup_IRQ,
+	.shutdown	= iseries_shutdown_IRQ,
+	.unmask		= iseries_enable_IRQ,
+	.mask		= iseries_disable_IRQ,
+	.eoi		= iseries_end_IRQ
 };
 
 /*
@@ -314,17 +293,14 @@ static hw_irq_controller iSeries_IRQ_handler = {
 int __init iSeries_allocate_IRQ(HvBusNumber bus,
 		HvSubBusNumber sub_bus, u32 bsubbus)
 {
-	int virtirq;
 	unsigned int realirq;
 	u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus);
 	u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus);
 
 	realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
 		+ function;
-	virtirq = virt_irq_create_mapping(realirq);
 
-	irq_desc[virtirq].chip = &iSeries_IRQ_handler;
-	return virtirq;
+	return irq_create_mapping(NULL, realirq, IRQ_TYPE_NONE);
 }
 
 #endif /* CONFIG_PCI */
@@ -332,10 +308,9 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus,
 /*
  * Get the next pending IRQ.
  */
-int iSeries_get_irq(struct pt_regs *regs)
+unsigned int iSeries_get_irq(struct pt_regs *regs)
 {
-	/* -2 means ignore this interrupt */
-	int irq = -2;
+	int irq = NO_IRQ_IGNORE;
 
 #ifdef CONFIG_SMP
 	if (get_lppaca()->int_dword.fields.ipi_cnt) {
@@ -358,9 +333,57 @@ int iSeries_get_irq(struct pt_regs *regs)
 		}
 		spin_unlock(&pending_irqs_lock);
 		if (irq >= NR_IRQS)
-			irq = -2;
+			irq = NO_IRQ_IGNORE;
 	}
 #endif
 
 	return irq;
 }
+
+static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
+				irq_hw_number_t hw, unsigned int flags)
+{
+	set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
+
+	return 0;
+}
+
+static struct irq_host_ops iseries_irq_host_ops = {
+	.map = iseries_irq_host_map,
+};
+
+/*
+ * This is called by init_IRQ.  set in ppc_md.init_IRQ by iSeries_setup.c
+ * It must be called before the bus walk.
+ */
+void __init iSeries_init_IRQ(void)
+{
+	/* Register PCI event handler and open an event path */
+	struct irq_host *host;
+	int ret;
+
+	/*
+	 * The Hypervisor only allows us up to 256 interrupt
+	 * sources (the irq number is passed in a u8).
+	 */
+	irq_set_virq_count(256);
+
+	/* Create irq host. No need for a revmap since HV will give us
+	 * back our virtual irq number
+	 */
+	host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &iseries_irq_host_ops, 0);
+	BUG_ON(host == NULL);
+	irq_set_default_host(host);
+
+	ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
+			&pci_event_handler);
+	if (ret == 0) {
+		ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
+		if (ret != 0)
+			printk(KERN_ERR "iseries_init_IRQ: open event path "
+					"failed with rc 0x%x\n", ret);
+	} else
+		printk(KERN_ERR "iseries_init_IRQ: register handler "
+				"failed with rc 0x%x\n", ret);
+}
+
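iSeries uses IRQ_HOST_MAP_NOMAP because the hypervisor hands back the virtual number directly, so no reverse map is kept; passing NULL to irq_create_mapping() targets the default host registered above. A sketch of a caller allocating an irq in that setup (realirq stands in for any hypervisor source number; the function name is invented):

/* Sketch only: mapping through the default NOMAP host */
unsigned int iseries_map_example(unsigned long realirq)
{
	/* NULL host means the irq_set_default_host() target; the host's
	 * .map callback attaches the chip and flow handler to the virq */
	return irq_create_mapping(NULL, realirq, IRQ_TYPE_NONE);
}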
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
index 188aa808abd7..1ee8985140e5 100644
--- a/arch/powerpc/platforms/iseries/irq.h
+++ b/arch/powerpc/platforms/iseries/irq.h
@@ -4,6 +4,6 @@
 extern void iSeries_init_IRQ(void);
 extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32);
 extern void iSeries_activate_IRQs(void);
-extern int iSeries_get_irq(struct pt_regs *);
+extern unsigned int iSeries_get_irq(struct pt_regs *);
 
 #endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index c877074745b2..c9605d773a77 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -294,8 +294,6 @@ static void __init iSeries_init_early(void)
 {
 	DBG(" -> iSeries_init_early()\n");
 
-	ppc64_interrupt_controller = IC_ISERIES;
-
 #if defined(CONFIG_BLK_DEV_INITRD)
 	/*
 	 * If the init RAM disk has been configured and there is
@@ -659,12 +657,6 @@ static int __init iseries_probe(void)
 	powerpc_firmware_features |= FW_FEATURE_ISERIES;
 	powerpc_firmware_features |= FW_FEATURE_LPAR;
 
-	/*
-	 * The Hypervisor only allows us up to 256 interrupt
-	 * sources (the irq number is passed in a u8).
-	 */
-	virt_irq_max = 255;
-
 	hpte_init_iSeries();
 
 	return 1;
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index f7170ff86dab..63a1670d3bfd 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -443,18 +443,23 @@ void __init maple_pci_init(void)
 int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
 {
 	struct device_node *np;
-	int irq = channel ? 15 : 14;
+	unsigned int defirq = channel ? 15 : 14;
+	unsigned int irq;
 
 	if (pdev->vendor != PCI_VENDOR_ID_AMD ||
 	    pdev->device != PCI_DEVICE_ID_AMD_8111_IDE)
-		return irq;
+		return defirq;
 
 	np = pci_device_to_OF_node(pdev);
 	if (np == NULL)
-		return irq;
-	if (np->n_intrs < 2)
-		return irq;
-	return np->intrs[channel & 0x1].line;
+		return defirq;
+	irq = irq_of_parse_and_map(np, channel & 0x1);
+	if (irq == NO_IRQ) {
+		printk("Failed to map onboard IDE interrupt for channel %d\n",
+		       channel);
+		return defirq;
+	}
+	return irq;
 }
 
 /* XXX: To remove once all firmwares are ok */
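The pattern above, try irq_of_parse_and_map() and fall back to the historical legacy number, is how this merge keeps old firmwares booting. Condensed sketch, with defirq as in the function above:

/* Sketch only: OF lookup with a legacy fallback */
unsigned int ide_irq = irq_of_parse_and_map(np, channel & 0x1);
if (ide_irq == NO_IRQ)
	ide_irq = defirq;	/* 14/15, the traditional ISA IDE lines */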
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 5cf90c28b141..cb528c9de4c3 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -11,7 +11,7 @@
  *
  */
 
-#define DEBUG
+#undef DEBUG
 
 #include <linux/init.h>
 #include <linux/errno.h>
@@ -198,50 +198,81 @@ static void __init maple_init_early(void)
 {
 	DBG(" -> maple_init_early\n");
 
-	/* Setup interrupt mapping options */
-	ppc64_interrupt_controller = IC_OPEN_PIC;
-
 	iommu_init_early_dart();
 
 	DBG(" <- maple_init_early\n");
 }
 
-
-static __init void maple_init_IRQ(void)
+/*
+ * This is almost identical to pSeries and CHRP. We need to make that
+ * code generic at one point, with appropriate bits in the device-tree to
+ * identify the presence of an HT APIC
+ */
+static void __init maple_init_IRQ(void)
 {
-	struct device_node *root;
+	struct device_node *root, *np, *mpic_node = NULL;
 	unsigned int *opprop;
-	unsigned long opic_addr;
+	unsigned long openpic_addr = 0;
+	int naddr, n, i, opplen, has_isus = 0;
 	struct mpic *mpic;
-	unsigned char senses[128];
-	int n;
+	unsigned int flags = MPIC_PRIMARY;
 
-	DBG(" -> maple_init_IRQ\n");
+	/* Locate MPIC in the device-tree. Note that there is a bug
+	 * in Maple device-tree where the type of the controller is
+	 * open-pic and not interrupt-controller
+	 */
+	for_each_node_by_type(np, "open-pic") {
+		mpic_node = np;
+		break;
+	}
+	if (mpic_node == NULL) {
+		printk(KERN_ERR
+		       "Failed to locate the MPIC interrupt controller\n");
+		return;
+	}
 
-	/* XXX: Non standard, replace that with a proper openpic/mpic node
-	 * in the device-tree. Find the Open PIC if present */
+	/* Find address list in /platform-open-pic */
 	root = of_find_node_by_path("/");
-	opprop = (unsigned int *) get_property(root,
-				"platform-open-pic", NULL);
-	if (opprop == 0)
-		panic("OpenPIC not found !\n");
-
-	n = prom_n_addr_cells(root);
-	for (opic_addr = 0; n > 0; --n)
-		opic_addr = (opic_addr << 32) + *opprop++;
+	naddr = prom_n_addr_cells(root);
+	opprop = (unsigned int *) get_property(root, "platform-open-pic",
+			&opplen);
+	if (opprop != 0) {
+		openpic_addr = of_read_number(opprop, naddr);
+		has_isus = (opplen > naddr);
+		printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n",
+		       openpic_addr, has_isus);
+	}
 	of_node_put(root);
 
-	/* Obtain sense values from device-tree */
-	prom_get_irq_senses(senses, 0, 128);
+	BUG_ON(openpic_addr == 0);
+
+	/* Check for a big endian MPIC */
+	if (get_property(np, "big-endian", NULL) != NULL)
+		flags |= MPIC_BIG_ENDIAN;
 
-	mpic = mpic_alloc(opic_addr,
-			  MPIC_PRIMARY | MPIC_BIG_ENDIAN |
-			  MPIC_BROKEN_U3 | MPIC_WANTS_RESET,
-			  0, 0, 128, 128, senses, 128, "U3-MPIC");
+	/* XXX Maple specific bits */
+	flags |= MPIC_BROKEN_U3 | MPIC_WANTS_RESET;
+
+	/* Setup the openpic driver. More device-tree junks, we hard code no
+	 * ISUs for now. I'll have to revisit some stuffs with the folks doing
+	 * the firmware for those
+	 */
+	mpic = mpic_alloc(mpic_node, openpic_addr, flags,
+			  /*has_isus ? 16 :*/ 0, 0, " MPIC ");
 	BUG_ON(mpic == NULL);
-	mpic_init(mpic);
 
-	DBG(" <- maple_init_IRQ\n");
+	/* Add ISUs */
+	opplen /= sizeof(u32);
+	for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
+		unsigned long isuaddr = of_read_number(opprop + i, naddr);
+		mpic_assign_isu(mpic, n, isuaddr);
+	}
+
+	/* All ISUs are setup, complete initialization */
+	mpic_init(mpic);
+	ppc_md.get_irq = mpic_get_irq;
+	of_node_put(mpic_node);
+	of_node_put(root);
 }
 
 static void __init maple_progress(char *s, unsigned short hex)
@@ -256,7 +287,9 @@ static void __init maple_progress(char *s, unsigned short hex)
 static int __init maple_probe(void)
 {
 	unsigned long root = of_get_flat_dt_root();
-	if (!of_flat_dt_is_compatible(root, "Momentum,Maple"))
+
+	if (!of_flat_dt_is_compatible(root, "Momentum,Maple") &&
+	    !of_flat_dt_is_compatible(root, "Momentum,Apache"))
 		return 0;
 	/*
 	 * On U3, the DART (iommu) must be allocated now since it
@@ -277,7 +310,6 @@ define_machine(maple_md) {
 	.setup_arch		= maple_setup_arch,
 	.init_early		= maple_init_early,
 	.init_IRQ		= maple_init_IRQ,
-	.get_irq		= mpic_get_irq,
 	.pcibios_fixup		= maple_pcibios_fixup,
 	.pci_get_legacy_ide_irq	= maple_pci_get_legacy_ide_irq,
 	.restart		= maple_restart,
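of_read_number() replaces the removed manual shift loop for multi-cell device-tree values. A sketch of the equivalent fold, assuming 32-bit cells as in the old Maple code; the helper name here is illustrative, the real of_read_number lives in the prom headers:

/* Sketch only: fold 'size' 32-bit device-tree cells into one number */
static u64 read_cells(const u32 *cell, int size)
{
	u64 r = 0;

	while (size--)
		r = (r << 32) | *(cell++);
	return r;
}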
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index cb257aeb91f6..871b002c9f90 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -12,7 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/init.h>
-#include <linux/version.h>
+#include <linux/utsrelease.h>
 #include <asm/sections.h>
 #include <asm/prom.h>
 #include <asm/page.h>
@@ -162,6 +162,8 @@ static void __init bootx_add_chosen_props(unsigned long base,
 {
 	u32 val;
 
+	bootx_dt_add_prop("linux,bootx", NULL, 0, mem_end);
+
 	if (bootx_info->kernelParamsOffset) {
 		char *args = (char *)((unsigned long)bootx_info) +
 			bootx_info->kernelParamsOffset;
@@ -181,8 +183,25 @@ static void __init bootx_add_chosen_props(unsigned long base,
 static void __init bootx_add_display_props(unsigned long base,
 					   unsigned long *mem_end)
 {
+	boot_infos_t *bi = bootx_info;
+	u32 tmp;
+
 	bootx_dt_add_prop("linux,boot-display", NULL, 0, mem_end);
 	bootx_dt_add_prop("linux,opened", NULL, 0, mem_end);
+	tmp = bi->dispDeviceDepth;
+	bootx_dt_add_prop("linux,bootx-depth", &tmp, 4, mem_end);
+	tmp = bi->dispDeviceRect[2] - bi->dispDeviceRect[0];
+	bootx_dt_add_prop("linux,bootx-width", &tmp, 4, mem_end);
+	tmp = bi->dispDeviceRect[3] - bi->dispDeviceRect[1];
+	bootx_dt_add_prop("linux,bootx-height", &tmp, 4, mem_end);
+	tmp = bi->dispDeviceRowBytes;
+	bootx_dt_add_prop("linux,bootx-linebytes", &tmp, 4, mem_end);
+	tmp = (u32)bi->dispDeviceBase;
+	if (tmp == 0)
+		tmp = (u32)bi->logicalDisplayBase;
+	tmp += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes;
+	tmp += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8);
+	bootx_dt_add_prop("linux,bootx-addr", &tmp, 4, mem_end);
 }
 
 static void __init bootx_dt_add_string(char *s, unsigned long *mem_end)
@@ -211,7 +230,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base,
 
 	if (!strcmp(namep, "/chosen")) {
 		DBG(" detected /chosen ! adding properties names !\n");
-		bootx_dt_add_string("linux,platform", mem_end);
+		bootx_dt_add_string("linux,bootx", mem_end);
 		bootx_dt_add_string("linux,stdout-path", mem_end);
 		bootx_dt_add_string("linux,initrd-start", mem_end);
 		bootx_dt_add_string("linux,initrd-end", mem_end);
@@ -222,6 +241,11 @@ static void __init bootx_scan_dt_build_strings(unsigned long base,
 		DBG(" detected display ! adding properties names !\n");
 		bootx_dt_add_string("linux,boot-display", mem_end);
 		bootx_dt_add_string("linux,opened", mem_end);
+		bootx_dt_add_string("linux,bootx-depth", mem_end);
+		bootx_dt_add_string("linux,bootx-width", mem_end);
+		bootx_dt_add_string("linux,bootx-height", mem_end);
+		bootx_dt_add_string("linux,bootx-linebytes", mem_end);
+		bootx_dt_add_string("linux,bootx-addr", mem_end);
 		strncpy(bootx_disp_path, namep, 255);
 	}
 
@@ -443,7 +467,14 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
 	if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
 		bi->logicalDisplayBase = bi->dispDeviceBase;
 
+	/* Fixup depth 16 -> 15 as that's what MacOS calls 16bpp */
+	if (bi->dispDeviceDepth == 16)
+		bi->dispDeviceDepth = 15;
+
 #ifdef CONFIG_BOOTX_TEXT
+	ptr = (unsigned long)bi->logicalDisplayBase;
+	ptr += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes;
+	ptr += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8);
 	btext_setup_display(bi->dispDeviceRect[2] - bi->dispDeviceRect[0],
 			    bi->dispDeviceRect[3] - bi->dispDeviceRect[1],
 			    bi->dispDeviceDepth, bi->dispDeviceRowBytes,
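The new bootx-addr/btext computation offsets the framebuffer base by the top-left corner of dispDeviceRect: whole rows of dispDeviceRowBytes for the Y offset, plus whole pixels of (depth+7)/8 bytes for the X offset. The same arithmetic as a standalone sketch (boot_infos_t as in asm/bootx.h; the helper name is invented):

/* Sketch only: visible-origin math used by the two additions above */
static unsigned long fb_origin(boot_infos_t *bi)
{
	unsigned long p = (unsigned long)bi->logicalDisplayBase;

	p += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes;		/* top rows */
	p += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8);	/* left px */
	return p;
}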
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index ceafaf52a668..8677f50c2586 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -522,10 +522,11 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
 		host->speed = KW_I2C_MODE_25KHZ;
 		break;
 	}
-	if (np->n_intrs > 0)
-		host->irq = np->intrs[0].line;
-	else
-		host->irq = NO_IRQ;
+	host->irq = irq_of_parse_and_map(np, 0);
+	if (host->irq == NO_IRQ)
+		printk(KERN_WARNING
+		       "low_i2c: Failed to map interrupt for %s\n",
+		       np->full_name);
 
 	host->base = ioremap((*addrp), 0x1000);
 	if (host->base == NULL) {
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 41fa2409482a..6a36ea9bf673 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -29,6 +29,8 @@
 #include <asm/machdep.h>
 #include <asm/nvram.h>
 
+#include "pmac.h"
+
 #define DEBUG
 
 #ifdef DEBUG
@@ -80,9 +82,6 @@ static int nvram_partitions[3];
 // XXX Turn that into a sem
 static DEFINE_SPINLOCK(nv_lock);
 
-extern int pmac_newworld;
-extern int system_running;
-
 static int (*core99_write_bank)(int bank, u8* datas);
 static int (*core99_erase_bank)(int bank);
 
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index d524a915aa86..556b349797e8 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -46,6 +46,9 @@ static int has_uninorth;
 static struct pci_controller *u3_agp;
 static struct pci_controller *u4_pcie;
 static struct pci_controller *u3_ht;
+#define has_second_ohare 0
+#else
+static int has_second_ohare;
 #endif /* CONFIG_PPC64 */
 
 extern u8 pci_cache_line_size;
@@ -647,6 +650,33 @@ static void __init init_p2pbridge(void)
 	early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
 }
 
+static void __init init_second_ohare(void)
+{
+	struct device_node *np = of_find_node_by_name(NULL, "pci106b,7");
+	unsigned char bus, devfn;
+	unsigned short cmd;
+
+	if (np == NULL)
+		return;
+
+	/* This must run before we initialize the PICs since the second
+	 * ohare hosts a PIC that will be accessed there.
+	 */
+	if (pci_device_from_OF_node(np, &bus, &devfn) == 0) {
+		struct pci_controller* hose =
+			pci_find_hose_for_OF_device(np);
+		if (!hose) {
+			printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
+			return;
+		}
+		early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
+		cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+		cmd &= ~PCI_COMMAND_IO;
+		early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
+	}
+	has_second_ohare = 1;
+}
+
 /*
  * Some Apple desktop machines have a NEC PD720100A USB2 controller
  * on the motherboard. Open Firmware, on these, will disable the
@@ -688,9 +718,6 @@ static void __init fixup_nec_usb2(void)
 			       " EHCI, fixing up...\n");
 			data &= ~1UL;
 			early_write_config_dword(hose, bus, devfn, 0xe4, data);
-			early_write_config_byte(hose, bus,
-						devfn | 2, PCI_INTERRUPT_LINE,
-						nec->intrs[0].line);
 		}
 	}
 }
@@ -958,32 +985,28 @@ static int __init add_bridge(struct device_node *dev)
 	return 0;
 }
 
-static void __init pcibios_fixup_OF_interrupts(void)
+void __init pmac_pcibios_fixup(void)
 {
 	struct pci_dev* dev = NULL;
 
-	/*
-	 * Open Firmware often doesn't initialize the
-	 * PCI_INTERRUPT_LINE config register properly, so we
-	 * should find the device node and apply the interrupt
-	 * obtained from the OF device-tree
-	 */
 	for_each_pci_dev(dev) {
-		struct device_node *node;
-		node = pci_device_to_OF_node(dev);
-		/* this is the node, see if it has interrupts */
-		if (node && node->n_intrs > 0)
-			dev->irq = node->intrs[0].line;
-		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+		/* Read interrupt from the device-tree */
+		pci_read_irq_line(dev);
+
+		/* Fixup interrupt for the modem/ethernet combo controller.
+		 * on machines with a second ohare chip.
+		 * The number in the device tree (27) is bogus (correct for
+		 * the ethernet-only board but not the combo ethernet/modem
+		 * board). The real interrupt is 28 on the second controller
+		 * -> 28+32 = 60.
+		 */
+		if (has_second_ohare &&
+		    dev->vendor == PCI_VENDOR_ID_DEC &&
+		    dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS)
+			dev->irq = irq_create_mapping(NULL, 60, 0);
 	}
 }
 
-void __init pmac_pcibios_fixup(void)
-{
-	/* Fixup interrupts according to OF tree */
-	pcibios_fixup_OF_interrupts();
-}
-
 #ifdef CONFIG_PPC64
 static void __init pmac_fixup_phb_resources(void)
 {
@@ -1071,6 +1094,7 @@ void __init pmac_pci_init(void)
 
 #else /* CONFIG_PPC64 */
 	init_p2pbridge();
+	init_second_ohare();
 	fixup_nec_usb2();
 
 	/* We are still having some issues with the Xserve G4, enabling
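Note how the ohare2 fixup above stores irq_create_mapping(NULL, 60, 0) rather than the bare number 60: under the new scheme even a hard-wired line must be turned into a virtual irq before landing in dev->irq. Sketch (the predicate is hypothetical, standing in for the vendor/device test above):

/* Sketch only: a hard-coded hwirq still needs a virq mapping */
if (device_needs_combo_fixup(dev))	/* hypothetical check */
	dev->irq = irq_create_mapping(NULL, 60, 0);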
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index d6eab8b3f7de..6d66359ec8c8 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -24,19 +24,18 @@ static irqreturn_t macio_gpio_irq(int irq, void *data, struct pt_regs *regs)
 
 static int macio_do_gpio_irq_enable(struct pmf_function *func)
 {
-	if (func->node->n_intrs < 1)
+	unsigned int irq = irq_of_parse_and_map(func->node, 0);
+	if (irq == NO_IRQ)
 		return -EINVAL;
-
-	return request_irq(func->node->intrs[0].line, macio_gpio_irq, 0,
-			   func->node->name, func);
+	return request_irq(irq, macio_gpio_irq, 0, func->node->name, func);
 }
 
 static int macio_do_gpio_irq_disable(struct pmf_function *func)
 {
-	if (func->node->n_intrs < 1)
+	unsigned int irq = irq_of_parse_and_map(func->node, 0);
+	if (irq == NO_IRQ)
 		return -EINVAL;
-
-	free_irq(func->node->intrs[0].line, func);
+	free_irq(irq, func);
 	return 0;
 }
 
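The enable/disable pair above relies on irq_of_parse_and_map() being stable: parsing the same OF interrupt twice yields the same virq, so the free_irq() in the disable path matches the earlier request_irq(). Sketched as a shared lookup (illustrative only, not part of the patch):

/* Sketch only: both paths resolve the same source to the same virq */
static unsigned int pmf_irq(struct pmf_function *func)
{
	return irq_of_parse_and_map(func->node, 0);	/* stable per source */
}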
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index c9b09a9e6050..3d328bc1f7e0 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -65,39 +65,36 @@ static u32 level_mask[4];
 
 static DEFINE_SPINLOCK(pmac_pic_lock);
 
-#define GATWICK_IRQ_POOL_SIZE	10
-static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
-
 #define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
 static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static int pmac_irq_cascade = -1;
+static struct irq_host *pmac_pic_host;
 
-/*
- * Mark an irq as "lost".  This is only used on the pmac
- * since it can lose interrupts (see pmac_set_irq_mask).
- * -- Cort
- */
-void __set_lost(unsigned long irq_nr, int nokick)
+static void __pmac_retrigger(unsigned int irq_nr)
 {
-	if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
+	if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) {
+		__set_bit(irq_nr, ppc_lost_interrupts);
+		irq_nr = pmac_irq_cascade;
+		mb();
+	}
+	if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
 		atomic_inc(&ppc_n_lost_interrupts);
-		if (!nokick)
-			set_dec(1);
+		set_dec(1);
 	}
 }
 
-static void pmac_mask_and_ack_irq(unsigned int irq_nr)
+static void pmac_mask_and_ack_irq(unsigned int virq)
 {
-	unsigned long bit = 1UL << (irq_nr & 0x1f);
-	int i = irq_nr >> 5;
+	unsigned int src = irq_map[virq].hwirq;
+	unsigned long bit = 1UL << (virq & 0x1f);
+	int i = virq >> 5;
 	unsigned long flags;
 
-	if ((unsigned)irq_nr >= max_irqs)
-		return;
-
-	clear_bit(irq_nr, ppc_cached_irq_mask);
-	if (test_and_clear_bit(irq_nr, ppc_lost_interrupts))
-		atomic_dec(&ppc_n_lost_interrupts);
 	spin_lock_irqsave(&pmac_pic_lock, flags);
+	__clear_bit(src, ppc_cached_irq_mask);
+	if (__test_and_clear_bit(src, ppc_lost_interrupts))
+		atomic_dec(&ppc_n_lost_interrupts);
 	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
 	out_le32(&pmac_irq_hw[i]->ack, bit);
 	do {
@@ -109,16 +106,29 @@ static void pmac_mask_and_ack_irq(unsigned int irq_nr)
 	spin_unlock_irqrestore(&pmac_pic_lock, flags);
 }
 
-static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
+static void pmac_ack_irq(unsigned int virq)
+{
+	unsigned int src = irq_map[virq].hwirq;
+	unsigned long bit = 1UL << (src & 0x1f);
+	int i = src >> 5;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmac_pic_lock, flags);
+	if (__test_and_clear_bit(src, ppc_lost_interrupts))
+		atomic_dec(&ppc_n_lost_interrupts);
+	out_le32(&pmac_irq_hw[i]->ack, bit);
+	(void)in_le32(&pmac_irq_hw[i]->ack);
+	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+}
+
+static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
 {
 	unsigned long bit = 1UL << (irq_nr & 0x1f);
 	int i = irq_nr >> 5;
-	unsigned long flags;
 
 	if ((unsigned)irq_nr >= max_irqs)
 		return;
 
-	spin_lock_irqsave(&pmac_pic_lock, flags);
 	/* enable unmasked interrupts */
 	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
 
@@ -135,71 +145,78 @@ static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
 	 * the bit in the flag register or request another interrupt.
 	 */
 	if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
-		__set_lost((ulong)irq_nr, nokicklost);
-	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+		__pmac_retrigger(irq_nr);
 }
 
 /* When an irq gets requested for the first client, if it's an
  * edge interrupt, we clear any previous one on the controller
 */
-static unsigned int pmac_startup_irq(unsigned int irq_nr)
+static unsigned int pmac_startup_irq(unsigned int virq)
 {
-	unsigned long bit = 1UL << (irq_nr & 0x1f);
-	int i = irq_nr >> 5;
+	unsigned long flags;
+	unsigned int src = irq_map[virq].hwirq;
+	unsigned long bit = 1UL << (src & 0x1f);
+	int i = src >> 5;
 
-	if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
+	spin_lock_irqsave(&pmac_pic_lock, flags);
+	if ((irq_desc[virq].status & IRQ_LEVEL) == 0)
 		out_le32(&pmac_irq_hw[i]->ack, bit);
-	set_bit(irq_nr, ppc_cached_irq_mask);
-	pmac_set_irq_mask(irq_nr, 0);
+	__set_bit(src, ppc_cached_irq_mask);
+	__pmac_set_irq_mask(src, 0);
+	spin_unlock_irqrestore(&pmac_pic_lock, flags);
 
 	return 0;
 }
 
-static void pmac_mask_irq(unsigned int irq_nr)
+static void pmac_mask_irq(unsigned int virq)
 {
-	clear_bit(irq_nr, ppc_cached_irq_mask);
-	pmac_set_irq_mask(irq_nr, 0);
-	mb();
+	unsigned long flags;
+	unsigned int src = irq_map[virq].hwirq;
+
+	spin_lock_irqsave(&pmac_pic_lock, flags);
+	__clear_bit(src, ppc_cached_irq_mask);
+	__pmac_set_irq_mask(src, 0);
+	spin_unlock_irqrestore(&pmac_pic_lock, flags);
 }
 
-static void pmac_unmask_irq(unsigned int irq_nr)
+static void pmac_unmask_irq(unsigned int virq)
 {
-	set_bit(irq_nr, ppc_cached_irq_mask);
-	pmac_set_irq_mask(irq_nr, 0);
+	unsigned long flags;
+	unsigned int src = irq_map[virq].hwirq;
+
+	spin_lock_irqsave(&pmac_pic_lock, flags);
+	__set_bit(src, ppc_cached_irq_mask);
+	__pmac_set_irq_mask(src, 0);
+	spin_unlock_irqrestore(&pmac_pic_lock, flags);
 }
 
-static void pmac_end_irq(unsigned int irq_nr)
+static int pmac_retrigger(unsigned int virq)
 {
-	if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
-	    && irq_desc[irq_nr].action) {
-		set_bit(irq_nr, ppc_cached_irq_mask);
-		pmac_set_irq_mask(irq_nr, 1);
-	}
-}
+	unsigned long flags;
 
+	spin_lock_irqsave(&pmac_pic_lock, flags);
+	__pmac_retrigger(irq_map[virq].hwirq);
+	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	return 1;
+}
 
-struct hw_interrupt_type pmac_pic = {
+static struct irq_chip pmac_pic = {
 	.typename	= " PMAC-PIC ",
 	.startup	= pmac_startup_irq,
-	.enable		= pmac_unmask_irq,
-	.disable	= pmac_mask_irq,
-	.ack		= pmac_mask_and_ack_irq,
-	.end		= pmac_end_irq,
-};
-
-struct hw_interrupt_type gatwick_pic = {
-	.typename	= " GATWICK ",
-	.startup	= pmac_startup_irq,
-	.enable		= pmac_unmask_irq,
-	.disable	= pmac_mask_irq,
-	.ack		= pmac_mask_and_ack_irq,
-	.end		= pmac_end_irq,
+	.mask		= pmac_mask_irq,
+	.ack		= pmac_ack_irq,
+	.mask_ack	= pmac_mask_and_ack_irq,
+	.unmask		= pmac_unmask_irq,
+	.retrigger	= pmac_retrigger,
 };
 
 static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
 {
+	unsigned long flags;
 	int irq, bits;
+	int rc = IRQ_NONE;
 
+	spin_lock_irqsave(&pmac_pic_lock, flags);
 	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
 		int i = irq >> 5;
 		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
@@ -209,17 +226,20 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
 		if (bits == 0)
 			continue;
 		irq += __ilog2(bits);
+		spin_unlock_irqrestore(&pmac_pic_lock, flags);
 		__do_IRQ(irq, regs);
-		return IRQ_HANDLED;
+		spin_lock_irqsave(&pmac_pic_lock, flags);
+		rc = IRQ_HANDLED;
 	}
-	printk("gatwick irq not from gatwick pic\n");
-	return IRQ_NONE;
+	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	return rc;
 }
 
-static int pmac_get_irq(struct pt_regs *regs)
+static unsigned int pmac_pic_get_irq(struct pt_regs *regs)
 {
 	int irq;
 	unsigned long bits = 0;
+	unsigned long flags;
 
 #ifdef CONFIG_SMP
 	void psurge_smp_message_recv(struct pt_regs *);
@@ -227,9 +247,10 @@ static int pmac_get_irq(struct pt_regs *regs)
 	/* IPI's are a hack on the powersurge -- Cort */
 	if ( smp_processor_id() != 0 ) {
 		psurge_smp_message_recv(regs);
-		return -2;	/* ignore, already handled */
+		return NO_IRQ_IGNORE;	/* ignore, already handled */
 	}
 #endif /* CONFIG_SMP */
+	spin_lock_irqsave(&pmac_pic_lock, flags);
 	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
 		int i = irq >> 5;
 		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
@@ -241,133 +262,10 @@ static int pmac_get_irq(struct pt_regs *regs)
 		irq += __ilog2(bits);
 		break;
 	}
-
-	return irq;
-}
-
+	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	if (unlikely(irq < 0))
+		return NO_IRQ;
+	return irq_linear_revmap(pmac_pic_host, irq);
-/* This routine will fix some missing interrupt values in the device tree
- * on the gatwick mac-io controller used by some PowerBooks
- *
- * Walking of OF nodes could use a bit more fixing up here, but it's not
- * very important as this is all boot time code on static portions of the
- * device-tree.
- *
- * However, the modifications done to "intrs" will have to be removed and
- * replaced with proper updates of the "interrupts" properties or
- * AAPL,interrupts, yet to be decided, once the dynamic parsing is there.
- */
-static void __init pmac_fix_gatwick_interrupts(struct device_node *gw,
-					       int irq_base)
-{
-	struct device_node *node;
-	int count;
-
-	memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
-	count = 0;
-	for (node = NULL; (node = of_get_next_child(gw, node)) != NULL;) {
-		/* Fix SCC */
-		if ((strcasecmp(node->name, "escc") == 0) && node->child) {
-			if (node->child->n_intrs < 3) {
-				node->child->intrs = &gatwick_int_pool[count];
-				count += 3;
-			}
-			node->child->n_intrs = 3;
-			node->child->intrs[0].line = 15+irq_base;
-			node->child->intrs[1].line = 4+irq_base;
-			node->child->intrs[2].line = 5+irq_base;
-			printk(KERN_INFO "irq: fixed SCC on gatwick"
-			       " (%d,%d,%d)\n",
-			       node->child->intrs[0].line,
-			       node->child->intrs[1].line,
-			       node->child->intrs[2].line);
-		}
-		/* Fix media-bay & left SWIM */
-		if (strcasecmp(node->name, "media-bay") == 0) {
-			struct device_node* ya_node;
-
-			if (node->n_intrs == 0)
-				node->intrs = &gatwick_int_pool[count++];
-			node->n_intrs = 1;
-			node->intrs[0].line = 29+irq_base;
-			printk(KERN_INFO "irq: fixed media-bay on gatwick"
-			       " (%d)\n", node->intrs[0].line);
-
-			ya_node = node->child;
-			while(ya_node) {
-				if (strcasecmp(ya_node->name, "floppy") == 0) {
-					if (ya_node->n_intrs < 2) {
-						ya_node->intrs = &gatwick_int_pool[count];
-						count += 2;
-					}
-					ya_node->n_intrs = 2;
-					ya_node->intrs[0].line = 19+irq_base;
-					ya_node->intrs[1].line = 1+irq_base;
-					printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
-					       ya_node->intrs[0].line, ya_node->intrs[1].line);
-				}
-				if (strcasecmp(ya_node->name, "ata4") == 0) {
-					if (ya_node->n_intrs < 2) {
-						ya_node->intrs = &gatwick_int_pool[count];
-						count += 2;
-					}
-					ya_node->n_intrs = 2;
-					ya_node->intrs[0].line = 14+irq_base;
-					ya_node->intrs[1].line = 3+irq_base;
-					printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
-					       ya_node->intrs[0].line, ya_node->intrs[1].line);
-				}
-				ya_node = ya_node->sibling;
-			}
-		}
-	}
-	if (count > 10) {
-		printk("WARNING !! Gatwick interrupt pool overflow\n");
-		printk("  GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
-		printk("              requested = %d\n", count);
-	}
-}
-
-/*
- * The PowerBook 3400/2400/3500 can have a combo ethernet/modem
- * card which includes an ohare chip that acts as a second interrupt
- * controller. If we find this second ohare, set it up and fix the
- * interrupt value in the device tree for the ethernet chip.
- */
-static void __init enable_second_ohare(struct device_node *np)
-{
-	unsigned char bus, devfn;
-	unsigned short cmd;
-	struct device_node *ether;
-
-	/* This code doesn't strictly belong here, it could be part of
-	 * either the PCI initialisation or the feature code. It's kept
-	 * here for historical reasons.
-	 */
-	if (pci_device_from_OF_node(np, &bus, &devfn) == 0) {
-		struct pci_controller* hose =
-			pci_find_hose_for_OF_device(np);
-		if (!hose) {
-			printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
-			return;
-		}
-		early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
-		cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
-		cmd &= ~PCI_COMMAND_IO;
-		early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
-	}
-
-	/* Fix interrupt for the modem/ethernet combo controller. The number
-	 * in the device tree (27) is bogus (correct for the ethernet-only
361 * board but not the combo ethernet/modem board).
362 * The real interrupt is 28 on the second controller -> 28+32 = 60.
363 */
364 ether = of_find_node_by_name(NULL, "pci1011,14");
365 if (ether && ether->n_intrs > 0) {
366 ether->intrs[0].line = 60;
367 printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
368 ether->intrs[0].line);
369 }
370 of_node_put(ether);
371} 269}
372 270
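
pmac_pic_get_irq() now returns a Linux virtual interrupt number rather than the raw hardware number: the final irq_linear_revmap() call translates through the host's reverse map. For a linear host this is conceptually a bounds-checked array lookup whose slots are filled the first time irq_create_mapping() maps each source; a rough sketch of the idea (standalone names, not the exact struct irq_host layout):

static unsigned int example_linear_revmap(unsigned int *revmap,
					  unsigned int size,
					  unsigned long hwirq)
{
	if (hwirq >= size)
		return NO_IRQ;	/* out of range for this host */
	return revmap[hwirq];	/* virq recorded at map time */
}
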
373#ifdef CONFIG_XMON 271#ifdef CONFIG_XMON
@@ -386,17 +284,60 @@ static struct irqaction gatwick_cascade_action = {
386 .name = "cascade", 284 .name = "cascade",
387}; 285};
388 286
287static int pmac_pic_host_match(struct irq_host *h, struct device_node *node)
288{
289 /* We match all, we don't always have a node anyway */
290 return 1;
291}
292
293static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
294 irq_hw_number_t hw, unsigned int flags)
295{
296 struct irq_desc *desc = get_irq_desc(virq);
297 int level;
298
299 if (hw >= max_irqs)
300 return -EINVAL;
301
302 /* Mark level interrupts, set delayed disable for edge ones and set
303 * handlers
304 */
305 level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
306 if (level)
307 desc->status |= IRQ_LEVEL;
308 else
309 desc->status |= IRQ_DELAYED_DISABLE;
310 set_irq_chip_and_handler(virq, &pmac_pic, level ?
311 handle_level_irq : handle_edge_irq);
312 return 0;
313}
314
315static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct,
316 u32 *intspec, unsigned int intsize,
317 irq_hw_number_t *out_hwirq,
318 unsigned int *out_flags)
319
320{
321 *out_hwirq = *intspec;
322 return 0;
323}
324
325static struct irq_host_ops pmac_pic_host_ops = {
326 .match = pmac_pic_host_match,
327 .map = pmac_pic_host_map,
328 .xlate = pmac_pic_host_xlate,
329};
330
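
These three callbacks are the whole contract of an irq_host: match() claims (or, as here, declines to filter) controller nodes, xlate() decodes a device-tree interrupt specifier into a hardware number plus flags, and map() runs once per hardware number to install the chip and flow handler on the freshly allocated virq. Registration and first use then follow the sketch below, which mirrors the calls made later in this patch (sizes illustrative):

	static struct irq_host *host;
	unsigned int virq;

	/* a linear host covering max_irqs sources; the last argument
	 * doubles as the "invalid hwirq" value for the reverse map */
	host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, max_irqs,
			      &pmac_pic_host_ops, max_irqs);
	BUG_ON(host == NULL);
	irq_set_default_host(host);

	/* creating a mapping invokes .map(), which picks the level
	 * vs. edge flow handler; NULL selects the default host */
	virq = irq_create_mapping(NULL, 20, 0);
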
389static void __init pmac_pic_probe_oldstyle(void) 331static void __init pmac_pic_probe_oldstyle(void)
390{ 332{
391 int i; 333 int i;
392 int irq_cascade = -1;
393 struct device_node *master = NULL; 334 struct device_node *master = NULL;
394 struct device_node *slave = NULL; 335 struct device_node *slave = NULL;
395 u8 __iomem *addr; 336 u8 __iomem *addr;
396 struct resource r; 337 struct resource r;
397 338
398 /* Set our get_irq function */ 339 /* Set our get_irq function */
399 ppc_md.get_irq = pmac_get_irq; 340 ppc_md.get_irq = pmac_pic_get_irq;
400 341
401 /* 342 /*
402 * Find the interrupt controller type & node 343 * Find the interrupt controller type & node
@@ -414,7 +355,6 @@ static void __init pmac_pic_probe_oldstyle(void)
414 if (slave) { 355 if (slave) {
415 max_irqs = 64; 356 max_irqs = 64;
416 level_mask[1] = OHARE_LEVEL_MASK; 357 level_mask[1] = OHARE_LEVEL_MASK;
417 enable_second_ohare(slave);
418 } 358 }
419 } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { 359 } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
420 max_irqs = max_real_irqs = 64; 360 max_irqs = max_real_irqs = 64;
@@ -438,14 +378,18 @@ static void __init pmac_pic_probe_oldstyle(void)
438 max_irqs = 128; 378 max_irqs = 128;
439 level_mask[2] = HEATHROW_LEVEL_MASK; 379 level_mask[2] = HEATHROW_LEVEL_MASK;
440 level_mask[3] = 0; 380 level_mask[3] = 0;
441 pmac_fix_gatwick_interrupts(slave, max_real_irqs);
442 } 381 }
443 } 382 }
444 BUG_ON(master == NULL); 383 BUG_ON(master == NULL);
445 384
446 /* Set the handler for the main PIC */ 385 /*
447 for ( i = 0; i < max_real_irqs ; i++ ) 386 * Allocate an irq host
448 irq_desc[i].chip = &pmac_pic; 387 */
388 pmac_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, max_irqs,
389 &pmac_pic_host_ops,
390 max_irqs);
391 BUG_ON(pmac_pic_host == NULL);
392 irq_set_default_host(pmac_pic_host);
449 393
450 /* Get addresses of first controller if we have a node for it */ 394 /* Get addresses of first controller if we have a node for it */
451 BUG_ON(of_address_to_resource(master, 0, &r)); 395 BUG_ON(of_address_to_resource(master, 0, &r));
@@ -472,39 +416,38 @@ static void __init pmac_pic_probe_oldstyle(void)
472 pmac_irq_hw[i++] = 416 pmac_irq_hw[i++] =
473 (volatile struct pmac_irq_hw __iomem *) 417 (volatile struct pmac_irq_hw __iomem *)
474 (addr + 0x10); 418 (addr + 0x10);
475 irq_cascade = slave->intrs[0].line; 419 pmac_irq_cascade = irq_of_parse_and_map(slave, 0);
476 420
477 printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" 421 printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
478 " cascade: %d\n", slave->full_name, 422 " cascade: %d\n", slave->full_name,
479 max_irqs - max_real_irqs, irq_cascade); 423 max_irqs - max_real_irqs, pmac_irq_cascade);
480 } 424 }
481 of_node_put(slave); 425 of_node_put(slave);
482 426
483 /* disable all interrupts in all controllers */ 427 /* Disable all interrupts in all controllers */
484 for (i = 0; i * 32 < max_irqs; ++i) 428 for (i = 0; i * 32 < max_irqs; ++i)
485 out_le32(&pmac_irq_hw[i]->enable, 0); 429 out_le32(&pmac_irq_hw[i]->enable, 0);
486 430
487 /* mark level interrupts */ 431 /* Hookup cascade irq */
488 for (i = 0; i < max_irqs; i++) 432 if (slave && pmac_irq_cascade != NO_IRQ)
489 if (level_mask[i >> 5] & (1UL << (i & 0x1f))) 433 setup_irq(pmac_irq_cascade, &gatwick_cascade_action);
490 irq_desc[i].status = IRQ_LEVEL;
491 434
492 /* Setup handlers for secondary controller and hook cascade irq*/
493 if (slave) {
494 for ( i = max_real_irqs ; i < max_irqs ; i++ )
495 irq_desc[i].chip = &gatwick_pic;
496 setup_irq(irq_cascade, &gatwick_cascade_action);
497 }
498 printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); 435 printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
499#ifdef CONFIG_XMON 436#ifdef CONFIG_XMON
500 setup_irq(20, &xmon_action); 437 setup_irq(irq_create_mapping(NULL, 20, 0), &xmon_action);
501#endif 438#endif
502} 439}
503#endif /* CONFIG_PPC32 */ 440#endif /* CONFIG_PPC32 */
504 441
505static int pmac_u3_cascade(struct pt_regs *regs, void *data) 442static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc,
443 struct pt_regs *regs)
506{ 444{
507 return mpic_get_one_irq((struct mpic *)data, regs); 445 struct mpic *mpic = desc->handler_data;
446
447 unsigned int cascade_irq = mpic_get_one_irq(mpic, regs);
448 if (cascade_irq != NO_IRQ)
449 generic_handle_irq(cascade_irq, regs);
450 desc->chip->eoi(irq);
508} 451}
509 452
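
mpic_setup_cascade() is gone; the slave MPIC now hangs off the generic chained-handler machinery, with the slave pointer carried in desc->handler_data. Condensed into one place, the wiring this patch performs looks like the following (example_* names are illustrative, the real handler is pmac_u3_cascade above):

static void example_cascade(unsigned int irq, struct irq_desc *desc,
			    struct pt_regs *regs)
{
	struct mpic *slave = desc->handler_data;  /* from set_irq_data() */
	unsigned int child = mpic_get_one_irq(slave, regs);

	if (child != NO_IRQ)
		generic_handle_irq(child, regs);
	desc->chip->eoi(irq);	/* re-arm the cascade on the parent */
}

static void __init example_hookup(unsigned int cascade, struct mpic *slave)
{
	set_irq_data(cascade, slave);
	set_irq_chained_handler(cascade, example_cascade);
}
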
510static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) 453static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
@@ -514,21 +457,20 @@ static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
514 int nmi_irq; 457 int nmi_irq;
515 458
516 pswitch = of_find_node_by_name(NULL, "programmer-switch"); 459 pswitch = of_find_node_by_name(NULL, "programmer-switch");
517 if (pswitch && pswitch->n_intrs) { 460 if (pswitch) {
518 nmi_irq = pswitch->intrs[0].line; 461 nmi_irq = irq_of_parse_and_map(pswitch, 0);
519 mpic_irq_set_priority(nmi_irq, 9); 462 if (nmi_irq != NO_IRQ) {
520 setup_irq(nmi_irq, &xmon_action); 463 mpic_irq_set_priority(nmi_irq, 9);
464 setup_irq(nmi_irq, &xmon_action);
465 }
466 of_node_put(pswitch);
521 } 467 }
522 of_node_put(pswitch);
523#endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ 468#endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
524} 469}
525 470
526static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, 471static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
527 int master) 472 int master)
528{ 473{
529 unsigned char senses[128];
530 int offset = master ? 0 : 128;
531 int count = master ? 128 : 124;
532 const char *name = master ? " MPIC 1 " : " MPIC 2 "; 474 const char *name = master ? " MPIC 1 " : " MPIC 2 ";
533 struct resource r; 475 struct resource r;
534 struct mpic *mpic; 476 struct mpic *mpic;
@@ -541,8 +483,6 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
541 483
542 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); 484 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
543 485
544 prom_get_irq_senses(senses, offset, offset + count);
545
546 flags |= MPIC_WANTS_RESET; 486 flags |= MPIC_WANTS_RESET;
547 if (get_property(np, "big-endian", NULL)) 487 if (get_property(np, "big-endian", NULL))
548 flags |= MPIC_BIG_ENDIAN; 488 flags |= MPIC_BIG_ENDIAN;
@@ -553,8 +493,7 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
553 if (master && (flags & MPIC_BIG_ENDIAN)) 493 if (master && (flags & MPIC_BIG_ENDIAN))
554 flags |= MPIC_BROKEN_U3; 494 flags |= MPIC_BROKEN_U3;
555 495
556 mpic = mpic_alloc(r.start, flags, 0, offset, count, master ? 252 : 0, 496 mpic = mpic_alloc(np, r.start, flags, 0, 0, name);
557 senses, count, name);
558 if (mpic == NULL) 497 if (mpic == NULL)
559 return NULL; 498 return NULL;
560 499
@@ -567,6 +506,7 @@ static int __init pmac_pic_probe_mpic(void)
567{ 506{
568 struct mpic *mpic1, *mpic2; 507 struct mpic *mpic1, *mpic2;
569 struct device_node *np, *master = NULL, *slave = NULL; 508 struct device_node *np, *master = NULL, *slave = NULL;
509 unsigned int cascade;
570 510
571 /* We can have up to 2 MPICs cascaded */ 511 /* We can have up to 2 MPICs cascaded */
572 for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) 512 for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
@@ -603,8 +543,15 @@ static int __init pmac_pic_probe_mpic(void)
603 of_node_put(master); 543 of_node_put(master);
604 544
605 /* No slave, let's go out */ 545 /* No slave, let's go out */
606 if (slave == NULL || slave->n_intrs < 1) 546 if (slave == NULL)
547 return 0;
548
549 /* Get/Map slave interrupt */
550 cascade = irq_of_parse_and_map(slave, 0);
551 if (cascade == NO_IRQ) {
552 printk(KERN_ERR "Failed to map cascade IRQ\n");
607 return 0; 553 return 0;
554 }
608 555
609 mpic2 = pmac_setup_one_mpic(slave, 0); 556 mpic2 = pmac_setup_one_mpic(slave, 0);
610 if (mpic2 == NULL) { 557 if (mpic2 == NULL) {
@@ -612,7 +559,8 @@ static int __init pmac_pic_probe_mpic(void)
612 of_node_put(slave); 559 of_node_put(slave);
613 return 0; 560 return 0;
614 } 561 }
615 mpic_setup_cascade(slave->intrs[0].line, pmac_u3_cascade, mpic2); 562 set_irq_data(cascade, mpic2);
563 set_irq_chained_handler(cascade, pmac_u3_cascade);
616 564
617 of_node_put(slave); 565 of_node_put(slave);
618 return 0; 566 return 0;
@@ -621,6 +569,19 @@ static int __init pmac_pic_probe_mpic(void)
621 569
622void __init pmac_pic_init(void) 570void __init pmac_pic_init(void)
623{ 571{
572 unsigned int flags = 0;
573
574 /* We configure the OF parsing based on our oldworld vs. newworld
 575 * platform type and whether we were booted by BootX.
576 */
577#ifdef CONFIG_PPC32
578 if (!pmac_newworld)
579 flags |= OF_IMAP_OLDWORLD_MAC;
580 if (get_property(of_chosen, "linux,bootx", NULL) != NULL)
581 flags |= OF_IMAP_NO_PHANDLE;
582 of_irq_map_init(flags);
 583#endif /* CONFIG_PPC32 */
584
624 /* We first try to detect Apple's new Core99 chipset, since mac-io 585 /* We first try to detect Apple's new Core99 chipset, since mac-io
625 * is quite different on those machines and contains an IBM MPIC2. 586 * is quite different on those machines and contains an IBM MPIC2.
626 */ 587 */
@@ -643,6 +604,7 @@ unsigned long sleep_save_mask[2];
643 604
644/* This used to be passed by the PMU driver but that link got 605/* This used to be passed by the PMU driver but that link got
645 * broken with the new driver model. We use this tweak for now... 606 * broken with the new driver model. We use this tweak for now...
607 * We really want to do things differently though...
646 */ 608 */
647static int pmacpic_find_viaint(void) 609static int pmacpic_find_viaint(void)
648{ 610{
@@ -656,7 +618,7 @@ static int pmacpic_find_viaint(void)
656 np = of_find_node_by_name(NULL, "via-pmu"); 618 np = of_find_node_by_name(NULL, "via-pmu");
657 if (np == NULL) 619 if (np == NULL)
658 goto not_found; 620 goto not_found;
 659 viaint = np->intrs[0].line; 621 viaint = irq_of_parse_and_map(np, 0);
660#endif /* CONFIG_ADB_PMU */ 622#endif /* CONFIG_ADB_PMU */
661 623
662not_found: 624not_found:
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 21c7b0f8f329..94e7b24b840b 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -12,6 +12,8 @@
12 12
13struct rtc_time; 13struct rtc_time;
14 14
15extern int pmac_newworld;
16
15extern long pmac_time_init(void); 17extern long pmac_time_init(void);
16extern unsigned long pmac_get_boot_time(void); 18extern unsigned long pmac_get_boot_time(void);
17extern void pmac_get_rtc_time(struct rtc_time *); 19extern void pmac_get_rtc_time(struct rtc_time *);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 8654b5f07836..31a9da769fa2 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -613,9 +613,6 @@ static void __init pmac_init_early(void)
613 udbg_adb_init(!!strstr(cmd_line, "btextdbg")); 613 udbg_adb_init(!!strstr(cmd_line, "btextdbg"));
614 614
615#ifdef CONFIG_PPC64 615#ifdef CONFIG_PPC64
616 /* Setup interrupt mapping options */
617 ppc64_interrupt_controller = IC_OPEN_PIC;
618
619 iommu_init_early_dart(); 616 iommu_init_early_dart();
620#endif 617#endif
621} 618}
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 9639c66b453d..9df783088b61 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -72,32 +72,62 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
72 72
73/* #define DEBUG */ 73/* #define DEBUG */
74 74
75static void request_ras_irqs(struct device_node *np, char *propname, 75
76static void request_ras_irqs(struct device_node *np,
76 irqreturn_t (*handler)(int, void *, struct pt_regs *), 77 irqreturn_t (*handler)(int, void *, struct pt_regs *),
77 const char *name) 78 const char *name)
78{ 79{
79 unsigned int *ireg, len, i; 80 int i, index, count = 0;
80 int virq, n_intr; 81 struct of_irq oirq;
81 82 u32 *opicprop;
82 ireg = (unsigned int *)get_property(np, propname, &len); 83 unsigned int opicplen;
83 if (ireg == NULL) 84 unsigned int virqs[16];
84 return; 85
85 n_intr = prom_n_intr_cells(np); 86 /* Check for obsolete "open-pic-interrupt" property. If present, then
86 len /= n_intr * sizeof(*ireg); 87 * map those interrupts using the default interrupt host and default
87 88 * trigger
88 for (i = 0; i < len; i++) { 89 */
89 virq = virt_irq_create_mapping(*ireg); 90 opicprop = (u32 *)get_property(np, "open-pic-interrupt", &opicplen);
90 if (virq == NO_IRQ) { 91 if (opicprop) {
91 printk(KERN_ERR "Unable to allocate interrupt " 92 opicplen /= sizeof(u32);
92 "number for %s\n", np->full_name); 93 for (i = 0; i < opicplen; i++) {
93 return; 94 if (count > 15)
95 break;
96 virqs[count] = irq_create_mapping(NULL, *(opicprop++),
97 IRQ_TYPE_NONE);
98 if (virqs[count] == NO_IRQ)
99 printk(KERN_ERR "Unable to allocate interrupt "
100 "number for %s\n", np->full_name);
101 else
102 count++;
103
94 } 104 }
95 if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) { 105 }
106 /* Else use normal interrupt tree parsing */
107 else {
 108 /* First, try proper OF tree parsing */
109 for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
110 index++) {
111 if (count > 15)
112 break;
113 virqs[count] = irq_create_of_mapping(oirq.controller,
114 oirq.specifier,
115 oirq.size);
116 if (virqs[count] == NO_IRQ)
117 printk(KERN_ERR "Unable to allocate interrupt "
118 "number for %s\n", np->full_name);
119 else
120 count++;
121 }
122 }
123
124 /* Now request them */
125 for (i = 0; i < count; i++) {
126 if (request_irq(virqs[i], handler, 0, name, NULL)) {
96 printk(KERN_ERR "Unable to request interrupt %d for " 127 printk(KERN_ERR "Unable to request interrupt %d for "
97 "%s\n", irq_offset_up(virq), np->full_name); 128 "%s\n", virqs[i], np->full_name);
98 return; 129 return;
99 } 130 }
100 ireg += n_intr;
101 } 131 }
102} 132}
103 133
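
The rewritten request_ras_irqs() walks the OF interrupt tree one index at a time: of_irq_map_one() fills a struct of_irq with the parent controller node and the raw specifier cells, and irq_create_of_mapping() turns that pair into a virq. The same loop shape works for any node carrying several interrupts; a trimmed sketch (np is assumed to be a valid device node, error reporting elided):

	struct of_irq oirq;
	unsigned int virq;
	int index;

	for (index = 0; of_irq_map_one(np, index, &oirq) == 0; index++) {
		virq = irq_create_of_mapping(oirq.controller,
					     oirq.specifier, oirq.size);
		if (virq == NO_IRQ)
			continue;	/* log and skip in real code */
		/* ... request_irq(virq, handler, 0, name, NULL) ... */
	}
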
@@ -115,20 +145,14 @@ static int __init init_ras_IRQ(void)
115 /* Internal Errors */ 145 /* Internal Errors */
116 np = of_find_node_by_path("/event-sources/internal-errors"); 146 np = of_find_node_by_path("/event-sources/internal-errors");
117 if (np != NULL) { 147 if (np != NULL) {
118 request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt, 148 request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
119 "RAS_ERROR");
120 request_ras_irqs(np, "interrupts", ras_error_interrupt,
121 "RAS_ERROR");
122 of_node_put(np); 149 of_node_put(np);
123 } 150 }
124 151
125 /* EPOW Events */ 152 /* EPOW Events */
126 np = of_find_node_by_path("/event-sources/epow-events"); 153 np = of_find_node_by_path("/event-sources/epow-events");
127 if (np != NULL) { 154 if (np != NULL) {
128 request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt, 155 request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW");
129 "RAS_EPOW");
130 request_ras_irqs(np, "interrupts", ras_epow_interrupt,
131 "RAS_EPOW");
132 of_node_put(np); 156 of_node_put(np);
133 } 157 }
134 158
@@ -162,7 +186,7 @@ ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs)
162 186
163 status = rtas_call(ras_check_exception_token, 6, 1, NULL, 187 status = rtas_call(ras_check_exception_token, 6, 1, NULL,
164 RAS_VECTOR_OFFSET, 188 RAS_VECTOR_OFFSET,
165 virt_irq_to_real(irq_offset_down(irq)), 189 irq_map[irq].hwirq,
166 RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, 190 RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
167 critical, __pa(&ras_log_buf), 191 critical, __pa(&ras_log_buf),
168 rtas_get_error_log_max()); 192 rtas_get_error_log_max());
@@ -198,7 +222,7 @@ ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs)
198 222
199 status = rtas_call(ras_check_exception_token, 6, 1, NULL, 223 status = rtas_call(ras_check_exception_token, 6, 1, NULL,
200 RAS_VECTOR_OFFSET, 224 RAS_VECTOR_OFFSET,
201 virt_irq_to_real(irq_offset_down(irq)), 225 irq_map[irq].hwirq,
202 RTAS_INTERNAL_ERROR, 1 /*Time Critical */, 226 RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
203 __pa(&ras_log_buf), 227 __pa(&ras_log_buf),
204 rtas_get_error_log_max()); 228 rtas_get_error_log_max());
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 999509d28af8..54a52437265c 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -76,6 +76,9 @@
76#define DBG(fmt...) 76#define DBG(fmt...)
77#endif 77#endif
78 78
 79/* move these declarations to a header */
80extern void smp_init_pseries_mpic(void);
81extern void smp_init_pseries_xics(void);
79extern void find_udbg_vterm(void); 82extern void find_udbg_vterm(void);
80 83
81int fwnmi_active; /* TRUE if an FWNMI handler is present */ 84int fwnmi_active; /* TRUE if an FWNMI handler is present */
@@ -83,7 +86,7 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */
83static void pseries_shared_idle_sleep(void); 86static void pseries_shared_idle_sleep(void);
84static void pseries_dedicated_idle_sleep(void); 87static void pseries_dedicated_idle_sleep(void);
85 88
86struct mpic *pSeries_mpic; 89static struct device_node *pSeries_mpic_node;
87 90
88static void pSeries_show_cpuinfo(struct seq_file *m) 91static void pSeries_show_cpuinfo(struct seq_file *m)
89{ 92{
@@ -118,63 +121,92 @@ static void __init fwnmi_init(void)
118 fwnmi_active = 1; 121 fwnmi_active = 1;
119} 122}
120 123
121static void __init pSeries_init_mpic(void) 124void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc,
125 struct pt_regs *regs)
122{ 126{
123 unsigned int *addrp; 127 unsigned int cascade_irq = i8259_irq(regs);
124 struct device_node *np; 128 if (cascade_irq != NO_IRQ)
125 unsigned long intack = 0; 129 generic_handle_irq(cascade_irq, regs);
126 130 desc->chip->eoi(irq);
127 /* All ISUs are setup, complete initialization */
128 mpic_init(pSeries_mpic);
129
130 /* Check what kind of cascade ACK we have */
131 if (!(np = of_find_node_by_name(NULL, "pci"))
132 || !(addrp = (unsigned int *)
133 get_property(np, "8259-interrupt-acknowledge", NULL)))
134 printk(KERN_ERR "Cannot find pci to get ack address\n");
135 else
136 intack = addrp[prom_n_addr_cells(np)-1];
137 of_node_put(np);
138
139 /* Setup the legacy interrupts & controller */
140 i8259_init(intack, 0);
141
142 /* Hook cascade to mpic */
143 mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
144} 131}
145 132
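
Both cascade handlers introduced by this patch follow the same fasteoi discipline, and the ordering matters: poll the child controller first, dispatch through the generic layer, and EOI the cascade source on the parent only afterwards, so the parent cannot present a second legacy interrupt while the first is still in flight. The three steps, annotated (same body as pseries_8259_cascade above):

static void example_8259_cascade(unsigned int irq, struct irq_desc *desc,
				 struct pt_regs *regs)
{
	unsigned int child = i8259_irq(regs);	 /* 1. poll the 8259 */

	if (child != NO_IRQ)
		generic_handle_irq(child, regs); /* 2. run child handler */
	desc->chip->eoi(irq);			 /* 3. EOI the parent last */
}
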
146static void __init pSeries_setup_mpic(void) 133static void __init pseries_mpic_init_IRQ(void)
147{ 134{
135 struct device_node *np, *old, *cascade = NULL;
136 unsigned int *addrp;
137 unsigned long intack = 0;
148 unsigned int *opprop; 138 unsigned int *opprop;
149 unsigned long openpic_addr = 0; 139 unsigned long openpic_addr = 0;
150 unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS]; 140 unsigned int cascade_irq;
151 struct device_node *root; 141 int naddr, n, i, opplen;
152 int irq_count; 142 struct mpic *mpic;
153 143
154 /* Find the Open PIC if present */ 144 np = of_find_node_by_path("/");
155 root = of_find_node_by_path("/"); 145 naddr = prom_n_addr_cells(np);
156 opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL); 146 opprop = (unsigned int *) get_property(np, "platform-open-pic", &opplen);
157 if (opprop != 0) { 147 if (opprop != 0) {
158 int n = prom_n_addr_cells(root); 148 openpic_addr = of_read_number(opprop, naddr);
159
160 for (openpic_addr = 0; n > 0; --n)
161 openpic_addr = (openpic_addr << 32) + *opprop++;
162 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); 149 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
163 } 150 }
164 of_node_put(root); 151 of_node_put(np);
165 152
166 BUG_ON(openpic_addr == 0); 153 BUG_ON(openpic_addr == 0);
167 154
168 /* Get the sense values from OF */
169 prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);
170
171 /* Setup the openpic driver */ 155 /* Setup the openpic driver */
172 irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ 156 mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
173 pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY, 157 MPIC_PRIMARY,
174 16, 16, irq_count, /* isu size, irq offset, irq count */ 158 16, 250, /* isu size, irq count */
175 NR_IRQS - 4, /* ipi offset */ 159 " MPIC ");
176 senses, irq_count, /* sense & sense size */ 160 BUG_ON(mpic == NULL);
177 " MPIC "); 161
162 /* Add ISUs */
163 opplen /= sizeof(u32);
164 for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
165 unsigned long isuaddr = of_read_number(opprop + i, naddr);
166 mpic_assign_isu(mpic, n, isuaddr);
167 }
168
169 /* All ISUs are setup, complete initialization */
170 mpic_init(mpic);
171
172 /* Look for cascade */
173 for_each_node_by_type(np, "interrupt-controller")
174 if (device_is_compatible(np, "chrp,iic")) {
175 cascade = np;
176 break;
177 }
178 if (cascade == NULL)
179 return;
180
181 cascade_irq = irq_of_parse_and_map(cascade, 0);
 182 if (cascade_irq == NO_IRQ) {
 183 printk(KERN_ERR "mpic: failed to map cascade interrupt\n");
184 return;
185 }
186
187 /* Check ACK type */
188 for (old = of_node_get(cascade); old != NULL ; old = np) {
189 np = of_get_parent(old);
190 of_node_put(old);
191 if (np == NULL)
192 break;
193 if (strcmp(np->name, "pci") != 0)
194 continue;
195 addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge",
196 NULL);
197 if (addrp == NULL)
198 continue;
199 naddr = prom_n_addr_cells(np);
200 intack = addrp[naddr-1];
201 if (naddr > 1)
202 intack |= ((unsigned long)addrp[naddr-2]) << 32;
203 }
204 if (intack)
205 printk(KERN_DEBUG "mpic: PCI 8259 intack at 0x%016lx\n",
206 intack);
207 i8259_init(cascade, intack);
208 of_node_put(cascade);
209 set_irq_chained_handler(cascade_irq, pseries_8259_cascade);
178} 210}
179 211
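
The "platform-open-pic" property parsed above is a flat list of #address-cells-sized values: the first group of cells is the main MPIC base, and each following group is one ISU base. of_read_number() folds naddr 32-bit cells into a single 64-bit value, so the ISU walk reduces to pointer arithmetic over the property. A sketch of just that decode (root and mpic assumed already set up as in the function above):

	u32 *opprop;
	int opplen, naddr, i, n;

	naddr = prom_n_addr_cells(root);
	opprop = (u32 *)get_property(root, "platform-open-pic", &opplen);
	opplen /= sizeof(u32);

	/* skip cells [0..naddr): the MPIC base itself */
	for (n = 0, i = naddr; i < opplen; i += naddr, n++)
		mpic_assign_isu(mpic, n, of_read_number(opprop + i, naddr));
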
180static void pseries_lpar_enable_pmcs(void) 212static void pseries_lpar_enable_pmcs(void)
@@ -192,23 +224,67 @@ static void pseries_lpar_enable_pmcs(void)
192 get_lppaca()->pmcregs_in_use = 1; 224 get_lppaca()->pmcregs_in_use = 1;
193} 225}
194 226
195static void __init pSeries_setup_arch(void) 227#ifdef CONFIG_KEXEC
228static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary)
196{ 229{
197 /* Fixup ppc_md depending on the type of interrupt controller */ 230 mpic_teardown_this_cpu(secondary);
198 if (ppc64_interrupt_controller == IC_OPEN_PIC) { 231}
199 ppc_md.init_IRQ = pSeries_init_mpic; 232
200 ppc_md.get_irq = mpic_get_irq; 233static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary)
201 /* Allocate the mpic now, so that find_and_init_phbs() can 234{
202 * fill the ISUs */ 235 /* Don't risk a hypervisor call if we're crashing */
203 pSeries_setup_mpic(); 236 if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
204 } else { 237 unsigned long vpa = __pa(get_lppaca());
205 ppc_md.init_IRQ = xics_init_IRQ; 238
206 ppc_md.get_irq = xics_get_irq; 239 if (unregister_vpa(hard_smp_processor_id(), vpa)) {
240 printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
241 "failed\n", smp_processor_id(),
242 hard_smp_processor_id());
243 }
207 } 244 }
245 xics_teardown_cpu(secondary);
246}
247#endif /* CONFIG_KEXEC */
208 248
249static void __init pseries_discover_pic(void)
250{
251 struct device_node *np;
252 char *typep;
253
254 for (np = NULL; (np = of_find_node_by_name(np,
255 "interrupt-controller"));) {
256 typep = (char *)get_property(np, "compatible", NULL);
257 if (strstr(typep, "open-pic")) {
258 pSeries_mpic_node = of_node_get(np);
259 ppc_md.init_IRQ = pseries_mpic_init_IRQ;
260 ppc_md.get_irq = mpic_get_irq;
261#ifdef CONFIG_KEXEC
262 ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic;
263#endif
264#ifdef CONFIG_SMP
265 smp_init_pseries_mpic();
266#endif
267 return;
268 } else if (strstr(typep, "ppc-xicp")) {
269 ppc_md.init_IRQ = xics_init_IRQ;
270#ifdef CONFIG_KEXEC
271 ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics;
272#endif
209#ifdef CONFIG_SMP 273#ifdef CONFIG_SMP
210 smp_init_pSeries(); 274 smp_init_pseries_xics();
211#endif 275#endif
276 return;
277 }
278 }
 279 printk(KERN_ERR "pseries_discover_pic: failed to recognize"
280 " interrupt-controller\n");
281}
282
283static void __init pSeries_setup_arch(void)
284{
285 /* Discover PIC type and setup ppc_md accordingly */
286 pseries_discover_pic();
287
212 /* openpic global configuration register (64-bit format). */ 288 /* openpic global configuration register (64-bit format). */
213 /* openpic Interrupt Source Unit pointer (64-bit format). */ 289 /* openpic Interrupt Source Unit pointer (64-bit format). */
214 /* python0 facility area (mmio) (64-bit format) REAL address. */ 290 /* python0 facility area (mmio) (64-bit format) REAL address. */
@@ -260,41 +336,11 @@ static int __init pSeries_init_panel(void)
260} 336}
261arch_initcall(pSeries_init_panel); 337arch_initcall(pSeries_init_panel);
262 338
263static void __init pSeries_discover_pic(void)
264{
265 struct device_node *np;
266 char *typep;
267
268 /*
269 * Setup interrupt mapping options that are needed for finish_device_tree
270 * to properly parse the OF interrupt tree & do the virtual irq mapping
271 */
272 __irq_offset_value = NUM_ISA_INTERRUPTS;
273 ppc64_interrupt_controller = IC_INVALID;
274 for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
275 typep = (char *)get_property(np, "compatible", NULL);
276 if (strstr(typep, "open-pic")) {
277 ppc64_interrupt_controller = IC_OPEN_PIC;
278 break;
279 } else if (strstr(typep, "ppc-xicp")) {
280 ppc64_interrupt_controller = IC_PPC_XIC;
281 break;
282 }
283 }
284 if (ppc64_interrupt_controller == IC_INVALID)
285 printk("pSeries_discover_pic: failed to recognize"
286 " interrupt-controller\n");
287
288}
289
290static void pSeries_mach_cpu_die(void) 339static void pSeries_mach_cpu_die(void)
291{ 340{
292 local_irq_disable(); 341 local_irq_disable();
293 idle_task_exit(); 342 idle_task_exit();
294 /* Some hardware requires clearing the CPPR, while other hardware does not 343 xics_teardown_cpu(0);
295 * it is safe either way
296 */
297 pSeriesLP_cppr_info(0, 0);
298 rtas_stop_self(); 344 rtas_stop_self();
299 /* Should never get here... */ 345 /* Should never get here... */
300 BUG(); 346 BUG();
@@ -332,8 +378,6 @@ static void __init pSeries_init_early(void)
332 378
333 iommu_init_early_pSeries(); 379 iommu_init_early_pSeries();
334 380
335 pSeries_discover_pic();
336
337 DBG(" <- pSeries_init_early()\n"); 381 DBG(" <- pSeries_init_early()\n");
338} 382}
339 383
@@ -505,27 +549,6 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
505 return PCI_PROBE_NORMAL; 549 return PCI_PROBE_NORMAL;
506} 550}
507 551
508#ifdef CONFIG_KEXEC
509static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
510{
511 /* Don't risk a hypervisor call if we're crashing */
512 if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
513 unsigned long vpa = __pa(get_lppaca());
514
515 if (unregister_vpa(hard_smp_processor_id(), vpa)) {
516 printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
517 "failed\n", smp_processor_id(),
518 hard_smp_processor_id());
519 }
520 }
521
522 if (ppc64_interrupt_controller == IC_OPEN_PIC)
523 mpic_teardown_this_cpu(secondary);
524 else
525 xics_teardown_cpu(secondary);
526}
527#endif
528
529define_machine(pseries) { 552define_machine(pseries) {
530 .name = "pSeries", 553 .name = "pSeries",
531 .probe = pSeries_probe, 554 .probe = pSeries_probe,
@@ -550,7 +573,6 @@ define_machine(pseries) {
550 .system_reset_exception = pSeries_system_reset_exception, 573 .system_reset_exception = pSeries_system_reset_exception,
551 .machine_check_exception = pSeries_machine_check_exception, 574 .machine_check_exception = pSeries_machine_check_exception,
552#ifdef CONFIG_KEXEC 575#ifdef CONFIG_KEXEC
553 .kexec_cpu_down = pseries_kexec_cpu_down,
554 .machine_kexec = default_machine_kexec, 576 .machine_kexec = default_machine_kexec,
555 .machine_kexec_prepare = default_machine_kexec_prepare, 577 .machine_kexec_prepare = default_machine_kexec_prepare,
556 .machine_crash_shutdown = default_machine_crash_shutdown, 578 .machine_crash_shutdown = default_machine_crash_shutdown,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 4ad144df49c2..ac61098ff401 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -416,27 +416,12 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
416#endif 416#endif
417 417
418/* This is called very early */ 418/* This is called very early */
419void __init smp_init_pSeries(void) 419static void __init smp_init_pseries(void)
420{ 420{
421 int i; 421 int i;
422 422
423 DBG(" -> smp_init_pSeries()\n"); 423 DBG(" -> smp_init_pSeries()\n");
424 424
425 switch (ppc64_interrupt_controller) {
426#ifdef CONFIG_MPIC
427 case IC_OPEN_PIC:
428 smp_ops = &pSeries_mpic_smp_ops;
429 break;
430#endif
431#ifdef CONFIG_XICS
432 case IC_PPC_XIC:
433 smp_ops = &pSeries_xics_smp_ops;
434 break;
435#endif
436 default:
437 panic("Invalid interrupt controller");
438 }
439
440#ifdef CONFIG_HOTPLUG_CPU 425#ifdef CONFIG_HOTPLUG_CPU
441 smp_ops->cpu_disable = pSeries_cpu_disable; 426 smp_ops->cpu_disable = pSeries_cpu_disable;
442 smp_ops->cpu_die = pSeries_cpu_die; 427 smp_ops->cpu_die = pSeries_cpu_die;
@@ -471,3 +456,18 @@ void __init smp_init_pSeries(void)
471 DBG(" <- smp_init_pSeries()\n"); 456 DBG(" <- smp_init_pSeries()\n");
472} 457}
473 458
459#ifdef CONFIG_MPIC
460void __init smp_init_pseries_mpic(void)
461{
462 smp_ops = &pSeries_mpic_smp_ops;
463
464 smp_init_pseries();
465}
466#endif
467
468void __init smp_init_pseries_xics(void)
469{
470 smp_ops = &pSeries_xics_smp_ops;
471
472 smp_init_pseries();
473}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 2ffebe31cb2d..716972aa9777 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -8,6 +8,9 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11
12#undef DEBUG
13
11#include <linux/types.h> 14#include <linux/types.h>
12#include <linux/threads.h> 15#include <linux/threads.h>
13#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -19,6 +22,7 @@
19#include <linux/gfp.h> 22#include <linux/gfp.h>
20#include <linux/radix-tree.h> 23#include <linux/radix-tree.h>
21#include <linux/cpu.h> 24#include <linux/cpu.h>
25
22#include <asm/firmware.h> 26#include <asm/firmware.h>
23#include <asm/prom.h> 27#include <asm/prom.h>
24#include <asm/io.h> 28#include <asm/io.h>
@@ -31,26 +35,6 @@
31 35
32#include "xics.h" 36#include "xics.h"
33 37
34static unsigned int xics_startup(unsigned int irq);
35static void xics_enable_irq(unsigned int irq);
36static void xics_disable_irq(unsigned int irq);
37static void xics_mask_and_ack_irq(unsigned int irq);
38static void xics_end_irq(unsigned int irq);
39static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
40
41static struct hw_interrupt_type xics_pic = {
42 .typename = " XICS ",
43 .startup = xics_startup,
44 .enable = xics_enable_irq,
45 .disable = xics_disable_irq,
46 .ack = xics_mask_and_ack_irq,
47 .end = xics_end_irq,
48 .set_affinity = xics_set_affinity
49};
50
51/* This is used to map real irq numbers to virtual */
52static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
53
54#define XICS_IPI 2 38#define XICS_IPI 2
55#define XICS_IRQ_SPURIOUS 0 39#define XICS_IRQ_SPURIOUS 0
56 40
@@ -81,12 +65,12 @@ struct xics_ipl {
81 65
82static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; 66static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
83 67
84static int xics_irq_8259_cascade = 0;
85static int xics_irq_8259_cascade_real = 0;
86static unsigned int default_server = 0xFF; 68static unsigned int default_server = 0xFF;
87static unsigned int default_distrib_server = 0; 69static unsigned int default_distrib_server = 0;
88static unsigned int interrupt_server_size = 8; 70static unsigned int interrupt_server_size = 8;
89 71
72static struct irq_host *xics_host;
73
90/* 74/*
91 * XICS only has a single IPI, so encode the messages per CPU 75 * XICS only has a single IPI, so encode the messages per CPU
92 */ 76 */
@@ -98,48 +82,34 @@ static int ibm_set_xive;
98static int ibm_int_on; 82static int ibm_int_on;
99static int ibm_int_off; 83static int ibm_int_off;
100 84
101typedef struct {
102 int (*xirr_info_get)(int cpu);
103 void (*xirr_info_set)(int cpu, int val);
104 void (*cppr_info)(int cpu, u8 val);
105 void (*qirr_info)(int cpu, u8 val);
106} xics_ops;
107 85
86/* Direct HW low level accessors */
108 87
109/* SMP */
110 88
111static int pSeries_xirr_info_get(int n_cpu) 89static inline unsigned int direct_xirr_info_get(int n_cpu)
112{ 90{
113 return in_be32(&xics_per_cpu[n_cpu]->xirr.word); 91 return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
114} 92}
115 93
116static void pSeries_xirr_info_set(int n_cpu, int value) 94static inline void direct_xirr_info_set(int n_cpu, int value)
117{ 95{
118 out_be32(&xics_per_cpu[n_cpu]->xirr.word, value); 96 out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
119} 97}
120 98
121static void pSeries_cppr_info(int n_cpu, u8 value) 99static inline void direct_cppr_info(int n_cpu, u8 value)
122{ 100{
123 out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value); 101 out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
124} 102}
125 103
126static void pSeries_qirr_info(int n_cpu, u8 value) 104static inline void direct_qirr_info(int n_cpu, u8 value)
127{ 105{
128 out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); 106 out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
129} 107}
130 108
131static xics_ops pSeries_ops = {
132 pSeries_xirr_info_get,
133 pSeries_xirr_info_set,
134 pSeries_cppr_info,
135 pSeries_qirr_info
136};
137 109
138static xics_ops *ops = &pSeries_ops; 110/* LPAR low level accessors */
139 111
140 112
141/* LPAR */
142
143static inline long plpar_eoi(unsigned long xirr) 113static inline long plpar_eoi(unsigned long xirr)
144{ 114{
145 return plpar_hcall_norets(H_EOI, xirr); 115 return plpar_hcall_norets(H_EOI, xirr);
@@ -161,7 +131,7 @@ static inline long plpar_xirr(unsigned long *xirr_ret)
161 return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); 131 return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
162} 132}
163 133
164static int pSeriesLP_xirr_info_get(int n_cpu) 134static inline unsigned int lpar_xirr_info_get(int n_cpu)
165{ 135{
166 unsigned long lpar_rc; 136 unsigned long lpar_rc;
167 unsigned long return_value; 137 unsigned long return_value;
@@ -169,10 +139,10 @@ static int pSeriesLP_xirr_info_get(int n_cpu)
169 lpar_rc = plpar_xirr(&return_value); 139 lpar_rc = plpar_xirr(&return_value);
170 if (lpar_rc != H_SUCCESS) 140 if (lpar_rc != H_SUCCESS)
171 panic(" bad return code xirr - rc = %lx \n", lpar_rc); 141 panic(" bad return code xirr - rc = %lx \n", lpar_rc);
172 return (int)return_value; 142 return (unsigned int)return_value;
173} 143}
174 144
175static void pSeriesLP_xirr_info_set(int n_cpu, int value) 145static inline void lpar_xirr_info_set(int n_cpu, int value)
176{ 146{
177 unsigned long lpar_rc; 147 unsigned long lpar_rc;
178 unsigned long val64 = value & 0xffffffff; 148 unsigned long val64 = value & 0xffffffff;
@@ -183,7 +153,7 @@ static void pSeriesLP_xirr_info_set(int n_cpu, int value)
183 val64); 153 val64);
184} 154}
185 155
186void pSeriesLP_cppr_info(int n_cpu, u8 value) 156static inline void lpar_cppr_info(int n_cpu, u8 value)
187{ 157{
188 unsigned long lpar_rc; 158 unsigned long lpar_rc;
189 159
@@ -192,7 +162,7 @@ void pSeriesLP_cppr_info(int n_cpu, u8 value)
192 panic("bad return code cppr - rc = %lx\n", lpar_rc); 162 panic("bad return code cppr - rc = %lx\n", lpar_rc);
193} 163}
194 164
195static void pSeriesLP_qirr_info(int n_cpu , u8 value) 165static inline void lpar_qirr_info(int n_cpu , u8 value)
196{ 166{
197 unsigned long lpar_rc; 167 unsigned long lpar_rc;
198 168
@@ -201,43 +171,16 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value)
201 panic("bad return code qirr - rc = %lx\n", lpar_rc); 171 panic("bad return code qirr - rc = %lx\n", lpar_rc);
202} 172}
203 173
204xics_ops pSeriesLP_ops = {
205 pSeriesLP_xirr_info_get,
206 pSeriesLP_xirr_info_set,
207 pSeriesLP_cppr_info,
208 pSeriesLP_qirr_info
209};
210
211static unsigned int xics_startup(unsigned int virq)
212{
213 unsigned int irq;
214
215 irq = irq_offset_down(virq);
216 if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
217 &virt_irq_to_real_map[irq]) == -ENOMEM)
218 printk(KERN_CRIT "Out of memory creating real -> virtual"
219 " IRQ mapping for irq %u (real 0x%x)\n",
220 virq, virt_irq_to_real(irq));
221 xics_enable_irq(virq);
222 return 0; /* return value is ignored */
223}
224 174
225static unsigned int real_irq_to_virt(unsigned int real_irq) 175/* High level handlers and init code */
226{
227 unsigned int *ptr;
228 176
229 ptr = radix_tree_lookup(&irq_map, real_irq);
230 if (ptr == NULL)
231 return NO_IRQ;
232 return ptr - virt_irq_to_real_map;
233}
234 177
235#ifdef CONFIG_SMP 178#ifdef CONFIG_SMP
236static int get_irq_server(unsigned int irq) 179static int get_irq_server(unsigned int virq)
237{ 180{
238 unsigned int server; 181 unsigned int server;
239 /* For the moment only implement delivery to all cpus or one cpu */ 182 /* For the moment only implement delivery to all cpus or one cpu */
240 cpumask_t cpumask = irq_desc[irq].affinity; 183 cpumask_t cpumask = irq_desc[virq].affinity;
241 cpumask_t tmp = CPU_MASK_NONE; 184 cpumask_t tmp = CPU_MASK_NONE;
242 185
243 if (!distribute_irqs) 186 if (!distribute_irqs)
@@ -258,23 +201,28 @@ static int get_irq_server(unsigned int irq)
258 201
259} 202}
260#else 203#else
261static int get_irq_server(unsigned int irq) 204static int get_irq_server(unsigned int virq)
262{ 205{
263 return default_server; 206 return default_server;
264} 207}
265#endif 208#endif
266 209
267static void xics_enable_irq(unsigned int virq) 210
211static void xics_unmask_irq(unsigned int virq)
268{ 212{
269 unsigned int irq; 213 unsigned int irq;
270 int call_status; 214 int call_status;
271 unsigned int server; 215 unsigned int server;
272 216
273 irq = virt_irq_to_real(irq_offset_down(virq)); 217 pr_debug("xics: unmask virq %d\n", virq);
274 if (irq == XICS_IPI) 218
219 irq = (unsigned int)irq_map[virq].hwirq;
220 pr_debug(" -> map to hwirq 0x%x\n", irq);
221 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
275 return; 222 return;
276 223
277 server = get_irq_server(virq); 224 server = get_irq_server(virq);
225
278 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 226 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
279 DEFAULT_PRIORITY); 227 DEFAULT_PRIORITY);
280 if (call_status != 0) { 228 if (call_status != 0) {
@@ -293,7 +241,7 @@ static void xics_enable_irq(unsigned int virq)
293 } 241 }
294} 242}
295 243
296static void xics_disable_real_irq(unsigned int irq) 244static void xics_mask_real_irq(unsigned int irq)
297{ 245{
298 int call_status; 246 int call_status;
299 unsigned int server; 247 unsigned int server;
@@ -318,75 +266,86 @@ static void xics_disable_real_irq(unsigned int irq)
318 } 266 }
319} 267}
320 268
321static void xics_disable_irq(unsigned int virq) 269static void xics_mask_irq(unsigned int virq)
322{ 270{
323 unsigned int irq; 271 unsigned int irq;
324 272
325 irq = virt_irq_to_real(irq_offset_down(virq)); 273 pr_debug("xics: mask virq %d\n", virq);
326 xics_disable_real_irq(irq); 274
275 irq = (unsigned int)irq_map[virq].hwirq;
276 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
277 return;
278 xics_mask_real_irq(irq);
279}
280
281static unsigned int xics_startup(unsigned int virq)
282{
283 unsigned int irq;
284
285 /* force a reverse mapping of the interrupt so it gets in the cache */
286 irq = (unsigned int)irq_map[virq].hwirq;
287 irq_radix_revmap(xics_host, irq);
288
289 /* unmask it */
290 xics_unmask_irq(virq);
291 return 0;
327} 292}
328 293
329static void xics_end_irq(unsigned int irq) 294static void xics_eoi_direct(unsigned int virq)
330{ 295{
331 int cpu = smp_processor_id(); 296 int cpu = smp_processor_id();
297 unsigned int irq = (unsigned int)irq_map[virq].hwirq;
332 298
333 iosync(); 299 iosync();
334 ops->xirr_info_set(cpu, ((0xff << 24) | 300 direct_xirr_info_set(cpu, (0xff << 24) | irq);
335 (virt_irq_to_real(irq_offset_down(irq)))));
336
337} 301}
338 302
339static void xics_mask_and_ack_irq(unsigned int irq) 303
304static void xics_eoi_lpar(unsigned int virq)
340{ 305{
341 int cpu = smp_processor_id(); 306 int cpu = smp_processor_id();
307 unsigned int irq = (unsigned int)irq_map[virq].hwirq;
342 308
343 if (irq < irq_offset_value()) { 309 iosync();
344 i8259_pic.ack(irq); 310 lpar_xirr_info_set(cpu, (0xff << 24) | irq);
345 iosync();
346 ops->xirr_info_set(cpu, ((0xff<<24) |
347 xics_irq_8259_cascade_real));
348 iosync();
349 }
350} 311}
351 312
352int xics_get_irq(struct pt_regs *regs) 313static inline unsigned int xics_remap_irq(unsigned int vec)
353{ 314{
354 unsigned int cpu = smp_processor_id(); 315 unsigned int irq;
355 unsigned int vec;
356 int irq;
357 316
358 vec = ops->xirr_info_get(cpu);
359 /* (vec >> 24) == old priority */
360 vec &= 0x00ffffff; 317 vec &= 0x00ffffff;
361 318
362 /* for sanity, this had better be < NR_IRQS - 16 */ 319 if (vec == XICS_IRQ_SPURIOUS)
363 if (vec == xics_irq_8259_cascade_real) { 320 return NO_IRQ;
364 irq = i8259_irq(regs); 321 irq = irq_radix_revmap(xics_host, vec);
365 xics_end_irq(irq_offset_up(xics_irq_8259_cascade)); 322 if (likely(irq != NO_IRQ))
366 } else if (vec == XICS_IRQ_SPURIOUS) { 323 return irq;
367 irq = -1; 324
368 } else { 325 printk(KERN_ERR "Interrupt %u (real) is invalid,"
369 irq = real_irq_to_virt(vec); 326 " disabling it.\n", vec);
370 if (irq == NO_IRQ) 327 xics_mask_real_irq(vec);
371 irq = real_irq_to_virt_slowpath(vec); 328 return NO_IRQ;
372 if (irq == NO_IRQ) {
373 printk(KERN_ERR "Interrupt %u (real) is invalid,"
374 " disabling it.\n", vec);
375 xics_disable_real_irq(vec);
376 } else
377 irq = irq_offset_up(irq);
378 }
379 return irq;
380} 329}
381 330
382#ifdef CONFIG_SMP 331static unsigned int xics_get_irq_direct(struct pt_regs *regs)
332{
333 unsigned int cpu = smp_processor_id();
383 334
384static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 335 return xics_remap_irq(direct_xirr_info_get(cpu));
336}
337
338static unsigned int xics_get_irq_lpar(struct pt_regs *regs)
385{ 339{
386 int cpu = smp_processor_id(); 340 unsigned int cpu = smp_processor_id();
341
342 return xics_remap_irq(lpar_xirr_info_get(cpu));
343}
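
Both getters above decode the same XIRR word: the top byte is the CPPR (the priority at which the interrupt was accepted) and the low 24 bits identify the source. Reading XIRR accepts the interrupt; writing the source back with 0xff in the top byte, as xics_eoi_direct() and xics_eoi_lpar() do, is the EOI. The layout as macros (illustrative names only):

#define XIRR_CPPR(x)	((x) >> 24)		/* priority at acceptance */
#define XIRR_XISR(x)	((x) & 0x00ffffff)	/* interrupt source */
#define XIRR_EOI(src)	((0xff << 24) | (src))	/* value written to EOI */
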
387 344
388 ops->qirr_info(cpu, 0xff); 345#ifdef CONFIG_SMP
389 346
347static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
348{
390 WARN_ON(cpu_is_offline(cpu)); 349 WARN_ON(cpu_is_offline(cpu));
391 350
392 while (xics_ipi_message[cpu].value) { 351 while (xics_ipi_message[cpu].value) {
@@ -418,18 +377,88 @@ static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
418 return IRQ_HANDLED; 377 return IRQ_HANDLED;
419} 378}
420 379
380static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id, struct pt_regs *regs)
381{
382 int cpu = smp_processor_id();
383
384 direct_qirr_info(cpu, 0xff);
385
386 return xics_ipi_dispatch(cpu, regs);
387}
388
389static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id, struct pt_regs *regs)
390{
391 int cpu = smp_processor_id();
392
393 lpar_qirr_info(cpu, 0xff);
394
395 return xics_ipi_dispatch(cpu, regs);
396}
397
421void xics_cause_IPI(int cpu) 398void xics_cause_IPI(int cpu)
422{ 399{
423 ops->qirr_info(cpu, IPI_PRIORITY); 400 if (firmware_has_feature(FW_FEATURE_LPAR))
401 lpar_qirr_info(cpu, IPI_PRIORITY);
402 else
403 direct_qirr_info(cpu, IPI_PRIORITY);
424} 404}
405
425#endif /* CONFIG_SMP */ 406#endif /* CONFIG_SMP */
426 407
408static void xics_set_cpu_priority(int cpu, unsigned char cppr)
409{
410 if (firmware_has_feature(FW_FEATURE_LPAR))
411 lpar_cppr_info(cpu, cppr);
412 else
413 direct_cppr_info(cpu, cppr);
414 iosync();
415}
416
417static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
418{
419 unsigned int irq;
420 int status;
421 int xics_status[2];
422 unsigned long newmask;
423 cpumask_t tmp = CPU_MASK_NONE;
424
425 irq = (unsigned int)irq_map[virq].hwirq;
426 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
427 return;
428
429 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
430
431 if (status) {
432 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
433 "returns %d\n", irq, status);
434 return;
435 }
436
437 /* For the moment only implement delivery to all cpus or one cpu */
438 if (cpus_equal(cpumask, CPU_MASK_ALL)) {
439 newmask = default_distrib_server;
440 } else {
441 cpus_and(tmp, cpu_online_map, cpumask);
442 if (cpus_empty(tmp))
443 return;
444 newmask = get_hard_smp_processor_id(first_cpu(tmp));
445 }
446
447 status = rtas_call(ibm_set_xive, 3, 1, NULL,
448 irq, newmask, xics_status[1]);
449
450 if (status) {
451 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
452 "returns %d\n", irq, status);
453 return;
454 }
455}
456
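
xics_set_affinity() leans on the RTAS ibm,get-xive / ibm,set-xive pair: get-xive reports the current (server, priority) tuple for a source, and set-xive rewrites it, so retargeting means re-issuing set-xive with a new server while preserving the priority just read back. The core exchange, trimmed of error handling (hwirq and new_server are assumed inputs):

	int xive[2];	/* filled in as { server, priority } */
	int status;

	status = rtas_call(ibm_get_xive, 1, 3, xive, hwirq);
	if (status == 0)
		rtas_call(ibm_set_xive, 3, 1, NULL, hwirq,
			  new_server, xive[1]);	/* keep old priority */
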
427void xics_setup_cpu(void) 457void xics_setup_cpu(void)
428{ 458{
429 int cpu = smp_processor_id(); 459 int cpu = smp_processor_id();
430 460
431 ops->cppr_info(cpu, 0xff); 461 xics_set_cpu_priority(cpu, 0xff);
432 iosync();
433 462
434 /* 463 /*
435 * Put the calling processor into the GIQ. This is really only 464 * Put the calling processor into the GIQ. This is really only
@@ -442,72 +471,266 @@ void xics_setup_cpu(void)
442 (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); 471 (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
443} 472}
444 473
445void xics_init_IRQ(void) 474
475static struct irq_chip xics_pic_direct = {
476 .typename = " XICS ",
477 .startup = xics_startup,
478 .mask = xics_mask_irq,
479 .unmask = xics_unmask_irq,
480 .eoi = xics_eoi_direct,
481 .set_affinity = xics_set_affinity
482};
483
484
485static struct irq_chip xics_pic_lpar = {
486 .typename = " XICS ",
487 .startup = xics_startup,
488 .mask = xics_mask_irq,
489 .unmask = xics_unmask_irq,
490 .eoi = xics_eoi_lpar,
491 .set_affinity = xics_set_affinity
492};
493
494
495static int xics_host_match(struct irq_host *h, struct device_node *node)
496{
497 /* IBM machines have interrupt parents of various funky types for things
498 * like vdevices, events, etc... The trick we use here is to match
 499 * everything except the legacy 8259, which is compatible with "chrp,iic"
500 */
501 return !device_is_compatible(node, "chrp,iic");
502}
503
504static int xics_host_map_direct(struct irq_host *h, unsigned int virq,
505 irq_hw_number_t hw, unsigned int flags)
506{
507 unsigned int sense = flags & IRQ_TYPE_SENSE_MASK;
508
509 pr_debug("xics: map_direct virq %d, hwirq 0x%lx, flags: 0x%x\n",
510 virq, hw, flags);
511
512 if (sense && sense != IRQ_TYPE_LEVEL_LOW)
513 printk(KERN_WARNING "xics: using unsupported sense 0x%x"
514 " for irq %d (h: 0x%lx)\n", flags, virq, hw);
515
516 get_irq_desc(virq)->status |= IRQ_LEVEL;
517 set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq);
518 return 0;
519}
520
521static int xics_host_map_lpar(struct irq_host *h, unsigned int virq,
522 irq_hw_number_t hw, unsigned int flags)
523{
524 unsigned int sense = flags & IRQ_TYPE_SENSE_MASK;
525
526 pr_debug("xics: map_lpar virq %d, hwirq 0x%lx, flags: 0x%x\n",
527 virq, hw, flags);
528
529 if (sense && sense != IRQ_TYPE_LEVEL_LOW)
530 printk(KERN_WARNING "xics: using unsupported sense 0x%x"
531 " for irq %d (h: 0x%lx)\n", flags, virq, hw);
532
533 get_irq_desc(virq)->status |= IRQ_LEVEL;
534 set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq);
535 return 0;
536}
537
538static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
539 u32 *intspec, unsigned int intsize,
540 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
541
542{
 543 /* The current XICS implementation translates everything
 544 * to level trigger. That is not technically right for MSIs, but it
 545 * is irrelevant at this point. We might get smarter in the future.
546 */
547 *out_hwirq = intspec[0];
548 *out_flags = IRQ_TYPE_LEVEL_LOW;
549
550 return 0;
551}
552
553static struct irq_host_ops xics_host_direct_ops = {
554 .match = xics_host_match,
555 .map = xics_host_map_direct,
556 .xlate = xics_host_xlate,
557};
558
559static struct irq_host_ops xics_host_lpar_ops = {
560 .match = xics_host_match,
561 .map = xics_host_map_lpar,
562 .xlate = xics_host_xlate,
563};
564
565static void __init xics_init_host(void)
566{
567 struct irq_host_ops *ops;
568
569 if (firmware_has_feature(FW_FEATURE_LPAR))
570 ops = &xics_host_lpar_ops;
571 else
572 ops = &xics_host_direct_ops;
573 xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops,
574 XICS_IRQ_SPURIOUS);
575 BUG_ON(xics_host == NULL);
576 irq_set_default_host(xics_host);
577}
578
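
Unlike the PowerMac PIC, XICS source numbers are large and sparse, so the host uses IRQ_HOST_MAP_TREE: reverse lookups go through a radix tree instead of a flat array, and xics_startup() pre-touches the tree so the hot path never has to allocate. The three touch points, condensed from the code above (host, ops, hwirq and xirr assumed in scope):

	/* allocation: no sources reserved up front; the spurious
	 * vector doubles as the invalid hardware number */
	host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops, XICS_IRQ_SPURIOUS);

	/* startup path: warm the radix tree for this source */
	irq_radix_revmap(host, hwirq);

	/* interrupt path: low 24 bits of the XIRR back to a virq */
	virq = irq_radix_revmap(host, xirr & 0x00ffffff);
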
579static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
580 unsigned long size)
446{ 581{
582#ifdef CONFIG_SMP
447 int i; 583 int i;
448 unsigned long intr_size = 0;
449 struct device_node *np;
450 uint *ireg, ilen, indx = 0;
451 unsigned long intr_base = 0;
452 struct xics_interrupt_node {
453 unsigned long addr;
454 unsigned long size;
455 } intnodes[NR_CPUS];
456 584
457 ppc64_boot_msg(0x20, "XICS Init"); 585 /* This may look gross but it's good enough for now, we don't quite
 586 * have a hard -> linux processor id mapping.
587 */
588 for_each_possible_cpu(i) {
589 if (!cpu_present(i))
590 continue;
591 if (hw_id == get_hard_smp_processor_id(i)) {
592 xics_per_cpu[i] = ioremap(addr, size);
593 return;
594 }
595 }
596#else
597 if (hw_id != 0)
598 return;
599 xics_per_cpu[0] = ioremap(addr, size);
600#endif /* CONFIG_SMP */
601}
458 602
459 ibm_get_xive = rtas_token("ibm,get-xive"); 603static void __init xics_init_one_node(struct device_node *np,
460 ibm_set_xive = rtas_token("ibm,set-xive"); 604 unsigned int *indx)
461 ibm_int_on = rtas_token("ibm,int-on"); 605{
462 ibm_int_off = rtas_token("ibm,int-off"); 606 unsigned int ilen;
607 u32 *ireg;
463 608
 464 np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation"); 609 /* This code makes the theoretically broken assumption that the interrupt
465 if (!np) 610 * server numbers are the same as the hard CPU numbers.
466 panic("xics_init_IRQ: can't find interrupt presentation"); 611 * This happens to be the case so far but we are playing with fire...
612 * should be fixed one of these days. -BenH.
613 */
614 ireg = (u32 *)get_property(np, "ibm,interrupt-server-ranges", NULL);
467 615
468nextnode: 616 /* Does that ever happen? We'll know soon enough... but even good old
 469 ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL); 617 * f80 does have that property...
618 */
619 WARN_ON(ireg == NULL);
470 if (ireg) { 620 if (ireg) {
471 /* 621 /*
472 * set node starting index for this node 622 * set node starting index for this node
473 */ 623 */
474 indx = *ireg; 624 *indx = *ireg;
475 } 625 }
476 626 ireg = (u32 *)get_property(np, "reg", &ilen);
477 ireg = (uint *)get_property(np, "reg", &ilen);
478 if (!ireg) 627 if (!ireg)
479 panic("xics_init_IRQ: can't find interrupt reg property"); 628 panic("xics_init_IRQ: can't find interrupt reg property");
480 629
481 while (ilen) { 630 while (ilen >= (4 * sizeof(u32))) {
482 intnodes[indx].addr = (unsigned long)*ireg++ << 32; 631 unsigned long addr, size;
483 ilen -= sizeof(uint); 632
484 intnodes[indx].addr |= *ireg++; 633 /* XXX Use proper OF parsing code here !!! */
485 ilen -= sizeof(uint); 634 addr = (unsigned long)*ireg++ << 32;
486 intnodes[indx].size = (unsigned long)*ireg++ << 32; 635 ilen -= sizeof(u32);
487 ilen -= sizeof(uint); 636 addr |= *ireg++;
488 intnodes[indx].size |= *ireg++; 637 ilen -= sizeof(u32);
489 ilen -= sizeof(uint); 638 size = (unsigned long)*ireg++ << 32;
490 indx++; 639 ilen -= sizeof(u32);
491 if (indx >= NR_CPUS) break; 640 size |= *ireg++;
641 ilen -= sizeof(u32);
642 xics_map_one_cpu(*indx, addr, size);
643 (*indx)++;
644 }
645}
646
647
648static void __init xics_setup_8259_cascade(void)
649{
650 struct device_node *np, *old, *found = NULL;
651 int cascade, naddr;
652 u32 *addrp;
653 unsigned long intack = 0;
654
655 for_each_node_by_type(np, "interrupt-controller")
656 if (device_is_compatible(np, "chrp,iic")) {
657 found = np;
658 break;
659 }
660 if (found == NULL) {
661 printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
662 return;
663 }
664 cascade = irq_of_parse_and_map(found, 0);
665 if (cascade == NO_IRQ) {
666 printk(KERN_ERR "xics: failed to map cascade interrupt");
667 return;
668 }
669 pr_debug("xics: cascade mapped to irq %d\n", cascade);
670
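	/* Walk up the tree from the cascade node looking for a PCI host
	 * bridge with an "8259-interrupt-acknowledge" property, and build
	 * the (possibly 64-bit) intack address from its last address cells.
	 */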
671 for (old = of_node_get(found); old != NULL ; old = np) {
672 np = of_get_parent(old);
673 of_node_put(old);
674 if (np == NULL)
675 break;
676 if (strcmp(np->name, "pci") != 0)
677 continue;
678 addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", NULL);
679 if (addrp == NULL)
680 continue;
681 naddr = prom_n_addr_cells(np);
682 intack = addrp[naddr-1];
683 if (naddr > 1)
684 intack |= ((unsigned long)addrp[naddr-2]) << 32;
685 }
686 if (intack)
687 printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack);
688 i8259_init(found, intack);
689 of_node_put(found);
690 set_irq_chained_handler(cascade, pseries_8259_cascade);
691}
692
693void __init xics_init_IRQ(void)
694{
695 int i;
696 struct device_node *np;
697 u32 *ireg, ilen, indx = 0;
698 int found = 0;
699
700 ppc64_boot_msg(0x20, "XICS Init");
701
702 ibm_get_xive = rtas_token("ibm,get-xive");
703 ibm_set_xive = rtas_token("ibm,set-xive");
704 ibm_int_on = rtas_token("ibm,int-on");
705 ibm_int_off = rtas_token("ibm,int-off");
706
707 for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
708 found = 1;
709 if (firmware_has_feature(FW_FEATURE_LPAR))
710 break;
711 xics_init_one_node(np, &indx);
492 } 712 }
713 if (found == 0)
714 return;
493 715
494 np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation"); 716 xics_init_host();
495 if ((indx < NR_CPUS) && np) goto nextnode;
496 717
497 /* Find the server numbers for the boot cpu. */ 718 /* Find the server numbers for the boot cpu. */
498 for (np = of_find_node_by_type(NULL, "cpu"); 719 for (np = of_find_node_by_type(NULL, "cpu");
499 np; 720 np;
500 np = of_find_node_by_type(np, "cpu")) { 721 np = of_find_node_by_type(np, "cpu")) {
501 ireg = (uint *)get_property(np, "reg", &ilen); 722 ireg = (u32 *)get_property(np, "reg", &ilen);
502 if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { 723 if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
503 ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", 724 ireg = (u32 *)get_property(np,
504 &ilen); 725 "ibm,ppc-interrupt-gserver#s",
726 &ilen);
505 i = ilen / sizeof(int); 727 i = ilen / sizeof(int);
506 if (ireg && i > 0) { 728 if (ireg && i > 0) {
507 default_server = ireg[0]; 729 default_server = ireg[0];
508 default_distrib_server = ireg[i-1]; /* take last element */ 730 /* take last element */
731 default_distrib_server = ireg[i-1];
509 } 732 }
510 ireg = (uint *)get_property(np, 733 ireg = (u32 *)get_property(np,
511 "ibm,interrupt-server#-size", NULL); 734 "ibm,interrupt-server#-size", NULL);
512 if (ireg) 735 if (ireg)
513 interrupt_server_size = *ireg; 736 interrupt_server_size = *ireg;
@@ -516,135 +739,48 @@ nextnode:
516 } 739 }
517 of_node_put(np); 740 of_node_put(np);
518 741
519 intr_base = intnodes[0].addr;
520 intr_size = intnodes[0].size;
521
522 np = of_find_node_by_type(NULL, "interrupt-controller");
523 if (!np) {
524 printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
525 xics_irq_8259_cascade_real = -1;
526 xics_irq_8259_cascade = -1;
527 } else {
528 ireg = (uint *) get_property(np, "interrupts", NULL);
529 if (!ireg)
530 panic("xics_init_IRQ: can't find ISA interrupts property");
531
532 xics_irq_8259_cascade_real = *ireg;
533 xics_irq_8259_cascade
534 = virt_irq_create_mapping(xics_irq_8259_cascade_real);
535 i8259_init(0, 0);
536 of_node_put(np);
537 }
538
539 if (firmware_has_feature(FW_FEATURE_LPAR)) 742 if (firmware_has_feature(FW_FEATURE_LPAR))
540 ops = &pSeriesLP_ops; 743 ppc_md.get_irq = xics_get_irq_lpar;
541 else { 744 else
542#ifdef CONFIG_SMP 745 ppc_md.get_irq = xics_get_irq_direct;
543 for_each_possible_cpu(i) {
544 int hard_id;
545
546 /* FIXME: Do this dynamically! --RR */
547 if (!cpu_present(i))
548 continue;
549
550 hard_id = get_hard_smp_processor_id(i);
551 xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
552 intnodes[hard_id].size);
553 }
554#else
555 xics_per_cpu[0] = ioremap(intr_base, intr_size);
556#endif /* CONFIG_SMP */
557 }
558
559 for (i = irq_offset_value(); i < NR_IRQS; ++i)
560 get_irq_desc(i)->chip = &xics_pic;
561 746
562 xics_setup_cpu(); 747 xics_setup_cpu();
563 748
749 xics_setup_8259_cascade();
750
564 ppc64_boot_msg(0x21, "XICS Done"); 751 ppc64_boot_msg(0x21, "XICS Done");
565} 752}
566 753
567/*
568 * We can't do this in init_IRQ because we need the memory subsystem up for
569 * request_irq()
570 */
571static int __init xics_setup_i8259(void)
572{
573 if (ppc64_interrupt_controller == IC_PPC_XIC &&
574 xics_irq_8259_cascade != -1) {
575 if (request_irq(irq_offset_up(xics_irq_8259_cascade),
576 no_action, 0, "8259 cascade", NULL))
577 printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
578 "cascade\n");
579 }
580 return 0;
581}
582arch_initcall(xics_setup_i8259);
583 754
584#ifdef CONFIG_SMP 755#ifdef CONFIG_SMP
585void xics_request_IPIs(void) 756void xics_request_IPIs(void)
586{ 757{
587 virt_irq_to_real_map[XICS_IPI] = XICS_IPI; 758 unsigned int ipi;
759
760 ipi = irq_create_mapping(xics_host, XICS_IPI, 0);
761 BUG_ON(ipi == NO_IRQ);
588 762
589 /* 763 /*
590 * IPIs are marked IRQF_DISABLED as they must run with irqs 764 * IPIs are marked IRQF_DISABLED as they must run with irqs
591 * disabled 765 * disabled
592 */ 766 */
593 request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, 767 set_irq_handler(ipi, handle_percpu_irq);
594 IRQF_DISABLED, "IPI", NULL); 768 if (firmware_has_feature(FW_FEATURE_LPAR))
595 get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU; 769 request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
596} 770 "IPI", NULL);
597#endif 771 else
598 772 request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
599static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) 773 "IPI", NULL);
600{
601 unsigned int irq;
602 int status;
603 int xics_status[2];
604 unsigned long newmask;
605 cpumask_t tmp = CPU_MASK_NONE;
606
607 irq = virt_irq_to_real(irq_offset_down(virq));
608 if (irq == XICS_IPI || irq == NO_IRQ)
609 return;
610
611 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
612
613 if (status) {
614 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
615 "returns %d\n", irq, status);
616 return;
617 }
618
619 /* For the moment only implement delivery to all cpus or one cpu */
620 if (cpus_equal(cpumask, CPU_MASK_ALL)) {
621 newmask = default_distrib_server;
622 } else {
623 cpus_and(tmp, cpu_online_map, cpumask);
624 if (cpus_empty(tmp))
625 return;
626 newmask = get_hard_smp_processor_id(first_cpu(tmp));
627 }
628
629 status = rtas_call(ibm_set_xive, 3, 1, NULL,
630 irq, newmask, xics_status[1]);
631
632 if (status) {
633 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
634 "returns %d\n", irq, status);
635 return;
636 }
637} 774}
775#endif /* CONFIG_SMP */
638 776
639void xics_teardown_cpu(int secondary) 777void xics_teardown_cpu(int secondary)
640{ 778{
641 int cpu = smp_processor_id(); 779 int cpu = smp_processor_id();
780 unsigned int ipi;
781 struct irq_desc *desc;
642 782
643 ops->cppr_info(cpu, 0x00); 783 xics_set_cpu_priority(cpu, 0);
644 iosync();
645
646 /* Clear IPI */
647 ops->qirr_info(cpu, 0xff);
648 784
649 /* 785 /*
650 * we need to EOI the IPI if we got here from kexec down IPI 786 * we need to EOI the IPI if we got here from kexec down IPI
@@ -653,7 +789,13 @@ void xics_teardown_cpu(int secondary)
653 * should we be flagging idle loop instead? 789 * should we be flagging idle loop instead?
654 * or creating some task to be scheduled? 790 * or creating some task to be scheduled?
655 */ 791 */
656 ops->xirr_info_set(cpu, XICS_IPI); 792
793 ipi = irq_find_mapping(xics_host, XICS_IPI);
794 if (ipi == XICS_IRQ_SPURIOUS)
795 return;
796 desc = get_irq_desc(ipi);
797 if (desc->chip && desc->chip->eoi)
798	desc->chip->eoi(ipi);
657 799
658 /* 800 /*
659 * Some machines need to have at least one cpu in the GIQ, 801 * Some machines need to have at least one cpu in the GIQ,
@@ -661,8 +803,8 @@ void xics_teardown_cpu(int secondary)
661 */ 803 */
662 if (secondary) 804 if (secondary)
663 rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, 805 rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
664 (1UL << interrupt_server_size) - 1 - 806 (1UL << interrupt_server_size) - 1 -
665 default_distrib_server, 0); 807 default_distrib_server, 0);
666} 808}
667 809
668#ifdef CONFIG_HOTPLUG_CPU 810#ifdef CONFIG_HOTPLUG_CPU
@@ -674,8 +816,7 @@ void xics_migrate_irqs_away(void)
674 unsigned int irq, virq, cpu = smp_processor_id(); 816 unsigned int irq, virq, cpu = smp_processor_id();
675 817
676 /* Reject any interrupt that was queued to us... */ 818 /* Reject any interrupt that was queued to us... */
677 ops->cppr_info(cpu, 0); 819 xics_set_cpu_priority(cpu, 0);
678 iosync();
679 820
680 /* remove ourselves from the global interrupt queue */ 821 /* remove ourselves from the global interrupt queue */
681 status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, 822 status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
@@ -683,24 +824,23 @@ void xics_migrate_irqs_away(void)
683 WARN_ON(status < 0); 824 WARN_ON(status < 0);
684 825
685 /* Allow IPIs again... */ 826 /* Allow IPIs again... */
686 ops->cppr_info(cpu, DEFAULT_PRIORITY); 827 xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);
687 iosync();
688 828
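	/* Scan all virtual irqs: skip ISA interrupts, interrupts owned by
	 * other hosts, and the IPI/spurious vectors, then migrate any
	 * enabled source still targeted at this cpu.
	 */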
689 for_each_irq(virq) { 829 for_each_irq(virq) {
690 irq_desc_t *desc; 830 struct irq_desc *desc;
691 int xics_status[2]; 831 int xics_status[2];
692 unsigned long flags; 832 unsigned long flags;
693 833
694	/* We can't set affinity on ISA interrupts */	834	/* We can't set affinity on ISA interrupts */
695 if (virq < irq_offset_value()) 835 if (virq < NUM_ISA_INTERRUPTS)
696 continue; 836 continue;
697 837 if (irq_map[virq].host != xics_host)
698 desc = get_irq_desc(virq); 838 continue;
699 irq = virt_irq_to_real(irq_offset_down(virq)); 839 irq = (unsigned int)irq_map[virq].hwirq;
700
701 /* We need to get IPIs still. */ 840 /* We need to get IPIs still. */
702 if (irq == XICS_IPI || irq == NO_IRQ) 841 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
703 continue; 842 continue;
843 desc = get_irq_desc(virq);
704 844
705 /* We only need to migrate enabled IRQS */ 845 /* We only need to migrate enabled IRQS */
706 if (desc == NULL || desc->chip == NULL 846 if (desc == NULL || desc->chip == NULL
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
index e14c70868f1d..6ee1055b0ffb 100644
--- a/arch/powerpc/platforms/pseries/xics.h
+++ b/arch/powerpc/platforms/pseries/xics.h
@@ -14,13 +14,12 @@
14 14
15#include <linux/cache.h> 15#include <linux/cache.h>
16 16
17void xics_init_IRQ(void); 17extern void xics_init_IRQ(void);
18int xics_get_irq(struct pt_regs *); 18extern void xics_setup_cpu(void);
19void xics_setup_cpu(void); 19extern void xics_teardown_cpu(int secondary);
20void xics_teardown_cpu(int secondary); 20extern void xics_cause_IPI(int cpu);
21void xics_cause_IPI(int cpu); 21extern void xics_request_IPIs(void);
22void xics_request_IPIs(void); 22extern void xics_migrate_irqs_away(void);
23void xics_migrate_irqs_away(void);
24 23
25/* first argument is ignored for now */	24/* first argument is ignored for now */
26void pSeriesLP_cppr_info(int n_cpu, u8 value); 25void pSeriesLP_cppr_info(int n_cpu, u8 value);
@@ -31,4 +30,8 @@ struct xics_ipi_struct {
31 30
32extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; 31extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
33 32
33struct irq_desc;
34extern void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc,
35 struct pt_regs *regs);
36
34#endif /* _POWERPC_KERNEL_XICS_H */ 37#endif /* _POWERPC_KERNEL_XICS_H */
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 054bd8b41ef5..cebfae242602 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -4,7 +4,6 @@ endif
4 4
5obj-$(CONFIG_MPIC) += mpic.o 5obj-$(CONFIG_MPIC) += mpic.o
6obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o 6obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
7obj-$(CONFIG_PPC_I8259) += i8259.o
8obj-$(CONFIG_PPC_MPC106) += grackle.o 7obj-$(CONFIG_PPC_MPC106) += grackle.o
9obj-$(CONFIG_BOOKE) += dcr.o 8obj-$(CONFIG_BOOKE) += dcr.o
10obj-$(CONFIG_40x) += dcr.o 9obj-$(CONFIG_40x) += dcr.o
@@ -14,3 +13,7 @@ obj-$(CONFIG_PPC_83xx) += ipic.o
14obj-$(CONFIG_FSL_SOC) += fsl_soc.o 13obj-$(CONFIG_FSL_SOC) += fsl_soc.o
15obj-$(CONFIG_PPC_TODC) += todc.o 14obj-$(CONFIG_PPC_TODC) += todc.o
16obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o 15obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
16
17ifeq ($(CONFIG_PPC_MERGE),y)
18obj-$(CONFIG_PPC_I8259) += i8259.o
19 endif
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 1a3ef1ab9d6e..72c73a6105cd 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -6,11 +6,16 @@
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9#undef DEBUG
10
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/ioport.h> 12#include <linux/ioport.h>
11#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/delay.h>
12#include <asm/io.h> 16#include <asm/io.h>
13#include <asm/i8259.h> 17#include <asm/i8259.h>
18#include <asm/prom.h>
14 19
15static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */ 20static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
16 21
@@ -20,7 +25,8 @@ static unsigned char cached_8259[2] = { 0xff, 0xff };
20 25
21static DEFINE_SPINLOCK(i8259_lock); 26static DEFINE_SPINLOCK(i8259_lock);
22 27
23static int i8259_pic_irq_offset; 28static struct device_node *i8259_node;
29static struct irq_host *i8259_host;
24 30
25/* 31/*
26 * Acknowledge the IRQ using either the PCI host bridge's interrupt 32 * Acknowledge the IRQ using either the PCI host bridge's interrupt
@@ -28,16 +34,18 @@ static int i8259_pic_irq_offset;
28 * which is called. It should be noted that polling is broken on some 34 * which is called. It should be noted that polling is broken on some
29 * IBM and Motorola PReP boxes so we must use the int-ack feature on them. 35 * IBM and Motorola PReP boxes so we must use the int-ack feature on them.
30 */ 36 */
31int i8259_irq(struct pt_regs *regs) 37unsigned int i8259_irq(struct pt_regs *regs)
32{ 38{
33 int irq; 39 int irq;
34 40 int lock = 0;
35 spin_lock(&i8259_lock);
36 41
37 /* Either int-ack or poll for the IRQ */ 42 /* Either int-ack or poll for the IRQ */
38 if (pci_intack) 43 if (pci_intack)
39 irq = readb(pci_intack); 44 irq = readb(pci_intack);
40 else { 45 else {
46 spin_lock(&i8259_lock);
47 lock = 1;
48
41 /* Perform an interrupt acknowledge cycle on controller 1. */ 49 /* Perform an interrupt acknowledge cycle on controller 1. */
42 outb(0x0C, 0x20); /* prepare for poll */ 50 outb(0x0C, 0x20); /* prepare for poll */
43 irq = inb(0x20) & 7; 51 irq = inb(0x20) & 7;
@@ -62,16 +70,13 @@ int i8259_irq(struct pt_regs *regs)
62 if (!pci_intack) 70 if (!pci_intack)
63 outb(0x0B, 0x20); /* ISR register */ 71 outb(0x0B, 0x20); /* ISR register */
64 if(~inb(0x20) & 0x80) 72 if(~inb(0x20) & 0x80)
65 irq = -1; 73 irq = NO_IRQ;
66 } 74 } else if (irq == 0xff)
75 irq = NO_IRQ;
67 76
68 spin_unlock(&i8259_lock); 77 if (lock)
69 return irq + i8259_pic_irq_offset; 78 spin_unlock(&i8259_lock);
70} 79 return irq;
71
72int i8259_irq_cascade(struct pt_regs *regs, void *unused)
73{
74 return i8259_irq(regs);
75} 80}
76 81
77static void i8259_mask_and_ack_irq(unsigned int irq_nr) 82static void i8259_mask_and_ack_irq(unsigned int irq_nr)
@@ -79,7 +84,6 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr)
79 unsigned long flags; 84 unsigned long flags;
80 85
81 spin_lock_irqsave(&i8259_lock, flags); 86 spin_lock_irqsave(&i8259_lock, flags);
82 irq_nr -= i8259_pic_irq_offset;
83 if (irq_nr > 7) { 87 if (irq_nr > 7) {
84 cached_A1 |= 1 << (irq_nr-8); 88 cached_A1 |= 1 << (irq_nr-8);
85 inb(0xA1); /* DUMMY */ 89 inb(0xA1); /* DUMMY */
@@ -105,8 +109,9 @@ static void i8259_mask_irq(unsigned int irq_nr)
105{ 109{
106 unsigned long flags; 110 unsigned long flags;
107 111
112 pr_debug("i8259_mask_irq(%d)\n", irq_nr);
113
108 spin_lock_irqsave(&i8259_lock, flags); 114 spin_lock_irqsave(&i8259_lock, flags);
109 irq_nr -= i8259_pic_irq_offset;
110 if (irq_nr < 8) 115 if (irq_nr < 8)
111 cached_21 |= 1 << irq_nr; 116 cached_21 |= 1 << irq_nr;
112 else 117 else
@@ -119,8 +124,9 @@ static void i8259_unmask_irq(unsigned int irq_nr)
119{ 124{
120 unsigned long flags; 125 unsigned long flags;
121 126
127 pr_debug("i8259_unmask_irq(%d)\n", irq_nr);
128
122 spin_lock_irqsave(&i8259_lock, flags); 129 spin_lock_irqsave(&i8259_lock, flags);
123 irq_nr -= i8259_pic_irq_offset;
124 if (irq_nr < 8) 130 if (irq_nr < 8)
125 cached_21 &= ~(1 << irq_nr); 131 cached_21 &= ~(1 << irq_nr);
126 else 132 else
@@ -129,19 +135,11 @@ static void i8259_unmask_irq(unsigned int irq_nr)
129 spin_unlock_irqrestore(&i8259_lock, flags); 135 spin_unlock_irqrestore(&i8259_lock, flags);
130} 136}
131 137
132static void i8259_end_irq(unsigned int irq) 138static struct irq_chip i8259_pic = {
133{ 139 .typename = " i8259 ",
134 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) 140 .mask = i8259_mask_irq,
135 && irq_desc[irq].action) 141 .unmask = i8259_unmask_irq,
136 i8259_unmask_irq(irq); 142 .mask_ack = i8259_mask_and_ack_irq,
137}
138
139struct hw_interrupt_type i8259_pic = {
140 .typename = " i8259 ",
141 .enable = i8259_unmask_irq,
142 .disable = i8259_mask_irq,
143 .ack = i8259_mask_and_ack_irq,
144 .end = i8259_end_irq,
145}; 143};
146 144
147static struct resource pic1_iores = { 145static struct resource pic1_iores = {
@@ -165,25 +163,84 @@ static struct resource pic_edgectrl_iores = {
165 .flags = IORESOURCE_BUSY, 163 .flags = IORESOURCE_BUSY,
166}; 164};
167 165
168static struct irqaction i8259_irqaction = { 166static int i8259_host_match(struct irq_host *h, struct device_node *node)
169 .handler = no_action, 167{
170 .flags = IRQF_DISABLED, 168 return i8259_node == NULL || i8259_node == node;
171 .mask = CPU_MASK_NONE, 169}
172 .name = "82c59 secondary cascade", 170
171static int i8259_host_map(struct irq_host *h, unsigned int virq,
172 irq_hw_number_t hw, unsigned int flags)
173{
174 pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);
175
176 /* We block the internal cascade */
177 if (hw == 2)
178 get_irq_desc(virq)->status |= IRQ_NOREQUEST;
179
180	/* We only use level handling for now; we might want to
181	 * be more cautious here, but this works for the moment
182 */
183 get_irq_desc(virq)->status |= IRQ_LEVEL;
184 set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq);
185 return 0;
186}
187
188static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
189{
190 /* Make sure irq is masked in hardware */
191 i8259_mask_irq(virq);
192
193 /* remove chip and handler */
194 set_irq_chip_and_handler(virq, NULL, NULL);
195
196 /* Make sure it's completed */
197 synchronize_irq(virq);
198}
199
200static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
201 u32 *intspec, unsigned int intsize,
202 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
203{
204 static unsigned char map_isa_senses[4] = {
205 IRQ_TYPE_LEVEL_LOW,
206 IRQ_TYPE_LEVEL_HIGH,
207 IRQ_TYPE_EDGE_FALLING,
208 IRQ_TYPE_EDGE_RISING,
209 };
210
211 *out_hwirq = intspec[0];
212 if (intsize > 1 && intspec[1] < 4)
213 *out_flags = map_isa_senses[intspec[1]];
214 else
215 *out_flags = IRQ_TYPE_NONE;
216
217 return 0;
218}
219
220static struct irq_host_ops i8259_host_ops = {
221 .match = i8259_host_match,
222 .map = i8259_host_map,
223 .unmap = i8259_host_unmap,
224 .xlate = i8259_host_xlate,
173}; 225};
174 226
175/*	227/**
176 * i8259_init() 228 * i8259_init - Initialize the legacy controller
177 * intack_addr - PCI interrupt acknowledge (real) address which will return 229 * @node: device node of the legacy PIC (can be NULL, but then, it will match
178 * the active irq from the 8259 230 * all interrupts, so beware)
231 * @intack_addr: PCI interrupt acknowledge (real) address which will return
232 * the active irq from the 8259
179 */ 233 */
180void __init i8259_init(unsigned long intack_addr, int offset) 234void i8259_init(struct device_node *node, unsigned long intack_addr)
181{ 235{
182 unsigned long flags; 236 unsigned long flags;
183 int i;
184 237
238 /* initialize the controller */
185 spin_lock_irqsave(&i8259_lock, flags); 239 spin_lock_irqsave(&i8259_lock, flags);
186 i8259_pic_irq_offset = offset; 240
241 /* Mask all first */
242 outb(0xff, 0xA1);
243 outb(0xff, 0x21);
187 244
188 /* init master interrupt controller */ 245 /* init master interrupt controller */
189 outb(0x11, 0x20); /* Start init sequence */ 246 outb(0x11, 0x20); /* Start init sequence */
@@ -197,21 +254,36 @@ void __init i8259_init(unsigned long intack_addr, int offset)
197 outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */ 254 outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
198 outb(0x01, 0xA1); /* Select 8086 mode */ 255 outb(0x01, 0xA1); /* Select 8086 mode */
199 256
257 /* That thing is slow */
258 udelay(100);
259
200 /* always read ISR */ 260 /* always read ISR */
201 outb(0x0B, 0x20); 261 outb(0x0B, 0x20);
202 outb(0x0B, 0xA0); 262 outb(0x0B, 0xA0);
203 263
204 /* Mask all interrupts */ 264 /* Unmask the internal cascade */
265 cached_21 &= ~(1 << 2);
266
267 /* Set interrupt masks */
205 outb(cached_A1, 0xA1); 268 outb(cached_A1, 0xA1);
206 outb(cached_21, 0x21); 269 outb(cached_21, 0x21);
207 270
208 spin_unlock_irqrestore(&i8259_lock, flags); 271 spin_unlock_irqrestore(&i8259_lock, flags);
209 272
210 for (i = 0; i < NUM_ISA_INTERRUPTS; ++i) 273 /* create a legacy host */
211 irq_desc[offset + i].chip = &i8259_pic; 274 if (node)
275 i8259_node = of_node_get(node);
276 i8259_host = irq_alloc_host(IRQ_HOST_MAP_LEGACY, 0, &i8259_host_ops, 0);
277 if (i8259_host == NULL) {
278 printk(KERN_ERR "i8259: failed to allocate irq host !\n");
279 return;
280 }
212 281
213 /* reserve our resources */ 282 /* reserve our resources */
214	setup_irq(offset + 2, &i8259_irqaction);	283	/* XXX should we keep doing this? It seems to cause problems
284	 * with later requests for PCI IO resources in that range...
285	 * need to look into it.
286 */
215 request_resource(&ioport_resource, &pic1_iores); 287 request_resource(&ioport_resource, &pic1_iores);
216 request_resource(&ioport_resource, &pic2_iores); 288 request_resource(&ioport_resource, &pic2_iores);
217 request_resource(&ioport_resource, &pic_edgectrl_iores); 289 request_resource(&ioport_resource, &pic_edgectrl_iores);
@@ -219,4 +291,5 @@ void __init i8259_init(unsigned long intack_addr, int offset)
219 if (intack_addr != 0) 291 if (intack_addr != 0)
220 pci_intack = ioremap(intack_addr, 1); 292 pci_intack = ioremap(intack_addr, 1);
221 293
294 printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
222} 295}
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7e469358895f..7d31d7cc392d 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -100,8 +100,8 @@ static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
100 100
101 if (mpic->flags & MPIC_PRIMARY) 101 if (mpic->flags & MPIC_PRIMARY)
102 cpu = hard_smp_processor_id(); 102 cpu = hard_smp_processor_id();
103 103 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,
104 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg); 104 mpic->cpuregs[cpu], reg);
105} 105}
106 106
107static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) 107static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
@@ -340,27 +340,19 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
340#endif /* CONFIG_MPIC_BROKEN_U3 */ 340#endif /* CONFIG_MPIC_BROKEN_U3 */
341 341
342 342
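/* Translate a linux virq back to the MPIC hardware source number using
 * the global irq_map[] table.
 */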
343#define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
344
343/* Find an mpic associated with a given linux interrupt */ 345/* Find an mpic associated with a given linux interrupt */
344static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi) 346static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
345{ 347{
346 struct mpic *mpic = mpics; 348 unsigned int src = mpic_irq_to_hw(irq);
347 349
348 while(mpic) { 350 if (irq < NUM_ISA_INTERRUPTS)
349 /* search IPIs first since they may override the main interrupts */ 351 return NULL;
350 if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) { 352 if (is_ipi)
351 if (is_ipi) 353 *is_ipi = (src >= MPIC_VEC_IPI_0 && src <= MPIC_VEC_IPI_3);
352 *is_ipi = 1; 354
353 return mpic; 355 return irq_desc[irq].chip_data;
354 }
355 if (irq >= mpic->irq_offset &&
356 irq < (mpic->irq_offset + mpic->irq_count)) {
357 if (is_ipi)
358 *is_ipi = 0;
359 return mpic;
360 }
361 mpic = mpic -> next;
362 }
363 return NULL;
364} 356}
365 357
366/* Convert a cpu mask from logical to physical cpu numbers. */ 358/* Convert a cpu mask from logical to physical cpu numbers. */
@@ -378,14 +370,14 @@ static inline u32 mpic_physmask(u32 cpumask)
378/* Get the mpic structure from the IPI number */ 370/* Get the mpic structure from the IPI number */
379static inline struct mpic * mpic_from_ipi(unsigned int ipi) 371static inline struct mpic * mpic_from_ipi(unsigned int ipi)
380{ 372{
381 return container_of(irq_desc[ipi].chip, struct mpic, hc_ipi); 373 return irq_desc[ipi].chip_data;
382} 374}
383#endif 375#endif
384 376
385/* Get the mpic structure from the irq number */ 377/* Get the mpic structure from the irq number */
386static inline struct mpic * mpic_from_irq(unsigned int irq) 378static inline struct mpic * mpic_from_irq(unsigned int irq)
387{ 379{
388 return container_of(irq_desc[irq].chip, struct mpic, hc_irq); 380 return irq_desc[irq].chip_data;
389} 381}
390 382
391/* Send an EOI */ 383/* Send an EOI */
@@ -398,9 +390,7 @@ static inline void mpic_eoi(struct mpic *mpic)
398#ifdef CONFIG_SMP 390#ifdef CONFIG_SMP
399static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 391static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
400{ 392{
401 struct mpic *mpic = dev_id; 393 smp_message_recv(mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0, regs);
402
403 smp_message_recv(irq - mpic->ipi_offset, regs);
404 return IRQ_HANDLED; 394 return IRQ_HANDLED;
405} 395}
406#endif /* CONFIG_SMP */ 396#endif /* CONFIG_SMP */
@@ -410,11 +400,11 @@ static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
410 */ 400 */
411 401
412 402
413static void mpic_enable_irq(unsigned int irq) 403static void mpic_unmask_irq(unsigned int irq)
414{ 404{
415 unsigned int loops = 100000; 405 unsigned int loops = 100000;
416 struct mpic *mpic = mpic_from_irq(irq); 406 struct mpic *mpic = mpic_from_irq(irq);
417 unsigned int src = irq - mpic->irq_offset; 407 unsigned int src = mpic_irq_to_hw(irq);
418 408
419 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); 409 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
420 410
@@ -429,39 +419,13 @@ static void mpic_enable_irq(unsigned int irq)
429 break; 419 break;
430 } 420 }
431 } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK); 421 } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
432
433#ifdef CONFIG_MPIC_BROKEN_U3
434 if (mpic->flags & MPIC_BROKEN_U3) {
435 unsigned int src = irq - mpic->irq_offset;
436 if (mpic_is_ht_interrupt(mpic, src) &&
437 (irq_desc[irq].status & IRQ_LEVEL))
438 mpic_ht_end_irq(mpic, src);
439 }
440#endif /* CONFIG_MPIC_BROKEN_U3 */
441}
442
443static unsigned int mpic_startup_irq(unsigned int irq)
444{
445#ifdef CONFIG_MPIC_BROKEN_U3
446 struct mpic *mpic = mpic_from_irq(irq);
447 unsigned int src = irq - mpic->irq_offset;
448#endif /* CONFIG_MPIC_BROKEN_U3 */
449
450 mpic_enable_irq(irq);
451
452#ifdef CONFIG_MPIC_BROKEN_U3
453 if (mpic_is_ht_interrupt(mpic, src))
454 mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
455#endif /* CONFIG_MPIC_BROKEN_U3 */
456
457 return 0;
458} 422}
459 423
460static void mpic_disable_irq(unsigned int irq) 424static void mpic_mask_irq(unsigned int irq)
461{ 425{
462 unsigned int loops = 100000; 426 unsigned int loops = 100000;
463 struct mpic *mpic = mpic_from_irq(irq); 427 struct mpic *mpic = mpic_from_irq(irq);
464 unsigned int src = irq - mpic->irq_offset; 428 unsigned int src = mpic_irq_to_hw(irq);
465 429
466 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); 430 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
467 431
@@ -478,23 +442,58 @@ static void mpic_disable_irq(unsigned int irq)
478 } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK)); 442 } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
479} 443}
480 444
481static void mpic_shutdown_irq(unsigned int irq) 445static void mpic_end_irq(unsigned int irq)
482{ 446{
447 struct mpic *mpic = mpic_from_irq(irq);
448
449#ifdef DEBUG_IRQ
450 DBG("%s: end_irq: %d\n", mpic->name, irq);
451#endif
452	/* We always EOI on end_irq() even for edge interrupts, since that
453	 * should only lower the priority; the MPIC should have properly
454	 * latched any further edge interrupt coming in anyway
455	 */
456
457 mpic_eoi(mpic);
458}
459
483#ifdef CONFIG_MPIC_BROKEN_U3 460#ifdef CONFIG_MPIC_BROKEN_U3
461
462static void mpic_unmask_ht_irq(unsigned int irq)
463{
484 struct mpic *mpic = mpic_from_irq(irq); 464 struct mpic *mpic = mpic_from_irq(irq);
485 unsigned int src = irq - mpic->irq_offset; 465 unsigned int src = mpic_irq_to_hw(irq);
486 466
487 if (mpic_is_ht_interrupt(mpic, src)) 467 mpic_unmask_irq(irq);
488 mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
489 468
490#endif /* CONFIG_MPIC_BROKEN_U3 */ 469 if (irq_desc[irq].status & IRQ_LEVEL)
470 mpic_ht_end_irq(mpic, src);
471}
472
473static unsigned int mpic_startup_ht_irq(unsigned int irq)
474{
475 struct mpic *mpic = mpic_from_irq(irq);
476 unsigned int src = mpic_irq_to_hw(irq);
477
478 mpic_unmask_irq(irq);
479 mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
491 480
492 mpic_disable_irq(irq); 481 return 0;
493} 482}
494 483
495static void mpic_end_irq(unsigned int irq) 484static void mpic_shutdown_ht_irq(unsigned int irq)
496{ 485{
497 struct mpic *mpic = mpic_from_irq(irq); 486 struct mpic *mpic = mpic_from_irq(irq);
487 unsigned int src = mpic_irq_to_hw(irq);
488
489 mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
490 mpic_mask_irq(irq);
491}
492
493static void mpic_end_ht_irq(unsigned int irq)
494{
495 struct mpic *mpic = mpic_from_irq(irq);
496 unsigned int src = mpic_irq_to_hw(irq);
498 497
499#ifdef DEBUG_IRQ 498#ifdef DEBUG_IRQ
500 DBG("%s: end_irq: %d\n", mpic->name, irq); 499 DBG("%s: end_irq: %d\n", mpic->name, irq);
@@ -504,30 +503,25 @@ static void mpic_end_irq(unsigned int irq)
504 * latched another edge interrupt coming in anyway 503 * latched another edge interrupt coming in anyway
505 */ 504 */
506 505
507#ifdef CONFIG_MPIC_BROKEN_U3 506 if (irq_desc[irq].status & IRQ_LEVEL)
508 if (mpic->flags & MPIC_BROKEN_U3) { 507 mpic_ht_end_irq(mpic, src);
509 unsigned int src = irq - mpic->irq_offset;
510 if (mpic_is_ht_interrupt(mpic, src) &&
511 (irq_desc[irq].status & IRQ_LEVEL))
512 mpic_ht_end_irq(mpic, src);
513 }
514#endif /* CONFIG_MPIC_BROKEN_U3 */
515
516 mpic_eoi(mpic); 508 mpic_eoi(mpic);
517} 509}
518 510
511#endif /* CONFIG_MPIC_BROKEN_U3 */
512
519#ifdef CONFIG_SMP 513#ifdef CONFIG_SMP
520 514
521static void mpic_enable_ipi(unsigned int irq) 515static void mpic_unmask_ipi(unsigned int irq)
522{ 516{
523 struct mpic *mpic = mpic_from_ipi(irq); 517 struct mpic *mpic = mpic_from_ipi(irq);
524 unsigned int src = irq - mpic->ipi_offset; 518 unsigned int src = mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0;
525 519
526 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); 520 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
527 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); 521 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
528} 522}
529 523
530static void mpic_disable_ipi(unsigned int irq) 524static void mpic_mask_ipi(unsigned int irq)
531{ 525{
532 /* NEVER disable an IPI... that's just plain wrong! */ 526 /* NEVER disable an IPI... that's just plain wrong! */
533} 527}
@@ -551,29 +545,176 @@ static void mpic_end_ipi(unsigned int irq)
551static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) 545static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
552{ 546{
553 struct mpic *mpic = mpic_from_irq(irq); 547 struct mpic *mpic = mpic_from_irq(irq);
548 unsigned int src = mpic_irq_to_hw(irq);
554 549
555 cpumask_t tmp; 550 cpumask_t tmp;
556 551
557 cpus_and(tmp, cpumask, cpu_online_map); 552 cpus_and(tmp, cpumask, cpu_online_map);
558 553
559 mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION, 554 mpic_irq_write(src, MPIC_IRQ_DESTINATION,
560 mpic_physmask(cpus_addr(tmp)[0])); 555 mpic_physmask(cpus_addr(tmp)[0]));
561} 556}
562 557
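/* Convert generic IRQ_TYPE_* sense flags into MPIC_VECPRI sense/polarity
 * bits, reporting via *level whether the source is level-triggered.
 */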
558static unsigned int mpic_flags_to_vecpri(unsigned int flags, int *level)
559{
560 unsigned int vecpri;
561
562 /* Now convert sense value */
563 switch(flags & IRQ_TYPE_SENSE_MASK) {
564 case IRQ_TYPE_EDGE_RISING:
565 vecpri = MPIC_VECPRI_SENSE_EDGE |
566 MPIC_VECPRI_POLARITY_POSITIVE;
567 *level = 0;
568 break;
569 case IRQ_TYPE_EDGE_FALLING:
570 vecpri = MPIC_VECPRI_SENSE_EDGE |
571 MPIC_VECPRI_POLARITY_NEGATIVE;
572 *level = 0;
573 break;
574 case IRQ_TYPE_LEVEL_HIGH:
575 vecpri = MPIC_VECPRI_SENSE_LEVEL |
576 MPIC_VECPRI_POLARITY_POSITIVE;
577 *level = 1;
578 break;
579 case IRQ_TYPE_LEVEL_LOW:
580 default:
581 vecpri = MPIC_VECPRI_SENSE_LEVEL |
582 MPIC_VECPRI_POLARITY_NEGATIVE;
583 *level = 1;
584 }
585 return vecpri;
586}
587
588static struct irq_chip mpic_irq_chip = {
589 .mask = mpic_mask_irq,
590 .unmask = mpic_unmask_irq,
591 .eoi = mpic_end_irq,
592};
593
594#ifdef CONFIG_SMP
595static struct irq_chip mpic_ipi_chip = {
596 .mask = mpic_mask_ipi,
597 .unmask = mpic_unmask_ipi,
598 .eoi = mpic_end_ipi,
599};
600#endif /* CONFIG_SMP */
601
602#ifdef CONFIG_MPIC_BROKEN_U3
603static struct irq_chip mpic_irq_ht_chip = {
604 .startup = mpic_startup_ht_irq,
605 .shutdown = mpic_shutdown_ht_irq,
606 .mask = mpic_mask_irq,
607 .unmask = mpic_unmask_ht_irq,
608 .eoi = mpic_end_ht_irq,
609};
610#endif /* CONFIG_MPIC_BROKEN_U3 */
611
612
613static int mpic_host_match(struct irq_host *h, struct device_node *node)
614{
615 struct mpic *mpic = h->host_data;
616
617 /* Exact match, unless mpic node is NULL */
618 return mpic->of_node == NULL || mpic->of_node == node;
619}
620
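/* Bind a new linux virq to an MPIC hardware source: pick the right chip
 * (IPI, HT-fixup or normal), derive the sense from the flags or from the
 * board's default senses, and program the VECPRI register accordingly.
 */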
621static int mpic_host_map(struct irq_host *h, unsigned int virq,
622 irq_hw_number_t hw, unsigned int flags)
623{
624 struct irq_desc *desc = get_irq_desc(virq);
625 struct irq_chip *chip;
626 struct mpic *mpic = h->host_data;
627 unsigned int vecpri = MPIC_VECPRI_SENSE_LEVEL |
628 MPIC_VECPRI_POLARITY_NEGATIVE;
629 int level;
630
631 pr_debug("mpic: map virq %d, hwirq 0x%lx, flags: 0x%x\n",
632 virq, hw, flags);
633
634 if (hw == MPIC_VEC_SPURRIOUS)
635 return -EINVAL;
636#ifdef CONFIG_SMP
637 else if (hw >= MPIC_VEC_IPI_0) {
638 WARN_ON(!(mpic->flags & MPIC_PRIMARY));
639
640 pr_debug("mpic: mapping as IPI\n");
641 set_irq_chip_data(virq, mpic);
642 set_irq_chip_and_handler(virq, &mpic->hc_ipi,
643 handle_percpu_irq);
644 return 0;
645 }
646#endif /* CONFIG_SMP */
647
648 if (hw >= mpic->irq_count)
649 return -EINVAL;
650
651 /* If no sense provided, check default sense array */
652 if (((flags & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE) &&
653 mpic->senses && hw < mpic->senses_count)
654 flags |= mpic->senses[hw];
655
656 vecpri = mpic_flags_to_vecpri(flags, &level);
657 if (level)
658 desc->status |= IRQ_LEVEL;
659 chip = &mpic->hc_irq;
660
661#ifdef CONFIG_MPIC_BROKEN_U3
662 /* Check for HT interrupts, override vecpri */
663 if (mpic_is_ht_interrupt(mpic, hw)) {
664 vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
665 MPIC_VECPRI_POLARITY_MASK);
666 vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
667 chip = &mpic->hc_ht_irq;
668 }
669#endif
670
671 /* Reconfigure irq */
672 vecpri |= MPIC_VECPRI_MASK | hw | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
673 mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri);
674
675 pr_debug("mpic: mapping as IRQ\n");
676
677 set_irq_chip_data(virq, mpic);
678 set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq);
679 return 0;
680}
681
682static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
683 u32 *intspec, unsigned int intsize,
684 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
685
686{
687 static unsigned char map_mpic_senses[4] = {
688 IRQ_TYPE_EDGE_RISING,
689 IRQ_TYPE_LEVEL_LOW,
690 IRQ_TYPE_LEVEL_HIGH,
691 IRQ_TYPE_EDGE_FALLING,
692 };
693
694 *out_hwirq = intspec[0];
695 if (intsize > 1 && intspec[1] < 4)
696 *out_flags = map_mpic_senses[intspec[1]];
697 else
698 *out_flags = IRQ_TYPE_NONE;
699
700 return 0;
701}
702
703static struct irq_host_ops mpic_host_ops = {
704 .match = mpic_host_match,
705 .map = mpic_host_map,
706 .xlate = mpic_host_xlate,
707};
563 708
564/* 709/*
565 * Exported functions 710 * Exported functions
566 */ 711 */
567 712
568 713struct mpic * __init mpic_alloc(struct device_node *node,
569struct mpic * __init mpic_alloc(unsigned long phys_addr, 714 unsigned long phys_addr,
570 unsigned int flags, 715 unsigned int flags,
571 unsigned int isu_size, 716 unsigned int isu_size,
572 unsigned int irq_offset,
573 unsigned int irq_count, 717 unsigned int irq_count,
574 unsigned int ipi_offset,
575 unsigned char *senses,
576 unsigned int senses_count,
577 const char *name) 718 const char *name)
578{ 719{
579 struct mpic *mpic; 720 struct mpic *mpic;
@@ -585,33 +726,38 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
585 if (mpic == NULL) 726 if (mpic == NULL)
586 return NULL; 727 return NULL;
587 728
588
589 memset(mpic, 0, sizeof(struct mpic)); 729 memset(mpic, 0, sizeof(struct mpic));
590 mpic->name = name; 730 mpic->name = name;
731 mpic->of_node = node ? of_node_get(node) : NULL;
591 732
733 mpic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 256,
734 &mpic_host_ops,
735 MPIC_VEC_SPURRIOUS);
736 if (mpic->irqhost == NULL) {
737 of_node_put(node);
738 return NULL;
739 }
740
741 mpic->irqhost->host_data = mpic;
742 mpic->hc_irq = mpic_irq_chip;
592 mpic->hc_irq.typename = name; 743 mpic->hc_irq.typename = name;
593 mpic->hc_irq.startup = mpic_startup_irq;
594 mpic->hc_irq.shutdown = mpic_shutdown_irq;
595 mpic->hc_irq.enable = mpic_enable_irq;
596 mpic->hc_irq.disable = mpic_disable_irq;
597 mpic->hc_irq.end = mpic_end_irq;
598 if (flags & MPIC_PRIMARY) 744 if (flags & MPIC_PRIMARY)
599 mpic->hc_irq.set_affinity = mpic_set_affinity; 745 mpic->hc_irq.set_affinity = mpic_set_affinity;
746#ifdef CONFIG_MPIC_BROKEN_U3
747 mpic->hc_ht_irq = mpic_irq_ht_chip;
748 mpic->hc_ht_irq.typename = name;
749 if (flags & MPIC_PRIMARY)
750 mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
751#endif /* CONFIG_MPIC_BROKEN_U3 */
600#ifdef CONFIG_SMP 752#ifdef CONFIG_SMP
753 mpic->hc_ipi = mpic_ipi_chip;
601 mpic->hc_ipi.typename = name; 754 mpic->hc_ipi.typename = name;
602 mpic->hc_ipi.enable = mpic_enable_ipi;
603 mpic->hc_ipi.disable = mpic_disable_ipi;
604 mpic->hc_ipi.end = mpic_end_ipi;
605#endif /* CONFIG_SMP */ 755#endif /* CONFIG_SMP */
606 756
607 mpic->flags = flags; 757 mpic->flags = flags;
608 mpic->isu_size = isu_size; 758 mpic->isu_size = isu_size;
609 mpic->irq_offset = irq_offset;
610 mpic->irq_count = irq_count; 759 mpic->irq_count = irq_count;
611 mpic->ipi_offset = ipi_offset;
612 mpic->num_sources = 0; /* so far */ 760 mpic->num_sources = 0; /* so far */
613 mpic->senses = senses;
614 mpic->senses_count = senses_count;
615 761
616 /* Map the global registers */ 762 /* Map the global registers */
617 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000); 763 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
@@ -679,8 +825,10 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
679 mpic->next = mpics; 825 mpic->next = mpics;
680 mpics = mpic; 826 mpics = mpic;
681 827
682 if (flags & MPIC_PRIMARY) 828 if (flags & MPIC_PRIMARY) {
683 mpic_primary = mpic; 829 mpic_primary = mpic;
830 irq_set_default_host(mpic->irqhost);
831 }
684 832
685 return mpic; 833 return mpic;
686} 834}
@@ -697,26 +845,10 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
697 mpic->num_sources = isu_first + mpic->isu_size; 845 mpic->num_sources = isu_first + mpic->isu_size;
698} 846}
699 847
700void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler, 848void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
701 void *data)
702{ 849{
703 struct mpic *mpic = mpic_find(irq, NULL); 850 mpic->senses = senses;
704 unsigned long flags; 851 mpic->senses_count = count;
705
706 /* Synchronization here is a bit dodgy, so don't try to replace cascade
707 * interrupts on the fly too often ... but normally it's set up at boot.
708 */
709 spin_lock_irqsave(&mpic_lock, flags);
710 if (mpic->cascade)
711 mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
712 mpic->cascade = NULL;
713 wmb();
714 mpic->cascade_vec = irq - mpic->irq_offset;
715 mpic->cascade_data = data;
716 wmb();
717 mpic->cascade = handler;
718 mpic_enable_irq(irq);
719 spin_unlock_irqrestore(&mpic_lock, flags);
720} 852}
721 853
722void __init mpic_init(struct mpic *mpic) 854void __init mpic_init(struct mpic *mpic)
@@ -724,6 +856,11 @@ void __init mpic_init(struct mpic *mpic)
724 int i; 856 int i;
725 857
726 BUG_ON(mpic->num_sources == 0); 858 BUG_ON(mpic->num_sources == 0);
859 WARN_ON(mpic->num_sources > MPIC_VEC_IPI_0);
860
861 /* Sanitize source count */
862 if (mpic->num_sources > MPIC_VEC_IPI_0)
863 mpic->num_sources = MPIC_VEC_IPI_0;
727 864
728 printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); 865 printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
729 866
@@ -747,12 +884,6 @@ void __init mpic_init(struct mpic *mpic)
747 MPIC_VECPRI_MASK | 884 MPIC_VECPRI_MASK |
748 (10 << MPIC_VECPRI_PRIORITY_SHIFT) | 885 (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
749 (MPIC_VEC_IPI_0 + i)); 886 (MPIC_VEC_IPI_0 + i));
750#ifdef CONFIG_SMP
751 if (!(mpic->flags & MPIC_PRIMARY))
752 continue;
753 irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
754 irq_desc[mpic->ipi_offset+i].chip = &mpic->hc_ipi;
755#endif /* CONFIG_SMP */
756 } 887 }
757 888
758 /* Initialize interrupt sources */ 889 /* Initialize interrupt sources */
@@ -763,31 +894,21 @@ void __init mpic_init(struct mpic *mpic)
763 /* Do the HT PIC fixups on U3 broken mpic */ 894 /* Do the HT PIC fixups on U3 broken mpic */
764 DBG("MPIC flags: %x\n", mpic->flags); 895 DBG("MPIC flags: %x\n", mpic->flags);
765 if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY)) 896 if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
766 mpic_scan_ht_pics(mpic); 897 mpic_scan_ht_pics(mpic);
767#endif /* CONFIG_MPIC_BROKEN_U3 */ 898#endif /* CONFIG_MPIC_BROKEN_U3 */
768 899
769 for (i = 0; i < mpic->num_sources; i++) { 900 for (i = 0; i < mpic->num_sources; i++) {
770 /* start with vector = source number, and masked */ 901 /* start with vector = source number, and masked */
771 u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); 902 u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
772 int level = 0; 903 int level = 1;
773 904
774 /* if it's an IPI, we skip it */
775 if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) &&
776 (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4))
777 continue;
778
779 /* do senses munging */ 905 /* do senses munging */
780 if (mpic->senses && i < mpic->senses_count) { 906 if (mpic->senses && i < mpic->senses_count)
781 if (mpic->senses[i] & IRQ_SENSE_LEVEL) 907 vecpri = mpic_flags_to_vecpri(mpic->senses[i],
782 vecpri |= MPIC_VECPRI_SENSE_LEVEL; 908 &level);
783 if (mpic->senses[i] & IRQ_POLARITY_POSITIVE) 909 else
784 vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
785 } else
786 vecpri |= MPIC_VECPRI_SENSE_LEVEL; 910 vecpri |= MPIC_VECPRI_SENSE_LEVEL;
787 911
788 /* remember if it was a level interrupts */
789 level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);
790
791 /* deal with broken U3 */ 912 /* deal with broken U3 */
792 if (mpic->flags & MPIC_BROKEN_U3) { 913 if (mpic->flags & MPIC_BROKEN_U3) {
793#ifdef CONFIG_MPIC_BROKEN_U3 914#ifdef CONFIG_MPIC_BROKEN_U3
@@ -808,12 +929,6 @@ void __init mpic_init(struct mpic *mpic)
808 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); 929 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
809 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 930 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
810 1 << hard_smp_processor_id()); 931 1 << hard_smp_processor_id());
811
812 /* init linux descriptors */
813 if (i < mpic->irq_count) {
814 irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
815 irq_desc[mpic->irq_offset+i].chip = &mpic->hc_irq;
816 }
817 } 932 }
818 933
819	/* Init spurious vector */	934
@@ -854,19 +969,20 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
854{ 969{
855 int is_ipi; 970 int is_ipi;
856 struct mpic *mpic = mpic_find(irq, &is_ipi); 971 struct mpic *mpic = mpic_find(irq, &is_ipi);
972 unsigned int src = mpic_irq_to_hw(irq);
857 unsigned long flags; 973 unsigned long flags;
858 u32 reg; 974 u32 reg;
859 975
860 spin_lock_irqsave(&mpic_lock, flags); 976 spin_lock_irqsave(&mpic_lock, flags);
861 if (is_ipi) { 977 if (is_ipi) {
862 reg = mpic_ipi_read(irq - mpic->ipi_offset) & 978 reg = mpic_ipi_read(src - MPIC_VEC_IPI_0) &
863 ~MPIC_VECPRI_PRIORITY_MASK; 979 ~MPIC_VECPRI_PRIORITY_MASK;
864 mpic_ipi_write(irq - mpic->ipi_offset, 980 mpic_ipi_write(src - MPIC_VEC_IPI_0,
865 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 981 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
866 } else { 982 } else {
867 reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI) 983 reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI)
868 & ~MPIC_VECPRI_PRIORITY_MASK; 984 & ~MPIC_VECPRI_PRIORITY_MASK;
869 mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI, 985 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
870 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 986 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
871 } 987 }
872 spin_unlock_irqrestore(&mpic_lock, flags); 988 spin_unlock_irqrestore(&mpic_lock, flags);
@@ -876,14 +992,15 @@ unsigned int mpic_irq_get_priority(unsigned int irq)
876{ 992{
877 int is_ipi; 993 int is_ipi;
878 struct mpic *mpic = mpic_find(irq, &is_ipi); 994 struct mpic *mpic = mpic_find(irq, &is_ipi);
995 unsigned int src = mpic_irq_to_hw(irq);
879 unsigned long flags; 996 unsigned long flags;
880 u32 reg; 997 u32 reg;
881 998
882 spin_lock_irqsave(&mpic_lock, flags); 999 spin_lock_irqsave(&mpic_lock, flags);
883 if (is_ipi) 1000 if (is_ipi)
884		reg = mpic_ipi_read(irq - mpic->ipi_offset);	1001		reg = mpic_ipi_read(src - MPIC_VEC_IPI_0);
885 else 1002 else
886 reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI); 1003 reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI);
887 spin_unlock_irqrestore(&mpic_lock, flags); 1004 spin_unlock_irqrestore(&mpic_lock, flags);
888 return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; 1005 return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
889} 1006}
@@ -978,37 +1095,20 @@ void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
978 mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); 1095 mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
979} 1096}
980 1097
981int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs) 1098unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
982{ 1099{
983 u32 irq; 1100 u32 src;
984 1101
985 irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; 1102 src = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
986#ifdef DEBUG_LOW 1103#ifdef DEBUG_LOW
987 DBG("%s: get_one_irq(): %d\n", mpic->name, irq); 1104 DBG("%s: get_one_irq(): %d\n", mpic->name, src);
988#endif 1105#endif
989 if (mpic->cascade && irq == mpic->cascade_vec) { 1106 if (unlikely(src == MPIC_VEC_SPURRIOUS))
990#ifdef DEBUG_LOW 1107 return NO_IRQ;
991 DBG("%s: cascading ...\n", mpic->name); 1108 return irq_linear_revmap(mpic->irqhost, src);
992#endif
993 irq = mpic->cascade(regs, mpic->cascade_data);
994 mpic_eoi(mpic);
995 return irq;
996 }
997 if (unlikely(irq == MPIC_VEC_SPURRIOUS))
998 return -1;
999 if (irq < MPIC_VEC_IPI_0) {
1000#ifdef DEBUG_IRQ
1001 DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
1002#endif
1003 return irq + mpic->irq_offset;
1004 }
1005#ifdef DEBUG_IPI
1006 DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
1007#endif
1008 return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
1009} 1109}
1010 1110
1011int mpic_get_irq(struct pt_regs *regs) 1111unsigned int mpic_get_irq(struct pt_regs *regs)
1012{ 1112{
1013 struct mpic *mpic = mpic_primary; 1113 struct mpic *mpic = mpic_primary;
1014 1114
@@ -1022,25 +1122,27 @@ int mpic_get_irq(struct pt_regs *regs)
1022void mpic_request_ipis(void) 1122void mpic_request_ipis(void)
1023{ 1123{
1024 struct mpic *mpic = mpic_primary; 1124 struct mpic *mpic = mpic_primary;
1025 1125 int i;
1126 static char *ipi_names[] = {
1127 "IPI0 (call function)",
1128 "IPI1 (reschedule)",
1129 "IPI2 (unused)",
1130 "IPI3 (debugger break)",
1131 };
1026 BUG_ON(mpic == NULL); 1132 BUG_ON(mpic == NULL);
1027
1028 printk("requesting IPIs ... \n");
1029 1133
1030 /* 1134 printk(KERN_INFO "mpic: requesting IPIs ... \n");
1031 * IPIs are marked IRQF_DISABLED as they must run with irqs 1135
1032 * disabled 1136 for (i = 0; i < 4; i++) {
1033 */ 1137 unsigned int vipi = irq_create_mapping(mpic->irqhost,
1034 request_irq(mpic->ipi_offset+0, mpic_ipi_action, IRQF_DISABLED, 1138 MPIC_VEC_IPI_0 + i, 0);
1035 "IPI0 (call function)", mpic); 1139 if (vipi == NO_IRQ) {
1036 request_irq(mpic->ipi_offset+1, mpic_ipi_action, IRQF_DISABLED, 1140 printk(KERN_ERR "Failed to map IPI %d\n", i);
1037 "IPI1 (reschedule)", mpic); 1141 break;
1038 request_irq(mpic->ipi_offset+2, mpic_ipi_action, IRQF_DISABLED, 1142 }
1039 "IPI2 (unused)", mpic); 1143 request_irq(vipi, mpic_ipi_action, IRQF_DISABLED,
1040 request_irq(mpic->ipi_offset+3, mpic_ipi_action, IRQF_DISABLED, 1144 ipi_names[i], mpic);
1041 "IPI3 (debugger break)", mpic); 1145 }
1042
1043 printk("IPIs requested... \n");
1044} 1146}
1045 1147
1046void smp_mpic_message_pass(int target, int msg) 1148void smp_mpic_message_pass(int target, int msg)
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
index 490749ca88f9..2497bbc07e76 100644
--- a/arch/ppc/syslib/Makefile
+++ b/arch/ppc/syslib/Makefile
@@ -104,3 +104,5 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc52xx_setup.o mpc52xx_pic.o \
104ifeq ($(CONFIG_PPC_MPC52xx),y) 104ifeq ($(CONFIG_PPC_MPC52xx),y)
105obj-$(CONFIG_PCI) += mpc52xx_pci.o 105obj-$(CONFIG_PCI) += mpc52xx_pci.o
106endif 106endif
107
108obj-$(CONFIG_PPC_I8259) += i8259.o
diff --git a/arch/ppc/syslib/btext.c b/arch/ppc/syslib/btext.c
index 51ab6e90fe25..d11667046f21 100644
--- a/arch/ppc/syslib/btext.c
+++ b/arch/ppc/syslib/btext.c
@@ -6,7 +6,7 @@
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/string.h> 7#include <linux/string.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/version.h> 9#include <linux/utsrelease.h>
10 10
11#include <asm/sections.h> 11#include <asm/sections.h>
12#include <asm/bootx.h> 12#include <asm/bootx.h>
diff --git a/arch/ppc/syslib/i8259.c b/arch/ppc/syslib/i8259.c
new file mode 100644
index 000000000000..eb35353af837
--- /dev/null
+++ b/arch/ppc/syslib/i8259.c
@@ -0,0 +1,212 @@
1/*
2 * i8259 interrupt controller driver.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/init.h>
10#include <linux/ioport.h>
11#include <linux/interrupt.h>
12#include <asm/io.h>
13#include <asm/i8259.h>
14
15static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
16
17static unsigned char cached_8259[2] = { 0xff, 0xff };
18#define cached_A1 (cached_8259[0])
19#define cached_21 (cached_8259[1])
20
21static DEFINE_SPINLOCK(i8259_lock);
22
23static int i8259_pic_irq_offset;
24
25/*
26 * Acknowledge the IRQ using either the PCI host bridge's interrupt
27 * acknowledge feature or poll. How i8259_init() is called determines
28 * which is called. It should be noted that polling is broken on some
29 * IBM and Motorola PReP boxes so we must use the int-ack feature on them.
30 */
31int i8259_irq(struct pt_regs *regs)
32{
33 int irq;
34
35 spin_lock(&i8259_lock);
36
37 /* Either int-ack or poll for the IRQ */
38 if (pci_intack)
39 irq = readb(pci_intack);
40 else {
41 /* Perform an interrupt acknowledge cycle on controller 1. */
42 outb(0x0C, 0x20); /* prepare for poll */
43 irq = inb(0x20) & 7;
44 if (irq == 2 ) {
45 /*
46 * Interrupt is cascaded so perform interrupt
47 * acknowledge on controller 2.
48 */
49 outb(0x0C, 0xA0); /* prepare for poll */
50 irq = (inb(0xA0) & 7) + 8;
51 }
52 }
53
54 if (irq == 7) {
55 /*
56 * This may be a spurious interrupt.
57 *
58 * Read the interrupt status register (ISR). If the most
59 * significant bit is not set then there is no valid
60 * interrupt.
61 */
62 if (!pci_intack)
63 outb(0x0B, 0x20); /* ISR register */
64 if(~inb(0x20) & 0x80)
65 irq = -1;
66 }
67
68 spin_unlock(&i8259_lock);
69 return irq + i8259_pic_irq_offset;
70}
71
72static void i8259_mask_and_ack_irq(unsigned int irq_nr)
73{
74 unsigned long flags;
75
76 spin_lock_irqsave(&i8259_lock, flags);
77 irq_nr -= i8259_pic_irq_offset;
78 if (irq_nr > 7) {
79 cached_A1 |= 1 << (irq_nr-8);
80 inb(0xA1); /* DUMMY */
81 outb(cached_A1, 0xA1);
82 outb(0x20, 0xA0); /* Non-specific EOI */
83 outb(0x20, 0x20); /* Non-specific EOI to cascade */
84 } else {
85 cached_21 |= 1 << irq_nr;
86 inb(0x21); /* DUMMY */
87 outb(cached_21, 0x21);
88 outb(0x20, 0x20); /* Non-specific EOI */
89 }
90 spin_unlock_irqrestore(&i8259_lock, flags);
91}
92
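/* Write both cached mask registers back to the hardware; the irq_nr
 * argument is unused.
 */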
93static void i8259_set_irq_mask(int irq_nr)
94{
95 outb(cached_A1,0xA1);
96 outb(cached_21,0x21);
97}
98
99static void i8259_mask_irq(unsigned int irq_nr)
100{
101 unsigned long flags;
102
103 spin_lock_irqsave(&i8259_lock, flags);
104 irq_nr -= i8259_pic_irq_offset;
105 if (irq_nr < 8)
106 cached_21 |= 1 << irq_nr;
107 else
108 cached_A1 |= 1 << (irq_nr-8);
109 i8259_set_irq_mask(irq_nr);
110 spin_unlock_irqrestore(&i8259_lock, flags);
111}
112
113static void i8259_unmask_irq(unsigned int irq_nr)
114{
115 unsigned long flags;
116
117 spin_lock_irqsave(&i8259_lock, flags);
118 irq_nr -= i8259_pic_irq_offset;
119 if (irq_nr < 8)
120 cached_21 &= ~(1 << irq_nr);
121 else
122 cached_A1 &= ~(1 << (irq_nr-8));
123 i8259_set_irq_mask(irq_nr);
124 spin_unlock_irqrestore(&i8259_lock, flags);
125}
126
127static struct irq_chip i8259_pic = {
128 .typename = " i8259 ",
129 .mask = i8259_mask_irq,
130 .unmask = i8259_unmask_irq,
131 .mask_ack = i8259_mask_and_ack_irq,
132};
133
134static struct resource pic1_iores = {
135 .name = "8259 (master)",
136 .start = 0x20,
137 .end = 0x21,
138 .flags = IORESOURCE_BUSY,
139};
140
141static struct resource pic2_iores = {
142 .name = "8259 (slave)",
143 .start = 0xa0,
144 .end = 0xa1,
145 .flags = IORESOURCE_BUSY,
146};
147
148static struct resource pic_edgectrl_iores = {
149 .name = "8259 edge control",
150 .start = 0x4d0,
151 .end = 0x4d1,
152 .flags = IORESOURCE_BUSY,
153};
154
155static struct irqaction i8259_irqaction = {
156 .handler = no_action,
157 .flags = SA_INTERRUPT,
158 .mask = CPU_MASK_NONE,
159 .name = "82c59 secondary cascade",
160};
161
162/*
163 * i8259_init()
164 * intack_addr - physical (real) address of the PCI interrupt acknowledge
165 * register; reading it returns the active irq from the 8259
166 */
167void __init i8259_init(unsigned long intack_addr, int offset)
168{
169 unsigned long flags;
170 int i;
171
172 spin_lock_irqsave(&i8259_lock, flags);
173 i8259_pic_irq_offset = offset;
174
175 /* init master interrupt controller */
176 outb(0x11, 0x20); /* Start init sequence */
177 outb(0x00, 0x21); /* Vector base */
178 outb(0x04, 0x21); /* edge triggered, Cascade (slave) on IRQ2 */
179 outb(0x01, 0x21); /* Select 8086 mode */
180
181 /* init slave interrupt controller */
182 outb(0x11, 0xA0); /* Start init sequence */
183 outb(0x08, 0xA1); /* Vector base */
184 outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
185 outb(0x01, 0xA1); /* Select 8086 mode */
186
187 /* always read ISR */
188 outb(0x0B, 0x20);
189 outb(0x0B, 0xA0);
190
191 /* Mask all interrupts */
192 outb(cached_A1, 0xA1);
193 outb(cached_21, 0x21);
194
195 spin_unlock_irqrestore(&i8259_lock, flags);
196
197 for (i = 0; i < NUM_ISA_INTERRUPTS; ++i) {
198 set_irq_chip_and_handler(offset + i, &i8259_pic,
199 handle_level_irq);
200 irq_desc[offset + i].status |= IRQ_LEVEL;
201 }
202
203 /* reserve our resources */
204 setup_irq(offset + 2, &i8259_irqaction);
205 request_resource(&ioport_resource, &pic1_iores);
206 request_resource(&ioport_resource, &pic2_iores);
207 request_resource(&ioport_resource, &pic_edgectrl_iores);
208
209 if (intack_addr != 0)
210 pci_intack = ioremap(intack_addr, 1);
211
212}
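
For illustration, a platform's boot code would call i8259_init() once to pick the acknowledge method; a minimal sketch, assuming a made-up host-bridge int-ack address and irq offset (neither is from this patch):

	/* hypothetical platform setup code, not part of this patch */
	#define ISA_IRQ_OFFSET	0	/* assumption: map 8259 irqs 0..15 directly */

	static void __init example_init_IRQ(void)
	{
		/* 0xfef00000: invented int-ack address for this sketch */
		i8259_init(0xfef00000, ISA_IRQ_OFFSET);
	}

Passing 0 as intack_addr (as the code above shows) leaves pci_intack unmapped, so i8259_irq() falls back to polling the controller instead.
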
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 821a141889de..224fbff79969 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -7,6 +7,14 @@ config MMU
7 bool 7 bool
8 default y 8 default y
9 9
10config LOCKDEP_SUPPORT
11 bool
12 default y
13
14config STACKTRACE_SUPPORT
15 bool
16 default y
17
10config RWSEM_GENERIC_SPINLOCK 18config RWSEM_GENERIC_SPINLOCK
11 bool 19 bool
12 20
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index f53b6d5300e5..2283933a9a93 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -1,5 +1,9 @@
1menu "Kernel hacking" 1menu "Kernel hacking"
2 2
3config TRACE_IRQFLAGS_SUPPORT
4 bool
5 default y
6
3source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
4 8
5endmenu 9endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index b3791fb094a8..74ef57dcfa60 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -34,6 +34,11 @@ cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
34cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) 34cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
35cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) 35cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
36 36
37#
38# Prevent tail-call optimizations, to get clearer backtraces:
39#
40cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
41
37# old style option for packed stacks 42# old style option for packed stacks
38ifeq ($(call cc-option-yn,-mkernel-backchain),y) 43ifeq ($(call cc-option-yn,-mkernel-backchain),y)
39cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK 44cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
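
To see why this flag helps backtraces, consider a tail call: with sibling-call optimization gcc turns the call into a jump, the intermediate frame is reused, and the caller vanishes from the backtrace. A small illustrative example (not from the patch):

	int bar(int x);

	int foo(int x)
	{
		/* a tail call: with -foptimize-sibling-calls this becomes a
		 * jump, so foo() no longer appears in a backtrace taken
		 * inside bar(); -fno-optimize-sibling-calls keeps the frame */
		return bar(x + 1);
	}
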
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 9269b5788fac..eabf00a6f770 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
21obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o 21obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
22 22
23obj-$(CONFIG_VIRT_TIMER) += vtime.o 23obj-$(CONFIG_VIRT_TIMER) += vtime.o
24obj-$(CONFIG_STACKTRACE) += stacktrace.o
24 25
25# Kexec part 26# Kexec part
26S390_KEXEC_OBJS := machine_kexec.o crash.o 27S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index d8948c342caf..5b5799ac8f83 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -58,6 +58,21 @@ STACK_SIZE = 1 << STACK_SHIFT
58 58
59#define BASED(name) name-system_call(%r13) 59#define BASED(name) name-system_call(%r13)
60 60
61#ifdef CONFIG_TRACE_IRQFLAGS
62 .macro TRACE_IRQS_ON
63 l %r1,BASED(.Ltrace_irq_on)
64 basr %r14,%r1
65 .endm
66
67 .macro TRACE_IRQS_OFF
68 l %r1,BASED(.Ltrace_irq_off)
69 basr %r14,%r1
70 .endm
71#else
72#define TRACE_IRQS_ON
73#define TRACE_IRQS_OFF
74#endif
75
61/* 76/*
62 * Register usage in interrupt handlers: 77 * Register usage in interrupt handlers:
63 * R9 - pointer to current task structure 78 * R9 - pointer to current task structure
@@ -361,6 +376,7 @@ ret_from_fork:
361 st %r15,SP_R15(%r15) # store stack pointer for new kthread 376 st %r15,SP_R15(%r15) # store stack pointer for new kthread
3620: l %r1,BASED(.Lschedtail) 3770: l %r1,BASED(.Lschedtail)
363 basr %r14,%r1 378 basr %r14,%r1
379 TRACE_IRQS_ON
364 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 380 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
365 b BASED(sysc_return) 381 b BASED(sysc_return)
366 382
@@ -516,6 +532,7 @@ pgm_no_vtime3:
516 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS 532 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
517 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 533 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
518 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 534 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
535 TRACE_IRQS_ON
519 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 536 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
520 b BASED(sysc_do_svc) 537 b BASED(sysc_do_svc)
521 538
@@ -539,9 +556,11 @@ io_int_handler:
539io_no_vtime: 556io_no_vtime:
540#endif 557#endif
541 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 558 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
559 TRACE_IRQS_OFF
542 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ 560 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
543 la %r2,SP_PTREGS(%r15) # address of register-save area 561 la %r2,SP_PTREGS(%r15) # address of register-save area
544 basr %r14,%r1 # branch to standard irq handler 562 basr %r14,%r1 # branch to standard irq handler
563 TRACE_IRQS_ON
545 564
546io_return: 565io_return:
547 tm SP_PSW+1(%r15),0x01 # returning to user ? 566 tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -651,10 +670,12 @@ ext_int_handler:
651ext_no_vtime: 670ext_no_vtime:
652#endif 671#endif
653 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 672 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
673 TRACE_IRQS_OFF
654 la %r2,SP_PTREGS(%r15) # address of register-save area 674 la %r2,SP_PTREGS(%r15) # address of register-save area
655 lh %r3,__LC_EXT_INT_CODE # get interruption code 675 lh %r3,__LC_EXT_INT_CODE # get interruption code
656 l %r1,BASED(.Ldo_extint) 676 l %r1,BASED(.Ldo_extint)
657 basr %r14,%r1 677 basr %r14,%r1
678 TRACE_IRQS_ON
658 b BASED(io_return) 679 b BASED(io_return)
659 680
660__critical_end: 681__critical_end:
@@ -731,8 +752,10 @@ mcck_no_vtime:
731 stosm __SF_EMPTY(%r15),0x04 # turn dat on 752 stosm __SF_EMPTY(%r15),0x04 # turn dat on
732 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 753 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
733 bno BASED(mcck_return) 754 bno BASED(mcck_return)
755 TRACE_IRQS_OFF
734 l %r1,BASED(.Ls390_handle_mcck) 756 l %r1,BASED(.Ls390_handle_mcck)
735 basr %r14,%r1 # call machine check handler 757 basr %r14,%r1 # call machine check handler
758 TRACE_IRQS_ON
736mcck_return: 759mcck_return:
737 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW 760 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
738 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 761 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
@@ -1012,7 +1035,11 @@ cleanup_io_leave_insn:
1012.Lvfork: .long sys_vfork 1035.Lvfork: .long sys_vfork
1013.Lschedtail: .long schedule_tail 1036.Lschedtail: .long schedule_tail
1014.Lsysc_table: .long sys_call_table 1037.Lsysc_table: .long sys_call_table
1015 1038#ifdef CONFIG_TRACE_IRQFLAGS
1039.Ltrace_irq_on:.long trace_hardirqs_on
1040.Ltrace_irq_off:
1041 .long trace_hardirqs_off
1042#endif
1016.Lcritical_start: 1043.Lcritical_start:
1017 .long __critical_start + 0x80000000 1044 .long __critical_start + 0x80000000
1018.Lcritical_end: 1045.Lcritical_end:
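
The TRACE_IRQS_ON/OFF macros added above just call into lockdep's irq-flags tracing hooks around the points where the entry code enables or disables interrupts. A rough C model of what those hooks mean (not the actual implementation):

	/* rough model of the hooks the entry code calls */
	void trace_hardirqs_on(void)
	{
		/* record that this CPU is about to (re)enable hard
		 * interrupts, so lockdep can classify lock usage by
		 * irqs-on/irqs-off context */
	}

	void trace_hardirqs_off(void)
	{
		/* record that hard interrupts have just been disabled */
	}
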
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 1ca499fa54b4..56f5f613b868 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -58,6 +58,19 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
58 58
59#define BASED(name) name-system_call(%r13) 59#define BASED(name) name-system_call(%r13)
60 60
61#ifdef CONFIG_TRACE_IRQFLAGS
62 .macro TRACE_IRQS_ON
63 brasl %r14,trace_hardirqs_on
64 .endm
65
66 .macro TRACE_IRQS_OFF
67 brasl %r14,trace_hardirqs_off
68 .endm
69#else
70#define TRACE_IRQS_ON
71#define TRACE_IRQS_OFF
72#endif
73
61 .macro STORE_TIMER lc_offset 74 .macro STORE_TIMER lc_offset
62#ifdef CONFIG_VIRT_CPU_ACCOUNTING 75#ifdef CONFIG_VIRT_CPU_ACCOUNTING
63 stpt \lc_offset 76 stpt \lc_offset
@@ -354,6 +367,7 @@ ret_from_fork:
354 jo 0f 367 jo 0f
355 stg %r15,SP_R15(%r15) # store stack pointer for new kthread 368 stg %r15,SP_R15(%r15) # store stack pointer for new kthread
3560: brasl %r14,schedule_tail 3690: brasl %r14,schedule_tail
370 TRACE_IRQS_ON
357 stosm 24(%r15),0x03 # reenable interrupts 371 stosm 24(%r15),0x03 # reenable interrupts
358 j sysc_return 372 j sysc_return
359 373
@@ -535,6 +549,7 @@ pgm_no_vtime3:
535 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 549 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
536 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 550 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
537 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 551 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
552 TRACE_IRQS_ON
538 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 553 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
539 j sysc_do_svc 554 j sysc_do_svc
540 555
@@ -557,8 +572,10 @@ io_int_handler:
557io_no_vtime: 572io_no_vtime:
558#endif 573#endif
559 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 574 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
575 TRACE_IRQS_OFF
560 la %r2,SP_PTREGS(%r15) # address of register-save area 576 la %r2,SP_PTREGS(%r15) # address of register-save area
561 brasl %r14,do_IRQ # call standard irq handler 577 brasl %r14,do_IRQ # call standard irq handler
578 TRACE_IRQS_ON
562 579
563io_return: 580io_return:
564 tm SP_PSW+1(%r15),0x01 # returning to user ? 581 tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -665,9 +682,11 @@ ext_int_handler:
665ext_no_vtime: 682ext_no_vtime:
666#endif 683#endif
667 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 684 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
685 TRACE_IRQS_OFF
668 la %r2,SP_PTREGS(%r15) # address of register-save area 686 la %r2,SP_PTREGS(%r15) # address of register-save area
669 llgh %r3,__LC_EXT_INT_CODE # get interruption code 687 llgh %r3,__LC_EXT_INT_CODE # get interruption code
670 brasl %r14,do_extint 688 brasl %r14,do_extint
689 TRACE_IRQS_ON
671 j io_return 690 j io_return
672 691
673__critical_end: 692__critical_end:
@@ -743,7 +762,9 @@ mcck_no_vtime:
743 stosm __SF_EMPTY(%r15),0x04 # turn dat on 762 stosm __SF_EMPTY(%r15),0x04 # turn dat on
744 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 763 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
745 jno mcck_return 764 jno mcck_return
765 TRACE_IRQS_OFF
746 brasl %r14,s390_handle_mcck 766 brasl %r14,s390_handle_mcck
767 TRACE_IRQS_ON
747mcck_return: 768mcck_return:
748 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW 769 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
749 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 770 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 480b6a5fef3a..1eef50918615 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -69,10 +69,6 @@ asmlinkage void do_softirq(void)
69 69
70 local_irq_save(flags); 70 local_irq_save(flags);
71 71
72 account_system_vtime(current);
73
74 local_bh_disable();
75
76 if (local_softirq_pending()) { 72 if (local_softirq_pending()) {
77 /* Get current stack pointer. */ 73 /* Get current stack pointer. */
78 asm volatile("la %0,0(15)" : "=a" (old)); 74 asm volatile("la %0,0(15)" : "=a" (old));
@@ -95,10 +91,6 @@ asmlinkage void do_softirq(void)
95 __do_softirq(); 91 __do_softirq();
96 } 92 }
97 93
98 account_system_vtime(current);
99
100 __local_bh_enable();
101
102 local_irq_restore(flags); 94 local_irq_restore(flags);
103} 95}
104 96
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 1f9399191794..78c8e5548caf 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -142,6 +142,7 @@ static void default_idle(void)
142 return; 142 return;
143 } 143 }
144 144
145 trace_hardirqs_on();
145 /* Wait for external, I/O or machine check interrupt. */ 146 /* Wait for external, I/O or machine check interrupt. */
146 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT | 147 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT |
147 PSW_MASK_IO | PSW_MASK_EXT); 148 PSW_MASK_IO | PSW_MASK_EXT);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
new file mode 100644
index 000000000000..de83f38288d0
--- /dev/null
+++ b/arch/s390/kernel/stacktrace.c
@@ -0,0 +1,90 @@
1/*
2 * arch/s390/kernel/stacktrace.c
3 *
4 * Stack trace management functions
5 *
6 * Copyright (C) IBM Corp. 2006
7 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
8 */
9
10#include <linux/sched.h>
11#include <linux/stacktrace.h>
12#include <linux/kallsyms.h>
13
14static inline unsigned long save_context_stack(struct stack_trace *trace,
15 unsigned int *skip,
16 unsigned long sp,
17 unsigned long low,
18 unsigned long high)
19{
20 struct stack_frame *sf;
21 struct pt_regs *regs;
22 unsigned long addr;
23
24 while (1) {
25 sp &= PSW_ADDR_INSN;
26 if (sp < low || sp > high)
27 return sp;
28 sf = (struct stack_frame *)sp;
29 while (1) {
30 addr = sf->gprs[8] & PSW_ADDR_INSN;
31 if (!(*skip))
32 trace->entries[trace->nr_entries++] = addr;
33 else
34 (*skip)--;
35 if (trace->nr_entries >= trace->max_entries)
36 return sp;
37 low = sp;
38 sp = sf->back_chain & PSW_ADDR_INSN;
39 if (!sp)
40 break;
41 if (sp <= low || sp > high - sizeof(*sf))
42 return sp;
43 sf = (struct stack_frame *)sp;
44 }
45 /* Zero backchain detected, check for interrupt frame. */
46 sp = (unsigned long)(sf + 1);
47 if (sp <= low || sp > high - sizeof(*regs))
48 return sp;
49 regs = (struct pt_regs *)sp;
50 addr = regs->psw.addr & PSW_ADDR_INSN;
51 if (!(*skip))
52 trace->entries[trace->nr_entries++] = addr;
53 else
54 (*skip)--;
55 if (trace->nr_entries >= trace->max_entries)
56 return sp;
57 low = sp;
58 sp = regs->gprs[15];
59 }
60}
61
62void save_stack_trace(struct stack_trace *trace,
63 struct task_struct *task, int all_contexts,
64 unsigned int skip)
65{
66 register unsigned long sp asm ("15");
67 unsigned long orig_sp;
68
69 sp &= PSW_ADDR_INSN;
70 orig_sp = sp;
71
72 sp = save_context_stack(trace, &skip, sp,
73 S390_lowcore.panic_stack - PAGE_SIZE,
74 S390_lowcore.panic_stack);
75 if ((sp != orig_sp) && !all_contexts)
76 return;
77 sp = save_context_stack(trace, &skip, sp,
78 S390_lowcore.async_stack - ASYNC_SIZE,
79 S390_lowcore.async_stack);
80 if ((sp != orig_sp) && !all_contexts)
81 return;
82 if (task)
83 save_context_stack(trace, &skip, sp,
84 (unsigned long) task_stack_page(task),
85 (unsigned long) task_stack_page(task) + THREAD_SIZE);
86 else
87 save_context_stack(trace, &skip, sp, S390_lowcore.thread_info,
88 S390_lowcore.thread_info + THREAD_SIZE);
89 return;
90}
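
A minimal caller sketch for the interface above (buffer size chosen arbitrarily; a NULL task means the current task, per the code):

	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= 16,
		.entries	= entries,
	};

	/* current task, current context only, skip no entries */
	save_stack_trace(&trace, NULL, 0, 0);
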
diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c
index a9c1443fc548..8368c2dbe635 100644
--- a/arch/um/kernel/tt/process_kern.c
+++ b/arch/um/kernel/tt/process_kern.c
@@ -119,7 +119,7 @@ void suspend_new_thread(int fd)
119 panic("read failed in suspend_new_thread, err = %d", -err); 119 panic("read failed in suspend_new_thread, err = %d", -err);
120} 120}
121 121
122void schedule_tail(task_t *prev); 122void schedule_tail(struct task_struct *prev);
123 123
124static void new_thread_handler(int sig) 124static void new_thread_handler(int sig)
125{ 125{
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 7d51dd7201c3..37cfe7701f06 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -495,6 +495,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
495{ 495{
496} 496}
497 497
498#ifdef CONFIG_SMP
498void alternatives_smp_module_add(struct module *mod, char *name, 499void alternatives_smp_module_add(struct module *mod, char *name,
499 void *locks, void *locks_end, 500 void *locks, void *locks_end,
500 void *text, void *text_end) 501 void *text, void *text_end)
@@ -504,3 +505,4 @@ void alternatives_smp_module_add(struct module *mod, char *name,
504void alternatives_smp_module_del(struct module *mod) 505void alternatives_smp_module_del(struct module *mod)
505{ 506{
506} 507}
508#endif
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index e856804c447f..28df7d88ce2c 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -24,6 +24,14 @@ config X86
24 bool 24 bool
25 default y 25 default y
26 26
27config LOCKDEP_SUPPORT
28 bool
29 default y
30
31config STACKTRACE_SUPPORT
32 bool
33 default y
34
27config SEMAPHORE_SLEEPERS 35config SEMAPHORE_SLEEPERS
28 bool 36 bool
29 default y 37 default y
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index 1d92ab56c0f9..775d211a5cf9 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -1,5 +1,9 @@
1menu "Kernel hacking" 1menu "Kernel hacking"
2 2
3config TRACE_IRQFLAGS_SUPPORT
4 bool
5 default y
6
3source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
4 8
5config DEBUG_RODATA 9config DEBUG_RODATA
diff --git a/arch/x86_64/boot/setup.S b/arch/x86_64/boot/setup.S
index 7de8b8fd1685..a50b631f4d2b 100644
--- a/arch/x86_64/boot/setup.S
+++ b/arch/x86_64/boot/setup.S
@@ -46,7 +46,7 @@
46 */ 46 */
47 47
48#include <asm/segment.h> 48#include <asm/segment.h>
49#include <linux/version.h> 49#include <linux/utsrelease.h>
50#include <linux/compile.h> 50#include <linux/compile.h>
51#include <asm/boot.h> 51#include <asm/boot.h>
52#include <asm/e820.h> 52#include <asm/e820.h>
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index c536fa98ea37..9b5bb413a6e9 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -13,6 +13,7 @@
13#include <asm/thread_info.h> 13#include <asm/thread_info.h>
14#include <asm/segment.h> 14#include <asm/segment.h>
15#include <asm/vsyscall32.h> 15#include <asm/vsyscall32.h>
16#include <asm/irqflags.h>
16#include <linux/linkage.h> 17#include <linux/linkage.h>
17 18
18#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) 19#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
@@ -75,6 +76,10 @@ ENTRY(ia32_sysenter_target)
75 swapgs 76 swapgs
76 movq %gs:pda_kernelstack, %rsp 77 movq %gs:pda_kernelstack, %rsp
77 addq $(PDA_STACKOFFSET),%rsp 78 addq $(PDA_STACKOFFSET),%rsp
79 /*
80 * No need to follow this irqs on/off section: the syscall
81 * disabled irqs; here we re-enable them straight after entry:
82 */
78 sti 83 sti
79 movl %ebp,%ebp /* zero extension */ 84 movl %ebp,%ebp /* zero extension */
80 pushq $__USER32_DS 85 pushq $__USER32_DS
@@ -118,6 +123,7 @@ sysenter_do_call:
118 movq %rax,RAX-ARGOFFSET(%rsp) 123 movq %rax,RAX-ARGOFFSET(%rsp)
119 GET_THREAD_INFO(%r10) 124 GET_THREAD_INFO(%r10)
120 cli 125 cli
126 TRACE_IRQS_OFF
121 testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10) 127 testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
122 jnz int_ret_from_sys_call 128 jnz int_ret_from_sys_call
123 andl $~TS_COMPAT,threadinfo_status(%r10) 129 andl $~TS_COMPAT,threadinfo_status(%r10)
@@ -132,6 +138,7 @@ sysenter_do_call:
132 CFI_REGISTER rsp,rcx 138 CFI_REGISTER rsp,rcx
133 movl $VSYSCALL32_SYSEXIT,%edx /* User %eip */ 139 movl $VSYSCALL32_SYSEXIT,%edx /* User %eip */
134 CFI_REGISTER rip,rdx 140 CFI_REGISTER rip,rdx
141 TRACE_IRQS_ON
135 swapgs 142 swapgs
136 sti /* sti only takes effect after the next instruction */ 143 sti /* sti only takes effect after the next instruction */
137 /* sysexit */ 144 /* sysexit */
@@ -186,6 +193,10 @@ ENTRY(ia32_cstar_target)
186 movl %esp,%r8d 193 movl %esp,%r8d
187 CFI_REGISTER rsp,r8 194 CFI_REGISTER rsp,r8
188 movq %gs:pda_kernelstack,%rsp 195 movq %gs:pda_kernelstack,%rsp
196 /*
197 * No need to follow this irqs on/off section: the syscall
198 * disabled irqs; here we re-enable them straight after entry:
199 */
189 sti 200 sti
190 SAVE_ARGS 8,1,1 201 SAVE_ARGS 8,1,1
191 movl %eax,%eax /* zero extension */ 202 movl %eax,%eax /* zero extension */
@@ -220,6 +231,7 @@ cstar_do_call:
220 movq %rax,RAX-ARGOFFSET(%rsp) 231 movq %rax,RAX-ARGOFFSET(%rsp)
221 GET_THREAD_INFO(%r10) 232 GET_THREAD_INFO(%r10)
222 cli 233 cli
234 TRACE_IRQS_OFF
223 testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10) 235 testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
224 jnz int_ret_from_sys_call 236 jnz int_ret_from_sys_call
225 andl $~TS_COMPAT,threadinfo_status(%r10) 237 andl $~TS_COMPAT,threadinfo_status(%r10)
@@ -228,6 +240,7 @@ cstar_do_call:
228 CFI_REGISTER rip,rcx 240 CFI_REGISTER rip,rcx
229 movl EFLAGS-ARGOFFSET(%rsp),%r11d 241 movl EFLAGS-ARGOFFSET(%rsp),%r11d
230 /*CFI_REGISTER rflags,r11*/ 242 /*CFI_REGISTER rflags,r11*/
243 TRACE_IRQS_ON
231 movl RSP-ARGOFFSET(%rsp),%esp 244 movl RSP-ARGOFFSET(%rsp),%esp
232 CFI_RESTORE rsp 245 CFI_RESTORE rsp
233 swapgs 246 swapgs
@@ -286,7 +299,11 @@ ENTRY(ia32_syscall)
286 /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/ 299 /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/
287 /*CFI_REL_OFFSET cs,CS-RIP*/ 300 /*CFI_REL_OFFSET cs,CS-RIP*/
288 CFI_REL_OFFSET rip,RIP-RIP 301 CFI_REL_OFFSET rip,RIP-RIP
289 swapgs 302 swapgs
303 /*
304 * No need to follow this irqs on/off section: the syscall
305 * disabled irqs; here we re-enable them straight after entry:
306 */
290 sti 307 sti
291 movl %eax,%eax 308 movl %eax,%eax
292 pushq %rax 309 pushq %rax
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 819e84ec5b64..b5aaeafc1cd3 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
10 setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \ 10 setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
11 pci-dma.o pci-nommu.o alternative.o 11 pci-dma.o pci-nommu.o alternative.o
12 12
13obj-$(CONFIG_STACKTRACE) += stacktrace.o
13obj-$(CONFIG_X86_MCE) += mce.o 14obj-$(CONFIG_X86_MCE) += mce.o
14obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o 15obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
15obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o 16obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index ed92c2983606..d464dded68c0 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -41,13 +41,24 @@
41#include <asm/thread_info.h> 41#include <asm/thread_info.h>
42#include <asm/hw_irq.h> 42#include <asm/hw_irq.h>
43#include <asm/page.h> 43#include <asm/page.h>
44#include <asm/irqflags.h>
44 45
45 .code64 46 .code64
46 47
47#ifndef CONFIG_PREEMPT 48#ifndef CONFIG_PREEMPT
48#define retint_kernel retint_restore_args 49#define retint_kernel retint_restore_args
49#endif 50#endif
50 51
52
53.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
54#ifdef CONFIG_TRACE_IRQFLAGS
55 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
56 jnc 1f
57 TRACE_IRQS_ON
581:
59#endif
60.endm
61
51/* 62/*
52 * C code is not supposed to know about undefined top of stack. Every time 63 * C code is not supposed to know about undefined top of stack. Every time
53 * a C function with an pt_regs argument is called from the SYSCALL based 64 * a C function with an pt_regs argument is called from the SYSCALL based
@@ -194,6 +205,10 @@ ENTRY(system_call)
194 swapgs 205 swapgs
195 movq %rsp,%gs:pda_oldrsp 206 movq %rsp,%gs:pda_oldrsp
196 movq %gs:pda_kernelstack,%rsp 207 movq %gs:pda_kernelstack,%rsp
208 /*
209 * No need to follow this irqs off/on section - it is short
210 * and straightforward:
211 */
197 sti 212 sti
198 SAVE_ARGS 8,1 213 SAVE_ARGS 8,1
199 movq %rax,ORIG_RAX-ARGOFFSET(%rsp) 214 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
@@ -219,10 +234,15 @@ ret_from_sys_call:
219sysret_check: 234sysret_check:
220 GET_THREAD_INFO(%rcx) 235 GET_THREAD_INFO(%rcx)
221 cli 236 cli
237 TRACE_IRQS_OFF
222 movl threadinfo_flags(%rcx),%edx 238 movl threadinfo_flags(%rcx),%edx
223 andl %edi,%edx 239 andl %edi,%edx
224 CFI_REMEMBER_STATE 240 CFI_REMEMBER_STATE
225 jnz sysret_careful 241 jnz sysret_careful
242 /*
243 * sysretq will re-enable interrupts:
244 */
245 TRACE_IRQS_ON
226 movq RIP-ARGOFFSET(%rsp),%rcx 246 movq RIP-ARGOFFSET(%rsp),%rcx
227 CFI_REGISTER rip,rcx 247 CFI_REGISTER rip,rcx
228 RESTORE_ARGS 0,-ARG_SKIP,1 248 RESTORE_ARGS 0,-ARG_SKIP,1
@@ -237,6 +257,7 @@ sysret_careful:
237 CFI_RESTORE_STATE 257 CFI_RESTORE_STATE
238 bt $TIF_NEED_RESCHED,%edx 258 bt $TIF_NEED_RESCHED,%edx
239 jnc sysret_signal 259 jnc sysret_signal
260 TRACE_IRQS_ON
240 sti 261 sti
241 pushq %rdi 262 pushq %rdi
242 CFI_ADJUST_CFA_OFFSET 8 263 CFI_ADJUST_CFA_OFFSET 8
@@ -247,6 +268,7 @@ sysret_careful:
247 268
248 /* Handle a signal */ 269 /* Handle a signal */
249sysret_signal: 270sysret_signal:
271 TRACE_IRQS_ON
250 sti 272 sti
251 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx 273 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
252 jz 1f 274 jz 1f
@@ -261,6 +283,7 @@ sysret_signal:
261 /* Use IRET because user could have changed frame. This 283 /* Use IRET because user could have changed frame. This
262 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ 284 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
263 cli 285 cli
286 TRACE_IRQS_OFF
264 jmp int_with_check 287 jmp int_with_check
265 288
266badsys: 289badsys:
@@ -309,6 +332,7 @@ ENTRY(int_ret_from_sys_call)
309 CFI_REL_OFFSET r10,R10-ARGOFFSET 332 CFI_REL_OFFSET r10,R10-ARGOFFSET
310 CFI_REL_OFFSET r11,R11-ARGOFFSET 333 CFI_REL_OFFSET r11,R11-ARGOFFSET
311 cli 334 cli
335 TRACE_IRQS_OFF
312 testl $3,CS-ARGOFFSET(%rsp) 336 testl $3,CS-ARGOFFSET(%rsp)
313 je retint_restore_args 337 je retint_restore_args
314 movl $_TIF_ALLWORK_MASK,%edi 338 movl $_TIF_ALLWORK_MASK,%edi
@@ -327,6 +351,7 @@ int_with_check:
327int_careful: 351int_careful:
328 bt $TIF_NEED_RESCHED,%edx 352 bt $TIF_NEED_RESCHED,%edx
329 jnc int_very_careful 353 jnc int_very_careful
354 TRACE_IRQS_ON
330 sti 355 sti
331 pushq %rdi 356 pushq %rdi
332 CFI_ADJUST_CFA_OFFSET 8 357 CFI_ADJUST_CFA_OFFSET 8
@@ -334,10 +359,12 @@ int_careful:
334 popq %rdi 359 popq %rdi
335 CFI_ADJUST_CFA_OFFSET -8 360 CFI_ADJUST_CFA_OFFSET -8
336 cli 361 cli
362 TRACE_IRQS_OFF
337 jmp int_with_check 363 jmp int_with_check
338 364
339 /* handle signals and tracing -- both require a full stack frame */ 365 /* handle signals and tracing -- both require a full stack frame */
340int_very_careful: 366int_very_careful:
367 TRACE_IRQS_ON
341 sti 368 sti
342 SAVE_REST 369 SAVE_REST
343 /* Check for syscall exit trace */ 370 /* Check for syscall exit trace */
@@ -351,6 +378,7 @@ int_very_careful:
351 CFI_ADJUST_CFA_OFFSET -8 378 CFI_ADJUST_CFA_OFFSET -8
352 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi 379 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
353 cli 380 cli
381 TRACE_IRQS_OFF
354 jmp int_restore_rest 382 jmp int_restore_rest
355 383
356int_signal: 384int_signal:
@@ -363,6 +391,7 @@ int_signal:
363int_restore_rest: 391int_restore_rest:
364 RESTORE_REST 392 RESTORE_REST
365 cli 393 cli
394 TRACE_IRQS_OFF
366 jmp int_with_check 395 jmp int_with_check
367 CFI_ENDPROC 396 CFI_ENDPROC
368END(int_ret_from_sys_call) 397END(int_ret_from_sys_call)
@@ -484,6 +513,10 @@ END(stub_rt_sigreturn)
484 swapgs 513 swapgs
4851: incl %gs:pda_irqcount # RED-PEN should check preempt count 5141: incl %gs:pda_irqcount # RED-PEN should check preempt count
486 cmoveq %gs:pda_irqstackptr,%rsp 515 cmoveq %gs:pda_irqstackptr,%rsp
516 /*
517 * We entered an interrupt context - irqs are off:
518 */
519 TRACE_IRQS_OFF
487 call \func 520 call \func
488 .endm 521 .endm
489 522
@@ -493,6 +526,7 @@ ENTRY(common_interrupt)
493 /* 0(%rsp): oldrsp-ARGOFFSET */ 526 /* 0(%rsp): oldrsp-ARGOFFSET */
494ret_from_intr: 527ret_from_intr:
495 cli 528 cli
529 TRACE_IRQS_OFF
496 decl %gs:pda_irqcount 530 decl %gs:pda_irqcount
497 leaveq 531 leaveq
498 CFI_DEF_CFA_REGISTER rsp 532 CFI_DEF_CFA_REGISTER rsp
@@ -515,9 +549,21 @@ retint_check:
515 CFI_REMEMBER_STATE 549 CFI_REMEMBER_STATE
516 jnz retint_careful 550 jnz retint_careful
517retint_swapgs: 551retint_swapgs:
552 /*
553 * The iretq could re-enable interrupts:
554 */
555 cli
556 TRACE_IRQS_IRETQ
518 swapgs 557 swapgs
558 jmp restore_args
559
519retint_restore_args: 560retint_restore_args:
520 cli 561 cli
562 /*
563 * The iretq could re-enable interrupts:
564 */
565 TRACE_IRQS_IRETQ
566restore_args:
521 RESTORE_ARGS 0,8,0 567 RESTORE_ARGS 0,8,0
522iret_label: 568iret_label:
523 iretq 569 iretq
@@ -530,6 +576,7 @@ iret_label:
530 /* running with kernel gs */ 576 /* running with kernel gs */
531bad_iret: 577bad_iret:
532 movq $11,%rdi /* SIGSEGV */ 578 movq $11,%rdi /* SIGSEGV */
579 TRACE_IRQS_ON
533 sti 580 sti
534 jmp do_exit 581 jmp do_exit
535 .previous 582 .previous
@@ -539,6 +586,7 @@ retint_careful:
539 CFI_RESTORE_STATE 586 CFI_RESTORE_STATE
540 bt $TIF_NEED_RESCHED,%edx 587 bt $TIF_NEED_RESCHED,%edx
541 jnc retint_signal 588 jnc retint_signal
589 TRACE_IRQS_ON
542 sti 590 sti
543 pushq %rdi 591 pushq %rdi
544 CFI_ADJUST_CFA_OFFSET 8 592 CFI_ADJUST_CFA_OFFSET 8
@@ -547,11 +595,13 @@ retint_careful:
547 CFI_ADJUST_CFA_OFFSET -8 595 CFI_ADJUST_CFA_OFFSET -8
548 GET_THREAD_INFO(%rcx) 596 GET_THREAD_INFO(%rcx)
549 cli 597 cli
598 TRACE_IRQS_OFF
550 jmp retint_check 599 jmp retint_check
551 600
552retint_signal: 601retint_signal:
553 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx 602 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
554 jz retint_swapgs 603 jz retint_swapgs
604 TRACE_IRQS_ON
555 sti 605 sti
556 SAVE_REST 606 SAVE_REST
557 movq $-1,ORIG_RAX(%rsp) 607 movq $-1,ORIG_RAX(%rsp)
@@ -560,6 +610,7 @@ retint_signal:
560 call do_notify_resume 610 call do_notify_resume
561 RESTORE_REST 611 RESTORE_REST
562 cli 612 cli
613 TRACE_IRQS_OFF
563 movl $_TIF_NEED_RESCHED,%edi 614 movl $_TIF_NEED_RESCHED,%edi
564 GET_THREAD_INFO(%rcx) 615 GET_THREAD_INFO(%rcx)
565 jmp retint_check 616 jmp retint_check
@@ -666,7 +717,7 @@ END(spurious_interrupt)
666 717
667 /* error code is on the stack already */ 718 /* error code is on the stack already */
668 /* handle NMI like exceptions that can happen everywhere */ 719 /* handle NMI like exceptions that can happen everywhere */
669 .macro paranoidentry sym, ist=0 720 .macro paranoidentry sym, ist=0, irqtrace=1
670 SAVE_ALL 721 SAVE_ALL
671 cld 722 cld
672 movl $1,%ebx 723 movl $1,%ebx
@@ -691,8 +742,73 @@ END(spurious_interrupt)
691 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) 742 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
692 .endif 743 .endif
693 cli 744 cli
745 .if \irqtrace
746 TRACE_IRQS_OFF
747 .endif
694 .endm 748 .endm
695 749
750 /*
751 * "Paranoid" exit path from exception stack.
752 * Paranoid because this is used by NMIs and cannot take
753 * any kernel state for granted.
754 * We don't do kernel preemption checks here, because the only
755 * common user is the NMI handler, which does not enable IRQs
756 * and cannot get reschedule ticks.
757 *
758 * "trace" is 0 for the NMI handler only, because irq-tracing
759 * is fundamentally NMI-unsafe. (we cannot change the soft and
760 * hard flags at once, atomically)
761 */
762 .macro paranoidexit trace=1
763 /* ebx: no swapgs flag */
764paranoid_exit\trace:
765 testl %ebx,%ebx /* swapgs needed? */
766 jnz paranoid_restore\trace
767 testl $3,CS(%rsp)
768 jnz paranoid_userspace\trace
769paranoid_swapgs\trace:
770 TRACE_IRQS_IRETQ 0
771 swapgs
772paranoid_restore\trace:
773 RESTORE_ALL 8
774 iretq
775paranoid_userspace\trace:
776 GET_THREAD_INFO(%rcx)
777 movl threadinfo_flags(%rcx),%ebx
778 andl $_TIF_WORK_MASK,%ebx
779 jz paranoid_swapgs\trace
780 movq %rsp,%rdi /* &pt_regs */
781 call sync_regs
782 movq %rax,%rsp /* switch stack for scheduling */
783 testl $_TIF_NEED_RESCHED,%ebx
784 jnz paranoid_schedule\trace
785 movl %ebx,%edx /* arg3: thread flags */
786 .if \trace
787 TRACE_IRQS_ON
788 .endif
789 sti
790 xorl %esi,%esi /* arg2: oldset */
791 movq %rsp,%rdi /* arg1: &pt_regs */
792 call do_notify_resume
793 cli
794 .if \trace
795 TRACE_IRQS_OFF
796 .endif
797 jmp paranoid_userspace\trace
798paranoid_schedule\trace:
799 .if \trace
800 TRACE_IRQS_ON
801 .endif
802 sti
803 call schedule
804 cli
805 .if \trace
806 TRACE_IRQS_OFF
807 .endif
808 jmp paranoid_userspace\trace
809 CFI_ENDPROC
810 .endm
811
696/* 812/*
697 * Exception entry point. This expects an error code/orig_rax on the stack 813 * Exception entry point. This expects an error code/orig_rax on the stack
698 * and the exception handler in %rax. 814 * and the exception handler in %rax.
@@ -748,6 +864,7 @@ error_exit:
748 movl %ebx,%eax 864 movl %ebx,%eax
749 RESTORE_REST 865 RESTORE_REST
750 cli 866 cli
867 TRACE_IRQS_OFF
751 GET_THREAD_INFO(%rcx) 868 GET_THREAD_INFO(%rcx)
752 testl %eax,%eax 869 testl %eax,%eax
753 jne retint_kernel 870 jne retint_kernel
@@ -755,6 +872,10 @@ error_exit:
755 movl $_TIF_WORK_MASK,%edi 872 movl $_TIF_WORK_MASK,%edi
756 andl %edi,%edx 873 andl %edi,%edx
757 jnz retint_careful 874 jnz retint_careful
875 /*
876 * The iret might restore flags:
877 */
878 TRACE_IRQS_IRETQ
758 swapgs 879 swapgs
759 RESTORE_ARGS 0,8,0 880 RESTORE_ARGS 0,8,0
760 jmp iret_label 881 jmp iret_label
@@ -916,8 +1037,7 @@ KPROBE_ENTRY(debug)
916 pushq $0 1037 pushq $0
917 CFI_ADJUST_CFA_OFFSET 8 1038 CFI_ADJUST_CFA_OFFSET 8
918 paranoidentry do_debug, DEBUG_STACK 1039 paranoidentry do_debug, DEBUG_STACK
919 jmp paranoid_exit 1040 paranoidexit
920 CFI_ENDPROC
921END(debug) 1041END(debug)
922 .previous .text 1042 .previous .text
923 1043
@@ -926,49 +1046,13 @@ KPROBE_ENTRY(nmi)
926 INTR_FRAME 1046 INTR_FRAME
927 pushq $-1 1047 pushq $-1
928 CFI_ADJUST_CFA_OFFSET 8 1048 CFI_ADJUST_CFA_OFFSET 8
929 paranoidentry do_nmi 1049 paranoidentry do_nmi, 0, 0
930 /* 1050#ifdef CONFIG_TRACE_IRQFLAGS
931 * "Paranoid" exit path from exception stack. 1051 paranoidexit 0
932 * Paranoid because this is used by NMIs and cannot take 1052#else
933 * any kernel state for granted. 1053 jmp paranoid_exit1
934 * We don't do kernel preemption checks here, because only 1054 CFI_ENDPROC
935 * NMI should be common and it does not enable IRQs and 1055#endif
936 * cannot get reschedule ticks.
937 */
938 /* ebx: no swapgs flag */
939paranoid_exit:
940 testl %ebx,%ebx /* swapgs needed? */
941 jnz paranoid_restore
942 testl $3,CS(%rsp)
943 jnz paranoid_userspace
944paranoid_swapgs:
945 swapgs
946paranoid_restore:
947 RESTORE_ALL 8
948 iretq
949paranoid_userspace:
950 GET_THREAD_INFO(%rcx)
951 movl threadinfo_flags(%rcx),%ebx
952 andl $_TIF_WORK_MASK,%ebx
953 jz paranoid_swapgs
954 movq %rsp,%rdi /* &pt_regs */
955 call sync_regs
956 movq %rax,%rsp /* switch stack for scheduling */
957 testl $_TIF_NEED_RESCHED,%ebx
958 jnz paranoid_schedule
959 movl %ebx,%edx /* arg3: thread flags */
960 sti
961 xorl %esi,%esi /* arg2: oldset */
962 movq %rsp,%rdi /* arg1: &pt_regs */
963 call do_notify_resume
964 cli
965 jmp paranoid_userspace
966paranoid_schedule:
967 sti
968 call schedule
969 cli
970 jmp paranoid_userspace
971 CFI_ENDPROC
972END(nmi) 1056END(nmi)
973 .previous .text 1057 .previous .text
974 1058
@@ -977,7 +1061,7 @@ KPROBE_ENTRY(int3)
977 pushq $0 1061 pushq $0
978 CFI_ADJUST_CFA_OFFSET 8 1062 CFI_ADJUST_CFA_OFFSET 8
979 paranoidentry do_int3, DEBUG_STACK 1063 paranoidentry do_int3, DEBUG_STACK
980 jmp paranoid_exit 1064 jmp paranoid_exit1
981 CFI_ENDPROC 1065 CFI_ENDPROC
982END(int3) 1066END(int3)
983 .previous .text 1067 .previous .text
@@ -1006,7 +1090,7 @@ END(reserved)
1006ENTRY(double_fault) 1090ENTRY(double_fault)
1007 XCPT_FRAME 1091 XCPT_FRAME
1008 paranoidentry do_double_fault 1092 paranoidentry do_double_fault
1009 jmp paranoid_exit 1093 jmp paranoid_exit1
1010 CFI_ENDPROC 1094 CFI_ENDPROC
1011END(double_fault) 1095END(double_fault)
1012 1096
@@ -1022,7 +1106,7 @@ END(segment_not_present)
1022ENTRY(stack_segment) 1106ENTRY(stack_segment)
1023 XCPT_FRAME 1107 XCPT_FRAME
1024 paranoidentry do_stack_segment 1108 paranoidentry do_stack_segment
1025 jmp paranoid_exit 1109 jmp paranoid_exit1
1026 CFI_ENDPROC 1110 CFI_ENDPROC
1027END(stack_segment) 1111END(stack_segment)
1028 1112
@@ -1050,7 +1134,7 @@ ENTRY(machine_check)
1050 pushq $0 1134 pushq $0
1051 CFI_ADJUST_CFA_OFFSET 8 1135 CFI_ADJUST_CFA_OFFSET 8
1052 paranoidentry do_machine_check 1136 paranoidentry do_machine_check
1053 jmp paranoid_exit 1137 jmp paranoid_exit1
1054 CFI_ENDPROC 1138 CFI_ENDPROC
1055END(machine_check) 1139END(machine_check)
1056#endif 1140#endif
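
The TRACE_IRQS_IRETQ macro used throughout these exit paths tests bit 9 (the IF flag) of the EFLAGS image that iretq is about to restore. In rough C terms (a model, not the actual code):

	/* rough C model of TRACE_IRQS_IRETQ: if the saved flags have IF
	 * set, the iretq will re-enable interrupts, so lockdep must be
	 * told beforehand */
	if (regs->eflags & (1UL << 9))	/* X86_EFLAGS_IF */
		trace_hardirqs_on();
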
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index e6a71c9556d9..36647ce6aecb 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -85,6 +85,11 @@ void __init x86_64_start_kernel(char * real_mode_data)
85 clear_bss(); 85 clear_bss();
86 86
87 /* 87 /*
88 * This must be called really, really early:
89 */
90 lockdep_init();
91
92 /*
88 * switch to init_level4_pgt from boot_level4_pgt 93 * switch to init_level4_pgt from boot_level4_pgt
89 */ 94 */
90 memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t)); 95 memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index a1f1df5f7bfc..5221a53e90c1 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -177,8 +177,10 @@ asmlinkage void do_softirq(void)
177 local_irq_save(flags); 177 local_irq_save(flags);
178 pending = local_softirq_pending(); 178 pending = local_softirq_pending();
179 /* Switch to interrupt stack */ 179 /* Switch to interrupt stack */
180 if (pending) 180 if (pending) {
181 call_softirq(); 181 call_softirq();
182 WARN_ON_ONCE(softirq_count());
183 }
182 local_irq_restore(flags); 184 local_irq_restore(flags);
183} 185}
184EXPORT_SYMBOL(do_softirq); 186EXPORT_SYMBOL(do_softirq);
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 476c1472fc07..5baa0c726e97 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -127,7 +127,7 @@ void __cpuinit nmi_watchdog_default(void)
127static __init void nmi_cpu_busy(void *data) 127static __init void nmi_cpu_busy(void *data)
128{ 128{
129 volatile int *endflag = data; 129 volatile int *endflag = data;
130 local_irq_enable(); 130 local_irq_enable_in_hardirq();
131 /* Intentionally don't use cpu_relax here. This is 131 /* Intentionally don't use cpu_relax here. This is
132 to make sure that the performance counter really ticks, 132 to make sure that the performance counter really ticks,
133 even if there is a simulator or similar that catches the 133 even if there is a simulator or similar that catches the
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index ca56e19b8b6e..bb6745d13b8f 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -296,7 +296,7 @@ void __show_regs(struct pt_regs * regs)
296 system_utsname.version); 296 system_utsname.version);
297 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip); 297 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
298 printk_address(regs->rip); 298 printk_address(regs->rip);
299 printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, 299 printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
300 regs->eflags); 300 regs->eflags);
301 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", 301 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
302 regs->rax, regs->rbx, regs->rcx); 302 regs->rax, regs->rbx, regs->rcx);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 9705a6a384f1..b7c705969791 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -775,6 +775,8 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
775 }; 775 };
776 DECLARE_WORK(work, do_fork_idle, &c_idle); 776 DECLARE_WORK(work, do_fork_idle, &c_idle);
777 777
778 lockdep_set_class(&c_idle.done.wait.lock, &waitqueue_lock_key);
779
778 /* allocate memory for gdts of secondary cpus. Hotplug is considered */ 780 /* allocate memory for gdts of secondary cpus. Hotplug is considered */
779 if (!cpu_gdt_descr[cpu].address && 781 if (!cpu_gdt_descr[cpu].address &&
780 !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) { 782 !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
diff --git a/arch/x86_64/kernel/stacktrace.c b/arch/x86_64/kernel/stacktrace.c
new file mode 100644
index 000000000000..32cf55eb9af8
--- /dev/null
+++ b/arch/x86_64/kernel/stacktrace.c
@@ -0,0 +1,221 @@
1/*
2 * arch/x86_64/kernel/stacktrace.c
3 *
4 * Stack trace management functions
5 *
6 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 */
8#include <linux/sched.h>
9#include <linux/stacktrace.h>
10
11#include <asm/smp.h>
12
13static inline int
14in_range(unsigned long start, unsigned long addr, unsigned long end)
15{
16 return addr >= start && addr <= end;
17}
18
19static unsigned long
20get_stack_end(struct task_struct *task, unsigned long stack)
21{
22 unsigned long stack_start, stack_end, flags;
23 int i, cpu;
24
25 /*
26 * The most common case is that we are in the task stack:
27 */
28 stack_start = (unsigned long)task->thread_info;
29 stack_end = stack_start + THREAD_SIZE;
30
31 if (in_range(stack_start, stack, stack_end))
32 return stack_end;
33
34 /*
35 * We are in an interrupt if irqstackptr is set:
36 */
37 raw_local_irq_save(flags);
38 cpu = safe_smp_processor_id();
39 stack_end = (unsigned long)cpu_pda(cpu)->irqstackptr;
40
41 if (stack_end) {
42 stack_start = stack_end & ~(IRQSTACKSIZE-1);
43 if (in_range(stack_start, stack, stack_end))
44 goto out_restore;
45 /*
46 * We get here if we are in an IRQ context but we
47 * are also in an exception stack.
48 */
49 }
50
51 /*
52 * Iterate over all exception stacks, and figure out whether
53 * 'stack' is in one of them:
54 */
55 for (i = 0; i < N_EXCEPTION_STACKS; i++) {
56 /*
57 * set 'end' to the end of the exception stack.
58 */
59 stack_end = per_cpu(init_tss, cpu).ist[i];
60 stack_start = stack_end - EXCEPTION_STKSZ;
61
62 /*
63 * Is 'stack' above this exception frame's end?
64 * If yes then skip to the next frame.
65 */
66 if (stack >= stack_end)
67 continue;
68 /*
69 * Is 'stack' above this exception frame's start address?
70 * If yes then we found the right frame.
71 */
72 if (stack >= stack_start)
73 goto out_restore;
74
75 /*
76 * If this is a debug stack, and if it has a larger size than
77 * the usual exception stacks, then 'stack' might still
78 * be within the lower portion of the debug stack:
79 */
80#if DEBUG_STKSZ > EXCEPTION_STKSZ
81 if (i == DEBUG_STACK - 1 && stack >= stack_end - DEBUG_STKSZ) {
82 /*
83 * Black magic. A large debug stack is composed of
84 * multiple exception stack entries, which we
86 * iterate through now. Don't look:
86 */
87 do {
88 stack_end -= EXCEPTION_STKSZ;
89 stack_start -= EXCEPTION_STKSZ;
90 } while (stack < stack_start);
91
92 goto out_restore;
93 }
94#endif
95 }
96 /*
97 * Ok, 'stack' is not pointing to any of the system stacks.
98 */
99 stack_end = 0;
100
101out_restore:
102 raw_local_irq_restore(flags);
103
104 return stack_end;
105}
106
107
108/*
109 * Save stack-backtrace addresses into a stack_trace buffer:
110 */
111static inline unsigned long
112save_context_stack(struct stack_trace *trace, unsigned int skip,
113 unsigned long stack, unsigned long stack_end)
114{
115 unsigned long addr;
116
117#ifdef CONFIG_FRAME_POINTER
118 unsigned long prev_stack = 0;
119
120 while (in_range(prev_stack, stack, stack_end)) {
121 pr_debug("stack: %p\n", (void *)stack);
122 addr = (unsigned long)(((unsigned long *)stack)[1]);
123 pr_debug("addr: %p\n", (void *)addr);
124 if (!skip)
125 trace->entries[trace->nr_entries++] = addr-1;
126 else
127 skip--;
128 if (trace->nr_entries >= trace->max_entries)
129 break;
130 if (!addr)
131 return 0;
132 /*
133 * Stack frames must go forwards (otherwise a loop could
134 * happen if the stackframe is corrupted), so we move
135 * prev_stack forwards:
136 */
137 prev_stack = stack;
138 stack = (unsigned long)(((unsigned long *)stack)[0]);
139 }
140 pr_debug("invalid: %p\n", (void *)stack);
141#else
142 while (stack < stack_end) {
143 addr = ((unsigned long *)stack)[0];
144 stack += sizeof(long);
145 if (__kernel_text_address(addr)) {
146 if (!skip)
147 trace->entries[trace->nr_entries++] = addr-1;
148 else
149 skip--;
150 if (trace->nr_entries >= trace->max_entries)
151 break;
152 }
153 }
154#endif
155 return stack;
156}
157
158#define MAX_STACKS 10
159
160/*
161 * Save stack-backtrace addresses into a stack_trace buffer.
162 * If all_contexts is set, all contexts (hardirq, softirq and process)
163 * are saved. If not set then only the current context is saved.
164 */
165void save_stack_trace(struct stack_trace *trace,
166 struct task_struct *task, int all_contexts,
167 unsigned int skip)
168{
169 unsigned long stack = (unsigned long)&stack;
170 int i, nr_stacks = 0; unsigned long stacks_done[MAX_STACKS];
171
172 WARN_ON(trace->nr_entries || !trace->max_entries);
173
174 if (!task)
175 task = current;
176
177 pr_debug("task: %p, ti: %p\n", task, task->thread_info);
178
179 if (!task || task == current) {
180 /* Grab rbp right from our regs: */
181 asm ("mov %%rbp, %0" : "=r" (stack));
182 pr_debug("rbp: %p\n", (void *)stack);
183 } else {
184 /* rbp is the last reg pushed by switch_to(): */
185 stack = task->thread.rsp;
186 pr_debug("other task rsp: %p\n", (void *)stack);
187 stack = (unsigned long)(((unsigned long *)stack)[0]);
188 pr_debug("other task rbp: %p\n", (void *)stack);
189 }
190
191 while (1) {
192 unsigned long stack_end = get_stack_end(task, stack);
193
194 pr_debug("stack: %p\n", (void *)stack);
195 pr_debug("stack end: %p\n", (void *)stack_end);
196
197 /*
198 * Invalid stack address?
199 */
200 if (!stack_end)
201 return;
202 /*
203 * Were we in this stack already? (recursion)
204 */
205 for (i = 0; i < nr_stacks; i++)
206 if (stacks_done[i] == stack_end)
207 return;
208 stacks_done[nr_stacks] = stack_end;
209
210 stack = save_context_stack(trace, skip, stack, stack_end);
211 if (!all_contexts || !stack ||
212 trace->nr_entries >= trace->max_entries)
213 return;
214 trace->entries[trace->nr_entries++] = ULONG_MAX;
215 if (trace->nr_entries >= trace->max_entries)
216 return;
217 if (++nr_stacks >= MAX_STACKS)
218 return;
219 }
220}
221
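
The CONFIG_FRAME_POINTER walk in save_context_stack() above relies on the standard rbp chain layout; schematically (a sketch of the assumed frame layout, not code from the patch):

	/* assumed layout of each frame when built with frame pointers */
	struct stack_frame {
		struct stack_frame *next_frame;	/* saved rbp: ((unsigned long *)stack)[0] */
		unsigned long return_address;	/* ((unsigned long *)stack)[1] */
	};
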
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 5a5311d3de0f..79d05c482072 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -110,28 +110,31 @@ static int kstack_depth_to_print = 12;
110static int call_trace = 1; 110static int call_trace = 1;
111 111
112#ifdef CONFIG_KALLSYMS 112#ifdef CONFIG_KALLSYMS
113#include <linux/kallsyms.h> 113# include <linux/kallsyms.h>
114int printk_address(unsigned long address) 114void printk_address(unsigned long address)
115{ 115{
116 unsigned long offset = 0, symsize; 116 unsigned long offset = 0, symsize;
117 const char *symname; 117 const char *symname;
118 char *modname; 118 char *modname;
119 char *delim = ":"; 119 char *delim = ":";
120 char namebuf[128]; 120 char namebuf[128];
121 121
122 symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); 122 symname = kallsyms_lookup(address, &symsize, &offset,
123 if (!symname) 123 &modname, namebuf);
124 return printk("[<%016lx>]", address); 124 if (!symname) {
125 if (!modname) 125 printk(" [<%016lx>]\n", address);
126 return;
127 }
128 if (!modname)
126 modname = delim = ""; 129 modname = delim = "";
127 return printk("<%016lx>{%s%s%s%s%+ld}", 130 printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
128 address, delim, modname, delim, symname, offset); 131 address, delim, modname, delim, symname, offset, symsize);
129} 132}
130#else 133#else
131int printk_address(unsigned long address) 134void printk_address(unsigned long address)
132{ 135{
133 return printk("[<%016lx>]", address); 136 printk(" [<%016lx>]\n", address);
134} 137}
135#endif 138#endif
136 139
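
With the change above, each trace entry is printed on its own line in a symsize-annotated format; hypothetical output for one module entry (addresses and symbol invented for illustration):

	 [<ffffffff8010f2a1>] :fat:fat_readdir+0x41/0x340
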
137static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, 140static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
@@ -149,10 +152,22 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
149 }; 152 };
150 unsigned k; 153 unsigned k;
151 154
155 /*
156 * Iterate over all exception stacks, and figure out whether
157 * 'stack' is in one of them:
158 */
152 for (k = 0; k < N_EXCEPTION_STACKS; k++) { 159 for (k = 0; k < N_EXCEPTION_STACKS; k++) {
153 unsigned long end; 160 unsigned long end;
154 161
162 /*
163 * set 'end' to the end of the exception stack.
164 */
155 switch (k + 1) { 165 switch (k + 1) {
166 /*
167 * TODO: this block is probably not needed, because
168 * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
169 * properly too.
170 */
156#if DEBUG_STKSZ > EXCEPTION_STKSZ 171#if DEBUG_STKSZ > EXCEPTION_STKSZ
157 case DEBUG_STACK: 172 case DEBUG_STACK:
158 end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ; 173 end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
@@ -162,19 +177,43 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
162 end = per_cpu(init_tss, cpu).ist[k]; 177 end = per_cpu(init_tss, cpu).ist[k];
163 break; 178 break;
164 } 179 }
180 /*
181 * Is 'stack' above this exception frame's end?
182 * If yes then skip to the next frame.
183 */
165 if (stack >= end) 184 if (stack >= end)
166 continue; 185 continue;
186 /*
187 * Is 'stack' above this exception frame's start address?
188 * If yes then we found the right frame.
189 */
167 if (stack >= end - EXCEPTION_STKSZ) { 190 if (stack >= end - EXCEPTION_STKSZ) {
191 /*
192 * Make sure we only iterate through an exception
193 * stack once. If it comes up for the second time
194 * then there's something wrong going on - just
195 * break out and return NULL:
196 */
168 if (*usedp & (1U << k)) 197 if (*usedp & (1U << k))
169 break; 198 break;
170 *usedp |= 1U << k; 199 *usedp |= 1U << k;
171 *idp = ids[k]; 200 *idp = ids[k];
172 return (unsigned long *)end; 201 return (unsigned long *)end;
173 } 202 }
203 /*
204 * If this is a debug stack, and if it has a larger size than
205 * the usual exception stacks, then 'stack' might still
206 * be within the lower portion of the debug stack:
207 */
174#if DEBUG_STKSZ > EXCEPTION_STKSZ 208#if DEBUG_STKSZ > EXCEPTION_STKSZ
175 if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) { 209 if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
176 unsigned j = N_EXCEPTION_STACKS - 1; 210 unsigned j = N_EXCEPTION_STACKS - 1;
177 211
212 /*
213 * Black magic. A large debug stack is composed of
214 * multiple exception stack entries, which we
215 * iterate through now. Don't look:
216 */
178 do { 217 do {
179 ++j; 218 ++j;
180 end -= EXCEPTION_STKSZ; 219 end -= EXCEPTION_STKSZ;
@@ -193,20 +232,14 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
193 232
194static int show_trace_unwind(struct unwind_frame_info *info, void *context) 233static int show_trace_unwind(struct unwind_frame_info *info, void *context)
195{ 234{
196 int i = 11, n = 0; 235 int n = 0;
197 236
198 while (unwind(info) == 0 && UNW_PC(info)) { 237 while (unwind(info) == 0 && UNW_PC(info)) {
199 ++n; 238 n++;
200 if (i > 50) { 239 printk_address(UNW_PC(info));
201 printk("\n ");
202 i = 7;
203 } else
204 i += printk(" ");
205 i += printk_address(UNW_PC(info));
206 if (arch_unw_user_mode(info)) 240 if (arch_unw_user_mode(info))
207 break; 241 break;
208 } 242 }
209 printk("\n");
210 return n; 243 return n;
211} 244}
212 245
@@ -224,7 +257,7 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
224 int i = 11; 257 int i = 11;
225 unsigned used = 0; 258 unsigned used = 0;
226 259
227 printk("\nCall Trace:"); 260 printk("\nCall Trace:\n");
228 261
229 if (!tsk) 262 if (!tsk)
230 tsk = current; 263 tsk = current;
@@ -250,16 +283,15 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
250 } 283 }
251 } 284 }
252 285
286 /*
287 * Print function call entries within a stack. 'cond' is the
288 * "end of stackframe" condition, that the 'stack++'
289 * iteration will eventually trigger.
290 */
253#define HANDLE_STACK(cond) \ 291#define HANDLE_STACK(cond) \
254 do while (cond) { \ 292 do while (cond) { \
255 unsigned long addr = *stack++; \ 293 unsigned long addr = *stack++; \
256 if (kernel_text_address(addr)) { \ 294 if (kernel_text_address(addr)) { \
257 if (i > 50) { \
258 printk("\n "); \
259 i = 0; \
260 } \
261 else \
262 i += printk(" "); \
263 /* \ 295 /* \
264 * If the address is either in the text segment of the \ 296 * If the address is either in the text segment of the \
265 * kernel, or in the region which contains vmalloc'ed \ 297 * kernel, or in the region which contains vmalloc'ed \
@@ -268,20 +300,30 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
268 * down the cause of the crash will be able to figure \ 300 * down the cause of the crash will be able to figure \
269 * out the call path that was taken. \ 301 * out the call path that was taken. \
270 */ \ 302 */ \
271 i += printk_address(addr); \ 303 printk_address(addr); \
272 } \ 304 } \
273 } while (0) 305 } while (0)
274 306
275 for(; ; ) { 307 /*
308 * Print function call entries in all stacks, starting at the
309 * current stack address: first any nested exception stacks,
310 * then the IRQ stack, and finally the process stack.
311 */
312 for ( ; ; ) {
276 const char *id; 313 const char *id;
277 unsigned long *estack_end; 314 unsigned long *estack_end;
278 estack_end = in_exception_stack(cpu, (unsigned long)stack, 315 estack_end = in_exception_stack(cpu, (unsigned long)stack,
279 &used, &id); 316 &used, &id);
280 317
281 if (estack_end) { 318 if (estack_end) {
282 i += printk(" <%s>", id); 319 printk(" <%s>", id);
283 HANDLE_STACK (stack < estack_end); 320 HANDLE_STACK (stack < estack_end);
284 i += printk(" <EOE>"); 321 printk(" <EOE>");
322 /*
323 * We link to the next stack via the
324 * second-to-last pointer (index -2 to end) in the
325 * exception stack:
326 */
285 stack = (unsigned long *) estack_end[-2]; 327 stack = (unsigned long *) estack_end[-2];
286 continue; 328 continue;
287 } 329 }
@@ -291,19 +333,28 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
291 (IRQSTACKSIZE - 64) / sizeof(*irqstack); 333 (IRQSTACKSIZE - 64) / sizeof(*irqstack);
292 334
293 if (stack >= irqstack && stack < irqstack_end) { 335 if (stack >= irqstack && stack < irqstack_end) {
294 i += printk(" <IRQ>"); 336 printk(" <IRQ>");
295 HANDLE_STACK (stack < irqstack_end); 337 HANDLE_STACK (stack < irqstack_end);
338 /*
339 * We link to the next stack (which would be
340 * the process stack normally) via the last
341 * pointer (index -1 to end) in the IRQ stack:
342 */
296 stack = (unsigned long *) (irqstack_end[-1]); 343 stack = (unsigned long *) (irqstack_end[-1]);
297 irqstack_end = NULL; 344 irqstack_end = NULL;
298 i += printk(" <EOI>"); 345 printk(" <EOI>");
299 continue; 346 continue;
300 } 347 }
301 } 348 }
302 break; 349 break;
303 } 350 }
304 351
352 /*
353 * This prints the process stack:
354 */
305 HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0); 355 HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
306#undef HANDLE_STACK 356#undef HANDLE_STACK
357
307 printk("\n"); 358 printk("\n");
308} 359}
309 360
@@ -337,8 +388,8 @@ static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned
337 break; 388 break;
338 } 389 }
339 if (i && ((i % 4) == 0)) 390 if (i && ((i % 4) == 0))
340 printk("\n "); 391 printk("\n");
341 printk("%016lx ", *stack++); 392 printk(" %016lx", *stack++);
342 touch_nmi_watchdog(); 393 touch_nmi_watchdog();
343 } 394 }
344 show_trace(tsk, regs, rsp); 395 show_trace(tsk, regs, rsp);
diff --git a/arch/x86_64/lib/thunk.S b/arch/x86_64/lib/thunk.S
index e49af0032e94..332ea5dff916 100644
--- a/arch/x86_64/lib/thunk.S
+++ b/arch/x86_64/lib/thunk.S
@@ -47,6 +47,11 @@
47 thunk_retrax __down_failed_interruptible,__down_interruptible 47 thunk_retrax __down_failed_interruptible,__down_interruptible
48 thunk_retrax __down_failed_trylock,__down_trylock 48 thunk_retrax __down_failed_trylock,__down_trylock
49 thunk __up_wakeup,__up 49 thunk __up_wakeup,__up
50
51#ifdef CONFIG_TRACE_IRQFLAGS
52 thunk trace_hardirqs_on_thunk,trace_hardirqs_on
53 thunk trace_hardirqs_off_thunk,trace_hardirqs_off
54#endif
50 55
51 /* SAVE_ARGS below is used only for the .cfi directives it contains. */ 56 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
52 CFI_STARTPROC 57 CFI_STARTPROC
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 5afcf6eb00fa..ac8ea66ccb94 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -570,7 +570,6 @@ no_context:
570 printk(KERN_ALERT "Unable to handle kernel paging request"); 570 printk(KERN_ALERT "Unable to handle kernel paging request");
571 printk(" at %016lx RIP: \n" KERN_ALERT,address); 571 printk(" at %016lx RIP: \n" KERN_ALERT,address);
572 printk_address(regs->rip); 572 printk_address(regs->rip);
573 printk("\n");
574 dump_pagetable(address); 573 dump_pagetable(address);
575 tsk->thread.cr2 = address; 574 tsk->thread.cr2 = address;
576 tsk->thread.trap_no = 14; 575 tsk->thread.trap_no = 14;
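
This hunk works because printk_address(), reworked in the traps.c changes above, now terminates its own output line, so the caller's trailing printk("\n") became redundant. The resulting calling convention, as a sketch with an illustrative wrapper name:

/* Assumption: printk_address() prints "<address> <symbol>\n" itself. */
static void report_rip(struct pt_regs *regs, unsigned long address)
{
        printk(KERN_ALERT "Unable to handle kernel paging request at %016lx RIP:\n",
               address);
        printk_address(regs->rip);      /* no extra printk("\n") needed */
}
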
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 5813d63c20af..ab17c7224bb6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2516,7 +2516,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
2516int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, 2516int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
2517 struct request *rq, int at_head) 2517 struct request *rq, int at_head)
2518{ 2518{
2519 DECLARE_COMPLETION(wait); 2519 DECLARE_COMPLETION_ONSTACK(wait);
2520 char sense[SCSI_SENSE_BUFFERSIZE]; 2520 char sense[SCSI_SENSE_BUFFERSIZE];
2521 int err = 0; 2521 int err = 0;
2522 2522
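
The _ONSTACK variant matters because a completion embeds a wait-queue lock, and the lockdep annotations introduced in this merge need every on-stack instance to get its own lock class rather than sharing the static one. A self-contained sketch of the pattern (the helper thread is hypothetical):

#include <linux/completion.h>
#include <linux/kthread.h>

static int helper_thread(void *arg)             /* hypothetical worker */
{
        complete((struct completion *)arg);     /* signal the waiter */
        return 0;
}

static void wait_for_helper(void)
{
        DECLARE_COMPLETION_ONSTACK(done);       /* not DECLARE_COMPLETION() */

        kthread_run(helper_thread, &done, "helper");
        wait_for_completion(&done);             /* 'done' outlives its users */
}
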
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bc2652d72fdc..fef7bab12244 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -352,6 +352,18 @@ config ACPI_HOTPLUG_MEMORY
352 If one selects "m," this driver can be loaded using the following 352 If one selects "m," this driver can be loaded using the following
353 command: 353 command:
354 $>modprobe acpi_memhotplug 354 $>modprobe acpi_memhotplug
355
356config ACPI_SBS
357 tristate "Smart Battery System (EXPERIMENTAL)"
358 depends on X86 && I2C
359 depends on EXPERIMENTAL
360 default y
361 help
362 This driver adds support for the Smart Battery System.
363 Depends on I2C (Device Drivers ---> I2C support)
364 A "Smart Battery" is quite old and quite rare compared
365 to today's ACPI "Control Method" battery.
366
355endif # ACPI 367endif # ACPI
356 368
357endmenu 369endmenu
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index f0a68ecf1e57..bce7ca27b429 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -58,3 +58,5 @@ obj-$(CONFIG_ACPI_IBM) += ibm_acpi.o
58obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o 58obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
59obj-y += scan.o motherboard.o 59obj-y += scan.o motherboard.o
60obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o 60obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
61obj-y += cm_sbs.o
62obj-$(CONFIG_ACPI_SBS) += i2c_ec.o sbs.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 36ca365bcead..24ccf81d135f 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -50,6 +50,9 @@ ACPI_MODULE_NAME("acpi_ac")
50MODULE_DESCRIPTION(ACPI_AC_DRIVER_NAME); 50MODULE_DESCRIPTION(ACPI_AC_DRIVER_NAME);
51MODULE_LICENSE("GPL"); 51MODULE_LICENSE("GPL");
52 52
53extern struct proc_dir_entry *acpi_lock_ac_dir(void);
54extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
55
53static int acpi_ac_add(struct acpi_device *device); 56static int acpi_ac_add(struct acpi_device *device);
54static int acpi_ac_remove(struct acpi_device *device, int type); 57static int acpi_ac_remove(struct acpi_device *device, int type);
55static int acpi_ac_open_fs(struct inode *inode, struct file *file); 58static int acpi_ac_open_fs(struct inode *inode, struct file *file);
@@ -65,7 +68,7 @@ static struct acpi_driver acpi_ac_driver = {
65}; 68};
66 69
67struct acpi_ac { 70struct acpi_ac {
68 acpi_handle handle; 71 struct acpi_device * device;
69 unsigned long state; 72 unsigned long state;
70}; 73};
71 74
@@ -88,7 +91,7 @@ static int acpi_ac_get_state(struct acpi_ac *ac)
88 if (!ac) 91 if (!ac)
89 return -EINVAL; 92 return -EINVAL;
90 93
91 status = acpi_evaluate_integer(ac->handle, "_PSR", NULL, &ac->state); 94 status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state);
92 if (ACPI_FAILURE(status)) { 95 if (ACPI_FAILURE(status)) {
93 ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state")); 96 ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state"));
94 ac->state = ACPI_AC_STATUS_UNKNOWN; 97 ac->state = ACPI_AC_STATUS_UNKNOWN;
@@ -191,11 +194,11 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
191 if (!ac) 194 if (!ac)
192 return; 195 return;
193 196
194 if (acpi_bus_get_device(ac->handle, &device)) 197 device = ac->device;
195 return;
196
197 switch (event) { 198 switch (event) {
198 case ACPI_AC_NOTIFY_STATUS: 199 case ACPI_AC_NOTIFY_STATUS:
200 case ACPI_NOTIFY_BUS_CHECK:
201 case ACPI_NOTIFY_DEVICE_CHECK:
199 acpi_ac_get_state(ac); 202 acpi_ac_get_state(ac);
200 acpi_bus_generate_event(device, event, (u32) ac->state); 203 acpi_bus_generate_event(device, event, (u32) ac->state);
201 break; 204 break;
@@ -223,7 +226,7 @@ static int acpi_ac_add(struct acpi_device *device)
223 return -ENOMEM; 226 return -ENOMEM;
224 memset(ac, 0, sizeof(struct acpi_ac)); 227 memset(ac, 0, sizeof(struct acpi_ac));
225 228
226 ac->handle = device->handle; 229 ac->device = device;
227 strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME); 230 strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
228 strcpy(acpi_device_class(device), ACPI_AC_CLASS); 231 strcpy(acpi_device_class(device), ACPI_AC_CLASS);
229 acpi_driver_data(device) = ac; 232 acpi_driver_data(device) = ac;
@@ -236,8 +239,8 @@ static int acpi_ac_add(struct acpi_device *device)
236 if (result) 239 if (result)
237 goto end; 240 goto end;
238 241
239 status = acpi_install_notify_handler(ac->handle, 242 status = acpi_install_notify_handler(device->handle,
240 ACPI_DEVICE_NOTIFY, acpi_ac_notify, 243 ACPI_ALL_NOTIFY, acpi_ac_notify,
241 ac); 244 ac);
242 if (ACPI_FAILURE(status)) { 245 if (ACPI_FAILURE(status)) {
243 result = -ENODEV; 246 result = -ENODEV;
@@ -268,8 +271,8 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
268 271
269 ac = (struct acpi_ac *)acpi_driver_data(device); 272 ac = (struct acpi_ac *)acpi_driver_data(device);
270 273
271 status = acpi_remove_notify_handler(ac->handle, 274 status = acpi_remove_notify_handler(device->handle,
272 ACPI_DEVICE_NOTIFY, acpi_ac_notify); 275 ACPI_ALL_NOTIFY, acpi_ac_notify);
273 276
274 acpi_ac_remove_fs(device); 277 acpi_ac_remove_fs(device);
275 278
@@ -280,17 +283,16 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
280 283
281static int __init acpi_ac_init(void) 284static int __init acpi_ac_init(void)
282{ 285{
283 int result = 0; 286 int result;
284 287
285 288
286 acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); 289 acpi_ac_dir = acpi_lock_ac_dir();
287 if (!acpi_ac_dir) 290 if (!acpi_ac_dir)
288 return -ENODEV; 291 return -ENODEV;
289 acpi_ac_dir->owner = THIS_MODULE;
290 292
291 result = acpi_bus_register_driver(&acpi_ac_driver); 293 result = acpi_bus_register_driver(&acpi_ac_driver);
292 if (result < 0) { 294 if (result < 0) {
293 remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); 295 acpi_unlock_ac_dir(acpi_ac_dir);
294 return -ENODEV; 296 return -ENODEV;
295 } 297 }
296 298
@@ -302,7 +304,7 @@ static void __exit acpi_ac_exit(void)
302 304
303 acpi_bus_unregister_driver(&acpi_ac_driver); 305 acpi_bus_unregister_driver(&acpi_ac_driver);
304 306
305 remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); 307 acpi_unlock_ac_dir(acpi_ac_dir);
306 308
307 return; 309 return;
308} 310}
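
The ac.c conversion above is the pattern repeated through the ACPI changes that follow: a driver stops caching a bare acpi_handle, keeps the struct acpi_device instead, and reaches the handle through device->handle; the notify path then uses the stored device directly instead of the reverse acpi_bus_get_device() lookup the old code needed. In miniature (struct and function names are illustrative):

#include <acpi/acpi_bus.h>

struct my_acpi_drv {
        struct acpi_device *device;     /* was: acpi_handle handle; */
        unsigned long state;
};

static acpi_status my_read_state(struct my_acpi_drv *drv)
{
        /* one pointer hop replaces the old reverse lookup */
        return acpi_evaluate_integer(drv->device->handle, "_PSR",
                                     NULL, &drv->state);
}
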
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index cd57372a6729..81e970adeab3 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -80,7 +80,7 @@ struct acpi_memory_info {
80}; 80};
81 81
82struct acpi_memory_device { 82struct acpi_memory_device {
83 acpi_handle handle; 83 struct acpi_device * device;
84 unsigned int state; /* State of the memory device */ 84 unsigned int state; /* State of the memory device */
85 struct list_head res_list; 85 struct list_head res_list;
86}; 86};
@@ -129,7 +129,7 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
129 struct acpi_memory_info *info, *n; 129 struct acpi_memory_info *info, *n;
130 130
131 131
132 status = acpi_walk_resources(mem_device->handle, METHOD_NAME__CRS, 132 status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS,
133 acpi_memory_get_resource, mem_device); 133 acpi_memory_get_resource, mem_device);
134 if (ACPI_FAILURE(status)) { 134 if (ACPI_FAILURE(status)) {
135 list_for_each_entry_safe(info, n, &mem_device->res_list, list) 135 list_for_each_entry_safe(info, n, &mem_device->res_list, list)
@@ -192,7 +192,7 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
192 192
193 193
194 /* Get device present/absent information from the _STA */ 194 /* Get device present/absent information from the _STA */
195 if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->handle, "_STA", 195 if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
196 NULL, &current_status))) 196 NULL, &current_status)))
197 return -ENODEV; 197 return -ENODEV;
198 /* 198 /*
@@ -222,7 +222,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
222 return result; 222 return result;
223 } 223 }
224 224
225 node = acpi_get_node(mem_device->handle); 225 node = acpi_get_node(mem_device->device->handle);
226 /* 226 /*
227 * Tell the VM there is more memory here... 227 * Tell the VM there is more memory here...
228 * Note: Assume that this function returns zero on success 228 * Note: Assume that this function returns zero on success
@@ -269,7 +269,7 @@ static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
269 arg_list.pointer = &arg; 269 arg_list.pointer = &arg;
270 arg.type = ACPI_TYPE_INTEGER; 270 arg.type = ACPI_TYPE_INTEGER;
271 arg.integer.value = 1; 271 arg.integer.value = 1;
272 status = acpi_evaluate_object(mem_device->handle, 272 status = acpi_evaluate_object(mem_device->device->handle,
273 "_EJ0", &arg_list, NULL); 273 "_EJ0", &arg_list, NULL);
274 /* Return on _EJ0 failure */ 274 /* Return on _EJ0 failure */
275 if (ACPI_FAILURE(status)) { 275 if (ACPI_FAILURE(status)) {
@@ -278,7 +278,7 @@ static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
278 } 278 }
279 279
280 /* Evaluate _STA to check if the device is disabled */ 280 /* Evaluate _STA to check if the device is disabled */
281 status = acpi_evaluate_integer(mem_device->handle, "_STA", 281 status = acpi_evaluate_integer(mem_device->device->handle, "_STA",
282 NULL, &current_status); 282 NULL, &current_status);
283 if (ACPI_FAILURE(status)) 283 if (ACPI_FAILURE(status))
284 return -ENODEV; 284 return -ENODEV;
@@ -398,7 +398,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
398 memset(mem_device, 0, sizeof(struct acpi_memory_device)); 398 memset(mem_device, 0, sizeof(struct acpi_memory_device));
399 399
400 INIT_LIST_HEAD(&mem_device->res_list); 400 INIT_LIST_HEAD(&mem_device->res_list);
401 mem_device->handle = device->handle; 401 mem_device->device = device;
402 sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME); 402 sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
403 sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS); 403 sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
404 acpi_driver_data(device) = mem_device; 404 acpi_driver_data(device) = mem_device;
@@ -466,7 +466,7 @@ static acpi_status is_memory_device(acpi_handle handle)
466 466
467 info = buffer.pointer; 467 info = buffer.pointer;
468 if (!(info->valid & ACPI_VALID_HID)) { 468 if (!(info->valid & ACPI_VALID_HID)) {
469 acpi_os_free(buffer.pointer); 469 kfree(buffer.pointer);
470 return AE_ERROR; 470 return AE_ERROR;
471 } 471 }
472 472
@@ -475,7 +475,7 @@ static acpi_status is_memory_device(acpi_handle handle)
475 (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID))) 475 (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
476 status = AE_ERROR; 476 status = AE_ERROR;
477 477
478 acpi_os_free(buffer.pointer); 478 kfree(buffer.pointer);
479 return status; 479 return status;
480} 480}
481 481
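
The acpi_os_free() -> kfree() substitutions above (and in the files that follow) rely on buffers filled with ACPI_ALLOCATE_BUFFER being kmalloc-backed. A sketch of the pairing, modelled on is_memory_device() above (the helper name is illustrative):

#include <linux/slab.h>
#include <acpi/acpi_bus.h>

static int my_check_hid(acpi_handle handle)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_device_info *info;

        if (ACPI_FAILURE(acpi_get_object_info(handle, &buffer)))
                return -ENODEV;

        info = buffer.pointer;
        /* ... inspect info->valid and info->hardware_id ... */
        kfree(buffer.pointer);          /* was: acpi_os_free(buffer.pointer) */
        return 0;
}
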
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index 055cfd5c8766..e9ee4c52a5f6 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -2,7 +2,7 @@
2 * asus_acpi.c - Asus Laptop ACPI Extras 2 * asus_acpi.c - Asus Laptop ACPI Extras
3 * 3 *
4 * 4 *
5 * Copyright (C) 2002, 2003, 2004 Julien Lerouge, Karol Kozimor 5 * Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -26,11 +26,8 @@
26 * Pontus Fuchs - Helper functions, cleanup 26 * Pontus Fuchs - Helper functions, cleanup
27 * Johann Wiesner - Small compile fixes 27 * Johann Wiesner - Small compile fixes
28 * John Belmonte - ACPI code for Toshiba laptop was a good starting point. 28 * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
29 * Éric Burghard - LED display support for W1N
29 * 30 *
30 * TODO:
31 * add Fn key status
32 * Add mode selection on module loading (parameter) -> still necessary?
33 * Complete display switching -- may require dirty hacks or calling _DOS?
34 */ 31 */
35 32
36#include <linux/kernel.h> 33#include <linux/kernel.h>
@@ -42,12 +39,14 @@
42#include <acpi/acpi_bus.h> 39#include <acpi/acpi_bus.h>
43#include <asm/uaccess.h> 40#include <asm/uaccess.h>
44 41
45#define ASUS_ACPI_VERSION "0.29" 42#define ASUS_ACPI_VERSION "0.30"
46 43
47#define PROC_ASUS "asus" //the directory 44#define PROC_ASUS "asus" //the directory
48#define PROC_MLED "mled" 45#define PROC_MLED "mled"
49#define PROC_WLED "wled" 46#define PROC_WLED "wled"
50#define PROC_TLED "tled" 47#define PROC_TLED "tled"
48#define PROC_BT "bluetooth"
49#define PROC_LEDD "ledd"
51#define PROC_INFO "info" 50#define PROC_INFO "info"
52#define PROC_LCD "lcd" 51#define PROC_LCD "lcd"
53#define PROC_BRN "brn" 52#define PROC_BRN "brn"
@@ -67,9 +66,10 @@
67/* 66/*
68 * Flags for hotk status 67 * Flags for hotk status
69 */ 68 */
70#define MLED_ON 0x01 //is MLED ON ? 69#define MLED_ON 0x01 //mail LED
71#define WLED_ON 0x02 70#define WLED_ON 0x02 //wireless LED
72#define TLED_ON 0x04 71#define TLED_ON 0x04 //touchpad LED
72#define BT_ON 0x08 //internal Bluetooth
73 73
74MODULE_AUTHOR("Julien Lerouge, Karol Kozimor"); 74MODULE_AUTHOR("Julien Lerouge, Karol Kozimor");
75MODULE_DESCRIPTION(ACPI_HOTK_NAME); 75MODULE_DESCRIPTION(ACPI_HOTK_NAME);
@@ -92,7 +92,10 @@ struct model_data {
92 char *wled_status; //node to handle wled reading_______A 92 char *wled_status; //node to handle wled reading_______A
93 char *mt_tled; //method to handle tled_____________R 93 char *mt_tled; //method to handle tled_____________R
94 char *tled_status; //node to handle tled reading_______A 94 char *tled_status; //node to handle tled reading_______A
95 char *mt_lcd_switch; //method to turn LCD ON/OFF_________A 95 char *mt_ledd; //method to handle LED display______R
96 char *mt_bt_switch; //method to switch Bluetooth on/off_R
97 char *bt_status; //no model currently supports this__?
98 char *mt_lcd_switch; //method to turn LCD on/off_________A
96 char *lcd_status; //node to read LCD panel state______A 99 char *lcd_status; //node to read LCD panel state______A
97 char *brightness_up; //method to set brightness up_______A 100 char *brightness_up; //method to set brightness up_______A
98 char *brightness_down; //guess what ?______________________A 101 char *brightness_down; //guess what ?______________________A
@@ -111,27 +114,31 @@ struct asus_hotk {
111 struct acpi_device *device; //the device we are in 114 struct acpi_device *device; //the device we are in
112 acpi_handle handle; //the handle of the hotk device 115 acpi_handle handle; //the handle of the hotk device
113 char status; //status of the hotk, for LEDs, ... 116 char status; //status of the hotk, for LEDs, ...
117 u32 ledd_status; //status of the LED display
114 struct model_data *methods; //methods available on the laptop 118 struct model_data *methods; //methods available on the laptop
115 u8 brightness; //brightness level 119 u8 brightness; //brightness level
116 enum { 120 enum {
117 A1x = 0, //A1340D, A1300F 121 A1x = 0, //A1340D, A1300F
118 A2x, //A2500H 122 A2x, //A2500H
123 A4G, //A4700G
119 D1x, //D1 124 D1x, //D1
120 L2D, //L2000D 125 L2D, //L2000D
121 L3C, //L3800C 126 L3C, //L3800C
122 L3D, //L3400D 127 L3D, //L3400D
123 L3H, //L3H, but also L2000E 128 L3H, //L3H, L2000E, L5D
124 L4R, //L4500R 129 L4R, //L4500R
125 L5x, //L5800C 130 L5x, //L5800C
126 L8L, //L8400L 131 L8L, //L8400L
127 M1A, //M1300A 132 M1A, //M1300A
128 M2E, //M2400E, L4400L 133 M2E, //M2400E, L4400L
129 M6N, //M6800N 134 M6N, //M6800N, W3400N
130 M6R, //M6700R 135 M6R, //M6700R, A3000G
131 P30, //Samsung P30 136 P30, //Samsung P30
132 S1x, //S1300A, but also L1400B and M2400A (L84F) 137 S1x, //S1300A, but also L1400B and M2400A (L84F)
133 S2x, //S200 (J1 reported), Victor MP-XP7210 138 S2x, //S200 (J1 reported), Victor MP-XP7210
134 xxN, //M2400N, M3700N, M5200N, S1300N, S5200N, W1OOON 139 W1N, //W1000N
140 W5A, //W5A
141 xxN, //M2400N, M3700N, M5200N, M6800N, S1300N, S5200N
135 //(Centrino) 142 //(Centrino)
136 END_MODEL 143 END_MODEL
137 } model; //Models currently supported 144 } model; //Models currently supported
@@ -149,17 +156,8 @@ struct asus_hotk {
149 156
150static struct model_data model_conf[END_MODEL] = { 157static struct model_data model_conf[END_MODEL] = {
151 /* 158 /*
152 * Those pathnames are relative to the HOTK / ATKD device :
153 * - mt_mled
154 * - mt_wled
155 * - brightness_set
156 * - brightness_get
157 * - display_set
158 * - display_get
159 *
160 * TODO I have seen a SWBX and AIBX method on some models, like L1400B, 159 * TODO I have seen a SWBX and AIBX method on some models, like L1400B,
161 * it seems to be a kind of switch, but what for ? 160 * it seems to be a kind of switch, but what for ?
162 *
163 */ 161 */
164 162
165 { 163 {
@@ -184,6 +182,16 @@ static struct model_data model_conf[END_MODEL] = {
184 .display_get = "\\INFB"}, 182 .display_get = "\\INFB"},
185 183
186 { 184 {
185 .name = "A4G",
186 .mt_mled = "MLED",
187/* WLED present, but not controlled by ACPI */
188 .mt_lcd_switch = xxN_PREFIX "_Q10",
189 .brightness_set = "SPLV",
190 .brightness_get = "GPLV",
191 .display_set = "SDSP",
192 .display_get = "\\ADVG"},
193
194 {
187 .name = "D1x", 195 .name = "D1x",
188 .mt_mled = "MLED", 196 .mt_mled = "MLED",
189 .mt_lcd_switch = "\\Q0D", 197 .mt_lcd_switch = "\\Q0D",
@@ -302,7 +310,8 @@ static struct model_data model_conf[END_MODEL] = {
302 .brightness_set = "SPLV", 310 .brightness_set = "SPLV",
303 .brightness_get = "GPLV", 311 .brightness_get = "GPLV",
304 .display_set = "SDSP", 312 .display_set = "SDSP",
305 .display_get = "\\_SB.PCI0.P0P1.VGA.GETD"}, 313 .display_get = "\\SSTE"},
314
306 { 315 {
307 .name = "M6R", 316 .name = "M6R",
308 .mt_mled = "MLED", 317 .mt_mled = "MLED",
@@ -312,7 +321,7 @@ static struct model_data model_conf[END_MODEL] = {
312 .brightness_set = "SPLV", 321 .brightness_set = "SPLV",
313 .brightness_get = "GPLV", 322 .brightness_get = "GPLV",
314 .display_set = "SDSP", 323 .display_set = "SDSP",
315 .display_get = "\\SSTE"}, 324 .display_get = "\\_SB.PCI0.P0P1.VGA.GETD"},
316 325
317 { 326 {
318 .name = "P30", 327 .name = "P30",
@@ -345,6 +354,28 @@ static struct model_data model_conf[END_MODEL] = {
345 .brightness_down = S2x_PREFIX "_Q0A"}, 354 .brightness_down = S2x_PREFIX "_Q0A"},
346 355
347 { 356 {
357 .name = "W1N",
358 .mt_mled = "MLED",
359 .mt_wled = "WLED",
360 .mt_ledd = "SLCM",
361 .mt_lcd_switch = xxN_PREFIX "_Q10",
362 .lcd_status = "\\BKLT",
363 .brightness_set = "SPLV",
364 .brightness_get = "GPLV",
365 .display_set = "SDSP",
366 .display_get = "\\ADVG"},
367
368 {
369 .name = "W5A",
370 .mt_bt_switch = "BLED",
371 .mt_wled = "WLED",
372 .mt_lcd_switch = xxN_PREFIX "_Q10",
373 .brightness_set = "SPLV",
374 .brightness_get = "GPLV",
375 .display_set = "SDSP",
376 .display_get = "\\ADVG"},
377
378 {
348 .name = "xxN", 379 .name = "xxN",
349 .mt_mled = "MLED", 380 .mt_mled = "MLED",
350/* WLED present, but not controlled by ACPI */ 381/* WLED present, but not controlled by ACPI */
@@ -563,6 +594,36 @@ proc_write_mled(struct file *file, const char __user * buffer,
563} 594}
564 595
565/* 596/*
597 * Proc handlers for LED display
598 */
599static int
600proc_read_ledd(char *page, char **start, off_t off, int count, int *eof,
601 void *data)
602{
603 return sprintf(page, "0x%08x\n", hotk->ledd_status);
604}
605
606static int
607proc_write_ledd(struct file *file, const char __user * buffer,
608 unsigned long count, void *data)
609{
610 int value;
611
612 count = parse_arg(buffer, count, &value);
613 if (count > 0) {
614 if (!write_acpi_int
615 (hotk->handle, hotk->methods->mt_ledd, value, NULL))
616 printk(KERN_WARNING
617 "Asus ACPI: LED display write failed\n");
618 else
619 hotk->ledd_status = (u32) value;
620 } else if (count < 0)
621 printk(KERN_WARNING "Asus ACPI: Error reading user input\n");
622
623 return count;
624}
625
626/*
566 * Proc handlers for WLED 627 * Proc handlers for WLED
567 */ 628 */
568static int 629static int
@@ -581,6 +642,25 @@ proc_write_wled(struct file *file, const char __user * buffer,
581} 642}
582 643
583/* 644/*
645 * Proc handlers for Bluetooth
646 */
647static int
648proc_read_bluetooth(char *page, char **start, off_t off, int count, int *eof,
649 void *data)
650{
651 return sprintf(page, "%d\n", read_led(hotk->methods->bt_status, BT_ON));
652}
653
654static int
655proc_write_bluetooth(struct file *file, const char __user * buffer,
656 unsigned long count, void *data)
657{
658 /* Note: mt_bt_switch controls both the internal Bluetooth adapter's
659 presence and its LED */
660 return write_led(buffer, count, hotk->methods->mt_bt_switch, BT_ON, 0);
661}
662
663/*
584 * Proc handlers for TLED 664 * Proc handlers for TLED
585 */ 665 */
586static int 666static int
@@ -876,6 +956,11 @@ static int asus_hotk_add_fs(struct acpi_device *device)
876 mode, device); 956 mode, device);
877 } 957 }
878 958
959 if (hotk->methods->mt_ledd) {
960 asus_proc_add(PROC_LEDD, &proc_write_ledd, &proc_read_ledd,
961 mode, device);
962 }
963
879 if (hotk->methods->mt_mled) { 964 if (hotk->methods->mt_mled) {
880 asus_proc_add(PROC_MLED, &proc_write_mled, &proc_read_mled, 965 asus_proc_add(PROC_MLED, &proc_write_mled, &proc_read_mled,
881 mode, device); 966 mode, device);
@@ -886,6 +971,11 @@ static int asus_hotk_add_fs(struct acpi_device *device)
886 mode, device); 971 mode, device);
887 } 972 }
888 973
974 if (hotk->methods->mt_bt_switch) {
975 asus_proc_add(PROC_BT, &proc_write_bluetooth,
976 &proc_read_bluetooth, mode, device);
977 }
978
889 /* 979 /*
890 * We need both read node and write method as LCD switch is also accessible 980 * We need both read node and write method as LCD switch is also accessible
891 * from keyboard 981 * from keyboard
@@ -919,6 +1009,10 @@ static int asus_hotk_remove_fs(struct acpi_device *device)
919 remove_proc_entry(PROC_MLED, acpi_device_dir(device)); 1009 remove_proc_entry(PROC_MLED, acpi_device_dir(device));
920 if (hotk->methods->mt_tled) 1010 if (hotk->methods->mt_tled)
921 remove_proc_entry(PROC_TLED, acpi_device_dir(device)); 1011 remove_proc_entry(PROC_TLED, acpi_device_dir(device));
1012 if (hotk->methods->mt_ledd)
1013 remove_proc_entry(PROC_LEDD, acpi_device_dir(device));
1014 if (hotk->methods->mt_bt_switch)
1015 remove_proc_entry(PROC_BT, acpi_device_dir(device));
922 if (hotk->methods->mt_lcd_switch && hotk->methods->lcd_status) 1016 if (hotk->methods->mt_lcd_switch && hotk->methods->lcd_status)
923 remove_proc_entry(PROC_LCD, acpi_device_dir(device)); 1017 remove_proc_entry(PROC_LCD, acpi_device_dir(device));
924 if ((hotk->methods->brightness_up 1018 if ((hotk->methods->brightness_up
@@ -951,6 +1045,65 @@ static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
951} 1045}
952 1046
953/* 1047/*
1048 * Match the model string to the list of supported models. Return END_MODEL if
1049 * no match or model is NULL.
1050 */
1051static int asus_model_match(char *model)
1052{
1053 if (model == NULL)
1054 return END_MODEL;
1055
1056 if (strncmp(model, "L3D", 3) == 0)
1057 return L3D;
1058 else if (strncmp(model, "L2E", 3) == 0 ||
1059 strncmp(model, "L3H", 3) == 0 || strncmp(model, "L5D", 3) == 0)
1060 return L3H;
1061 else if (strncmp(model, "L3", 2) == 0 || strncmp(model, "L2B", 3) == 0)
1062 return L3C;
1063 else if (strncmp(model, "L8L", 3) == 0)
1064 return L8L;
1065 else if (strncmp(model, "L4R", 3) == 0)
1066 return L4R;
1067 else if (strncmp(model, "M6N", 3) == 0 || strncmp(model, "W3N", 3) == 0)
1068 return M6N;
1069 else if (strncmp(model, "M6R", 3) == 0 || strncmp(model, "A3G", 3) == 0)
1070 return M6R;
1071 else if (strncmp(model, "M2N", 3) == 0 ||
1072 strncmp(model, "M3N", 3) == 0 ||
1073 strncmp(model, "M5N", 3) == 0 ||
1074 strncmp(model, "M6N", 3) == 0 ||
1075 strncmp(model, "S1N", 3) == 0 ||
1076 strncmp(model, "S5N", 3) == 0 || strncmp(model, "W1N", 3) == 0)
1077 return xxN;
1078 else if (strncmp(model, "M1", 2) == 0)
1079 return M1A;
1080 else if (strncmp(model, "M2", 2) == 0 || strncmp(model, "L4E", 3) == 0)
1081 return M2E;
1082 else if (strncmp(model, "L2", 2) == 0)
1083 return L2D;
1084 else if (strncmp(model, "L8", 2) == 0)
1085 return S1x;
1086 else if (strncmp(model, "D1", 2) == 0)
1087 return D1x;
1088 else if (strncmp(model, "A1", 2) == 0)
1089 return A1x;
1090 else if (strncmp(model, "A2", 2) == 0)
1091 return A2x;
1092 else if (strncmp(model, "J1", 2) == 0)
1093 return S2x;
1094 else if (strncmp(model, "L5", 2) == 0)
1095 return L5x;
1096 else if (strncmp(model, "A4G", 3) == 0)
1097 return A4G;
1098 else if (strncmp(model, "W1N", 3) == 0)
1099 return W1N;
1100 else if (strncmp(model, "W5A", 3) == 0)
1101 return W5A;
1102 else
1103 return END_MODEL;
1104}
1105
1106/*
954 * This function is used to initialize the hotk with right values. In this 1107 * This function is used to initialize the hotk with right values. In this
955 * method, we can make all the detection we want, and modify the hotk struct 1108 * method, we can make all the detection we want, and modify the hotk struct
956 */ 1109 */
@@ -960,6 +1113,7 @@ static int asus_hotk_get_info(void)
960 struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL }; 1113 struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
961 union acpi_object *model = NULL; 1114 union acpi_object *model = NULL;
962 int bsts_result; 1115 int bsts_result;
1116 char *string = NULL;
963 acpi_status status; 1117 acpi_status status;
964 1118
965 /* 1119 /*
@@ -989,114 +1143,73 @@ static int asus_hotk_get_info(void)
989 printk(KERN_NOTICE " BSTS called, 0x%02x returned\n", 1143 printk(KERN_NOTICE " BSTS called, 0x%02x returned\n",
990 bsts_result); 1144 bsts_result);
991 1145
992 /* This is unlikely with implicit return */
993 if (buffer.pointer == NULL)
994 return -EINVAL;
995
996 model = (union acpi_object *) buffer.pointer;
997 /* 1146 /*
998 * Samsung P30 has a device with a valid _HID whose INIT does not 1147 * Try to match the object returned by INIT to the specific model.
999 * return anything. It used to be possible to catch this exception, 1148 * Handle every possible object (or the lack thereof) the DSDT
1000 * but the implicit return code will now happily confuse the 1149 * writers might throw at us. When in trouble, we pass NULL to
1001 * driver. We assume that every ACPI_TYPE_STRING is a valid model 1150 * asus_model_match() and try something completely different.
1002 * identifier but it's still possible to get completely bogus data.
1003 */ 1151 */
1004 if (model->type == ACPI_TYPE_STRING) { 1152 if (buffer.pointer) {
1005 printk(KERN_NOTICE " %s model detected, ", model->string.pointer); 1153 model = (union acpi_object *)buffer.pointer;
1006 } else { 1154 switch (model->type) {
1007 if (asus_info && /* Samsung P30 */ 1155 case ACPI_TYPE_STRING:
1156 string = model->string.pointer;
1157 break;
1158 case ACPI_TYPE_BUFFER:
1159 string = model->buffer.pointer;
1160 break;
1161 default:
1162 kfree(model);
1163 break;
1164 }
1165 }
1166 hotk->model = asus_model_match(string);
1167 if (hotk->model == END_MODEL) { /* match failed */
1168 if (asus_info &&
1008 strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) { 1169 strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
1009 hotk->model = P30; 1170 hotk->model = P30;
1010 printk(KERN_NOTICE 1171 printk(KERN_NOTICE
1011 " Samsung P30 detected, supported\n"); 1172 " Samsung P30 detected, supported\n");
1012 } else { 1173 } else {
1013 hotk->model = M2E; 1174 hotk->model = M2E;
1014 printk(KERN_WARNING " no string returned by INIT\n"); 1175 printk(KERN_NOTICE " unsupported model %s, trying "
1015 printk(KERN_WARNING " trying default values, supply " 1176 "default values\n", string);
1016 "the developers with your DSDT\n"); 1177 printk(KERN_NOTICE
1178 " send /proc/acpi/dsdt to the developers\n");
1017 } 1179 }
1018 hotk->methods = &model_conf[hotk->model]; 1180 hotk->methods = &model_conf[hotk->model];
1019
1020 acpi_os_free(model);
1021
1022 return AE_OK; 1181 return AE_OK;
1023 } 1182 }
1024
1025 hotk->model = END_MODEL;
1026 if (strncmp(model->string.pointer, "L3D", 3) == 0)
1027 hotk->model = L3D;
1028 else if (strncmp(model->string.pointer, "L3H", 3) == 0 ||
1029 strncmp(model->string.pointer, "L2E", 3) == 0)
1030 hotk->model = L3H;
1031 else if (strncmp(model->string.pointer, "L3", 2) == 0 ||
1032 strncmp(model->string.pointer, "L2B", 3) == 0)
1033 hotk->model = L3C;
1034 else if (strncmp(model->string.pointer, "L8L", 3) == 0)
1035 hotk->model = L8L;
1036 else if (strncmp(model->string.pointer, "L4R", 3) == 0)
1037 hotk->model = L4R;
1038 else if (strncmp(model->string.pointer, "M6N", 3) == 0)
1039 hotk->model = M6N;
1040 else if (strncmp(model->string.pointer, "M6R", 3) == 0)
1041 hotk->model = M6R;
1042 else if (strncmp(model->string.pointer, "M2N", 3) == 0 ||
1043 strncmp(model->string.pointer, "M3N", 3) == 0 ||
1044 strncmp(model->string.pointer, "M5N", 3) == 0 ||
1045 strncmp(model->string.pointer, "M6N", 3) == 0 ||
1046 strncmp(model->string.pointer, "S1N", 3) == 0 ||
1047 strncmp(model->string.pointer, "S5N", 3) == 0 ||
1048 strncmp(model->string.pointer, "W1N", 3) == 0)
1049 hotk->model = xxN;
1050 else if (strncmp(model->string.pointer, "M1", 2) == 0)
1051 hotk->model = M1A;
1052 else if (strncmp(model->string.pointer, "M2", 2) == 0 ||
1053 strncmp(model->string.pointer, "L4E", 3) == 0)
1054 hotk->model = M2E;
1055 else if (strncmp(model->string.pointer, "L2", 2) == 0)
1056 hotk->model = L2D;
1057 else if (strncmp(model->string.pointer, "L8", 2) == 0)
1058 hotk->model = S1x;
1059 else if (strncmp(model->string.pointer, "D1", 2) == 0)
1060 hotk->model = D1x;
1061 else if (strncmp(model->string.pointer, "A1", 2) == 0)
1062 hotk->model = A1x;
1063 else if (strncmp(model->string.pointer, "A2", 2) == 0)
1064 hotk->model = A2x;
1065 else if (strncmp(model->string.pointer, "J1", 2) == 0)
1066 hotk->model = S2x;
1067 else if (strncmp(model->string.pointer, "L5", 2) == 0)
1068 hotk->model = L5x;
1069
1070 if (hotk->model == END_MODEL) {
1071 printk("unsupported, trying default values, supply the "
1072 "developers with your DSDT\n");
1073 hotk->model = M2E;
1074 } else {
1075 printk("supported\n");
1076 }
1077
1078 hotk->methods = &model_conf[hotk->model]; 1183 hotk->methods = &model_conf[hotk->model];
1184 printk(KERN_NOTICE " %s model detected, supported\n", string);
1079 1185
1080 /* Sort of per-model blacklist */ 1186 /* Sort of per-model blacklist */
1081 if (strncmp(model->string.pointer, "L2B", 3) == 0) 1187 if (strncmp(string, "L2B", 3) == 0)
1082 hotk->methods->lcd_status = NULL; 1188 hotk->methods->lcd_status = NULL;
1083 /* L2B is similar enough to L3C to use its settings, with this only 1189 /* L2B is similar enough to L3C to use its settings, with this only
1084 exception */ 1190 exception */
1085 else if (strncmp(model->string.pointer, "S5N", 3) == 0 || 1191 else if (strncmp(string, "A3G", 3) == 0)
1086 strncmp(model->string.pointer, "M5N", 3) == 0) 1192 hotk->methods->lcd_status = "\\BLFG";
1193 /* A3G is like M6R */
1194 else if (strncmp(string, "S5N", 3) == 0 ||
1195 strncmp(string, "M5N", 3) == 0 ||
1196 strncmp(string, "W3N", 3) == 0)
1087 hotk->methods->mt_mled = NULL; 1197 hotk->methods->mt_mled = NULL;
1088 /* S5N and M5N have no MLED */ 1198 /* S5N, M5N and W3N have no MLED */
1089 else if (strncmp(model->string.pointer, "M2N", 3) == 0 || 1199 else if (strncmp(string, "L5D", 3) == 0)
1090 strncmp(model->string.pointer, "W1N", 3) == 0) 1200 hotk->methods->mt_wled = NULL;
1201 /* L5D's WLED is not controlled by ACPI */
1202 else if (strncmp(string, "M2N", 3) == 0 ||
1203 strncmp(string, "S1N", 3) == 0)
1091 hotk->methods->mt_wled = "WLED"; 1204 hotk->methods->mt_wled = "WLED";
1092 /* M2N and W1N have a usable WLED */ 1205 /* M2N and S1N have a usable WLED */
1093 else if (asus_info) { 1206 else if (asus_info) {
1094 if (strncmp(asus_info->oem_table_id, "L1", 2) == 0) 1207 if (strncmp(asus_info->oem_table_id, "L1", 2) == 0)
1095 hotk->methods->mled_status = NULL; 1208 hotk->methods->mled_status = NULL;
1096 /* S1300A reports L84F, but L1400B too, account for that */ 1209 /* S1300A reports L84F, but L1400B too, account for that */
1097 } 1210 }
1098 1211
1099 acpi_os_free(model); 1212 kfree(model);
1100 1213
1101 return AE_OK; 1214 return AE_OK;
1102} 1215}
@@ -1164,8 +1277,7 @@ static int asus_hotk_add(struct acpi_device *device)
1164 /* For laptops without GPLV: init the hotk->brightness value */ 1277 /* For laptops without GPLV: init the hotk->brightness value */
1165 if ((!hotk->methods->brightness_get) 1278 if ((!hotk->methods->brightness_get)
1166 && (!hotk->methods->brightness_status) 1279 && (!hotk->methods->brightness_status)
1167 && (hotk->methods->brightness_up 1280 && (hotk->methods->brightness_up && hotk->methods->brightness_down)) {
1168 && hotk->methods->brightness_down)) {
1169 status = 1281 status =
1170 acpi_evaluate_object(NULL, hotk->methods->brightness_down, 1282 acpi_evaluate_object(NULL, hotk->methods->brightness_down,
1171 NULL, NULL); 1283 NULL, NULL);
@@ -1184,6 +1296,9 @@ static int asus_hotk_add(struct acpi_device *device)
1184 1296
1185 asus_hotk_found = 1; 1297 asus_hotk_found = 1;
1186 1298
1299 /* LED display is off by default */
1300 hotk->ledd_status = 0xFFF;
1301
1187 end: 1302 end:
1188 if (result) { 1303 if (result) {
1189 kfree(hotk); 1304 kfree(hotk);
@@ -1256,7 +1371,7 @@ static void __exit asus_acpi_exit(void)
1256 acpi_bus_unregister_driver(&asus_hotk_driver); 1371 acpi_bus_unregister_driver(&asus_hotk_driver);
1257 remove_proc_entry(PROC_ASUS, acpi_root_dir); 1372 remove_proc_entry(PROC_ASUS, acpi_root_dir);
1258 1373
1259 acpi_os_free(asus_info); 1374 kfree(asus_info);
1260 1375
1261 return; 1376 return;
1262} 1377}
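
One property of the asus_model_match() helper added above deserves a note: every test is a prefix strncmp() evaluated top to bottom, so specific three-character models must precede their two-character families, and a string already caught by an earlier group shadows any later dedicated check ("W1N", for instance, is claimed by the xxN group before the dedicated W1N test is reached). A few illustrative calls, as a sketch rather than code from the patch:

static void __init model_match_examples(void)
{
        printk("%d\n", asus_model_match("L3D"));    /* L3D: before the "L3" family */
        printk("%d\n", asus_model_match("L3800C")); /* L3C: the generic "L3" rule */
        printk("%d\n", asus_model_match("W1N"));    /* xxN: shadows the W1N entry */
        printk("%d\n", asus_model_match(NULL));     /* END_MODEL */
}
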
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 00b0728efe82..24bf4dca88cc 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -59,6 +59,9 @@ ACPI_MODULE_NAME("acpi_battery")
59MODULE_DESCRIPTION(ACPI_BATTERY_DRIVER_NAME); 59MODULE_DESCRIPTION(ACPI_BATTERY_DRIVER_NAME);
60MODULE_LICENSE("GPL"); 60MODULE_LICENSE("GPL");
61 61
62extern struct proc_dir_entry *acpi_lock_battery_dir(void);
63extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
64
62static int acpi_battery_add(struct acpi_device *device); 65static int acpi_battery_add(struct acpi_device *device);
63static int acpi_battery_remove(struct acpi_device *device, int type); 66static int acpi_battery_remove(struct acpi_device *device, int type);
64 67
@@ -108,7 +111,7 @@ struct acpi_battery_trips {
108}; 111};
109 112
110struct acpi_battery { 113struct acpi_battery {
111 acpi_handle handle; 114 struct acpi_device * device;
112 struct acpi_battery_flags flags; 115 struct acpi_battery_flags flags;
113 struct acpi_battery_trips trips; 116 struct acpi_battery_trips trips;
114 unsigned long alarm; 117 unsigned long alarm;
@@ -138,7 +141,7 @@ acpi_battery_get_info(struct acpi_battery *battery,
138 141
139 /* Evaluate _BIF */ 142 /* Evaluate _BIF */
140 143
141 status = acpi_evaluate_object(battery->handle, "_BIF", NULL, &buffer); 144 status = acpi_evaluate_object(battery->device->handle, "_BIF", NULL, &buffer);
142 if (ACPI_FAILURE(status)) { 145 if (ACPI_FAILURE(status)) {
143 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BIF")); 146 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BIF"));
144 return -ENODEV; 147 return -ENODEV;
@@ -171,7 +174,7 @@ acpi_battery_get_info(struct acpi_battery *battery,
171 } 174 }
172 175
173 end: 176 end:
174 acpi_os_free(buffer.pointer); 177 kfree(buffer.pointer);
175 178
176 if (!result) 179 if (!result)
177 (*bif) = (struct acpi_battery_info *)data.pointer; 180 (*bif) = (struct acpi_battery_info *)data.pointer;
@@ -198,7 +201,7 @@ acpi_battery_get_status(struct acpi_battery *battery,
198 201
199 /* Evaluate _BST */ 202 /* Evaluate _BST */
200 203
201 status = acpi_evaluate_object(battery->handle, "_BST", NULL, &buffer); 204 status = acpi_evaluate_object(battery->device->handle, "_BST", NULL, &buffer);
202 if (ACPI_FAILURE(status)) { 205 if (ACPI_FAILURE(status)) {
203 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BST")); 206 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BST"));
204 return -ENODEV; 207 return -ENODEV;
@@ -231,7 +234,7 @@ acpi_battery_get_status(struct acpi_battery *battery,
231 } 234 }
232 235
233 end: 236 end:
234 acpi_os_free(buffer.pointer); 237 kfree(buffer.pointer);
235 238
236 if (!result) 239 if (!result)
237 (*bst) = (struct acpi_battery_status *)data.pointer; 240 (*bst) = (struct acpi_battery_status *)data.pointer;
@@ -255,7 +258,7 @@ acpi_battery_set_alarm(struct acpi_battery *battery, unsigned long alarm)
255 258
256 arg0.integer.value = alarm; 259 arg0.integer.value = alarm;
257 260
258 status = acpi_evaluate_object(battery->handle, "_BTP", &arg_list, NULL); 261 status = acpi_evaluate_object(battery->device->handle, "_BTP", &arg_list, NULL);
259 if (ACPI_FAILURE(status)) 262 if (ACPI_FAILURE(status))
260 return -ENODEV; 263 return -ENODEV;
261 264
@@ -278,9 +281,7 @@ static int acpi_battery_check(struct acpi_battery *battery)
278 if (!battery) 281 if (!battery)
279 return -EINVAL; 282 return -EINVAL;
280 283
281 result = acpi_bus_get_device(battery->handle, &device); 284 device = battery->device;
282 if (result)
283 return result;
284 285
285 result = acpi_bus_get_status(device); 286 result = acpi_bus_get_status(device);
286 if (result) 287 if (result)
@@ -305,7 +306,7 @@ static int acpi_battery_check(struct acpi_battery *battery)
305 306
306 /* See if alarms are supported, and if so, set default */ 307 /* See if alarms are supported, and if so, set default */
307 308
308 status = acpi_get_handle(battery->handle, "_BTP", &handle); 309 status = acpi_get_handle(battery->device->handle, "_BTP", &handle);
309 if (ACPI_SUCCESS(status)) { 310 if (ACPI_SUCCESS(status)) {
310 battery->flags.alarm = 1; 311 battery->flags.alarm = 1;
311 acpi_battery_set_alarm(battery, battery->trips.warning); 312 acpi_battery_set_alarm(battery, battery->trips.warning);
@@ -662,12 +663,13 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
662 if (!battery) 663 if (!battery)
663 return; 664 return;
664 665
665 if (acpi_bus_get_device(handle, &device)) 666 device = battery->device;
666 return;
667 667
668 switch (event) { 668 switch (event) {
669 case ACPI_BATTERY_NOTIFY_STATUS: 669 case ACPI_BATTERY_NOTIFY_STATUS:
670 case ACPI_BATTERY_NOTIFY_INFO: 670 case ACPI_BATTERY_NOTIFY_INFO:
671 case ACPI_NOTIFY_BUS_CHECK:
672 case ACPI_NOTIFY_DEVICE_CHECK:
671 acpi_battery_check(battery); 673 acpi_battery_check(battery);
672 acpi_bus_generate_event(device, event, battery->flags.present); 674 acpi_bus_generate_event(device, event, battery->flags.present);
673 break; 675 break;
@@ -695,7 +697,7 @@ static int acpi_battery_add(struct acpi_device *device)
695 return -ENOMEM; 697 return -ENOMEM;
696 memset(battery, 0, sizeof(struct acpi_battery)); 698 memset(battery, 0, sizeof(struct acpi_battery));
697 699
698 battery->handle = device->handle; 700 battery->device = device;
699 strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME); 701 strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
700 strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS); 702 strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
701 acpi_driver_data(device) = battery; 703 acpi_driver_data(device) = battery;
@@ -708,8 +710,8 @@ static int acpi_battery_add(struct acpi_device *device)
708 if (result) 710 if (result)
709 goto end; 711 goto end;
710 712
711 status = acpi_install_notify_handler(battery->handle, 713 status = acpi_install_notify_handler(device->handle,
712 ACPI_DEVICE_NOTIFY, 714 ACPI_ALL_NOTIFY,
713 acpi_battery_notify, battery); 715 acpi_battery_notify, battery);
714 if (ACPI_FAILURE(status)) { 716 if (ACPI_FAILURE(status)) {
715 result = -ENODEV; 717 result = -ENODEV;
@@ -740,8 +742,8 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
740 742
741 battery = (struct acpi_battery *)acpi_driver_data(device); 743 battery = (struct acpi_battery *)acpi_driver_data(device);
742 744
743 status = acpi_remove_notify_handler(battery->handle, 745 status = acpi_remove_notify_handler(device->handle,
744 ACPI_DEVICE_NOTIFY, 746 ACPI_ALL_NOTIFY,
745 acpi_battery_notify); 747 acpi_battery_notify);
746 748
747 acpi_battery_remove_fs(device); 749 acpi_battery_remove_fs(device);
@@ -753,17 +755,15 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
753 755
754static int __init acpi_battery_init(void) 756static int __init acpi_battery_init(void)
755{ 757{
756 int result = 0; 758 int result;
757
758 759
759 acpi_battery_dir = proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); 760 acpi_battery_dir = acpi_lock_battery_dir();
760 if (!acpi_battery_dir) 761 if (!acpi_battery_dir)
761 return -ENODEV; 762 return -ENODEV;
762 acpi_battery_dir->owner = THIS_MODULE;
763 763
764 result = acpi_bus_register_driver(&acpi_battery_driver); 764 result = acpi_bus_register_driver(&acpi_battery_driver);
765 if (result < 0) { 765 if (result < 0) {
766 remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); 766 acpi_unlock_battery_dir(acpi_battery_dir);
767 return -ENODEV; 767 return -ENODEV;
768 } 768 }
769 769
@@ -775,7 +775,7 @@ static void __exit acpi_battery_exit(void)
775 775
776 acpi_bus_unregister_driver(&acpi_battery_driver); 776 acpi_bus_unregister_driver(&acpi_battery_driver);
777 777
778 remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); 778 acpi_unlock_battery_dir(acpi_battery_dir);
779 779
780 return; 780 return;
781} 781}
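
Registering with ACPI_ALL_NOTIFY instead of ACPI_DEVICE_NOTIFY, as ac.c and battery.c now do, adds the system notify range to the subscription; that is what delivers the new bus-check and device-check cases to the handlers. The shape of the pattern (handler and names are illustrative):

static void my_notify(acpi_handle handle, u32 event, void *data)
{
        switch (event) {
        case ACPI_NOTIFY_BUS_CHECK:     /* system notify, newly delivered */
        case ACPI_NOTIFY_DEVICE_CHECK:  /* system notify, newly delivered */
        case 0x80:                      /* a device-specific status event */
                /* re-read hardware state, forward a bus event, etc. */
                break;
        }
}

static acpi_status my_register(struct acpi_device *device)
{
        /* ACPI_ALL_NOTIFY covers both the system and device ranges */
        return acpi_install_notify_handler(device->handle, ACPI_ALL_NOTIFY,
                                           my_notify, device);
}
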
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 02594639c4d9..fd1ba05eab68 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -82,7 +82,6 @@ static struct acpi_driver acpi_button_driver = {
82}; 82};
83 83
84struct acpi_button { 84struct acpi_button {
85 acpi_handle handle;
86 struct acpi_device *device; /* Fixed button kludge */ 85 struct acpi_device *device; /* Fixed button kludge */
87 u8 type; 86 u8 type;
88 unsigned long pushed; 87 unsigned long pushed;
@@ -137,7 +136,7 @@ static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
137 if (!button || !button->device) 136 if (!button || !button->device)
138 return 0; 137 return 0;
139 138
140 status = acpi_evaluate_integer(button->handle, "_LID", NULL, &state); 139 status = acpi_evaluate_integer(button->device->handle, "_LID", NULL, &state);
141 if (ACPI_FAILURE(status)) { 140 if (ACPI_FAILURE(status)) {
142 seq_printf(seq, "state: unsupported\n"); 141 seq_printf(seq, "state: unsupported\n");
143 } else { 142 } else {
@@ -282,7 +281,7 @@ static acpi_status acpi_button_notify_fixed(void *data)
282 if (!button) 281 if (!button)
283 return AE_BAD_PARAMETER; 282 return AE_BAD_PARAMETER;
284 283
285 acpi_button_notify(button->handle, ACPI_BUTTON_NOTIFY_STATUS, button); 284 acpi_button_notify(button->device->handle, ACPI_BUTTON_NOTIFY_STATUS, button);
286 285
287 return AE_OK; 286 return AE_OK;
288} 287}
@@ -303,7 +302,6 @@ static int acpi_button_add(struct acpi_device *device)
303 memset(button, 0, sizeof(struct acpi_button)); 302 memset(button, 0, sizeof(struct acpi_button));
304 303
305 button->device = device; 304 button->device = device;
306 button->handle = device->handle;
307 acpi_driver_data(device) = button; 305 acpi_driver_data(device) = button;
308 306
309 /* 307 /*
@@ -362,7 +360,7 @@ static int acpi_button_add(struct acpi_device *device)
362 button); 360 button);
363 break; 361 break;
364 default: 362 default:
365 status = acpi_install_notify_handler(button->handle, 363 status = acpi_install_notify_handler(device->handle,
366 ACPI_DEVICE_NOTIFY, 364 ACPI_DEVICE_NOTIFY,
367 acpi_button_notify, 365 acpi_button_notify,
368 button); 366 button);
@@ -420,7 +418,7 @@ static int acpi_button_remove(struct acpi_device *device, int type)
420 acpi_button_notify_fixed); 418 acpi_button_notify_fixed);
421 break; 419 break;
422 default: 420 default:
423 status = acpi_remove_notify_handler(button->handle, 421 status = acpi_remove_notify_handler(device->handle,
424 ACPI_DEVICE_NOTIFY, 422 ACPI_DEVICE_NOTIFY,
425 acpi_button_notify); 423 acpi_button_notify);
426 break; 424 break;
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
new file mode 100644
index 000000000000..574a75a166c5
--- /dev/null
+++ b/drivers/acpi/cm_sbs.c
@@ -0,0 +1,131 @@
1/*
2 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or (at
7 * your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/acpi.h>
25#include <linux/types.h>
26#include <linux/proc_fs.h>
27#include <linux/seq_file.h>
28#include <acpi/acpi_bus.h>
29#include <acpi/acpi_drivers.h>
30#include <acpi/acmacros.h>
31#include <acpi/actypes.h>
32#include <acpi/acutils.h>
33
34ACPI_MODULE_NAME("cm_sbs")
35#define ACPI_AC_CLASS "ac_adapter"
36#define ACPI_BATTERY_CLASS "battery"
37#define ACPI_SBS_COMPONENT 0x00080000
38#define _COMPONENT ACPI_SBS_COMPONENT
39static struct proc_dir_entry *acpi_ac_dir;
40static struct proc_dir_entry *acpi_battery_dir;
41
42static struct semaphore cm_sbs_sem;
43
44static int lock_ac_dir_cnt = 0;
45static int lock_battery_dir_cnt = 0;
46
47struct proc_dir_entry *acpi_lock_ac_dir(void)
48{
49
50 down(&cm_sbs_sem);
51 if (!acpi_ac_dir) {
52 acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
53 }
54 if (acpi_ac_dir) {
55 lock_ac_dir_cnt++;
56 } else {
57 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
58 "Cannot create %s\n", ACPI_AC_CLASS));
59 }
60 up(&cm_sbs_sem);
61 return acpi_ac_dir;
62}
63
64EXPORT_SYMBOL(acpi_lock_ac_dir);
65
66void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
67{
68
69 down(&cm_sbs_sem);
70 if (acpi_ac_dir_param) {
71 lock_ac_dir_cnt--;
72 }
73 if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
74 remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
75 acpi_ac_dir = 0;
76 }
77 up(&cm_sbs_sem);
78}
79
80EXPORT_SYMBOL(acpi_unlock_ac_dir);
81
82struct proc_dir_entry *acpi_lock_battery_dir(void)
83{
84
85 down(&cm_sbs_sem);
86 if (!acpi_battery_dir) {
87 acpi_battery_dir =
88 proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
89 }
90 if (acpi_battery_dir) {
91 lock_battery_dir_cnt++;
92 } else {
93 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
94 "Cannot create %s\n", ACPI_BATTERY_CLASS));
95 }
96 up(&cm_sbs_sem);
97 return acpi_battery_dir;
98}
99
100EXPORT_SYMBOL(acpi_lock_battery_dir);
101
102void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
103{
104
105 down(&cm_sbs_sem);
106 if (acpi_battery_dir_param) {
107 lock_battery_dir_cnt--;
108 }
109 if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
110 && acpi_battery_dir) {
111 remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
112 acpi_battery_dir = 0;
113 }
114 up(&cm_sbs_sem);
115 return;
116}
117
118EXPORT_SYMBOL(acpi_unlock_battery_dir);
119
120static int __init acpi_cm_sbs_init(void)
121{
122
123 if (acpi_disabled)
124 return 0;
125
126 init_MUTEX(&cm_sbs_sem);
127
128 return 0;
129}
130
131subsys_initcall(acpi_cm_sbs_init);
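
cm_sbs.c exists so two drivers can publish under one procfs directory: the directory is created on the first lock and removed only with the last unlock, which is exactly how the reworked ac.c and battery.c inits above consume it. A hypothetical additional user would follow the same shape:

static struct proc_dir_entry *my_dir;   /* hypothetical driver */

static int __init my_init(void)
{
        my_dir = acpi_lock_battery_dir();
        if (!my_dir)
                return -ENODEV;
        /* ... create this driver's entries under my_dir ... */
        return 0;
}

static void __exit my_exit(void)
{
        /* ... remove this driver's entries ... */
        acpi_unlock_battery_dir(my_dir);        /* dir freed with last user */
}
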
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 7f7e41d40a3b..871aa520ece7 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -236,7 +236,7 @@ container_walk_namespace_cb(acpi_handle handle,
236 } 236 }
237 237
238 end: 238 end:
239 acpi_os_free(buffer.pointer); 239 kfree(buffer.pointer);
240 240
241 return AE_OK; 241 return AE_OK;
242} 242}
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 38acc69b21bc..daed2460924d 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -64,7 +64,7 @@ static struct acpi_driver acpi_fan_driver = {
64}; 64};
65 65
66struct acpi_fan { 66struct acpi_fan {
67 acpi_handle handle; 67 struct acpi_device * device;
68}; 68};
69 69
70/* -------------------------------------------------------------------------- 70/* --------------------------------------------------------------------------
@@ -80,7 +80,7 @@ static int acpi_fan_read_state(struct seq_file *seq, void *offset)
80 80
81 81
82 if (fan) { 82 if (fan) {
83 if (acpi_bus_get_power(fan->handle, &state)) 83 if (acpi_bus_get_power(fan->device->handle, &state))
84 seq_printf(seq, "status: ERROR\n"); 84 seq_printf(seq, "status: ERROR\n");
85 else 85 else
86 seq_printf(seq, "status: %s\n", 86 seq_printf(seq, "status: %s\n",
@@ -112,7 +112,7 @@ acpi_fan_write_state(struct file *file, const char __user * buffer,
112 112
113 state_string[count] = '\0'; 113 state_string[count] = '\0';
114 114
115 result = acpi_bus_set_power(fan->handle, 115 result = acpi_bus_set_power(fan->device->handle,
116 simple_strtoul(state_string, NULL, 0)); 116 simple_strtoul(state_string, NULL, 0));
117 if (result) 117 if (result)
118 return result; 118 return result;
@@ -191,12 +191,12 @@ static int acpi_fan_add(struct acpi_device *device)
191 return -ENOMEM; 191 return -ENOMEM;
192 memset(fan, 0, sizeof(struct acpi_fan)); 192 memset(fan, 0, sizeof(struct acpi_fan));
193 193
194 fan->handle = device->handle; 194 fan->device = device;
195 strcpy(acpi_device_name(device), "Fan"); 195 strcpy(acpi_device_name(device), "Fan");
196 strcpy(acpi_device_class(device), ACPI_FAN_CLASS); 196 strcpy(acpi_device_class(device), ACPI_FAN_CLASS);
197 acpi_driver_data(device) = fan; 197 acpi_driver_data(device) = fan;
198 198
199 result = acpi_bus_get_power(fan->handle, &state); 199 result = acpi_bus_get_power(device->handle, &state);
200 if (result) { 200 if (result) {
201 printk(KERN_ERR PREFIX "Reading power state\n"); 201 printk(KERN_ERR PREFIX "Reading power state\n");
202 goto end; 202 goto end;
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8daef57b994c..10f160dc75b1 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -152,7 +152,7 @@ static int get_root_bridge_busnr(acpi_handle handle)
152 bbn = bus; 152 bbn = bus;
153 } 153 }
154 exit: 154 exit:
155 acpi_os_free(buffer.pointer); 155 kfree(buffer.pointer);
156 return (int)bbn; 156 return (int)bbn;
157} 157}
158 158
@@ -192,7 +192,7 @@ find_pci_rootbridge(acpi_handle handle, u32 lvl, void *context, void **rv)
192 find->handle = handle; 192 find->handle = handle;
193 status = AE_OK; 193 status = AE_OK;
194 exit: 194 exit:
195 acpi_os_free(buffer.pointer); 195 kfree(buffer.pointer);
196 return status; 196 return status;
197} 197}
198 198
@@ -224,7 +224,7 @@ do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv)
224 info = buffer.pointer; 224 info = buffer.pointer;
225 if (info->address == find->address) 225 if (info->address == find->address)
226 find->handle = handle; 226 find->handle = handle;
227 acpi_os_free(buffer.pointer); 227 kfree(buffer.pointer);
228 } 228 }
229 return AE_OK; 229 return AE_OK;
230} 230}
@@ -330,7 +330,7 @@ static int acpi_platform_notify(struct device *dev)
330 330
331 acpi_get_name(dev->firmware_data, ACPI_FULL_PATHNAME, &buffer); 331 acpi_get_name(dev->firmware_data, ACPI_FULL_PATHNAME, &buffer);
332 DBG("Device %s -> %s\n", dev->bus_id, (char *)buffer.pointer); 332 DBG("Device %s -> %s\n", dev->bus_id, (char *)buffer.pointer);
333 acpi_os_free(buffer.pointer); 333 kfree(buffer.pointer);
334 } else 334 } else
335 DBG("Device %s -> No ACPI support\n", dev->bus_id); 335 DBG("Device %s -> No ACPI support\n", dev->bus_id);
336#endif 336#endif
diff --git a/drivers/acpi/i2c_ec.c b/drivers/acpi/i2c_ec.c
new file mode 100644
index 000000000000..84239d51dc0c
--- /dev/null
+++ b/drivers/acpi/i2c_ec.c
@@ -0,0 +1,406 @@
1/*
2 * SMBus driver for ACPI Embedded Controller ($Revision: 1.3 $)
3 *
4 * Copyright (c) 2002, 2005 Ducrot Bruno
5 * Copyright (c) 2005 Rich Townsend (tiny hacks & tweaks)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation version 2.
10 */
11
12#include <linux/version.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/stddef.h>
17#include <linux/sched.h>
18#include <linux/init.h>
19#include <linux/i2c.h>
20#include <linux/acpi.h>
21#include <linux/delay.h>
22
23#include "i2c_ec.h"
24
25#define xudelay(t) udelay(t)
26#define xmsleep(t) msleep(t)
27
28#define ACPI_EC_HC_COMPONENT 0x00080000
29#define ACPI_EC_HC_CLASS "ec_hc_smbus"
30#define ACPI_EC_HC_HID "ACPI0001"
31#define ACPI_EC_HC_DRIVER_NAME "ACPI EC HC smbus driver"
32#define ACPI_EC_HC_DEVICE_NAME "EC HC smbus"
33
34#define _COMPONENT ACPI_EC_HC_COMPONENT
35
36ACPI_MODULE_NAME("acpi_smbus")
37
38static int acpi_ec_hc_add(struct acpi_device *device);
39static int acpi_ec_hc_remove(struct acpi_device *device, int type);
40
41static struct acpi_driver acpi_ec_hc_driver = {
42 .name = ACPI_EC_HC_DRIVER_NAME,
43 .class = ACPI_EC_HC_CLASS,
44 .ids = ACPI_EC_HC_HID,
45 .ops = {
46 .add = acpi_ec_hc_add,
47 .remove = acpi_ec_hc_remove,
48 },
49};
50
51/* Various bit mask for EC_SC (R) */
52#define OBF 0x01
53#define IBF 0x02
54#define CMD 0x08
55#define BURST 0x10
56#define SCI_EVT 0x20
57#define SMI_EVT 0x40
58
59/* Commands for EC_SC (W) */
60#define RD_EC 0x80
61#define WR_EC 0x81
62#define BE_EC 0x82
63#define BD_EC 0x83
64#define QR_EC 0x84
65
66/*
67 * ACPI 2.0 chapter 13 SMBus 2.0 EC register model
68 */
69
70#define ACPI_EC_SMB_PRTCL 0x00 /* protocol, PEC */
71#define ACPI_EC_SMB_STS 0x01 /* status */
72#define ACPI_EC_SMB_ADDR 0x02 /* address */
73#define ACPI_EC_SMB_CMD 0x03 /* command */
74#define ACPI_EC_SMB_DATA 0x04 /* 32 data registers */
75#define ACPI_EC_SMB_BCNT 0x24 /* number of data bytes */
76#define ACPI_EC_SMB_ALRM_A 0x25 /* alarm address */
77#define ACPI_EC_SMB_ALRM_D 0x26 /* 2 bytes alarm data */
78
79#define ACPI_EC_SMB_STS_DONE 0x80
80#define ACPI_EC_SMB_STS_ALRM 0x40
81#define ACPI_EC_SMB_STS_RES 0x20
82#define ACPI_EC_SMB_STS_STATUS 0x1f
83
84#define ACPI_EC_SMB_STATUS_OK 0x00
85#define ACPI_EC_SMB_STATUS_FAIL 0x07
86#define ACPI_EC_SMB_STATUS_DNAK 0x10
87#define ACPI_EC_SMB_STATUS_DERR 0x11
88#define ACPI_EC_SMB_STATUS_CMD_DENY 0x12
89#define ACPI_EC_SMB_STATUS_UNKNOWN 0x13
90#define ACPI_EC_SMB_STATUS_ACC_DENY 0x17
91#define ACPI_EC_SMB_STATUS_TIMEOUT 0x18
92#define ACPI_EC_SMB_STATUS_NOTSUP 0x19
93#define ACPI_EC_SMB_STATUS_BUSY 0x1A
94#define ACPI_EC_SMB_STATUS_PEC 0x1F
95
96#define ACPI_EC_SMB_PRTCL_WRITE 0x00
97#define ACPI_EC_SMB_PRTCL_READ 0x01
98#define ACPI_EC_SMB_PRTCL_QUICK 0x02
99#define ACPI_EC_SMB_PRTCL_BYTE 0x04
100#define ACPI_EC_SMB_PRTCL_BYTE_DATA 0x06
101#define ACPI_EC_SMB_PRTCL_WORD_DATA 0x08
102#define ACPI_EC_SMB_PRTCL_BLOCK_DATA 0x0a
103#define ACPI_EC_SMB_PRTCL_PROC_CALL 0x0c
104#define ACPI_EC_SMB_PRTCL_BLOCK_PROC_CALL 0x0d
105#define ACPI_EC_SMB_PRTCL_I2C_BLOCK_DATA 0x4a
106#define ACPI_EC_SMB_PRTCL_PEC 0x80
107
108/* Length of pre/post transaction sleep (msec) */
109#define ACPI_EC_SMB_TRANSACTION_SLEEP 1
110#define ACPI_EC_SMB_ACCESS_SLEEP1 1
111#define ACPI_EC_SMB_ACCESS_SLEEP2 10
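/*
 * A transaction against this register file (as implemented by
 * acpi_ec_smb_access() below) proceeds roughly as follows:
 *
 *	write the command/data/count registers as the protocol requires;
 *	write the 7-bit target address, shifted left by one, to
 *	ACPI_EC_SMB_ADDR;
 *	write the protocol code to ACPI_EC_SMB_PRTCL, which starts the
 *	transaction;
 *	poll ACPI_EC_SMB_STS for ACPI_EC_SMB_STS_DONE with a zero status
 *	field, then read any result from the data registers.
 */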
112
113static int acpi_ec_smb_read(struct acpi_ec_smbus *smbus, u8 address, u8 * data)
114{
115 u8 val;
116 int err;
117
118 err = ec_read(smbus->base + address, &val);
119 if (!err) {
120 *data = val;
121 }
122 xmsleep(ACPI_EC_SMB_TRANSACTION_SLEEP);
123 return (err);
124}
125
126static int acpi_ec_smb_write(struct acpi_ec_smbus *smbus, u8 address, u8 data)
127{
128 int err;
129
130 err = ec_write(smbus->base + address, data);
131 return (err);
132}
133
134static int
135acpi_ec_smb_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
136 char read_write, u8 command, int size,
137 union i2c_smbus_data *data)
138{
139 struct acpi_ec_smbus *smbus = adap->algo_data;
140 unsigned char protocol, len = 0, pec, temp[2] = { 0, 0 };
141 int i;
142
143 if (read_write == I2C_SMBUS_READ) {
144 protocol = ACPI_EC_SMB_PRTCL_READ;
145 } else {
146 protocol = ACPI_EC_SMB_PRTCL_WRITE;
147 }
148 pec = (flags & I2C_CLIENT_PEC) ? ACPI_EC_SMB_PRTCL_PEC : 0;
149
150 switch (size) {
151
152 case I2C_SMBUS_QUICK:
153 protocol |= ACPI_EC_SMB_PRTCL_QUICK;
154 read_write = I2C_SMBUS_WRITE;
155 break;
156
157 case I2C_SMBUS_BYTE:
158 if (read_write == I2C_SMBUS_WRITE) {
159 acpi_ec_smb_write(smbus, ACPI_EC_SMB_DATA, data->byte);
160 }
161 protocol |= ACPI_EC_SMB_PRTCL_BYTE;
162 break;
163
164 case I2C_SMBUS_BYTE_DATA:
165 acpi_ec_smb_write(smbus, ACPI_EC_SMB_CMD, command);
166 if (read_write == I2C_SMBUS_WRITE) {
167 acpi_ec_smb_write(smbus, ACPI_EC_SMB_DATA, data->byte);
168 }
169 protocol |= ACPI_EC_SMB_PRTCL_BYTE_DATA;
170 break;
171
172 case I2C_SMBUS_WORD_DATA:
173 acpi_ec_smb_write(smbus, ACPI_EC_SMB_CMD, command);
174 if (read_write == I2C_SMBUS_WRITE) {
175 acpi_ec_smb_write(smbus, ACPI_EC_SMB_DATA, data->word);
176 acpi_ec_smb_write(smbus, ACPI_EC_SMB_DATA + 1,
177 data->word >> 8);
178 }
179 protocol |= ACPI_EC_SMB_PRTCL_WORD_DATA | pec;
180 break;
181
182 case I2C_SMBUS_BLOCK_DATA:
183 acpi_ec_smb_write(smbus, ACPI_EC_SMB_CMD, command);
184 if (read_write == I2C_SMBUS_WRITE) {
185 len = min_t(u8, data->block[0], 32);
186 acpi_ec_smb_write(smbus, ACPI_EC_SMB_BCNT, len);
187 for (i = 0; i < len; i++)
188 acpi_ec_smb_write(smbus, ACPI_EC_SMB_DATA + i,
189 data->block[i + 1]);
190 }
191 protocol |= ACPI_EC_SMB_PRTCL_BLOCK_DATA | pec;
192 break;
193
194 case I2C_SMBUS_I2C_BLOCK_DATA:
195 len = min_t(u8, data->block[0], 32);
196 acpi_ec_smb_write(smbus, ACPI_EC_SMB_CMD, command);
197 acpi_ec_smb_write(smbus, ACPI_EC_SMB_BCNT, len);
198 if (read_write == I2C_SMBUS_WRITE) {
199 for (i = 0; i < len; i++) {
200 acpi_ec_smb_write(smbus, ACPI_EC_SMB_DATA + i,
201 data->block[i + 1]);
202 }
203 }
204 protocol |= ACPI_EC_SMB_PRTCL_I2C_BLOCK_DATA;
205 break;
206
207 case I2C_SMBUS_PROC_CALL:
208 acpi_ec_smb_write(smbus, ACPI_EC_SMB_CMD, command);
209 acpi_ec_smb_write(smbus, ACPI_EC_SMB_DATA, data->word);
210 acpi_ec_smb_write(smbus, ACPI_EC_SMB_DATA + 1, data->word >> 8);
211 protocol = ACPI_EC_SMB_PRTCL_PROC_CALL | pec;
212 read_write = I2C_SMBUS_READ;
213 break;
214
215 case I2C_SMBUS_BLOCK_PROC_CALL:
216 protocol |= pec;
217 len = min_t(u8, data->block[0], 31);
218 acpi_ec_smb_write(smbus, ACPI_EC_SMB_CMD, command);
219 acpi_ec_smb_write(smbus, ACPI_EC_SMB_BCNT, len);
220 for (i = 0; i < len; i++)
221 acpi_ec_smb_write(smbus, ACPI_EC_SMB_DATA + i,
222 data->block[i + 1]);
223 protocol = ACPI_EC_SMB_PRTCL_BLOCK_PROC_CALL | pec;
224 read_write = I2C_SMBUS_READ;
225 break;
226
227 default:
228 ACPI_DEBUG_PRINT((ACPI_DB_WARN, "EC SMBus adapter: "
229 "Unsupported transaction %d\n", size));
230 return (-1);
231 }
232
233 acpi_ec_smb_write(smbus, ACPI_EC_SMB_ADDR, addr << 1);
234 acpi_ec_smb_write(smbus, ACPI_EC_SMB_PRTCL, protocol);
235
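	/*
	 * Poll for completion: check once immediately, again after ~500us,
	 * and once more after a further 10ms before treating the
	 * transaction as failed (timings from the defines above).
	 */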
236 acpi_ec_smb_read(smbus, ACPI_EC_SMB_STS, temp + 0);
237
238 if (~temp[0] & ACPI_EC_SMB_STS_DONE) {
239 xudelay(500);
240 acpi_ec_smb_read(smbus, ACPI_EC_SMB_STS, temp + 0);
241 }
242 if (~temp[0] & ACPI_EC_SMB_STS_DONE) {
243 xmsleep(ACPI_EC_SMB_ACCESS_SLEEP2);
244 acpi_ec_smb_read(smbus, ACPI_EC_SMB_STS, temp + 0);
245 }
246 if ((~temp[0] & ACPI_EC_SMB_STS_DONE)
247 || (temp[0] & ACPI_EC_SMB_STS_STATUS)) {
248 return (-1);
249 }
250
251 if (read_write == I2C_SMBUS_WRITE) {
252 return (0);
253 }
254
255 switch (size) {
256
257 case I2C_SMBUS_BYTE:
258 case I2C_SMBUS_BYTE_DATA:
259 acpi_ec_smb_read(smbus, ACPI_EC_SMB_DATA, &data->byte);
260 break;
261
262 case I2C_SMBUS_WORD_DATA:
263 case I2C_SMBUS_PROC_CALL:
264 acpi_ec_smb_read(smbus, ACPI_EC_SMB_DATA, temp + 0);
265 acpi_ec_smb_read(smbus, ACPI_EC_SMB_DATA + 1, temp + 1);
266 data->word = (temp[1] << 8) | temp[0];
267 break;
268
269 case I2C_SMBUS_BLOCK_DATA:
270 case I2C_SMBUS_BLOCK_PROC_CALL:
271 len = 0;
272 acpi_ec_smb_read(smbus, ACPI_EC_SMB_BCNT, &len);
273 len = min_t(u8, len, 32);
274 case I2C_SMBUS_I2C_BLOCK_DATA:
275 for (i = 0; i < len; i++)
276 acpi_ec_smb_read(smbus, ACPI_EC_SMB_DATA + i,
277 data->block + i + 1);
278 data->block[0] = len;
279 break;
280 }
281
282 return (0);
283}
284
285static u32 acpi_ec_smb_func(struct i2c_adapter *adapter)
286{
287
288 return (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
289 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
290 I2C_FUNC_SMBUS_BLOCK_DATA |
291 I2C_FUNC_SMBUS_PROC_CALL |
292 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
293 I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_HWPEC_CALC);
294}
295
296static struct i2c_algorithm acpi_ec_smbus_algorithm = {
297 .smbus_xfer = acpi_ec_smb_access,
298 .functionality = acpi_ec_smb_func,
299};
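/*
 * Once the adapter below is registered, ordinary SMBus clients reach this
 * EC-based bus through the generic i2c-core helpers; a minimal sketch
 * (hypothetical client, error handling omitted):
 *
 *	s32 v = i2c_smbus_read_word_data(client, 0x16);
 *	if (v >= 0)
 *		printk(KERN_DEBUG "battery status word: 0x%04x\n", v);
 */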
300
301static int acpi_ec_hc_add(struct acpi_device *device)
302{
303 int status;
304 unsigned long val;
305 struct acpi_ec_hc *ec_hc;
306 struct acpi_ec_smbus *smbus;
307
308 if (!device) {
309 return -EINVAL;
310 }
311
312 ec_hc = kmalloc(sizeof(struct acpi_ec_hc), GFP_KERNEL);
313 if (!ec_hc) {
314 return -ENOMEM;
315 }
316 memset(ec_hc, 0, sizeof(struct acpi_ec_hc));
317
318 smbus = kmalloc(sizeof(struct acpi_ec_smbus), GFP_KERNEL);
319 if (!smbus) {
320 kfree(ec_hc);
321 return -ENOMEM;
322 }
323 memset(smbus, 0, sizeof(struct acpi_ec_smbus));
324
325 ec_hc->handle = device->handle;
326 strcpy(acpi_device_name(device), ACPI_EC_HC_DEVICE_NAME);
327 strcpy(acpi_device_class(device), ACPI_EC_HC_CLASS);
328 acpi_driver_data(device) = ec_hc;
329
330 status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val);
331 if (ACPI_FAILURE(status)) {
332 ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n"));
 333		kfree(smbus);
 334		kfree(ec_hc);
335 return -EIO;
336 }
337
338 smbus->ec = acpi_driver_data(device->parent);
339 smbus->base = (val & 0xff00ull) >> 8;
340 smbus->alert = val & 0xffull;
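	/* e.g. an _EC value of 0x8009 yields base 0x80 and alert 0x09 */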
341
342 smbus->adapter.owner = THIS_MODULE;
343 smbus->adapter.algo = &acpi_ec_smbus_algorithm;
344 smbus->adapter.algo_data = smbus;
345
346 if (i2c_add_adapter(&smbus->adapter)) {
347 ACPI_DEBUG_PRINT((ACPI_DB_WARN,
348 "EC SMBus adapter: Failed to register adapter\n"));
349 kfree(smbus);
350 kfree(ec_hc);
351 return -EIO;
352 }
353
354 ec_hc->smbus = smbus;
355
356 printk(KERN_INFO PREFIX "%s [%s]\n",
357 acpi_device_name(device), acpi_device_bid(device));
358
359 return AE_OK;
360}
361
362static int acpi_ec_hc_remove(struct acpi_device *device, int type)
363{
364 struct acpi_ec_hc *ec_hc;
365
366 if (!device) {
367 return -EINVAL;
368 }
369 ec_hc = acpi_driver_data(device);
370
371 i2c_del_adapter(&ec_hc->smbus->adapter);
372 kfree(ec_hc->smbus);
373 kfree(ec_hc);
374
375 return AE_OK;
376}
377
378static int __init acpi_ec_hc_init(void)
379{
380 int result;
381
382 result = acpi_bus_register_driver(&acpi_ec_hc_driver);
383 if (result < 0) {
384 return -ENODEV;
385 }
386 return 0;
387}
388
389static void __exit acpi_ec_hc_exit(void)
390{
391 acpi_bus_unregister_driver(&acpi_ec_hc_driver);
392}
393
394struct acpi_ec_hc *acpi_get_ec_hc(struct acpi_device *device)
395{
396 return ((struct acpi_ec_hc *)acpi_driver_data(device->parent));
397}
398
399EXPORT_SYMBOL(acpi_get_ec_hc);
400
401module_init(acpi_ec_hc_init);
402module_exit(acpi_ec_hc_exit);
403
404MODULE_LICENSE("GPL");
405MODULE_AUTHOR("Ducrot Bruno");
406MODULE_DESCRIPTION("ACPI EC SMBus driver");
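The exported acpi_get_ec_hc() above is the hook by which a dependent driver
(such as the SBS driver added later in this patch) reaches the EC-backed
SMBus. A minimal sketch, assuming a child ACPI device whose parent is the
EC HC device:

	struct acpi_ec_hc *hc = acpi_get_ec_hc(device);
	struct acpi_ec_smbus *smbus = hc ? hc->smbus : NULL;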
diff --git a/drivers/acpi/i2c_ec.h b/drivers/acpi/i2c_ec.h
new file mode 100644
index 000000000000..7c53fb732d61
--- /dev/null
+++ b/drivers/acpi/i2c_ec.h
@@ -0,0 +1,23 @@
1/*
2 * SMBus driver for ACPI Embedded Controller ($Revision: 1.2 $)
3 *
4 * Copyright (c) 2002, 2005 Ducrot Bruno
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation version 2.
9 */
10
11struct acpi_ec_smbus {
12 struct i2c_adapter adapter;
13 union acpi_ec *ec;
14 int base;
15 int alert;
16};
17
18struct acpi_ec_hc {
19 acpi_handle handle;
20 struct acpi_ec_smbus *smbus;
21};
22
23struct acpi_ec_hc *acpi_get_ec_hc(struct acpi_device *device);
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index 6d9bd45af30a..dca6799ac678 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -133,7 +133,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
 
 	/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
 
-	acpi_os_free(return_buffer->pointer);
+	ACPI_FREE(return_buffer->pointer);
 	return_buffer->pointer = NULL;
 	}
 
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 4d622981f61a..e5e448edca41 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -259,12 +259,10 @@ int acpi_get_node(acpi_handle *handle)
 {
 	int pxm, node = -1;
 
-	ACPI_FUNCTION_TRACE("acpi_get_node");
-
 	pxm = acpi_get_pxm(handle);
 	if (pxm >= 0)
 		node = acpi_map_pxm_to_node(pxm);
 
-	return_VALUE(node);
+	return node;
 }
 EXPORT_SYMBOL(acpi_get_node);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 5a468e2779ae..eedb05c6dc7b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -146,13 +146,6 @@ void *acpi_os_allocate(acpi_size size)
 	return kmalloc(size, GFP_KERNEL);
 }
 
-void acpi_os_free(void *ptr)
-{
-	kfree(ptr);
-}
-
-EXPORT_SYMBOL(acpi_os_free);
-
 acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
 {
 	if (efi_enabled) {
@@ -742,7 +735,7 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
 
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
 
-	acpi_os_free(sem);
+	kfree(sem);
 	sem = NULL;
 
 	return AE_OK;
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 1badce27a83f..8197c0e40769 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -83,7 +83,6 @@ struct acpi_pci_link_irq {
 struct acpi_pci_link {
 	struct list_head node;
 	struct acpi_device *device;
-	acpi_handle handle;
 	struct acpi_pci_link_irq irq;
 	int refcnt;
 };
@@ -175,7 +174,7 @@ static int acpi_pci_link_get_possible(struct acpi_pci_link *link)
 	if (!link)
 		return -EINVAL;
 
-	status = acpi_walk_resources(link->handle, METHOD_NAME__PRS,
+	status = acpi_walk_resources(link->device->handle, METHOD_NAME__PRS,
 				     acpi_pci_link_check_possible, link);
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRS"));
@@ -249,8 +248,7 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
 	acpi_status status = AE_OK;
 	int irq = 0;
 
-
-	if (!link || !link->handle)
+	if (!link)
 		return -EINVAL;
 
 	link->irq.active = 0;
@@ -274,7 +272,7 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
 	 * Query and parse _CRS to get the current IRQ assignment.
 	 */
 
-	status = acpi_walk_resources(link->handle, METHOD_NAME__CRS,
+	status = acpi_walk_resources(link->device->handle, METHOD_NAME__CRS,
 				     acpi_pci_link_check_current, &irq);
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _CRS"));
@@ -360,7 +358,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
 	resource->end.type = ACPI_RESOURCE_TYPE_END_TAG;
 
 	/* Attempt to set the resource */
-	status = acpi_set_current_resources(link->handle, &buffer);
+	status = acpi_set_current_resources(link->device->handle, &buffer);
 
 	/* check for total failure */
 	if (ACPI_FAILURE(status)) {
@@ -699,7 +697,7 @@ int acpi_pci_link_free_irq(acpi_handle handle)
 			  acpi_device_bid(link->device)));
 
 	if (link->refcnt == 0) {
-		acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL);
+		acpi_ut_evaluate_object(link->device->handle, "_DIS", 0, NULL);
 	}
 	mutex_unlock(&acpi_link_lock);
 	return (link->irq.active);
@@ -726,7 +724,6 @@ static int acpi_pci_link_add(struct acpi_device *device)
 	memset(link, 0, sizeof(struct acpi_pci_link));
 
 	link->device = device;
-	link->handle = device->handle;
 	strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
 	acpi_driver_data(device) = link;
@@ -765,7 +762,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
 
       end:
 	/* disable all links -- to be activated on use */
-	acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL);
+	acpi_ut_evaluate_object(device->handle, "_DIS", 0, NULL);
 	mutex_unlock(&acpi_link_lock);
 
 	if (result)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 8f10442119f0..0984a1ee24ed 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -58,7 +58,7 @@ static struct acpi_driver acpi_pci_root_driver = {
 
 struct acpi_pci_root {
 	struct list_head node;
-	acpi_handle handle;
+	struct acpi_device * device;
 	struct acpi_pci_id id;
 	struct pci_bus *bus;
 };
@@ -83,7 +83,7 @@ int acpi_pci_register_driver(struct acpi_pci_driver *driver)
 	list_for_each(entry, &acpi_pci_roots) {
 		struct acpi_pci_root *root;
 		root = list_entry(entry, struct acpi_pci_root, node);
-		driver->add(root->handle);
+		driver->add(root->device->handle);
 		n++;
 	}
 
@@ -110,7 +110,7 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
 	list_for_each(entry, &acpi_pci_roots) {
 		struct acpi_pci_root *root;
 		root = list_entry(entry, struct acpi_pci_root, node);
-		driver->remove(root->handle);
+		driver->remove(root->device->handle);
 	}
 }
 
@@ -170,7 +170,7 @@ static int acpi_pci_root_add(struct acpi_device *device)
 	memset(root, 0, sizeof(struct acpi_pci_root));
 	INIT_LIST_HEAD(&root->node);
 
-	root->handle = device->handle;
+	root->device = device;
 	strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
 	acpi_driver_data(device) = root;
@@ -185,7 +185,7 @@ static int acpi_pci_root_add(struct acpi_device *device)
 	 * -------
 	 * Obtained via _SEG, if exists, otherwise assumed to be zero (0).
 	 */
-	status = acpi_evaluate_integer(root->handle, METHOD_NAME__SEG, NULL,
+	status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
 				       &value);
 	switch (status) {
 	case AE_OK:
@@ -207,7 +207,7 @@ static int acpi_pci_root_add(struct acpi_device *device)
 	 * ---
 	 * Obtained via _BBN, if exists, otherwise assumed to be zero (0).
 	 */
-	status = acpi_evaluate_integer(root->handle, METHOD_NAME__BBN, NULL,
+	status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL,
 				       &value);
 	switch (status) {
 	case AE_OK:
@@ -234,7 +234,7 @@ static int acpi_pci_root_add(struct acpi_device *device)
 				   "Wrong _BBN value, reboot"
 				   " and use option 'pci=noacpi'\n");
 
-		status = try_get_root_bridge_busnr(root->handle, &bus);
+		status = try_get_root_bridge_busnr(device->handle, &bus);
 		if (ACPI_FAILURE(status))
 			break;
 		if (bus != root->id.bus) {
@@ -294,9 +294,9 @@ static int acpi_pci_root_add(struct acpi_device *device)
 	 * -----------------
 	 * Evaluate and parse _PRT, if exists.
 	 */
-	status = acpi_get_handle(root->handle, METHOD_NAME__PRT, &handle);
+	status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
 	if (ACPI_SUCCESS(status))
-		result = acpi_pci_irq_add_prt(root->handle, root->id.segment,
+		result = acpi_pci_irq_add_prt(device->handle, root->id.segment,
 					      root->id.bus);
 
       end:
@@ -315,7 +315,7 @@ static int acpi_pci_root_start(struct acpi_device *device)
 
 
 	list_for_each_entry(root, &acpi_pci_roots, node) {
-		if (root->handle == device->handle) {
+		if (root->device == device) {
 			pci_bus_add_devices(root->bus);
 			return 0;
 		}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 224f729f700e..5d3447f4582c 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -70,7 +70,7 @@ static struct acpi_driver acpi_power_driver = {
 };
 
 struct acpi_power_resource {
-	acpi_handle handle;
+	struct acpi_device * device;
 	acpi_bus_id name;
 	u32 system_level;
 	u32 order;
@@ -124,7 +124,7 @@ static int acpi_power_get_state(struct acpi_power_resource *resource)
 	if (!resource)
 		return -EINVAL;
 
-	status = acpi_evaluate_integer(resource->handle, "_STA", NULL, &sta);
+	status = acpi_evaluate_integer(resource->device->handle, "_STA", NULL, &sta);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
@@ -192,7 +192,7 @@ static int acpi_power_on(acpi_handle handle)
 		return 0;
 	}
 
-	status = acpi_evaluate_object(resource->handle, "_ON", NULL, NULL);
+	status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
@@ -203,10 +203,8 @@ static int acpi_power_on(acpi_handle handle)
 		return -ENOEXEC;
 
 	/* Update the power resource's _device_ power state */
-	result = acpi_bus_get_device(resource->handle, &device);
-	if (result)
-		return result;
-	device->power.state = ACPI_STATE_D0;
+	device = resource->device;
+	resource->device->power.state = ACPI_STATE_D0;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned on\n",
 			  resource->name));
@@ -242,7 +240,7 @@ static int acpi_power_off_device(acpi_handle handle)
 		return 0;
 	}
 
-	status = acpi_evaluate_object(resource->handle, "_OFF", NULL, NULL);
+	status = acpi_evaluate_object(resource->device->handle, "_OFF", NULL, NULL);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
@@ -253,9 +251,7 @@ static int acpi_power_off_device(acpi_handle handle)
 		return -ENOEXEC;
 
 	/* Update the power resource's _device_ power state */
-	result = acpi_bus_get_device(resource->handle, &device);
-	if (result)
-		return result;
+	device = resource->device;
 	device->power.state = ACPI_STATE_D3;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned off\n",
@@ -544,14 +540,14 @@ static int acpi_power_add(struct acpi_device *device)
 		return -ENOMEM;
 	memset(resource, 0, sizeof(struct acpi_power_resource));
 
-	resource->handle = device->handle;
+	resource->device = device;
 	strcpy(resource->name, device->pnp.bus_id);
 	strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
 	acpi_driver_data(device) = resource;
 
 	/* Evaluate the object to get the system level and resource order. */
-	status = acpi_evaluate_object(resource->handle, NULL, NULL, &buffer);
+	status = acpi_evaluate_object(device->handle, NULL, NULL, &buffer);
 	if (ACPI_FAILURE(status)) {
 		result = -ENODEV;
 		goto end;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e439eb77d283..8e9c26aae8fe 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -768,7 +768,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 		status = -EFAULT;
 
       end:
-	acpi_os_free(buffer.pointer);
+	kfree(buffer.pointer);
 
 	return status;
 }
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 14a00e5a8f6a..7ba5e49ab302 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -216,7 +216,7 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
 	       sizeof(struct acpi_pct_register));
 
       end:
-	acpi_os_free(buffer.pointer);
+	kfree(buffer.pointer);
 
 	return result;
 }
@@ -294,7 +294,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 	}
 
       end:
-	acpi_os_free(buffer.pointer);
+	kfree(buffer.pointer);
 
 	return result;
 }
@@ -592,7 +592,7 @@ static int acpi_processor_get_psd(struct acpi_processor *pr)
 	}
 
 end:
-	acpi_os_free(buffer.pointer);
+	kfree(buffer.pointer);
 	return result;
 }
 
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
new file mode 100644
index 000000000000..db7b350a5035
--- /dev/null
+++ b/drivers/acpi/sbs.c
@@ -0,0 +1,1766 @@
1/*
2 * acpi_sbs.c - ACPI Smart Battery System Driver ($Revision: 1.16 $)
3 *
4 * Copyright (c) 2005 Rich Townsend <rhdt@bartol.udel.edu>
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */
24
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/kernel.h>
29#include <linux/proc_fs.h>
30#include <linux/seq_file.h>
31#include <asm/uaccess.h>
32#include <linux/acpi.h>
33#include <linux/i2c.h>
34#include <linux/delay.h>
35
36#include "i2c_ec.h"
37
38#define DEF_CAPACITY_UNIT 3
39#define MAH_CAPACITY_UNIT 1
40#define MWH_CAPACITY_UNIT 2
41#define CAPACITY_UNIT DEF_CAPACITY_UNIT
42
43#define REQUEST_UPDATE_MODE 1
44#define QUEUE_UPDATE_MODE 2
45
46#define DATA_TYPE_COMMON 0
47#define DATA_TYPE_INFO 1
48#define DATA_TYPE_STATE 2
49#define DATA_TYPE_ALARM 3
50#define DATA_TYPE_AC_STATE 4
51
52extern struct proc_dir_entry *acpi_lock_ac_dir(void);
53extern struct proc_dir_entry *acpi_lock_battery_dir(void);
54extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
55extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
56
57#define ACPI_SBS_COMPONENT 0x00080000
58#define ACPI_SBS_CLASS "sbs"
59#define ACPI_AC_CLASS "ac_adapter"
60#define ACPI_BATTERY_CLASS "battery"
61#define ACPI_SBS_HID "ACPI0002"
62#define ACPI_SBS_DRIVER_NAME "ACPI Smart Battery System Driver"
63#define ACPI_SBS_DEVICE_NAME "Smart Battery System"
64#define ACPI_SBS_FILE_INFO "info"
65#define ACPI_SBS_FILE_STATE "state"
66#define ACPI_SBS_FILE_ALARM "alarm"
67#define ACPI_BATTERY_DIR_NAME "BAT%i"
68#define ACPI_AC_DIR_NAME "AC0"
69#define ACPI_SBC_SMBUS_ADDR 0x9
70#define ACPI_SBSM_SMBUS_ADDR 0xa
71#define ACPI_SB_SMBUS_ADDR 0xb
72#define ACPI_SBS_AC_NOTIFY_STATUS 0x80
73#define ACPI_SBS_BATTERY_NOTIFY_STATUS 0x80
74#define ACPI_SBS_BATTERY_NOTIFY_INFO 0x81
75
76#define _COMPONENT ACPI_SBS_COMPONENT
77
78#define MAX_SBS_BAT 4
79#define MAX_SMBUS_ERR 1
80
81ACPI_MODULE_NAME("acpi_sbs");
82
83MODULE_AUTHOR("Rich Townsend");
84MODULE_DESCRIPTION("Smart Battery System ACPI interface driver");
85MODULE_LICENSE("GPL");
86
87static struct semaphore sbs_sem;
88
89#define UPDATE_MODE QUEUE_UPDATE_MODE
90/* REQUEST_UPDATE_MODE QUEUE_UPDATE_MODE */
91#define UPDATE_INFO_MODE 0
92#define UPDATE_TIME 60
93#define UPDATE_TIME2 0
94
95static int capacity_mode = CAPACITY_UNIT;
96static int update_mode = UPDATE_MODE;
97static int update_info_mode = UPDATE_INFO_MODE;
98static int update_time = UPDATE_TIME;
99static int update_time2 = UPDATE_TIME2;
100
 101module_param(capacity_mode, int, 0444);
 102module_param(update_mode, int, 0444);
 103module_param(update_info_mode, int, 0444);
 104module_param(update_time, int, 0444);
 105module_param(update_time2, int, 0444);
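/*
 * Note: module_param()'s third argument is the sysfs mode; the default
 * values come from the initializers above.  A load-time override might
 * look like (module name assumed from this file):
 *
 *	modprobe sbs capacity_mode=1 update_time=30
 */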
106
107static int acpi_sbs_add(struct acpi_device *device);
108static int acpi_sbs_remove(struct acpi_device *device, int type);
109static void acpi_battery_smbus_err_handler(struct acpi_ec_smbus *smbus);
110static void acpi_sbs_update_queue(void *data);
111
112static struct acpi_driver acpi_sbs_driver = {
113 .name = ACPI_SBS_DRIVER_NAME,
114 .class = ACPI_SBS_CLASS,
115 .ids = ACPI_SBS_HID,
116 .ops = {
117 .add = acpi_sbs_add,
118 .remove = acpi_sbs_remove,
119 },
120};
121
122struct acpi_battery_info {
123 int capacity_mode;
124 s16 full_charge_capacity;
125 s16 design_capacity;
126 s16 design_voltage;
127 int vscale;
128 int ipscale;
129 s16 serial_number;
130 char manufacturer_name[I2C_SMBUS_BLOCK_MAX + 3];
131 char device_name[I2C_SMBUS_BLOCK_MAX + 3];
132 char device_chemistry[I2C_SMBUS_BLOCK_MAX + 3];
133};
134
135struct acpi_battery_state {
136 s16 voltage;
137 s16 amperage;
138 s16 remaining_capacity;
139 s16 average_time_to_empty;
140 s16 average_time_to_full;
141 s16 battery_status;
142};
143
144struct acpi_battery_alarm {
145 s16 remaining_capacity;
146};
147
148struct acpi_battery {
149 int alive;
150 int battery_present;
151 int id;
152 int init_state;
153 struct acpi_sbs *sbs;
154 struct acpi_battery_info info;
155 struct acpi_battery_state state;
156 struct acpi_battery_alarm alarm;
157 struct proc_dir_entry *battery_entry;
158};
159
160struct acpi_sbs {
161 acpi_handle handle;
162 struct acpi_device *device;
163 struct acpi_ec_smbus *smbus;
164 int sbsm_present;
165 int sbsm_batteries_supported;
166 int ac_present;
167 struct proc_dir_entry *ac_entry;
168 struct acpi_battery battery[MAX_SBS_BAT];
169 int update_info_mode;
170 int zombie;
171 int update_time;
172 int update_time2;
173 struct timer_list update_timer;
174};
175
176static void acpi_update_delay(struct acpi_sbs *sbs);
177static int acpi_sbs_update_run(struct acpi_sbs *sbs, int data_type);
178
179/* --------------------------------------------------------------------------
180 SMBus Communication
181 -------------------------------------------------------------------------- */
182
183static void acpi_battery_smbus_err_handler(struct acpi_ec_smbus *smbus)
184{
185 union i2c_smbus_data data;
186 int result = 0;
187 char *err_str;
188 int err_number;
189
190 data.word = 0;
191
192 result = smbus->adapter.algo->
193 smbus_xfer(&smbus->adapter,
194 ACPI_SB_SMBUS_ADDR,
195 0, I2C_SMBUS_READ, 0x16, I2C_SMBUS_BLOCK_DATA, &data);
196
197 err_number = (data.word & 0x000f);
198
199 switch (data.word & 0x000f) {
200 case 0x0000:
201 err_str = "unexpected bus error";
202 break;
203 case 0x0001:
204 err_str = "busy";
205 break;
206 case 0x0002:
207 err_str = "reserved command";
208 break;
209 case 0x0003:
210 err_str = "unsupported command";
211 break;
212 case 0x0004:
213 err_str = "access denied";
214 break;
215 case 0x0005:
216 err_str = "overflow/underflow";
217 break;
218 case 0x0006:
219 err_str = "bad size";
220 break;
221 case 0x0007:
222 err_str = "unknown error";
223 break;
224 default:
225 err_str = "unrecognized error";
226 }
227 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
228 "%s: ret %i, err %i\n", err_str, result, err_number));
229}
230
231static int
232acpi_sbs_smbus_read_word(struct acpi_ec_smbus *smbus, int addr, int func,
233 u16 * word,
234 void (*err_handler) (struct acpi_ec_smbus * smbus))
235{
236 union i2c_smbus_data data;
237 int result = 0;
238 int i;
239
240 if (err_handler == NULL) {
241 err_handler = acpi_battery_smbus_err_handler;
242 }
243
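	/* MAX_SMBUS_ERR is 1 above, so this loop makes a single attempt */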
244 for (i = 0; i < MAX_SMBUS_ERR; i++) {
245 result =
246 smbus->adapter.algo->smbus_xfer(&smbus->adapter, addr, 0,
247 I2C_SMBUS_READ, func,
248 I2C_SMBUS_WORD_DATA, &data);
249 if (result) {
250 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
251 "try %i: smbus->adapter.algo->smbus_xfer() failed\n",
252 i));
253 if (err_handler) {
254 err_handler(smbus);
255 }
256 } else {
257 *word = data.word;
258 break;
259 }
260 }
261
262 return result;
263}
264
265static int
266acpi_sbs_smbus_read_str(struct acpi_ec_smbus *smbus, int addr, int func,
267 char *str,
268 void (*err_handler) (struct acpi_ec_smbus * smbus))
269{
270 union i2c_smbus_data data;
271 int result = 0;
272 int i;
273
274 if (err_handler == NULL) {
275 err_handler = acpi_battery_smbus_err_handler;
276 }
277
278 for (i = 0; i < MAX_SMBUS_ERR; i++) {
279 result =
280 smbus->adapter.algo->smbus_xfer(&smbus->adapter, addr, 0,
281 I2C_SMBUS_READ, func,
282 I2C_SMBUS_BLOCK_DATA,
283 &data);
284 if (result) {
285 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
286 "try %i: smbus->adapter.algo->smbus_xfer() failed\n",
287 i));
288 if (err_handler) {
289 err_handler(smbus);
290 }
291 } else {
292 strncpy(str, (const char *)data.block + 1,
293 data.block[0]);
294 str[data.block[0]] = 0;
295 break;
296 }
297 }
298
299 return result;
300}
301
302static int
303acpi_sbs_smbus_write_word(struct acpi_ec_smbus *smbus, int addr, int func,
304 int word,
305 void (*err_handler) (struct acpi_ec_smbus * smbus))
306{
307 union i2c_smbus_data data;
308 int result = 0;
309 int i;
310
311 if (err_handler == NULL) {
312 err_handler = acpi_battery_smbus_err_handler;
313 }
314
315 data.word = word;
316
317 for (i = 0; i < MAX_SMBUS_ERR; i++) {
318 result =
319 smbus->adapter.algo->smbus_xfer(&smbus->adapter, addr, 0,
320 I2C_SMBUS_WRITE, func,
321 I2C_SMBUS_WORD_DATA, &data);
322 if (result) {
323 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
324 "try %i: smbus->adapter.algo"
325 "->smbus_xfer() failed\n", i));
326 if (err_handler) {
327 err_handler(smbus);
328 }
329 } else {
330 break;
331 }
332 }
333
334 return result;
335}
336
337/* --------------------------------------------------------------------------
338 Smart Battery System Management
339 -------------------------------------------------------------------------- */
340
341/* Smart Battery */
342
343static int acpi_sbs_generate_event(struct acpi_device *device,
344 int event, int state, char *bid, char *class)
345{
346 char bid_saved[5];
347 char class_saved[20];
348 int result = 0;
349
350 strcpy(bid_saved, acpi_device_bid(device));
351 strcpy(class_saved, acpi_device_class(device));
352
353 strcpy(acpi_device_bid(device), bid);
354 strcpy(acpi_device_class(device), class);
355
356 result = acpi_bus_generate_event(device, event, state);
357
358 strcpy(acpi_device_bid(device), bid_saved);
359 strcpy(acpi_device_class(device), class_saved);
360
361 return result;
362}
363
364static int acpi_battery_get_present(struct acpi_battery *battery)
365{
366 s16 state;
367 int result = 0;
368 int is_present = 0;
369
370 result = acpi_sbs_smbus_read_word(battery->sbs->smbus,
371 ACPI_SBSM_SMBUS_ADDR, 0x01,
372 &state, NULL);
373 if (result) {
374 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
375 "acpi_sbs_smbus_read_word() failed"));
376 }
377 if (!result) {
378 is_present = (state & 0x000f) & (1 << battery->id);
379 }
380 battery->battery_present = is_present;
381
382 return result;
383}
384
385static int acpi_battery_is_present(struct acpi_battery *battery)
386{
387 return (battery->battery_present);
388}
389
390static int acpi_ac_is_present(struct acpi_sbs *sbs)
391{
392 return (sbs->ac_present);
393}
394
395static int acpi_battery_select(struct acpi_battery *battery)
396{
397 struct acpi_ec_smbus *smbus = battery->sbs->smbus;
398 int result = 0;
399 s16 state;
400 int foo;
401
402 if (battery->sbs->sbsm_present) {
403
404 /* Take special care not to knobble other nibbles of
405 * state (aka selector_state), since
406 * it causes charging to halt on SBSELs */
407
408 result =
409 acpi_sbs_smbus_read_word(smbus, ACPI_SBSM_SMBUS_ADDR, 0x01,
410 &state, NULL);
411 if (result) {
412 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
413 "acpi_sbs_smbus_read_word() failed\n"));
414 goto end;
415 }
416
417 foo = (state & 0x0fff) | (1 << (battery->id + 12));
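		/* e.g. id 0 sets bit 12; bits 0-11 of the selector are kept */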
418 result =
419 acpi_sbs_smbus_write_word(smbus, ACPI_SBSM_SMBUS_ADDR, 0x01,
420 foo, NULL);
421 if (result) {
422 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
423 "acpi_sbs_smbus_write_word() failed\n"));
424 goto end;
425 }
426 }
427
428 end:
429 return result;
430}
431
432static int acpi_sbsm_get_info(struct acpi_sbs *sbs)
433{
434 struct acpi_ec_smbus *smbus = sbs->smbus;
435 int result = 0;
436 s16 battery_system_info;
437
438 result = acpi_sbs_smbus_read_word(smbus, ACPI_SBSM_SMBUS_ADDR, 0x04,
439 &battery_system_info, NULL);
440 if (result) {
441 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
442 "acpi_sbs_smbus_read_word() failed\n"));
443 goto end;
444 }
445
446 sbs->sbsm_batteries_supported = battery_system_info & 0x000f;
447
448 end:
449
450 return result;
451}
452
453static int acpi_battery_get_info(struct acpi_battery *battery)
454{
455 struct acpi_ec_smbus *smbus = battery->sbs->smbus;
456 int result = 0;
457 s16 battery_mode;
458 s16 specification_info;
459
460 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x03,
461 &battery_mode,
462 &acpi_battery_smbus_err_handler);
463 if (result) {
464 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
465 "acpi_sbs_smbus_read_word() failed\n"));
466 goto end;
467 }
468 battery->info.capacity_mode = (battery_mode & 0x8000) >> 15;
469
470 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x10,
471 &battery->info.full_charge_capacity,
472 &acpi_battery_smbus_err_handler);
473 if (result) {
474 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
475 "acpi_sbs_smbus_read_word() failed\n"));
476 goto end;
477 }
478
479 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x18,
480 &battery->info.design_capacity,
481 &acpi_battery_smbus_err_handler);
482
483 if (result) {
484 goto end;
485 }
486
487 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x19,
488 &battery->info.design_voltage,
489 &acpi_battery_smbus_err_handler);
490 if (result) {
491 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
492 "acpi_sbs_smbus_read_word() failed\n"));
493 goto end;
494 }
495
496 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x1a,
497 &specification_info,
498 &acpi_battery_smbus_err_handler);
499 if (result) {
500 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
501 "acpi_sbs_smbus_read_word() failed\n"));
502 goto end;
503 }
504
505 switch ((specification_info & 0x0f00) >> 8) {
506 case 1:
507 battery->info.vscale = 10;
508 break;
509 case 2:
510 battery->info.vscale = 100;
511 break;
512 case 3:
513 battery->info.vscale = 1000;
514 break;
515 default:
516 battery->info.vscale = 1;
517 }
518
519 switch ((specification_info & 0xf000) >> 12) {
520 case 1:
521 battery->info.ipscale = 10;
522 break;
523 case 2:
524 battery->info.ipscale = 100;
525 break;
526 case 3:
527 battery->info.ipscale = 1000;
528 break;
529 default:
530 battery->info.ipscale = 1;
531 }
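	/* e.g. specification_info == 0x1300 gives vscale 1000, ipscale 10 */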
532
533 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x1c,
534 &battery->info.serial_number,
535 &acpi_battery_smbus_err_handler);
536 if (result) {
537 goto end;
538 }
539
540 result = acpi_sbs_smbus_read_str(smbus, ACPI_SB_SMBUS_ADDR, 0x20,
541 battery->info.manufacturer_name,
542 &acpi_battery_smbus_err_handler);
543 if (result) {
544 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
545 "acpi_sbs_smbus_read_str() failed\n"));
546 goto end;
547 }
548
549 result = acpi_sbs_smbus_read_str(smbus, ACPI_SB_SMBUS_ADDR, 0x21,
550 battery->info.device_name,
551 &acpi_battery_smbus_err_handler);
552 if (result) {
553 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
554 "acpi_sbs_smbus_read_str() failed\n"));
555 goto end;
556 }
557
558 result = acpi_sbs_smbus_read_str(smbus, ACPI_SB_SMBUS_ADDR, 0x22,
559 battery->info.device_chemistry,
560 &acpi_battery_smbus_err_handler);
561 if (result) {
562 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
563 "acpi_sbs_smbus_read_str() failed\n"));
564 goto end;
565 }
566
567 end:
568 return result;
569}
570
571static void acpi_update_delay(struct acpi_sbs *sbs)
572{
573 if (sbs->zombie) {
574 return;
575 }
576 if (sbs->update_time2 > 0) {
577 msleep(sbs->update_time2 * 1000);
578 }
579}
580
581static int acpi_battery_get_state(struct acpi_battery *battery)
582{
583 struct acpi_ec_smbus *smbus = battery->sbs->smbus;
584 int result = 0;
585
586 acpi_update_delay(battery->sbs);
587 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x09,
588 &battery->state.voltage,
589 &acpi_battery_smbus_err_handler);
590 if (result) {
591 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
592 "acpi_sbs_smbus_read_word() failed\n"));
593 goto end;
594 }
595
596 acpi_update_delay(battery->sbs);
597 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x0a,
598 &battery->state.amperage,
599 &acpi_battery_smbus_err_handler);
600 if (result) {
601 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
602 "acpi_sbs_smbus_read_word() failed\n"));
603 goto end;
604 }
605
606 acpi_update_delay(battery->sbs);
607 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x0f,
608 &battery->state.remaining_capacity,
609 &acpi_battery_smbus_err_handler);
610 if (result) {
611 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
612 "acpi_sbs_smbus_read_word() failed\n"));
613 goto end;
614 }
615
616 acpi_update_delay(battery->sbs);
617 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x12,
618 &battery->state.average_time_to_empty,
619 &acpi_battery_smbus_err_handler);
620 if (result) {
621 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
622 "acpi_sbs_smbus_read_word() failed\n"));
623 goto end;
624 }
625
626 acpi_update_delay(battery->sbs);
627 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x13,
628 &battery->state.average_time_to_full,
629 &acpi_battery_smbus_err_handler);
630 if (result) {
631 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
632 "acpi_sbs_smbus_read_word() failed\n"));
633 goto end;
634 }
635
636 acpi_update_delay(battery->sbs);
637 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x16,
638 &battery->state.battery_status,
639 &acpi_battery_smbus_err_handler);
640 if (result) {
641 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
642 "acpi_sbs_smbus_read_word() failed\n"));
643 goto end;
644 }
645
646 acpi_update_delay(battery->sbs);
647
648 end:
649 return result;
650}
651
652static int acpi_battery_get_alarm(struct acpi_battery *battery)
653{
654 struct acpi_ec_smbus *smbus = battery->sbs->smbus;
655 int result = 0;
656
657 result = acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x01,
658 &battery->alarm.remaining_capacity,
659 &acpi_battery_smbus_err_handler);
660 if (result) {
661 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
662 "acpi_sbs_smbus_read_word() failed\n"));
663 goto end;
664 }
665
666 acpi_update_delay(battery->sbs);
667
668 end:
669
670 return result;
671}
672
673static int acpi_battery_set_alarm(struct acpi_battery *battery,
674 unsigned long alarm)
675{
676 struct acpi_ec_smbus *smbus = battery->sbs->smbus;
677 int result = 0;
678 s16 battery_mode;
679 int foo;
680
681 result = acpi_battery_select(battery);
682 if (result) {
683 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
684 "acpi_battery_select() failed\n"));
685 goto end;
686 }
687
688 /* If necessary, enable the alarm */
689
690 if (alarm > 0) {
691 result =
692 acpi_sbs_smbus_read_word(smbus, ACPI_SB_SMBUS_ADDR, 0x03,
693 &battery_mode,
694 &acpi_battery_smbus_err_handler);
695 if (result) {
696 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
697 "acpi_sbs_smbus_read_word() failed\n"));
698 goto end;
699 }
700
701 result =
702 acpi_sbs_smbus_write_word(smbus, ACPI_SB_SMBUS_ADDR, 0x01,
703 battery_mode & 0xbfff,
704 &acpi_battery_smbus_err_handler);
705 if (result) {
706 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
707 "acpi_sbs_smbus_write_word() failed\n"));
708 goto end;
709 }
710 }
711
712 foo = alarm / (battery->info.capacity_mode ? 10 : 1);
713 result = acpi_sbs_smbus_write_word(smbus, ACPI_SB_SMBUS_ADDR, 0x01,
714 foo,
715 &acpi_battery_smbus_err_handler);
716 if (result) {
717 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
718 "acpi_sbs_smbus_write_word() failed\n"));
719 goto end;
720 }
721
722 end:
723
724 return result;
725}
726
727static int acpi_battery_set_mode(struct acpi_battery *battery)
728{
729 int result = 0;
730 s16 battery_mode;
731
732 if (capacity_mode == DEF_CAPACITY_UNIT) {
733 goto end;
734 }
735
736 result = acpi_sbs_smbus_read_word(battery->sbs->smbus,
737 ACPI_SB_SMBUS_ADDR, 0x03,
738 &battery_mode, NULL);
739 if (result) {
740 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
741 "acpi_sbs_smbus_read_word() failed\n"));
742 goto end;
743 }
744
745 if (capacity_mode == MAH_CAPACITY_UNIT) {
746 battery_mode &= 0x7fff;
747 } else {
748 battery_mode |= 0x8000;
749 }
750 result = acpi_sbs_smbus_write_word(battery->sbs->smbus,
751 ACPI_SB_SMBUS_ADDR, 0x03,
752 battery_mode, NULL);
753 if (result) {
754 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
755 "acpi_sbs_smbus_write_word() failed\n"));
756 goto end;
757 }
758
759 result = acpi_sbs_smbus_read_word(battery->sbs->smbus,
760 ACPI_SB_SMBUS_ADDR, 0x03,
761 &battery_mode, NULL);
762 if (result) {
763 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
764 "acpi_sbs_smbus_read_word() failed\n"));
765 goto end;
766 }
767
768 end:
769 return result;
770}
771
772static int acpi_battery_init(struct acpi_battery *battery)
773{
774 int result = 0;
775
776 result = acpi_battery_select(battery);
777 if (result) {
778 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
779 "acpi_battery_init() failed\n"));
780 goto end;
781 }
782
783 result = acpi_battery_set_mode(battery);
784 if (result) {
785 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
786 "acpi_battery_set_mode() failed\n"));
787 goto end;
788 }
789
790 result = acpi_battery_get_info(battery);
791 if (result) {
792 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
793 "acpi_battery_get_info() failed\n"));
794 goto end;
795 }
796
797 result = acpi_battery_get_state(battery);
798 if (result) {
799 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
800 "acpi_battery_get_state() failed\n"));
801 goto end;
802 }
803
804 result = acpi_battery_get_alarm(battery);
805 if (result) {
806 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
807 "acpi_battery_get_alarm() failed\n"));
808 goto end;
809 }
810
811 end:
812 return result;
813}
814
815static int acpi_ac_get_present(struct acpi_sbs *sbs)
816{
817 struct acpi_ec_smbus *smbus = sbs->smbus;
818 int result = 0;
819 s16 charger_status;
820
821 result = acpi_sbs_smbus_read_word(smbus, ACPI_SBC_SMBUS_ADDR, 0x13,
822 &charger_status, NULL);
823
824 if (result) {
825 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
826 "acpi_sbs_smbus_read_word() failed\n"));
827 goto end;
828 }
829
830 sbs->ac_present = (charger_status & 0x8000) >> 15;
831
832 end:
833
834 return result;
835}
836
837/* --------------------------------------------------------------------------
838 FS Interface (/proc/acpi)
839 -------------------------------------------------------------------------- */
840
841/* Generic Routines */
842
843static int
844acpi_sbs_generic_add_fs(struct proc_dir_entry **dir,
845 struct proc_dir_entry *parent_dir,
846 char *dir_name,
847 struct file_operations *info_fops,
848 struct file_operations *state_fops,
849 struct file_operations *alarm_fops, void *data)
850{
851 struct proc_dir_entry *entry = NULL;
852
853 if (!*dir) {
854 *dir = proc_mkdir(dir_name, parent_dir);
855 if (!*dir) {
856 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
857 "proc_mkdir() failed\n"));
858 return -ENODEV;
859 }
860 (*dir)->owner = THIS_MODULE;
861 }
862
863 /* 'info' [R] */
864 if (info_fops) {
865 entry = create_proc_entry(ACPI_SBS_FILE_INFO, S_IRUGO, *dir);
866 if (!entry) {
867 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
868 "create_proc_entry() failed\n"));
869 } else {
870 entry->proc_fops = info_fops;
871 entry->data = data;
872 entry->owner = THIS_MODULE;
873 }
874 }
875
876 /* 'state' [R] */
877 if (state_fops) {
878 entry = create_proc_entry(ACPI_SBS_FILE_STATE, S_IRUGO, *dir);
879 if (!entry) {
880 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
881 "create_proc_entry() failed\n"));
882 } else {
883 entry->proc_fops = state_fops;
884 entry->data = data;
885 entry->owner = THIS_MODULE;
886 }
887 }
888
889 /* 'alarm' [R/W] */
890 if (alarm_fops) {
891 entry = create_proc_entry(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir);
892 if (!entry) {
893 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
894 "create_proc_entry() failed\n"));
895 } else {
896 entry->proc_fops = alarm_fops;
897 entry->data = data;
898 entry->owner = THIS_MODULE;
899 }
900 }
901
902 return 0;
903}
904
905static void
906acpi_sbs_generic_remove_fs(struct proc_dir_entry **dir,
907 struct proc_dir_entry *parent_dir)
908{
909
910 if (*dir) {
911 remove_proc_entry(ACPI_SBS_FILE_INFO, *dir);
912 remove_proc_entry(ACPI_SBS_FILE_STATE, *dir);
913 remove_proc_entry(ACPI_SBS_FILE_ALARM, *dir);
914 remove_proc_entry((*dir)->name, parent_dir);
915 *dir = NULL;
916 }
917
918}
919
920/* Smart Battery Interface */
921
922static struct proc_dir_entry *acpi_battery_dir = NULL;
923
924static int acpi_battery_read_info(struct seq_file *seq, void *offset)
925{
926 struct acpi_battery *battery = (struct acpi_battery *)seq->private;
927 int cscale;
928 int result = 0;
929
930 if (battery->sbs->zombie) {
931 return -ENODEV;
932 }
933
934 down(&sbs_sem);
935
936 if (update_mode == REQUEST_UPDATE_MODE) {
937 result = acpi_sbs_update_run(battery->sbs, DATA_TYPE_INFO);
938 if (result) {
939 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
940 "acpi_sbs_update_run() failed\n"));
941 }
942 }
943
944 if (acpi_battery_is_present(battery)) {
945 seq_printf(seq, "present: yes\n");
946 } else {
947 seq_printf(seq, "present: no\n");
948 goto end;
949 }
950
951 if (battery->info.capacity_mode) {
952 cscale = battery->info.vscale * battery->info.ipscale;
953 } else {
954 cscale = battery->info.ipscale;
955 }
956 seq_printf(seq, "design capacity: %i%s",
957 battery->info.design_capacity * cscale,
958 battery->info.capacity_mode ? "0 mWh\n" : " mAh\n");
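	/* in mWh mode the trailing "0" scales the printed value by 10 */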
959
960 seq_printf(seq, "last full capacity: %i%s",
961 battery->info.full_charge_capacity * cscale,
962 battery->info.capacity_mode ? "0 mWh\n" : " mAh\n");
963
964 seq_printf(seq, "battery technology: rechargeable\n");
965
966 seq_printf(seq, "design voltage: %i mV\n",
967 battery->info.design_voltage * battery->info.vscale);
968
969 seq_printf(seq, "design capacity warning: unknown\n");
970 seq_printf(seq, "design capacity low: unknown\n");
971 seq_printf(seq, "capacity granularity 1: unknown\n");
972 seq_printf(seq, "capacity granularity 2: unknown\n");
973
974 seq_printf(seq, "model number: %s\n",
975 battery->info.device_name);
976
977 seq_printf(seq, "serial number: %i\n",
978 battery->info.serial_number);
979
980 seq_printf(seq, "battery type: %s\n",
981 battery->info.device_chemistry);
982
983 seq_printf(seq, "OEM info: %s\n",
984 battery->info.manufacturer_name);
985
986 end:
987
988 up(&sbs_sem);
989
990 return result;
991}
992
993static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
994{
995 return single_open(file, acpi_battery_read_info, PDE(inode)->data);
996}
997
998static int acpi_battery_read_state(struct seq_file *seq, void *offset)
999{
1000 struct acpi_battery *battery = (struct acpi_battery *)seq->private;
1001 int result = 0;
1002 int cscale;
1003 int foo;
1004
1005 if (battery->sbs->zombie) {
1006 return -ENODEV;
1007 }
1008
1009 down(&sbs_sem);
1010
1011 if (update_mode == REQUEST_UPDATE_MODE) {
1012 result = acpi_sbs_update_run(battery->sbs, DATA_TYPE_STATE);
1013 if (result) {
1014 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1015 "acpi_sbs_update_run() failed\n"));
1016 }
1017 }
1018
1019 if (acpi_battery_is_present(battery)) {
1020 seq_printf(seq, "present: yes\n");
1021 } else {
1022 seq_printf(seq, "present: no\n");
1023 goto end;
1024 }
1025
1026 if (battery->info.capacity_mode) {
1027 cscale = battery->info.vscale * battery->info.ipscale;
1028 } else {
1029 cscale = battery->info.ipscale;
1030 }
1031
1032 if (battery->state.battery_status & 0x0010) {
1033 seq_printf(seq, "capacity state: critical\n");
1034 } else {
1035 seq_printf(seq, "capacity state: ok\n");
1036 }
1037 if (battery->state.amperage < 0) {
1038 seq_printf(seq, "charging state: discharging\n");
1039 foo = battery->state.remaining_capacity * cscale * 60 /
1040 (battery->state.average_time_to_empty == 0 ? 1 :
1041 battery->state.average_time_to_empty);
1042 seq_printf(seq, "present rate: %i%s\n",
1043 foo, battery->info.capacity_mode ? "0 mW" : " mA");
1044 } else if (battery->state.amperage > 0) {
1045 seq_printf(seq, "charging state: charging\n");
1046 foo = (battery->info.full_charge_capacity -
1047 battery->state.remaining_capacity) * cscale * 60 /
1048 (battery->state.average_time_to_full == 0 ? 1 :
1049 battery->state.average_time_to_full);
1050 seq_printf(seq, "present rate: %i%s\n",
1051 foo, battery->info.capacity_mode ? "0 mW" : " mA");
1052 } else {
1053 seq_printf(seq, "charging state: charged\n");
1054 seq_printf(seq, "present rate: 0 %s\n",
1055 battery->info.capacity_mode ? "mW" : "mA");
1056 }
1057
1058 seq_printf(seq, "remaining capacity: %i%s",
1059 battery->state.remaining_capacity * cscale,
1060 battery->info.capacity_mode ? "0 mWh\n" : " mAh\n");
1061
1062 seq_printf(seq, "present voltage: %i mV\n",
1063 battery->state.voltage * battery->info.vscale);
1064
1065 end:
1066
1067 up(&sbs_sem);
1068
1069 return result;
1070}
1071
1072static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
1073{
1074 return single_open(file, acpi_battery_read_state, PDE(inode)->data);
1075}
1076
1077static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
1078{
1079 struct acpi_battery *battery = (struct acpi_battery *)seq->private;
1080 int result = 0;
1081 int cscale;
1082
1083 if (battery->sbs->zombie) {
1084 return -ENODEV;
1085 }
1086
1087 down(&sbs_sem);
1088
1089 if (update_mode == REQUEST_UPDATE_MODE) {
1090 result = acpi_sbs_update_run(battery->sbs, DATA_TYPE_ALARM);
1091 if (result) {
1092 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1093 "acpi_sbs_update_run() failed\n"));
1094 }
1095 }
1096
1097 if (!acpi_battery_is_present(battery)) {
1098 seq_printf(seq, "present: no\n");
1099 goto end;
1100 }
1101
1102 if (battery->info.capacity_mode) {
1103 cscale = battery->info.vscale * battery->info.ipscale;
1104 } else {
1105 cscale = battery->info.ipscale;
1106 }
1107
1108 seq_printf(seq, "alarm: ");
1109 if (battery->alarm.remaining_capacity) {
1110 seq_printf(seq, "%i%s",
1111 battery->alarm.remaining_capacity * cscale,
1112 battery->info.capacity_mode ? "0 mWh\n" : " mAh\n");
1113 } else {
1114 seq_printf(seq, "disabled\n");
1115 }
1116
1117 end:
1118
1119 up(&sbs_sem);
1120
1121 return result;
1122}
1123
1124static ssize_t
1125acpi_battery_write_alarm(struct file *file, const char __user * buffer,
1126 size_t count, loff_t * ppos)
1127{
1128 struct seq_file *seq = (struct seq_file *)file->private_data;
1129 struct acpi_battery *battery = (struct acpi_battery *)seq->private;
1130 char alarm_string[12] = { '\0' };
1131 int result, old_alarm, new_alarm;
1132
1133 if (battery->sbs->zombie) {
1134 return -ENODEV;
1135 }
1136
1137 down(&sbs_sem);
1138
1139 if (!acpi_battery_is_present(battery)) {
1140 result = -ENODEV;
1141 goto end;
1142 }
1143
1144 if (count > sizeof(alarm_string) - 1) {
1145 result = -EINVAL;
1146 goto end;
1147 }
1148
1149 if (copy_from_user(alarm_string, buffer, count)) {
1150 result = -EFAULT;
1151 goto end;
1152 }
1153
1154 alarm_string[count] = 0;
1155
1156 old_alarm = battery->alarm.remaining_capacity;
1157 new_alarm = simple_strtoul(alarm_string, NULL, 0);
1158
1159 result = acpi_battery_set_alarm(battery, new_alarm);
1160 if (result) {
1161 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1162 "acpi_battery_set_alarm() failed\n"));
1163 (void)acpi_battery_set_alarm(battery, old_alarm);
1164 goto end;
1165 }
1166 result = acpi_battery_get_alarm(battery);
1167 if (result) {
1168 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1169 "acpi_battery_get_alarm() failed\n"));
1170 (void)acpi_battery_set_alarm(battery, old_alarm);
1171 goto end;
1172 }
1173
1174 end:
1175 up(&sbs_sem);
1176
1177 if (result) {
1178 return result;
1179 } else {
1180 return count;
1181 }
1182}
1183
1184static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
1185{
1186 return single_open(file, acpi_battery_read_alarm, PDE(inode)->data);
1187}
1188
1189static struct file_operations acpi_battery_info_fops = {
1190 .open = acpi_battery_info_open_fs,
1191 .read = seq_read,
1192 .llseek = seq_lseek,
1193 .release = single_release,
1194 .owner = THIS_MODULE,
1195};
1196
1197static struct file_operations acpi_battery_state_fops = {
1198 .open = acpi_battery_state_open_fs,
1199 .read = seq_read,
1200 .llseek = seq_lseek,
1201 .release = single_release,
1202 .owner = THIS_MODULE,
1203};
1204
1205static struct file_operations acpi_battery_alarm_fops = {
1206 .open = acpi_battery_alarm_open_fs,
1207 .read = seq_read,
1208 .write = acpi_battery_write_alarm,
1209 .llseek = seq_lseek,
1210 .release = single_release,
1211 .owner = THIS_MODULE,
1212};
1213
1214/* Legacy AC Adapter Interface */
1215
1216static struct proc_dir_entry *acpi_ac_dir = NULL;
1217
1218static int acpi_ac_read_state(struct seq_file *seq, void *offset)
1219{
1220 struct acpi_sbs *sbs = (struct acpi_sbs *)seq->private;
1221 int result;
1222
1223 if (sbs->zombie) {
1224 return -ENODEV;
1225 }
1226
1227 down(&sbs_sem);
1228
1229 if (update_mode == REQUEST_UPDATE_MODE) {
1230 result = acpi_sbs_update_run(sbs, DATA_TYPE_AC_STATE);
1231 if (result) {
1232 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1233 "acpi_sbs_update_run() failed\n"));
1234 }
1235 }
1236
1237 seq_printf(seq, "state: %s\n",
1238 sbs->ac_present ? "on-line" : "off-line");
1239
1240 up(&sbs_sem);
1241
1242 return 0;
1243}
1244
1245static int acpi_ac_state_open_fs(struct inode *inode, struct file *file)
1246{
1247 return single_open(file, acpi_ac_read_state, PDE(inode)->data);
1248}
1249
1250static struct file_operations acpi_ac_state_fops = {
1251 .open = acpi_ac_state_open_fs,
1252 .read = seq_read,
1253 .llseek = seq_lseek,
1254 .release = single_release,
1255 .owner = THIS_MODULE,
1256};
1257
1258/* --------------------------------------------------------------------------
1259 Driver Interface
1260 -------------------------------------------------------------------------- */
1261
1262/* Smart Battery */
1263
1264static int acpi_battery_add(struct acpi_sbs *sbs, int id)
1265{
1266 int is_present;
1267 int result;
1268 char dir_name[32];
1269 struct acpi_battery *battery;
1270
1271 battery = &sbs->battery[id];
1272
1273 battery->alive = 0;
1274
1275 battery->init_state = 0;
1276 battery->id = id;
1277 battery->sbs = sbs;
1278
1279 result = acpi_battery_select(battery);
1280 if (result) {
1281 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1282 "acpi_battery_select() failed\n"));
1283 goto end;
1284 }
1285
1286 result = acpi_battery_get_present(battery);
1287 if (result) {
1288 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1289 "acpi_battery_get_present() failed\n"));
1290 goto end;
1291 }
1292
1293 is_present = acpi_battery_is_present(battery);
1294
1295 if (is_present) {
1296 result = acpi_battery_init(battery);
1297 if (result) {
1298 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1299 "acpi_battery_init() failed\n"));
1300 goto end;
1301 }
1302 battery->init_state = 1;
1303 }
1304
1305 (void)sprintf(dir_name, ACPI_BATTERY_DIR_NAME, id);
1306
1307 result = acpi_sbs_generic_add_fs(&battery->battery_entry,
1308 acpi_battery_dir,
1309 dir_name,
1310 &acpi_battery_info_fops,
1311 &acpi_battery_state_fops,
1312 &acpi_battery_alarm_fops, battery);
1313 if (result) {
1314 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1315 "acpi_sbs_generic_add_fs() failed\n"));
1316 goto end;
1317 }
1318 battery->alive = 1;
1319
1320 end:
1321 return result;
1322}
1323
1324static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
1325{
1326
1327 if (sbs->battery[id].battery_entry) {
1328 acpi_sbs_generic_remove_fs(&(sbs->battery[id].battery_entry),
1329 acpi_battery_dir);
1330 }
1331}
1332
1333static int acpi_ac_add(struct acpi_sbs *sbs)
1334{
1335 int result;
1336
1337 result = acpi_ac_get_present(sbs);
1338 if (result) {
1339 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1340 "acpi_ac_get_present() failed\n"));
1341 goto end;
1342 }
1343
1344 result = acpi_sbs_generic_add_fs(&sbs->ac_entry,
1345 acpi_ac_dir,
1346 ACPI_AC_DIR_NAME,
1347 NULL, &acpi_ac_state_fops, NULL, sbs);
1348 if (result) {
1349 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1350 "acpi_sbs_generic_add_fs() failed\n"));
1351 goto end;
1352 }
1353
1354 end:
1355
1356 return result;
1357}
1358
1359static void acpi_ac_remove(struct acpi_sbs *sbs)
1360{
1361
1362 if (sbs->ac_entry) {
1363 acpi_sbs_generic_remove_fs(&sbs->ac_entry, acpi_ac_dir);
1364 }
1365}
1366
1367static void acpi_sbs_update_queue_run(unsigned long data)
1368{
1369 acpi_os_execute(OSL_GPE_HANDLER, acpi_sbs_update_queue, (void *)data);
1370}
1371
1372static int acpi_sbs_update_run(struct acpi_sbs *sbs, int data_type)
1373{
1374 struct acpi_battery *battery;
1375 int result = 0;
1376 int old_ac_present;
1377 int old_battery_present;
1378 int new_ac_present;
1379 int new_battery_present;
1380 int id;
1381 char dir_name[32];
1382 int do_battery_init, do_ac_init;
1383 s16 old_remaining_capacity;
1384
1385 if (sbs->zombie) {
1386 goto end;
1387 }
1388
1389 old_ac_present = acpi_ac_is_present(sbs);
1390
1391 result = acpi_ac_get_present(sbs);
1392 if (result) {
1393 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1394 "acpi_ac_get_present() failed\n"));
1395 }
1396
1397 new_ac_present = acpi_ac_is_present(sbs);
1398
1399 do_ac_init = (old_ac_present != new_ac_present);
1400
1401 if (data_type == DATA_TYPE_AC_STATE) {
1402 goto end;
1403 }
1404
1405 for (id = 0; id < MAX_SBS_BAT; id++) {
1406 battery = &sbs->battery[id];
1407 if (battery->alive == 0) {
1408 continue;
1409 }
1410
1411 old_remaining_capacity = battery->state.remaining_capacity;
1412
1413 old_battery_present = acpi_battery_is_present(battery);
1414
1415 result = acpi_battery_select(battery);
1416 if (result) {
1417 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1418 "acpi_battery_select() failed\n"));
1419 }
1420 if (sbs->zombie) {
1421 goto end;
1422 }
1423
1424 result = acpi_battery_get_present(battery);
1425 if (result) {
1426 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1427 "acpi_battery_get_present() failed\n"));
1428 }
1429 if (sbs->zombie) {
1430 goto end;
1431 }
1432
1433 new_battery_present = acpi_battery_is_present(battery);
1434
1435 do_battery_init = ((old_battery_present != new_battery_present)
1436 && new_battery_present);
1437
1438 if (sbs->zombie) {
1439 goto end;
1440 }
1441 if (do_ac_init || do_battery_init ||
1442 update_info_mode || sbs->update_info_mode) {
1443 if (sbs->update_info_mode) {
1444 sbs->update_info_mode = 0;
1445 } else {
1446 sbs->update_info_mode = 1;
1447 }
1448 result = acpi_battery_init(battery);
1449 if (result) {
1450 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1451 "acpi_battery_init() "
1452 "failed\n"));
1453 }
1454 }
1455 if (data_type == DATA_TYPE_INFO) {
1456 continue;
1457 }
1458
1459 if (sbs->zombie) {
1460 goto end;
1461 }
1462 if (new_battery_present) {
1463 result = acpi_battery_get_alarm(battery);
1464 if (result) {
1465 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1466 "acpi_battery_get_alarm() "
1467 "failed\n"));
1468 }
1469 if (data_type == DATA_TYPE_ALARM) {
1470 continue;
1471 }
1472
1473 result = acpi_battery_get_state(battery);
1474 if (result) {
1475 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1476 "acpi_battery_get_state() "
1477 "failed\n"));
1478 }
1479 }
1480 if (sbs->zombie) {
1481 goto end;
1482 }
1483 if (data_type != DATA_TYPE_COMMON) {
1484 continue;
1485 }
1486
1487 if (old_battery_present != new_battery_present) {
1488 (void)sprintf(dir_name, ACPI_BATTERY_DIR_NAME, id);
1489 result = acpi_sbs_generate_event(sbs->device,
1490 ACPI_SBS_BATTERY_NOTIFY_STATUS,
1491 new_battery_present,
1492 dir_name,
1493 ACPI_BATTERY_CLASS);
1494 if (result) {
1495 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1496 "acpi_sbs_generate_event() "
1497 "failed\n"));
1498 }
1499 }
1500 if (old_remaining_capacity != battery->state.remaining_capacity) {
1501 (void)sprintf(dir_name, ACPI_BATTERY_DIR_NAME, id);
1502 result = acpi_sbs_generate_event(sbs->device,
1503 ACPI_SBS_BATTERY_NOTIFY_STATUS,
1504 new_battery_present,
1505 dir_name,
1506 ACPI_BATTERY_CLASS);
1507 if (result) {
1508 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1509 "acpi_sbs_generate_event() failed\n"));
1510 }
1511 }
1512
1513 }
1514 if (sbs->zombie) {
1515 goto end;
1516 }
1517 if (data_type != DATA_TYPE_COMMON) {
1518 goto end;
1519 }
1520
1521 if (old_ac_present != new_ac_present) {
1522 result = acpi_sbs_generate_event(sbs->device,
1523 ACPI_SBS_AC_NOTIFY_STATUS,
1524 new_ac_present,
1525 ACPI_AC_DIR_NAME,
1526 ACPI_AC_CLASS);
1527 if (result) {
1528 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1529 "acpi_sbs_generate_event() failed\n"));
1530 }
1531 }
1532
1533 end:
1534 return result;
1535}
1536
1537static void acpi_sbs_update_queue(void *data)
1538{
1539 struct acpi_sbs *sbs = data;
1540 unsigned long delay = -1;
1541 int result;
1542
1543 if (sbs->zombie) {
1544 goto end;
1545 }
1546
1547 result = acpi_sbs_update_run(sbs, DATA_TYPE_COMMON);
1548 if (result) {
1549 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1550 "acpi_sbs_update_run() failed\n"));
1551 }
1552
1553 if (sbs->zombie) {
1554 goto end;
1555 }
1556
1557 if (update_mode == REQUEST_UPDATE_MODE) {
1558 goto end;
1559 }
1560
1561 delay = jiffies + HZ * update_time;
1562 sbs->update_timer.data = (unsigned long)data;
1563 sbs->update_timer.function = acpi_sbs_update_queue_run;
1564 sbs->update_timer.expires = delay;
1565 add_timer(&sbs->update_timer);
1566 end:
1567 ;
1568}
1569
1570static int acpi_sbs_add(struct acpi_device *device)
1571{
1572 struct acpi_sbs *sbs = NULL;
1573 struct acpi_ec_hc *ec_hc = NULL;
1574 int result, remove_result = 0;
1575 unsigned long sbs_obj;
1576 int id, cnt;
1577 acpi_status status = AE_OK;
1578
1579 sbs = kmalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
1580 if (!sbs) {
1581 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "kmalloc() failed\n"));
1582 return -ENOMEM;
1583 }
1584 memset(sbs, 0, sizeof(struct acpi_sbs));
1585
1586 cnt = 0;
1587 while (cnt < 10) {
1588 cnt++;
1589 ec_hc = acpi_get_ec_hc(device);
1590 if (ec_hc) {
1591 break;
1592 }
1593 msleep(1000);
1594 }
1595
1596 if (!ec_hc) {
1597 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1598 "acpi_get_ec_hc() failed: "
1599 "NO driver found for EC HC SMBus\n"));
1600 result = -ENODEV;
1601 goto end;
1602 }
1603
1604 sbs->device = device;
1605 sbs->smbus = ec_hc->smbus;
1606
1607 strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
1608 strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
1609 acpi_driver_data(device) = sbs;
1610
1611 sbs->update_time = 0;
1612 sbs->update_time2 = 0;
1613
1614 result = acpi_ac_add(sbs);
1615 if (result) {
1616 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "acpi_ac_add() failed\n"));
1617 goto end;
1618 }
1619 status = acpi_evaluate_integer(device->handle, "_SBS", NULL, &sbs_obj);
1620 if (ACPI_FAILURE(status)) {
1621 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1622 "acpi_evaluate_integer() failed\n"));
1623 result = -EIO;
1624 goto end;
1625 }
1626
1627 if (sbs_obj > 0) {
1628 result = acpi_sbsm_get_info(sbs);
1629 if (result) {
1630 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1631 "acpi_sbsm_get_info() failed\n"));
1632 goto end;
1633 }
1634 sbs->sbsm_present = 1;
1635 }
1636 if (sbs->sbsm_present == 0) {
1637 result = acpi_battery_add(sbs, 0);
1638 if (result) {
1639 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1640 "acpi_battery_add() failed\n"));
1641 goto end;
1642 }
1643 } else {
1644 for (id = 0; id < MAX_SBS_BAT; id++) {
1645 if ((sbs->sbsm_batteries_supported & (1 << id))) {
1646 result = acpi_battery_add(sbs, id);
1647 if (result) {
1648 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1649 "acpi_battery_add() "
1650 "failed\n"));
1651 goto end;
1652 }
1653 }
1654 }
1655 }
1656
1657 sbs->handle = device->handle;
1658
1659 init_timer(&sbs->update_timer);
1660 if (update_mode == QUEUE_UPDATE_MODE) {
1661 status = acpi_os_execute(OSL_GPE_HANDLER,
1662 acpi_sbs_update_queue, (void *)sbs);
1663 if (status != AE_OK) {
1664 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1665 "acpi_os_execute() failed\n"));
1666 }
1667 }
1668 sbs->update_time = update_time;
1669 sbs->update_time2 = update_time2;
1670
1671 printk(KERN_INFO PREFIX "%s [%s]\n",
1672 acpi_device_name(device), acpi_device_bid(device));
1673
1674 end:
1675 if (result) {
1676 remove_result = acpi_sbs_remove(device, 0);
1677 if (remove_result) {
1678 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1679 "acpi_sbs_remove() failed\n"));
1680 }
1681 }
1682
1683 return result;
1684}
1685
1686int acpi_sbs_remove(struct acpi_device *device, int type)
1687{
1688 struct acpi_sbs *sbs = device ? (struct acpi_sbs *)acpi_driver_data(device) : NULL;
1689 int id;
1690
1691 if (!sbs) {
1692 return -EINVAL;
1693 }
1694
1695 sbs->zombie = 1;
1696 sbs->update_time = 0;
1697 sbs->update_time2 = 0;
1698 del_timer_sync(&sbs->update_timer);
1699 acpi_os_wait_events_complete(NULL);
1700 del_timer_sync(&sbs->update_timer);
1701
1702 for (id = 0; id < MAX_SBS_BAT; id++) {
1703 acpi_battery_remove(sbs, id);
1704 }
1705
1706 acpi_ac_remove(sbs);
1707
1708 kfree(sbs);
1709
1710 return 0;
1711}
1712
1713static int __init acpi_sbs_init(void)
1714{
1715 int result = 0;
1716
1717 init_MUTEX(&sbs_sem);
1718
1719 if (capacity_mode != DEF_CAPACITY_UNIT
1720 && capacity_mode != MAH_CAPACITY_UNIT
1721 && capacity_mode != MWH_CAPACITY_UNIT) {
1722 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "acpi_sbs_init: "
1723 "invalid capacity_mode = %d\n",
1724 capacity_mode));
1725 return -EINVAL;
1726 }
1727
1728 acpi_ac_dir = acpi_lock_ac_dir();
1729 if (!acpi_ac_dir) {
1730 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1731 "acpi_lock_ac_dir() failed\n"));
1732 return -ENODEV;
1733 }
1734
1735 acpi_battery_dir = acpi_lock_battery_dir();
1736 if (!acpi_battery_dir) {
1737 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1738 "acpi_lock_battery_dir() failed\n"));
1739 return -ENODEV;
1740 }
1741
1742 result = acpi_bus_register_driver(&acpi_sbs_driver);
1743 if (result < 0) {
1744 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1745 "acpi_bus_register_driver() failed\n"));
1746 return -ENODEV;
1747 }
1748
1749 return 0;
1750}
1751
1752static void __exit acpi_sbs_exit(void)
1753{
1754
1755 acpi_bus_unregister_driver(&acpi_sbs_driver);
1756
1757 acpi_unlock_ac_dir(acpi_ac_dir);
1758 acpi_ac_dir = NULL;
1759 acpi_unlock_battery_dir(acpi_battery_dir);
1760 acpi_battery_dir = NULL;
1761
1762 return;
1763}
1764
1765module_init(acpi_sbs_init);
1766module_exit(acpi_sbs_exit);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 861ac378ce42..5fcb50c7b778 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -319,7 +319,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
319 goto end; 319 goto end;
320 } 320 }
321 321
322 acpi_os_free(buffer.pointer); 322 kfree(buffer.pointer);
323 323
324 device->wakeup.flags.valid = 1; 324 device->wakeup.flags.valid = 1;
325 /* Power button, Lid switch always enable wakeup */ 325 /* Power button, Lid switch always enable wakeup */
@@ -854,7 +854,7 @@ static void acpi_device_set_id(struct acpi_device *device,
854 printk(KERN_ERR "Memory allocation error\n"); 854 printk(KERN_ERR "Memory allocation error\n");
855 } 855 }
856 856
857 acpi_os_free(buffer.pointer); 857 kfree(buffer.pointer);
858} 858}
859 859
860static int acpi_device_set_context(struct acpi_device *device, int type) 860static int acpi_device_set_context(struct acpi_device *device, int type)
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index c90bd2f70b3f..c3bb7faad75e 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -86,7 +86,7 @@ acpi_system_read_dsdt(struct file *file,
86 86
87 res = simple_read_from_buffer(buffer, count, ppos, 87 res = simple_read_from_buffer(buffer, count, ppos,
88 dsdt.pointer, dsdt.length); 88 dsdt.pointer, dsdt.length);
89 acpi_os_free(dsdt.pointer); 89 kfree(dsdt.pointer);
90 90
91 return res; 91 return res;
92} 92}
@@ -113,7 +113,7 @@ acpi_system_read_fadt(struct file *file,
113 113
114 res = simple_read_from_buffer(buffer, count, ppos, 114 res = simple_read_from_buffer(buffer, count, ppos,
115 fadt.pointer, fadt.length); 115 fadt.pointer, fadt.length);
116 acpi_os_free(fadt.pointer); 116 kfree(fadt.pointer);
117 117
118 return res; 118 return res;
119} 119}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index c855f4446b5f..503c0b99db12 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -162,7 +162,7 @@ struct acpi_thermal_flags {
162}; 162};
163 163
164struct acpi_thermal { 164struct acpi_thermal {
165 acpi_handle handle; 165 struct acpi_device * device;
166 acpi_bus_id name; 166 acpi_bus_id name;
167 unsigned long temperature; 167 unsigned long temperature;
168 unsigned long last_temperature; 168 unsigned long last_temperature;
@@ -229,7 +229,7 @@ static int acpi_thermal_get_temperature(struct acpi_thermal *tz)
229 tz->last_temperature = tz->temperature; 229 tz->last_temperature = tz->temperature;
230 230
231 status = 231 status =
232 acpi_evaluate_integer(tz->handle, "_TMP", NULL, &tz->temperature); 232 acpi_evaluate_integer(tz->device->handle, "_TMP", NULL, &tz->temperature);
233 if (ACPI_FAILURE(status)) 233 if (ACPI_FAILURE(status))
234 return -ENODEV; 234 return -ENODEV;
235 235
@@ -248,7 +248,7 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
248 return -EINVAL; 248 return -EINVAL;
249 249
250 status = 250 status =
251 acpi_evaluate_integer(tz->handle, "_TZP", NULL, 251 acpi_evaluate_integer(tz->device->handle, "_TZP", NULL,
252 &tz->polling_frequency); 252 &tz->polling_frequency);
253 if (ACPI_FAILURE(status)) 253 if (ACPI_FAILURE(status))
254 return -ENODEV; 254 return -ENODEV;
@@ -285,7 +285,7 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
285 if (!tz) 285 if (!tz)
286 return -EINVAL; 286 return -EINVAL;
287 287
288 status = acpi_get_handle(tz->handle, "_SCP", &handle); 288 status = acpi_get_handle(tz->device->handle, "_SCP", &handle);
289 if (ACPI_FAILURE(status)) { 289 if (ACPI_FAILURE(status)) {
290 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n")); 290 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n"));
291 return -ENODEV; 291 return -ENODEV;
@@ -316,7 +316,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
316 316
317 /* Critical Shutdown (required) */ 317 /* Critical Shutdown (required) */
318 318
319 status = acpi_evaluate_integer(tz->handle, "_CRT", NULL, 319 status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL,
320 &tz->trips.critical.temperature); 320 &tz->trips.critical.temperature);
321 if (ACPI_FAILURE(status)) { 321 if (ACPI_FAILURE(status)) {
322 tz->trips.critical.flags.valid = 0; 322 tz->trips.critical.flags.valid = 0;
@@ -332,7 +332,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
332 /* Critical Sleep (optional) */ 332 /* Critical Sleep (optional) */
333 333
334 status = 334 status =
335 acpi_evaluate_integer(tz->handle, "_HOT", NULL, 335 acpi_evaluate_integer(tz->device->handle, "_HOT", NULL,
336 &tz->trips.hot.temperature); 336 &tz->trips.hot.temperature);
337 if (ACPI_FAILURE(status)) { 337 if (ACPI_FAILURE(status)) {
338 tz->trips.hot.flags.valid = 0; 338 tz->trips.hot.flags.valid = 0;
@@ -346,7 +346,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
346 /* Passive: Processors (optional) */ 346 /* Passive: Processors (optional) */
347 347
348 status = 348 status =
349 acpi_evaluate_integer(tz->handle, "_PSV", NULL, 349 acpi_evaluate_integer(tz->device->handle, "_PSV", NULL,
350 &tz->trips.passive.temperature); 350 &tz->trips.passive.temperature);
351 if (ACPI_FAILURE(status)) { 351 if (ACPI_FAILURE(status)) {
352 tz->trips.passive.flags.valid = 0; 352 tz->trips.passive.flags.valid = 0;
@@ -355,25 +355,25 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
355 tz->trips.passive.flags.valid = 1; 355 tz->trips.passive.flags.valid = 1;
356 356
357 status = 357 status =
358 acpi_evaluate_integer(tz->handle, "_TC1", NULL, 358 acpi_evaluate_integer(tz->device->handle, "_TC1", NULL,
359 &tz->trips.passive.tc1); 359 &tz->trips.passive.tc1);
360 if (ACPI_FAILURE(status)) 360 if (ACPI_FAILURE(status))
361 tz->trips.passive.flags.valid = 0; 361 tz->trips.passive.flags.valid = 0;
362 362
363 status = 363 status =
364 acpi_evaluate_integer(tz->handle, "_TC2", NULL, 364 acpi_evaluate_integer(tz->device->handle, "_TC2", NULL,
365 &tz->trips.passive.tc2); 365 &tz->trips.passive.tc2);
366 if (ACPI_FAILURE(status)) 366 if (ACPI_FAILURE(status))
367 tz->trips.passive.flags.valid = 0; 367 tz->trips.passive.flags.valid = 0;
368 368
369 status = 369 status =
370 acpi_evaluate_integer(tz->handle, "_TSP", NULL, 370 acpi_evaluate_integer(tz->device->handle, "_TSP", NULL,
371 &tz->trips.passive.tsp); 371 &tz->trips.passive.tsp);
372 if (ACPI_FAILURE(status)) 372 if (ACPI_FAILURE(status))
373 tz->trips.passive.flags.valid = 0; 373 tz->trips.passive.flags.valid = 0;
374 374
375 status = 375 status =
376 acpi_evaluate_reference(tz->handle, "_PSL", NULL, 376 acpi_evaluate_reference(tz->device->handle, "_PSL", NULL,
377 &tz->trips.passive.devices); 377 &tz->trips.passive.devices);
378 if (ACPI_FAILURE(status)) 378 if (ACPI_FAILURE(status))
379 tz->trips.passive.flags.valid = 0; 379 tz->trips.passive.flags.valid = 0;
@@ -393,14 +393,14 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
393 char name[5] = { '_', 'A', 'C', ('0' + i), '\0' }; 393 char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
394 394
395 status = 395 status =
396 acpi_evaluate_integer(tz->handle, name, NULL, 396 acpi_evaluate_integer(tz->device->handle, name, NULL,
397 &tz->trips.active[i].temperature); 397 &tz->trips.active[i].temperature);
398 if (ACPI_FAILURE(status)) 398 if (ACPI_FAILURE(status))
399 break; 399 break;
400 400
401 name[2] = 'L'; 401 name[2] = 'L';
402 status = 402 status =
403 acpi_evaluate_reference(tz->handle, name, NULL, 403 acpi_evaluate_reference(tz->device->handle, name, NULL,
404 &tz->trips.active[i].devices); 404 &tz->trips.active[i].devices);
405 if (ACPI_SUCCESS(status)) { 405 if (ACPI_SUCCESS(status)) {
406 tz->trips.active[i].flags.valid = 1; 406 tz->trips.active[i].flags.valid = 1;
@@ -424,7 +424,7 @@ static int acpi_thermal_get_devices(struct acpi_thermal *tz)
424 return -EINVAL; 424 return -EINVAL;
425 425
426 status = 426 status =
427 acpi_evaluate_reference(tz->handle, "_TZD", NULL, &tz->devices); 427 acpi_evaluate_reference(tz->device->handle, "_TZD", NULL, &tz->devices);
428 if (ACPI_FAILURE(status)) 428 if (ACPI_FAILURE(status))
429 return -ENODEV; 429 return -ENODEV;
430 430
@@ -453,10 +453,6 @@ static int acpi_thermal_call_usermode(char *path)
453 453
454static int acpi_thermal_critical(struct acpi_thermal *tz) 454static int acpi_thermal_critical(struct acpi_thermal *tz)
455{ 455{
456 int result = 0;
457 struct acpi_device *device = NULL;
458
459
460 if (!tz || !tz->trips.critical.flags.valid) 456 if (!tz || !tz->trips.critical.flags.valid)
461 return -EINVAL; 457 return -EINVAL;
462 458
@@ -466,14 +462,10 @@ static int acpi_thermal_critical(struct acpi_thermal *tz)
466 } else if (tz->trips.critical.flags.enabled) 462 } else if (tz->trips.critical.flags.enabled)
467 tz->trips.critical.flags.enabled = 0; 463 tz->trips.critical.flags.enabled = 0;
468 464
469 result = acpi_bus_get_device(tz->handle, &device);
470 if (result)
471 return result;
472
473 printk(KERN_EMERG 465 printk(KERN_EMERG
474 "Critical temperature reached (%ld C), shutting down.\n", 466 "Critical temperature reached (%ld C), shutting down.\n",
475 KELVIN_TO_CELSIUS(tz->temperature)); 467 KELVIN_TO_CELSIUS(tz->temperature));
476 acpi_bus_generate_event(device, ACPI_THERMAL_NOTIFY_CRITICAL, 468 acpi_bus_generate_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL,
477 tz->trips.critical.flags.enabled); 469 tz->trips.critical.flags.enabled);
478 470
479 acpi_thermal_call_usermode(ACPI_THERMAL_PATH_POWEROFF); 471 acpi_thermal_call_usermode(ACPI_THERMAL_PATH_POWEROFF);
@@ -483,10 +475,6 @@ static int acpi_thermal_critical(struct acpi_thermal *tz)
483 475
484static int acpi_thermal_hot(struct acpi_thermal *tz) 476static int acpi_thermal_hot(struct acpi_thermal *tz)
485{ 477{
486 int result = 0;
487 struct acpi_device *device = NULL;
488
489
490 if (!tz || !tz->trips.hot.flags.valid) 478 if (!tz || !tz->trips.hot.flags.valid)
491 return -EINVAL; 479 return -EINVAL;
492 480
@@ -496,11 +484,7 @@ static int acpi_thermal_hot(struct acpi_thermal *tz)
496 } else if (tz->trips.hot.flags.enabled) 484 } else if (tz->trips.hot.flags.enabled)
497 tz->trips.hot.flags.enabled = 0; 485 tz->trips.hot.flags.enabled = 0;
498 486
499 result = acpi_bus_get_device(tz->handle, &device); 487 acpi_bus_generate_event(tz->device, ACPI_THERMAL_NOTIFY_HOT,
500 if (result)
501 return result;
502
503 acpi_bus_generate_event(device, ACPI_THERMAL_NOTIFY_HOT,
504 tz->trips.hot.flags.enabled); 488 tz->trips.hot.flags.enabled);
505 489
506 /* TBD: Call user-mode "sleep(S4)" function */ 490 /* TBD: Call user-mode "sleep(S4)" function */
@@ -1193,8 +1177,7 @@ static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
1193 if (!tz) 1177 if (!tz)
1194 return; 1178 return;
1195 1179
1196 if (acpi_bus_get_device(tz->handle, &device)) 1180 device = tz->device;
1197 return;
1198 1181
1199 switch (event) { 1182 switch (event) {
1200 case ACPI_THERMAL_NOTIFY_TEMPERATURE: 1183 case ACPI_THERMAL_NOTIFY_TEMPERATURE:
@@ -1293,7 +1276,7 @@ static int acpi_thermal_add(struct acpi_device *device)
1293 return -ENOMEM; 1276 return -ENOMEM;
1294 memset(tz, 0, sizeof(struct acpi_thermal)); 1277 memset(tz, 0, sizeof(struct acpi_thermal));
1295 1278
1296 tz->handle = device->handle; 1279 tz->device = device;
1297 strcpy(tz->name, device->pnp.bus_id); 1280 strcpy(tz->name, device->pnp.bus_id);
1298 strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME); 1281 strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
1299 strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS); 1282 strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
@@ -1311,7 +1294,7 @@ static int acpi_thermal_add(struct acpi_device *device)
1311 1294
1312 acpi_thermal_check(tz); 1295 acpi_thermal_check(tz);
1313 1296
1314 status = acpi_install_notify_handler(tz->handle, 1297 status = acpi_install_notify_handler(device->handle,
1315 ACPI_DEVICE_NOTIFY, 1298 ACPI_DEVICE_NOTIFY,
1316 acpi_thermal_notify, tz); 1299 acpi_thermal_notify, tz);
1317 if (ACPI_FAILURE(status)) { 1300 if (ACPI_FAILURE(status)) {
@@ -1352,7 +1335,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
1352 /* deferred task may reinsert timer */ 1335 /* deferred task may reinsert timer */
1353 del_timer_sync(&(tz->timer)); 1336 del_timer_sync(&(tz->timer));
1354 1337
1355 status = acpi_remove_notify_handler(tz->handle, 1338 status = acpi_remove_notify_handler(device->handle,
1356 ACPI_DEVICE_NOTIFY, 1339 ACPI_DEVICE_NOTIFY,
1357 acpi_thermal_notify); 1340 acpi_thermal_notify);
1358 1341
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 7940fc1bd69e..5cff17dc78b3 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -166,10 +166,10 @@ acpi_status acpi_ut_delete_caches(void)
166 166
167 /* Free memory lists */ 167 /* Free memory lists */
168 168
169 acpi_os_free(acpi_gbl_global_list); 169 ACPI_FREE(acpi_gbl_global_list);
170 acpi_gbl_global_list = NULL; 170 acpi_gbl_global_list = NULL;
171 171
172 acpi_os_free(acpi_gbl_ns_node_list); 172 ACPI_FREE(acpi_gbl_ns_node_list);
173 acpi_gbl_ns_node_list = NULL; 173 acpi_gbl_ns_node_list = NULL;
174#endif 174#endif
175 175
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 56270a30718a..1a1f8109159c 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -162,7 +162,7 @@ acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
162 162
163 /* Now we can delete the cache object */ 163 /* Now we can delete the cache object */
164 164
165 acpi_os_free(cache); 165 ACPI_FREE(cache);
166 return (AE_OK); 166 return (AE_OK);
167} 167}
168 168
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 1930e1a75b22..f48227f4c8c9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -332,7 +332,7 @@ acpi_evaluate_string(acpi_handle handle,
332 332
333 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%s]\n", *data)); 333 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%s]\n", *data));
334 334
335 acpi_os_free(buffer.pointer); 335 kfree(buffer.pointer);
336 336
337 return AE_OK; 337 return AE_OK;
338} 338}
@@ -418,7 +418,7 @@ acpi_evaluate_reference(acpi_handle handle,
418 //kfree(list->handles); 418 //kfree(list->handles);
419 } 419 }
420 420
421 acpi_os_free(buffer.pointer); 421 kfree(buffer.pointer);
422 422
423 return status; 423 return status;
424} 424}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 9feb633087a9..56666a982476 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -117,7 +117,7 @@ struct acpi_video_enumerated_device {
117}; 117};
118 118
119struct acpi_video_bus { 119struct acpi_video_bus {
120 acpi_handle handle; 120 struct acpi_device *device;
121 u8 dos_setting; 121 u8 dos_setting;
122 struct acpi_video_enumerated_device *attached_array; 122 struct acpi_video_enumerated_device *attached_array;
123 u8 attached_count; 123 u8 attached_count;
@@ -155,7 +155,6 @@ struct acpi_video_device_brightness {
155}; 155};
156 156
157struct acpi_video_device { 157struct acpi_video_device {
158 acpi_handle handle;
159 unsigned long device_id; 158 unsigned long device_id;
160 struct acpi_video_device_flags flags; 159 struct acpi_video_device_flags flags;
161 struct acpi_video_device_cap cap; 160 struct acpi_video_device_cap cap;
@@ -272,7 +271,8 @@ static int
272acpi_video_device_query(struct acpi_video_device *device, unsigned long *state) 271acpi_video_device_query(struct acpi_video_device *device, unsigned long *state)
273{ 272{
274 int status; 273 int status;
275 status = acpi_evaluate_integer(device->handle, "_DGS", NULL, state); 274
275 status = acpi_evaluate_integer(device->dev->handle, "_DGS", NULL, state);
276 276
277 return status; 277 return status;
278} 278}
@@ -283,8 +283,7 @@ acpi_video_device_get_state(struct acpi_video_device *device,
283{ 283{
284 int status; 284 int status;
285 285
286 286 status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state);
287 status = acpi_evaluate_integer(device->handle, "_DCS", NULL, state);
288 287
289 return status; 288 return status;
290} 289}
@@ -299,7 +298,7 @@ acpi_video_device_set_state(struct acpi_video_device *device, int state)
299 298
300 299
301 arg0.integer.value = state; 300 arg0.integer.value = state;
302 status = acpi_evaluate_integer(device->handle, "_DSS", &args, &ret); 301 status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret);
303 302
304 return status; 303 return status;
305} 304}
@@ -315,7 +314,7 @@ acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
315 314
316 *levels = NULL; 315 *levels = NULL;
317 316
318 status = acpi_evaluate_object(device->handle, "_BCL", NULL, &buffer); 317 status = acpi_evaluate_object(device->dev->handle, "_BCL", NULL, &buffer);
319 if (!ACPI_SUCCESS(status)) 318 if (!ACPI_SUCCESS(status))
320 return status; 319 return status;
321 obj = (union acpi_object *)buffer.pointer; 320 obj = (union acpi_object *)buffer.pointer;
@@ -344,7 +343,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
344 343
345 344
346 arg0.integer.value = level; 345 arg0.integer.value = level;
347 status = acpi_evaluate_object(device->handle, "_BCM", &args, NULL); 346 status = acpi_evaluate_object(device->dev->handle, "_BCM", &args, NULL);
348 347
349 printk(KERN_DEBUG "set_level status: %x\n", status); 348 printk(KERN_DEBUG "set_level status: %x\n", status);
350 return status; 349 return status;
@@ -356,7 +355,7 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
356{ 355{
357 int status; 356 int status;
358 357
359 status = acpi_evaluate_integer(device->handle, "_BQC", NULL, level); 358 status = acpi_evaluate_integer(device->dev->handle, "_BQC", NULL, level);
360 359
361 return status; 360 return status;
362} 361}
@@ -383,7 +382,7 @@ acpi_video_device_EDID(struct acpi_video_device *device,
383 else 382 else
384 return -EINVAL; 383 return -EINVAL;
385 384
386 status = acpi_evaluate_object(device->handle, "_DDC", &args, &buffer); 385 status = acpi_evaluate_object(device->dev->handle, "_DDC", &args, &buffer);
387 if (ACPI_FAILURE(status)) 386 if (ACPI_FAILURE(status))
388 return -ENODEV; 387 return -ENODEV;
389 388
@@ -413,7 +412,7 @@ acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
413 412
414 arg0.integer.value = option; 413 arg0.integer.value = option;
415 414
416 status = acpi_evaluate_integer(video->handle, "_SPD", &args, &tmp); 415 status = acpi_evaluate_integer(video->device->handle, "_SPD", &args, &tmp);
417 if (ACPI_SUCCESS(status)) 416 if (ACPI_SUCCESS(status))
418 status = tmp ? (-EINVAL) : (AE_OK); 417 status = tmp ? (-EINVAL) : (AE_OK);
419 418
@@ -425,8 +424,7 @@ acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long *id)
425{ 424{
426 int status; 425 int status;
427 426
428 427 status = acpi_evaluate_integer(video->device->handle, "_GPD", NULL, id);
429 status = acpi_evaluate_integer(video->handle, "_GPD", NULL, id);
430 428
431 return status; 429 return status;
432} 430}
@@ -437,7 +435,7 @@ acpi_video_bus_POST_options(struct acpi_video_bus *video,
437{ 435{
438 int status; 436 int status;
439 437
440 status = acpi_evaluate_integer(video->handle, "_VPO", NULL, options); 438 status = acpi_evaluate_integer(video->device->handle, "_VPO", NULL, options);
441 *options &= 3; 439 *options &= 3;
442 440
443 return status; 441 return status;
@@ -478,7 +476,7 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
478 } 476 }
479 arg0.integer.value = (lcd_flag << 2) | bios_flag; 477 arg0.integer.value = (lcd_flag << 2) | bios_flag;
480 video->dos_setting = arg0.integer.value; 478 video->dos_setting = arg0.integer.value;
481 acpi_evaluate_object(video->handle, "_DOS", &args, NULL); 479 acpi_evaluate_object(video->device->handle, "_DOS", &args, NULL);
482 480
483 Failed: 481 Failed:
484 return status; 482 return status;
@@ -506,25 +504,25 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
506 504
507 memset(&device->cap, 0, 4); 505 memset(&device->cap, 0, 4);
508 506
509 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ADR", &h_dummy1))) { 507 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_ADR", &h_dummy1))) {
510 device->cap._ADR = 1; 508 device->cap._ADR = 1;
511 } 509 }
512 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_BCL", &h_dummy1))) { 510 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCL", &h_dummy1))) {
513 device->cap._BCL = 1; 511 device->cap._BCL = 1;
514 } 512 }
515 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_BCM", &h_dummy1))) { 513 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) {
516 device->cap._BCM = 1; 514 device->cap._BCM = 1;
517 } 515 }
518 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DDC", &h_dummy1))) { 516 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
519 device->cap._DDC = 1; 517 device->cap._DDC = 1;
520 } 518 }
521 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DCS", &h_dummy1))) { 519 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) {
522 device->cap._DCS = 1; 520 device->cap._DCS = 1;
523 } 521 }
524 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DGS", &h_dummy1))) { 522 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) {
525 device->cap._DGS = 1; 523 device->cap._DGS = 1;
526 } 524 }
527 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DSS", &h_dummy1))) { 525 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) {
528 device->cap._DSS = 1; 526 device->cap._DSS = 1;
529 } 527 }
530 528
@@ -588,22 +586,22 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
588 acpi_handle h_dummy1; 586 acpi_handle h_dummy1;
589 587
590 memset(&video->cap, 0, 4); 588 memset(&video->cap, 0, 4);
591 if (ACPI_SUCCESS(acpi_get_handle(video->handle, "_DOS", &h_dummy1))) { 589 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOS", &h_dummy1))) {
592 video->cap._DOS = 1; 590 video->cap._DOS = 1;
593 } 591 }
594 if (ACPI_SUCCESS(acpi_get_handle(video->handle, "_DOD", &h_dummy1))) { 592 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOD", &h_dummy1))) {
595 video->cap._DOD = 1; 593 video->cap._DOD = 1;
596 } 594 }
597 if (ACPI_SUCCESS(acpi_get_handle(video->handle, "_ROM", &h_dummy1))) { 595 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_ROM", &h_dummy1))) {
598 video->cap._ROM = 1; 596 video->cap._ROM = 1;
599 } 597 }
600 if (ACPI_SUCCESS(acpi_get_handle(video->handle, "_GPD", &h_dummy1))) { 598 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_GPD", &h_dummy1))) {
601 video->cap._GPD = 1; 599 video->cap._GPD = 1;
602 } 600 }
603 if (ACPI_SUCCESS(acpi_get_handle(video->handle, "_SPD", &h_dummy1))) { 601 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_SPD", &h_dummy1))) {
604 video->cap._SPD = 1; 602 video->cap._SPD = 1;
605 } 603 }
606 if (ACPI_SUCCESS(acpi_get_handle(video->handle, "_VPO", &h_dummy1))) { 604 if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_VPO", &h_dummy1))) {
607 video->cap._VPO = 1; 605 video->cap._VPO = 1;
608 } 606 }
609} 607}
@@ -1271,7 +1269,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1271 1269
1272 memset(data, 0, sizeof(struct acpi_video_device)); 1270 memset(data, 0, sizeof(struct acpi_video_device));
1273 1271
1274 data->handle = device->handle;
1275 strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME); 1272 strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
1276 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); 1273 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
1277 acpi_driver_data(device) = data; 1274 acpi_driver_data(device) = data;
@@ -1298,7 +1295,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1298 acpi_video_device_bind(video, data); 1295 acpi_video_device_bind(video, data);
1299 acpi_video_device_find_cap(data); 1296 acpi_video_device_find_cap(data);
1300 1297
1301 status = acpi_install_notify_handler(data->handle, 1298 status = acpi_install_notify_handler(device->handle,
1302 ACPI_DEVICE_NOTIFY, 1299 ACPI_DEVICE_NOTIFY,
1303 acpi_video_device_notify, 1300 acpi_video_device_notify,
1304 data); 1301 data);
@@ -1400,8 +1397,7 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
1400 union acpi_object *dod = NULL; 1397 union acpi_object *dod = NULL;
1401 union acpi_object *obj; 1398 union acpi_object *obj;
1402 1399
1403 1400 status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
1404 status = acpi_evaluate_object(video->handle, "_DOD", NULL, &buffer);
1405 if (!ACPI_SUCCESS(status)) { 1401 if (!ACPI_SUCCESS(status)) {
1406 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD")); 1402 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
1407 return status; 1403 return status;
@@ -1450,7 +1446,7 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
1450 video->attached_array = active_device_list; 1446 video->attached_array = active_device_list;
1451 video->attached_count = count; 1447 video->attached_count = count;
1452 out: 1448 out:
1453 acpi_os_free(buffer.pointer); 1449 kfree(buffer.pointer);
1454 return status; 1450 return status;
1455} 1451}
1456 1452
@@ -1569,7 +1565,7 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
1569 up(&video->sem); 1565 up(&video->sem);
1570 acpi_video_device_remove_fs(device->dev); 1566 acpi_video_device_remove_fs(device->dev);
1571 1567
1572 status = acpi_remove_notify_handler(device->handle, 1568 status = acpi_remove_notify_handler(device->dev->handle,
1573 ACPI_DEVICE_NOTIFY, 1569 ACPI_DEVICE_NOTIFY,
1574 acpi_video_device_notify); 1570 acpi_video_device_notify);
1575 1571
@@ -1624,8 +1620,7 @@ static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
1624 if (!video) 1620 if (!video)
1625 return; 1621 return;
1626 1622
1627 if (acpi_bus_get_device(handle, &device)) 1623 device = video->device;
1628 return;
1629 1624
1630 switch (event) { 1625 switch (event) {
1631 case ACPI_VIDEO_NOTIFY_SWITCH: /* User request that a switch occur, 1626 case ACPI_VIDEO_NOTIFY_SWITCH: /* User request that a switch occur,
@@ -1668,8 +1663,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
1668 if (!video_device) 1663 if (!video_device)
1669 return; 1664 return;
1670 1665
1671 if (acpi_bus_get_device(handle, &device)) 1666 device = video_device->dev;
1672 return;
1673 1667
1674 switch (event) { 1668 switch (event) {
1675 case ACPI_VIDEO_NOTIFY_SWITCH: /* change in status (cycle output device) */ 1669 case ACPI_VIDEO_NOTIFY_SWITCH: /* change in status (cycle output device) */
@@ -1707,7 +1701,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
1707 return -ENOMEM; 1701 return -ENOMEM;
1708 memset(video, 0, sizeof(struct acpi_video_bus)); 1702 memset(video, 0, sizeof(struct acpi_video_bus));
1709 1703
1710 video->handle = device->handle; 1704 video->device = device;
1711 strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); 1705 strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
1712 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS); 1706 strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
1713 acpi_driver_data(device) = video; 1707 acpi_driver_data(device) = video;
@@ -1727,7 +1721,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
1727 acpi_video_bus_get_devices(video, device); 1721 acpi_video_bus_get_devices(video, device);
1728 acpi_video_bus_start_devices(video); 1722 acpi_video_bus_start_devices(video);
1729 1723
1730 status = acpi_install_notify_handler(video->handle, 1724 status = acpi_install_notify_handler(device->handle,
1731 ACPI_DEVICE_NOTIFY, 1725 ACPI_DEVICE_NOTIFY,
1732 acpi_video_bus_notify, video); 1726 acpi_video_bus_notify, video);
1733 if (ACPI_FAILURE(status)) { 1727 if (ACPI_FAILURE(status)) {
@@ -1767,7 +1761,7 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
1767 1761
1768 acpi_video_bus_stop_devices(video); 1762 acpi_video_bus_stop_devices(video);
1769 1763
1770 status = acpi_remove_notify_handler(video->handle, 1764 status = acpi_remove_notify_handler(video->device->handle,
1771 ACPI_DEVICE_NOTIFY, 1765 ACPI_DEVICE_NOTIFY,
1772 acpi_video_bus_notify); 1766 acpi_video_bus_notify);
1773 1767
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index d3b426313a41..4521a249dd56 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -31,6 +31,7 @@
31#include <linux/atmdev.h> 31#include <linux/atmdev.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/poison.h>
34 35
35#include <asm/atomic.h> 36#include <asm/atomic.h>
36#include <asm/io.h> 37#include <asm/io.h>
@@ -1995,7 +1996,7 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
1995 } 1996 }
1996 i += 1; 1997 i += 1;
1997 } 1998 }
1998 if (*pointer == 0xdeadbeef) { 1999 if (*pointer == ATM_POISON) {
1999 return loader_start (lb, dev, ucode_start); 2000 return loader_start (lb, dev, ucode_start);
2000 } else { 2001 } else {
2001 // cast needed as there is no %? for pointer differences 2002 // cast needed as there is no %? for pointer differences
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 5d1c6c95262c..b0369bb20f08 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -35,6 +35,7 @@ static char const rcsid[] =
35 35
36#include <linux/module.h> 36#include <linux/module.h>
37#include <linux/pci.h> 37#include <linux/pci.h>
38#include <linux/poison.h>
38#include <linux/skbuff.h> 39#include <linux/skbuff.h>
39#include <linux/kernel.h> 40#include <linux/kernel.h>
40#include <linux/vmalloc.h> 41#include <linux/vmalloc.h>
@@ -3657,7 +3658,7 @@ probe_sram(struct idt77252_dev *card)
3657 writel(SAR_CMD_WRITE_SRAM | (0 << 2), SAR_REG_CMD); 3658 writel(SAR_CMD_WRITE_SRAM | (0 << 2), SAR_REG_CMD);
3658 3659
3659 for (addr = 0x4000; addr < 0x80000; addr += 0x4000) { 3660 for (addr = 0x4000; addr < 0x80000; addr += 0x4000) {
3660 writel(0xdeadbeef, SAR_REG_DR0); 3661 writel(ATM_POISON, SAR_REG_DR0);
3661 writel(SAR_CMD_WRITE_SRAM | (addr << 2), SAR_REG_CMD); 3662 writel(SAR_CMD_WRITE_SRAM | (addr << 2), SAR_REG_CMD);
3662 3663
3663 writel(SAR_CMD_READ_SRAM | (0 << 2), SAR_REG_CMD); 3664 writel(SAR_CMD_READ_SRAM | (0 << 2), SAR_REG_CMD);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 0242cbb86a87..5109fa37c662 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -249,18 +249,6 @@ static int irqdma_allocated;
249#include <linux/cdrom.h> /* for the compatibility eject ioctl */ 249#include <linux/cdrom.h> /* for the compatibility eject ioctl */
250#include <linux/completion.h> 250#include <linux/completion.h>
251 251
252/*
253 * Interrupt freeing also means /proc VFS work - dont do it
254 * from interrupt context. We push this work into keventd:
255 */
256static void fd_free_irq_fn(void *data)
257{
258 fd_free_irq();
259}
260
261static DECLARE_WORK(fd_free_irq_work, fd_free_irq_fn, NULL);
262
263
264static struct request *current_req; 252static struct request *current_req;
265static struct request_queue *floppy_queue; 253static struct request_queue *floppy_queue;
266static void do_fd_request(request_queue_t * q); 254static void do_fd_request(request_queue_t * q);
@@ -826,15 +814,6 @@ static int set_dor(int fdc, char mask, char data)
826 UDRS->select_date = jiffies; 814 UDRS->select_date = jiffies;
827 } 815 }
828 } 816 }
829 /*
830 * We should propagate failures to grab the resources back
831 * nicely from here. Actually we ought to rewrite the fd
832 * driver some day too.
833 */
834 if (newdor & FLOPPY_MOTOR_MASK)
835 floppy_grab_irq_and_dma();
836 if (olddor & FLOPPY_MOTOR_MASK)
837 floppy_release_irq_and_dma();
838 return olddor; 817 return olddor;
839} 818}
840 819
@@ -892,8 +871,6 @@ static int _lock_fdc(int drive, int interruptible, int line)
892 line); 871 line);
893 return -1; 872 return -1;
894 } 873 }
895 if (floppy_grab_irq_and_dma() == -1)
896 return -EBUSY;
897 874
898 if (test_and_set_bit(0, &fdc_busy)) { 875 if (test_and_set_bit(0, &fdc_busy)) {
899 DECLARE_WAITQUEUE(wait, current); 876 DECLARE_WAITQUEUE(wait, current);
@@ -915,6 +892,8 @@ static int _lock_fdc(int drive, int interruptible, int line)
915 892
916 set_current_state(TASK_RUNNING); 893 set_current_state(TASK_RUNNING);
917 remove_wait_queue(&fdc_wait, &wait); 894 remove_wait_queue(&fdc_wait, &wait);
895
896 flush_scheduled_work();
918 } 897 }
919 command_status = FD_COMMAND_NONE; 898 command_status = FD_COMMAND_NONE;
920 899
@@ -948,7 +927,6 @@ static inline void unlock_fdc(void)
948 if (elv_next_request(floppy_queue)) 927 if (elv_next_request(floppy_queue))
949 do_fd_request(floppy_queue); 928 do_fd_request(floppy_queue);
950 spin_unlock_irqrestore(&floppy_lock, flags); 929 spin_unlock_irqrestore(&floppy_lock, flags);
951 floppy_release_irq_and_dma();
952 wake_up(&fdc_wait); 930 wake_up(&fdc_wait);
953} 931}
954 932
@@ -3694,8 +3672,8 @@ static int floppy_release(struct inode *inode, struct file *filp)
3694 } 3672 }
3695 if (!UDRS->fd_ref) 3673 if (!UDRS->fd_ref)
3696 opened_bdev[drive] = NULL; 3674 opened_bdev[drive] = NULL;
3697 floppy_release_irq_and_dma();
3698 mutex_unlock(&open_lock); 3675 mutex_unlock(&open_lock);
3676
3699 return 0; 3677 return 0;
3700} 3678}
3701 3679
@@ -3726,9 +3704,6 @@ static int floppy_open(struct inode *inode, struct file *filp)
3726 if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (filp->f_flags & O_EXCL))) 3704 if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
3727 goto out2; 3705 goto out2;
3728 3706
3729 if (floppy_grab_irq_and_dma())
3730 goto out2;
3731
3732 if (filp->f_flags & O_EXCL) 3707 if (filp->f_flags & O_EXCL)
3733 UDRS->fd_ref = -1; 3708 UDRS->fd_ref = -1;
3734 else 3709 else
@@ -3805,7 +3780,6 @@ out:
3805 UDRS->fd_ref--; 3780 UDRS->fd_ref--;
3806 if (!UDRS->fd_ref) 3781 if (!UDRS->fd_ref)
3807 opened_bdev[drive] = NULL; 3782 opened_bdev[drive] = NULL;
3808 floppy_release_irq_and_dma();
3809out2: 3783out2:
3810 mutex_unlock(&open_lock); 3784 mutex_unlock(&open_lock);
3811 return res; 3785 return res;
@@ -3822,14 +3796,9 @@ static int check_floppy_change(struct gendisk *disk)
3822 return 1; 3796 return 1;
3823 3797
3824 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { 3798 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
3825 if (floppy_grab_irq_and_dma()) {
3826 return 1;
3827 }
3828
3829 lock_fdc(drive, 0); 3799 lock_fdc(drive, 0);
3830 poll_drive(0, 0); 3800 poll_drive(0, 0);
3831 process_fd_request(); 3801 process_fd_request();
3832 floppy_release_irq_and_dma();
3833 } 3802 }
3834 3803
3835 if (UTESTF(FD_DISK_CHANGED) || 3804 if (UTESTF(FD_DISK_CHANGED) ||
@@ -4346,7 +4315,6 @@ static int __init floppy_init(void)
4346 fdc = 0; 4315 fdc = 0;
4347 del_timer(&fd_timeout); 4316 del_timer(&fd_timeout);
4348 current_drive = 0; 4317 current_drive = 0;
4349 floppy_release_irq_and_dma();
4350 initialising = 0; 4318 initialising = 0;
4351 if (have_no_fdc) { 4319 if (have_no_fdc) {
4352 DPRINT("no floppy controllers found\n"); 4320 DPRINT("no floppy controllers found\n");
@@ -4504,7 +4472,7 @@ static void floppy_release_irq_and_dma(void)
4504 if (irqdma_allocated) { 4472 if (irqdma_allocated) {
4505 fd_disable_dma(); 4473 fd_disable_dma();
4506 fd_free_dma(); 4474 fd_free_dma();
4507 schedule_work(&fd_free_irq_work); 4475 fd_free_irq();
4508 irqdma_allocated = 0; 4476 irqdma_allocated = 0;
4509 } 4477 }
4510 set_dor(0, ~0, 8); 4478 set_dor(0, ~0, 8);
@@ -4600,8 +4568,6 @@ void cleanup_module(void)
4600 /* eject disk, if any */ 4568 /* eject disk, if any */
4601 fd_eject(0); 4569 fd_eject(0);
4602 4570
4603 flush_scheduled_work(); /* fd_free_irq() might be pending */
4604
4605 wait_for_completion(&device_release); 4571 wait_for_completion(&device_release);
4606} 4572}
4607 4573
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 3721e12135d9..cc42e762396f 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -250,8 +250,6 @@ static int floppy_open(struct inode *inode, struct file *filp);
250static int floppy_release(struct inode *inode, struct file *filp); 250static int floppy_release(struct inode *inode, struct file *filp);
251static int floppy_check_change(struct gendisk *disk); 251static int floppy_check_change(struct gendisk *disk);
252static int floppy_revalidate(struct gendisk *disk); 252static int floppy_revalidate(struct gendisk *disk);
253static int swim3_add_device(struct device_node *swims);
254int swim3_init(void);
255 253
256#ifndef CONFIG_PMAC_MEDIABAY 254#ifndef CONFIG_PMAC_MEDIABAY
257#define check_media_bay(which, what) 1 255#define check_media_bay(which, what) 1
@@ -1011,114 +1009,63 @@ static struct block_device_operations floppy_fops = {
1011 .revalidate_disk= floppy_revalidate, 1009 .revalidate_disk= floppy_revalidate,
1012}; 1010};
1013 1011
1014int swim3_init(void) 1012static int swim3_add_device(struct macio_dev *mdev, int index)
1015{
1016 struct device_node *swim;
1017 int err = -ENOMEM;
1018 int i;
1019
1020 swim = find_devices("floppy");
1021 while (swim && (floppy_count < MAX_FLOPPIES))
1022 {
1023 swim3_add_device(swim);
1024 swim = swim->next;
1025 }
1026
1027 swim = find_devices("swim3");
1028 while (swim && (floppy_count < MAX_FLOPPIES))
1029 {
1030 swim3_add_device(swim);
1031 swim = swim->next;
1032 }
1033
1034 if (!floppy_count)
1035 return -ENODEV;
1036
1037 for (i = 0; i < floppy_count; i++) {
1038 disks[i] = alloc_disk(1);
1039 if (!disks[i])
1040 goto out;
1041 }
1042
1043 if (register_blkdev(FLOPPY_MAJOR, "fd")) {
1044 err = -EBUSY;
1045 goto out;
1046 }
1047
1048 swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
1049 if (!swim3_queue) {
1050 err = -ENOMEM;
1051 goto out_queue;
1052 }
1053
1054 for (i = 0; i < floppy_count; i++) {
1055 struct gendisk *disk = disks[i];
1056 disk->major = FLOPPY_MAJOR;
1057 disk->first_minor = i;
1058 disk->fops = &floppy_fops;
1059 disk->private_data = &floppy_states[i];
1060 disk->queue = swim3_queue;
1061 disk->flags |= GENHD_FL_REMOVABLE;
1062 sprintf(disk->disk_name, "fd%d", i);
1063 set_capacity(disk, 2880);
1064 add_disk(disk);
1065 }
1066 return 0;
1067
1068out_queue:
1069 unregister_blkdev(FLOPPY_MAJOR, "fd");
1070out:
1071 while (i--)
1072 put_disk(disks[i]);
1073 /* shouldn't we do something with results of swim_add_device()? */
1074 return err;
1075}
1076
1077static int swim3_add_device(struct device_node *swim)
1078{ 1013{
1014 struct device_node *swim = mdev->ofdev.node;
1079 struct device_node *mediabay; 1015 struct device_node *mediabay;
1080 struct floppy_state *fs = &floppy_states[floppy_count]; 1016 struct floppy_state *fs = &floppy_states[index];
1081 struct resource res_reg, res_dma; 1017 int rc = -EBUSY;
1082 1018
1083 if (of_address_to_resource(swim, 0, &res_reg) || 1019 /* Check & Request resources */
1084 of_address_to_resource(swim, 1, &res_dma)) { 1020 if (macio_resource_count(mdev) < 2) {
1085 printk(KERN_ERR "swim3: Can't get addresses\n"); 1021 printk(KERN_WARNING "fd%d: no address for %s\n",
1086 return -EINVAL; 1022 index, swim->full_name);
1023 return -ENXIO;
1087 } 1024 }
1088 if (request_mem_region(res_reg.start, res_reg.end - res_reg.start + 1, 1025 if (macio_irq_count(mdev) < 2) {
1089 " (reg)") == NULL) { 1026 printk(KERN_WARNING "fd%d: no intrs for device %s\n",
1090 printk(KERN_ERR "swim3: Can't request register space\n"); 1027 index, swim->full_name);
1091 return -EINVAL;
1092 } 1028 }
1093 if (request_mem_region(res_dma.start, res_dma.end - res_dma.start + 1, 1029 if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
1094 " (dma)") == NULL) { 1030 printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
1095 release_mem_region(res_reg.start, 1031 index, swim->full_name);
1096 res_reg.end - res_reg.start + 1); 1032 return -EBUSY;
1097 printk(KERN_ERR "swim3: Can't request DMA space\n");
1098 return -EINVAL;
1099 } 1033 }
1100 1034 if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
1101 if (swim->n_intrs < 2) { 1035 printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
1102 printk(KERN_INFO "swim3: expecting 2 intrs (n_intrs:%d)\n", 1036 index, swim->full_name);
1103 swim->n_intrs); 1037 macio_release_resource(mdev, 0);
1104 release_mem_region(res_reg.start, 1038 return -EBUSY;
1105 res_reg.end - res_reg.start + 1);
1106 release_mem_region(res_dma.start,
1107 res_dma.end - res_dma.start + 1);
1108 return -EINVAL;
1109 } 1039 }
1040 dev_set_drvdata(&mdev->ofdev.dev, fs);
1110 1041
1111 mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ? swim->parent : NULL; 1042 mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ?
1043 swim->parent : NULL;
1112 if (mediabay == NULL) 1044 if (mediabay == NULL)
1113 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); 1045 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
1114 1046
1115 memset(fs, 0, sizeof(*fs)); 1047 memset(fs, 0, sizeof(*fs));
1116 spin_lock_init(&fs->lock); 1048 spin_lock_init(&fs->lock);
1117 fs->state = idle; 1049 fs->state = idle;
1118 fs->swim3 = (struct swim3 __iomem *)ioremap(res_reg.start, 0x200); 1050 fs->swim3 = (struct swim3 __iomem *)
1119 fs->dma = (struct dbdma_regs __iomem *)ioremap(res_dma.start, 0x200); 1051 ioremap(macio_resource_start(mdev, 0), 0x200);
1120 fs->swim3_intr = swim->intrs[0].line; 1052 if (fs->swim3 == NULL) {
1121 fs->dma_intr = swim->intrs[1].line; 1053 printk("fd%d: couldn't map registers for %s\n",
1054 index, swim->full_name);
1055 rc = -ENOMEM;
1056 goto out_release;
1057 }
1058 fs->dma = (struct dbdma_regs __iomem *)
1059 ioremap(macio_resource_start(mdev, 1), 0x200);
1060 if (fs->dma == NULL) {
1061 printk("fd%d: couldn't map DMA for %s\n",
1062 index, swim->full_name);
1063 iounmap(fs->swim3);
1064 rc = -ENOMEM;
1065 goto out_release;
1066 }
1067 fs->swim3_intr = macio_irq(mdev, 0);
1068 fs->dma_intr = macio_irq(mdev, 1);
1122 fs->cur_cyl = -1; 1069 fs->cur_cyl = -1;
1123 fs->cur_sector = -1; 1070 fs->cur_sector = -1;
1124 fs->secpercyl = 36; 1071 fs->secpercyl = 36;
@@ -1132,15 +1079,16 @@ static int swim3_add_device(struct device_node *swim)
1132 st_le16(&fs->dma_cmd[1].command, DBDMA_STOP); 1079 st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
1133 1080
1134 if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) { 1081 if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
1135 printk(KERN_ERR "Couldn't get irq %d for SWIM3\n", fs->swim3_intr); 1082 printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
1083 index, fs->swim3_intr, swim->full_name);
1136 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0); 1084 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
1085 goto out_unmap;
1137 return -EBUSY; 1086 return -EBUSY;
1138 } 1087 }
1139/* 1088/*
1140 if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) { 1089 if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
1141 printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA", 1090 printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
1142 fs->dma_intr); 1091 fs->dma_intr);
1143 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
1144 return -EBUSY; 1092 return -EBUSY;
1145 } 1093 }
1146*/ 1094*/
@@ -1150,8 +1098,90 @@ static int swim3_add_device(struct device_node *swim)
1150 printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count, 1098 printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
1151 mediabay ? "in media bay" : ""); 1099 mediabay ? "in media bay" : "");
1152 1100
1153 floppy_count++; 1101 return 0;
1154 1102
1103 out_unmap:
1104 iounmap(fs->dma);
1105 iounmap(fs->swim3);
1106
1107 out_release:
1108 macio_release_resource(mdev, 0);
1109 macio_release_resource(mdev, 1);
1110
1111 return rc;
1112}
1113
1114static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
1115{
1116 int i, rc;
1117 struct gendisk *disk;
1118
1119 /* Add the drive */
1120 rc = swim3_add_device(mdev, floppy_count);
1121 if (rc)
1122 return rc;
1123
1124 /* Now create the queue if not there yet */
1125 if (swim3_queue == NULL) {
1126 /* If we failed, there isn't much we can do as the driver is still
1127 * too dumb to remove the device, so just bail out
1128 */
1129 if (register_blkdev(FLOPPY_MAJOR, "fd"))
1130 return 0;
1131 swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
1132 if (swim3_queue == NULL) {
1133 unregister_blkdev(FLOPPY_MAJOR, "fd");
1134 return 0;
1135 }
1136 }
1137
1138 /* Now register that disk. Same comment about failure handling */
1139 i = floppy_count++;
1140 disk = disks[i] = alloc_disk(1);
1141 if (disk == NULL)
1142 return 0;
1143
1144 disk->major = FLOPPY_MAJOR;
1145 disk->first_minor = i;
1146 disk->fops = &floppy_fops;
1147 disk->private_data = &floppy_states[i];
1148 disk->queue = swim3_queue;
1149 disk->flags |= GENHD_FL_REMOVABLE;
1150 sprintf(disk->disk_name, "fd%d", i);
1151 set_capacity(disk, 2880);
1152 add_disk(disk);
1153
1154 return 0;
1155}
1156
1157static struct of_device_id swim3_match[] =
1158{
1159 {
1160 .name = "swim3",
1161 },
1162 {
1163 .compatible = "ohare-swim3"
1164 },
1165 {
1166 .compatible = "swim3"
1167 },
1168};
1169
1170static struct macio_driver swim3_driver =
1171{
1172 .name = "swim3",
1173 .match_table = swim3_match,
1174 .probe = swim3_attach,
1175#if 0
1176 .suspend = swim3_suspend,
1177 .resume = swim3_resume,
1178#endif
1179};
1180
1181
1182int swim3_init(void)
1183{
1184 macio_register_driver(&swim3_driver);
1155 return 0; 1185 return 0;
1156} 1186}
1157 1187
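The swim3 hunk above converts the driver from ad-hoc device_node probing to the macio bus driver model. Stripped of the floppy specifics, the registration pattern it adopts looks roughly like the sketch below, written only against the macio calls visible in the hunk; the device name and resource index are placeholders. Note the terminating empty entry in the match table, which the match loop relies on.

	#include <linux/module.h>
	#include <asm/macio.h>

	static int __devinit example_attach(struct macio_dev *mdev,
					    const struct of_device_id *match)
	{
		/* Claim the first MMIO range from the device-tree node;
		 * a real driver would then ioremap() it and grab its
		 * interrupts with macio_irq() + request_irq(), as the
		 * swim3 conversion above does. */
		if (macio_request_resource(mdev, 0, "example (mmio)"))
			return -EBUSY;
		/* regs = ioremap(macio_resource_start(mdev, 0), 0x100); */
		return 0;
	}

	static struct of_device_id example_match[] = {
		{ .name = "example", },
		{ },	/* terminating entry: the match loop stops here */
	};

	static struct macio_driver example_driver = {
		.name		= "example",
		.match_table	= example_match,
		.probe		= example_attach,
	};

	static int __init example_init(void)
	{
		return macio_register_driver(&example_driver);
	}
	module_init(example_init);

The bus core walks such tables and invokes .probe for each matching device-tree node, which is how swim3_attach() gets called above.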
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 2830f58d6f77..8eebf9ca3786 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -739,6 +739,7 @@ static int bluecard_open(bluecard_info_t *info)
739 739
740 hdev->type = HCI_PCCARD; 740 hdev->type = HCI_PCCARD;
741 hdev->driver_data = info; 741 hdev->driver_data = info;
742 SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
742 743
743 hdev->open = bluecard_hci_open; 744 hdev->open = bluecard_hci_open;
744 hdev->close = bluecard_hci_close; 745 hdev->close = bluecard_hci_close;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index c9dba5565cac..df7bb016df49 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -582,6 +582,7 @@ static int bt3c_open(bt3c_info_t *info)
582 582
583 hdev->type = HCI_PCCARD; 583 hdev->type = HCI_PCCARD;
584 hdev->driver_data = info; 584 hdev->driver_data = info;
585 SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
585 586
586 hdev->open = bt3c_hci_open; 587 hdev->open = bt3c_hci_open;
587 hdev->close = bt3c_hci_close; 588 hdev->close = bt3c_hci_close;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c889bf8109a1..746ccca97f6f 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -502,6 +502,7 @@ static int btuart_open(btuart_info_t *info)
502 502
503 hdev->type = HCI_PCCARD; 503 hdev->type = HCI_PCCARD;
504 hdev->driver_data = info; 504 hdev->driver_data = info;
505 SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
505 506
506 hdev->open = btuart_hci_open; 507 hdev->open = btuart_hci_open;
507 hdev->close = btuart_hci_close; 508 hdev->close = btuart_hci_close;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index be6eed175aa3..0e99def8a1e3 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -484,6 +484,7 @@ static int dtl1_open(dtl1_info_t *info)
484 484
485 hdev->type = HCI_PCCARD; 485 hdev->type = HCI_PCCARD;
486 hdev->driver_data = info; 486 hdev->driver_data = info;
487 SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
487 488
488 hdev->open = dtl1_hci_open; 489 hdev->open = dtl1_hci_open;
489 hdev->close = dtl1_hci_close; 490 hdev->close = dtl1_hci_close;
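The four bluetooth hunks above make the same one-line addition: pointing the new HCI device at its PCMCIA parent so it lands in the right place in the sysfs device tree. A minimal sketch of the shared pattern, with illustrative names ("parent", "example_register_hci") and assuming the hci_core API these drivers use:

	#include <net/bluetooth/bluetooth.h>
	#include <net/bluetooth/hci_core.h>

	/* Attach the HCI device to its bus parent before registering it.
	 * Real drivers also fill in the open/close/send callbacks before
	 * calling hci_register_dev(). */
	static int example_register_hci(struct device *parent, void *drv_data)
	{
		struct hci_dev *hdev = hci_alloc_dev();

		if (!hdev)
			return -ENOMEM;

		hdev->type = HCI_PCCARD;
		hdev->driver_data = drv_data;
		SET_HCIDEV_DEV(hdev, parent);	/* the one-liner added above */

		if (hci_register_dev(hdev) < 0) {
			hci_free_dev(hdev);
			return -ENODEV;
		}
		return 0;
	}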
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index a7d9d7e99e72..6a0c2230f82f 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -122,6 +122,9 @@ static struct usb_device_id blacklist_ids[] = {
122 /* RTX Telecom based adapter with buggy SCO support */ 122 /* RTX Telecom based adapter with buggy SCO support */
123 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC }, 123 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },
124 124
125 /* Belkin F8T012 */
126 { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU },
127
125 /* Digianswer devices */ 128 /* Digianswer devices */
126 { USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER }, 129 { USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
127 { USB_DEVICE(0x08fd, 0x0002), .driver_info = HCI_IGNORE }, 130 { USB_DEVICE(0x08fd, 0x0002), .driver_info = HCI_IGNORE },
@@ -129,6 +132,9 @@ static struct usb_device_id blacklist_ids[] = {
129 /* CSR BlueCore Bluetooth Sniffer */ 132 /* CSR BlueCore Bluetooth Sniffer */
130 { USB_DEVICE(0x0a12, 0x0002), .driver_info = HCI_SNIFFER }, 133 { USB_DEVICE(0x0a12, 0x0002), .driver_info = HCI_SNIFFER },
131 134
135 /* Frontline ComProbe Bluetooth Sniffer */
136 { USB_DEVICE(0x16d3, 0x0002), .driver_info = HCI_SNIFFER },
137
132 { } /* Terminating entry */ 138 { } /* Terminating entry */
133}; 139};
134 140
@@ -984,6 +990,9 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
984 if (reset || id->driver_info & HCI_RESET) 990 if (reset || id->driver_info & HCI_RESET)
985 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks); 991 set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
986 992
993 if (id->driver_info & HCI_WRONG_SCO_MTU)
994 set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
995
987 if (id->driver_info & HCI_SNIFFER) { 996 if (id->driver_info & HCI_SNIFFER) {
988 if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) 997 if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
989 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); 998 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
@@ -1042,10 +1051,81 @@ static void hci_usb_disconnect(struct usb_interface *intf)
1042 hci_free_dev(hdev); 1051 hci_free_dev(hdev);
1043} 1052}
1044 1053
1054static int hci_usb_suspend(struct usb_interface *intf, pm_message_t message)
1055{
1056 struct hci_usb *husb = usb_get_intfdata(intf);
1057 struct list_head killed;
1058 unsigned long flags;
1059 int i;
1060
1061 if (!husb || intf == husb->isoc_iface)
1062 return 0;
1063
1064 hci_suspend_dev(husb->hdev);
1065
1066 INIT_LIST_HEAD(&killed);
1067
1068 for (i = 0; i < 4; i++) {
1069 struct _urb_queue *q = &husb->pending_q[i];
1070 struct _urb *_urb, *_tmp;
1071
1072 while ((_urb = _urb_dequeue(q))) {
1073 /* reset queue since _urb_dequeue sets it to NULL */
1074 _urb->queue = q;
1075 usb_kill_urb(&_urb->urb);
1076 list_add(&_urb->list, &killed);
1077 }
1078
1079 spin_lock_irqsave(&q->lock, flags);
1080
1081 list_for_each_entry_safe(_urb, _tmp, &killed, list) {
1082 list_move_tail(&_urb->list, &q->head);
1083 }
1084
1085 spin_unlock_irqrestore(&q->lock, flags);
1086 }
1087
1088 return 0;
1089}
1090
1091static int hci_usb_resume(struct usb_interface *intf)
1092{
1093 struct hci_usb *husb = usb_get_intfdata(intf);
1094 unsigned long flags;
1095 int i, err = 0;
1096
1097 if (!husb || intf == husb->isoc_iface)
1098 return 0;
1099
1100 for (i = 0; i < 4; i++) {
1101 struct _urb_queue *q = &husb->pending_q[i];
1102 struct _urb *_urb;
1103
1104 spin_lock_irqsave(&q->lock, flags);
1105
1106 list_for_each_entry(_urb, &q->head, list) {
1107 err = usb_submit_urb(&_urb->urb, GFP_ATOMIC);
1108 if (err)
1109 break;
1110 }
1111
1112 spin_unlock_irqrestore(&q->lock, flags);
1113
1114 if (err)
1115 return -EIO;
1116 }
1117
1118 hci_resume_dev(husb->hdev);
1119
1120 return 0;
1121}
1122
1045static struct usb_driver hci_usb_driver = { 1123static struct usb_driver hci_usb_driver = {
1046 .name = "hci_usb", 1124 .name = "hci_usb",
1047 .probe = hci_usb_probe, 1125 .probe = hci_usb_probe,
1048 .disconnect = hci_usb_disconnect, 1126 .disconnect = hci_usb_disconnect,
1127 .suspend = hci_usb_suspend,
1128 .resume = hci_usb_resume,
1049 .id_table = bluetooth_ids, 1129 .id_table = bluetooth_ids,
1050}; 1130};
1051 1131
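hci_usb gains USB power-management hooks above: suspend kills every pending URB (restoring the queue pointers so resume can find them again), and resume resubmits the queues. The general shape of wiring such callbacks into a struct usb_driver is sketched below; everything named example_* is a placeholder, and the stubs stand in for a real driver's quiesce/resubmit logic.

	#include <linux/module.h>
	#include <linux/usb.h>

	static struct usb_device_id example_ids[] = {
		{ USB_DEVICE(0x0000, 0x0000) },	/* placeholder VID/PID */
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_ids);

	static int example_probe(struct usb_interface *intf,
				 const struct usb_device_id *id)
	{
		return 0;	/* stub */
	}

	static void example_disconnect(struct usb_interface *intf)
	{
	}

	/* Quiesce the hardware; a real driver usb_kill_urb()s its
	 * pending URBs here, as hci_usb_suspend() does above. */
	static int example_suspend(struct usb_interface *intf,
				   pm_message_t message)
	{
		return 0;
	}

	/* Bring the device back; hci_usb_resume() resubmits the killed
	 * URBs with usb_submit_urb(..., GFP_ATOMIC). */
	static int example_resume(struct usb_interface *intf)
	{
		return 0;
	}

	static struct usb_driver example_driver = {
		.name		= "example",
		.probe		= example_probe,
		.disconnect	= example_disconnect,
		.suspend	= example_suspend,
		.resume		= example_resume,
		.id_table	= example_ids,
	};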
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 37100a6ea1a8..963fc55cdc85 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -35,6 +35,7 @@
35#define HCI_SNIFFER 0x10 35#define HCI_SNIFFER 0x10
36#define HCI_BCM92035 0x20 36#define HCI_BCM92035 0x20
37#define HCI_BROKEN_ISOC 0x40 37#define HCI_BROKEN_ISOC 0x40
38#define HCI_WRONG_SCO_MTU 0x80
38 39
39#define HCI_MAX_IFACE_NUM 3 40#define HCI_MAX_IFACE_NUM 3
40 41
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index ea589007fa26..aac67a3a6019 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -277,7 +277,6 @@ static int vhci_open(struct inode *inode, struct file *file)
277 277
278 hdev->type = HCI_VHCI; 278 hdev->type = HCI_VHCI;
279 hdev->driver_data = vhci; 279 hdev->driver_data = vhci;
280 SET_HCIDEV_DEV(hdev, vhci_miscdev.dev);
281 280
282 hdev->open = vhci_open_dev; 281 hdev->open = vhci_open_dev;
283 hdev->close = vhci_close_dev; 282 hdev->close = vhci_close_dev;
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index ffcf15c30e90..d9c5a9142ad1 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -1059,7 +1059,7 @@ ioctl_out:
1059 return ret_val; 1059 return ret_val;
1060} 1060}
1061 1061
1062static struct file_operations agp_fops = 1062static const struct file_operations agp_fops =
1063{ 1063{
1064 .owner = THIS_MODULE, 1064 .owner = THIS_MODULE,
1065 .llseek = no_llseek, 1065 .llseek = no_llseek,
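The agp change above is the first of several dozen identical hunks in this merge: file_operations tables that are only ever read are marked const, which lets the compiler place them in read-only data and turns any accidental write into a build error. A minimal sketch of the resulting pattern (names illustrative):

	#include <linux/fs.h>
	#include <linux/module.h>

	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
	{
		return 0;	/* illustrative no-op */
	}

	/* const moves the method table into read-only data; assigning
	 * to a member after this point no longer compiles. */
	static const struct file_operations example_fops = {
		.owner	= THIS_MODULE,
		.llseek	= no_llseek,
		.read	= example_read,
	};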
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index bcc4668835b5..10a389dafd60 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -112,7 +112,7 @@ static int ac_ioctl(struct inode *, struct file *, unsigned int,
112 unsigned long); 112 unsigned long);
113static irqreturn_t ac_interrupt(int, void *, struct pt_regs *); 113static irqreturn_t ac_interrupt(int, void *, struct pt_regs *);
114 114
115static struct file_operations ac_fops = { 115static const struct file_operations ac_fops = {
116 .owner = THIS_MODULE, 116 .owner = THIS_MODULE,
117 .llseek = no_llseek, 117 .llseek = no_llseek,
118 .read = ac_read, 118 .read = ac_read,
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
index 46d66037b917..8ce3f34cfc22 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/char/cs5535_gpio.c
@@ -158,7 +158,7 @@ static int cs5535_gpio_open(struct inode *inode, struct file *file)
158 return nonseekable_open(inode, file); 158 return nonseekable_open(inode, file);
159} 159}
160 160
161static struct file_operations cs5535_gpio_fops = { 161static const struct file_operations cs5535_gpio_fops = {
162 .owner = THIS_MODULE, 162 .owner = THIS_MODULE,
163 .write = cs5535_gpio_write, 163 .write = cs5535_gpio_write,
164 .read = cs5535_gpio_read, 164 .read = cs5535_gpio_read,
diff --git a/drivers/char/ds1286.c b/drivers/char/ds1286.c
index d755cac14bc1..21c8229f5443 100644
--- a/drivers/char/ds1286.c
+++ b/drivers/char/ds1286.c
@@ -281,7 +281,7 @@ static unsigned int ds1286_poll(struct file *file, poll_table *wait)
281 * The various file operations we support. 281 * The various file operations we support.
282 */ 282 */
283 283
284static struct file_operations ds1286_fops = { 284static const struct file_operations ds1286_fops = {
285 .llseek = no_llseek, 285 .llseek = no_llseek,
286 .read = ds1286_read, 286 .read = ds1286_read,
287 .poll = ds1286_poll, 287 .poll = ds1286_poll,
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
index 625e8b517005..bcdb107aa967 100644
--- a/drivers/char/ds1302.c
+++ b/drivers/char/ds1302.c
@@ -282,7 +282,7 @@ get_rtc_status(char *buf)
282 282
283/* The various file operations we support. */ 283/* The various file operations we support. */
284 284
285static struct file_operations rtc_fops = { 285static const struct file_operations rtc_fops = {
286 .owner = THIS_MODULE, 286 .owner = THIS_MODULE,
287 .ioctl = rtc_ioctl, 287 .ioctl = rtc_ioctl,
288}; 288};
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 953e670dcd09..48cb8f0e8ebf 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -336,7 +336,7 @@ proc_therm_ds1620_read(char *buf, char **start, off_t offset,
336static struct proc_dir_entry *proc_therm_ds1620; 336static struct proc_dir_entry *proc_therm_ds1620;
337#endif 337#endif
338 338
339static struct file_operations ds1620_fops = { 339static const struct file_operations ds1620_fops = {
340 .owner = THIS_MODULE, 340 .owner = THIS_MODULE,
341 .open = nonseekable_open, 341 .open = nonseekable_open,
342 .read = ds1620_read, 342 .read = ds1620_read,
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 09b413618b57..9b1bf60ffbe7 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -483,7 +483,7 @@ static int dsp56k_release(struct inode *inode, struct file *file)
483 return 0; 483 return 0;
484} 484}
485 485
486static struct file_operations dsp56k_fops = { 486static const struct file_operations dsp56k_fops = {
487 .owner = THIS_MODULE, 487 .owner = THIS_MODULE,
488 .read = dsp56k_read, 488 .read = dsp56k_read,
489 .write = dsp56k_write, 489 .write = dsp56k_write,
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index da2c89f1b8bc..5e82c3bad2e3 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -94,7 +94,7 @@ static int dtlk_release(struct inode *, struct file *);
94static int dtlk_ioctl(struct inode *inode, struct file *file, 94static int dtlk_ioctl(struct inode *inode, struct file *file,
95 unsigned int cmd, unsigned long arg); 95 unsigned int cmd, unsigned long arg);
96 96
97static struct file_operations dtlk_fops = 97static const struct file_operations dtlk_fops =
98{ 98{
99 .owner = THIS_MODULE, 99 .owner = THIS_MODULE,
100 .read = dtlk_read, 100 .read = dtlk_read,
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index 0090e7a4fcd3..004141d535a2 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -285,7 +285,7 @@ efi_rtc_close(struct inode *inode, struct file *file)
285 * The various file operations we support. 285 * The various file operations we support.
286 */ 286 */
287 287
288static struct file_operations efi_rtc_fops = { 288static const struct file_operations efi_rtc_fops = {
289 .owner = THIS_MODULE, 289 .owner = THIS_MODULE,
290 .ioctl = efi_rtc_ioctl, 290 .ioctl = efi_rtc_ioctl,
291 .open = efi_rtc_open, 291 .open = efi_rtc_open,
diff --git a/drivers/char/ftape/zftape/zftape-init.c b/drivers/char/ftape/zftape/zftape-init.c
index 55272566b740..164a1aa77a2f 100644
--- a/drivers/char/ftape/zftape/zftape-init.c
+++ b/drivers/char/ftape/zftape/zftape-init.c
@@ -86,7 +86,7 @@ static ssize_t zft_read (struct file *fp, char __user *buff,
86static ssize_t zft_write(struct file *fp, const char __user *buff, 86static ssize_t zft_write(struct file *fp, const char __user *buff,
87 size_t req_len, loff_t *ppos); 87 size_t req_len, loff_t *ppos);
88 88
89static struct file_operations zft_cdev = 89static const struct file_operations zft_cdev =
90{ 90{
91 .owner = THIS_MODULE, 91 .owner = THIS_MODULE,
92 .read = zft_read, 92 .read = zft_read,
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index bebd7e34f792..817dc409ac20 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -482,7 +482,7 @@ static inline int gen_rtc_proc_init(void) { return 0; }
482 * The various file operations we support. 482 * The various file operations we support.
483 */ 483 */
484 484
485static struct file_operations gen_rtc_fops = { 485static const struct file_operations gen_rtc_fops = {
486 .owner = THIS_MODULE, 486 .owner = THIS_MODULE,
487#ifdef CONFIG_GEN_RTC_X 487#ifdef CONFIG_GEN_RTC_X
488 .read = gen_rtc_read, 488 .read = gen_rtc_read,
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e5643f3aa73f..8afba339f05a 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -553,7 +553,7 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
553 return err; 553 return err;
554} 554}
555 555
556static struct file_operations hpet_fops = { 556static const struct file_operations hpet_fops = {
557 .owner = THIS_MODULE, 557 .owner = THIS_MODULE,
558 .llseek = no_llseek, 558 .llseek = no_llseek,
559 .read = hpet_read, 559 .read = hpet_read,
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 8dc205b275e3..56612a2dca6b 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -1299,13 +1299,12 @@ static int __init hvsi_console_init(void)
1299 hp->inbuf_end = hp->inbuf; 1299 hp->inbuf_end = hp->inbuf;
1300 hp->state = HVSI_CLOSED; 1300 hp->state = HVSI_CLOSED;
1301 hp->vtermno = *vtermno; 1301 hp->vtermno = *vtermno;
1302 hp->virq = virt_irq_create_mapping(irq[0]); 1302 hp->virq = irq_create_mapping(NULL, irq[0], 0);
1303 if (hp->virq == NO_IRQ) { 1303 if (hp->virq == NO_IRQ) {
1304 printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n", 1304 printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
1305 __FUNCTION__, hp->virq); 1305 __FUNCTION__, irq[0]);
1306 continue; 1306 continue;
1307 } else 1307 }
1308 hp->virq = irq_offset_up(hp->virq);
1309 1308
1310 hvsi_count++; 1309 hvsi_count++;
1311 } 1310 }
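The hvsi hunk replaces the old virt_irq_create_mapping()/irq_offset_up() pair with a single irq_create_mapping() call from the reworked powerpc interrupt code merged here. A sketch of the resulting map-then-request sequence, following the three-argument call form shown in the hunk (NULL selects the default host) and the handler signature of this era (pt_regs still passed); names are illustrative:

	#include <linux/interrupt.h>
	#include <asm/irq.h>

	static irqreturn_t example_handler(int irq, void *dev_id,
					   struct pt_regs *regs)
	{
		return IRQ_HANDLED;
	}

	static int example_map_and_request(irq_hw_number_t hwirq)
	{
		/* translate the hardware interrupt number into a
		 * virtual irq before requesting it */
		unsigned int virq = irq_create_mapping(NULL, hwirq, 0);

		if (virq == NO_IRQ)
			return -EINVAL;
		return request_irq(virq, example_handler, 0, "example", NULL);
	}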
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 88b026639f10..154a81d328c1 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -149,7 +149,7 @@ out:
149} 149}
150 150
151 151
152static struct file_operations rng_chrdev_ops = { 152static const struct file_operations rng_chrdev_ops = {
153 .owner = THIS_MODULE, 153 .owner = THIS_MODULE,
154 .open = rng_dev_open, 154 .open = rng_dev_open,
155 .read = rng_dev_read, 155 .read = rng_dev_read,
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index f3c3aaf4560e..353d9f3cf8d7 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -80,7 +80,7 @@ static int i8k_open_fs(struct inode *inode, struct file *file);
80static int i8k_ioctl(struct inode *, struct file *, unsigned int, 80static int i8k_ioctl(struct inode *, struct file *, unsigned int,
81 unsigned long); 81 unsigned long);
82 82
83static struct file_operations i8k_fops = { 83static const struct file_operations i8k_fops = {
84 .open = i8k_open_fs, 84 .open = i8k_open_fs,
85 .read = seq_read, 85 .read = seq_read,
86 .llseek = seq_lseek, 86 .llseek = seq_lseek,
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index a4200a2b0811..518ece7ac656 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -233,7 +233,7 @@ static void *DevTableMem[IP2_MAX_BOARDS];
233/* This is the driver descriptor for the ip2ipl device, which is used to 233/* This is the driver descriptor for the ip2ipl device, which is used to
234 * download the loadware to the boards. 234 * download the loadware to the boards.
235 */ 235 */
236static struct file_operations ip2_ipl = { 236static const struct file_operations ip2_ipl = {
237 .owner = THIS_MODULE, 237 .owner = THIS_MODULE,
238 .read = ip2_ipl_read, 238 .read = ip2_ipl_read,
239 .write = ip2_ipl_write, 239 .write = ip2_ipl_write,
diff --git a/drivers/char/ip27-rtc.c b/drivers/char/ip27-rtc.c
index 3acdac3c967e..a48da02aad2f 100644
--- a/drivers/char/ip27-rtc.c
+++ b/drivers/char/ip27-rtc.c
@@ -196,7 +196,7 @@ static int rtc_release(struct inode *inode, struct file *file)
196 * The various file operations we support. 196 * The various file operations we support.
197 */ 197 */
198 198
199static struct file_operations rtc_fops = { 199static const struct file_operations rtc_fops = {
200 .owner = THIS_MODULE, 200 .owner = THIS_MODULE,
201 .ioctl = rtc_ioctl, 201 .ioctl = rtc_ioctl,
202 .open = rtc_open, 202 .open = rtc_open,
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 2fc894fef1cb..68d7c61a864e 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -765,7 +765,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
765} 765}
766#endif 766#endif
767 767
768static struct file_operations ipmi_fops = { 768static const struct file_operations ipmi_fops = {
769 .owner = THIS_MODULE, 769 .owner = THIS_MODULE,
770 .ioctl = ipmi_ioctl, 770 .ioctl = ipmi_ioctl,
771#ifdef CONFIG_COMPAT 771#ifdef CONFIG_COMPAT
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 74a889c58333..accaaf1a6b69 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -807,7 +807,7 @@ static int ipmi_close(struct inode *ino, struct file *filep)
807 return 0; 807 return 0;
808} 808}
809 809
810static struct file_operations ipmi_wdog_fops = { 810static const struct file_operations ipmi_wdog_fops = {
811 .owner = THIS_MODULE, 811 .owner = THIS_MODULE,
812 .read = ipmi_read, 812 .read = ipmi_read,
813 .poll = ipmi_poll, 813 .poll = ipmi_poll,
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index fbce2f0669d6..84dfc4278139 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -748,7 +748,7 @@ static int stli_initpcibrd(int brdtype, struct pci_dev *devp);
748 * will give access to the shared memory on the Stallion intelligent 748 * will give access to the shared memory on the Stallion intelligent
749 * board. This is also a very useful debugging tool. 749 * board. This is also a very useful debugging tool.
750 */ 750 */
751static struct file_operations stli_fsiomem = { 751static const struct file_operations stli_fsiomem = {
752 .owner = THIS_MODULE, 752 .owner = THIS_MODULE,
753 .read = stli_memread, 753 .read = stli_memread,
754 .write = stli_memwrite, 754 .write = stli_memwrite,
diff --git a/drivers/char/ite_gpio.c b/drivers/char/ite_gpio.c
index 747ba45e50e5..cde562d70c4f 100644
--- a/drivers/char/ite_gpio.c
+++ b/drivers/char/ite_gpio.c
@@ -357,7 +357,7 @@ DEB(printk("interrupt 0x%x %d\n",ITE_GPAISR, i));
357 } 357 }
358} 358}
359 359
360static struct file_operations ite_gpio_fops = { 360static const struct file_operations ite_gpio_fops = {
361 .owner = THIS_MODULE, 361 .owner = THIS_MODULE,
362 .ioctl = ite_gpio_ioctl, 362 .ioctl = ite_gpio_ioctl,
363 .open = ite_gpio_open, 363 .open = ite_gpio_open,
diff --git a/drivers/char/lcd.c b/drivers/char/lcd.c
index 7d49b241de56..da601fd6c07a 100644
--- a/drivers/char/lcd.c
+++ b/drivers/char/lcd.c
@@ -598,7 +598,7 @@ static ssize_t lcd_read(struct file *file, char *buf,
598 * The various file operations we support. 598 * The various file operations we support.
599 */ 599 */
600 600
601static struct file_operations lcd_fops = { 601static const struct file_operations lcd_fops = {
602 .read = lcd_read, 602 .read = lcd_read,
603 .ioctl = lcd_ioctl, 603 .ioctl = lcd_ioctl,
604 .open = lcd_open, 604 .open = lcd_open,
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 582cdbdb0c42..f875fda3b089 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -666,7 +666,7 @@ static int lp_ioctl(struct inode *inode, struct file *file,
666 return retval; 666 return retval;
667} 667}
668 668
669static struct file_operations lp_fops = { 669static const struct file_operations lp_fops = {
670 .owner = THIS_MODULE, 670 .owner = THIS_MODULE,
671 .write = lp_write, 671 .write = lp_write,
672 .ioctl = lp_ioctl, 672 .ioctl = lp_ioctl,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 70f3954d6dfd..e97c32ceb796 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -776,7 +776,7 @@ static int open_port(struct inode * inode, struct file * filp)
776#define open_kmem open_mem 776#define open_kmem open_mem
777#define open_oldmem open_mem 777#define open_oldmem open_mem
778 778
779static struct file_operations mem_fops = { 779static const struct file_operations mem_fops = {
780 .llseek = memory_lseek, 780 .llseek = memory_lseek,
781 .read = read_mem, 781 .read = read_mem,
782 .write = write_mem, 782 .write = write_mem,
@@ -784,7 +784,7 @@ static struct file_operations mem_fops = {
784 .open = open_mem, 784 .open = open_mem,
785}; 785};
786 786
787static struct file_operations kmem_fops = { 787static const struct file_operations kmem_fops = {
788 .llseek = memory_lseek, 788 .llseek = memory_lseek,
789 .read = read_kmem, 789 .read = read_kmem,
790 .write = write_kmem, 790 .write = write_kmem,
@@ -792,7 +792,7 @@ static struct file_operations kmem_fops = {
792 .open = open_kmem, 792 .open = open_kmem,
793}; 793};
794 794
795static struct file_operations null_fops = { 795static const struct file_operations null_fops = {
796 .llseek = null_lseek, 796 .llseek = null_lseek,
797 .read = read_null, 797 .read = read_null,
798 .write = write_null, 798 .write = write_null,
@@ -800,7 +800,7 @@ static struct file_operations null_fops = {
800}; 800};
801 801
802#if defined(CONFIG_ISA) || !defined(__mc68000__) 802#if defined(CONFIG_ISA) || !defined(__mc68000__)
803static struct file_operations port_fops = { 803static const struct file_operations port_fops = {
804 .llseek = memory_lseek, 804 .llseek = memory_lseek,
805 .read = read_port, 805 .read = read_port,
806 .write = write_port, 806 .write = write_port,
@@ -808,7 +808,7 @@ static struct file_operations port_fops = {
808}; 808};
809#endif 809#endif
810 810
811static struct file_operations zero_fops = { 811static const struct file_operations zero_fops = {
812 .llseek = zero_lseek, 812 .llseek = zero_lseek,
813 .read = read_zero, 813 .read = read_zero,
814 .write = write_zero, 814 .write = write_zero,
@@ -819,14 +819,14 @@ static struct backing_dev_info zero_bdi = {
819 .capabilities = BDI_CAP_MAP_COPY, 819 .capabilities = BDI_CAP_MAP_COPY,
820}; 820};
821 821
822static struct file_operations full_fops = { 822static const struct file_operations full_fops = {
823 .llseek = full_lseek, 823 .llseek = full_lseek,
824 .read = read_full, 824 .read = read_full,
825 .write = write_full, 825 .write = write_full,
826}; 826};
827 827
828#ifdef CONFIG_CRASH_DUMP 828#ifdef CONFIG_CRASH_DUMP
829static struct file_operations oldmem_fops = { 829static const struct file_operations oldmem_fops = {
830 .read = read_oldmem, 830 .read = read_oldmem,
831 .open = open_oldmem, 831 .open = open_oldmem,
832}; 832};
@@ -853,7 +853,7 @@ static ssize_t kmsg_write(struct file * file, const char __user * buf,
853 return ret; 853 return ret;
854} 854}
855 855
856static struct file_operations kmsg_fops = { 856static const struct file_operations kmsg_fops = {
857 .write = kmsg_write, 857 .write = kmsg_write,
858}; 858};
859 859
@@ -903,7 +903,7 @@ static int memory_open(struct inode * inode, struct file * filp)
903 return 0; 903 return 0;
904} 904}
905 905
906static struct file_operations memory_fops = { 906static const struct file_operations memory_fops = {
907 .open = memory_open, /* just a selector for the real open */ 907 .open = memory_open, /* just a selector for the real open */
908}; 908};
909 909
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index d5fa19da330b..62ebe09656e3 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -113,7 +113,7 @@ static int misc_seq_open(struct inode *inode, struct file *file)
113 return seq_open(file, &misc_seq_ops); 113 return seq_open(file, &misc_seq_ops);
114} 114}
115 115
116static struct file_operations misc_proc_fops = { 116static const struct file_operations misc_proc_fops = {
117 .owner = THIS_MODULE, 117 .owner = THIS_MODULE,
118 .open = misc_seq_open, 118 .open = misc_seq_open,
119 .read = seq_read, 119 .read = seq_read,
@@ -176,7 +176,7 @@ fail:
176 */ 176 */
177static struct class *misc_class; 177static struct class *misc_class;
178 178
179static struct file_operations misc_fops = { 179static const struct file_operations misc_fops = {
180 .owner = THIS_MODULE, 180 .owner = THIS_MODULE,
181 .open = misc_open, 181 .open = misc_open,
182}; 182};
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 70b774ff5aa4..1f0f2b6dae26 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -63,7 +63,7 @@ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
63 */ 63 */
64static unsigned long mmtimer_femtoperiod = 0; 64static unsigned long mmtimer_femtoperiod = 0;
65 65
66static struct file_operations mmtimer_fops = { 66static const struct file_operations mmtimer_fops = {
67 .owner = THIS_MODULE, 67 .owner = THIS_MODULE,
68 .mmap = mmtimer_mmap, 68 .mmap = mmtimer_mmap,
69 .ioctl = mmtimer_ioctl, 69 .ioctl = mmtimer_ioctl,
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index d3ba2f860ef0..39a2e661ff55 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -454,7 +454,7 @@ static int register_serial_portandirq(unsigned int port, int irq)
454} 454}
455 455
456 456
457static struct file_operations mwave_fops = { 457static const struct file_operations mwave_fops = {
458 .owner = THIS_MODULE, 458 .owner = THIS_MODULE,
459 .read = mwave_read, 459 .read = mwave_read,
460 .write = mwave_write, 460 .write = mwave_write,
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 8c5f102622b6..a39f19c35a6a 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -437,7 +437,7 @@ nvram_read_proc(char *buffer, char **start, off_t offset,
437 437
438#endif /* CONFIG_PROC_FS */ 438#endif /* CONFIG_PROC_FS */
439 439
440static struct file_operations nvram_fops = { 440static const struct file_operations nvram_fops = {
441 .owner = THIS_MODULE, 441 .owner = THIS_MODULE,
442 .llseek = nvram_llseek, 442 .llseek = nvram_llseek,
443 .read = nvram_read, 443 .read = nvram_read,
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index f240a104d250..7c57ebfa8640 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -183,7 +183,7 @@ static int button_read (struct file *filp, char __user *buffer,
183 * attempts to perform these operations on the device. 183 * attempts to perform these operations on the device.
184 */ 184 */
185 185
186static struct file_operations button_fops = { 186static const struct file_operations button_fops = {
187 .owner = THIS_MODULE, 187 .owner = THIS_MODULE,
188 .read = button_read, 188 .read = button_read,
189}; 189};
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index 8865387d3448..206cf6f50695 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -642,7 +642,7 @@ static void kick_open(void)
642 udelay(25); 642 udelay(25);
643} 643}
644 644
645static struct file_operations flash_fops = 645static const struct file_operations flash_fops =
646{ 646{
647 .owner = THIS_MODULE, 647 .owner = THIS_MODULE,
648 .llseek = flash_llseek, 648 .llseek = flash_llseek,
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index c860de6a6fde..4005ee0aa11e 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -236,7 +236,7 @@ static int pc8736x_gpio_open(struct inode *inode, struct file *file)
236 return nonseekable_open(inode, file); 236 return nonseekable_open(inode, file);
237} 237}
238 238
239static struct file_operations pc8736x_gpio_fops = { 239static const struct file_operations pc8736x_gpio_fops = {
240 .owner = THIS_MODULE, 240 .owner = THIS_MODULE,
241 .open = pc8736x_gpio_open, 241 .open = pc8736x_gpio_open,
242 .write = nsc_gpio_write, 242 .write = nsc_gpio_write,
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 31c8a21f9d87..50d20aafeb18 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1938,7 +1938,7 @@ static void cm4000_detach(struct pcmcia_device *link)
1938 return; 1938 return;
1939} 1939}
1940 1940
1941static struct file_operations cm4000_fops = { 1941static const struct file_operations cm4000_fops = {
1942 .owner = THIS_MODULE, 1942 .owner = THIS_MODULE,
1943 .read = cmm_read, 1943 .read = cmm_read,
1944 .write = cmm_write, 1944 .write = cmm_write,
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 47a8465bf95b..55cf4be42976 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -688,7 +688,7 @@ static void reader_detach(struct pcmcia_device *link)
688 return; 688 return;
689} 689}
690 690
691static struct file_operations reader_fops = { 691static const struct file_operations reader_fops = {
692 .owner = THIS_MODULE, 692 .owner = THIS_MODULE,
693 .read = cm4040_read, 693 .read = cm4040_read,
694 .write = cm4040_write, 694 .write = cm4040_write,
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 24231d9743dc..520d2cf82bc0 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -739,7 +739,7 @@ static unsigned int pp_poll (struct file * file, poll_table * wait)
739 739
740static struct class *ppdev_class; 740static struct class *ppdev_class;
741 741
742static struct file_operations pp_fops = { 742static const struct file_operations pp_fops = {
743 .owner = THIS_MODULE, 743 .owner = THIS_MODULE,
744 .llseek = no_llseek, 744 .llseek = no_llseek,
745 .read = pp_read, 745 .read = pp_read,
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 164bddae047f..4c3a5ca9d8f7 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -416,7 +416,7 @@ static struct entropy_store input_pool = {
416 .poolinfo = &poolinfo_table[0], 416 .poolinfo = &poolinfo_table[0],
417 .name = "input", 417 .name = "input",
418 .limit = 1, 418 .limit = 1,
419 .lock = SPIN_LOCK_UNLOCKED, 419 .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
420 .pool = input_pool_data 420 .pool = input_pool_data
421}; 421};
422 422
@@ -425,7 +425,7 @@ static struct entropy_store blocking_pool = {
425 .name = "blocking", 425 .name = "blocking",
426 .limit = 1, 426 .limit = 1,
427 .pull = &input_pool, 427 .pull = &input_pool,
428 .lock = SPIN_LOCK_UNLOCKED, 428 .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
429 .pool = blocking_pool_data 429 .pool = blocking_pool_data
430}; 430};
431 431
@@ -433,7 +433,7 @@ static struct entropy_store nonblocking_pool = {
433 .poolinfo = &poolinfo_table[1], 433 .poolinfo = &poolinfo_table[1],
434 .name = "nonblocking", 434 .name = "nonblocking",
435 .pull = &input_pool, 435 .pull = &input_pool,
436 .lock = SPIN_LOCK_UNLOCKED, 436 .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
437 .pool = nonblocking_pool_data 437 .pool = nonblocking_pool_data
438}; 438};
439 439
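random.c switches its statically initialized pool locks from the generic SPIN_LOCK_UNLOCKED to __SPIN_LOCK_UNLOCKED(name). With lockdep (also part of this merge), each static lock needs a distinct name so the validator can assign it its own class; the generic initializer would make every such lock look identical. A sketch of the pattern, mirroring the call form used above:

	#include <linux/spinlock.h>

	static struct example_pool {
		spinlock_t	lock;
		int		entropy_count;
	} example_pool = {
		/* the argument names the lock for lockdep, giving this
		 * statically allocated lock its own class */
		.lock		= __SPIN_LOCK_UNLOCKED(&example_pool.lock),
		.entropy_count	= 0,
	};

For a standalone static lock, DEFINE_SPINLOCK(name) achieves the same thing in one line.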
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 9bf97c5e38c0..579868af4a54 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -30,7 +30,7 @@ struct raw_device_data {
30static struct class *raw_class; 30static struct class *raw_class;
31static struct raw_device_data raw_devices[MAX_RAW_MINORS]; 31static struct raw_device_data raw_devices[MAX_RAW_MINORS];
32static DEFINE_MUTEX(raw_mutex); 32static DEFINE_MUTEX(raw_mutex);
33static struct file_operations raw_ctl_fops; /* forward declaration */ 33static const struct file_operations raw_ctl_fops; /* forward declaration */
34 34
35/* 35/*
36 * Open/close code for raw IO. 36 * Open/close code for raw IO.
@@ -261,7 +261,7 @@ static ssize_t raw_file_aio_write(struct kiocb *iocb, const char __user *buf,
261} 261}
262 262
263 263
264static struct file_operations raw_fops = { 264static const struct file_operations raw_fops = {
265 .read = generic_file_read, 265 .read = generic_file_read,
266 .aio_read = generic_file_aio_read, 266 .aio_read = generic_file_aio_read,
267 .write = raw_file_write, 267 .write = raw_file_write,
@@ -274,7 +274,7 @@ static struct file_operations raw_fops = {
274 .owner = THIS_MODULE, 274 .owner = THIS_MODULE,
275}; 275};
276 276
277static struct file_operations raw_ctl_fops = { 277static const struct file_operations raw_ctl_fops = {
278 .ioctl = raw_ctl_ioctl, 278 .ioctl = raw_ctl_ioctl,
279 .open = raw_open, 279 .open = raw_open,
280 .owner = THIS_MODULE, 280 .owner = THIS_MODULE,
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 3afc6a47ebbc..3fa80aaf4527 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -243,7 +243,7 @@ static struct real_driver rio_real_driver = {
243 * 243 *
244 */ 244 */
245 245
246static struct file_operations rio_fw_fops = { 246static const struct file_operations rio_fw_fops = {
247 .owner = THIS_MODULE, 247 .owner = THIS_MODULE,
248 .ioctl = rio_fw_ioctl, 248 .ioctl = rio_fw_ioctl,
249}; 249};
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index aefac4ac0bf5..cc7bd1a3095b 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -877,7 +877,7 @@ int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
877 * The various file operations we support. 877 * The various file operations we support.
878 */ 878 */
879 879
880static struct file_operations rtc_fops = { 880static const struct file_operations rtc_fops = {
881 .owner = THIS_MODULE, 881 .owner = THIS_MODULE,
882 .llseek = no_llseek, 882 .llseek = no_llseek,
883 .read = rtc_read, 883 .read = rtc_read,
@@ -896,7 +896,7 @@ static struct miscdevice rtc_dev = {
896 .fops = &rtc_fops, 896 .fops = &rtc_fops,
897}; 897};
898 898
899static struct file_operations rtc_proc_fops = { 899static const struct file_operations rtc_proc_fops = {
900 .owner = THIS_MODULE, 900 .owner = THIS_MODULE,
901 .open = rtc_proc_open, 901 .open = rtc_proc_open,
902 .read = seq_read, 902 .read = seq_read,
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 45083e5dd23b..425c58719db6 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -63,7 +63,7 @@ static int scx200_gpio_release(struct inode *inode, struct file *file)
63} 63}
64 64
65 65
66static struct file_operations scx200_gpio_fops = { 66static const struct file_operations scx200_gpio_fops = {
67 .owner = THIS_MODULE, 67 .owner = THIS_MODULE,
68 .write = nsc_gpio_write, 68 .write = nsc_gpio_write,
69 .read = nsc_gpio_read, 69 .read = nsc_gpio_read,
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 203240b6c08f..afc6eda602f7 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -347,7 +347,7 @@ scdrv_poll(struct file *file, struct poll_table_struct *wait)
347 return mask; 347 return mask;
348} 348}
349 349
350static struct file_operations scdrv_fops = { 350static const struct file_operations scdrv_fops = {
351 .owner = THIS_MODULE, 351 .owner = THIS_MODULE,
352 .read = scdrv_read, 352 .read = scdrv_read,
353 .write = scdrv_write, 353 .write = scdrv_write,
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 45508a039508..d4e434d694b7 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1106,7 +1106,7 @@ static int sonypi_misc_ioctl(struct inode *ip, struct file *fp,
1106 return ret; 1106 return ret;
1107} 1107}
1108 1108
1109static struct file_operations sonypi_misc_fops = { 1109static const struct file_operations sonypi_misc_fops = {
1110 .owner = THIS_MODULE, 1110 .owner = THIS_MODULE,
1111 .read = sonypi_misc_read, 1111 .read = sonypi_misc_read,
1112 .poll = sonypi_misc_poll, 1112 .poll = sonypi_misc_poll,
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index ed7b8eaf0367..3beb2203d24b 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -707,7 +707,7 @@ static unsigned int sc26198_baudtable[] = {
707 * Define the driver info for a user level control device. Used mainly 707 * Define the driver info for a user level control device. Used mainly
708 * to get at port stats - only not using the port device itself. 708 * to get at port stats - only not using the port device itself.
709 */ 709 */
710static struct file_operations stl_fsiomem = { 710static const struct file_operations stl_fsiomem = {
711 .owner = THIS_MODULE, 711 .owner = THIS_MODULE,
712 .ioctl = stl_memioctl, 712 .ioctl = stl_memioctl,
713}; 713};
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 45c193aa11db..e1cd2bc4b1e4 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -410,7 +410,7 @@ static struct real_driver sx_real_driver = {
410 * 410 *
411 */ 411 */
412 412
413static struct file_operations sx_fw_fops = { 413static const struct file_operations sx_fw_fops = {
414 .owner = THIS_MODULE, 414 .owner = THIS_MODULE,
415 .ioctl = sx_fw_ioctl, 415 .ioctl = sx_fw_ioctl,
416}; 416};
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index a064ee9181c0..ee3ca8f1768e 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -147,12 +147,13 @@ static struct sysrq_key_op sysrq_mountro_op = {
147 .enable_mask = SYSRQ_ENABLE_REMOUNT, 147 .enable_mask = SYSRQ_ENABLE_REMOUNT,
148}; 148};
149 149
150#ifdef CONFIG_DEBUG_MUTEXES 150#ifdef CONFIG_LOCKDEP
151static void sysrq_handle_showlocks(int key, struct pt_regs *pt_regs, 151static void sysrq_handle_showlocks(int key, struct pt_regs *pt_regs,
152 struct tty_struct *tty) 152 struct tty_struct *tty)
153{ 153{
154 mutex_debug_show_all_locks(); 154 debug_show_all_locks();
155} 155}
156
156static struct sysrq_key_op sysrq_showlocks_op = { 157static struct sysrq_key_op sysrq_showlocks_op = {
157 .handler = sysrq_handle_showlocks, 158 .handler = sysrq_handle_showlocks,
158 .help_msg = "show-all-locks(D)", 159 .help_msg = "show-all-locks(D)",
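The sysrq hunk moves the show-all-locks key from the mutex debugging code to the new lockdep facility: it is now compiled in under CONFIG_LOCKDEP and calls debug_show_all_locks(). For reference, a sketch of how such a key op is defined and registered; the key letter and names are illustrative:

	#include <linux/sysrq.h>
	#include <linux/debug_locks.h>

	static void example_sysrq_handler(int key, struct pt_regs *pt_regs,
					  struct tty_struct *tty)
	{
		debug_show_all_locks();	/* lockdep call, as used above */
	}

	static struct sysrq_key_op example_sysrq_op = {
		.handler	= example_sysrq_handler,
		.help_msg	= "example(X)",
		.action_msg	= "Example",
	};

	/* registration, e.g. from an __init function:
	 *	register_sysrq_key('x', &example_sysrq_op);
	 */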
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index a80c83210872..bb1bad4c18f9 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -255,7 +255,7 @@ static int tanbac_tb0219_release(struct inode *inode, struct file *file)
255 return 0; 255 return 0;
256} 256}
257 257
258static struct file_operations tb0219_fops = { 258static const struct file_operations tb0219_fops = {
259 .owner = THIS_MODULE, 259 .owner = THIS_MODULE,
260 .read = tanbac_tb0219_read, 260 .read = tanbac_tb0219_read,
261 .write = tanbac_tb0219_write, 261 .write = tanbac_tb0219_write,
diff --git a/drivers/char/tipar.c b/drivers/char/tipar.c
index e0633a119d29..d30dc09dbbc9 100644
--- a/drivers/char/tipar.c
+++ b/drivers/char/tipar.c
@@ -381,7 +381,7 @@ tipar_ioctl(struct inode *inode, struct file *file,
381 381
382/* ----- kernel module registering ------------------------------------ */ 382/* ----- kernel module registering ------------------------------------ */
383 383
384static struct file_operations tipar_fops = { 384static const struct file_operations tipar_fops = {
385 .owner = THIS_MODULE, 385 .owner = THIS_MODULE,
386 .llseek = no_llseek, 386 .llseek = no_llseek,
387 .read = tipar_read, 387 .read = tipar_read,
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 952b829e2cb4..d2c5ba4e83b8 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -247,7 +247,7 @@ static ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t cou
247 return 0; 247 return 0;
248} 248}
249 249
250static struct file_operations tlclk_fops = { 250static const struct file_operations tlclk_fops = {
251 .read = tlclk_read, 251 .read = tlclk_read,
252 .write = tlclk_write, 252 .write = tlclk_write,
253 .open = tlclk_open, 253 .open = tlclk_open,
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index e2fb234dee40..dd36fd04a842 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -92,7 +92,7 @@ static int tosh_ioctl(struct inode *, struct file *, unsigned int,
92 unsigned long); 92 unsigned long);
93 93
94 94
95static struct file_operations tosh_fops = { 95static const struct file_operations tosh_fops = {
96 .owner = THIS_MODULE, 96 .owner = THIS_MODULE,
97 .ioctl = tosh_ioctl, 97 .ioctl = tosh_ioctl,
98}; 98};
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 58a258cec153..ad8ffe49256f 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -116,7 +116,7 @@ static u8 tpm_atml_status(struct tpm_chip *chip)
116 return ioread8(chip->vendor.iobase + 1); 116 return ioread8(chip->vendor.iobase + 1);
117} 117}
118 118
119static struct file_operations atmel_ops = { 119static const struct file_operations atmel_ops = {
120 .owner = THIS_MODULE, 120 .owner = THIS_MODULE,
121 .llseek = no_llseek, 121 .llseek = no_llseek,
122 .open = tpm_open, 122 .open = tpm_open,
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index adfff21beb21..1353b5a6bae8 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -338,7 +338,7 @@ static struct attribute *inf_attrs[] = {
338 338
339static struct attribute_group inf_attr_grp = {.attrs = inf_attrs }; 339static struct attribute_group inf_attr_grp = {.attrs = inf_attrs };
340 340
341static struct file_operations inf_ops = { 341static const struct file_operations inf_ops = {
342 .owner = THIS_MODULE, 342 .owner = THIS_MODULE,
343 .llseek = no_llseek, 343 .llseek = no_llseek,
344 .open = tpm_open, 344 .open = tpm_open,
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 4c8bc06c7d95..26287aace87d 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -226,7 +226,7 @@ static u8 tpm_nsc_status(struct tpm_chip *chip)
226 return inb(chip->vendor.base + NSC_STATUS); 226 return inb(chip->vendor.base + NSC_STATUS);
227} 227}
228 228
229static struct file_operations nsc_ops = { 229static const struct file_operations nsc_ops = {
230 .owner = THIS_MODULE, 230 .owner = THIS_MODULE,
231 .llseek = no_llseek, 231 .llseek = no_llseek,
232 .open = tpm_open, 232 .open = tpm_open,
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index abb0f2aeae66..3232b1932597 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -330,7 +330,7 @@ out_err:
330 return rc; 330 return rc;
331} 331}
332 332
333static struct file_operations tis_ops = { 333static const struct file_operations tis_ops = {
334 .owner = THIS_MODULE, 334 .owner = THIS_MODULE,
335 .llseek = no_llseek, 335 .llseek = no_llseek,
336 .open = tpm_open, 336 .open = tpm_open,
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 615e934da05f..bfdb90242a90 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -912,7 +912,7 @@ static int hung_up_tty_ioctl(struct inode * inode, struct file * file,
912 return cmd == TIOCSPGRP ? -ENOTTY : -EIO; 912 return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
913} 913}
914 914
915static struct file_operations tty_fops = { 915static const struct file_operations tty_fops = {
916 .llseek = no_llseek, 916 .llseek = no_llseek,
917 .read = tty_read, 917 .read = tty_read,
918 .write = tty_write, 918 .write = tty_write,
@@ -924,7 +924,7 @@ static struct file_operations tty_fops = {
924}; 924};
925 925
926#ifdef CONFIG_UNIX98_PTYS 926#ifdef CONFIG_UNIX98_PTYS
927static struct file_operations ptmx_fops = { 927static const struct file_operations ptmx_fops = {
928 .llseek = no_llseek, 928 .llseek = no_llseek,
929 .read = tty_read, 929 .read = tty_read,
930 .write = tty_write, 930 .write = tty_write,
@@ -936,7 +936,7 @@ static struct file_operations ptmx_fops = {
936}; 936};
937#endif 937#endif
938 938
939static struct file_operations console_fops = { 939static const struct file_operations console_fops = {
940 .llseek = no_llseek, 940 .llseek = no_llseek,
941 .read = tty_read, 941 .read = tty_read,
942 .write = redirected_tty_write, 942 .write = redirected_tty_write,
@@ -947,7 +947,7 @@ static struct file_operations console_fops = {
947 .fasync = tty_fasync, 947 .fasync = tty_fasync,
948}; 948};
949 949
950static struct file_operations hung_up_tty_fops = { 950static const struct file_operations hung_up_tty_fops = {
951 .llseek = no_llseek, 951 .llseek = no_llseek,
952 .read = hung_up_tty_read, 952 .read = hung_up_tty_read,
953 .write = hung_up_tty_write, 953 .write = hung_up_tty_write,
@@ -2336,7 +2336,7 @@ static int fionbio(struct file *file, int __user *p)
2336 2336
2337static int tiocsctty(struct tty_struct *tty, int arg) 2337static int tiocsctty(struct tty_struct *tty, int arg)
2338{ 2338{
2339 task_t *p; 2339 struct task_struct *p;
2340 2340
2341 if (current->signal->leader && 2341 if (current->signal->leader &&
2342 (current->signal->session == tty->session)) 2342 (current->signal->session == tty->session))
diff --git a/drivers/char/vc_screen.c b/drivers/char/vc_screen.c
index 45e9bd81bc0e..a9247b5213d5 100644
--- a/drivers/char/vc_screen.c
+++ b/drivers/char/vc_screen.c
@@ -465,7 +465,7 @@ vcs_open(struct inode *inode, struct file *filp)
465 return 0; 465 return 0;
466} 466}
467 467
468static struct file_operations vcs_fops = { 468static const struct file_operations vcs_fops = {
469 .llseek = vcs_lseek, 469 .llseek = vcs_lseek,
470 .read = vcs_read, 470 .read = vcs_read,
471 .write = vcs_write, 471 .write = vcs_write,
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 7d42c8ec8dbc..b72b2049aaae 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -292,7 +292,7 @@ static int proc_viotape_open(struct inode *inode, struct file *file)
292 return single_open(file, proc_viotape_show, NULL); 292 return single_open(file, proc_viotape_show, NULL);
293} 293}
294 294
295static struct file_operations proc_viotape_operations = { 295static const struct file_operations proc_viotape_operations = {
296 .open = proc_viotape_open, 296 .open = proc_viotape_open,
297 .read = seq_read, 297 .read = seq_read,
298 .llseek = seq_lseek, 298 .llseek = seq_lseek,
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c
index 073da48c092e..1b9b1f1d4c49 100644
--- a/drivers/char/vr41xx_giu.c
+++ b/drivers/char/vr41xx_giu.c
@@ -605,7 +605,7 @@ static int gpio_release(struct inode *inode, struct file *file)
605 return 0; 605 return 0;
606} 606}
607 607
608static struct file_operations gpio_fops = { 608static const struct file_operations gpio_fops = {
609 .owner = THIS_MODULE, 609 .owner = THIS_MODULE,
610 .read = gpio_read, 610 .read = gpio_read,
611 .write = gpio_write, 611 .write = gpio_write,
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 3ef823d7d255..da7e66a2a38b 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -886,6 +886,7 @@ void vc_disallocate(unsigned int currcons)
886 if (vc_cons_allocated(currcons)) { 886 if (vc_cons_allocated(currcons)) {
887 struct vc_data *vc = vc_cons[currcons].d; 887 struct vc_data *vc = vc_cons[currcons].d;
888 vc->vc_sw->con_deinit(vc); 888 vc->vc_sw->con_deinit(vc);
889 module_put(vc->vc_sw->owner);
889 if (vc->vc_kmalloced) 890 if (vc->vc_kmalloced)
890 kfree(vc->vc_screenbuf); 891 kfree(vc->vc_screenbuf);
891 if (currcons >= MIN_NR_CONSOLES) 892 if (currcons >= MIN_NR_CONSOLES)
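The vt.c hunk drops the console driver's module reference when a console is deallocated, balancing the reference taken when the driver was bound; without it the refcount leaks and the driver module can never be unloaded. The pairing, in sketch form (names illustrative):

	#include <linux/module.h>

	/* Take a reference when binding a console to its driver... */
	static int example_bind(struct module *owner)
	{
		if (!try_module_get(owner))
			return -ENODEV;	/* driver is being unloaded */
		return 0;
	}

	/* ...and drop it on teardown, as vc_disallocate() now does. */
	static void example_unbind(struct module *owner)
	{
		module_put(owner);
	}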
diff --git a/drivers/char/watchdog/acquirewdt.c b/drivers/char/watchdog/acquirewdt.c
index 7289f4af93d0..c77fe3cf2852 100644
--- a/drivers/char/watchdog/acquirewdt.c
+++ b/drivers/char/watchdog/acquirewdt.c
@@ -231,7 +231,7 @@ static int acq_notify_sys(struct notifier_block *this, unsigned long code,
231 * Kernel Interfaces 231 * Kernel Interfaces
232 */ 232 */
233 233
234static struct file_operations acq_fops = { 234static const struct file_operations acq_fops = {
235 .owner = THIS_MODULE, 235 .owner = THIS_MODULE,
236 .llseek = no_llseek, 236 .llseek = no_llseek,
237 .write = acq_write, 237 .write = acq_write,
diff --git a/drivers/char/watchdog/advantechwdt.c b/drivers/char/watchdog/advantechwdt.c
index 194a3fd36b91..8069be445edc 100644
--- a/drivers/char/watchdog/advantechwdt.c
+++ b/drivers/char/watchdog/advantechwdt.c
@@ -227,7 +227,7 @@ advwdt_notify_sys(struct notifier_block *this, unsigned long code,
227 * Kernel Interfaces 227 * Kernel Interfaces
228 */ 228 */
229 229
230static struct file_operations advwdt_fops = { 230static const struct file_operations advwdt_fops = {
231 .owner = THIS_MODULE, 231 .owner = THIS_MODULE,
232 .llseek = no_llseek, 232 .llseek = no_llseek,
233 .write = advwdt_write, 233 .write = advwdt_write,
diff --git a/drivers/char/watchdog/alim1535_wdt.c b/drivers/char/watchdog/alim1535_wdt.c
index 8338ca300e2e..c5c94e4c9495 100644
--- a/drivers/char/watchdog/alim1535_wdt.c
+++ b/drivers/char/watchdog/alim1535_wdt.c
@@ -362,7 +362,7 @@ static int __init ali_find_watchdog(void)
362 * Kernel Interfaces 362 * Kernel Interfaces
363 */ 363 */
364 364
365static struct file_operations ali_fops = { 365static const struct file_operations ali_fops = {
366 .owner = THIS_MODULE, 366 .owner = THIS_MODULE,
367 .llseek = no_llseek, 367 .llseek = no_llseek,
368 .write = ali_write, 368 .write = ali_write,
diff --git a/drivers/char/watchdog/alim7101_wdt.c b/drivers/char/watchdog/alim7101_wdt.c
index c05ac188a4d7..ffd7684f999b 100644
--- a/drivers/char/watchdog/alim7101_wdt.c
+++ b/drivers/char/watchdog/alim7101_wdt.c
@@ -281,7 +281,7 @@ static int fop_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
281 } 281 }
282} 282}
283 283
284static struct file_operations wdt_fops = { 284static const struct file_operations wdt_fops = {
285 .owner= THIS_MODULE, 285 .owner= THIS_MODULE,
286 .llseek= no_llseek, 286 .llseek= no_llseek,
287 .write= fop_write, 287 .write= fop_write,
diff --git a/drivers/char/watchdog/at91_wdt.c b/drivers/char/watchdog/at91_wdt.c
index f61dedc3c96c..cc266715ea32 100644
--- a/drivers/char/watchdog/at91_wdt.c
+++ b/drivers/char/watchdog/at91_wdt.c
@@ -183,7 +183,7 @@ static ssize_t at91_wdt_write(struct file *file, const char *data, size_t len, l
183 183
184/* ......................................................................... */ 184/* ......................................................................... */
185 185
186static struct file_operations at91wdt_fops = { 186static const struct file_operations at91wdt_fops = {
187 .owner = THIS_MODULE, 187 .owner = THIS_MODULE,
188 .llseek = no_llseek, 188 .llseek = no_llseek,
189 .ioctl = at91_wdt_ioctl, 189 .ioctl = at91_wdt_ioctl,
diff --git a/drivers/char/watchdog/booke_wdt.c b/drivers/char/watchdog/booke_wdt.c
index 537f5c6729bf..e3cefc538b40 100644
--- a/drivers/char/watchdog/booke_wdt.c
+++ b/drivers/char/watchdog/booke_wdt.c
@@ -145,7 +145,7 @@ static int booke_wdt_open (struct inode *inode, struct file *file)
145 return 0; 145 return 0;
146} 146}
147 147
148static struct file_operations booke_wdt_fops = { 148static const struct file_operations booke_wdt_fops = {
149 .owner = THIS_MODULE, 149 .owner = THIS_MODULE,
150 .llseek = no_llseek, 150 .llseek = no_llseek,
151 .write = booke_wdt_write, 151 .write = booke_wdt_write,
diff --git a/drivers/char/watchdog/cpu5wdt.c b/drivers/char/watchdog/cpu5wdt.c
index 3e8410b5a65e..04c7e49918db 100644
--- a/drivers/char/watchdog/cpu5wdt.c
+++ b/drivers/char/watchdog/cpu5wdt.c
@@ -198,7 +198,7 @@ static ssize_t cpu5wdt_write(struct file *file, const char __user *buf, size_t c
198 return count; 198 return count;
199} 199}
200 200
201static struct file_operations cpu5wdt_fops = { 201static const struct file_operations cpu5wdt_fops = {
202 .owner = THIS_MODULE, 202 .owner = THIS_MODULE,
203 .llseek = no_llseek, 203 .llseek = no_llseek,
204 .ioctl = cpu5wdt_ioctl, 204 .ioctl = cpu5wdt_ioctl,
diff --git a/drivers/char/watchdog/ep93xx_wdt.c b/drivers/char/watchdog/ep93xx_wdt.c
index 9021dbb78299..77c8a955ae9e 100644
--- a/drivers/char/watchdog/ep93xx_wdt.c
+++ b/drivers/char/watchdog/ep93xx_wdt.c
@@ -187,7 +187,7 @@ static int ep93xx_wdt_release(struct inode *inode, struct file *file)
187 return 0; 187 return 0;
188} 188}
189 189
190static struct file_operations ep93xx_wdt_fops = { 190static const struct file_operations ep93xx_wdt_fops = {
191 .owner = THIS_MODULE, 191 .owner = THIS_MODULE,
192 .write = ep93xx_wdt_write, 192 .write = ep93xx_wdt_write,
193 .ioctl = ep93xx_wdt_ioctl, 193 .ioctl = ep93xx_wdt_ioctl,
diff --git a/drivers/char/watchdog/eurotechwdt.c b/drivers/char/watchdog/eurotechwdt.c
index ea670de4fab7..62dbccb2f6df 100644
--- a/drivers/char/watchdog/eurotechwdt.c
+++ b/drivers/char/watchdog/eurotechwdt.c
@@ -356,7 +356,7 @@ static int eurwdt_notify_sys(struct notifier_block *this, unsigned long code,
356 */ 356 */
357 357
358 358
359static struct file_operations eurwdt_fops = { 359static const struct file_operations eurwdt_fops = {
360 .owner = THIS_MODULE, 360 .owner = THIS_MODULE,
361 .llseek = no_llseek, 361 .llseek = no_llseek,
362 .write = eurwdt_write, 362 .write = eurwdt_write,
diff --git a/drivers/char/watchdog/i6300esb.c b/drivers/char/watchdog/i6300esb.c
index 93785f13242e..870539eabbf3 100644
--- a/drivers/char/watchdog/i6300esb.c
+++ b/drivers/char/watchdog/i6300esb.c
@@ -337,7 +337,7 @@ static int esb_notify_sys (struct notifier_block *this, unsigned long code, void
337 * Kernel Interfaces 337 * Kernel Interfaces
338 */ 338 */
339 339
340static struct file_operations esb_fops = { 340static const struct file_operations esb_fops = {
341 .owner = THIS_MODULE, 341 .owner = THIS_MODULE,
342 .llseek = no_llseek, 342 .llseek = no_llseek,
343 .write = esb_write, 343 .write = esb_write,
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index bfbdbbf3c2f2..8385dd36eefe 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
@@ -378,7 +378,7 @@ static int i8xx_tco_notify_sys (struct notifier_block *this, unsigned long code,
378 * Kernel Interfaces 378 * Kernel Interfaces
379 */ 379 */
380 380
381static struct file_operations i8xx_tco_fops = { 381static const struct file_operations i8xx_tco_fops = {
382 .owner = THIS_MODULE, 382 .owner = THIS_MODULE,
383 .llseek = no_llseek, 383 .llseek = no_llseek,
384 .write = i8xx_tco_write, 384 .write = i8xx_tco_write,
diff --git a/drivers/char/watchdog/ib700wdt.c b/drivers/char/watchdog/ib700wdt.c
index a2e53c715b36..fd95f7327798 100644
--- a/drivers/char/watchdog/ib700wdt.c
+++ b/drivers/char/watchdog/ib700wdt.c
@@ -255,7 +255,7 @@ ibwdt_notify_sys(struct notifier_block *this, unsigned long code,
255 * Kernel Interfaces 255 * Kernel Interfaces
256 */ 256 */
257 257
258static struct file_operations ibwdt_fops = { 258static const struct file_operations ibwdt_fops = {
259 .owner = THIS_MODULE, 259 .owner = THIS_MODULE,
260 .llseek = no_llseek, 260 .llseek = no_llseek,
261 .write = ibwdt_write, 261 .write = ibwdt_write,
diff --git a/drivers/char/watchdog/ibmasr.c b/drivers/char/watchdog/ibmasr.c
index b0741cbdc139..26ceee7a4df0 100644
--- a/drivers/char/watchdog/ibmasr.c
+++ b/drivers/char/watchdog/ibmasr.c
@@ -322,7 +322,7 @@ static int asr_release(struct inode *inode, struct file *file)
322 return 0; 322 return 0;
323} 323}
324 324
325static struct file_operations asr_fops = { 325static const struct file_operations asr_fops = {
326 .owner = THIS_MODULE, 326 .owner = THIS_MODULE,
327 .llseek = no_llseek, 327 .llseek = no_llseek,
328 .write = asr_write, 328 .write = asr_write,
diff --git a/drivers/char/watchdog/indydog.c b/drivers/char/watchdog/indydog.c
index d387979b2434..dacc1c20a310 100644
--- a/drivers/char/watchdog/indydog.c
+++ b/drivers/char/watchdog/indydog.c
@@ -154,7 +154,7 @@ static int indydog_notify_sys(struct notifier_block *this, unsigned long code, v
154 return NOTIFY_DONE; 154 return NOTIFY_DONE;
155} 155}
156 156
157static struct file_operations indydog_fops = { 157static const struct file_operations indydog_fops = {
158 .owner = THIS_MODULE, 158 .owner = THIS_MODULE,
159 .llseek = no_llseek, 159 .llseek = no_llseek,
160 .write = indydog_write, 160 .write = indydog_write,
diff --git a/drivers/char/watchdog/ixp2000_wdt.c b/drivers/char/watchdog/ixp2000_wdt.c
index aa29a7d68759..692908819e26 100644
--- a/drivers/char/watchdog/ixp2000_wdt.c
+++ b/drivers/char/watchdog/ixp2000_wdt.c
@@ -168,7 +168,7 @@ ixp2000_wdt_release(struct inode *inode, struct file *file)
168} 168}
169 169
170 170
171static struct file_operations ixp2000_wdt_fops = 171static const struct file_operations ixp2000_wdt_fops =
172{ 172{
173 .owner = THIS_MODULE, 173 .owner = THIS_MODULE,
174 .llseek = no_llseek, 174 .llseek = no_llseek,
diff --git a/drivers/char/watchdog/ixp4xx_wdt.c b/drivers/char/watchdog/ixp4xx_wdt.c
index e6a3fe83fa01..9db5cf2c38c3 100644
--- a/drivers/char/watchdog/ixp4xx_wdt.c
+++ b/drivers/char/watchdog/ixp4xx_wdt.c
@@ -162,7 +162,7 @@ ixp4xx_wdt_release(struct inode *inode, struct file *file)
162} 162}
163 163
164 164
165static struct file_operations ixp4xx_wdt_fops = 165static const struct file_operations ixp4xx_wdt_fops =
166{ 166{
167 .owner = THIS_MODULE, 167 .owner = THIS_MODULE,
168 .llseek = no_llseek, 168 .llseek = no_llseek,
diff --git a/drivers/char/watchdog/machzwd.c b/drivers/char/watchdog/machzwd.c
index b67b4878ae0f..23734e07fb22 100644
--- a/drivers/char/watchdog/machzwd.c
+++ b/drivers/char/watchdog/machzwd.c
@@ -388,7 +388,7 @@ static int zf_notify_sys(struct notifier_block *this, unsigned long code,
388 388
389 389
390 390
391static struct file_operations zf_fops = { 391static const struct file_operations zf_fops = {
392 .owner = THIS_MODULE, 392 .owner = THIS_MODULE,
393 .llseek = no_llseek, 393 .llseek = no_llseek,
394 .write = zf_write, 394 .write = zf_write,
diff --git a/drivers/char/watchdog/mixcomwd.c b/drivers/char/watchdog/mixcomwd.c
index 433c27f98159..ae943324d251 100644
--- a/drivers/char/watchdog/mixcomwd.c
+++ b/drivers/char/watchdog/mixcomwd.c
@@ -190,7 +190,7 @@ static int mixcomwd_ioctl(struct inode *inode, struct file *file,
190 return 0; 190 return 0;
191} 191}
192 192
193static struct file_operations mixcomwd_fops= 193static const struct file_operations mixcomwd_fops=
194{ 194{
195 .owner = THIS_MODULE, 195 .owner = THIS_MODULE,
196 .llseek = no_llseek, 196 .llseek = no_llseek,
diff --git a/drivers/char/watchdog/mpc83xx_wdt.c b/drivers/char/watchdog/mpc83xx_wdt.c
index dac1381af364..a480903ee1a5 100644
--- a/drivers/char/watchdog/mpc83xx_wdt.c
+++ b/drivers/char/watchdog/mpc83xx_wdt.c
@@ -129,7 +129,7 @@ static int mpc83xx_wdt_ioctl(struct inode *inode, struct file *file,
129 } 129 }
130} 130}
131 131
132static struct file_operations mpc83xx_wdt_fops = { 132static const struct file_operations mpc83xx_wdt_fops = {
133 .owner = THIS_MODULE, 133 .owner = THIS_MODULE,
134 .llseek = no_llseek, 134 .llseek = no_llseek,
135 .write = mpc83xx_wdt_write, 135 .write = mpc83xx_wdt_write,
diff --git a/drivers/char/watchdog/mpc8xx_wdt.c b/drivers/char/watchdog/mpc8xx_wdt.c
index 11f0ccd4c4d4..35dd9e6e1140 100644
--- a/drivers/char/watchdog/mpc8xx_wdt.c
+++ b/drivers/char/watchdog/mpc8xx_wdt.c
@@ -132,7 +132,7 @@ static int mpc8xx_wdt_ioctl(struct inode *inode, struct file *file,
132 return 0; 132 return 0;
133} 133}
134 134
135static struct file_operations mpc8xx_wdt_fops = { 135static const struct file_operations mpc8xx_wdt_fops = {
136 .owner = THIS_MODULE, 136 .owner = THIS_MODULE,
137 .llseek = no_llseek, 137 .llseek = no_llseek,
138 .write = mpc8xx_wdt_write, 138 .write = mpc8xx_wdt_write,
diff --git a/drivers/char/watchdog/mpcore_wdt.c b/drivers/char/watchdog/mpcore_wdt.c
index c2d492c852fc..54b3c56ead0d 100644
--- a/drivers/char/watchdog/mpcore_wdt.c
+++ b/drivers/char/watchdog/mpcore_wdt.c
@@ -297,7 +297,7 @@ static void mpcore_wdt_shutdown(struct platform_device *dev)
297/* 297/*
298 * Kernel Interfaces 298 * Kernel Interfaces
299 */ 299 */
300static struct file_operations mpcore_wdt_fops = { 300static const struct file_operations mpcore_wdt_fops = {
301 .owner = THIS_MODULE, 301 .owner = THIS_MODULE,
302 .llseek = no_llseek, 302 .llseek = no_llseek,
303 .write = mpcore_wdt_write, 303 .write = mpcore_wdt_write,
diff --git a/drivers/char/watchdog/mv64x60_wdt.c b/drivers/char/watchdog/mv64x60_wdt.c
index 20a6cbb0fbb8..5c8fab345b40 100644
--- a/drivers/char/watchdog/mv64x60_wdt.c
+++ b/drivers/char/watchdog/mv64x60_wdt.c
@@ -166,7 +166,7 @@ static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file,
166 return 0; 166 return 0;
167} 167}
168 168
169static struct file_operations mv64x60_wdt_fops = { 169static const struct file_operations mv64x60_wdt_fops = {
170 .owner = THIS_MODULE, 170 .owner = THIS_MODULE,
171 .llseek = no_llseek, 171 .llseek = no_llseek,
172 .write = mv64x60_wdt_write, 172 .write = mv64x60_wdt_write,
diff --git a/drivers/char/watchdog/pcwd.c b/drivers/char/watchdog/pcwd.c
index 6d44ca68312d..cd7d1b6a5d9f 100644
--- a/drivers/char/watchdog/pcwd.c
+++ b/drivers/char/watchdog/pcwd.c
@@ -740,7 +740,7 @@ static int pcwd_notify_sys(struct notifier_block *this, unsigned long code, void
740 * Kernel Interfaces 740 * Kernel Interfaces
741 */ 741 */
742 742
743static struct file_operations pcwd_fops = { 743static const struct file_operations pcwd_fops = {
744 .owner = THIS_MODULE, 744 .owner = THIS_MODULE,
745 .llseek = no_llseek, 745 .llseek = no_llseek,
746 .write = pcwd_write, 746 .write = pcwd_write,
@@ -755,7 +755,7 @@ static struct miscdevice pcwd_miscdev = {
755 .fops = &pcwd_fops, 755 .fops = &pcwd_fops,
756}; 756};
757 757
758static struct file_operations pcwd_temp_fops = { 758static const struct file_operations pcwd_temp_fops = {
759 .owner = THIS_MODULE, 759 .owner = THIS_MODULE,
760 .llseek = no_llseek, 760 .llseek = no_llseek,
761 .read = pcwd_temp_read, 761 .read = pcwd_temp_read,
diff --git a/drivers/char/watchdog/pcwd_pci.c b/drivers/char/watchdog/pcwd_pci.c
index 1f40ecefbf72..c7cfd6dbfe1b 100644
--- a/drivers/char/watchdog/pcwd_pci.c
+++ b/drivers/char/watchdog/pcwd_pci.c
@@ -625,7 +625,7 @@ static int pcipcwd_notify_sys(struct notifier_block *this, unsigned long code, v
625 * Kernel Interfaces 625 * Kernel Interfaces
626 */ 626 */
627 627
628static struct file_operations pcipcwd_fops = { 628static const struct file_operations pcipcwd_fops = {
629 .owner = THIS_MODULE, 629 .owner = THIS_MODULE,
630 .llseek = no_llseek, 630 .llseek = no_llseek,
631 .write = pcipcwd_write, 631 .write = pcipcwd_write,
@@ -640,7 +640,7 @@ static struct miscdevice pcipcwd_miscdev = {
640 .fops = &pcipcwd_fops, 640 .fops = &pcipcwd_fops,
641}; 641};
642 642
643static struct file_operations pcipcwd_temp_fops = { 643static const struct file_operations pcipcwd_temp_fops = {
644 .owner = THIS_MODULE, 644 .owner = THIS_MODULE,
645 .llseek = no_llseek, 645 .llseek = no_llseek,
646 .read = pcipcwd_temp_read, 646 .read = pcipcwd_temp_read,
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 92bf8c1a0f0d..b7ae73dcdd08 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -523,7 +523,7 @@ static int usb_pcwd_notify_sys(struct notifier_block *this, unsigned long code,
523 * Kernel Interfaces 523 * Kernel Interfaces
524 */ 524 */
525 525
526static struct file_operations usb_pcwd_fops = { 526static const struct file_operations usb_pcwd_fops = {
527 .owner = THIS_MODULE, 527 .owner = THIS_MODULE,
528 .llseek = no_llseek, 528 .llseek = no_llseek,
529 .write = usb_pcwd_write, 529 .write = usb_pcwd_write,
@@ -538,7 +538,7 @@ static struct miscdevice usb_pcwd_miscdev = {
538 .fops = &usb_pcwd_fops, 538 .fops = &usb_pcwd_fops,
539}; 539};
540 540
541static struct file_operations usb_pcwd_temperature_fops = { 541static const struct file_operations usb_pcwd_temperature_fops = {
542 .owner = THIS_MODULE, 542 .owner = THIS_MODULE,
543 .llseek = no_llseek, 543 .llseek = no_llseek,
544 .read = usb_pcwd_temperature_read, 544 .read = usb_pcwd_temperature_read,
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index f267dad26071..be978e8ed754 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -319,7 +319,7 @@ static int s3c2410wdt_ioctl(struct inode *inode, struct file *file,
319 319
320/* kernel interface */ 320/* kernel interface */
321 321
322static struct file_operations s3c2410wdt_fops = { 322static const struct file_operations s3c2410wdt_fops = {
323 .owner = THIS_MODULE, 323 .owner = THIS_MODULE,
324 .llseek = no_llseek, 324 .llseek = no_llseek,
325 .write = s3c2410wdt_write, 325 .write = s3c2410wdt_write,
diff --git a/drivers/char/watchdog/sa1100_wdt.c b/drivers/char/watchdog/sa1100_wdt.c
index b22e95c5470c..1fc16d995788 100644
--- a/drivers/char/watchdog/sa1100_wdt.c
+++ b/drivers/char/watchdog/sa1100_wdt.c
@@ -135,7 +135,7 @@ static int sa1100dog_ioctl(struct inode *inode, struct file *file,
135 return ret; 135 return ret;
136} 136}
137 137
138static struct file_operations sa1100dog_fops = 138static const struct file_operations sa1100dog_fops =
139{ 139{
140 .owner = THIS_MODULE, 140 .owner = THIS_MODULE,
141 .llseek = no_llseek, 141 .llseek = no_llseek,
diff --git a/drivers/char/watchdog/sbc60xxwdt.c b/drivers/char/watchdog/sbc60xxwdt.c
index ed0bd55fbfc1..4663c2fd53cd 100644
--- a/drivers/char/watchdog/sbc60xxwdt.c
+++ b/drivers/char/watchdog/sbc60xxwdt.c
@@ -282,7 +282,7 @@ static int fop_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
282 } 282 }
283} 283}
284 284
285static struct file_operations wdt_fops = { 285static const struct file_operations wdt_fops = {
286 .owner = THIS_MODULE, 286 .owner = THIS_MODULE,
287 .llseek = no_llseek, 287 .llseek = no_llseek,
288 .write = fop_write, 288 .write = fop_write,
diff --git a/drivers/char/watchdog/sbc8360.c b/drivers/char/watchdog/sbc8360.c
index 6562aa910ace..1035be5b5019 100644
--- a/drivers/char/watchdog/sbc8360.c
+++ b/drivers/char/watchdog/sbc8360.c
@@ -305,7 +305,7 @@ static int sbc8360_notify_sys(struct notifier_block *this, unsigned long code,
305 * Kernel Interfaces 305 * Kernel Interfaces
306 */ 306 */
307 307
308static struct file_operations sbc8360_fops = { 308static const struct file_operations sbc8360_fops = {
309 .owner = THIS_MODULE, 309 .owner = THIS_MODULE,
310 .llseek = no_llseek, 310 .llseek = no_llseek,
311 .write = sbc8360_write, 311 .write = sbc8360_write,
diff --git a/drivers/char/watchdog/sbc_epx_c3.c b/drivers/char/watchdog/sbc_epx_c3.c
index 09867fadc720..bfc475dabe6d 100644
--- a/drivers/char/watchdog/sbc_epx_c3.c
+++ b/drivers/char/watchdog/sbc_epx_c3.c
@@ -154,7 +154,7 @@ static int epx_c3_notify_sys(struct notifier_block *this, unsigned long code,
154 return NOTIFY_DONE; 154 return NOTIFY_DONE;
155} 155}
156 156
157static struct file_operations epx_c3_fops = { 157static const struct file_operations epx_c3_fops = {
158 .owner = THIS_MODULE, 158 .owner = THIS_MODULE,
159 .llseek = no_llseek, 159 .llseek = no_llseek,
160 .write = epx_c3_write, 160 .write = epx_c3_write,
diff --git a/drivers/char/watchdog/sc1200wdt.c b/drivers/char/watchdog/sc1200wdt.c
index 78ef6333c181..7c3cf293a5af 100644
--- a/drivers/char/watchdog/sc1200wdt.c
+++ b/drivers/char/watchdog/sc1200wdt.c
@@ -292,7 +292,7 @@ static struct notifier_block sc1200wdt_notifier =
292 .notifier_call = sc1200wdt_notify_sys, 292 .notifier_call = sc1200wdt_notify_sys,
293}; 293};
294 294
295static struct file_operations sc1200wdt_fops = 295static const struct file_operations sc1200wdt_fops =
296{ 296{
297 .owner = THIS_MODULE, 297 .owner = THIS_MODULE,
298 .llseek = no_llseek, 298 .llseek = no_llseek,
diff --git a/drivers/char/watchdog/sc520_wdt.c b/drivers/char/watchdog/sc520_wdt.c
index 4ee9974ad8cb..2c7c9db71be8 100644
--- a/drivers/char/watchdog/sc520_wdt.c
+++ b/drivers/char/watchdog/sc520_wdt.c
@@ -336,7 +336,7 @@ static int fop_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
336 } 336 }
337} 337}
338 338
339static struct file_operations wdt_fops = { 339static const struct file_operations wdt_fops = {
340 .owner = THIS_MODULE, 340 .owner = THIS_MODULE,
341 .llseek = no_llseek, 341 .llseek = no_llseek,
342 .write = fop_write, 342 .write = fop_write,
diff --git a/drivers/char/watchdog/scx200_wdt.c b/drivers/char/watchdog/scx200_wdt.c
index c0b4754e8de0..c561299a5537 100644
--- a/drivers/char/watchdog/scx200_wdt.c
+++ b/drivers/char/watchdog/scx200_wdt.c
@@ -194,7 +194,7 @@ static int scx200_wdt_ioctl(struct inode *inode, struct file *file,
194 } 194 }
195} 195}
196 196
197static struct file_operations scx200_wdt_fops = { 197static const struct file_operations scx200_wdt_fops = {
198 .owner = THIS_MODULE, 198 .owner = THIS_MODULE,
199 .llseek = no_llseek, 199 .llseek = no_llseek,
200 .write = scx200_wdt_write, 200 .write = scx200_wdt_write,
diff --git a/drivers/char/watchdog/shwdt.c b/drivers/char/watchdog/shwdt.c
index 803701b675c0..1355038f1044 100644
--- a/drivers/char/watchdog/shwdt.c
+++ b/drivers/char/watchdog/shwdt.c
@@ -344,7 +344,7 @@ static int sh_wdt_notify_sys(struct notifier_block *this,
344 return NOTIFY_DONE; 344 return NOTIFY_DONE;
345} 345}
346 346
347static struct file_operations sh_wdt_fops = { 347static const struct file_operations sh_wdt_fops = {
348 .owner = THIS_MODULE, 348 .owner = THIS_MODULE,
349 .llseek = no_llseek, 349 .llseek = no_llseek,
350 .write = sh_wdt_write, 350 .write = sh_wdt_write,
diff --git a/drivers/char/watchdog/softdog.c b/drivers/char/watchdog/softdog.c
index 79ce5c655428..ef8da517545a 100644
--- a/drivers/char/watchdog/softdog.c
+++ b/drivers/char/watchdog/softdog.c
@@ -243,7 +243,7 @@ static int softdog_notify_sys(struct notifier_block *this, unsigned long code,
243 * Kernel Interfaces 243 * Kernel Interfaces
244 */ 244 */
245 245
246static struct file_operations softdog_fops = { 246static const struct file_operations softdog_fops = {
247 .owner = THIS_MODULE, 247 .owner = THIS_MODULE,
248 .llseek = no_llseek, 248 .llseek = no_llseek,
249 .write = softdog_write, 249 .write = softdog_write,
diff --git a/drivers/char/watchdog/w83627hf_wdt.c b/drivers/char/watchdog/w83627hf_wdt.c
index d15ca9a3986f..13f16d41c2fd 100644
--- a/drivers/char/watchdog/w83627hf_wdt.c
+++ b/drivers/char/watchdog/w83627hf_wdt.c
@@ -274,7 +274,7 @@ wdt_notify_sys(struct notifier_block *this, unsigned long code,
274 * Kernel Interfaces 274 * Kernel Interfaces
275 */ 275 */
276 276
277static struct file_operations wdt_fops = { 277static const struct file_operations wdt_fops = {
278 .owner = THIS_MODULE, 278 .owner = THIS_MODULE,
279 .llseek = no_llseek, 279 .llseek = no_llseek,
280 .write = wdt_write, 280 .write = wdt_write,
diff --git a/drivers/char/watchdog/w83877f_wdt.c b/drivers/char/watchdog/w83877f_wdt.c
index 52a8bd0a5988..ccf6c0915945 100644
--- a/drivers/char/watchdog/w83877f_wdt.c
+++ b/drivers/char/watchdog/w83877f_wdt.c
@@ -299,7 +299,7 @@ static int fop_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
299 } 299 }
300} 300}
301 301
302static struct file_operations wdt_fops = { 302static const struct file_operations wdt_fops = {
303 .owner = THIS_MODULE, 303 .owner = THIS_MODULE,
304 .llseek = no_llseek, 304 .llseek = no_llseek,
305 .write = fop_write, 305 .write = fop_write,
diff --git a/drivers/char/watchdog/w83977f_wdt.c b/drivers/char/watchdog/w83977f_wdt.c
index c31849e4c5c2..98f4e17db70a 100644
--- a/drivers/char/watchdog/w83977f_wdt.c
+++ b/drivers/char/watchdog/w83977f_wdt.c
@@ -449,7 +449,7 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
449 return NOTIFY_DONE; 449 return NOTIFY_DONE;
450} 450}
451 451
452static struct file_operations wdt_fops= 452static const struct file_operations wdt_fops=
453{ 453{
454 .owner = THIS_MODULE, 454 .owner = THIS_MODULE,
455 .llseek = no_llseek, 455 .llseek = no_llseek,
diff --git a/drivers/char/watchdog/wafer5823wdt.c b/drivers/char/watchdog/wafer5823wdt.c
index 7cf6c9bbf486..2bb6a9d6ad28 100644
--- a/drivers/char/watchdog/wafer5823wdt.c
+++ b/drivers/char/watchdog/wafer5823wdt.c
@@ -222,7 +222,7 @@ static int wafwdt_notify_sys(struct notifier_block *this, unsigned long code, vo
222 * Kernel Interfaces 222 * Kernel Interfaces
223 */ 223 */
224 224
225static struct file_operations wafwdt_fops = { 225static const struct file_operations wafwdt_fops = {
226 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
227 .llseek = no_llseek, 227 .llseek = no_llseek,
228 .write = wafwdt_write, 228 .write = wafwdt_write,
diff --git a/drivers/char/watchdog/wdrtas.c b/drivers/char/watchdog/wdrtas.c
index 3a462c34b92a..5c38cdf41731 100644
--- a/drivers/char/watchdog/wdrtas.c
+++ b/drivers/char/watchdog/wdrtas.c
@@ -520,7 +520,7 @@ wdrtas_reboot(struct notifier_block *this, unsigned long code, void *ptr)
520 520
521/*** initialization stuff */ 521/*** initialization stuff */
522 522
523static struct file_operations wdrtas_fops = { 523static const struct file_operations wdrtas_fops = {
524 .owner = THIS_MODULE, 524 .owner = THIS_MODULE,
525 .llseek = no_llseek, 525 .llseek = no_llseek,
526 .write = wdrtas_write, 526 .write = wdrtas_write,
@@ -535,7 +535,7 @@ static struct miscdevice wdrtas_miscdev = {
535 .fops = &wdrtas_fops, 535 .fops = &wdrtas_fops,
536}; 536};
537 537
538static struct file_operations wdrtas_temp_fops = { 538static const struct file_operations wdrtas_temp_fops = {
539 .owner = THIS_MODULE, 539 .owner = THIS_MODULE,
540 .llseek = no_llseek, 540 .llseek = no_llseek,
541 .read = wdrtas_temp_read, 541 .read = wdrtas_temp_read,
diff --git a/drivers/char/watchdog/wdt.c b/drivers/char/watchdog/wdt.c
index a1d972c8f44c..70be81e39a61 100644
--- a/drivers/char/watchdog/wdt.c
+++ b/drivers/char/watchdog/wdt.c
@@ -494,7 +494,7 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
494 */ 494 */
495 495
496 496
497static struct file_operations wdt_fops = { 497static const struct file_operations wdt_fops = {
498 .owner = THIS_MODULE, 498 .owner = THIS_MODULE,
499 .llseek = no_llseek, 499 .llseek = no_llseek,
500 .write = wdt_write, 500 .write = wdt_write,
@@ -510,7 +510,7 @@ static struct miscdevice wdt_miscdev = {
510}; 510};
511 511
512#ifdef CONFIG_WDT_501 512#ifdef CONFIG_WDT_501
513static struct file_operations wdt_temp_fops = { 513static const struct file_operations wdt_temp_fops = {
514 .owner = THIS_MODULE, 514 .owner = THIS_MODULE,
515 .llseek = no_llseek, 515 .llseek = no_llseek,
516 .read = wdt_temp_read, 516 .read = wdt_temp_read,
diff --git a/drivers/char/watchdog/wdt285.c b/drivers/char/watchdog/wdt285.c
index 52825a1f1779..6555fb844f23 100644
--- a/drivers/char/watchdog/wdt285.c
+++ b/drivers/char/watchdog/wdt285.c
@@ -178,7 +178,7 @@ watchdog_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
178 return ret; 178 return ret;
179} 179}
180 180
181static struct file_operations watchdog_fops = { 181static const struct file_operations watchdog_fops = {
182 .owner = THIS_MODULE, 182 .owner = THIS_MODULE,
183 .llseek = no_llseek, 183 .llseek = no_llseek,
184 .write = watchdog_write, 184 .write = watchdog_write,
diff --git a/drivers/char/watchdog/wdt977.c b/drivers/char/watchdog/wdt977.c
index 3cde2b9bb763..a0935bc775f8 100644
--- a/drivers/char/watchdog/wdt977.c
+++ b/drivers/char/watchdog/wdt977.c
@@ -418,7 +418,7 @@ static int wdt977_notify_sys(struct notifier_block *this, unsigned long code,
418 return NOTIFY_DONE; 418 return NOTIFY_DONE;
419} 419}
420 420
421static struct file_operations wdt977_fops= 421static const struct file_operations wdt977_fops=
422{ 422{
423 .owner = THIS_MODULE, 423 .owner = THIS_MODULE,
424 .llseek = no_llseek, 424 .llseek = no_llseek,
diff --git a/drivers/char/watchdog/wdt_pci.c b/drivers/char/watchdog/wdt_pci.c
index 7529ecdbabae..5918ca2c9c35 100644
--- a/drivers/char/watchdog/wdt_pci.c
+++ b/drivers/char/watchdog/wdt_pci.c
@@ -543,7 +543,7 @@ static int wdtpci_notify_sys(struct notifier_block *this, unsigned long code,
543 */ 543 */
544 544
545 545
546static struct file_operations wdtpci_fops = { 546static const struct file_operations wdtpci_fops = {
547 .owner = THIS_MODULE, 547 .owner = THIS_MODULE,
548 .llseek = no_llseek, 548 .llseek = no_llseek,
549 .write = wdtpci_write, 549 .write = wdtpci_write,
@@ -559,7 +559,7 @@ static struct miscdevice wdtpci_miscdev = {
559}; 559};
560 560
561#ifdef CONFIG_WDT_501_PCI 561#ifdef CONFIG_WDT_501_PCI
562static struct file_operations wdtpci_temp_fops = { 562static const struct file_operations wdtpci_temp_fops = {
563 .owner = THIS_MODULE, 563 .owner = THIS_MODULE,
564 .llseek = no_llseek, 564 .llseek = no_llseek,
565 .read = wdtpci_temp_read, 565 .read = wdtpci_temp_read,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 693e540481b4..87299924e735 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -12,22 +12,11 @@
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/smp.h>
16#include <linux/init.h> 15#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/ctype.h>
19#include <linux/cpufreq.h> 16#include <linux/cpufreq.h>
20#include <linux/sysctl.h>
21#include <linux/types.h>
22#include <linux/fs.h>
23#include <linux/sysfs.h>
24#include <linux/cpu.h> 17#include <linux/cpu.h>
25#include <linux/sched.h>
26#include <linux/kmod.h>
27#include <linux/workqueue.h>
28#include <linux/jiffies.h> 18#include <linux/jiffies.h>
29#include <linux/kernel_stat.h> 19#include <linux/kernel_stat.h>
30#include <linux/percpu.h>
31#include <linux/mutex.h> 20#include <linux/mutex.h>
32 21
33/* 22/*
@@ -56,16 +45,15 @@ static unsigned int def_sampling_rate;
56#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) 45#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
57#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 46#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
58#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 47#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
59#define DEF_SAMPLING_DOWN_FACTOR (1)
60#define MAX_SAMPLING_DOWN_FACTOR (10)
61#define TRANSITION_LATENCY_LIMIT (10 * 1000) 48#define TRANSITION_LATENCY_LIMIT (10 * 1000)
62 49
63static void do_dbs_timer(void *data); 50static void do_dbs_timer(void *data);
64 51
65struct cpu_dbs_info_s { 52struct cpu_dbs_info_s {
53 cputime64_t prev_cpu_idle;
54 cputime64_t prev_cpu_wall;
66 struct cpufreq_policy *cur_policy; 55 struct cpufreq_policy *cur_policy;
67 unsigned int prev_cpu_idle_up; 56 struct work_struct work;
68 unsigned int prev_cpu_idle_down;
69 unsigned int enable; 57 unsigned int enable;
70}; 58};
71static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); 59static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -80,31 +68,32 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
80 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock 68 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
81 * is recursive for the same process. -Venki 69 * is recursive for the same process. -Venki
82 */ 70 */
83static DEFINE_MUTEX (dbs_mutex); 71static DEFINE_MUTEX(dbs_mutex);
84static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
85 72
86static struct workqueue_struct *dbs_workq; 73static struct workqueue_struct *kondemand_wq;
87 74
88struct dbs_tuners { 75struct dbs_tuners {
89 unsigned int sampling_rate; 76 unsigned int sampling_rate;
90 unsigned int sampling_down_factor;
91 unsigned int up_threshold; 77 unsigned int up_threshold;
92 unsigned int ignore_nice; 78 unsigned int ignore_nice;
93}; 79};
94 80
95static struct dbs_tuners dbs_tuners_ins = { 81static struct dbs_tuners dbs_tuners_ins = {
96 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 82 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
97 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
98 .ignore_nice = 0, 83 .ignore_nice = 0,
99}; 84};
100 85
101static inline unsigned int get_cpu_idle_time(unsigned int cpu) 86static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
102{ 87{
103 return kstat_cpu(cpu).cpustat.idle + 88 cputime64_t retval;
104 kstat_cpu(cpu).cpustat.iowait + 89
105 ( dbs_tuners_ins.ignore_nice ? 90 retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
106 kstat_cpu(cpu).cpustat.nice : 91 kstat_cpu(cpu).cpustat.iowait);
107 0); 92
93 if (dbs_tuners_ins.ignore_nice)
94 retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
95
96 return retval;
108} 97}
109 98
110/************************** sysfs interface ************************/ 99/************************** sysfs interface ************************/
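
The get_cpu_idle_time() rewrite is more than style: idle accounting moves to cputime64_t with the cputime64_add/cputime64_sub helpers, so the arithmetic stays correct on configurations where cputime is wider than unsigned int, and the governor now also snapshots wall time (prev_cpu_wall) so load can be computed against a measured window rather than an assumed one. For intuition, a rough user-space analogue that sums the same idle + iowait (+ nice, when nice time is treated as idle) columns — illustrative only; the kernel reads these counters directly from kstat_cpu(), not /proc, and the field order here follows proc(5):

    #include <stdio.h>
    #include <string.h>

    /* Sum the "idle" ticks for one CPU the way the governor does:
     * idle + iowait, plus nice time when ignore_nice is set. */
    static int cpu_idle_ticks(int cpu, int ignore_nice,
                              unsigned long long *out)
    {
            char tag[16], line[256];
            unsigned long long user, nice, sys, idle, iowait;
            FILE *f = fopen("/proc/stat", "r");

            if (!f)
                    return -1;
            snprintf(tag, sizeof(tag), "cpu%d ", cpu); /* trailing space: cpu1 != cpu10 */
            while (fgets(line, sizeof(line), f)) {
                    if (strncmp(line, tag, strlen(tag)))
                            continue;
                    sscanf(line + strlen(tag), "%llu %llu %llu %llu %llu",
                           &user, &nice, &sys, &idle, &iowait);
                    *out = idle + iowait + (ignore_nice ? nice : 0);
                    fclose(f);
                    return 0;
            }
            fclose(f);
            return -1;
    }
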
@@ -133,35 +122,15 @@ static ssize_t show_##file_name \
133 return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ 122 return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
134} 123}
135show_one(sampling_rate, sampling_rate); 124show_one(sampling_rate, sampling_rate);
136show_one(sampling_down_factor, sampling_down_factor);
137show_one(up_threshold, up_threshold); 125show_one(up_threshold, up_threshold);
138show_one(ignore_nice_load, ignore_nice); 126show_one(ignore_nice_load, ignore_nice);
139 127
140static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
141 const char *buf, size_t count)
142{
143 unsigned int input;
144 int ret;
145 ret = sscanf (buf, "%u", &input);
146 if (ret != 1 )
147 return -EINVAL;
148
149 if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
150 return -EINVAL;
151
152 mutex_lock(&dbs_mutex);
153 dbs_tuners_ins.sampling_down_factor = input;
154 mutex_unlock(&dbs_mutex);
155
156 return count;
157}
158
159static ssize_t store_sampling_rate(struct cpufreq_policy *unused, 128static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
160 const char *buf, size_t count) 129 const char *buf, size_t count)
161{ 130{
162 unsigned int input; 131 unsigned int input;
163 int ret; 132 int ret;
164 ret = sscanf (buf, "%u", &input); 133 ret = sscanf(buf, "%u", &input);
165 134
166 mutex_lock(&dbs_mutex); 135 mutex_lock(&dbs_mutex);
167 if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { 136 if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
@@ -180,7 +149,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
180{ 149{
181 unsigned int input; 150 unsigned int input;
182 int ret; 151 int ret;
183 ret = sscanf (buf, "%u", &input); 152 ret = sscanf(buf, "%u", &input);
184 153
185 mutex_lock(&dbs_mutex); 154 mutex_lock(&dbs_mutex);
186 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || 155 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
@@ -203,7 +172,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
203 172
204 unsigned int j; 173 unsigned int j;
205 174
206 ret = sscanf (buf, "%u", &input); 175 ret = sscanf(buf, "%u", &input);
207 if ( ret != 1 ) 176 if ( ret != 1 )
208 return -EINVAL; 177 return -EINVAL;
209 178
@@ -217,12 +186,12 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
217 } 186 }
218 dbs_tuners_ins.ignore_nice = input; 187 dbs_tuners_ins.ignore_nice = input;
219 188
220 /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ 189 /* we need to re-evaluate prev_cpu_idle */
221 for_each_online_cpu(j) { 190 for_each_online_cpu(j) {
222 struct cpu_dbs_info_s *j_dbs_info; 191 struct cpu_dbs_info_s *dbs_info;
223 j_dbs_info = &per_cpu(cpu_dbs_info, j); 192 dbs_info = &per_cpu(cpu_dbs_info, j);
224 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); 193 dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
225 j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; 194 dbs_info->prev_cpu_wall = get_jiffies_64();
226 } 195 }
227 mutex_unlock(&dbs_mutex); 196 mutex_unlock(&dbs_mutex);
228 197
@@ -234,7 +203,6 @@ static struct freq_attr _name = \
234__ATTR(_name, 0644, show_##_name, store_##_name) 203__ATTR(_name, 0644, show_##_name, store_##_name)
235 204
236define_one_rw(sampling_rate); 205define_one_rw(sampling_rate);
237define_one_rw(sampling_down_factor);
238define_one_rw(up_threshold); 206define_one_rw(up_threshold);
239define_one_rw(ignore_nice_load); 207define_one_rw(ignore_nice_load);
240 208
@@ -242,7 +210,6 @@ static struct attribute * dbs_attributes[] = {
242 &sampling_rate_max.attr, 210 &sampling_rate_max.attr,
243 &sampling_rate_min.attr, 211 &sampling_rate_min.attr,
244 &sampling_rate.attr, 212 &sampling_rate.attr,
245 &sampling_down_factor.attr,
246 &up_threshold.attr, 213 &up_threshold.attr,
247 &ignore_nice_load.attr, 214 &ignore_nice_load.attr,
248 NULL 215 NULL
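
show_one() and define_one_rw() are token-pasting helpers: each tunable becomes a struct freq_attr with mode 0644 wired to a matching show_/store_ pair, and sysfs_create_group() (later in the file) exposes the whole set under the policy's kobject. Reconstructed expansion of define_one_rw(up_threshold) — inferred from the macro shown above, not quoted from the tree:

    static struct freq_attr up_threshold =
            __ATTR(up_threshold, 0644, show_up_threshold, store_up_threshold);

At runtime the attribute typically appears as /sys/devices/system/cpu/cpuN/cpufreq/ondemand/up_threshold; writing a value funnels through store_up_threshold() under dbs_mutex.
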
@@ -255,26 +222,27 @@ static struct attribute_group dbs_attr_group = {
255 222
256/************************** sysfs end ************************/ 223/************************** sysfs end ************************/
257 224
258static void dbs_check_cpu(int cpu) 225static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
259{ 226{
260 unsigned int idle_ticks, up_idle_ticks, total_ticks; 227 unsigned int idle_ticks, total_ticks;
261 unsigned int freq_next; 228 unsigned int load;
262 unsigned int freq_down_sampling_rate; 229 cputime64_t cur_jiffies;
263 static int down_skip[NR_CPUS];
264 struct cpu_dbs_info_s *this_dbs_info;
265 230
266 struct cpufreq_policy *policy; 231 struct cpufreq_policy *policy;
267 unsigned int j; 232 unsigned int j;
268 233
269 this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
270 if (!this_dbs_info->enable) 234 if (!this_dbs_info->enable)
271 return; 235 return;
272 236
273 policy = this_dbs_info->cur_policy; 237 policy = this_dbs_info->cur_policy;
238 cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
239 total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
240 this_dbs_info->prev_cpu_wall);
241 this_dbs_info->prev_cpu_wall = cur_jiffies;
274 /* 242 /*
 275 * Every sampling_rate, we check if current idle time is less 243 * Every sampling_rate, we check if current idle time is less
 276 * than 20% (default), then we try to increase frequency 244 * than 20% (default), then we try to increase frequency
 277 * Every sampling_rate*sampling_down_factor, we look for the lowest 245 * Every sampling_rate, we look for the lowest
 278 * frequency which can sustain the load while keeping idle time over 246 * frequency which can sustain the load while keeping idle time over
 279 * 30%. If such a frequency exists, we try to decrease to this frequency. 247 * 30%. If such a frequency exists, we try to decrease to this frequency.
280 * 248 *
@@ -283,36 +251,26 @@ static void dbs_check_cpu(int cpu)
283 * 5% (default) of current frequency 251 * 5% (default) of current frequency
284 */ 252 */
285 253
286 /* Check for frequency increase */ 254 /* Get Idle Time */
287 idle_ticks = UINT_MAX; 255 idle_ticks = UINT_MAX;
288 for_each_cpu_mask(j, policy->cpus) { 256 for_each_cpu_mask(j, policy->cpus) {
289 unsigned int tmp_idle_ticks, total_idle_ticks; 257 cputime64_t total_idle_ticks;
258 unsigned int tmp_idle_ticks;
290 struct cpu_dbs_info_s *j_dbs_info; 259 struct cpu_dbs_info_s *j_dbs_info;
291 260
292 j_dbs_info = &per_cpu(cpu_dbs_info, j); 261 j_dbs_info = &per_cpu(cpu_dbs_info, j);
293 total_idle_ticks = get_cpu_idle_time(j); 262 total_idle_ticks = get_cpu_idle_time(j);
294 tmp_idle_ticks = total_idle_ticks - 263 tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
295 j_dbs_info->prev_cpu_idle_up; 264 j_dbs_info->prev_cpu_idle);
296 j_dbs_info->prev_cpu_idle_up = total_idle_ticks; 265 j_dbs_info->prev_cpu_idle = total_idle_ticks;
297 266
298 if (tmp_idle_ticks < idle_ticks) 267 if (tmp_idle_ticks < idle_ticks)
299 idle_ticks = tmp_idle_ticks; 268 idle_ticks = tmp_idle_ticks;
300 } 269 }
270 load = (100 * (total_ticks - idle_ticks)) / total_ticks;
301 271
302 /* Scale idle ticks by 100 and compare with up and down ticks */ 272 /* Check for frequency increase */
303 idle_ticks *= 100; 273 if (load > dbs_tuners_ins.up_threshold) {
304 up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
305 usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
306
307 if (idle_ticks < up_idle_ticks) {
308 down_skip[cpu] = 0;
309 for_each_cpu_mask(j, policy->cpus) {
310 struct cpu_dbs_info_s *j_dbs_info;
311
312 j_dbs_info = &per_cpu(cpu_dbs_info, j);
313 j_dbs_info->prev_cpu_idle_down =
314 j_dbs_info->prev_cpu_idle_up;
315 }
316 /* if we are already at full speed then break out early */ 274 /* if we are already at full speed then break out early */
317 if (policy->cur == policy->max) 275 if (policy->cur == policy->max)
318 return; 276 return;
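
To make the new single-pass logic concrete, hypothetical numbers: with total_ticks = 100 for the sampling window and the least-idle CPU in the policy reporting idle_ticks = 15, load = (100 * (100 - 15)) / 100 = 85. That exceeds the default up_threshold (80 in this era, per DEF_FREQUENCY_UP_THRESHOLD earlier in the file), so the governor targets policy->max immediately. The same number then drives the decrease path in the next hunk: a load of 30 at policy->cur = 2000000 kHz gives freq_next = 2000000 * 30 / (80 - 10) = 857142 kHz, i.e. the lowest frequency expected to carry the same work while staying 10 points under the threshold. The old code reached the up/down decisions through two separate idle-tick bookkeeping passes; one load percentage replaces both.
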
@@ -323,83 +281,49 @@ static void dbs_check_cpu(int cpu)
323 } 281 }
324 282
325 /* Check for frequency decrease */ 283 /* Check for frequency decrease */
326 down_skip[cpu]++;
327 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
328 return;
329
330 idle_ticks = UINT_MAX;
331 for_each_cpu_mask(j, policy->cpus) {
332 unsigned int tmp_idle_ticks, total_idle_ticks;
333 struct cpu_dbs_info_s *j_dbs_info;
334
335 j_dbs_info = &per_cpu(cpu_dbs_info, j);
336 /* Check for frequency decrease */
337 total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
338 tmp_idle_ticks = total_idle_ticks -
339 j_dbs_info->prev_cpu_idle_down;
340 j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
341
342 if (tmp_idle_ticks < idle_ticks)
343 idle_ticks = tmp_idle_ticks;
344 }
345
346 down_skip[cpu] = 0;
347 /* if we cannot reduce the frequency anymore, break out early */ 284 /* if we cannot reduce the frequency anymore, break out early */
348 if (policy->cur == policy->min) 285 if (policy->cur == policy->min)
349 return; 286 return;
350 287
351 /* Compute how many ticks there are between two measurements */
352 freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
353 dbs_tuners_ins.sampling_down_factor;
354 total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
355
356 /* 288 /*
357 * The optimal frequency is the frequency that is the lowest that 289 * The optimal frequency is the frequency that is the lowest that
358 * can support the current CPU usage without triggering the up 290 * can support the current CPU usage without triggering the up
359 * policy. To be safe, we focus 10 points under the threshold. 291 * policy. To be safe, we focus 10 points under the threshold.
360 */ 292 */
361 freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks; 293 if (load < (dbs_tuners_ins.up_threshold - 10)) {
362 freq_next = (freq_next * policy->cur) / 294 unsigned int freq_next;
295 freq_next = (policy->cur * load) /
363 (dbs_tuners_ins.up_threshold - 10); 296 (dbs_tuners_ins.up_threshold - 10);
364 297
365 if (freq_next < policy->min)
366 freq_next = policy->min;
367
368 if (freq_next <= ((policy->cur * 95) / 100))
369 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); 298 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
299 }
370} 300}
371 301
372static void do_dbs_timer(void *data) 302static void do_dbs_timer(void *data)
373{ 303{
374 int i; 304 unsigned int cpu = smp_processor_id();
375 lock_cpu_hotplug(); 305 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
376 mutex_lock(&dbs_mutex); 306
377 for_each_online_cpu(i) 307 dbs_check_cpu(dbs_info);
378 dbs_check_cpu(i); 308 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
379 queue_delayed_work(dbs_workq, &dbs_work, 309 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
380 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
381 mutex_unlock(&dbs_mutex);
382 unlock_cpu_hotplug();
383} 310}
384 311
385static inline void dbs_timer_init(void) 312static inline void dbs_timer_init(unsigned int cpu)
386{ 313{
387 INIT_WORK(&dbs_work, do_dbs_timer, NULL); 314 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
388 if (!dbs_workq) 315
389 dbs_workq = create_singlethread_workqueue("ondemand"); 316 INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
390 if (!dbs_workq) { 317 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
391 printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n"); 318 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
392 return;
393 }
394 queue_delayed_work(dbs_workq, &dbs_work,
395 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
396 return; 319 return;
397} 320}
398 321
399static inline void dbs_timer_exit(void) 322static inline void dbs_timer_exit(unsigned int cpu)
400{ 323{
401 if (dbs_workq) 324 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
402 cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work); 325
326 cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
403} 327}
404 328
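
The timer restructuring above replaces one global dbs_work — which walked every online CPU under dbs_mutex and the hotplug lock — with a per-CPU work_struct that re-queues itself on its own CPU, so sampling is local and lock-free on the hot path. A minimal sketch of that self-rearming per-CPU delayed-work pattern in this kernel generation's API (three-argument INIT_WORK, handlers taking void *); my_wq, struct sampler, and the function names are invented for illustration:

    #include <linux/jiffies.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/workqueue.h>

    struct sampler {
            struct work_struct work;   /* pre-2.6.20: doubles as delayed work */
            unsigned int period_us;
    };

    static struct workqueue_struct *my_wq;   /* assumed created elsewhere */
    static DEFINE_PER_CPU(struct sampler, samplers);

    static void sample_fn(void *data)
    {
            unsigned int cpu = smp_processor_id();
            struct sampler *s = &per_cpu(samplers, cpu);

            /* ... sample this CPU's counters here ... */

            /* re-arm on the same CPU to keep sampling per-CPU and periodic */
            queue_delayed_work_on(cpu, my_wq, &s->work,
                                  usecs_to_jiffies(s->period_us));
    }

    static void sampler_start(unsigned int cpu, unsigned int period_us)
    {
            struct sampler *s = &per_cpu(samplers, cpu);

            s->period_us = period_us;
            INIT_WORK(&s->work, sample_fn, NULL);  /* three-arg form of this era */
            queue_delayed_work_on(cpu, my_wq, &s->work,
                                  usecs_to_jiffies(period_us));
    }

    static void sampler_stop(unsigned int cpu)
    {
            struct sampler *s = &per_cpu(samplers, cpu);

            /* cancels the pending timer and waits out a running instance */
            cancel_rearming_delayed_workqueue(my_wq, &s->work);
    }

cancel_rearming_delayed_workqueue() both breaks the self-re-queue loop and waits for a run in flight, which is what lets dbs_timer_exit() be called safely before the workqueue is destroyed.
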
405static int cpufreq_governor_dbs(struct cpufreq_policy *policy, 329static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -413,8 +337,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
413 337
414 switch (event) { 338 switch (event) {
415 case CPUFREQ_GOV_START: 339 case CPUFREQ_GOV_START:
416 if ((!cpu_online(cpu)) || 340 if ((!cpu_online(cpu)) || (!policy->cur))
417 (!policy->cur))
418 return -EINVAL; 341 return -EINVAL;
419 342
420 if (policy->cpuinfo.transition_latency > 343 if (policy->cpuinfo.transition_latency >
@@ -427,18 +350,26 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
427 break; 350 break;
428 351
429 mutex_lock(&dbs_mutex); 352 mutex_lock(&dbs_mutex);
353 dbs_enable++;
354 if (dbs_enable == 1) {
355 kondemand_wq = create_workqueue("kondemand");
356 if (!kondemand_wq) {
357 printk(KERN_ERR "Creation of kondemand failed\n");
358 dbs_enable--;
359 mutex_unlock(&dbs_mutex);
360 return -ENOSPC;
361 }
362 }
430 for_each_cpu_mask(j, policy->cpus) { 363 for_each_cpu_mask(j, policy->cpus) {
431 struct cpu_dbs_info_s *j_dbs_info; 364 struct cpu_dbs_info_s *j_dbs_info;
432 j_dbs_info = &per_cpu(cpu_dbs_info, j); 365 j_dbs_info = &per_cpu(cpu_dbs_info, j);
433 j_dbs_info->cur_policy = policy; 366 j_dbs_info->cur_policy = policy;
434 367
435 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); 368 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
436 j_dbs_info->prev_cpu_idle_down 369 j_dbs_info->prev_cpu_wall = get_jiffies_64();
437 = j_dbs_info->prev_cpu_idle_up;
438 } 370 }
439 this_dbs_info->enable = 1; 371 this_dbs_info->enable = 1;
440 sysfs_create_group(&policy->kobj, &dbs_attr_group); 372 sysfs_create_group(&policy->kobj, &dbs_attr_group);
441 dbs_enable++;
442 /* 373 /*
443 * Start the timerschedule work, when this governor 374 * Start the timerschedule work, when this governor
444 * is used for first time 375 * is used for first time
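
The kondemand workqueue is now created lazily when dbs_enable goes 0 -> 1 and torn down when the last policy detaches (GOV_STOP path below), all under dbs_mutex so concurrent starts and stops cannot race the create/destroy. (The patch returns -ENOSPC on creation failure; -ENOMEM would be the more conventional code and is what the sketch uses.) The refcounted-singleton shape, reduced to its essentials with invented names:

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    static DEFINE_MUTEX(demo_lock);
    static struct workqueue_struct *demo_wq;
    static unsigned int demo_users;

    static int demo_get_wq(void)
    {
            int ret = 0;

            mutex_lock(&demo_lock);
            if (demo_users++ == 0) {
                    demo_wq = create_workqueue("demo_wq");
                    if (!demo_wq) {
                            demo_users--;   /* creation failed: stay at zero */
                            ret = -ENOMEM;
                    }
            }
            mutex_unlock(&demo_lock);
            return ret;
    }

    static void demo_put_wq(void)
    {
            mutex_lock(&demo_lock);
            if (--demo_users == 0)
                    destroy_workqueue(demo_wq);
            mutex_unlock(&demo_lock);
    }
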
@@ -457,23 +388,20 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
457 def_sampling_rate = MIN_STAT_SAMPLING_RATE; 388 def_sampling_rate = MIN_STAT_SAMPLING_RATE;
458 389
459 dbs_tuners_ins.sampling_rate = def_sampling_rate; 390 dbs_tuners_ins.sampling_rate = def_sampling_rate;
460 dbs_timer_init();
461 } 391 }
392 dbs_timer_init(policy->cpu);
462 393
463 mutex_unlock(&dbs_mutex); 394 mutex_unlock(&dbs_mutex);
464 break; 395 break;
465 396
466 case CPUFREQ_GOV_STOP: 397 case CPUFREQ_GOV_STOP:
467 mutex_lock(&dbs_mutex); 398 mutex_lock(&dbs_mutex);
399 dbs_timer_exit(policy->cpu);
468 this_dbs_info->enable = 0; 400 this_dbs_info->enable = 0;
469 sysfs_remove_group(&policy->kobj, &dbs_attr_group); 401 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
470 dbs_enable--; 402 dbs_enable--;
471 /*
472 * Stop the timerschedule work, when this governor
473 * is used for first time
474 */
475 if (dbs_enable == 0) 403 if (dbs_enable == 0)
476 dbs_timer_exit(); 404 destroy_workqueue(kondemand_wq);
477 405
478 mutex_unlock(&dbs_mutex); 406 mutex_unlock(&dbs_mutex);
479 407
@@ -483,13 +411,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
483 lock_cpu_hotplug(); 411 lock_cpu_hotplug();
484 mutex_lock(&dbs_mutex); 412 mutex_lock(&dbs_mutex);
485 if (policy->max < this_dbs_info->cur_policy->cur) 413 if (policy->max < this_dbs_info->cur_policy->cur)
486 __cpufreq_driver_target( 414 __cpufreq_driver_target(this_dbs_info->cur_policy,
487 this_dbs_info->cur_policy, 415 policy->max,
488 policy->max, CPUFREQ_RELATION_H); 416 CPUFREQ_RELATION_H);
489 else if (policy->min > this_dbs_info->cur_policy->cur) 417 else if (policy->min > this_dbs_info->cur_policy->cur)
490 __cpufreq_driver_target( 418 __cpufreq_driver_target(this_dbs_info->cur_policy,
491 this_dbs_info->cur_policy, 419 policy->min,
492 policy->min, CPUFREQ_RELATION_L); 420 CPUFREQ_RELATION_L);
493 mutex_unlock(&dbs_mutex); 421 mutex_unlock(&dbs_mutex);
494 unlock_cpu_hotplug(); 422 unlock_cpu_hotplug();
495 break; 423 break;
@@ -498,9 +426,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
498} 426}
499 427
500static struct cpufreq_governor cpufreq_gov_dbs = { 428static struct cpufreq_governor cpufreq_gov_dbs = {
501 .name = "ondemand", 429 .name = "ondemand",
502 .governor = cpufreq_governor_dbs, 430 .governor = cpufreq_governor_dbs,
503 .owner = THIS_MODULE, 431 .owner = THIS_MODULE,
504}; 432};
505 433
506static int __init cpufreq_gov_dbs_init(void) 434static int __init cpufreq_gov_dbs_init(void)
@@ -510,21 +438,15 @@ static int __init cpufreq_gov_dbs_init(void)
510 438
511static void __exit cpufreq_gov_dbs_exit(void) 439static void __exit cpufreq_gov_dbs_exit(void)
512{ 440{
513 /* Make sure that the scheduled work is indeed not running.
514 Assumes the timer has been cancelled first. */
515 if (dbs_workq) {
516 flush_workqueue(dbs_workq);
517 destroy_workqueue(dbs_workq);
518 }
519
520 cpufreq_unregister_governor(&cpufreq_gov_dbs); 441 cpufreq_unregister_governor(&cpufreq_gov_dbs);
521} 442}
522 443
523 444
524MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>"); 445MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
525MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for " 446MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
526 "Low Latency Frequency Transition capable processors"); 447MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
527MODULE_LICENSE ("GPL"); 448 "Low Latency Frequency Transition capable processors");
449MODULE_LICENSE("GPL");
528 450
529module_init(cpufreq_gov_dbs_init); 451module_init(cpufreq_gov_dbs_init);
530module_exit(cpufreq_gov_dbs_exit); 452module_exit(cpufreq_gov_dbs_exit);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 5829143558e1..15278044295c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -166,8 +166,8 @@ static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
166} 166}
167 167
168/** 168/**
169 * dma_client_chan_free - release a DMA channel 169 * dma_chan_cleanup - release a DMA channel's resources
170 * @chan: &dma_chan 170 * @kref: kernel reference structure that contains the DMA channel device
171 */ 171 */
172void dma_chan_cleanup(struct kref *kref) 172void dma_chan_cleanup(struct kref *kref)
173{ 173{
@@ -199,7 +199,7 @@ static void dma_client_chan_free(struct dma_chan *chan)
199 * dma_chans_rebalance - reallocate channels to clients 199 * dma_chans_rebalance - reallocate channels to clients
200 * 200 *
 201 * When the number of DMA channels in the system changes, 201 * When the number of DMA channels in the system changes,
202 * channels need to be rebalanced among clients 202 * channels need to be rebalanced among clients.
203 */ 203 */
204static void dma_chans_rebalance(void) 204static void dma_chans_rebalance(void)
205{ 205{
@@ -264,7 +264,7 @@ struct dma_client *dma_async_client_register(dma_event_callback event_callback)
264 264
265/** 265/**
266 * dma_async_client_unregister - unregister a client and free the &dma_client 266 * dma_async_client_unregister - unregister a client and free the &dma_client
267 * @client: 267 * @client: &dma_client to free
268 * 268 *
269 * Force frees any allocated DMA channels, frees the &dma_client memory 269 * Force frees any allocated DMA channels, frees the &dma_client memory
270 */ 270 */
@@ -306,7 +306,7 @@ void dma_async_client_chan_request(struct dma_client *client,
306} 306}
307 307
308/** 308/**
309 * dma_async_device_register - 309 * dma_async_device_register - registers DMA devices found
310 * @device: &dma_device 310 * @device: &dma_device
311 */ 311 */
312int dma_async_device_register(struct dma_device *device) 312int dma_async_device_register(struct dma_device *device)
@@ -348,8 +348,8 @@ int dma_async_device_register(struct dma_device *device)
348} 348}
349 349
350/** 350/**
351 * dma_async_device_unregister - 351 * dma_async_device_cleanup - function called when all references are released
352 * @device: &dma_device 352 * @kref: kernel reference object
353 */ 353 */
354static void dma_async_device_cleanup(struct kref *kref) 354static void dma_async_device_cleanup(struct kref *kref)
355{ 355{
@@ -359,7 +359,11 @@ static void dma_async_device_cleanup(struct kref *kref)
359 complete(&device->done); 359 complete(&device->done);
360} 360}
361 361
362void dma_async_device_unregister(struct dma_device* device) 362/**
363 * dma_async_device_unregister - unregisters DMA devices
364 * @device: &dma_device
365 */
366void dma_async_device_unregister(struct dma_device *device)
363{ 367{
364 struct dma_chan *chan; 368 struct dma_chan *chan;
365 unsigned long flags; 369 unsigned long flags;
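
Every dmaengine change above is a kernel-doc repair rather than a code change: the name after /** must match the function actually being documented (dma_chan_cleanup, dma_async_device_cleanup), and each parameter needs its own @name: line with a description, otherwise scripts/kernel-doc warns when the documentation is generated. The expected shape, schematically:

    /**
     * my_func - one-line summary of what the function does
     * @dev: device to operate on
     * @len: transfer length in bytes
     *
     * Longer description follows after a blank comment line; it can span
     * several sentences and mention locking or context requirements.
     */
    int my_func(struct device *dev, size_t len);
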
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index ecad8f65d2d4..78bf46d917b7 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -217,7 +217,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
217 217
218/** 218/**
 219 * do_ioat_dma_memcpy - actual function that initiates an IOAT DMA transaction 219 * do_ioat_dma_memcpy - actual function that initiates an IOAT DMA transaction
220 * @chan: IOAT DMA channel handle 220 * @ioat_chan: IOAT DMA channel handle
221 * @dest: DMA destination address 221 * @dest: DMA destination address
222 * @src: DMA source address 222 * @src: DMA source address
223 * @len: transaction length in bytes 223 * @len: transaction length in bytes
@@ -383,7 +383,7 @@ static dma_cookie_t ioat_dma_memcpy_buf_to_pg(struct dma_chan *chan,
383 * @dest_off: offset into that page 383 * @dest_off: offset into that page
384 * @src_pg: pointer to the page to copy from 384 * @src_pg: pointer to the page to copy from
385 * @src_off: offset into that page 385 * @src_off: offset into that page
386 * @len: transaction length in bytes. This is guaranteed to not make a copy 386 * @len: transaction length in bytes. This is guaranteed not to make a copy
387 * across a page boundary. 387 * across a page boundary.
388 */ 388 */
389 389
@@ -407,7 +407,7 @@ static dma_cookie_t ioat_dma_memcpy_pg_to_pg(struct dma_chan *chan,
407} 407}
408 408
409/** 409/**
410 * ioat_dma_memcpy_issue_pending - push potentially unrecognoized appended descriptors to hw 410 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
411 * @chan: DMA channel handle 411 * @chan: DMA channel handle
412 */ 412 */
413 413
@@ -510,6 +510,8 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
 510 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction 510 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
511 * @chan: IOAT DMA channel handle 511 * @chan: IOAT DMA channel handle
512 * @cookie: DMA transaction identifier 512 * @cookie: DMA transaction identifier
513 * @done: if not %NULL, updated with last completed transaction
514 * @used: if not %NULL, updated with last used transaction
513 */ 515 */
514 516
515static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, 517static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
@@ -826,7 +828,7 @@ static int __init ioat_init_module(void)
826 /* if forced, worst case is that rmmod hangs */ 828 /* if forced, worst case is that rmmod hangs */
827 __unsafe(THIS_MODULE); 829 __unsafe(THIS_MODULE);
828 830
829 pci_module_init(&ioat_pci_drv); 831 return pci_module_init(&ioat_pci_drv);
830} 832}
831 833
832module_init(ioat_init_module); 834module_init(ioat_init_module);
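
The ioat_init_module() change fixes a swallowed return value: pci_module_init() — this era's wrapper around pci_register_driver() — reports registration failure, and dropping that result lets the module claim a successful load with no driver bound. The corrected shape, with a hypothetical driver name:

    #include <linux/module.h>
    #include <linux/pci.h>

    static struct pci_driver demo_pci_drv;  /* assumed populated elsewhere */

    static int __init demo_init(void)
    {
            /* propagate the error so insmod/modprobe can report the failure */
            return pci_module_init(&demo_pci_drv);
    }
    module_init(demo_init);
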
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index 41a21ab2b000..a30c7349075a 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -76,7 +76,7 @@
76#define IOAT_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */ 76#define IOAT_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */
77#define IOAT_CHANSTS_OFFSET_LOW 0x04 77#define IOAT_CHANSTS_OFFSET_LOW 0x04
78#define IOAT_CHANSTS_OFFSET_HIGH 0x08 78#define IOAT_CHANSTS_OFFSET_HIGH 0x08
79#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR 0xFFFFFFFFFFFFFFC0 79#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR 0xFFFFFFFFFFFFFFC0UL
80#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 80#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010
81#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 81#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007
82#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 82#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0
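
The UL suffix on IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR pins the constant's type to unsigned long instead of leaving it to the "first type that fits" rule for unsuffixed hex constants; on the 64-bit platforms this device ships in, that keeps the mask an unambiguous 64-bit value when combined with register reads. A hypothetical helper showing the intended use:

    #include <linux/types.h>

    /* Hypothetical: mask the completed-descriptor address (64-byte aligned,
     * hence the low 6 bits clear) out of a raw channel status value. */
    static inline u64 completed_desc_addr(u64 chansts)
    {
            return chansts & 0xFFFFFFFFFFFFFFC0UL;
    }
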
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index 5ed327e453a2..d637555a833b 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -31,7 +31,7 @@
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33 33
34int num_pages_spanned(struct iovec *iov) 34static int num_pages_spanned(struct iovec *iov)
35{ 35{
36 return 36 return
37 ((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) - 37 ((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 6ca3476d02c7..adbe9f76a505 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -838,7 +838,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
838 "transferred\n", pc->actually_transferred); 838 "transferred\n", pc->actually_transferred);
839 clear_bit(PC_DMA_IN_PROGRESS, &pc->flags); 839 clear_bit(PC_DMA_IN_PROGRESS, &pc->flags);
840 840
841 local_irq_enable(); 841 local_irq_enable_in_hardirq();
842 842
843 if (status.b.check || test_bit(PC_DMA_ERROR, &pc->flags)) { 843 if (status.b.check || test_bit(PC_DMA_ERROR, &pc->flags)) {
844 /* Error detected */ 844 /* Error detected */
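
This hunk and the matching ones in ide-io.c and ide-taskfile.c below belong to the irq-flags-tracing work: re-enabling interrupts inside a hardirq handler confuses lockdep's IRQ-state tracking, so these call sites switch to local_irq_enable_in_hardirq(). As best I recall, the definition from this era reduces to the following (paraphrased from <linux/interrupt.h>, not quoted):

    /* With lockdep's IRQ-state tracking enabled, re-enabling interrupts in
     * hardirq context would confuse the tracking, so the call is a no-op
     * there; otherwise it is plain local_irq_enable(). */
    #ifdef CONFIG_LOCKDEP
    # define local_irq_enable_in_hardirq()  do { } while (0)
    #else
    # define local_irq_enable_in_hardirq()  local_irq_enable()
    #endif

With lockdep off, behavior is unchanged; with it on, interrupts simply stay disabled for the rest of the handler, which these IDE paths tolerate.
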
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 7dba9992ad30..fb6795236e76 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -693,7 +693,7 @@ static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
693 u8 stat = hwif->INB(IDE_STATUS_REG); 693 u8 stat = hwif->INB(IDE_STATUS_REG);
694 int retries = 10; 694 int retries = 10;
695 695
696 local_irq_enable(); 696 local_irq_enable_in_hardirq();
697 if ((stat & DRQ_STAT) && args && args[3]) { 697 if ((stat & DRQ_STAT) && args && args[3]) {
698 u8 io_32bit = drive->io_32bit; 698 u8 io_32bit = drive->io_32bit;
699 drive->io_32bit = 0; 699 drive->io_32bit = 0;
@@ -1286,7 +1286,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
1286 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) 1286 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
1287 disable_irq_nosync(hwif->irq); 1287 disable_irq_nosync(hwif->irq);
1288 spin_unlock(&ide_lock); 1288 spin_unlock(&ide_lock);
1289 local_irq_enable(); 1289 local_irq_enable_in_hardirq();
1290 /* allow other IRQs while we start this request */ 1290 /* allow other IRQs while we start this request */
1291 startstop = start_request(drive, rq); 1291 startstop = start_request(drive, rq);
1292 spin_lock_irq(&ide_lock); 1292 spin_lock_irq(&ide_lock);
@@ -1631,7 +1631,7 @@ irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
1631 spin_unlock(&ide_lock); 1631 spin_unlock(&ide_lock);
1632 1632
1633 if (drive->unmask) 1633 if (drive->unmask)
1634 local_irq_enable(); 1634 local_irq_enable_in_hardirq();
1635 /* service this interrupt, may set handler for next interrupt */ 1635 /* service this interrupt, may set handler for next interrupt */
1636 startstop = handler(drive); 1636 startstop = handler(drive);
1637 spin_lock_irq(&ide_lock); 1637 spin_lock_irq(&ide_lock);
@@ -1705,7 +1705,7 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
1705{ 1705{
1706 unsigned long flags; 1706 unsigned long flags;
1707 ide_hwgroup_t *hwgroup = HWGROUP(drive); 1707 ide_hwgroup_t *hwgroup = HWGROUP(drive);
1708 DECLARE_COMPLETION(wait); 1708 DECLARE_COMPLETION_ONSTACK(wait);
1709 int where = ELEVATOR_INSERT_BACK, err; 1709 int where = ELEVATOR_INSERT_BACK, err;
1710 int must_wait = (action == ide_wait || action == ide_head_wait); 1710 int must_wait = (action == ide_wait || action == ide_head_wait);
1711 1711
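
DECLARE_COMPLETION() is meant for static objects; a completion living on the stack needs DECLARE_COMPLETION_ONSTACK() so that, under lockdep, each activation of the function gets its own lock class key rather than sharing one static key across unrelated stacks. Usage sketch — start_async_thing() is invented and stands in for whatever eventually calls complete():

    #include <linux/completion.h>

    extern void start_async_thing(struct completion *done);  /* hypothetical */

    static int do_and_wait(void)
    {
            /* on-stack completion: the _ONSTACK variant gives lockdep a
             * per-activation key instead of one shared static key */
            DECLARE_COMPLETION_ONSTACK(done);

            start_async_thing(&done);
            wait_for_completion(&done);
            return 0;
    }
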
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 04547eb0833f..97a9244312fc 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -222,7 +222,7 @@ ide_startstop_t task_no_data_intr (ide_drive_t *drive)
222 ide_hwif_t *hwif = HWIF(drive); 222 ide_hwif_t *hwif = HWIF(drive);
223 u8 stat; 223 u8 stat;
224 224
225 local_irq_enable(); 225 local_irq_enable_in_hardirq();
226 if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) { 226 if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
227 return ide_error(drive, "task_no_data_intr", stat); 227 return ide_error(drive, "task_no_data_intr", stat);
228 /* calls ide_end_drive_cmd */ 228 /* calls ide_end_drive_cmd */
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 2c669287f5bd..4feead4a35c5 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -107,6 +107,14 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
  */
 static DEFINE_MUTEX(host_num_alloc);
 
+/*
+ * The pending_packet_queue is special in that it's processed
+ * from hardirq context too (such as hpsb_bus_reset()). Hence
+ * split the lock class from the usual networking skb-head
+ * lock class by using a separate key for it:
+ */
+static struct lock_class_key pending_packet_queue_key;
+
 struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 				  struct device *dev)
 {
@@ -128,6 +136,8 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 	h->driver = drv;
 
 	skb_queue_head_init(&h->pending_packet_queue);
+	lockdep_set_class(&h->pending_packet_queue.lock,
+			  &pending_packet_queue_key);
 	INIT_LIST_HEAD(&h->addr_space);
 
 	for (i = 2; i < 16; i++)
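skb_queue_head_init() puts every queue into the common skb-head lock class; because this particular queue is also taken from hardirq context, it is re-keyed so lockdep tracks it separately. The general pattern, as a sketch with illustrative demo_* names:

	#include <linux/skbuff.h>
	#include <linux/lockdep.h>

	static struct lock_class_key demo_queue_key;	/* one key = one class */

	static void demo_init_queue(struct sk_buff_head *q)
	{
		skb_queue_head_init(q);		/* default skb-head class */
		lockdep_set_class(&q->lock, &demo_queue_key);	/* re-key */
	}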
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 16c387d8170c..490fc783bb0c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -224,7 +224,7 @@ static void *get_send_wqe(struct mthca_qp *qp, int n)
 
 static void mthca_wq_init(struct mthca_wq *wq)
 {
-	spin_lock_init(&wq->lock);
+	/* mthca_alloc_qp_common() initializes the locks */
 	wq->next_ind  = 0;
 	wq->last_comp = wq->max - 1;
 	wq->head      = 0;
@@ -1114,6 +1114,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	qp->sq_policy    = send_policy;
 	mthca_wq_init(&qp->sq);
 	mthca_wq_init(&qp->rq);
+	/* these are initialized separately so lockdep can tell them apart */
+	spin_lock_init(&qp->sq.lock);
+	spin_lock_init(&qp->rq.lock);
 
 	ret = mthca_map_memfree(dev, qp);
 	if (ret)
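Lockdep keys a spinlock's class off the static key that the spin_lock_init() macro declares at its call site, so initializing both work queues' locks through the shared mthca_wq_init() would fold sq.lock and rq.lock into one class and make sq-then-rq locking look recursive. Splitting the two init calls, as above, gives each its own class. A sketch of the rule with a hypothetical struct:

	#include <linux/spinlock.h>

	struct demo_qp {
		spinlock_t sq_lock;
		spinlock_t rq_lock;
	};

	static void demo_qp_init(struct demo_qp *qp)
	{
		spin_lock_init(&qp->sq_lock);	/* call site A -> class A */
		spin_lock_init(&qp->rq_lock);	/* call site B -> class B */
	}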
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index b2c033edb03c..34b0da5cfa0a 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -437,159 +437,50 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
 }
 
 static int
-iscsi_iser_conn_set_param(struct iscsi_cls_conn *cls_conn,
-			  enum iscsi_param param, uint32_t value)
+iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
+		     enum iscsi_param param, char *buf, int buflen)
 {
-	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct iscsi_session *session = conn->session;
-
-	spin_lock_bh(&session->lock);
-	if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
-	    conn->stop_stage != STOP_CONN_RECOVER) {
-		printk(KERN_ERR "iscsi_iser: can not change parameter [%d]\n",
-		       param);
-		spin_unlock_bh(&session->lock);
-		return 0;
-	}
-	spin_unlock_bh(&session->lock);
+	int value;
 
 	switch (param) {
 	case ISCSI_PARAM_MAX_RECV_DLENGTH:
 		/* TBD */
 		break;
-	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
-		conn->max_xmit_dlength = value;
-		break;
 	case ISCSI_PARAM_HDRDGST_EN:
+		sscanf(buf, "%d", &value);
 		if (value) {
 			printk(KERN_ERR "DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
+		sscanf(buf, "%d", &value);
 		if (value) {
 			printk(KERN_ERR "DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
-	case ISCSI_PARAM_INITIAL_R2T_EN:
-		session->initial_r2t_en = value;
-		break;
-	case ISCSI_PARAM_IMM_DATA_EN:
-		session->imm_data_en = value;
-		break;
-	case ISCSI_PARAM_FIRST_BURST:
-		session->first_burst = value;
-		break;
-	case ISCSI_PARAM_MAX_BURST:
-		session->max_burst = value;
-		break;
-	case ISCSI_PARAM_PDU_INORDER_EN:
-		session->pdu_inorder_en = value;
-		break;
-	case ISCSI_PARAM_DATASEQ_INORDER_EN:
-		session->dataseq_inorder_en = value;
-		break;
-	case ISCSI_PARAM_ERL:
-		session->erl = value;
-		break;
 	case ISCSI_PARAM_IFMARKER_EN:
+		sscanf(buf, "%d", &value);
 		if (value) {
 			printk(KERN_ERR "IFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_OFMARKER_EN:
+		sscanf(buf, "%d", &value);
 		if (value) {
 			printk(KERN_ERR "OFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
 	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int
-iscsi_iser_session_get_param(struct iscsi_cls_session *cls_session,
-			     enum iscsi_param param, uint32_t *value)
-{
-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
-
-	switch (param) {
-	case ISCSI_PARAM_INITIAL_R2T_EN:
-		*value = session->initial_r2t_en;
-		break;
-	case ISCSI_PARAM_MAX_R2T:
-		*value = session->max_r2t;
-		break;
-	case ISCSI_PARAM_IMM_DATA_EN:
-		*value = session->imm_data_en;
-		break;
-	case ISCSI_PARAM_FIRST_BURST:
-		*value = session->first_burst;
-		break;
-	case ISCSI_PARAM_MAX_BURST:
-		*value = session->max_burst;
-		break;
-	case ISCSI_PARAM_PDU_INORDER_EN:
-		*value = session->pdu_inorder_en;
-		break;
-	case ISCSI_PARAM_DATASEQ_INORDER_EN:
-		*value = session->dataseq_inorder_en;
-		break;
-	case ISCSI_PARAM_ERL:
-		*value = session->erl;
-		break;
-	case ISCSI_PARAM_IFMARKER_EN:
-		*value = 0;
-		break;
-	case ISCSI_PARAM_OFMARKER_EN:
-		*value = 0;
-		break;
-	default:
-		return ISCSI_ERR_PARAM_NOT_FOUND;
-	}
-
-	return 0;
-}
-
-static int
-iscsi_iser_conn_get_param(struct iscsi_cls_conn *cls_conn,
-			  enum iscsi_param param, uint32_t *value)
-{
-	struct iscsi_conn *conn = cls_conn->dd_data;
-
-	switch(param) {
-	case ISCSI_PARAM_MAX_RECV_DLENGTH:
-		*value = conn->max_recv_dlength;
-		break;
-	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
-		*value = conn->max_xmit_dlength;
-		break;
-	case ISCSI_PARAM_HDRDGST_EN:
-		*value = 0;
-		break;
-	case ISCSI_PARAM_DATADGST_EN:
-		*value = 0;
-		break;
-	/*case ISCSI_PARAM_TARGET_RECV_DLENGTH:
-		*value = conn->target_recv_dlength;
-		break;
-	case ISCSI_PARAM_INITIATOR_RECV_DLENGTH:
-		*value = conn->initiator_recv_dlength;
-		break;*/
-	default:
-		return ISCSI_ERR_PARAM_NOT_FOUND;
+		return iscsi_set_param(cls_conn, param, buf, buflen);
 	}
 
 	return 0;
 }
 
-
 static void
 iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
 {
@@ -701,7 +592,12 @@ static struct iscsi_transport iscsi_iser_transport = {
 				  ISCSI_FIRST_BURST |
 				  ISCSI_MAX_BURST |
 				  ISCSI_PDU_INORDER_EN |
-				  ISCSI_DATASEQ_INORDER_EN,
+				  ISCSI_DATASEQ_INORDER_EN |
+				  ISCSI_EXP_STATSN |
+				  ISCSI_PERSISTENT_PORT |
+				  ISCSI_PERSISTENT_ADDRESS |
+				  ISCSI_TARGET_NAME |
+				  ISCSI_TPGT,
 	.host_template          = &iscsi_iser_sht,
 	.conndata_size          = sizeof(struct iscsi_conn),
 	.max_lun                = ISCSI_ISER_MAX_LUN,
@@ -713,9 +609,9 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.create_conn            = iscsi_iser_conn_create,
 	.bind_conn              = iscsi_iser_conn_bind,
 	.destroy_conn           = iscsi_iser_conn_destroy,
-	.set_param              = iscsi_iser_conn_set_param,
-	.get_conn_param         = iscsi_iser_conn_get_param,
-	.get_session_param      = iscsi_iser_session_get_param,
+	.set_param              = iscsi_iser_set_param,
+	.get_conn_param         = iscsi_conn_get_param,
+	.get_session_param      = iscsi_session_get_param,
 	.start_conn             = iscsi_iser_conn_start,
 	.stop_conn              = iscsi_conn_stop,
 	/* these are called as part of conn recovery */
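The reworked set_param interface passes parameters as strings and lets a transport veto only what its hardware cannot honour, handing all generic bookkeeping to the libiscsi helper iscsi_set_param() that the hunk itself calls. A minimal handler in the same shape; demo_set_param is hypothetical:

	static int
	demo_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
		       char *buf, int buflen)
	{
		int value;

		switch (param) {
		case ISCSI_PARAM_HDRDGST_EN:
			sscanf(buf, "%d", &value);
			if (value)
				return -EPROTO;	/* digest offload unsupported */
			break;
		default:	/* generic bookkeeping lives in libiscsi */
			return iscsi_set_param(cls_conn, param, buf, buflen);
		}
		return 0;
	}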
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 7d9fafea9615..54adba2d8ed5 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -88,7 +88,7 @@ static struct of_device_id sparc_i8042_match[] = {
 	},
 	{},
 };
-MODULE_DEVICE_TABLE(of, i8042_match);
+MODULE_DEVICE_TABLE(of, sparc_i8042_match);
 
 static struct of_platform_driver sparc_i8042_driver = {
 	.name		= "i8042",
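MODULE_DEVICE_TABLE(type, name) emits a module alias built from the symbol `name`, so it must name the actual table; the old line referenced a nonexistent i8042_match, which breaks a modular build. Shape of the correct pairing, with an illustrative demo_match:

	#include <linux/module.h>
	#include <linux/mod_devicetable.h>

	static struct of_device_id demo_match[] = {
		{ .name = "8042", },
		{},
	};
	MODULE_DEVICE_TABLE(of, demo_match);	/* must match the array name */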
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 79c97f94bcbd..61a6f977846f 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -177,7 +177,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
 		return -1;
 	}
 
-	mutex_lock(&ps2dev->cmd_mutex);
+	mutex_lock_nested(&ps2dev->cmd_mutex, SINGLE_DEPTH_NESTING);
 
 	serio_pause_rx(ps2dev->serio);
 	ps2dev->flags = command == PS2_CMD_GETID ? PS2_FLAG_WAITID : 0;
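Every ps2dev cmd_mutex shares one lock class, and a pass-through port (a mouse behind the keyboard controller, say) legitimately takes its own mutex while its parent's mutex of the same class is held. mutex_lock_nested() with SINGLE_DEPTH_NESTING tells lockdep that one level of same-class nesting is intentional. A hedged sketch; demo_port is hypothetical and the real nesting is driven by the serio topology:

	#include <linux/mutex.h>

	struct demo_port {
		struct mutex cmd_mutex;
		struct demo_port *parent;	/* pass-through parent, if any */
	};

	static void demo_command(struct demo_port *port)
	{
		if (port->parent)
			mutex_lock(&port->parent->cmd_mutex);	/* level 0 */
		mutex_lock_nested(&port->cmd_mutex,
				  port->parent ? SINGLE_DEPTH_NESTING : 0);
		/* ... issue the command ... */
		mutex_unlock(&port->cmd_mutex);
		if (port->parent)
			mutex_unlock(&port->parent->cmd_mutex);
	}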
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index 314fc0830d90..4b08852c35ee 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -90,22 +90,12 @@ int macio_init(void)
 {
 	struct device_node *adbs;
 	struct resource r;
+	unsigned int irq;
 
 	adbs = find_compatible_devices("adb", "chrp,adb0");
 	if (adbs == 0)
 		return -ENXIO;
 
-#if 0
-	{ int i = 0;
-
-	printk("macio_adb_init: node = %p, addrs =", adbs->node);
-	while(!of_address_to_resource(adbs, i, &r))
-		printk(" %x(%x)", r.start, r.end - r.start);
-	printk(", intrs =");
-	for (i = 0; i < adbs->n_intrs; ++i)
-		printk(" %x", adbs->intrs[i].line);
-	printk("\n"); }
-#endif
 	if (of_address_to_resource(adbs, 0, &r))
 		return -ENXIO;
 	adb = ioremap(r.start, sizeof(struct adb_regs));
@@ -117,10 +107,9 @@ int macio_init(void)
 	out_8(&adb->active_lo.r, 0xff);
 	out_8(&adb->autopoll.r, APE);
 
-	if (request_irq(adbs->intrs[0].line, macio_adb_interrupt,
-			0, "ADB", (void *)0)) {
-		printk(KERN_ERR "ADB: can't get irq %d\n",
-		       adbs->intrs[0].line);
+	irq = irq_of_parse_and_map(adbs, 0);
+	if (request_irq(irq, macio_adb_interrupt, 0, "ADB", (void *)0)) {
+		printk(KERN_ERR "ADB: can't get irq %d\n", irq);
 		return -EAGAIN;
 	}
 	out_8(&adb->intr_enb.r, DFB | TAG);
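This is the recurring conversion here and in the following macintosh drivers: the pre-parsed np->intrs[] array is gone, and a driver resolves its interrupt at request time with irq_of_parse_and_map(), which returns a virtual irq number or NO_IRQ. The pattern, using this era's handler signature; demo_* names are illustrative:

	#include <linux/interrupt.h>
	#include <asm/prom.h>

	static int demo_attach_irq(struct device_node *np,
				   irqreturn_t (*handler)(int, void *,
							  struct pt_regs *))
	{
		unsigned int irq = irq_of_parse_and_map(np, 0); /* index 0 */

		if (irq == NO_IRQ)	/* nothing translatable at index 0 */
			return -ENXIO;
		return request_irq(irq, handler, 0, "demo", NULL);
	}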
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 40ae7b6a939d..80c0c665b5f6 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -280,75 +280,128 @@ static void macio_release_dev(struct device *dev)
 static int macio_resource_quirks(struct device_node *np, struct resource *res,
 				 int index)
 {
-	if (res->flags & IORESOURCE_MEM) {
-		/* Grand Central has too large resource 0 on some machines */
-		if (index == 0 && !strcmp(np->name, "gc"))
-			res->end = res->start + 0x1ffff;
+	/* Only quirks for memory resources for now */
+	if ((res->flags & IORESOURCE_MEM) == 0)
+		return 0;
+
+	/* Grand Central has too large resource 0 on some machines */
+	if (index == 0 && !strcmp(np->name, "gc"))
+		res->end = res->start + 0x1ffff;
 
 	/* Airport has bogus resource 2 */
 	if (index >= 2 && !strcmp(np->name, "radio"))
 		return 1;
 
 #ifndef CONFIG_PPC64
 	/* DBDMAs may have bogus sizes */
 	if ((res->start & 0x0001f000) == 0x00008000)
 		res->end = res->start + 0xff;
 #endif /* CONFIG_PPC64 */
 
 	/* ESCC parent eats child resources. We could have added a
 	 * level of hierarchy, but I don't really feel the need
 	 * for it
 	 */
 	if (!strcmp(np->name, "escc"))
 		return 1;
 
 	/* ESCC has bogus resources >= 3 */
 	if (index >= 3 && !(strcmp(np->name, "ch-a") &&
 			    strcmp(np->name, "ch-b")))
 		return 1;
 
 	/* Media bay has too many resources, keep only first one */
 	if (index > 0 && !strcmp(np->name, "media-bay"))
 		return 1;
 
 	/* Some older IDE resources have bogus sizes */
 	if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") &&
 	      strcmp(np->type, "ide") && strcmp(np->type, "ata"))) {
 		if (index == 0 && (res->end - res->start) > 0xfff)
 			res->end = res->start + 0xfff;
 		if (index == 1 && (res->end - res->start) > 0xff)
 			res->end = res->start + 0xff;
-		}
 	}
 	return 0;
 }
 
+static void macio_create_fixup_irq(struct macio_dev *dev, int index,
+				   unsigned int line)
+{
+	unsigned int irq;
 
-static void macio_setup_interrupts(struct macio_dev *dev)
+	irq = irq_create_mapping(NULL, line, 0);
+	if (irq != NO_IRQ) {
+		dev->interrupt[index].start = irq;
+		dev->interrupt[index].flags = IORESOURCE_IRQ;
+		dev->interrupt[index].name = dev->ofdev.dev.bus_id;
+	}
+	if (dev->n_interrupts <= index)
+		dev->n_interrupts = index + 1;
+}
+
+static void macio_add_missing_resources(struct macio_dev *dev)
 {
 	struct device_node *np = dev->ofdev.node;
-	int i,j;
+	unsigned int irq_base;
+
+	/* Gatwick has some missing interrupts on child nodes */
+	if (dev->bus->chip->type != macio_gatwick)
+		return;
 
-	/* For now, we use pre-parsed entries in the device-tree for
-	 * interrupt routing and addresses, but we should change that
-	 * to dynamically parsed entries and so get rid of most of the
-	 * clutter in struct device_node
+	/* irq_base is always 64 on gatwick. I have no cleaner way to get
+	 * that value from here at this point
 	 */
-	for (i = j = 0; i < np->n_intrs; i++) {
+	irq_base = 64;
+
+	/* Fix SCC */
+	if (strcmp(np->name, "ch-a") == 0) {
+		macio_create_fixup_irq(dev, 0, 15 + irq_base);
+		macio_create_fixup_irq(dev, 1,  4 + irq_base);
+		macio_create_fixup_irq(dev, 2,  5 + irq_base);
+		printk(KERN_INFO "macio: fixed SCC irqs on gatwick\n");
+	}
+
+	/* Fix media-bay */
+	if (strcmp(np->name, "media-bay") == 0) {
+		macio_create_fixup_irq(dev, 0, 29 + irq_base);
+		printk(KERN_INFO "macio: fixed media-bay irq on gatwick\n");
+	}
+
+	/* Fix left media bay childs */
+	if (dev->media_bay != NULL && strcmp(np->name, "floppy") == 0) {
+		macio_create_fixup_irq(dev, 0, 19 + irq_base);
+		macio_create_fixup_irq(dev, 1,  1 + irq_base);
+		printk(KERN_INFO "macio: fixed left floppy irqs\n");
+	}
+	if (dev->media_bay != NULL && strcasecmp(np->name, "ata4") == 0) {
+		macio_create_fixup_irq(dev, 0, 14 + irq_base);
+		macio_create_fixup_irq(dev, 0,  3 + irq_base);
+		printk(KERN_INFO "macio: fixed left ide irqs\n");
+	}
+}
+
+static void macio_setup_interrupts(struct macio_dev *dev)
+{
+	struct device_node *np = dev->ofdev.node;
+	unsigned int irq;
+	int i = 0, j = 0;
+
+	for (;;) {
 		struct resource *res = &dev->interrupt[j];
 
 		if (j >= MACIO_DEV_COUNT_IRQS)
 			break;
-		res->start = np->intrs[i].line;
-		res->flags = IORESOURCE_IO;
-		if (np->intrs[j].sense)
-			res->flags |= IORESOURCE_IRQ_LOWLEVEL;
-		else
-			res->flags |= IORESOURCE_IRQ_HIGHEDGE;
+		irq = irq_of_parse_and_map(np, i++);
+		if (irq == NO_IRQ)
+			break;
+		res->start = irq;
+		res->flags = IORESOURCE_IRQ;
 		res->name = dev->ofdev.dev.bus_id;
-		if (macio_resource_quirks(np, res, i))
+		if (macio_resource_quirks(np, res, i - 1)) {
 			memset(res, 0, sizeof(struct resource));
-		else
+			continue;
+		} else
 			j++;
 	}
 	dev->n_interrupts = j;
@@ -445,6 +498,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 	/* Setup interrupts & resources */
 	macio_setup_interrupts(dev);
 	macio_setup_resources(dev, parent_res);
+	macio_add_missing_resources(dev);
 
 	/* Register with core */
 	if (of_device_register(&dev->ofdev) != 0) {
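Where the firmware leaves interrupts out of the device tree entirely (the Gatwick child nodes above), macio_create_fixup_irq() manufactures a mapping directly from a known hardware interrupt number via irq_create_mapping(NULL, hwirq, 0) on the default irq host; the three-argument form is specific to this tree. A sketch of the same fixup idiom, where demo_fixup_irq and the hwirq value are illustrative board knowledge rather than parsed data:

	static void demo_fixup_irq(struct resource *res, unsigned int hwirq,
				   const char *name)
	{
		unsigned int virq = irq_create_mapping(NULL, hwirq, 0);

		if (virq != NO_IRQ) {
			res->start = virq;
			res->flags = IORESOURCE_IRQ;
			res->name  = name;
		}
	}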
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index ff6d9bfdc3d2..f139a74696fe 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -497,8 +497,7 @@ int __init smu_init (void)
 	smu->doorbell = *data;
 	if (smu->doorbell < 0x50)
 		smu->doorbell += 0x50;
-	if (np->n_intrs > 0)
-		smu->db_irq = np->intrs[0].line;
+	smu->db_irq = irq_of_parse_and_map(np, 0);
 
 	of_node_put(np);
 
@@ -515,8 +514,7 @@ int __init smu_init (void)
 		smu->msg = *data;
 		if (smu->msg < 0x50)
 			smu->msg += 0x50;
-		if (np->n_intrs > 0)
-			smu->msg_irq = np->intrs[0].line;
+		smu->msg_irq = irq_of_parse_and_map(np, 0);
 		of_node_put(np);
 	} while(0);
 
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 6501db50fb83..69d5452fd22f 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -34,13 +34,6 @@
 static volatile unsigned char __iomem *via;
 static DEFINE_SPINLOCK(cuda_lock);
 
-#ifdef CONFIG_MAC
-#define CUDA_IRQ IRQ_MAC_ADB
-#define eieio()
-#else
-#define CUDA_IRQ vias->intrs[0].line
-#endif
-
 /* VIA registers - spaced 0x200 bytes apart */
 #define RS		0x200	/* skip between registers */
 #define B		0	/* B-side data */
@@ -189,11 +182,24 @@ int __init find_via_cuda(void)
 
 static int __init via_cuda_start(void)
 {
+	unsigned int irq;
+
 	if (via == NULL)
 		return -ENODEV;
 
-	if (request_irq(CUDA_IRQ, cuda_interrupt, 0, "ADB", cuda_interrupt)) {
-		printk(KERN_ERR "cuda_init: can't get irq %d\n", CUDA_IRQ);
+#ifdef CONFIG_MAC
+	irq = IRQ_MAC_ADB;
+#else /* CONFIG_MAC */
+	irq = irq_of_parse_and_map(vias, 0);
+	if (irq == NO_IRQ) {
+		printk(KERN_ERR "via-cuda: can't map interrupts for %s\n",
+		       vias->full_name);
+		return -ENODEV;
+	}
+#endif /* CONFIG_MAP */
+
+	if (request_irq(irq, cuda_interrupt, 0, "ADB", cuda_interrupt)) {
+		printk(KERN_ERR "via-cuda: can't request irq %d\n", irq);
 		return -EAGAIN;
 	}
 
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c1193d34ec9e..06ca80bfd6b9 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -64,10 +64,6 @@
 #include <asm/backlight.h>
 #endif
 
-#ifdef CONFIG_PPC32
-#include <asm/open_pic.h>
-#endif
-
 #include "via-pmu-event.h"
 
 /* Some compile options */
@@ -151,7 +147,7 @@ static int pmu_fully_inited = 0;
 static int pmu_has_adb;
 static struct device_node *gpio_node;
 static unsigned char __iomem *gpio_reg = NULL;
-static int gpio_irq = -1;
+static int gpio_irq = NO_IRQ;
 static int gpio_irq_enabled = -1;
 static volatile int pmu_suspended = 0;
 static spinlock_t pmu_lock;
@@ -403,22 +399,21 @@ static int __init pmu_init(void)
  */
 static int __init via_pmu_start(void)
 {
+	unsigned int irq;
+
 	if (vias == NULL)
 		return -ENODEV;
 
 	batt_req.complete = 1;
 
-#ifndef CONFIG_PPC_MERGE
-	if (pmu_kind == PMU_KEYLARGO_BASED)
-		openpic_set_irq_priority(vias->intrs[0].line,
-					 OPENPIC_PRIORITY_DEFAULT + 1);
-#endif
-
-	if (request_irq(vias->intrs[0].line, via_pmu_interrupt, 0, "VIA-PMU",
-			(void *)0)) {
-		printk(KERN_ERR "VIA-PMU: can't get irq %d\n",
-		       vias->intrs[0].line);
-		return -EAGAIN;
+	irq = irq_of_parse_and_map(vias, 0);
+	if (irq == NO_IRQ) {
+		printk(KERN_ERR "via-pmu: can't map interrupt\n");
+		return -ENODEV;
+	}
+	if (request_irq(irq, via_pmu_interrupt, 0, "VIA-PMU", (void *)0)) {
+		printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
+		return -ENODEV;
 	}
 
 	if (pmu_kind == PMU_KEYLARGO_BASED) {
@@ -426,10 +421,10 @@ static int __init via_pmu_start(void)
 		if (gpio_node == NULL)
 			gpio_node = of_find_node_by_name(NULL,
 							 "pmu-interrupt");
-		if (gpio_node && gpio_node->n_intrs > 0)
-			gpio_irq = gpio_node->intrs[0].line;
+		if (gpio_node)
+			gpio_irq = irq_of_parse_and_map(gpio_node, 0);
 
-		if (gpio_irq != -1) {
+		if (gpio_irq != NO_IRQ) {
 			if (request_irq(gpio_irq, gpio1_interrupt, 0,
 					"GPIO1 ADB", (void *)0))
 				printk(KERN_ERR "pmu: can't get irq %d"
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2fe32c261922..e4e161372a3e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1404,7 +1404,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
 	struct block_device *bdev;
 	char b[BDEVNAME_SIZE];
 
-	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+	bdev = open_partition_by_devnum(dev, FMODE_READ|FMODE_WRITE);
 	if (IS_ERR(bdev)) {
 		printk(KERN_ERR "md: could not open %s.\n",
 			__bdevname(dev, b));
@@ -1414,7 +1414,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
 	if (err) {
 		printk(KERN_ERR "md: could not bd_claim %s.\n",
 			bdevname(bdev, b));
-		blkdev_put(bdev);
+		blkdev_put_partition(bdev);
 		return err;
 	}
 	rdev->bdev = bdev;
@@ -1428,7 +1428,7 @@ static void unlock_rdev(mdk_rdev_t *rdev)
 	if (!bdev)
 		MD_BUG();
 	bd_release(bdev);
-	blkdev_put(bdev);
+	blkdev_put_partition(bdev);
 }
 
 void md_autodetect_dev(dev_t dev);
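md holds bd_mutex for the array while claiming member devices, which looks recursive to lockdep because all bd_mutex instances share one class. open_partition_by_devnum() and blkdev_put_partition() are this era's annotated variants of open_by_devnum()/blkdev_put() that place the member device's bd_mutex in a separate class; the annotated open must stay paired with the annotated put. A sketch under those assumptions; demo_claim is hypothetical:

	#include <linux/fs.h>
	#include <linux/err.h>

	static struct block_device *demo_claim(dev_t dev, void *holder)
	{
		struct block_device *bdev =
			open_partition_by_devnum(dev, FMODE_READ|FMODE_WRITE);

		if (IS_ERR(bdev))
			return bdev;
		if (bd_claim(bdev, holder)) {
			blkdev_put_partition(bdev);	/* not blkdev_put() */
			return ERR_PTR(-EBUSY);
		}
		return bdev;
	}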
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index 51740b346224..b114236f4395 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -33,6 +33,11 @@
 # For mptfc:
 #CFLAGS_mptfc.o += -DMPT_DEBUG_FC
 
+# For mptsas:
+#CFLAGS_mptsas.o += -DMPT_DEBUG_SAS
+#CFLAGS_mptsas.o += -DMPT_DEBUG_SAS_WIDE
+
+
 #=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-}  LSI_LOGIC
 
 obj-$(CONFIG_FUSION_SPI)	+= mptbase.o mptscsih.o mptspi.o
diff --git a/drivers/message/fusion/lsi/fc_log.h b/drivers/message/fusion/lsi/fc_log.h
deleted file mode 100644
index dc98d46f9071..000000000000
--- a/drivers/message/fusion/lsi/fc_log.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- *  Copyright (c) 2000-2001 LSI Logic Corporation. All rights reserved.
- *
- *           NAME:  fc_log.h
- *        SUMMARY:  MPI IocLogInfo definitions for the SYMFC9xx chips
- *    DESCRIPTION:  Contains the enumerated list of values that may be returned
- *                  in the IOCLogInfo field of a MPI Default Reply Message.
- *
- *  CREATION DATE:  6/02/2000
- *             ID:  $Id: fc_log.h,v 4.6 2001/07/26 14:41:33 sschremm Exp $
- */
-
-
-/*
- * MpiIocLogInfo_t enum
- *
- * These 32 bit values are used in the IOCLogInfo field of the MPI reply
- * messages.
- * The value is 0xabcccccc where
- *          a = The type of log info as per the MPI spec. Since these codes are
- *              all for Fibre Channel this value will always be 2.
- *          b = Specifies a subclass of the firmware where
- *                  0 = FCP Initiator
- *                  1 = FCP Target
- *                  2 = LAN
- *                  3 = MPI Message Layer
- *                  4 = FC Link
- *                  5 = Context Manager
- *                  6 = Invalid Field Offset
- *                  7 = State Change Info
- *                  all others are reserved for future use
- *          c = A specific value within the subclass.
- *
- * NOTE: Any new values should be added to the end of each subclass so that the
- *       codes remain consistent across firmware releases.
- */
-typedef enum _MpiIocLogInfoFc
-{
-	MPI_IOCLOGINFO_FC_INIT_BASE                     = 0x20000000,
-	MPI_IOCLOGINFO_FC_INIT_ERROR_OUT_OF_ORDER_FRAME = 0x20000001, /* received an out of order frame - unsupported */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_START_OF_FRAME = 0x20000002, /* Bad Rx Frame, bad start of frame primative */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_END_OF_FRAME   = 0x20000003, /* Bad Rx Frame, bad end of frame primative */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_OVER_RUN           = 0x20000004, /* Bad Rx Frame, overrun */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_RX_OTHER           = 0x20000005, /* Other errors caught by IOC which require retries */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_SUBPROC_DEAD       = 0x20000006, /* Main processor could not initialize sub-processor */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_RX_OVERRUN         = 0x20000007, /* Scatter Gather overrun */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_RX_BAD_STATUS      = 0x20000008, /* Receiver detected context mismatch via invalid header */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_RX_UNEXPECTED_FRAME= 0x20000009, /* CtxMgr detected unsupported frame type */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_LINK_FAILURE       = 0x2000000A, /* Link failure occurred */
-	MPI_IOCLOGINFO_FC_INIT_ERROR_TX_TIMEOUT         = 0x2000000B, /* Transmitter timeout error */
-
-	MPI_IOCLOGINFO_FC_TARGET_BASE                   = 0x21000000,
-	MPI_IOCLOGINFO_FC_TARGET_NO_PDISC               = 0x21000001, /* not sent because we are waiting for a PDISC from the initiator */
-	MPI_IOCLOGINFO_FC_TARGET_NO_LOGIN               = 0x21000002, /* not sent because we are not logged in to the remote node */
-	MPI_IOCLOGINFO_FC_TARGET_DOAR_KILLED_BY_LIP     = 0x21000003, /* Data Out, Auto Response, not sent due to a LIP */
-	MPI_IOCLOGINFO_FC_TARGET_DIAR_KILLED_BY_LIP     = 0x21000004, /* Data In, Auto Response, not sent due to a LIP */
-	MPI_IOCLOGINFO_FC_TARGET_DIAR_MISSING_DATA      = 0x21000005, /* Data In, Auto Response, missing data frames */
-	MPI_IOCLOGINFO_FC_TARGET_DONR_KILLED_BY_LIP     = 0x21000006, /* Data Out, No Response, not sent due to a LIP */
-	MPI_IOCLOGINFO_FC_TARGET_WRSP_KILLED_BY_LIP     = 0x21000007, /* Auto-response after a write not sent due to a LIP */
-	MPI_IOCLOGINFO_FC_TARGET_DINR_KILLED_BY_LIP     = 0x21000008, /* Data In, No Response, not completed due to a LIP */
-	MPI_IOCLOGINFO_FC_TARGET_DINR_MISSING_DATA      = 0x21000009, /* Data In, No Response, missing data frames */
-	MPI_IOCLOGINFO_FC_TARGET_MRSP_KILLED_BY_LIP     = 0x2100000a, /* Manual Response not sent due to a LIP */
-	MPI_IOCLOGINFO_FC_TARGET_NO_CLASS_3             = 0x2100000b, /* not sent because remote node does not support Class 3 */
-	MPI_IOCLOGINFO_FC_TARGET_LOGIN_NOT_VALID        = 0x2100000c, /* not sent because login to remote node not validated */
-	MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND          = 0x2100000e, /* cleared from the outbound queue after a logout */
-	MPI_IOCLOGINFO_FC_TARGET_WAITING_FOR_DATA_IN    = 0x2100000f, /* cleared waiting for data after a logout */
-
-	MPI_IOCLOGINFO_FC_LAN_BASE                      = 0x22000000,
-	MPI_IOCLOGINFO_FC_LAN_TRANS_SGL_MISSING         = 0x22000001, /* Transaction Context Sgl Missing */
-	MPI_IOCLOGINFO_FC_LAN_TRANS_WRONG_PLACE         = 0x22000002, /* Transaction Context found before an EOB */
-	MPI_IOCLOGINFO_FC_LAN_TRANS_RES_BITS_SET        = 0x22000003, /* Transaction Context value has reserved bits set */
-	MPI_IOCLOGINFO_FC_LAN_WRONG_SGL_FLAG            = 0x22000004, /* Invalid SGL Flags */
-
-	MPI_IOCLOGINFO_FC_MSG_BASE                      = 0x23000000,
-
-	MPI_IOCLOGINFO_FC_LINK_BASE                     = 0x24000000,
-	MPI_IOCLOGINFO_FC_LINK_LOOP_INIT_TIMEOUT        = 0x24000001, /* Loop initialization timed out */
-	MPI_IOCLOGINFO_FC_LINK_ALREADY_INITIALIZED      = 0x24000002, /* Another system controller already initialized the loop */
-	MPI_IOCLOGINFO_FC_LINK_LINK_NOT_ESTABLISHED     = 0x24000003, /* Not synchronized to signal or still negotiating (possible cable problem) */
-	MPI_IOCLOGINFO_FC_LINK_CRC_ERROR                = 0x24000004, /* CRC check detected error on received frame */
-
-	MPI_IOCLOGINFO_FC_CTX_BASE                      = 0x25000000,
-
-	MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET     = 0x26000000, /* The lower 24 bits give the byte offset of the field in the request message that is invalid */
-	MPI_IOCLOGINFO_FC_INVALID_FIELD_MAX_OFFSET      = 0x26ffffff,
-
-	MPI_IOCLOGINFO_FC_STATE_CHANGE                  = 0x27000000  /* The lower 24 bits give additional information concerning state change */
-
-} MpiIocLogInfoFc_t;
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index 02cdc840a06b..81ad77622dac 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Message independent structures and definitions
  *  Creation Date:  July 27, 2000
  *
- *    mpi.h Version:  01.05.10
+ *    mpi.h Version:  01.05.11
  *
  *  Version History
  *  ---------------
@@ -76,6 +76,7 @@
  *                      Added EEDP IOCStatus codes.
  *  08-03-05  01.05.09  Bumped MPI_HEADER_VERSION_UNIT.
  *  08-30-05  01.05.10  Added 2 new IOCStatus codes for Target.
+ *  03-27-06  01.05.11  Bumped MPI_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -106,7 +107,7 @@
 /* Note: The major versions of 0xe0 through 0xff are reserved */
 
 /* versioning for this MPI header set */
-#define MPI_HEADER_VERSION_UNIT             (0x0C)
+#define MPI_HEADER_VERSION_UNIT             (0x0D)
 #define MPI_HEADER_VERSION_DEV              (0x00)
 #define MPI_HEADER_VERSION_UNIT_MASK        (0xFF00)
 #define MPI_HEADER_VERSION_UNIT_SHIFT       (8)
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index b1becec27e1b..47e13e360c10 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Config message, structures, and Pages
  *  Creation Date:  July 27, 2000
  *
- *    mpi_cnfg.h Version:  01.05.11
+ *    mpi_cnfg.h Version:  01.05.12
  *
  *  Version History
  *  ---------------
@@ -266,6 +266,16 @@
  *                      Added postpone SATA Init bit to SAS IO Unit Page 1
  *                      ControlFlags.
  *                      Changed LogEntry format for Log Page 0.
+ *  03-27-06  01.05.12  Added two new Flags defines for Manufacturing Page 4.
+ *                      Added Manufacturing Page 7.
+ *                      Added MPI_IOCPAGE2_CAP_FLAGS_RAID_64_BIT_ADDRESSING.
+ *                      Added IOC Page 6.
+ *                      Added PrevBootDeviceForm field to CONFIG_PAGE_BIOS_2.
+ *                      Added MaxLBAHigh field to RAID Volume Page 0.
+ *                      Added Nvdata version fields to SAS IO Unit Page 0.
+ *                      Added AdditionalControlFlags, MaxTargetPortConnectTime,
+ *                      ReportDeviceMissingDelay, and IODeviceMissingDelay
+ *                      fields to SAS IO Unit Page 1.
  *  --------------------------------------------------------------------------
  */
 
@@ -631,9 +641,11 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
 } CONFIG_PAGE_MANUFACTURING_4, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_4,
   ManufacturingPage4_t, MPI_POINTER pManufacturingPage4_t;
 
-#define MPI_MANUFACTURING4_PAGEVERSION                  (0x03)
+#define MPI_MANUFACTURING4_PAGEVERSION                  (0x04)
 
 /* defines for the Flags field */
+#define MPI_MANPAGE4_FORCE_BAD_BLOCK_TABLE              (0x80)
+#define MPI_MANPAGE4_FORCE_OFFLINE_FAILOVER             (0x40)
 #define MPI_MANPAGE4_IME_DISABLE                        (0x20)
 #define MPI_MANPAGE4_IM_DISABLE                         (0x10)
 #define MPI_MANPAGE4_IS_DISABLE                         (0x08)
@@ -668,6 +680,66 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_6
 #define MPI_MANUFACTURING6_PAGEVERSION                  (0x00)
 
 
+typedef struct _MPI_MANPAGE7_CONNECTOR_INFO
+{
+    U32                         Pinout;                 /* 00h */
+    U8                          Connector[16];          /* 04h */
+    U8                          Location;               /* 14h */
+    U8                          Reserved1;              /* 15h */
+    U16                         Slot;                   /* 16h */
+    U32                         Reserved2;              /* 18h */
+} MPI_MANPAGE7_CONNECTOR_INFO, MPI_POINTER PTR_MPI_MANPAGE7_CONNECTOR_INFO,
+  MpiManPage7ConnectorInfo_t, MPI_POINTER pMpiManPage7ConnectorInfo_t;
+
+/* defines for the Pinout field */
+#define MPI_MANPAGE7_PINOUT_SFF_8484_L4                 (0x00080000)
+#define MPI_MANPAGE7_PINOUT_SFF_8484_L3                 (0x00040000)
+#define MPI_MANPAGE7_PINOUT_SFF_8484_L2                 (0x00020000)
+#define MPI_MANPAGE7_PINOUT_SFF_8484_L1                 (0x00010000)
+#define MPI_MANPAGE7_PINOUT_SFF_8470_L4                 (0x00000800)
+#define MPI_MANPAGE7_PINOUT_SFF_8470_L3                 (0x00000400)
+#define MPI_MANPAGE7_PINOUT_SFF_8470_L2                 (0x00000200)
+#define MPI_MANPAGE7_PINOUT_SFF_8470_L1                 (0x00000100)
+#define MPI_MANPAGE7_PINOUT_SFF_8482                    (0x00000002)
+#define MPI_MANPAGE7_PINOUT_CONNECTION_UNKNOWN          (0x00000001)
+
+/* defines for the Location field */
+#define MPI_MANPAGE7_LOCATION_UNKNOWN                   (0x01)
+#define MPI_MANPAGE7_LOCATION_INTERNAL                  (0x02)
+#define MPI_MANPAGE7_LOCATION_EXTERNAL                  (0x04)
+#define MPI_MANPAGE7_LOCATION_SWITCHABLE                (0x08)
+#define MPI_MANPAGE7_LOCATION_AUTO                      (0x10)
+#define MPI_MANPAGE7_LOCATION_NOT_PRESENT               (0x20)
+#define MPI_MANPAGE7_LOCATION_NOT_CONNECTED             (0x80)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumPhys at runtime.
+ */
+#ifndef MPI_MANPAGE7_CONNECTOR_INFO_MAX
+#define MPI_MANPAGE7_CONNECTOR_INFO_MAX   (1)
+#endif
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_7
+{
+    CONFIG_PAGE_HEADER          Header;                 /* 00h */
+    U32                         Reserved1;              /* 04h */
+    U32                         Reserved2;              /* 08h */
+    U32                         Flags;                  /* 0Ch */
+    U8                          EnclosureName[16];      /* 10h */
+    U8                          NumPhys;                /* 20h */
+    U8                          Reserved3;              /* 21h */
+    U16                         Reserved4;              /* 22h */
+    MPI_MANPAGE7_CONNECTOR_INFO ConnectorInfo[MPI_MANPAGE7_CONNECTOR_INFO_MAX]; /* 24h */
+} CONFIG_PAGE_MANUFACTURING_7, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_7,
+  ManufacturingPage7_t, MPI_POINTER pManufacturingPage7_t;
+
+#define MPI_MANUFACTURING7_PAGEVERSION                  (0x00)
+
+/* defines for the Flags field */
+#define MPI_MANPAGE7_FLAG_USE_SLOT_INFO                 (0x00000001)
+
+
 /****************************************************************************
 *   IO Unit Config Pages
 ****************************************************************************/
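ConnectorInfo[] above is a one-entry placeholder, per the comment in the page: hosts keep MPI_MANPAGE7_CONNECTOR_INFO_MAX at 1 and size the real buffer from NumPhys once the page header has been read. A sketch of the sizing arithmetic; demo_manpage7_bytes is illustrative and assumes num_phys >= 1:

	static int demo_manpage7_bytes(u8 num_phys)
	{
		return sizeof(CONFIG_PAGE_MANUFACTURING_7) +
		       (num_phys - 1) * sizeof(MPI_MANPAGE7_CONNECTOR_INFO);
	}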
@@ -867,7 +939,7 @@ typedef struct _CONFIG_PAGE_IOC_2
 } CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
   IOCPage2_t, MPI_POINTER pIOCPage2_t;
 
-#define MPI_IOCPAGE2_PAGEVERSION                        (0x03)
+#define MPI_IOCPAGE2_PAGEVERSION                        (0x04)
 
 /* IOC Page 2 Capabilities flags */
 
@@ -878,6 +950,7 @@ typedef struct _CONFIG_PAGE_IOC_2
 #define MPI_IOCPAGE2_CAP_FLAGS_RAID_6_SUPPORT           (0x00000010)
 #define MPI_IOCPAGE2_CAP_FLAGS_RAID_10_SUPPORT          (0x00000020)
 #define MPI_IOCPAGE2_CAP_FLAGS_RAID_50_SUPPORT          (0x00000040)
+#define MPI_IOCPAGE2_CAP_FLAGS_RAID_64_BIT_ADDRESSING   (0x10000000)
 #define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT              (0x20000000)
 #define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT            (0x40000000)
 #define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT    (0x80000000)
@@ -975,6 +1048,44 @@ typedef struct _CONFIG_PAGE_IOC_5
 
 #define MPI_IOCPAGE5_PAGEVERSION                        (0x00)
 
+typedef struct _CONFIG_PAGE_IOC_6
+{
+    CONFIG_PAGE_HEADER          Header;                         /* 00h */
+    U32                         CapabilitiesFlags;              /* 04h */
+    U8                          MaxDrivesIS;                    /* 08h */
+    U8                          MaxDrivesIM;                    /* 09h */
+    U8                          MaxDrivesIME;                   /* 0Ah */
+    U8                          Reserved1;                      /* 0Bh */
+    U8                          MinDrivesIS;                    /* 0Ch */
+    U8                          MinDrivesIM;                    /* 0Dh */
+    U8                          MinDrivesIME;                   /* 0Eh */
+    U8                          Reserved2;                      /* 0Fh */
+    U8                          MaxGlobalHotSpares;             /* 10h */
+    U8                          Reserved3;                      /* 11h */
+    U16                         Reserved4;                      /* 12h */
+    U32                         Reserved5;                      /* 14h */
+    U32                         SupportedStripeSizeMapIS;       /* 18h */
+    U32                         SupportedStripeSizeMapIME;      /* 1Ch */
+    U32                         Reserved6;                      /* 20h */
+    U8                          MetadataSize;                   /* 24h */
+    U8                          Reserved7;                      /* 25h */
+    U16                         Reserved8;                      /* 26h */
+    U16                         MaxBadBlockTableEntries;        /* 28h */
+    U16                         Reserved9;                      /* 2Ah */
+    U16                         IRNvsramUsage;                  /* 2Ch */
+    U16                         Reserved10;                     /* 2Eh */
+    U32                         IRNvsramVersion;                /* 30h */
+    U32                         Reserved11;                     /* 34h */
+    U32                         Reserved12;                     /* 38h */
+} CONFIG_PAGE_IOC_6, MPI_POINTER PTR_CONFIG_PAGE_IOC_6,
+  IOCPage6_t, MPI_POINTER pIOCPage6_t;
+
+#define MPI_IOCPAGE6_PAGEVERSION                        (0x00)
+
+/* IOC Page 6 Capabilities Flags */
+
+#define MPI_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE         (0x00000001)
+
 
 /****************************************************************************
 *   BIOS Config Pages
@@ -1218,13 +1329,13 @@ typedef struct _CONFIG_PAGE_BIOS_2
     U32                     Reserved5;                      /* 14h */
     U32                     Reserved6;                      /* 18h */
     U8                      BootDeviceForm;                 /* 1Ch */
-    U8                      Reserved7;                      /* 1Dh */
+    U8                      PrevBootDeviceForm;             /* 1Ch */
     U16                     Reserved8;                      /* 1Eh */
     MPI_BIOSPAGE2_BOOT_DEVICE   BootDevice;                 /* 20h */
 } CONFIG_PAGE_BIOS_2, MPI_POINTER PTR_CONFIG_PAGE_BIOS_2,
   BIOSPage2_t, MPI_POINTER pBIOSPage2_t;
 
-#define MPI_BIOSPAGE2_PAGEVERSION                       (0x01)
+#define MPI_BIOSPAGE2_PAGEVERSION                       (0x02)
 
 #define MPI_BIOSPAGE2_FORM_MASK                         (0x0F)
 #define MPI_BIOSPAGE2_FORM_ADAPTER_ORDER                (0x00)
@@ -2080,7 +2191,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
     RAID_VOL0_STATUS        VolumeStatus;       /* 08h */
     RAID_VOL0_SETTINGS      VolumeSettings;     /* 0Ch */
     U32                     MaxLBA;             /* 10h */
-    U32                     Reserved1;          /* 14h */
+    U32                     MaxLBAHigh;         /* 14h */
     U32                     StripeSize;         /* 18h */
     U32                     Reserved2;          /* 1Ch */
     U32                     Reserved3;          /* 20h */
@@ -2092,7 +2203,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
 } CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
   RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
 
-#define MPI_RAIDVOLPAGE0_PAGEVERSION                    (0x05)
+#define MPI_RAIDVOLPAGE0_PAGEVERSION                    (0x06)
 
 /* values for RAID Volume Page 0 InactiveStatus field */
 #define MPI_RAIDVOLPAGE0_UNKNOWN_INACTIVE               (0x00)
@@ -2324,7 +2435,8 @@ typedef struct _MPI_SAS_IO_UNIT0_PHY_DATA
 typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
 {
     CONFIG_EXTENDED_PAGE_HEADER     Header;                 /* 00h */
-    U32                             Reserved1;              /* 08h */
+    U16                             NvdataVersionDefault;   /* 08h */
+    U16                             NvdataVersionPersistent; /* 0Ah */
    U8                              NumPhys;                /* 0Ch */
    U8                              Reserved2;              /* 0Dh */
    U16                             Reserved3;              /* 0Eh */
@@ -2332,7 +2444,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
 } CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
   SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
 
-#define MPI_SASIOUNITPAGE0_PAGEVERSION                      (0x03)
+#define MPI_SASIOUNITPAGE0_PAGEVERSION                      (0x04)
 
 /* values for SAS IO Unit Page 0 PortFlags */
 #define MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS    (0x08)
@@ -2373,12 +2485,13 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
 
 typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA
 {
     U8          Port;                       /* 00h */
     U8          PortFlags;                  /* 01h */
     U8          PhyFlags;                   /* 02h */
     U8          MaxMinLinkRate;             /* 03h */
-    U32         ControllerPhyDeviceInfo;/* 04h */
-    U32         Reserved1;                  /* 08h */
+    U32         ControllerPhyDeviceInfo;    /* 04h */
+    U16         MaxTargetPortConnectTime;   /* 08h */
+    U16         Reserved1;                  /* 0Ah */
 } MPI_SAS_IO_UNIT1_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT1_PHY_DATA,
   SasIOUnit1PhyData, MPI_POINTER pSasIOUnit1PhyData;
 
@@ -2395,15 +2508,17 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
     CONFIG_EXTENDED_PAGE_HEADER Header;                     /* 00h */
     U16                         ControlFlags;               /* 08h */
     U16                         MaxNumSATATargets;          /* 0Ah */
-    U32                         Reserved1;                  /* 0Ch */
+    U16                         AdditionalControlFlags;     /* 0Ch */
+    U16                         Reserved1;                  /* 0Eh */
     U8                          NumPhys;                    /* 10h */
     U8                          SATAMaxQDepth;              /* 11h */
-    U16                         Reserved2;                  /* 12h */
+    U8                          ReportDeviceMissingDelay;   /* 12h */
+    U8                          IODeviceMissingDelay;       /* 13h */
     MPI_SAS_IO_UNIT1_PHY_DATA   PhyData[MPI_SAS_IOUNIT1_PHY_MAX]; /* 14h */
 } CONFIG_PAGE_SAS_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_1,
   SasIOUnitPage1_t, MPI_POINTER pSasIOUnitPage1_t;
 
-#define MPI_SASIOUNITPAGE1_PAGEVERSION                      (0x05)
+#define MPI_SASIOUNITPAGE1_PAGEVERSION                      (0x06)
 
 /* values for SAS IO Unit Page 1 ControlFlags */
 #define MPI_SAS_IOUNIT1_CONTROL_DEVICE_SELF_TEST            (0x8000)
@@ -2428,6 +2543,13 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
 #define MPI_SAS_IOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY         (0x0002)
 #define MPI_SAS_IOUNIT1_CONTROL_CLEAR_AFFILIATION           (0x0001)
 
+/* values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI_SAS_IOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE       (0x0001)
+
+/* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
+#define MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK         (0x7F)
+#define MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16              (0x80)
+
 /* values for SAS IO Unit Page 1 PortFlags */
 #define MPI_SAS_IOUNIT1_PORT_FLAGS_0_TARGET_IOC_NUM         (0x00)
 #define MPI_SAS_IOUNIT1_PORT_FLAGS_1_TARGET_IOC_NUM         (0x04)
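ReportDeviceMissingDelay packs a 7-bit count with a x16 unit flag, per the defines just added. Decoding it into a plain count; the demo_* name is illustrative:

	static unsigned int demo_report_missing_delay(u8 field)
	{
		unsigned int delay =
			field & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

		if (field & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16)
			delay *= 16;	/* unit flag scales the raw count */
		return delay;
	}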
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index 4a5f8dd1d766..582cfe7c2aa1 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -6,25 +6,25 @@
  Copyright (c) 2000-2005 LSI Logic Corporation.
 
  ---------------------------------------
-Header Set Release Version:  01.05.12
-Header Set Release Date:     08-30-05
+Header Set Release Version:  01.05.13
+Header Set Release Date:     03-27-06
  ---------------------------------------
 
  Filename               Current version     Prior version
  ----------             ---------------     -------------
-mpi.h                   01.05.10            01.05.09
-mpi_ioc.h               01.05.10            01.05.09
-mpi_cnfg.h              01.05.11            01.05.10
-mpi_init.h              01.05.06            01.05.06
-mpi_targ.h              01.05.05            01.05.05
+mpi.h                   01.05.11            01.05.10
+mpi_ioc.h               01.05.11            01.05.10
+mpi_cnfg.h              01.05.12            01.05.11
+mpi_init.h              01.05.07            01.05.06
+mpi_targ.h              01.05.06            01.05.05
 mpi_fc.h                01.05.01            01.05.01
 mpi_lan.h               01.05.01            01.05.01
 mpi_raid.h              01.05.02            01.05.02
 mpi_tool.h              01.05.03            01.05.03
 mpi_inb.h               01.05.01            01.05.01
-mpi_sas.h               01.05.02            01.05.01
-mpi_type.h              01.05.02            01.05.01
-mpi_history.txt         01.05.12            01.05.11
+mpi_sas.h               01.05.03            01.05.02
+mpi_type.h              01.05.02            01.05.02
+mpi_history.txt         01.05.13            01.05.12
 
 
  * Date      Version   Description
@@ -93,6 +93,7 @@ mpi.h
  *                      Added EEDP IOCStatus codes.
  *  08-03-05  01.05.09  Bumped MPI_HEADER_VERSION_UNIT.
  *  08-30-05  01.05.10  Added 2 new IOCStatus codes for Target.
+ *  03-27-06  01.05.11  Bumped MPI_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
 
 mpi_ioc.h
@@ -170,6 +171,17 @@ mpi_ioc.h
  *                      Added new ReasonCode value for SAS Device Status Change
  *                      event.
  *                      Added new family code for FC949E.
+ *  03-27-06  01.05.11  Added MPI_IOCFACTS_CAPABILITY_TLR.
+ *                      Added additional Reason Codes and more event data fields
+ *                      to EVENT_DATA_SAS_DEVICE_STATUS_CHANGE.
+ *                      Added EVENT_DATA_SAS_BROADCAST_PRIMITIVE structure and
+ *                      new event.
+ *                      Added MPI_EVENT_SAS_SMP_ERROR and event data structure.
+ *                      Added MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE and event
+ *                      data structure.
+ *                      Added MPI_EVENT_SAS_INIT_TABLE_OVERFLOW and event
+ *                      data structure.
+ *                      Added MPI_EXT_IMAGE_TYPE_INITIALIZATION.
  *  --------------------------------------------------------------------------
 
 mpi_cnfg.h
@@ -425,6 +437,16 @@ mpi_cnfg.h
  *                      Added postpone SATA Init bit to SAS IO Unit Page 1
  *                      ControlFlags.
  *                      Changed LogEntry format for Log Page 0.
+ *  03-27-06  01.05.12  Added two new Flags defines for Manufacturing Page 4.
+ *                      Added Manufacturing Page 7.
+ *                      Added MPI_IOCPAGE2_CAP_FLAGS_RAID_64_BIT_ADDRESSING.
+ *                      Added IOC Page 6.
+ *                      Added PrevBootDeviceForm field to CONFIG_PAGE_BIOS_2.
+ *                      Added MaxLBAHigh field to RAID Volume Page 0.
+ *                      Added Nvdata version fields to SAS IO Unit Page 0.
+ *                      Added AdditionalControlFlags, MaxTargetPortConnectTime,
+ *                      ReportDeviceMissingDelay, and IODeviceMissingDelay
+ *                      fields to SAS IO Unit Page 1.
  *  --------------------------------------------------------------------------
 
 mpi_init.h
@@ -467,6 +489,7 @@ mpi_init.h
  *                      Added four new defines for SEP SlotStatus.
  *  08-03-05  01.05.06  Fixed some MPI_SCSIIO32_MSGFLGS_ defines to make them
  *                      unique in the first 32 characters.
+ *  03-27-06  01.05.07  Added Task Management type of Clear ACA.
  *  --------------------------------------------------------------------------
 
 mpi_targ.h
@@ -511,6 +534,7 @@ mpi_targ.h
  *  02-22-05  01.05.03  Changed a comment.
  *  03-11-05  01.05.04  Removed TargetAssistExtended Request.
  *  06-24-05  01.05.05  Added TargetAssistExtended structures and defines.
+ *  03-27-06  01.05.06  Added a comment.
  *  --------------------------------------------------------------------------
 
 mpi_fc.h
@@ -610,6 +634,10 @@ mpi_sas.h
  *  08-30-05  01.05.02  Added DeviceInfo bit for SEP.
  *                      Added PrimFlags and Primitive field to SAS IO Unit
  *                      Control request, and added a new operation code.
+ *  03-27-06  01.05.03  Added Force Full Discovery, Transmit Port Select Signal,
+ *                      and Remove Device operations to SAS IO Unit Control.
+ *                      Added DevHandle field to SAS IO Unit Control request and
+ *                      reply.
  *  --------------------------------------------------------------------------
 
 mpi_type.h
@@ -625,20 +653,20 @@ mpi_type.h
 
 mpi_history.txt         Parts list history
 
-Filename    01.05.12  01.05.11  01.05.10  01.05.09
-----------  --------  --------  --------  --------
-mpi.h       01.05.10  01.05.09  01.05.08  01.05.07
-mpi_ioc.h   01.05.10  01.05.09  01.05.09  01.05.08
-mpi_cnfg.h  01.05.11  01.05.10  01.05.09  01.05.08
-mpi_init.h  01.05.06  01.05.06  01.05.05  01.05.04
-mpi_targ.h  01.05.05  01.05.05  01.05.05  01.05.04
-mpi_fc.h    01.05.01  01.05.01  01.05.01  01.05.01
-mpi_lan.h   01.05.01  01.05.01  01.05.01  01.05.01
-mpi_raid.h  01.05.02  01.05.02  01.05.02  01.05.02
-mpi_tool.h  01.05.03  01.05.03  01.05.03  01.05.03
-mpi_inb.h   01.05.01  01.05.01  01.05.01  01.05.01
-mpi_sas.h   01.05.02  01.05.01  01.05.01  01.05.01
-mpi_type.h  01.05.02  01.05.01  01.05.01  01.05.01
+Filename    01.05.13  01.05.12  01.05.11  01.05.10  01.05.09
+----------  --------  --------  --------  --------  --------
+mpi.h       01.05.11  01.05.10  01.05.09  01.05.08  01.05.07
+mpi_ioc.h   01.05.11  01.05.10  01.05.09  01.05.09  01.05.08
+mpi_cnfg.h  01.05.12  01.05.11  01.05.10  01.05.09  01.05.08
+mpi_init.h  01.05.07  01.05.06  01.05.06  01.05.05  01.05.04
+mpi_targ.h  01.05.06  01.05.05  01.05.05  01.05.05  01.05.04
+mpi_fc.h    01.05.01  01.05.01  01.05.01  01.05.01  01.05.01
+mpi_lan.h   01.05.01  01.05.01  01.05.01  01.05.01  01.05.01
+mpi_raid.h  01.05.02  01.05.02  01.05.02  01.05.02  01.05.02
+mpi_tool.h  01.05.03  01.05.03  01.05.03  01.05.03  01.05.03
+mpi_inb.h   01.05.01  01.05.01  01.05.01  01.05.01  01.05.01
+mpi_sas.h   01.05.03  01.05.02  01.05.01  01.05.01  01.05.01
+mpi_type.h  01.05.02  01.05.02  01.05.01  01.05.01  01.05.01
 
 Filename    01.05.08  01.05.07  01.05.06  01.05.05  01.05.04  01.05.03
 ----------  --------  --------  --------  --------  --------  --------
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
index 68941f459ca3..c1c678989a23 100644
--- a/drivers/message/fusion/lsi/mpi_init.h
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -6,7 +6,7 @@
6 * Title: MPI initiator mode messages and structures 6 * Title: MPI initiator mode messages and structures
7 * Creation Date: June 8, 2000 7 * Creation Date: June 8, 2000
8 * 8 *
9 * mpi_init.h Version: 01.05.06 9 * mpi_init.h Version: 01.05.07
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -52,6 +52,7 @@
52 * Added four new defines for SEP SlotStatus. 52 * Added four new defines for SEP SlotStatus.
53 * 08-03-05 01.05.06 Fixed some MPI_SCSIIO32_MSGFLGS_ defines to make them 53 * 08-03-05 01.05.06 Fixed some MPI_SCSIIO32_MSGFLGS_ defines to make them
54 * unique in the first 32 characters. 54 * unique in the first 32 characters.
55 * 03-27-06 01.05.07 Added Task Management type of Clear ACA.
55 * -------------------------------------------------------------------------- 56 * --------------------------------------------------------------------------
56 */ 57 */
57 58
@@ -427,6 +428,7 @@ typedef struct _MSG_SCSI_TASK_MGMT
427#define MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) 428#define MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
428#define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) 429#define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
429#define MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) 430#define MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
431#define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_ACA (0x08)
430 432
431/* MsgFlags bits */ 433/* MsgFlags bits */
432#define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION (0x00) 434#define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION (0x00)
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 2c5f43fa7c73..18ba407fd399 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -6,7 +6,7 @@
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: August 11, 2000 7 * Creation Date: August 11, 2000
8 * 8 *
9 * mpi_ioc.h Version: 01.05.10 9 * mpi_ioc.h Version: 01.05.11
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -87,6 +87,17 @@
87 * Added new ReasonCode value for SAS Device Status Change 87 * Added new ReasonCode value for SAS Device Status Change
88 * event. 88 * event.
89 * Added new family code for FC949E. 89 * Added new family code for FC949E.
90 * 03-27-06 01.05.11 Added MPI_IOCFACTS_CAPABILITY_TLR.
91 * Added additional Reason Codes and more event data fields
92 * to EVENT_DATA_SAS_DEVICE_STATUS_CHANGE.
93 * Added EVENT_DATA_SAS_BROADCAST_PRIMITIVE structure and
94 * new event.
95 * Added MPI_EVENT_SAS_SMP_ERROR and event data structure.
96 * Added MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE and event
97 * data structure.
98 * Added MPI_EVENT_SAS_INIT_TABLE_OVERFLOW and event
99 * data structure.
100 * Added MPI_EXT_IMAGE_TYPE_INITIALIZATION.
90 * -------------------------------------------------------------------------- 101 * --------------------------------------------------------------------------
91 */ 102 */
92 103
@@ -272,6 +283,7 @@ typedef struct _MSG_IOC_FACTS_REPLY
272#define MPI_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) 283#define MPI_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
273#define MPI_IOCFACTS_CAPABILITY_SCSIIO32 (0x00000200) 284#define MPI_IOCFACTS_CAPABILITY_SCSIIO32 (0x00000200)
274#define MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16 (0x00000400) 285#define MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16 (0x00000400)
286#define MPI_IOCFACTS_CAPABILITY_TLR (0x00000800)
275 287
276 288
277/***************************************************************************** 289/*****************************************************************************
@@ -448,30 +460,34 @@ typedef struct _MSG_EVENT_ACK_REPLY
448 460
449/* Event */ 461/* Event */
450 462
451#define MPI_EVENT_NONE (0x00000000) 463#define MPI_EVENT_NONE (0x00000000)
452#define MPI_EVENT_LOG_DATA (0x00000001) 464#define MPI_EVENT_LOG_DATA (0x00000001)
453#define MPI_EVENT_STATE_CHANGE (0x00000002) 465#define MPI_EVENT_STATE_CHANGE (0x00000002)
454#define MPI_EVENT_UNIT_ATTENTION (0x00000003) 466#define MPI_EVENT_UNIT_ATTENTION (0x00000003)
455#define MPI_EVENT_IOC_BUS_RESET (0x00000004) 467#define MPI_EVENT_IOC_BUS_RESET (0x00000004)
456#define MPI_EVENT_EXT_BUS_RESET (0x00000005) 468#define MPI_EVENT_EXT_BUS_RESET (0x00000005)
457#define MPI_EVENT_RESCAN (0x00000006) 469#define MPI_EVENT_RESCAN (0x00000006)
458#define MPI_EVENT_LINK_STATUS_CHANGE (0x00000007) 470#define MPI_EVENT_LINK_STATUS_CHANGE (0x00000007)
459#define MPI_EVENT_LOOP_STATE_CHANGE (0x00000008) 471#define MPI_EVENT_LOOP_STATE_CHANGE (0x00000008)
460#define MPI_EVENT_LOGOUT (0x00000009) 472#define MPI_EVENT_LOGOUT (0x00000009)
461#define MPI_EVENT_EVENT_CHANGE (0x0000000A) 473#define MPI_EVENT_EVENT_CHANGE (0x0000000A)
462#define MPI_EVENT_INTEGRATED_RAID (0x0000000B) 474#define MPI_EVENT_INTEGRATED_RAID (0x0000000B)
463#define MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE (0x0000000C) 475#define MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE (0x0000000C)
464#define MPI_EVENT_ON_BUS_TIMER_EXPIRED (0x0000000D) 476#define MPI_EVENT_ON_BUS_TIMER_EXPIRED (0x0000000D)
465#define MPI_EVENT_QUEUE_FULL (0x0000000E) 477#define MPI_EVENT_QUEUE_FULL (0x0000000E)
466#define MPI_EVENT_SAS_DEVICE_STATUS_CHANGE (0x0000000F) 478#define MPI_EVENT_SAS_DEVICE_STATUS_CHANGE (0x0000000F)
467#define MPI_EVENT_SAS_SES (0x00000010) 479#define MPI_EVENT_SAS_SES (0x00000010)
468#define MPI_EVENT_PERSISTENT_TABLE_FULL (0x00000011) 480#define MPI_EVENT_PERSISTENT_TABLE_FULL (0x00000011)
469#define MPI_EVENT_SAS_PHY_LINK_STATUS (0x00000012) 481#define MPI_EVENT_SAS_PHY_LINK_STATUS (0x00000012)
470#define MPI_EVENT_SAS_DISCOVERY_ERROR (0x00000013) 482#define MPI_EVENT_SAS_DISCOVERY_ERROR (0x00000013)
471#define MPI_EVENT_IR_RESYNC_UPDATE (0x00000014) 483#define MPI_EVENT_IR_RESYNC_UPDATE (0x00000014)
472#define MPI_EVENT_IR2 (0x00000015) 484#define MPI_EVENT_IR2 (0x00000015)
473#define MPI_EVENT_SAS_DISCOVERY (0x00000016) 485#define MPI_EVENT_SAS_DISCOVERY (0x00000016)
474#define MPI_EVENT_LOG_ENTRY_ADDED (0x00000021) 486#define MPI_EVENT_SAS_BROADCAST_PRIMITIVE (0x00000017)
487#define MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x00000018)
488#define MPI_EVENT_SAS_INIT_TABLE_OVERFLOW (0x00000019)
489#define MPI_EVENT_SAS_SMP_ERROR (0x0000001A)
490#define MPI_EVENT_LOG_ENTRY_ADDED (0x00000021)
475 491
476/* AckRequired field values */ 492/* AckRequired field values */
477 493
@@ -558,18 +574,25 @@ typedef struct _EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
558 U8 PhyNum; /* 0Eh */ 574 U8 PhyNum; /* 0Eh */
559 U8 Reserved1; /* 0Fh */ 575 U8 Reserved1; /* 0Fh */
560 U64 SASAddress; /* 10h */ 576 U64 SASAddress; /* 10h */
577 U8 LUN[8]; /* 18h */
578 U16 TaskTag; /* 20h */
579 U16 Reserved2; /* 22h */
561} EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, 580} EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
562 MPI_POINTER PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, 581 MPI_POINTER PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
563 MpiEventDataSasDeviceStatusChange_t, 582 MpiEventDataSasDeviceStatusChange_t,
564 MPI_POINTER pMpiEventDataSasDeviceStatusChange_t; 583 MPI_POINTER pMpiEventDataSasDeviceStatusChange_t;
565 584
566/* MPI SAS Device Status Change Event data ReasonCode values */ 585/* MPI SAS Device Status Change Event data ReasonCode values */
567#define MPI_EVENT_SAS_DEV_STAT_RC_ADDED (0x03) 586#define MPI_EVENT_SAS_DEV_STAT_RC_ADDED (0x03)
568#define MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING (0x04) 587#define MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING (0x04)
569#define MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) 588#define MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
570#define MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED (0x06) 589#define MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED (0x06)
571#define MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) 590#define MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
572#define MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) 591#define MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
592#define MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
593#define MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
594#define MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
595#define MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
573 596
574 597
575/* SCSI Event data for Queue Full event */ 598/* SCSI Event data for Queue Full event */
@@ -742,6 +765,27 @@ typedef struct _EVENT_DATA_SAS_SES
742} EVENT_DATA_SAS_SES, MPI_POINTER PTR_EVENT_DATA_SAS_SES, 765} EVENT_DATA_SAS_SES, MPI_POINTER PTR_EVENT_DATA_SAS_SES,
743 MpiEventDataSasSes_t, MPI_POINTER pMpiEventDataSasSes_t; 766 MpiEventDataSasSes_t, MPI_POINTER pMpiEventDataSasSes_t;
744 767
768/* SAS Broadcast Primitive Event data */
769
770typedef struct _EVENT_DATA_SAS_BROADCAST_PRIMITIVE
771{
772 U8 PhyNum; /* 00h */
773 U8 Port; /* 01h */
774 U8 PortWidth; /* 02h */
 775 U8                      Primitive;                  /* 03h */
776} EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
777 MPI_POINTER PTR_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
778 MpiEventDataSasBroadcastPrimitive_t,
779 MPI_POINTER pMpiEventDataSasBroadcastPrimitive_t;
780
781#define MPI_EVENT_PRIMITIVE_CHANGE (0x01)
782#define MPI_EVENT_PRIMITIVE_EXPANDER (0x03)
783#define MPI_EVENT_PRIMITIVE_RESERVED2 (0x04)
784#define MPI_EVENT_PRIMITIVE_RESERVED3 (0x05)
785#define MPI_EVENT_PRIMITIVE_RESERVED4 (0x06)
786#define MPI_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
787#define MPI_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08)
788
745/* SAS Phy Link Status Event data */ 789/* SAS Phy Link Status Event data */
746 790
747typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS 791typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS
@@ -804,6 +848,53 @@ typedef struct _EVENT_DATA_DISCOVERY_ERROR
804#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_PATHS (0x00000800) 848#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_PATHS (0x00000800)
805#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000) 849#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000)
806 850
851/* SAS SMP Error Event data */
852
853typedef struct _EVENT_DATA_SAS_SMP_ERROR
854{
855 U8 Status; /* 00h */
856 U8 Port; /* 01h */
857 U8 SMPFunctionResult; /* 02h */
858 U8 Reserved1; /* 03h */
859 U64 SASAddress; /* 04h */
860} EVENT_DATA_SAS_SMP_ERROR, MPI_POINTER PTR_EVENT_DATA_SAS_SMP_ERROR,
861 MpiEventDataSasSmpError_t, MPI_POINTER pMpiEventDataSasSmpError_t;
862
863/* defines for the Status field of the SAS SMP Error event */
864#define MPI_EVENT_SAS_SMP_FUNCTION_RESULT_VALID (0x00)
865#define MPI_EVENT_SAS_SMP_CRC_ERROR (0x01)
866#define MPI_EVENT_SAS_SMP_TIMEOUT (0x02)
867#define MPI_EVENT_SAS_SMP_NO_DESTINATION (0x03)
868#define MPI_EVENT_SAS_SMP_BAD_DESTINATION (0x04)
869
870/* SAS Initiator Device Status Change Event data */
871
872typedef struct _EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE
873{
874 U8 ReasonCode; /* 00h */
875 U8 Port; /* 01h */
876 U16 DevHandle; /* 02h */
877 U64 SASAddress; /* 04h */
878} EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
879 MPI_POINTER PTR_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
880 MpiEventDataSasInitDevStatusChange_t,
881 MPI_POINTER pMpiEventDataSasInitDevStatusChange_t;
882
883/* defines for the ReasonCode field of the SAS Initiator Device Status Change event */
884#define MPI_EVENT_SAS_INIT_RC_ADDED (0x01)
885
886/* SAS Initiator Device Table Overflow Event data */
887
888typedef struct _EVENT_DATA_SAS_INIT_TABLE_OVERFLOW
889{
890 U8 MaxInit; /* 00h */
891 U8 CurrentInit; /* 01h */
892 U16 Reserved1; /* 02h */
893} EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
894 MPI_POINTER PTR_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
895 MpiEventDataSasInitTableOverflow_t,
896 MPI_POINTER pMpiEventDataSasInitTableOverflow_t;
897
807 898
808/***************************************************************************** 899/*****************************************************************************
809* 900*
@@ -1013,5 +1104,6 @@ typedef struct _MPI_EXT_IMAGE_HEADER
1013#define MPI_EXT_IMAGE_TYPE_FW (0x01) 1104#define MPI_EXT_IMAGE_TYPE_FW (0x01)
1014#define MPI_EXT_IMAGE_TYPE_NVDATA (0x03) 1105#define MPI_EXT_IMAGE_TYPE_NVDATA (0x03)
1015#define MPI_EXT_IMAGE_TYPE_BOOTLOADER (0x04) 1106#define MPI_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
1107#define MPI_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
1016 1108
1017#endif 1109#endif
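
The event codes and event-data structures added to mpi_ioc.h above are consumed from a driver's event-notification path. A minimal sketch of how a handler might dispatch on the new codes, assuming lsi/mpi.h and lsi/mpi_ioc.h are included; the handler itself is hypothetical, while the defines and structures are the ones this patch introduces:

	static void example_sas_event(u32 event, void *event_data)
	{
		EVENT_DATA_SAS_BROADCAST_PRIMITIVE *bp;
		EVENT_DATA_SAS_SMP_ERROR *se;

		switch (event) {
		case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
			bp = event_data;
			if (bp->Primitive != MPI_EVENT_PRIMITIVE_CHANGE)
				break;
			/* topology moved: a rediscovery pass would be kicked
			 * off here, e.g. via MPI_SAS_OP_FORCE_FULL_DISCOVERY */
			break;
		case MPI_EVENT_SAS_SMP_ERROR:
			se = event_data;
			if (se->Status == MPI_EVENT_SAS_SMP_FUNCTION_RESULT_VALID)
				break;	/* SMPFunctionResult holds the real status */
			/* otherwise a transport-level failure: CRC error,
			 * timeout, or a missing/bad destination */
			break;
		case MPI_EVENT_SAS_INIT_TABLE_OVERFLOW:
			/* event data reports MaxInit vs. CurrentInit */
			break;
		}
	}
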
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index a9c14ad132ce..871ebc08b706 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -13,6 +13,8 @@
13#ifndef IOPI_IOCLOGINFO_H_INCLUDED 13#ifndef IOPI_IOCLOGINFO_H_INCLUDED
14#define IOPI_IOCLOGINFO_H_INCLUDED 14#define IOPI_IOCLOGINFO_H_INCLUDED
15 15
16#define SAS_LOGINFO_NEXUS_LOSS 0x31170000
17#define SAS_LOGINFO_MASK 0xFFFF0000
16 18
17/****************************************************************************/ 19/****************************************************************************/
18/* IOC LOGINFO defines, 0x00000000 - 0x0FFFFFFF */ 20/* IOC LOGINFO defines, 0x00000000 - 0x0FFFFFFF */
@@ -51,6 +53,9 @@
51#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DNM (0x00030500) /* Device Not Mapped */ 53#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DNM (0x00030500) /* Device Not Mapped */
52#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PERSIST (0x00030600) /* Persistent Page not found */ 54#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PERSIST (0x00030600) /* Persistent Page not found */
53#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DEFAULT (0x00030700) /* Default Page not found */ 55#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DEFAULT (0x00030700) /* Default Page not found */
56
57#define IOP_LOGINFO_CODE_DIAG_MSG_ERROR (0x00040000) /* Error handling diag msg - or'd with diag status */
58
54#define IOP_LOGINFO_CODE_TASK_TERMINATED (0x00050000) 59#define IOP_LOGINFO_CODE_TASK_TERMINATED (0x00050000)
55 60
56#define IOP_LOGINFO_CODE_ENCL_MGMT_READ_ACTION_ERR0R (0x00060001) /* Read Action not supported for SEP msg */ 61#define IOP_LOGINFO_CODE_ENCL_MGMT_READ_ACTION_ERR0R (0x00060001) /* Read Action not supported for SEP msg */
@@ -103,6 +108,7 @@
103#define PL_LOGINFO_CODE_IO_EXECUTED (0x00140000) 108#define PL_LOGINFO_CODE_IO_EXECUTED (0x00140000)
104#define PL_LOGINFO_CODE_PERS_RESV_OUT_NOT_AFFIL_OWNER (0x00150000) 109#define PL_LOGINFO_CODE_PERS_RESV_OUT_NOT_AFFIL_OWNER (0x00150000)
105#define PL_LOGINFO_CODE_OPEN_TXDMA_ABORT (0x00160000) 110#define PL_LOGINFO_CODE_OPEN_TXDMA_ABORT (0x00160000)
111#define PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY (0x00170000)
106#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE (0x00000100) 112#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE (0x00000100)
107#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_NO_DEST_TIMEOUT (0x00000101) 113#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_NO_DEST_TIMEOUT (0x00000101)
108#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ORR_TIMEOUT (0x0000011A) /* Open Reject (Retry) Timeout */ 114#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ORR_TIMEOUT (0x0000011A) /* Open Reject (Retry) Timeout */
@@ -165,11 +171,81 @@
165/****************************************************************************/ 171/****************************************************************************/
166/* IR LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = IR */ 172/* IR LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = IR */
167/****************************************************************************/ 173/****************************************************************************/
168#define IR_LOGINFO_CODE_UNUSED1 (0x00010000) 174#define IR_LOGINFO_RAID_ACTION_ERROR (0x00010000)
169#define IR_LOGINFO_CODE_UNUSED2 (0x00020000) 175#define IR_LOGINFO_CODE_UNUSED2 (0x00020000)
176
177/* Amount of information passed down for Create Volume is too large */
178#define IR_LOGINFO_VOLUME_CREATE_INVALID_LENGTH (0x00010001)
179/* Creation of duplicate volume attempted (Bus/Target ID checked) */
180#define IR_LOGINFO_VOLUME_CREATE_DUPLICATE (0x00010002)
181/* Creation failed due to maximum number of supported volumes exceeded */
182#define IR_LOGINFO_VOLUME_CREATE_NO_SLOTS (0x00010003)
183/* Creation failed due to DMA error in trying to read from host */
184#define IR_LOGINFO_VOLUME_CREATE_DMA_ERROR (0x00010004)
185/* Creation failed due to invalid volume type passed down */
186#define IR_LOGINFO_VOLUME_CREATE_INVALID_VOLUME_TYPE (0x00010005)
187/* Creation failed due to error reading MFG Page 4 */
188#define IR_LOGINFO_VOLUME_MFG_PAGE4_ERROR (0x00010006)
189/* Creation failed when trying to create internal structures */
190#define IR_LOGINFO_VOLUME_INTERNAL_CONFIG_STRUCTURE_ERROR (0x00010007)
191
192/* Activation failed due to trying to activate an already active volume */
193#define IR_LOGINFO_VOLUME_ACTIVATING_AN_ACTIVE_VOLUME (0x00010010)
 194/* Activation failed due to trying to activate an unsupported volume type */
195#define IR_LOGINFO_VOLUME_ACTIVATING_INVALID_VOLUME_TYPE (0x00010011)
 196/* Activation failed due to trying to activate too many volumes */
197#define IR_LOGINFO_VOLUME_ACTIVATING_TOO_MANY_VOLUMES (0x00010012)
198/* Activation failed due to Volume ID in use already */
199#define IR_LOGINFO_VOLUME_ACTIVATING_VOLUME_ID_IN_USE (0x00010013)
 200/* Activation failed: call to activateVolume returned failure */
201#define IR_LOGINFO_VOLUME_ACTIVATE_VOLUME_FAILED (0x00010014)
202/* Activation failed trying to import the volume */
203#define IR_LOGINFO_VOLUME_ACTIVATING_IMPORT_VOLUME_FAILED (0x00010015)
204
 205/* Phys Disk creation failed: too many phys disks */
206#define IR_LOGINFO_PHYSDISK_CREATE_TOO_MANY_DISKS (0x00010020)
 207/* Amount of information passed down for Create Physdisk is too large */
208#define IR_LOGINFO_PHYSDISK_CREATE_INVALID_LENGTH (0x00010021)
209/* Creation failed due to DMA error in trying to read from host */
210#define IR_LOGINFO_PHYSDISK_CREATE_DMA_ERROR (0x00010022)
211/* Creation failed due to invalid Bus TargetID passed down */
212#define IR_LOGINFO_PHYSDISK_CREATE_BUS_TID_INVALID (0x00010023)
213/* Creation failed due to error in creating RAID Phys Disk Config Page */
214#define IR_LOGINFO_PHYSDISK_CREATE_CONFIG_PAGE_ERROR (0x00010024)
215
216
217/* Compatibility Error : IR Disabled */
218#define IR_LOGINFO_COMPAT_ERROR_RAID_DISABLED (0x00010030)
 219/* Compatibility Error : Inquiry Command failed */
220#define IR_LOGINFO_COMPAT_ERROR_INQUIRY_FAILED (0x00010031)
221/* Compatibility Error : Device not direct access device */
222#define IR_LOGINFO_COMPAT_ERROR_NOT_DIRECT_ACCESS (0x00010032)
223/* Compatibility Error : Removable device found */
224#define IR_LOGINFO_COMPAT_ERROR_REMOVABLE_FOUND (0x00010033)
225/* Compatibility Error : Device SCSI Version not 2 or higher */
226#define IR_LOGINFO_COMPAT_ERROR_NEED_SCSI_2_OR_HIGHER (0x00010034)
227/* Compatibility Error : SATA device, 48 BIT LBA not supported */
228#define IR_LOGINFO_COMPAT_ERROR_SATA_48BIT_LBA_NOT_SUPPORTED (0x00010035)
229/* Compatibility Error : Device does not have 512 byte block sizes */
230#define IR_LOGINFO_COMPAT_ERROR_DEVICE_NOT_512_BYTE_BLOCK (0x00010036)
231/* Compatibility Error : Volume Type check failed */
232#define IR_LOGINFO_COMPAT_ERROR_VOLUME_TYPE_CHECK_FAILED (0x00010037)
233/* Compatibility Error : Volume Type is unsupported by FW */
234#define IR_LOGINFO_COMPAT_ERROR_UNSUPPORTED_VOLUME_TYPE (0x00010038)
235/* Compatibility Error : Disk drive too small for use in volume */
236#define IR_LOGINFO_COMPAT_ERROR_DISK_TOO_SMALL (0x00010039)
237/* Compatibility Error : Phys disk for Create Volume not found */
238#define IR_LOGINFO_COMPAT_ERROR_PHYS_DISK_NOT_FOUND (0x0001003A)
239/* Compatibility Error : membership count error, too many or too few disks for volume type */
240#define IR_LOGINFO_COMPAT_ERROR_MEMBERSHIP_COUNT (0x0001003B)
241/* Compatibility Error : Disk stripe sizes must be 64KB */
242#define IR_LOGINFO_COMPAT_ERROR_NON_64K_STRIPE_SIZE (0x0001003C)
243/* Compatibility Error : IME size limited to < 2TB */
244#define IR_LOGINFO_COMPAT_ERROR_IME_VOL_NOT_CURRENTLY_SUPPORTED (0x0001003D)
245
170 246
171/****************************************************************************/ 247/****************************************************************************/
172/* Defines for convienence */ 248/* Defines for convenience */
173/****************************************************************************/ 249/****************************************************************************/
174#define IOC_LOGINFO_PREFIX_IOP ((MPI_IOCLOGINFO_TYPE_SAS << MPI_IOCLOGINFO_TYPE_SHIFT) | IOC_LOGINFO_ORIGINATOR_IOP) 250#define IOC_LOGINFO_PREFIX_IOP ((MPI_IOCLOGINFO_TYPE_SAS << MPI_IOCLOGINFO_TYPE_SHIFT) | IOC_LOGINFO_ORIGINATOR_IOP)
175#define IOC_LOGINFO_PREFIX_PL ((MPI_IOCLOGINFO_TYPE_SAS << MPI_IOCLOGINFO_TYPE_SHIFT) | IOC_LOGINFO_ORIGINATOR_PL) 251#define IOC_LOGINFO_PREFIX_PL ((MPI_IOCLOGINFO_TYPE_SAS << MPI_IOCLOGINFO_TYPE_SHIFT) | IOC_LOGINFO_ORIGINATOR_PL)
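
The two defines added at the top of mpi_log_sas.h let a caller classify a firmware LogInfo word by its upper 16 bits regardless of the subcode in the lower 16. A minimal sketch (the helper name is hypothetical; the defines are the ones added above):

	static inline int example_loginfo_is_nexus_loss(u32 log_info)
	{
		/* SAS_LOGINFO_MASK keeps the 16-bit code, drops the subcode */
		return (log_info & SAS_LOGINFO_MASK) == SAS_LOGINFO_NEXUS_LOSS;
	}
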
diff --git a/drivers/message/fusion/lsi/mpi_sas.h b/drivers/message/fusion/lsi/mpi_sas.h
index 70514867bddf..50b8f0a8f456 100644
--- a/drivers/message/fusion/lsi/mpi_sas.h
+++ b/drivers/message/fusion/lsi/mpi_sas.h
@@ -6,7 +6,7 @@
6 * Title: MPI Serial Attached SCSI structures and definitions 6 * Title: MPI Serial Attached SCSI structures and definitions
7 * Creation Date: August 19, 2004 7 * Creation Date: August 19, 2004
8 * 8 *
9 * mpi_sas.h Version: 01.05.02 9 * mpi_sas.h Version: 01.05.03
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -17,6 +17,10 @@
17 * 08-30-05 01.05.02 Added DeviceInfo bit for SEP. 17 * 08-30-05 01.05.02 Added DeviceInfo bit for SEP.
18 * Added PrimFlags and Primitive field to SAS IO Unit 18 * Added PrimFlags and Primitive field to SAS IO Unit
19 * Control request, and added a new operation code. 19 * Control request, and added a new operation code.
20 * 03-27-06 01.05.03 Added Force Full Discovery, Transmit Port Select Signal,
21 * and Remove Device operations to SAS IO Unit Control.
22 * Added DevHandle field to SAS IO Unit Control request and
23 * reply.
20 * -------------------------------------------------------------------------- 24 * --------------------------------------------------------------------------
21 */ 25 */
22 26
@@ -209,7 +213,7 @@ typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
209 U8 Reserved1; /* 01h */ 213 U8 Reserved1; /* 01h */
210 U8 ChainOffset; /* 02h */ 214 U8 ChainOffset; /* 02h */
211 U8 Function; /* 03h */ 215 U8 Function; /* 03h */
212 U16 Reserved2; /* 04h */ 216 U16 DevHandle; /* 04h */
213 U8 Reserved3; /* 06h */ 217 U8 Reserved3; /* 06h */
214 U8 MsgFlags; /* 07h */ 218 U8 MsgFlags; /* 07h */
215 U32 MsgContext; /* 08h */ 219 U32 MsgContext; /* 08h */
@@ -231,6 +235,9 @@ typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
231#define MPI_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08) 235#define MPI_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
232#define MPI_SAS_OP_MAP_CURRENT (0x09) 236#define MPI_SAS_OP_MAP_CURRENT (0x09)
233#define MPI_SAS_OP_SEND_PRIMITIVE (0x0A) 237#define MPI_SAS_OP_SEND_PRIMITIVE (0x0A)
238#define MPI_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
239#define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
240#define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE (0x0D)
234 241
235/* values for the PrimFlags field */ 242/* values for the PrimFlags field */
236#define MPI_SAS_PRIMFLAGS_SINGLE (0x08) 243#define MPI_SAS_PRIMFLAGS_SINGLE (0x08)
@@ -245,7 +252,7 @@ typedef struct _MSG_SAS_IOUNIT_CONTROL_REPLY
245 U8 Reserved1; /* 01h */ 252 U8 Reserved1; /* 01h */
246 U8 MsgLength; /* 02h */ 253 U8 MsgLength; /* 02h */
247 U8 Function; /* 03h */ 254 U8 Function; /* 03h */
248 U16 Reserved2; /* 04h */ 255 U16 DevHandle; /* 04h */
249 U8 Reserved3; /* 06h */ 256 U8 Reserved3; /* 06h */
250 U8 MsgFlags; /* 07h */ 257 U8 MsgFlags; /* 07h */
251 U32 MsgContext; /* 08h */ 258 U32 MsgContext; /* 08h */
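
With DevHandle taking the slot that was Reserved2, a SAS IO Unit Control request can now address a single attached device. A sketch of issuing the new Remove Device operation — the function is hypothetical, while the structure, operation code, and the mpt_get_msg_frame()/mpt_put_msg_frame() frame helpers follow the driver's usual pattern:

	static void example_remove_device(MPT_ADAPTER *ioc, int cb_idx, u16 handle)
	{
		MPT_FRAME_HDR *mf = mpt_get_msg_frame(cb_idx, ioc);
		MSG_SAS_IOUNIT_CONTROL_REQUEST *req;

		if (!mf)
			return;
		req = (MSG_SAS_IOUNIT_CONTROL_REQUEST *)mf;
		memset(req, 0, sizeof(*req));
		req->Function  = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
		req->Operation = MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE;
		req->DevHandle = cpu_to_le16(handle);	/* field was Reserved2 */
		mpt_put_msg_frame(cb_idx, ioc, mf);
	}
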
diff --git a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h
index 3f462859ceea..20b667315773 100644
--- a/drivers/message/fusion/lsi/mpi_targ.h
+++ b/drivers/message/fusion/lsi/mpi_targ.h
@@ -6,7 +6,7 @@
6 * Title: MPI Target mode messages and structures 6 * Title: MPI Target mode messages and structures
7 * Creation Date: June 22, 2000 7 * Creation Date: June 22, 2000
8 * 8 *
9 * mpi_targ.h Version: 01.05.05 9 * mpi_targ.h Version: 01.05.06
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -54,6 +54,7 @@
54 * 02-22-05 01.05.03 Changed a comment. 54 * 02-22-05 01.05.03 Changed a comment.
55 * 03-11-05 01.05.04 Removed TargetAssistExtended Request. 55 * 03-11-05 01.05.04 Removed TargetAssistExtended Request.
56 * 06-24-05 01.05.05 Added TargetAssistExtended structures and defines. 56 * 06-24-05 01.05.05 Added TargetAssistExtended structures and defines.
57 * 03-27-06 01.05.06 Added a comment.
57 * -------------------------------------------------------------------------- 58 * --------------------------------------------------------------------------
58 */ 59 */
59 60
@@ -351,7 +352,7 @@ typedef struct _MSG_TARGET_ASSIST_REQUEST
351#define TARGET_ASSIST_FLAGS_CONFIRMED (0x08) 352#define TARGET_ASSIST_FLAGS_CONFIRMED (0x08)
352#define TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER (0x80) 353#define TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER (0x80)
353 354
354 355/* Standard Target Mode Reply message */
355typedef struct _MSG_TARGET_ERROR_REPLY 356typedef struct _MSG_TARGET_ERROR_REPLY
356{ 357{
357 U16 Reserved; /* 00h */ 358 U16 Reserved; /* 00h */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 57543603d6c8..43308df64623 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -368,20 +368,21 @@ static irqreturn_t
368mpt_interrupt(int irq, void *bus_id, struct pt_regs *r) 368mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
369{ 369{
370 MPT_ADAPTER *ioc = bus_id; 370 MPT_ADAPTER *ioc = bus_id;
371 u32 pa; 371 u32 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
372
373 if (pa == 0xFFFFFFFF)
374 return IRQ_NONE;
372 375
373 /* 376 /*
374 * Drain the reply FIFO! 377 * Drain the reply FIFO!
375 */ 378 */
376 while (1) { 379 do {
377 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo); 380 if (pa & MPI_ADDRESS_REPLY_A_BIT)
378 if (pa == 0xFFFFFFFF)
379 return IRQ_HANDLED;
380 else if (pa & MPI_ADDRESS_REPLY_A_BIT)
381 mpt_reply(ioc, pa); 381 mpt_reply(ioc, pa);
382 else 382 else
383 mpt_turbo_reply(ioc, pa); 383 mpt_turbo_reply(ioc, pa);
384 } 384 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
385 } while (pa != 0xFFFFFFFF);
385 386
386 return IRQ_HANDLED; 387 return IRQ_HANDLED;
387} 388}
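
The reworked mpt_interrupt() follows the usual pattern for handlers on shared interrupt lines: sample the reply FIFO once, return IRQ_NONE when the IOC raised nothing (so other handlers sharing the line get a chance), and otherwise drain until the FIFO reads back empty (0xFFFFFFFF). The same shape in generic form — a sketch only, with every name hypothetical:

	#include <linux/interrupt.h>
	#include <asm/io.h>

	#define EXAMPLE_FIFO_EMPTY 0xFFFFFFFF	/* reads as all-ones when empty */

	struct example_dev {
		void __iomem *reply_fifo;
	};

	static void example_process(struct example_dev *dev, u32 reply)
	{
		/* consume one reply; details elided */
	}

	static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct example_dev *dev = dev_id;
		u32 pa = readl(dev->reply_fifo);

		if (pa == EXAMPLE_FIFO_EMPTY)	/* device idle: not our interrupt */
			return IRQ_NONE;

		do {				/* drain everything pending */
			example_process(dev, pa);
			pa = readl(dev->reply_fifo);
		} while (pa != EXAMPLE_FIFO_EMPTY);

		return IRQ_HANDLED;
	}
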
@@ -1219,31 +1220,25 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1219 port = psize = 0; 1220 port = psize = 0;
1220 for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) { 1221 for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) {
1221 if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) { 1222 if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
1223 if (psize)
1224 continue;
1222 /* Get I/O space! */ 1225 /* Get I/O space! */
1223 port = pci_resource_start(pdev, ii); 1226 port = pci_resource_start(pdev, ii);
1224 psize = pci_resource_len(pdev,ii); 1227 psize = pci_resource_len(pdev,ii);
1225 } else { 1228 } else {
1229 if (msize)
1230 continue;
1226 /* Get memmap */ 1231 /* Get memmap */
1227 mem_phys = pci_resource_start(pdev, ii); 1232 mem_phys = pci_resource_start(pdev, ii);
1228 msize = pci_resource_len(pdev,ii); 1233 msize = pci_resource_len(pdev,ii);
1229 break;
1230 } 1234 }
1231 } 1235 }
1232 ioc->mem_size = msize; 1236 ioc->mem_size = msize;
1233 1237
1234 if (ii == DEVICE_COUNT_RESOURCE) {
1235 printk(KERN_ERR MYNAM ": ERROR - MPT adapter has no memory regions defined!\n");
1236 kfree(ioc);
1237 return -EINVAL;
1238 }
1239
1240 dinitprintk((KERN_INFO MYNAM ": MPT adapter @ %lx, msize=%dd bytes\n", mem_phys, msize));
1241 dinitprintk((KERN_INFO MYNAM ": (port i/o @ %lx, psize=%dd bytes)\n", port, psize));
1242
1243 mem = NULL; 1238 mem = NULL;
1244 /* Get logical ptr for PciMem0 space */ 1239 /* Get logical ptr for PciMem0 space */
1245 /*mem = ioremap(mem_phys, msize);*/ 1240 /*mem = ioremap(mem_phys, msize);*/
1246 mem = ioremap(mem_phys, 0x100); 1241 mem = ioremap(mem_phys, msize);
1247 if (mem == NULL) { 1242 if (mem == NULL) {
1248 printk(KERN_ERR MYNAM ": ERROR - Unable to map adapter memory!\n"); 1243 printk(KERN_ERR MYNAM ": ERROR - Unable to map adapter memory!\n");
1249 kfree(ioc); 1244 kfree(ioc);
@@ -1343,11 +1338,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1343 ioc->bus_type = SAS; 1338 ioc->bus_type = SAS;
1344 ioc->errata_flag_1064 = 1; 1339 ioc->errata_flag_1064 = 1;
1345 } 1340 }
1346 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066) {
1347 ioc->prod_name = "LSISAS1066";
1348 ioc->bus_type = SAS;
1349 ioc->errata_flag_1064 = 1;
1350 }
1351 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) { 1341 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) {
1352 ioc->prod_name = "LSISAS1068"; 1342 ioc->prod_name = "LSISAS1068";
1353 ioc->bus_type = SAS; 1343 ioc->bus_type = SAS;
@@ -1357,14 +1347,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1357 ioc->prod_name = "LSISAS1064E"; 1347 ioc->prod_name = "LSISAS1064E";
1358 ioc->bus_type = SAS; 1348 ioc->bus_type = SAS;
1359 } 1349 }
1360 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066E) {
1361 ioc->prod_name = "LSISAS1066E";
1362 ioc->bus_type = SAS;
1363 }
1364 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) { 1350 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) {
1365 ioc->prod_name = "LSISAS1068E"; 1351 ioc->prod_name = "LSISAS1068E";
1366 ioc->bus_type = SAS; 1352 ioc->bus_type = SAS;
1367 } 1353 }
1354 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
1355 ioc->prod_name = "LSISAS1078";
1356 ioc->bus_type = SAS;
1357 }
1368 1358
1369 if (ioc->errata_flag_1064) 1359 if (ioc->errata_flag_1064)
1370 pci_disable_io_access(pdev); 1360 pci_disable_io_access(pdev);
@@ -3184,6 +3174,37 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3184 u32 diag1val = 0; 3174 u32 diag1val = 0;
3185#endif 3175#endif
3186 3176
3177 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
3178 drsprintk((MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
3179 "address=%p\n", ioc->name, __FUNCTION__,
3180 &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
3181 CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
3182 if (sleepFlag == CAN_SLEEP)
3183 msleep(1);
3184 else
3185 mdelay(1);
3186
3187 for (count = 0; count < 60; count ++) {
3188 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
3189 doorbell &= MPI_IOC_STATE_MASK;
3190
3191 drsprintk((MYIOC_s_INFO_FMT
3192 "looking for READY STATE: doorbell=%x"
3193 " count=%d\n",
3194 ioc->name, doorbell, count));
3195 if (doorbell == MPI_IOC_STATE_READY) {
3196 return 0;
3197 }
3198
3199 /* wait 1 sec */
3200 if (sleepFlag == CAN_SLEEP)
3201 msleep(1000);
3202 else
3203 mdelay(1000);
3204 }
3205 return -1;
3206 }
3207
3187 /* Clear any existing interrupts */ 3208 /* Clear any existing interrupts */
3188 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 3209 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3189 3210
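
Worked out from the constants in the new SAS1078 branch above: after writing 0x07 to Reset_1078 the code sleeps 1 ms, then polls the doorbell up to 60 times at 1 s intervals, so the IOC has roughly 0.001 s + 60 * 1 s, about 60 s, to reach MPI_IOC_STATE_READY before mpt_diag_reset() gives up and returns -1.
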
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 32ae4d664545..a5ce10b67d02 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -75,8 +75,8 @@
75#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 75#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
76#endif 76#endif
77 77
78#define MPT_LINUX_VERSION_COMMON "3.03.10" 78#define MPT_LINUX_VERSION_COMMON "3.04.00"
79#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.10" 79#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.00"
80#define WHAT_MAGIC_STRING "@" "(" "#" ")" 80#define WHAT_MAGIC_STRING "@" "(" "#" ")"
81 81
82#define show_mptmod_ver(s,ver) \ 82#define show_mptmod_ver(s,ver) \
@@ -307,7 +307,8 @@ typedef struct _SYSIF_REGS
307 u32 HostIndex; /* 50 Host Index register */ 307 u32 HostIndex; /* 50 Host Index register */
308 u32 Reserved4[15]; /* 54-8F */ 308 u32 Reserved4[15]; /* 54-8F */
309 u32 Fubar; /* 90 For Fubar usage */ 309 u32 Fubar; /* 90 For Fubar usage */
310 u32 Reserved5[27]; /* 94-FF */ 310 u32 Reserved5[1050];/* 94-10F8 */
311 u32 Reset_1078; /* 10FC Reset 1078 */
311} SYSIF_REGS; 312} SYSIF_REGS;
312 313
313/* 314/*
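
A quick check of the new SYSIF_REGS layout (assuming the usual packing of consecutive u32 registers): Reserved5 starts at 0x94 and spans 1050 words, so the next register sits at 0x94 + 1050 * 4 = 0x10FC, exactly where the Reset_1078 register used by the SAS1078 branch of mpt_diag_reset() is expected. A compile-time assertion in the same spirit — illustrative only, not part of the patch:

	#include <linux/kernel.h>	/* BUILD_BUG_ON */
	#include <linux/stddef.h>	/* offsetof */

	static inline void example_check_sysif_layout(void)
	{
		/* breaks the build if the Reserved5 sizing ever drifts */
		BUILD_BUG_ON(offsetof(SYSIF_REGS, Reset_1078) != 0x10FC);
	}
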
@@ -341,6 +342,7 @@ typedef struct _VirtTarget {
341 u8 negoFlags; /* bit field, see above */ 342 u8 negoFlags; /* bit field, see above */
342 u8 raidVolume; /* set, if RAID Volume */ 343 u8 raidVolume; /* set, if RAID Volume */
343 u8 type; /* byte 0 of Inquiry data */ 344 u8 type; /* byte 0 of Inquiry data */
345 u8 deleted; /* target in process of being removed */
344 u32 num_luns; 346 u32 num_luns;
345 u32 luns[8]; /* Max LUNs is 256 */ 347 u32 luns[8]; /* Max LUNs is 256 */
346} VirtTarget; 348} VirtTarget;
@@ -629,10 +631,11 @@ typedef struct _MPT_ADAPTER
629 struct mutex sas_discovery_mutex; 631 struct mutex sas_discovery_mutex;
630 u8 sas_discovery_runtime; 632 u8 sas_discovery_runtime;
631 u8 sas_discovery_ignore_events; 633 u8 sas_discovery_ignore_events;
634 u16 handle;
 632 int sas_index; /* index referencing */ 635 int sas_index; /* index referencing */
633 MPT_SAS_MGMT sas_mgmt; 636 MPT_SAS_MGMT sas_mgmt;
634 int num_ports; 637 int num_ports;
635 struct work_struct mptscsih_persistTask; 638 struct work_struct sas_persist_task;
636 639
637 struct work_struct fc_setup_reset_work; 640 struct work_struct fc_setup_reset_work;
638 struct list_head fc_rports; 641 struct list_head fc_rports;
@@ -641,6 +644,7 @@ typedef struct _MPT_ADAPTER
641 struct work_struct fc_rescan_work; 644 struct work_struct fc_rescan_work;
642 char fc_rescan_work_q_name[KOBJ_NAME_LEN]; 645 char fc_rescan_work_q_name[KOBJ_NAME_LEN];
643 struct workqueue_struct *fc_rescan_work_q; 646 struct workqueue_struct *fc_rescan_work_q;
647 u8 port_serial_number;
644} MPT_ADAPTER; 648} MPT_ADAPTER;
645 649
646/* 650/*
@@ -892,6 +896,13 @@ typedef struct _mpt_sge {
892#define DBG_DUMP_REQUEST_FRAME_HDR(mfp) 896#define DBG_DUMP_REQUEST_FRAME_HDR(mfp)
893#endif 897#endif
894 898
899// debug sas wide ports
900#ifdef MPT_DEBUG_SAS_WIDE
901#define dsaswideprintk(x) printk x
902#else
903#define dsaswideprintk(x)
904#endif
905
895 906
896/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 907/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
897 908
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 3ff8378ea660..a8f2fa985455 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -132,21 +132,21 @@ static struct scsi_host_template mptfc_driver_template = {
132 */ 132 */
133 133
134static struct pci_device_id mptfc_pci_table[] = { 134static struct pci_device_id mptfc_pci_table[] = {
135 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC909, 135 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC909,
136 PCI_ANY_ID, PCI_ANY_ID }, 136 PCI_ANY_ID, PCI_ANY_ID },
137 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC919, 137 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919,
138 PCI_ANY_ID, PCI_ANY_ID }, 138 PCI_ANY_ID, PCI_ANY_ID },
139 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC929, 139 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929,
140 PCI_ANY_ID, PCI_ANY_ID }, 140 PCI_ANY_ID, PCI_ANY_ID },
141 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC919X, 141 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919X,
142 PCI_ANY_ID, PCI_ANY_ID }, 142 PCI_ANY_ID, PCI_ANY_ID },
143 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC929X, 143 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929X,
144 PCI_ANY_ID, PCI_ANY_ID }, 144 PCI_ANY_ID, PCI_ANY_ID },
145 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC939X, 145 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC939X,
146 PCI_ANY_ID, PCI_ANY_ID }, 146 PCI_ANY_ID, PCI_ANY_ID },
147 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC949X, 147 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949X,
148 PCI_ANY_ID, PCI_ANY_ID }, 148 PCI_ANY_ID, PCI_ANY_ID },
149 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC949ES, 149 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949E,
150 PCI_ANY_ID, PCI_ANY_ID }, 150 PCI_ANY_ID, PCI_ANY_ID },
151 {0} /* Terminating entry */ 151 {0} /* Terminating entry */
152}; 152};
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 85689ab46cbc..f7bd8b11ed3b 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -50,11 +50,14 @@
50#include <linux/errno.h> 50#include <linux/errno.h>
51#include <linux/sched.h> 51#include <linux/sched.h>
52#include <linux/workqueue.h> 52#include <linux/workqueue.h>
53#include <linux/delay.h> /* for mdelay */
53 54
55#include <scsi/scsi.h>
54#include <scsi/scsi_cmnd.h> 56#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h> 57#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h> 58#include <scsi/scsi_host.h>
57#include <scsi/scsi_transport_sas.h> 59#include <scsi/scsi_transport_sas.h>
60#include <scsi/scsi_dbg.h>
58 61
59#include "mptbase.h" 62#include "mptbase.h"
60#include "mptscsih.h" 63#include "mptscsih.h"
@@ -137,23 +140,37 @@ struct mptsas_devinfo {
137 u32 device_info; /* bitfield detailed info about this device */ 140 u32 device_info; /* bitfield detailed info about this device */
138}; 141};
139 142
143/*
144 * Specific details on ports, wide/narrow
145 */
146struct mptsas_portinfo_details{
147 u8 port_id; /* port number provided to transport */
148 u16 num_phys; /* number of phys belong to this port */
149 u64 phy_bitmask; /* TODO, extend support for 255 phys */
150 struct sas_rphy *rphy; /* transport layer rphy object */
151 struct sas_port *port; /* transport layer port object */
152 struct scsi_target *starget;
153 struct mptsas_portinfo *port_info;
154};
155
140struct mptsas_phyinfo { 156struct mptsas_phyinfo {
141 u8 phy_id; /* phy index */ 157 u8 phy_id; /* phy index */
142 u8 port_id; /* port number this phy is part of */ 158 u8 port_id; /* firmware port identifier */
143 u8 negotiated_link_rate; /* nego'd link rate for this phy */ 159 u8 negotiated_link_rate; /* nego'd link rate for this phy */
144 u8 hw_link_rate; /* hardware max/min phys link rate */ 160 u8 hw_link_rate; /* hardware max/min phys link rate */
145 u8 programmed_link_rate; /* programmed max/min phy link rate */ 161 u8 programmed_link_rate; /* programmed max/min phy link rate */
162 u8 sas_port_add_phy; /* flag to request sas_port_add_phy*/
146 struct mptsas_devinfo identify; /* point to phy device info */ 163 struct mptsas_devinfo identify; /* point to phy device info */
147 struct mptsas_devinfo attached; /* point to attached device info */ 164 struct mptsas_devinfo attached; /* point to attached device info */
148 struct sas_phy *phy; 165 struct sas_phy *phy; /* transport layer phy object */
149 struct sas_rphy *rphy; 166 struct mptsas_portinfo *portinfo;
150 struct scsi_target *starget; 167 struct mptsas_portinfo_details * port_details;
151}; 168};
152 169
153struct mptsas_portinfo { 170struct mptsas_portinfo {
154 struct list_head list; 171 struct list_head list;
155 u16 handle; /* unique id to address this */ 172 u16 handle; /* unique id to address this */
156 u8 num_phys; /* number of phys */ 173 u16 num_phys; /* number of phys */
157 struct mptsas_phyinfo *phy_info; 174 struct mptsas_phyinfo *phy_info;
158}; 175};
159 176
@@ -169,7 +186,7 @@ struct mptsas_enclosure {
169 u8 sep_channel; /* SEP channel logical channel id */ 186 u8 sep_channel; /* SEP channel logical channel id */
170}; 187};
171 188
172#ifdef SASDEBUG 189#ifdef MPT_DEBUG_SAS
173static void mptsas_print_phy_data(MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) 190static void mptsas_print_phy_data(MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
174{ 191{
175 printk("---- IO UNIT PAGE 0 ------------\n"); 192 printk("---- IO UNIT PAGE 0 ------------\n");
@@ -305,7 +322,7 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
305static inline int 322static inline int
306mptsas_is_end_device(struct mptsas_devinfo * attached) 323mptsas_is_end_device(struct mptsas_devinfo * attached)
307{ 324{
308 if ((attached->handle) && 325 if ((attached->sas_address) &&
309 (attached->device_info & 326 (attached->device_info &
310 MPI_SAS_DEVICE_INFO_END_DEVICE) && 327 MPI_SAS_DEVICE_INFO_END_DEVICE) &&
311 ((attached->device_info & 328 ((attached->device_info &
@@ -319,6 +336,253 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
319 return 0; 336 return 0;
320} 337}
321 338
339/* no mutex */
340static void
341mptsas_port_delete(struct mptsas_portinfo_details * port_details)
342{
343 struct mptsas_portinfo *port_info;
344 struct mptsas_phyinfo *phy_info;
345 u8 i;
346
347 if (!port_details)
348 return;
349
350 port_info = port_details->port_info;
351 phy_info = port_info->phy_info;
352
353 dsaswideprintk((KERN_DEBUG "%s: [%p]: port=%02d num_phys=%02d "
354 "bitmask=0x%016llX\n",
355 __FUNCTION__, port_details, port_details->port_id,
356 port_details->num_phys, port_details->phy_bitmask));
357
358 for (i = 0; i < port_info->num_phys; i++, phy_info++) {
359 if(phy_info->port_details != port_details)
360 continue;
361 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
362 phy_info->port_details = NULL;
363 }
364 kfree(port_details);
365}
366
367static inline struct sas_rphy *
368mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
369{
370 if (phy_info->port_details)
371 return phy_info->port_details->rphy;
372 else
373 return NULL;
374}
375
376static inline void
377mptsas_set_rphy(struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
378{
379 if (phy_info->port_details) {
380 phy_info->port_details->rphy = rphy;
381 dsaswideprintk((KERN_DEBUG "sas_rphy_add: rphy=%p\n", rphy));
382 }
383
384#ifdef MPT_DEBUG_SAS_WIDE
385 if (rphy) {
386 dev_printk(KERN_DEBUG, &rphy->dev, "add:");
387 printk("rphy=%p release=%p\n",
388 rphy, rphy->dev.release);
389 }
390#endif
391}
392
393static inline struct sas_port *
394mptsas_get_port(struct mptsas_phyinfo *phy_info)
395{
396 if (phy_info->port_details)
397 return phy_info->port_details->port;
398 else
399 return NULL;
400}
401
402static inline void
403mptsas_set_port(struct mptsas_phyinfo *phy_info, struct sas_port *port)
404{
405 if (phy_info->port_details)
406 phy_info->port_details->port = port;
407
408#ifdef MPT_DEBUG_SAS_WIDE
409 if (port) {
410 dev_printk(KERN_DEBUG, &port->dev, "add: ");
411 printk("port=%p release=%p\n",
412 port, port->dev.release);
413 }
414#endif
415}
416
417static inline struct scsi_target *
418mptsas_get_starget(struct mptsas_phyinfo *phy_info)
419{
420 if (phy_info->port_details)
421 return phy_info->port_details->starget;
422 else
423 return NULL;
424}
425
426static inline void
427mptsas_set_starget(struct mptsas_phyinfo *phy_info, struct scsi_target *
428starget)
429{
430 if (phy_info->port_details)
431 phy_info->port_details->starget = starget;
432}
433
434
435/*
436 * mptsas_setup_wide_ports
437 *
438 * Updates for new and existing narrow/wide port configuration
439 * in the sas_topology
440 */
441static void
442mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
443{
444 struct mptsas_portinfo_details * port_details;
445 struct mptsas_phyinfo *phy_info, *phy_info_cmp;
446 u64 sas_address;
447 int i, j;
448
449 mutex_lock(&ioc->sas_topology_mutex);
450
451 phy_info = port_info->phy_info;
452 for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
453 if (phy_info->attached.handle)
454 continue;
455 port_details = phy_info->port_details;
456 if (!port_details)
457 continue;
458 if (port_details->num_phys < 2)
459 continue;
460 /*
461 * Removing a phy from a port, letting the last
462 * phy be removed by firmware events.
463 */
464 dsaswideprintk((KERN_DEBUG
465 "%s: [%p]: port=%d deleting phy = %d\n",
466 __FUNCTION__, port_details,
467 port_details->port_id, i));
468 port_details->num_phys--;
469 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
470 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
471 sas_port_delete_phy(port_details->port, phy_info->phy);
472 phy_info->port_details = NULL;
473 }
474
475 /*
476 * Populate and refresh the tree
477 */
478 phy_info = port_info->phy_info;
479 for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
480 sas_address = phy_info->attached.sas_address;
481 dsaswideprintk((KERN_DEBUG "phy_id=%d sas_address=0x%018llX\n",
482 i, sas_address));
483 if (!sas_address)
484 continue;
485 port_details = phy_info->port_details;
486 /*
487 * Forming a port
488 */
489 if (!port_details) {
490 port_details = kzalloc(sizeof(*port_details),
491 GFP_KERNEL);
492 if (!port_details)
493 goto out;
494 port_details->num_phys = 1;
495 port_details->port_info = port_info;
496 port_details->port_id = ioc->port_serial_number++;
497 if (phy_info->phy_id < 64 )
498 port_details->phy_bitmask |=
499 (1 << phy_info->phy_id);
500 phy_info->sas_port_add_phy=1;
501 dsaswideprintk((KERN_DEBUG "\t\tForming port\n\t\t"
502 "phy_id=%d sas_address=0x%018llX\n",
503 i, sas_address));
504 phy_info->port_details = port_details;
505 }
506
507 if (i == port_info->num_phys - 1)
508 continue;
509 phy_info_cmp = &port_info->phy_info[i + 1];
510 for (j = i + 1 ; j < port_info->num_phys ; j++,
511 phy_info_cmp++) {
512 if (!phy_info_cmp->attached.sas_address)
513 continue;
514 if (sas_address != phy_info_cmp->attached.sas_address)
515 continue;
516 if (phy_info_cmp->port_details == port_details )
517 continue;
518 dsaswideprintk((KERN_DEBUG
519 "\t\tphy_id=%d sas_address=0x%018llX\n",
520 j, phy_info_cmp->attached.sas_address));
521 if (phy_info_cmp->port_details) {
522 port_details->rphy =
523 mptsas_get_rphy(phy_info_cmp);
524 port_details->port =
525 mptsas_get_port(phy_info_cmp);
526 port_details->starget =
527 mptsas_get_starget(phy_info_cmp);
528 port_details->port_id =
529 phy_info_cmp->port_details->port_id;
530 port_details->num_phys =
531 phy_info_cmp->port_details->num_phys;
532// port_info->port_serial_number--;
533 ioc->port_serial_number--;
534 if (!phy_info_cmp->port_details->num_phys)
535 kfree(phy_info_cmp->port_details);
536 } else
537 phy_info_cmp->sas_port_add_phy=1;
538 /*
539 * Adding a phy to a port
540 */
541 phy_info_cmp->port_details = port_details;
542 if (phy_info_cmp->phy_id < 64 )
543 port_details->phy_bitmask |=
544 (1 << phy_info_cmp->phy_id);
545 port_details->num_phys++;
546 }
547 }
548
549 out:
550
551#ifdef MPT_DEBUG_SAS_WIDE
552 for (i = 0; i < port_info->num_phys; i++) {
553 port_details = port_info->phy_info[i].port_details;
554 if (!port_details)
555 continue;
556 dsaswideprintk((KERN_DEBUG
557 "%s: [%p]: phy_id=%02d port_id=%02d num_phys=%02d "
558 "bitmask=0x%016llX\n",
559 __FUNCTION__,
560 port_details, i, port_details->port_id,
561 port_details->num_phys, port_details->phy_bitmask));
562 dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n",
563 port_details->port, port_details->rphy));
564 }
565 dsaswideprintk((KERN_DEBUG"\n"));
566#endif
567 mutex_unlock(&ioc->sas_topology_mutex);
568}
569
570static void
571mptsas_target_reset(MPT_ADAPTER *ioc, VirtTarget * vtarget)
572{
573 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)ioc->sh->hostdata;
574
575 if (mptscsih_TMHandler(hd,
576 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
577 vtarget->bus_id, vtarget->target_id, 0, 0, 5) < 0) {
578 hd->tmPending = 0;
579 hd->tmState = TM_STATE_NONE;
580 printk(MYIOC_s_WARN_FMT
581 "Error processing TaskMgmt id=%d TARGET_RESET\n",
582 ioc->name, vtarget->target_id);
583 }
584}
585
322static int 586static int
323mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, 587mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
324 u32 form, u32 form_specific) 588 u32 form, u32 form_specific)
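
The port_details bookkeeping introduced in this hunk is what collapses several narrow phys into one wide port. A worked example with made-up values: if four HBA phys report the same attached SAS address, mptsas_setup_wide_ports() leaves them sharing a single mptsas_portinfo_details, and only that one port, not four, is registered with the SAS transport layer:

	phy_id   attached.sas_address   resulting port_details (shared)
	------   --------------------   --------------------------------
	  0      0x5000c50000abcd00     num_phys    = 4
	  1      0x5000c50000abcd00     phy_bitmask = 0x000000000000000F
	  2      0x5000c50000abcd00     port_id     = ioc->port_serial_number
	  3      0x5000c50000abcd00       at the time the port was formed

A phy whose attached address later reads back as zero is dropped from the port in the first loop above, which clears its bit in phy_bitmask and decrements num_phys, leaving the last phy to be removed by firmware events.
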
@@ -400,11 +664,105 @@ mptsas_slave_configure(struct scsi_device *sdev)
400 return mptscsih_slave_configure(sdev); 664 return mptscsih_slave_configure(sdev);
401} 665}
402 666
403/* 667static int
404 * This is pretty ugly. We will be able to seriously clean it up 668mptsas_target_alloc(struct scsi_target *starget)
405 * once the DV code in mptscsih goes away and we can properly 669{
406 * implement ->target_alloc. 670 struct Scsi_Host *host = dev_to_shost(&starget->dev);
407 */ 671 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
672 VirtTarget *vtarget;
673 u32 target_id;
674 u32 channel;
675 struct sas_rphy *rphy;
676 struct mptsas_portinfo *p;
677 int i;
678
679 vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
680 if (!vtarget)
681 return -ENOMEM;
682
683 vtarget->starget = starget;
684 vtarget->ioc_id = hd->ioc->id;
685 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
686
687 target_id = starget->id;
688 channel = 0;
689
690 hd->Targets[target_id] = vtarget;
691
692 /*
693 * RAID volumes placed beyond the last expected port.
694 */
695 if (starget->channel == hd->ioc->num_ports)
696 goto out;
697
698 rphy = dev_to_rphy(starget->dev.parent);
699 mutex_lock(&hd->ioc->sas_topology_mutex);
700 list_for_each_entry(p, &hd->ioc->sas_topology, list) {
701 for (i = 0; i < p->num_phys; i++) {
702 if (p->phy_info[i].attached.sas_address !=
703 rphy->identify.sas_address)
704 continue;
705 target_id = p->phy_info[i].attached.id;
706 channel = p->phy_info[i].attached.channel;
707 mptsas_set_starget(&p->phy_info[i], starget);
708
709 /*
710 * Exposing hidden raid components
711 */
712 if (mptscsih_is_phys_disk(hd->ioc, target_id)) {
713 target_id = mptscsih_raid_id_to_num(hd,
714 target_id);
715 vtarget->tflags |=
716 MPT_TARGET_FLAGS_RAID_COMPONENT;
717 }
718 mutex_unlock(&hd->ioc->sas_topology_mutex);
719 goto out;
720 }
721 }
722 mutex_unlock(&hd->ioc->sas_topology_mutex);
723
724 kfree(vtarget);
725 return -ENXIO;
726
727 out:
728 vtarget->target_id = target_id;
729 vtarget->bus_id = channel;
730 starget->hostdata = vtarget;
731 return 0;
732}
733
734static void
735mptsas_target_destroy(struct scsi_target *starget)
736{
737 struct Scsi_Host *host = dev_to_shost(&starget->dev);
738 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
739 struct sas_rphy *rphy;
740 struct mptsas_portinfo *p;
741 int i;
742
743 if (!starget->hostdata)
744 return;
745
746 if (starget->channel == hd->ioc->num_ports)
747 goto out;
748
749 rphy = dev_to_rphy(starget->dev.parent);
750 list_for_each_entry(p, &hd->ioc->sas_topology, list) {
751 for (i = 0; i < p->num_phys; i++) {
752 if (p->phy_info[i].attached.sas_address !=
753 rphy->identify.sas_address)
754 continue;
755 mptsas_set_starget(&p->phy_info[i], NULL);
756 goto out;
757 }
758 }
759
760 out:
761 kfree(starget->hostdata);
762 starget->hostdata = NULL;
763}
764
765
408static int 766static int
409mptsas_slave_alloc(struct scsi_device *sdev) 767mptsas_slave_alloc(struct scsi_device *sdev)
410{ 768{
@@ -412,61 +770,41 @@ mptsas_slave_alloc(struct scsi_device *sdev)
412 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata; 770 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
413 struct sas_rphy *rphy; 771 struct sas_rphy *rphy;
414 struct mptsas_portinfo *p; 772 struct mptsas_portinfo *p;
415 VirtTarget *vtarget;
416 VirtDevice *vdev; 773 VirtDevice *vdev;
417 struct scsi_target *starget; 774 struct scsi_target *starget;
418 u32 target_id; 775 int i;
419 int i;
420 776
421 vdev = kzalloc(sizeof(VirtDevice), GFP_KERNEL); 777 vdev = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
422 if (!vdev) { 778 if (!vdev) {
423 printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n", 779 printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n",
424 hd->ioc->name, sizeof(VirtDevice)); 780 hd->ioc->name, sizeof(VirtDevice));
425 return -ENOMEM; 781 return -ENOMEM;
426 } 782 }
427 sdev->hostdata = vdev;
428 starget = scsi_target(sdev); 783 starget = scsi_target(sdev);
429 vtarget = starget->hostdata; 784 vdev->vtarget = starget->hostdata;
430 vtarget->ioc_id = hd->ioc->id;
431 vdev->vtarget = vtarget;
432 if (vtarget->num_luns == 0) {
433 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
434 hd->Targets[sdev->id] = vtarget;
435 }
436 785
437 /* 786 /*
438 RAID volumes placed beyond the last expected port. 787 * RAID volumes placed beyond the last expected port.
439 */ 788 */
440 if (sdev->channel == hd->ioc->num_ports) { 789 if (sdev->channel == hd->ioc->num_ports)
441 target_id = sdev->id;
442 vtarget->bus_id = 0;
443 vdev->lun = 0;
444 goto out; 790 goto out;
445 }
446 791
447 rphy = dev_to_rphy(sdev->sdev_target->dev.parent); 792 rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
448 mutex_lock(&hd->ioc->sas_topology_mutex); 793 mutex_lock(&hd->ioc->sas_topology_mutex);
449 list_for_each_entry(p, &hd->ioc->sas_topology, list) { 794 list_for_each_entry(p, &hd->ioc->sas_topology, list) {
450 for (i = 0; i < p->num_phys; i++) { 795 for (i = 0; i < p->num_phys; i++) {
451 if (p->phy_info[i].attached.sas_address == 796 if (p->phy_info[i].attached.sas_address !=
452 rphy->identify.sas_address) { 797 rphy->identify.sas_address)
453 target_id = p->phy_info[i].attached.id; 798 continue;
454 vtarget->bus_id = p->phy_info[i].attached.channel; 799 vdev->lun = sdev->lun;
455 vdev->lun = sdev->lun; 800 /*
456 p->phy_info[i].starget = sdev->sdev_target; 801 * Exposing hidden raid components
457 /* 802 */
458 * Exposing hidden disk (RAID) 803 if (mptscsih_is_phys_disk(hd->ioc,
459 */ 804 p->phy_info[i].attached.id))
460 if (mptscsih_is_phys_disk(hd->ioc, target_id)) { 805 sdev->no_uld_attach = 1;
461 target_id = mptscsih_raid_id_to_num(hd, 806 mutex_unlock(&hd->ioc->sas_topology_mutex);
462 target_id); 807 goto out;
463 vdev->vtarget->tflags |=
464 MPT_TARGET_FLAGS_RAID_COMPONENT;
465 sdev->no_uld_attach = 1;
466 }
467 mutex_unlock(&hd->ioc->sas_topology_mutex);
468 goto out;
469 }
470 } 808 }
471 } 809 }
472 mutex_unlock(&hd->ioc->sas_topology_mutex); 810 mutex_unlock(&hd->ioc->sas_topology_mutex);
@@ -475,57 +813,39 @@ mptsas_slave_alloc(struct scsi_device *sdev)
475 return -ENXIO; 813 return -ENXIO;
476 814
477 out: 815 out:
478 vtarget->target_id = target_id; 816 vdev->vtarget->num_luns++;
479 vtarget->num_luns++; 817 sdev->hostdata = vdev;
480 return 0; 818 return 0;
481} 819}
482 820
483static void 821static int
484mptsas_slave_destroy(struct scsi_device *sdev) 822mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
485{ 823{
486 struct Scsi_Host *host = sdev->host; 824 VirtDevice *vdev = SCpnt->device->hostdata;
487 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
488 VirtDevice *vdev;
489 825
490 /* 826// scsi_print_command(SCpnt);
491 * Issue target reset to flush firmware outstanding commands. 827 if (vdev->vtarget->deleted) {
492 */ 828 SCpnt->result = DID_NO_CONNECT << 16;
493 vdev = sdev->hostdata; 829 done(SCpnt);
494 if (vdev->configured_lun){ 830 return 0;
495 if (mptscsih_TMHandler(hd,
496 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
497 vdev->vtarget->bus_id,
498 vdev->vtarget->target_id,
499 0, 0, 5 /* 5 second timeout */)
500 < 0){
501
502 /* The TM request failed!
503 * Fatal error case.
504 */
505 printk(MYIOC_s_WARN_FMT
506 "Error processing TaskMgmt id=%d TARGET_RESET\n",
507 hd->ioc->name,
508 vdev->vtarget->target_id);
509
510 hd->tmPending = 0;
511 hd->tmState = TM_STATE_NONE;
512 }
513 } 831 }
514 mptscsih_slave_destroy(sdev); 832
833 return mptscsih_qcmd(SCpnt,done);
515} 834}
516 835
836
517static struct scsi_host_template mptsas_driver_template = { 837static struct scsi_host_template mptsas_driver_template = {
518 .module = THIS_MODULE, 838 .module = THIS_MODULE,
519 .proc_name = "mptsas", 839 .proc_name = "mptsas",
520 .proc_info = mptscsih_proc_info, 840 .proc_info = mptscsih_proc_info,
521 .name = "MPT SPI Host", 841 .name = "MPT SPI Host",
522 .info = mptscsih_info, 842 .info = mptscsih_info,
523 .queuecommand = mptscsih_qcmd, 843 .queuecommand = mptsas_qcmd,
524 .target_alloc = mptscsih_target_alloc, 844 .target_alloc = mptsas_target_alloc,
525 .slave_alloc = mptsas_slave_alloc, 845 .slave_alloc = mptsas_slave_alloc,
526 .slave_configure = mptsas_slave_configure, 846 .slave_configure = mptsas_slave_configure,
527 .target_destroy = mptscsih_target_destroy, 847 .target_destroy = mptsas_target_destroy,
528 .slave_destroy = mptsas_slave_destroy, 848 .slave_destroy = mptscsih_slave_destroy,
529 .change_queue_depth = mptscsih_change_queue_depth, 849 .change_queue_depth = mptscsih_change_queue_depth,
530 .eh_abort_handler = mptscsih_abort, 850 .eh_abort_handler = mptscsih_abort,
531 .eh_device_reset_handler = mptscsih_dev_reset, 851 .eh_device_reset_handler = mptscsih_dev_reset,
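
The mptsas_qcmd() wired into the template above fails I/O to targets that have been marked deleted instead of passing it to the firmware. A minimal standalone sketch of that guard (stub types; DID_NO_CONNECT's value is borrowed from the SCSI midlayer for illustration):

    /* Minimal sketch, not the driver's types: complete a command with
     * DID_NO_CONNECT when its target has been marked deleted, instead
     * of handing it to the hardware. */
    #include <stdio.h>

    enum { DID_NO_CONNECT = 0x01 };        /* value as in the SCSI midlayer */

    struct cmd    { int result; };
    struct target { int deleted; };

    typedef void (*done_fn)(struct cmd *);

    static void complete_cmd(struct cmd *c)
    {
            printf("completed, result=0x%x\n", c->result);
    }

    static int queue_cmd(struct target *t, struct cmd *c, done_fn done)
    {
            if (t->deleted) {              /* target is gone: fail fast */
                    c->result = DID_NO_CONNECT << 16;
                    done(c);
                    return 0;              /* command is "handled" */
            }
            /* ... otherwise pass the command down to the firmware ... */
            return 0;
    }

    int main(void)
    {
            struct target t = { .deleted = 1 };
            struct cmd c = { 0 };
            return queue_cmd(&t, &c, complete_cmd);
    }
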
@@ -795,7 +1115,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
795 1115
796 port_info->num_phys = buffer->NumPhys; 1116 port_info->num_phys = buffer->NumPhys;
797 port_info->phy_info = kcalloc(port_info->num_phys, 1117 port_info->phy_info = kcalloc(port_info->num_phys,
798 sizeof(struct mptsas_phyinfo),GFP_KERNEL); 1118 sizeof(*port_info->phy_info),GFP_KERNEL);
799 if (!port_info->phy_info) { 1119 if (!port_info->phy_info) {
800 error = -ENOMEM; 1120 error = -ENOMEM;
801 goto out_free_consistent; 1121 goto out_free_consistent;
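
The kcalloc() call now sizes elements from the pointer being assigned rather than naming the struct type, so the allocation stays correct if the element type is ever changed. A userland sketch of the same idiom, with calloc() standing in for kcalloc():

    /* Sketch of the sizeof(*ptr) allocation idiom: the element size
     * tracks the pointer's type automatically. */
    #include <stdio.h>
    #include <stdlib.h>

    struct phyinfo { int phy_id; };

    int main(void)
    {
            size_t num_phys = 8;
            struct phyinfo *phy_info;

            phy_info = calloc(num_phys, sizeof(*phy_info));
            if (!phy_info)
                    return 1;
            printf("allocated %zu bytes\n", num_phys * sizeof(*phy_info));
            free(phy_info);
            return 0;
    }
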
@@ -811,6 +1131,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
811 buffer->PhyData[i].Port; 1131 buffer->PhyData[i].Port;
812 port_info->phy_info[i].negotiated_link_rate = 1132 port_info->phy_info[i].negotiated_link_rate =
813 buffer->PhyData[i].NegotiatedLinkRate; 1133 buffer->PhyData[i].NegotiatedLinkRate;
1134 port_info->phy_info[i].portinfo = port_info;
814 } 1135 }
815 1136
816 out_free_consistent: 1137 out_free_consistent:
@@ -968,7 +1289,7 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
968 CONFIGPARMS cfg; 1289 CONFIGPARMS cfg;
969 SasExpanderPage0_t *buffer; 1290 SasExpanderPage0_t *buffer;
970 dma_addr_t dma_handle; 1291 dma_addr_t dma_handle;
971 int error; 1292 int i, error;
972 1293
973 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; 1294 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
974 hdr.ExtPageLength = 0; 1295 hdr.ExtPageLength = 0;
@@ -1013,12 +1334,15 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1013 port_info->num_phys = buffer->NumPhys; 1334 port_info->num_phys = buffer->NumPhys;
1014 port_info->handle = le16_to_cpu(buffer->DevHandle); 1335 port_info->handle = le16_to_cpu(buffer->DevHandle);
1015 port_info->phy_info = kcalloc(port_info->num_phys, 1336 port_info->phy_info = kcalloc(port_info->num_phys,
1016 sizeof(struct mptsas_phyinfo),GFP_KERNEL); 1337 sizeof(*port_info->phy_info),GFP_KERNEL);
1017 if (!port_info->phy_info) { 1338 if (!port_info->phy_info) {
1018 error = -ENOMEM; 1339 error = -ENOMEM;
1019 goto out_free_consistent; 1340 goto out_free_consistent;
1020 } 1341 }
1021 1342
1343 for (i = 0; i < port_info->num_phys; i++)
1344 port_info->phy_info[i].portinfo = port_info;
1345
1022 out_free_consistent: 1346 out_free_consistent:
1023 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, 1347 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
1024 buffer, dma_handle); 1348 buffer, dma_handle);
@@ -1161,19 +1485,23 @@ static int mptsas_probe_one_phy(struct device *dev,
1161{ 1485{
1162 MPT_ADAPTER *ioc; 1486 MPT_ADAPTER *ioc;
1163 struct sas_phy *phy; 1487 struct sas_phy *phy;
1164 int error; 1488 struct sas_port *port;
1489 int error = 0;
1165 1490
1166 if (!dev) 1491 if (!dev) {
1167 return -ENODEV; 1492 error = -ENODEV;
1493 goto out;
1494 }
1168 1495
1169 if (!phy_info->phy) { 1496 if (!phy_info->phy) {
1170 phy = sas_phy_alloc(dev, index); 1497 phy = sas_phy_alloc(dev, index);
1171 if (!phy) 1498 if (!phy) {
1172 return -ENOMEM; 1499 error = -ENOMEM;
1500 goto out;
1501 }
1173 } else 1502 } else
1174 phy = phy_info->phy; 1503 phy = phy_info->phy;
1175 1504
1176 phy->port_identifier = phy_info->port_id;
1177 mptsas_parse_device_info(&phy->identify, &phy_info->identify); 1505 mptsas_parse_device_info(&phy->identify, &phy_info->identify);
1178 1506
1179 /* 1507 /*
@@ -1265,19 +1593,52 @@ static int mptsas_probe_one_phy(struct device *dev,
1265 error = sas_phy_add(phy); 1593 error = sas_phy_add(phy);
1266 if (error) { 1594 if (error) {
1267 sas_phy_free(phy); 1595 sas_phy_free(phy);
1268 return error; 1596 goto out;
1269 } 1597 }
1270 phy_info->phy = phy; 1598 phy_info->phy = phy;
1271 } 1599 }
1272 1600
1273 if ((phy_info->attached.handle) && 1601 if (!phy_info->attached.handle ||
1274 (!phy_info->rphy)) { 1602 !phy_info->port_details)
1603 goto out;
1604
1605 port = mptsas_get_port(phy_info);
1606 ioc = phy_to_ioc(phy_info->phy);
1607
1608 if (phy_info->sas_port_add_phy) {
1609
1610 if (!port) {
1611 port = sas_port_alloc(dev,
1612 phy_info->port_details->port_id);
1613 dsaswideprintk((KERN_DEBUG
1614 "sas_port_alloc: port=%p dev=%p port_id=%d\n",
1615 port, dev, phy_info->port_details->port_id));
1616 if (!port) {
1617 error = -ENOMEM;
1618 goto out;
1619 }
1620 error = sas_port_add(port);
1621 if (error) {
1622 dfailprintk((MYIOC_s_ERR_FMT
1623 "%s: exit at line=%d\n", ioc->name,
1624 __FUNCTION__, __LINE__));
1625 goto out;
1626 }
1627 mptsas_set_port(phy_info, port);
1628 }
1629 dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n",
1630 phy_info->phy_id));
1631 sas_port_add_phy(port, phy_info->phy);
1632 phy_info->sas_port_add_phy = 0;
1633 }
1634
1635 if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
1275 1636
1276 struct sas_rphy *rphy; 1637 struct sas_rphy *rphy;
1638 struct device *parent;
1277 struct sas_identify identify; 1639 struct sas_identify identify;
1278 1640
1279 ioc = phy_to_ioc(phy_info->phy); 1641 parent = dev->parent->parent;
1280
1281 /* 1642 /*
1282 * Let the hotplug_work thread handle processing 1643 * Let the hotplug_work thread handle processing
1283 * the adding/removing of devices that occur 1644 * the adding/removing of devices that occur
@@ -1285,36 +1646,63 @@ static int mptsas_probe_one_phy(struct device *dev,
1285 */ 1646 */
1286 if (ioc->sas_discovery_runtime && 1647 if (ioc->sas_discovery_runtime &&
1287 mptsas_is_end_device(&phy_info->attached)) 1648 mptsas_is_end_device(&phy_info->attached))
1288 return 0; 1649 goto out;
1289 1650
1290 mptsas_parse_device_info(&identify, &phy_info->attached); 1651 mptsas_parse_device_info(&identify, &phy_info->attached);
1652 if (scsi_is_host_device(parent)) {
1653 struct mptsas_portinfo *port_info;
1654 int i;
1655
1656 mutex_lock(&ioc->sas_topology_mutex);
1657 port_info = mptsas_find_portinfo_by_handle(ioc,
1658 ioc->handle);
1659 mutex_unlock(&ioc->sas_topology_mutex);
1660
1661 for (i = 0; i < port_info->num_phys; i++)
1662 if (port_info->phy_info[i].identify.sas_address ==
1663 identify.sas_address)
1664 goto out;
1665
1666 } else if (scsi_is_sas_rphy(parent)) {
1667 struct sas_rphy *parent_rphy = dev_to_rphy(parent);
1668 if (identify.sas_address ==
1669 parent_rphy->identify.sas_address)
1670 goto out;
1671 }
1672
1291 switch (identify.device_type) { 1673 switch (identify.device_type) {
1292 case SAS_END_DEVICE: 1674 case SAS_END_DEVICE:
1293 rphy = sas_end_device_alloc(phy); 1675 rphy = sas_end_device_alloc(port);
1294 break; 1676 break;
1295 case SAS_EDGE_EXPANDER_DEVICE: 1677 case SAS_EDGE_EXPANDER_DEVICE:
1296 case SAS_FANOUT_EXPANDER_DEVICE: 1678 case SAS_FANOUT_EXPANDER_DEVICE:
1297 rphy = sas_expander_alloc(phy, identify.device_type); 1679 rphy = sas_expander_alloc(port, identify.device_type);
1298 break; 1680 break;
1299 default: 1681 default:
1300 rphy = NULL; 1682 rphy = NULL;
1301 break; 1683 break;
1302 } 1684 }
1303 if (!rphy) 1685 if (!rphy) {
1304 return 0; /* non-fatal: an rphy can be added later */ 1686 dfailprintk((MYIOC_s_ERR_FMT
1687 "%s: exit at line=%d\n", ioc->name,
1688 __FUNCTION__, __LINE__));
1689 goto out;
1690 }
1305 1691
1306 rphy->identify = identify; 1692 rphy->identify = identify;
1307
1308 error = sas_rphy_add(rphy); 1693 error = sas_rphy_add(rphy);
1309 if (error) { 1694 if (error) {
1695 dfailprintk((MYIOC_s_ERR_FMT
1696 "%s: exit at line=%d\n", ioc->name,
1697 __FUNCTION__, __LINE__));
1310 sas_rphy_free(rphy); 1698 sas_rphy_free(rphy);
1311 return error; 1699 goto out;
1312 } 1700 }
1313 1701 mptsas_set_rphy(phy_info, rphy);
1314 phy_info->rphy = rphy;
1315 } 1702 }
1316 1703
1317 return 0; 1704 out:
1705 return error;
1318} 1706}
1319 1707
1320static int 1708static int
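
mptsas_probe_one_phy() now attaches phys to a shared sas_port: the first phy allocates and registers the port (sas_port_alloc()/sas_port_add()), and later phys of the same wide port join it with sas_port_add_phy(). A rough get-or-create sketch with stub types, not the real SAS transport API:

    /* Rough sketch of the wide-port get-or-create flow; all types here
     * are illustrative stand-ins. */
    #include <stdio.h>
    #include <stdlib.h>

    struct sas_port_sketch { int port_id; int nr_phys; };
    struct phy_sketch      { struct sas_port_sketch *port; int phy_id; };

    static struct sas_port_sketch *
    get_or_create_port(struct phy_sketch *phy, int port_id)
    {
            if (!phy->port) {              /* sas_port_alloc()+sas_port_add() */
                    phy->port = calloc(1, sizeof(*phy->port));
                    if (!phy->port)
                            return NULL;
                    phy->port->port_id = port_id;
            }
            phy->port->nr_phys++;          /* sas_port_add_phy() */
            return phy->port;
    }

    int main(void)
    {
            struct phy_sketch a = { .phy_id = 0 }, b = { .phy_id = 1 };
            struct sas_port_sketch *port = get_or_create_port(&a, 3);

            if (!port)
                    return 1;
            b.port = port;                 /* second phy of the wide port */
            get_or_create_port(&b, 3);
            printf("port %d has %d phys\n", port->port_id, port->nr_phys);
            free(port);
            return 0;
    }
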
@@ -1333,6 +1721,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
1333 goto out_free_port_info; 1721 goto out_free_port_info;
1334 1722
1335 mutex_lock(&ioc->sas_topology_mutex); 1723 mutex_lock(&ioc->sas_topology_mutex);
1724 ioc->handle = hba->handle;
1336 port_info = mptsas_find_portinfo_by_handle(ioc, hba->handle); 1725 port_info = mptsas_find_portinfo_by_handle(ioc, hba->handle);
1337 if (!port_info) { 1726 if (!port_info) {
1338 port_info = hba; 1727 port_info = hba;
@@ -1342,8 +1731,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
1342 for (i = 0; i < hba->num_phys; i++) 1731 for (i = 0; i < hba->num_phys; i++)
1343 port_info->phy_info[i].negotiated_link_rate = 1732 port_info->phy_info[i].negotiated_link_rate =
1344 hba->phy_info[i].negotiated_link_rate; 1733 hba->phy_info[i].negotiated_link_rate;
1345 if (hba->phy_info) 1734 kfree(hba->phy_info);
1346 kfree(hba->phy_info);
1347 kfree(hba); 1735 kfree(hba);
1348 hba = NULL; 1736 hba = NULL;
1349 } 1737 }
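
Several hunks in this patch drop the "if (ptr) kfree(ptr)" guard because kfree(NULL) is defined to be a no-op. The same holds for free() in hosted C:

    /* kfree(NULL), like free(NULL), is specified to do nothing, so the
     * 'if (ptr)' test before it adds no safety. */
    #include <stdlib.h>

    int main(void)
    {
            int *p = NULL;

            free(p);        /* well-defined no-op */
            return 0;
    }
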
@@ -1362,18 +1750,19 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
1362 port_info->phy_info[i].phy_id; 1750 port_info->phy_info[i].phy_id;
1363 handle = port_info->phy_info[i].identify.handle; 1751 handle = port_info->phy_info[i].identify.handle;
1364 1752
1365 if (port_info->phy_info[i].attached.handle) { 1753 if (port_info->phy_info[i].attached.handle)
1366 mptsas_sas_device_pg0(ioc, 1754 mptsas_sas_device_pg0(ioc,
1367 &port_info->phy_info[i].attached, 1755 &port_info->phy_info[i].attached,
1368 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 1756 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
1369 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 1757 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
1370 port_info->phy_info[i].attached.handle); 1758 port_info->phy_info[i].attached.handle);
1371 } 1759 }
1760
1761 mptsas_setup_wide_ports(ioc, port_info);
1372 1762
1763 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
1373 mptsas_probe_one_phy(&ioc->sh->shost_gendev, 1764 mptsas_probe_one_phy(&ioc->sh->shost_gendev,
1374 &port_info->phy_info[i], ioc->sas_index, 1); 1765 &port_info->phy_info[i], ioc->sas_index, 1);
1375 ioc->sas_index++;
1376 }
1377 1766
1378 return 0; 1767 return 0;
1379 1768
@@ -1387,6 +1776,8 @@ static int
1387mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle) 1776mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle)
1388{ 1777{
1389 struct mptsas_portinfo *port_info, *p, *ex; 1778 struct mptsas_portinfo *port_info, *p, *ex;
1779 struct device *parent;
1780 struct sas_rphy *rphy;
1390 int error = -ENOMEM, i, j; 1781 int error = -ENOMEM, i, j;
1391 1782
1392 ex = kzalloc(sizeof(*port_info), GFP_KERNEL); 1783 ex = kzalloc(sizeof(*port_info), GFP_KERNEL);
@@ -1408,16 +1799,13 @@ mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle)
1408 list_add_tail(&port_info->list, &ioc->sas_topology); 1799 list_add_tail(&port_info->list, &ioc->sas_topology);
1409 } else { 1800 } else {
1410 port_info->handle = ex->handle; 1801 port_info->handle = ex->handle;
1411 if (ex->phy_info) 1802 kfree(ex->phy_info);
1412 kfree(ex->phy_info);
1413 kfree(ex); 1803 kfree(ex);
1414 ex = NULL; 1804 ex = NULL;
1415 } 1805 }
1416 mutex_unlock(&ioc->sas_topology_mutex); 1806 mutex_unlock(&ioc->sas_topology_mutex);
1417 1807
1418 for (i = 0; i < port_info->num_phys; i++) { 1808 for (i = 0; i < port_info->num_phys; i++) {
1419 struct device *parent;
1420
1421 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i], 1809 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
1422 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM << 1810 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
1423 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle); 1811 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
@@ -1441,34 +1829,34 @@ mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle)
1441 port_info->phy_info[i].attached.phy_id = 1829 port_info->phy_info[i].attached.phy_id =
1442 port_info->phy_info[i].phy_id; 1830 port_info->phy_info[i].phy_id;
1443 } 1831 }
1832 }
1444 1833
1445 /* 1834 parent = &ioc->sh->shost_gendev;
1446 * If we find a parent port handle this expander is 1835 for (i = 0; i < port_info->num_phys; i++) {
1447 * attached to another expander, else it hangs of the
1448 * HBA phys.
1449 */
1450 parent = &ioc->sh->shost_gendev;
1451 mutex_lock(&ioc->sas_topology_mutex); 1836 mutex_lock(&ioc->sas_topology_mutex);
1452 list_for_each_entry(p, &ioc->sas_topology, list) { 1837 list_for_each_entry(p, &ioc->sas_topology, list) {
1453 for (j = 0; j < p->num_phys; j++) { 1838 for (j = 0; j < p->num_phys; j++) {
1454 if (port_info->phy_info[i].identify.handle == 1839 if (port_info->phy_info[i].identify.handle !=
1455 p->phy_info[j].attached.handle) 1840 p->phy_info[j].attached.handle)
1456 parent = &p->phy_info[j].rphy->dev; 1841 continue;
1842 rphy = mptsas_get_rphy(&p->phy_info[j]);
1843 parent = &rphy->dev;
1457 } 1844 }
1458 } 1845 }
1459 mutex_unlock(&ioc->sas_topology_mutex); 1846 mutex_unlock(&ioc->sas_topology_mutex);
1847 }
1848
1849 mptsas_setup_wide_ports(ioc, port_info);
1460 1850
1851 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
1461 mptsas_probe_one_phy(parent, &port_info->phy_info[i], 1852 mptsas_probe_one_phy(parent, &port_info->phy_info[i],
1462 ioc->sas_index, 0); 1853 ioc->sas_index, 0);
1463 ioc->sas_index++;
1464 }
1465 1854
1466 return 0; 1855 return 0;
1467 1856
1468 out_free_port_info: 1857 out_free_port_info:
1469 if (ex) { 1858 if (ex) {
1470 if (ex->phy_info) 1859 kfree(ex->phy_info);
1471 kfree(ex->phy_info);
1472 kfree(ex); 1860 kfree(ex);
1473 } 1861 }
1474 out: 1862 out:
@@ -1487,7 +1875,12 @@ mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
1487{ 1875{
1488 struct mptsas_portinfo buffer; 1876 struct mptsas_portinfo buffer;
1489 struct mptsas_portinfo *port_info, *n, *parent; 1877 struct mptsas_portinfo *port_info, *n, *parent;
1878 struct mptsas_phyinfo *phy_info;
1879 struct scsi_target * starget;
1880 VirtTarget * vtarget;
1881 struct sas_port * port;
1490 int i; 1882 int i;
1883 u64 expander_sas_address;
1491 1884
1492 mutex_lock(&ioc->sas_topology_mutex); 1885 mutex_lock(&ioc->sas_topology_mutex);
1493 list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) { 1886 list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) {
@@ -1502,6 +1895,25 @@ mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
1502 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), port_info->handle)) { 1895 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), port_info->handle)) {
1503 1896
1504 /* 1897 /*
1898 * Issue target reset to all child end devices
1899 * then mark them deleted to prevent further
1900 * IO going to them.
1901 */
1902 phy_info = port_info->phy_info;
1903 for (i = 0; i < port_info->num_phys; i++, phy_info++) {
1904 starget = mptsas_get_starget(phy_info);
1905 if (!starget)
1906 continue;
1907 vtarget = starget->hostdata;
1908 if (vtarget->deleted)
1909 continue;
1910 vtarget->deleted = 1;
1911 mptsas_target_reset(ioc, vtarget);
1912 sas_port_delete(mptsas_get_port(phy_info));
1913 mptsas_port_delete(phy_info->port_details);
1914 }
1915
1916 /*
1505 * Obtain the port_info instance to the parent port 1917 * Obtain the port_info instance to the parent port
1506 */ 1918 */
1507 parent = mptsas_find_portinfo_by_handle(ioc, 1919 parent = mptsas_find_portinfo_by_handle(ioc,
@@ -1510,34 +1922,43 @@ mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
1510 if (!parent) 1922 if (!parent)
1511 goto next_port; 1923 goto next_port;
1512 1924
1925 expander_sas_address =
1926 port_info->phy_info[0].identify.sas_address;
1927
1513 /* 1928 /*
1514 * Delete rphys in the parent that point 1929 * Delete rphys in the parent that point
1515 * to this expander. The transport layer will 1930 * to this expander. The transport layer will
1516 * cleanup all the children. 1931 * cleanup all the children.
1517 */ 1932 */
1518 for (i = 0; i < parent->num_phys; i++) { 1933 phy_info = parent->phy_info;
1519 if ((!parent->phy_info[i].rphy) || 1934 for (i = 0; i < parent->num_phys; i++, phy_info++) {
1520 (parent->phy_info[i].attached.sas_address != 1935 port = mptsas_get_port(phy_info);
1521 port_info->phy_info[i].identify.sas_address)) 1936 if (!port)
1937 continue;
1938 if (phy_info->attached.sas_address !=
1939 expander_sas_address)
1522 continue; 1940 continue;
1523 sas_rphy_delete(parent->phy_info[i].rphy); 1941#ifdef MPT_DEBUG_SAS_WIDE
1524 memset(&parent->phy_info[i].attached, 0, 1942 dev_printk(KERN_DEBUG, &port->dev, "delete\n");
1525 sizeof(struct mptsas_devinfo)); 1943#endif
1526 parent->phy_info[i].rphy = NULL; 1944 sas_port_delete(port);
1527 parent->phy_info[i].starget = NULL; 1945 mptsas_port_delete(phy_info->port_details);
1528 } 1946 }
1529 next_port: 1947 next_port:
1948
1949 phy_info = port_info->phy_info;
1950 for (i = 0; i < port_info->num_phys; i++, phy_info++)
1951 mptsas_port_delete(phy_info->port_details);
1952
1530 list_del(&port_info->list); 1953 list_del(&port_info->list);
1531 if (port_info->phy_info) 1954 kfree(port_info->phy_info);
1532 kfree(port_info->phy_info);
1533 kfree(port_info); 1955 kfree(port_info);
1534 } 1956 }
1535 /* 1957 /*
1536 * Free this memory allocated from inside 1958 * Free this memory allocated from inside
1537 * mptsas_sas_expander_pg0 1959 * mptsas_sas_expander_pg0
1538 */ 1960 */
1539 if (buffer.phy_info) 1961 kfree(buffer.phy_info);
1540 kfree(buffer.phy_info);
1541 } 1962 }
1542 mutex_unlock(&ioc->sas_topology_mutex); 1963 mutex_unlock(&ioc->sas_topology_mutex);
1543} 1964}
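
When an expander disappears, the driver now marks each child VirtTarget deleted before resetting it and deleting the transport port, so no new I/O is queued while in-flight commands are flushed. A stub sketch of that ordering (illustrative types and helpers only):

    /* Stub sketch of the teardown order for a hot-removed expander's
     * end devices. */
    #include <stdio.h>

    struct vtarget_sketch { int deleted; };

    static void target_reset(struct vtarget_sketch *vt)
    {
            printf("reset target (deleted=%d)\n", vt->deleted);
    }

    static void port_delete(void)
    {
            printf("sas_port and rphys removed\n");
    }

    int main(void)
    {
            struct vtarget_sketch vt = { 0 };

            vt.deleted = 1;         /* 1: queuecommand now fails new I/O */
            target_reset(&vt);      /* 2: flush commands still in the firmware */
            port_delete();          /* 3: tear down the transport objects */
            return 0;
    }
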
@@ -1573,60 +1994,59 @@ mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
1573/* 1994/*
1574 * Work queue thread to handle Runtime discovery 1995 * Work queue thread to handle Runtime discovery
1575 * Mere purpose is the hot add/delete of expanders 1996 * Mere purpose is the hot add/delete of expanders
1997 * (does not take sas_discovery_mutex; the caller must hold it)
1576 */ 1998 */
1577static void 1999static void
1578mptscsih_discovery_work(void * arg) 2000__mptsas_discovery_work(MPT_ADAPTER *ioc)
1579{ 2001{
1580 struct mptsas_discovery_event *ev = arg;
1581 MPT_ADAPTER *ioc = ev->ioc;
1582 u32 handle = 0xFFFF; 2002 u32 handle = 0xFFFF;
1583 2003
1584 mutex_lock(&ioc->sas_discovery_mutex);
1585 ioc->sas_discovery_runtime=1; 2004 ioc->sas_discovery_runtime=1;
1586 mptsas_delete_expander_phys(ioc); 2005 mptsas_delete_expander_phys(ioc);
1587 mptsas_probe_hba_phys(ioc); 2006 mptsas_probe_hba_phys(ioc);
1588 while (!mptsas_probe_expander_phys(ioc, &handle)) 2007 while (!mptsas_probe_expander_phys(ioc, &handle))
1589 ; 2008 ;
1590 kfree(ev);
1591 ioc->sas_discovery_runtime=0; 2009 ioc->sas_discovery_runtime=0;
2010}
2011
2012/*
2013 * Work queue thread to handle Runtime discovery
2020 * Mere purpose is the hot add/delete of expanders
2021 * (takes sas_discovery_mutex itself)
2016 */
2017static void
2018mptsas_discovery_work(void * arg)
2019{
2020 struct mptsas_discovery_event *ev = arg;
2021 MPT_ADAPTER *ioc = ev->ioc;
2022
2023 mutex_lock(&ioc->sas_discovery_mutex);
2024 __mptsas_discovery_work(ioc);
1592 mutex_unlock(&ioc->sas_discovery_mutex); 2025 mutex_unlock(&ioc->sas_discovery_mutex);
2026 kfree(ev);
1593} 2027}
1594 2028
1595static struct mptsas_phyinfo * 2029static struct mptsas_phyinfo *
1596mptsas_find_phyinfo_by_parent(MPT_ADAPTER *ioc, u16 parent_handle, u8 phy_id) 2030mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
1597{ 2031{
1598 struct mptsas_portinfo *port_info; 2032 struct mptsas_portinfo *port_info;
1599 struct mptsas_devinfo device_info;
1600 struct mptsas_phyinfo *phy_info = NULL; 2033 struct mptsas_phyinfo *phy_info = NULL;
1601 int i, error; 2034 int i;
1602
1603 /*
1604 * Retrieve the parent sas_address
1605 */
1606 error = mptsas_sas_device_pg0(ioc, &device_info,
1607 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
1608 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
1609 parent_handle);
1610 if (error)
1611 return NULL;
1612 2035
1613 /*
1614 * The phy_info structures are never deallocated during lifetime of
1615 * a host, so the code below is safe without additional refcounting.
1616 */
1617 mutex_lock(&ioc->sas_topology_mutex); 2036 mutex_lock(&ioc->sas_topology_mutex);
1618 list_for_each_entry(port_info, &ioc->sas_topology, list) { 2037 list_for_each_entry(port_info, &ioc->sas_topology, list) {
1619 for (i = 0; i < port_info->num_phys; i++) { 2038 for (i = 0; i < port_info->num_phys; i++) {
1620 if (port_info->phy_info[i].identify.sas_address == 2039 if (port_info->phy_info[i].attached.sas_address
1621 device_info.sas_address && 2040 != sas_address)
1622 port_info->phy_info[i].phy_id == phy_id) { 2041 continue;
1623 phy_info = &port_info->phy_info[i]; 2042 if (!mptsas_is_end_device(
1624 break; 2043 &port_info->phy_info[i].attached))
1625 } 2044 continue;
2045 phy_info = &port_info->phy_info[i];
2046 break;
1626 } 2047 }
1627 } 2048 }
1628 mutex_unlock(&ioc->sas_topology_mutex); 2049 mutex_unlock(&ioc->sas_topology_mutex);
1629
1630 return phy_info; 2050 return phy_info;
1631} 2051}
1632 2052
@@ -1637,21 +2057,19 @@ mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id)
1637 struct mptsas_phyinfo *phy_info = NULL; 2057 struct mptsas_phyinfo *phy_info = NULL;
1638 int i; 2058 int i;
1639 2059
1640 /*
1641 * The phy_info structures are never deallocated during lifetime of
1642 * a host, so the code below is safe without additional refcounting.
1643 */
1644 mutex_lock(&ioc->sas_topology_mutex); 2060 mutex_lock(&ioc->sas_topology_mutex);
1645 list_for_each_entry(port_info, &ioc->sas_topology, list) { 2061 list_for_each_entry(port_info, &ioc->sas_topology, list) {
1646 for (i = 0; i < port_info->num_phys; i++) 2062 for (i = 0; i < port_info->num_phys; i++) {
1647 if (mptsas_is_end_device(&port_info->phy_info[i].attached)) 2063 if (port_info->phy_info[i].attached.id != id)
1648 if (port_info->phy_info[i].attached.id == id) { 2064 continue;
1649 phy_info = &port_info->phy_info[i]; 2065 if (!mptsas_is_end_device(
1650 break; 2066 &port_info->phy_info[i].attached))
1651 } 2067 continue;
2068 phy_info = &port_info->phy_info[i];
2069 break;
2070 }
1652 } 2071 }
1653 mutex_unlock(&ioc->sas_topology_mutex); 2072 mutex_unlock(&ioc->sas_topology_mutex);
1654
1655 return phy_info; 2073 return phy_info;
1656} 2074}
1657 2075
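
Both find_phyinfo helpers are rewritten from nested matches into early "continue" guards, which keeps the loop body flat. A generic sketch of the same search shape:

    /* Generic sketch of the flattened search: each mismatch skips the
     * entry with 'continue' instead of nesting another if. */
    #include <stdio.h>

    struct entry { int id; int is_end_device; };

    static struct entry *find_end_device(struct entry *tbl, int n, int id)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (tbl[i].id != id)
                            continue;              /* wrong id */
                    if (!tbl[i].is_end_device)
                            continue;              /* not an end device */
                    return &tbl[i];
            }
            return NULL;
    }

    int main(void)
    {
            struct entry tbl[] = { { 1, 0 }, { 2, 1 }, { 2, 0 } };
            struct entry *e = find_end_device(tbl, 3, 2);

            printf("found: %s\n", e ? "yes" : "no");
            return 0;
    }
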
@@ -1659,7 +2077,7 @@ mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id)
1660 * Work queue thread to clear the persistent table 2078
1660 */ 2078 */
1661static void 2079static void
1662mptscsih_sas_persist_clear_table(void * arg) 2080mptsas_persist_clear_table(void * arg)
1663{ 2081{
1664 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 2082 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
1665 2083
@@ -1680,7 +2098,6 @@ mptsas_reprobe_target(struct scsi_target *starget, int uld_attach)
1680 mptsas_reprobe_lun); 2098 mptsas_reprobe_lun);
1681} 2099}
1682 2100
1683
1684/* 2101/*
1685 * Work queue thread to handle SAS hotplug events 2102 * Work queue thread to handle SAS hotplug events
1686 */ 2103 */
@@ -1691,14 +2108,17 @@ mptsas_hotplug_work(void *arg)
1691 MPT_ADAPTER *ioc = ev->ioc; 2108 MPT_ADAPTER *ioc = ev->ioc;
1692 struct mptsas_phyinfo *phy_info; 2109 struct mptsas_phyinfo *phy_info;
1693 struct sas_rphy *rphy; 2110 struct sas_rphy *rphy;
2111 struct sas_port *port;
1694 struct scsi_device *sdev; 2112 struct scsi_device *sdev;
2113 struct scsi_target * starget;
1695 struct sas_identify identify; 2114 struct sas_identify identify;
1696 char *ds = NULL; 2115 char *ds = NULL;
1697 struct mptsas_devinfo sas_device; 2116 struct mptsas_devinfo sas_device;
1698 VirtTarget *vtarget; 2117 VirtTarget *vtarget;
2118 VirtDevice *vdevice;
1699 2119
1700 mutex_lock(&ioc->sas_discovery_mutex);
1701 2120
2121 mutex_lock(&ioc->sas_discovery_mutex);
1702 switch (ev->event_type) { 2122 switch (ev->event_type) {
1703 case MPTSAS_DEL_DEVICE: 2123 case MPTSAS_DEL_DEVICE:
1704 2124
@@ -1707,24 +2127,50 @@ mptsas_hotplug_work(void *arg)
1707 /* 2127 /*
1708 * Sanity checks, for non-existing phys and remote rphys. 2128 * Sanity checks, for non-existing phys and remote rphys.
1709 */ 2129 */
1710 if (!phy_info) 2130 if (!phy_info || !phy_info->port_details) {
2131 dfailprintk((MYIOC_s_ERR_FMT
2132 "%s: exit at line=%d\n", ioc->name,
2133 __FUNCTION__, __LINE__));
1711 break; 2134 break;
1712 if (!phy_info->rphy) 2135 }
2136 rphy = mptsas_get_rphy(phy_info);
2137 if (!rphy) {
2138 dfailprintk((MYIOC_s_ERR_FMT
2139 "%s: exit at line=%d\n", ioc->name,
2140 __FUNCTION__, __LINE__));
1713 break; 2141 break;
1714 if (phy_info->starget) { 2142 }
1715 vtarget = phy_info->starget->hostdata; 2143 port = mptsas_get_port(phy_info);
2144 if (!port) {
2145 dfailprintk((MYIOC_s_ERR_FMT
2146 "%s: exit at line=%d\n", ioc->name,
2147 __FUNCTION__, __LINE__));
2148 break;
2149 }
1716 2150
1717 if (!vtarget) 2151 starget = mptsas_get_starget(phy_info);
2152 if (starget) {
2153 vtarget = starget->hostdata;
2154
2155 if (!vtarget) {
2156 dfailprintk((MYIOC_s_ERR_FMT
2157 "%s: exit at line=%d\n", ioc->name,
2158 __FUNCTION__, __LINE__));
1718 break; 2159 break;
2160 }
2161
1719 /* 2162 /*
1720 * Handling RAID components 2163 * Handling RAID components
1721 */ 2164 */
1722 if (ev->phys_disk_num_valid) { 2165 if (ev->phys_disk_num_valid) {
1723 vtarget->target_id = ev->phys_disk_num; 2166 vtarget->target_id = ev->phys_disk_num;
1724 vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT; 2167 vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
1725 mptsas_reprobe_target(vtarget->starget, 1); 2168 mptsas_reprobe_target(starget, 1);
1726 break; 2169 break;
1727 } 2170 }
2171
2172 vtarget->deleted = 1;
2173 mptsas_target_reset(ioc, vtarget);
1728 } 2174 }
1729 2175
1730 if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_TARGET) 2176 if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_TARGET)
@@ -1738,10 +2184,11 @@ mptsas_hotplug_work(void *arg)
1738 "removing %s device, channel %d, id %d, phy %d\n", 2184 "removing %s device, channel %d, id %d, phy %d\n",
1739 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id); 2185 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
1740 2186
1741 sas_rphy_delete(phy_info->rphy); 2187#ifdef MPT_DEBUG_SAS_WIDE
1742 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); 2188 dev_printk(KERN_DEBUG, &port->dev, "delete\n");
1743 phy_info->rphy = NULL; 2189#endif
1744 phy_info->starget = NULL; 2190 sas_port_delete(port);
2191 mptsas_port_delete(phy_info->port_details);
1745 break; 2192 break;
1746 case MPTSAS_ADD_DEVICE: 2193 case MPTSAS_ADD_DEVICE:
1747 2194
@@ -1753,59 +2200,60 @@ mptsas_hotplug_work(void *arg)
1753 */ 2200 */
1754 if (mptsas_sas_device_pg0(ioc, &sas_device, 2201 if (mptsas_sas_device_pg0(ioc, &sas_device,
1755 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << 2202 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
1756 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), ev->id)) 2203 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), ev->id)) {
2204 dfailprintk((MYIOC_s_ERR_FMT
2205 "%s: exit at line=%d\n", ioc->name,
2206 __FUNCTION__, __LINE__));
1757 break; 2207 break;
2208 }
1758 2209
1759 phy_info = mptsas_find_phyinfo_by_parent(ioc, 2210 ssleep(2);
1760 sas_device.handle_parent, sas_device.phy_id); 2211 __mptsas_discovery_work(ioc);
1761 2212
1762 if (!phy_info) { 2213 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
1763 u32 handle = 0xFFFF; 2214 sas_device.sas_address);
1764 2215
1765 /* 2216 if (!phy_info || !phy_info->port_details) {
1766 * Its possible when an expander has been hot added 2217 dfailprintk((MYIOC_s_ERR_FMT
1767 * containing attached devices, the sas firmware 2218 "%s: exit at line=%d\n", ioc->name,
1768 * may send a RC_ADDED event prior to the 2219 __FUNCTION__, __LINE__));
1769 * DISCOVERY STOP event. If that occurs, our 2220 break;
1770 * view of the topology in the driver in respect to this
1771 * expander might of not been setup, and we hit this
1772 * condition.
1773 * Therefore, this code kicks off discovery to
1774 * refresh the data.
1775 * Then again, we check whether the parent phy has
1776 * been created.
1777 */
1778 ioc->sas_discovery_runtime=1;
1779 mptsas_delete_expander_phys(ioc);
1780 mptsas_probe_hba_phys(ioc);
1781 while (!mptsas_probe_expander_phys(ioc, &handle))
1782 ;
1783 ioc->sas_discovery_runtime=0;
1784
1785 phy_info = mptsas_find_phyinfo_by_parent(ioc,
1786 sas_device.handle_parent, sas_device.phy_id);
1787 if (!phy_info)
1788 break;
1789 } 2221 }
1790 2222
1791 if (phy_info->starget) { 2223 starget = mptsas_get_starget(phy_info);
1792 vtarget = phy_info->starget->hostdata; 2224 if (starget) {
2225 vtarget = starget->hostdata;
1793 2226
1794 if (!vtarget) 2227 if (!vtarget) {
2228 dfailprintk((MYIOC_s_ERR_FMT
2229 "%s: exit at line=%d\n", ioc->name,
2230 __FUNCTION__, __LINE__));
1795 break; 2231 break;
2232 }
1796 /* 2233 /*
1797 * Handling RAID components 2234 * Handling RAID components
1798 */ 2235 */
1799 if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) { 2236 if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
1800 vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT; 2237 vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
1801 vtarget->target_id = ev->id; 2238 vtarget->target_id = ev->id;
1802 mptsas_reprobe_target(phy_info->starget, 0); 2239 mptsas_reprobe_target(starget, 0);
1803 } 2240 }
1804 break; 2241 break;
1805 } 2242 }
1806 2243
1807 if (phy_info->rphy) 2244 if (mptsas_get_rphy(phy_info)) {
2245 dfailprintk((MYIOC_s_ERR_FMT
2246 "%s: exit at line=%d\n", ioc->name,
2247 __FUNCTION__, __LINE__));
1808 break; 2248 break;
2249 }
2250 port = mptsas_get_port(phy_info);
2251 if (!port) {
2252 dfailprintk((MYIOC_s_ERR_FMT
2253 "%s: exit at line=%d\n", ioc->name,
2254 __FUNCTION__, __LINE__));
2255 break;
2256 }
1809 2257
1810 memcpy(&phy_info->attached, &sas_device, 2258 memcpy(&phy_info->attached, &sas_device,
1811 sizeof(struct mptsas_devinfo)); 2259 sizeof(struct mptsas_devinfo));
@@ -1822,28 +2270,23 @@ mptsas_hotplug_work(void *arg)
1822 ioc->name, ds, ev->channel, ev->id, ev->phy_id); 2270 ioc->name, ds, ev->channel, ev->id, ev->phy_id);
1823 2271
1824 mptsas_parse_device_info(&identify, &phy_info->attached); 2272 mptsas_parse_device_info(&identify, &phy_info->attached);
1825 switch (identify.device_type) { 2273 rphy = sas_end_device_alloc(port);
1826 case SAS_END_DEVICE: 2274 if (!rphy) {
1827 rphy = sas_end_device_alloc(phy_info->phy); 2275 dfailprintk((MYIOC_s_ERR_FMT
1828 break; 2276 "%s: exit at line=%d\n", ioc->name,
1829 case SAS_EDGE_EXPANDER_DEVICE: 2277 __FUNCTION__, __LINE__));
1830 case SAS_FANOUT_EXPANDER_DEVICE:
1831 rphy = sas_expander_alloc(phy_info->phy, identify.device_type);
1832 break;
1833 default:
1834 rphy = NULL;
1835 break;
1836 }
1837 if (!rphy)
1838 break; /* non-fatal: an rphy can be added later */ 2278 break; /* non-fatal: an rphy can be added later */
2279 }
1839 2280
1840 rphy->identify = identify; 2281 rphy->identify = identify;
1841 if (sas_rphy_add(rphy)) { 2282 if (sas_rphy_add(rphy)) {
2283 dfailprintk((MYIOC_s_ERR_FMT
2284 "%s: exit at line=%d\n", ioc->name,
2285 __FUNCTION__, __LINE__));
1842 sas_rphy_free(rphy); 2286 sas_rphy_free(rphy);
1843 break; 2287 break;
1844 } 2288 }
1845 2289 mptsas_set_rphy(phy_info, rphy);
1846 phy_info->rphy = rphy;
1847 break; 2290 break;
1848 case MPTSAS_ADD_RAID: 2291 case MPTSAS_ADD_RAID:
1849 sdev = scsi_device_lookup( 2292 sdev = scsi_device_lookup(
@@ -1875,6 +2318,9 @@ mptsas_hotplug_work(void *arg)
1875 printk(MYIOC_s_INFO_FMT 2318 printk(MYIOC_s_INFO_FMT
1876 "removing raid volume, channel %d, id %d\n", 2319 "removing raid volume, channel %d, id %d\n",
1877 ioc->name, ioc->num_ports, ev->id); 2320 ioc->name, ioc->num_ports, ev->id);
2321 vdevice = sdev->hostdata;
2322 vdevice->vtarget->deleted = 1;
2323 mptsas_target_reset(ioc, vdevice->vtarget);
1878 scsi_remove_device(sdev); 2324 scsi_remove_device(sdev);
1879 scsi_device_put(sdev); 2325 scsi_device_put(sdev);
1880 mpt_findImVolumes(ioc); 2326 mpt_findImVolumes(ioc);
@@ -1884,12 +2330,13 @@ mptsas_hotplug_work(void *arg)
1884 break; 2330 break;
1885 } 2331 }
1886 2332
1887 kfree(ev);
1888 mutex_unlock(&ioc->sas_discovery_mutex); 2333 mutex_unlock(&ioc->sas_discovery_mutex);
2334 kfree(ev);
2335
1889} 2336}
1890 2337
1891static void 2338static void
1892mptscsih_send_sas_event(MPT_ADAPTER *ioc, 2339mptsas_send_sas_event(MPT_ADAPTER *ioc,
1893 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data) 2340 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
1894{ 2341{
1895 struct mptsas_hotplug_event *ev; 2342 struct mptsas_hotplug_event *ev;
@@ -1905,7 +2352,7 @@ mptscsih_send_sas_event(MPT_ADAPTER *ioc,
1905 switch (sas_event_data->ReasonCode) { 2352 switch (sas_event_data->ReasonCode) {
1906 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 2353 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
1907 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 2354 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
1908 ev = kmalloc(sizeof(*ev), GFP_ATOMIC); 2355 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
1909 if (!ev) { 2356 if (!ev) {
1910 printk(KERN_WARNING "mptsas: lost hotplug event\n"); 2357 printk(KERN_WARNING "mptsas: lost hotplug event\n");
1911 break; 2358 break;
@@ -1935,10 +2382,9 @@ mptscsih_send_sas_event(MPT_ADAPTER *ioc,
1935 /* 2382 /*
1936 * Persistent table is full. 2383 * Persistent table is full.
1937 */ 2384 */
1938 INIT_WORK(&ioc->mptscsih_persistTask, 2385 INIT_WORK(&ioc->sas_persist_task,
1939 mptscsih_sas_persist_clear_table, 2386 mptsas_persist_clear_table, (void *)ioc);
1940 (void *)ioc); 2387 schedule_work(&ioc->sas_persist_task);
1941 schedule_work(&ioc->mptscsih_persistTask);
1942 break; 2388 break;
1943 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 2389 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
1944 /* TODO */ 2390 /* TODO */
@@ -1950,7 +2396,7 @@ mptscsih_send_sas_event(MPT_ADAPTER *ioc,
1950} 2396}
1951 2397
1952static void 2398static void
1953mptscsih_send_raid_event(MPT_ADAPTER *ioc, 2399mptsas_send_raid_event(MPT_ADAPTER *ioc,
1954 EVENT_DATA_RAID *raid_event_data) 2400 EVENT_DATA_RAID *raid_event_data)
1955{ 2401{
1956 struct mptsas_hotplug_event *ev; 2402 struct mptsas_hotplug_event *ev;
@@ -1960,13 +2406,12 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
1960 if (ioc->bus_type != SAS) 2406 if (ioc->bus_type != SAS)
1961 return; 2407 return;
1962 2408
1963 ev = kmalloc(sizeof(*ev), GFP_ATOMIC); 2409 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
1964 if (!ev) { 2410 if (!ev) {
1965 printk(KERN_WARNING "mptsas: lost hotplug event\n"); 2411 printk(KERN_WARNING "mptsas: lost hotplug event\n");
1966 return; 2412 return;
1967 } 2413 }
1968 2414
1969 memset(ev,0,sizeof(struct mptsas_hotplug_event));
1970 INIT_WORK(&ev->work, mptsas_hotplug_work, ev); 2415 INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
1971 ev->ioc = ioc; 2416 ev->ioc = ioc;
1972 ev->id = raid_event_data->VolumeID; 2417 ev->id = raid_event_data->VolumeID;
@@ -2028,7 +2473,7 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
2028} 2473}
2029 2474
2030static void 2475static void
2031mptscsih_send_discovery(MPT_ADAPTER *ioc, 2476mptsas_send_discovery_event(MPT_ADAPTER *ioc,
2032 EVENT_DATA_SAS_DISCOVERY *discovery_data) 2477 EVENT_DATA_SAS_DISCOVERY *discovery_data)
2033{ 2478{
2034 struct mptsas_discovery_event *ev; 2479 struct mptsas_discovery_event *ev;
@@ -2043,11 +2488,10 @@ mptscsih_send_discovery(MPT_ADAPTER *ioc,
2043 if (discovery_data->DiscoveryStatus) 2488 if (discovery_data->DiscoveryStatus)
2044 return; 2489 return;
2045 2490
2046 ev = kmalloc(sizeof(*ev), GFP_ATOMIC); 2491 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2047 if (!ev) 2492 if (!ev)
2048 return; 2493 return;
2049 memset(ev,0,sizeof(struct mptsas_discovery_event)); 2494 INIT_WORK(&ev->work, mptsas_discovery_work, ev);
2050 INIT_WORK(&ev->work, mptscsih_discovery_work, ev);
2051 ev->ioc = ioc; 2495 ev->ioc = ioc;
2052 schedule_work(&ev->work); 2496 schedule_work(&ev->work);
2053}; 2497};
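
The event allocations switch from kmalloc()+memset() to kzalloc(), which returns zeroed memory in one call. The userland analogue is replacing malloc()+memset() with calloc():

    /* Userland analogue of kmalloc()+memset() -> kzalloc(). */
    #include <stdlib.h>
    #include <string.h>

    struct ev_sketch { int id; int event_type; };

    int main(void)
    {
            /* before: allocate, then zero by hand */
            struct ev_sketch *a = malloc(sizeof(*a));
            if (a)
                    memset(a, 0, sizeof(*a));

            /* after: one call, already zeroed */
            struct ev_sketch *b = calloc(1, sizeof(*b));

            free(a);
            free(b);
            return 0;
    }
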
@@ -2075,21 +2519,21 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
2075 2519
2076 switch (event) { 2520 switch (event) {
2077 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2521 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2078 mptscsih_send_sas_event(ioc, 2522 mptsas_send_sas_event(ioc,
2079 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data); 2523 (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data);
2080 break; 2524 break;
2081 case MPI_EVENT_INTEGRATED_RAID: 2525 case MPI_EVENT_INTEGRATED_RAID:
2082 mptscsih_send_raid_event(ioc, 2526 mptsas_send_raid_event(ioc,
2083 (EVENT_DATA_RAID *)reply->Data); 2527 (EVENT_DATA_RAID *)reply->Data);
2084 break; 2528 break;
2085 case MPI_EVENT_PERSISTENT_TABLE_FULL: 2529 case MPI_EVENT_PERSISTENT_TABLE_FULL:
2086 INIT_WORK(&ioc->mptscsih_persistTask, 2530 INIT_WORK(&ioc->sas_persist_task,
2087 mptscsih_sas_persist_clear_table, 2531 mptsas_persist_clear_table,
2088 (void *)ioc); 2532 (void *)ioc);
2089 schedule_work(&ioc->mptscsih_persistTask); 2533 schedule_work(&ioc->sas_persist_task);
2090 break; 2534 break;
2091 case MPI_EVENT_SAS_DISCOVERY: 2535 case MPI_EVENT_SAS_DISCOVERY:
2092 mptscsih_send_discovery(ioc, 2536 mptsas_send_discovery_event(ioc,
2093 (EVENT_DATA_SAS_DISCOVERY *)reply->Data); 2537 (EVENT_DATA_SAS_DISCOVERY *)reply->Data);
2094 break; 2538 break;
2095 default: 2539 default:
@@ -2308,7 +2752,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2308 2752
2309 return 0; 2753 return 0;
2310 2754
2311out_mptsas_probe: 2755 out_mptsas_probe:
2312 2756
2313 mptscsih_remove(pdev); 2757 mptscsih_remove(pdev);
2314 return error; 2758 return error;
@@ -2318,6 +2762,7 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
2318{ 2762{
2319 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 2763 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2320 struct mptsas_portinfo *p, *n; 2764 struct mptsas_portinfo *p, *n;
2765 int i;
2321 2766
2322 ioc->sas_discovery_ignore_events=1; 2767 ioc->sas_discovery_ignore_events=1;
2323 sas_remove_host(ioc->sh); 2768 sas_remove_host(ioc->sh);
@@ -2325,8 +2770,9 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
2325 mutex_lock(&ioc->sas_topology_mutex); 2770 mutex_lock(&ioc->sas_topology_mutex);
2326 list_for_each_entry_safe(p, n, &ioc->sas_topology, list) { 2771 list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
2327 list_del(&p->list); 2772 list_del(&p->list);
2328 if (p->phy_info) 2773 for (i = 0 ; i < p->num_phys ; i++)
2329 kfree(p->phy_info); 2774 mptsas_port_delete(p->phy_info[i].port_details);
2775 kfree(p->phy_info);
2330 kfree(p); 2776 kfree(p);
2331 } 2777 }
2332 mutex_unlock(&ioc->sas_topology_mutex); 2778 mutex_unlock(&ioc->sas_topology_mutex);
@@ -2335,17 +2781,15 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
2335} 2781}
2336 2782
2337static struct pci_device_id mptsas_pci_table[] = { 2783static struct pci_device_id mptsas_pci_table[] = {
2338 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064, 2784 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064,
2339 PCI_ANY_ID, PCI_ANY_ID },
2340 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066,
2341 PCI_ANY_ID, PCI_ANY_ID }, 2785 PCI_ANY_ID, PCI_ANY_ID },
2342 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068, 2786 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068,
2343 PCI_ANY_ID, PCI_ANY_ID }, 2787 PCI_ANY_ID, PCI_ANY_ID },
2344 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064E, 2788 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064E,
2345 PCI_ANY_ID, PCI_ANY_ID }, 2789 PCI_ANY_ID, PCI_ANY_ID },
2346 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066E, 2790 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068E,
2347 PCI_ANY_ID, PCI_ANY_ID }, 2791 PCI_ANY_ID, PCI_ANY_ID },
2348 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068E, 2792 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
2349 PCI_ANY_ID, PCI_ANY_ID }, 2793 PCI_ANY_ID, PCI_ANY_ID },
2350 {0} /* Terminating entry */ 2794 {0} /* Terminating entry */
2351}; 2795};
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 3201de053943..0a1ff762205f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -775,9 +775,9 @@ static struct spi_function_template mptspi_transport_functions = {
775 */ 775 */
776 776
777static struct pci_device_id mptspi_pci_table[] = { 777static struct pci_device_id mptspi_pci_table[] = {
778 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1030, 778 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_53C1030,
779 PCI_ANY_ID, PCI_ANY_ID }, 779 PCI_ANY_ID, PCI_ANY_ID },
780 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_1030_53C1035, 780 { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_53C1035,
781 PCI_ANY_ID, PCI_ANY_ID }, 781 PCI_ANY_ID, PCI_ANY_ID },
782 {0} /* Terminating entry */ 782 {0} /* Terminating entry */
783}; 783};
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 247ff2f23ac9..33525bdf2ab6 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -128,7 +128,7 @@ static void mmc_wait_done(struct mmc_request *mrq)
128 128
129int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) 129int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
130{ 130{
131 DECLARE_COMPLETION(complete); 131 DECLARE_COMPLETION_ONSTACK(complete);
132 132
133 mrq->done_data = &complete; 133 mrq->done_data = &complete;
134 mrq->done = mmc_wait_done; 134 mrq->done = mmc_wait_done;
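
DECLARE_COMPLETION_ONSTACK exists so a completion living on the stack is initialized at run time, giving lockdep a valid per-instance lock class; the plain DECLARE_COMPLETION static initializer is meant for file-scope objects. A loose userland sketch of the static-init versus run-time-init distinction (the struct and helper are stand-ins, not the kernel's completion API):

    /* Loose sketch of static versus run-time initialization. */
    #include <stdio.h>

    struct completion_sketch { int done; };

    /* file-scope object: a static initializer is fine */
    static struct completion_sketch global_done = { 0 };

    /* stack object: initialize at run time, the analogue of what
     * DECLARE_COMPLETION_ONSTACK arranges for lockdep */
    static void init_completion_sketch(struct completion_sketch *x)
    {
            x->done = 0;
    }

    int main(void)
    {
            struct completion_sketch local;

            init_completion_sketch(&local);
            printf("%d %d\n", global_done.done, local.done);
            return 0;
    }
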
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index c54e40464d82..603a7951ac9b 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -55,10 +55,6 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
55 size_t *retlen, u_char *buf); 55 size_t *retlen, u_char *buf);
56static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, 56static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
57 size_t *retlen, const u_char *buf); 57 size_t *retlen, const u_char *buf);
58static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
59 size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
60static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
61 size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
62static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, 58static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
63 struct mtd_oob_ops *ops); 59 struct mtd_oob_ops *ops);
64static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, 60static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
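
doc_read_ecc()/doc_write_ecc() disappear here because their only remaining callers were the doc_read()/doc_write() wrappers; the hunks that follow fold the bodies in, always prime the ECC engine, and make eccbuf a local. A generic sketch of this wrapper-folding refactor (names are illustrative):

    /* Generic sketch: a wrapper that only forwarded to the full
     * function is removed and the body folded in, with the formerly
     * caller-supplied ECC buffer made local. */
    #include <string.h>

    static int doc_read_sketch(const unsigned char *src, unsigned char *dst,
                               size_t len)
    {
            unsigned char eccbuf[6];           /* was a parameter of the _ecc variant */

            memcpy(dst, src, len);             /* stand-in for the device transfer */
            memset(eccbuf, 0, sizeof(eccbuf)); /* ECC path always runs now */
            return 0;
    }

    int main(void)
    {
            unsigned char in[4] = "abc", out[4];

            return doc_read_sketch(in, out, sizeof(in));
    }
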
@@ -615,17 +611,10 @@ EXPORT_SYMBOL_GPL(DoC2k_init);
615static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, 611static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
616 size_t * retlen, u_char * buf) 612 size_t * retlen, u_char * buf)
617{ 613{
618 /* Just a special case of doc_read_ecc */
619 return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
620}
621
622static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
623 size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel)
624{
625 struct DiskOnChip *this = mtd->priv; 614 struct DiskOnChip *this = mtd->priv;
626 void __iomem *docptr = this->virtadr; 615 void __iomem *docptr = this->virtadr;
627 struct Nand *mychip; 616 struct Nand *mychip;
628 unsigned char syndrome[6]; 617 unsigned char syndrome[6], eccbuf[6];
629 volatile char dummy; 618 volatile char dummy;
630 int i, len256 = 0, ret=0; 619 int i, len256 = 0, ret=0;
631 size_t left = len; 620 size_t left = len;
@@ -673,15 +662,9 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
673 DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP, 662 DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
674 CDSN_CTRL_ECC_IO); 663 CDSN_CTRL_ECC_IO);
675 664
676 if (eccbuf) { 665 /* Prime the ECC engine */
677 /* Prime the ECC engine */ 666 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
678 WriteDOC(DOC_ECC_RESET, docptr, ECCConf); 667 WriteDOC(DOC_ECC_EN, docptr, ECCConf);
679 WriteDOC(DOC_ECC_EN, docptr, ECCConf);
680 } else {
681 /* disable the ECC engine */
682 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
683 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
684 }
685 668
686 /* treat crossing 256-byte sector for 2M x 8bits devices */ 669 /* treat crossing 256-byte sector for 2M x 8bits devices */
687 if (this->page256 && from + len > (from | 0xff) + 1) { 670 if (this->page256 && from + len > (from | 0xff) + 1) {
@@ -698,58 +681,59 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
698 /* Let the caller know we completed it */ 681 /* Let the caller know we completed it */
699 *retlen += len; 682 *retlen += len;
700 683
701 if (eccbuf) { 684 /* Read the ECC data through the DiskOnChip ECC logic */
702 /* Read the ECC data through the DiskOnChip ECC logic */ 685 /* Note: this will work even with 2M x 8bit devices as */
703 /* Note: this will work even with 2M x 8bit devices as */ 686 /* they have 8 bytes of OOB per 256 page. mf. */
704 /* they have 8 bytes of OOB per 256 page. mf. */ 687 DoC_ReadBuf(this, eccbuf, 6);
705 DoC_ReadBuf(this, eccbuf, 6);
706
707 /* Flush the pipeline */
708 if (DoC_is_Millennium(this)) {
709 dummy = ReadDOC(docptr, ECCConf);
710 dummy = ReadDOC(docptr, ECCConf);
711 i = ReadDOC(docptr, ECCConf);
712 } else {
713 dummy = ReadDOC(docptr, 2k_ECCStatus);
714 dummy = ReadDOC(docptr, 2k_ECCStatus);
715 i = ReadDOC(docptr, 2k_ECCStatus);
716 }
717 688
718 /* Check the ECC Status */ 689 /* Flush the pipeline */
719 if (i & 0x80) { 690 if (DoC_is_Millennium(this)) {
720 int nb_errors; 691 dummy = ReadDOC(docptr, ECCConf);
721 /* There was an ECC error */ 692 dummy = ReadDOC(docptr, ECCConf);
693 i = ReadDOC(docptr, ECCConf);
694 } else {
695 dummy = ReadDOC(docptr, 2k_ECCStatus);
696 dummy = ReadDOC(docptr, 2k_ECCStatus);
697 i = ReadDOC(docptr, 2k_ECCStatus);
698 }
699
700 /* Check the ECC Status */
701 if (i & 0x80) {
702 int nb_errors;
703 /* There was an ECC error */
722#ifdef ECC_DEBUG 704#ifdef ECC_DEBUG
723 printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from); 705 printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
724#endif 706#endif
725 /* Read the ECC syndrom through the DiskOnChip ECC logic. 707 /* Read the ECC syndrom through the DiskOnChip ECC
726 These syndrome will be all ZERO when there is no error */ 708 logic. These syndrome will be all ZERO when there
727 for (i = 0; i < 6; i++) { 709 is no error */
728 syndrome[i] = 710 for (i = 0; i < 6; i++) {
729 ReadDOC(docptr, ECCSyndrome0 + i); 711 syndrome[i] =
730 } 712 ReadDOC(docptr, ECCSyndrome0 + i);
731 nb_errors = doc_decode_ecc(buf, syndrome); 713 }
714 nb_errors = doc_decode_ecc(buf, syndrome);
732 715
733#ifdef ECC_DEBUG 716#ifdef ECC_DEBUG
734 printk(KERN_ERR "Errors corrected: %x\n", nb_errors); 717 printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
735#endif 718#endif
736 if (nb_errors < 0) { 719 if (nb_errors < 0) {
737 /* We return error, but have actually done the read. Not that 720 /* We return error, but have actually done the
738 this can be told to user-space, via sys_read(), but at least 721 read. Not that this can be told to
739 MTD-aware stuff can know about it by checking *retlen */ 722 user-space, via sys_read(), but at least
740 ret = -EIO; 723 MTD-aware stuff can know about it by
741 } 724 checking *retlen */
725 ret = -EIO;
742 } 726 }
727 }
743 728
744#ifdef PSYCHO_DEBUG 729#ifdef PSYCHO_DEBUG
745 printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 730 printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
746 (long)from, eccbuf[0], eccbuf[1], eccbuf[2], 731 (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
747 eccbuf[3], eccbuf[4], eccbuf[5]); 732 eccbuf[3], eccbuf[4], eccbuf[5]);
748#endif 733#endif
749 734
750 /* disable the ECC engine */ 735 /* disable the ECC engine */
751 WriteDOC(DOC_ECC_DIS, docptr , ECCConf); 736 WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
752 }
753 737
754 /* according to 11.4.1, we need to wait for the busy line 738 /* according to 11.4.1, we need to wait for the busy line
755 * drop if we read to the end of the page. */ 739 * drop if we read to the end of the page. */
@@ -771,17 +755,10 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
771static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, 755static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
772 size_t * retlen, const u_char * buf) 756 size_t * retlen, const u_char * buf)
773{ 757{
774 char eccbuf[6];
775 return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
776}
777
778static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
779 size_t * retlen, const u_char * buf,
780 u_char * eccbuf, struct nand_oobinfo *oobsel)
781{
782 struct DiskOnChip *this = mtd->priv; 758 struct DiskOnChip *this = mtd->priv;
783 int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */ 759 int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */
784 void __iomem *docptr = this->virtadr; 760 void __iomem *docptr = this->virtadr;
761 unsigned char eccbuf[6];
785 volatile char dummy; 762 volatile char dummy;
786 int len256 = 0; 763 int len256 = 0;
787 struct Nand *mychip; 764 struct Nand *mychip;
@@ -835,15 +812,9 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
835 DoC_Command(this, NAND_CMD_SEQIN, 0); 812 DoC_Command(this, NAND_CMD_SEQIN, 0);
836 DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO); 813 DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
837 814
838 if (eccbuf) { 815 /* Prime the ECC engine */
839 /* Prime the ECC engine */ 816 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
840 WriteDOC(DOC_ECC_RESET, docptr, ECCConf); 817 WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
841 WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
842 } else {
843 /* disable the ECC engine */
844 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
845 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
846 }
847 818
848 /* treat crossing 256-byte sector for 2M x 8bits devices */ 819 /* treat crossing 256-byte sector for 2M x 8bits devices */
849 if (this->page256 && to + len > (to | 0xff) + 1) { 820 if (this->page256 && to + len > (to | 0xff) + 1) {
@@ -873,39 +844,35 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
873 844
874 DoC_WriteBuf(this, &buf[len256], len - len256); 845 DoC_WriteBuf(this, &buf[len256], len - len256);
875 846
876 if (eccbuf) { 847 WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr, CDSNControl);
877 WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr,
878 CDSNControl);
879
880 if (DoC_is_Millennium(this)) {
881 WriteDOC(0, docptr, NOP);
882 WriteDOC(0, docptr, NOP);
883 WriteDOC(0, docptr, NOP);
884 } else {
885 WriteDOC_(0, docptr, this->ioreg);
886 WriteDOC_(0, docptr, this->ioreg);
887 WriteDOC_(0, docptr, this->ioreg);
888 }
889 848
890 WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr, 849 if (DoC_is_Millennium(this)) {
891 CDSNControl); 850 WriteDOC(0, docptr, NOP);
851 WriteDOC(0, docptr, NOP);
852 WriteDOC(0, docptr, NOP);
853 } else {
854 WriteDOC_(0, docptr, this->ioreg);
855 WriteDOC_(0, docptr, this->ioreg);
856 WriteDOC_(0, docptr, this->ioreg);
857 }
892 858
893 /* Read the ECC data through the DiskOnChip ECC logic */ 859 WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr,
894 for (di = 0; di < 6; di++) { 860 CDSNControl);
895 eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
896 }
897 861
898 /* Reset the ECC engine */ 862 /* Read the ECC data through the DiskOnChip ECC logic */
899 WriteDOC(DOC_ECC_DIS, docptr, ECCConf); 863 for (di = 0; di < 6; di++) {
864 eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
865 }
866
867 /* Reset the ECC engine */
868 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
900 869
901#ifdef PSYCHO_DEBUG 870#ifdef PSYCHO_DEBUG
902 printk 871 printk
903 ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 872 ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
904 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], 873 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
905 eccbuf[4], eccbuf[5]); 874 eccbuf[4], eccbuf[5]);
906#endif 875#endif
907 }
908
909 DoC_Command(this, NAND_CMD_PAGEPROG, 0); 876 DoC_Command(this, NAND_CMD_PAGEPROG, 0);
910 877
911 DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); 878 DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 0cf022a69e65..0e2a9326f717 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -37,12 +37,6 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
37 size_t *retlen, u_char *buf); 37 size_t *retlen, u_char *buf);
38static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, 38static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39 size_t *retlen, const u_char *buf); 39 size_t *retlen, const u_char *buf);
40static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
41 size_t *retlen, u_char *buf, u_char *eccbuf,
42 struct nand_oobinfo *oobsel);
43static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
44 size_t *retlen, const u_char *buf, u_char *eccbuf,
45 struct nand_oobinfo *oobsel);
46static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, 40static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
47 struct mtd_oob_ops *ops); 41 struct mtd_oob_ops *ops);
48static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, 42static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
@@ -397,17 +391,9 @@ EXPORT_SYMBOL_GPL(DoCMil_init);
397static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, 391static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
398 size_t *retlen, u_char *buf) 392 size_t *retlen, u_char *buf)
399{ 393{
400 /* Just a special case of doc_read_ecc */
401 return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
402}
403
404static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
405 size_t *retlen, u_char *buf, u_char *eccbuf,
406 struct nand_oobinfo *oobsel)
407{
408 int i, ret; 394 int i, ret;
409 volatile char dummy; 395 volatile char dummy;
410 unsigned char syndrome[6]; 396 unsigned char syndrome[6], eccbuf[6];
411 struct DiskOnChip *this = mtd->priv; 397 struct DiskOnChip *this = mtd->priv;
412 void __iomem *docptr = this->virtadr; 398 void __iomem *docptr = this->virtadr;
413 struct Nand *mychip = &this->chips[from >> (this->chipshift)]; 399 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
@@ -437,15 +423,9 @@ static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
437 DoC_Address(docptr, 3, from, CDSN_CTRL_WP, 0x00); 423 DoC_Address(docptr, 3, from, CDSN_CTRL_WP, 0x00);
438 DoC_WaitReady(docptr); 424 DoC_WaitReady(docptr);
439 425
440 if (eccbuf) { 426 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
441 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ 427 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
442 WriteDOC (DOC_ECC_RESET, docptr, ECCConf); 428 WriteDOC (DOC_ECC_EN, docptr, ECCConf);
443 WriteDOC (DOC_ECC_EN, docptr, ECCConf);
444 } else {
445 /* disable the ECC engine */
446 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
447 WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
448 }
449 429
450 /* Read the data via the internal pipeline through CDSN IO register, 430 /* Read the data via the internal pipeline through CDSN IO register,
451 see Pipelined Read Operations 11.3 */ 431 see Pipelined Read Operations 11.3 */
@@ -465,58 +445,56 @@ static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
465 *retlen = len; 445 *retlen = len;
466 ret = 0; 446 ret = 0;
467 447
468 if (eccbuf) { 448 /* Read the ECC data from Spare Data Area,
469 /* Read the ECC data from Spare Data Area, 449 see Reed-Solomon EDC/ECC 11.1 */
470 see Reed-Solomon EDC/ECC 11.1 */ 450 dummy = ReadDOC(docptr, ReadPipeInit);
471 dummy = ReadDOC(docptr, ReadPipeInit);
472#ifndef USE_MEMCPY 451#ifndef USE_MEMCPY
473 for (i = 0; i < 5; i++) { 452 for (i = 0; i < 5; i++) {
474 /* N.B. you have to increase the source address in this way or the 453 /* N.B. you have to increase the source address in this way or the
475 ECC logic will not work properly */ 454 ECC logic will not work properly */
476 eccbuf[i] = ReadDOC(docptr, Mil_CDSN_IO + i); 455 eccbuf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
477 } 456 }
478#else 457#else
479 memcpy_fromio(eccbuf, docptr + DoC_Mil_CDSN_IO, 5); 458 memcpy_fromio(eccbuf, docptr + DoC_Mil_CDSN_IO, 5);
480#endif 459#endif
481 eccbuf[5] = ReadDOC(docptr, LastDataRead); 460 eccbuf[5] = ReadDOC(docptr, LastDataRead);
482 461
483 /* Flush the pipeline */ 462 /* Flush the pipeline */
484 dummy = ReadDOC(docptr, ECCConf); 463 dummy = ReadDOC(docptr, ECCConf);
485 dummy = ReadDOC(docptr, ECCConf); 464 dummy = ReadDOC(docptr, ECCConf);
486 465
487 /* Check the ECC Status */ 466 /* Check the ECC Status */
488 if (ReadDOC(docptr, ECCConf) & 0x80) { 467 if (ReadDOC(docptr, ECCConf) & 0x80) {
489 int nb_errors; 468 int nb_errors;
490 /* There was an ECC error */ 469 /* There was an ECC error */
491#ifdef ECC_DEBUG 470#ifdef ECC_DEBUG
492 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); 471 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
493#endif 472#endif
494 /* Read the ECC syndrome through the DiskOnChip ECC logic. 473 /* Read the ECC syndrome through the DiskOnChip ECC logic.
495 These syndromes will be all ZERO when there is no error */ 474 These syndromes will be all ZERO when there is no error */
496 for (i = 0; i < 6; i++) { 475 for (i = 0; i < 6; i++) {
497 syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i); 476 syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
498 } 477 }
499 nb_errors = doc_decode_ecc(buf, syndrome); 478 nb_errors = doc_decode_ecc(buf, syndrome);
500#ifdef ECC_DEBUG 479#ifdef ECC_DEBUG
501 printk("ECC Errors corrected: %x\n", nb_errors); 480 printk("ECC Errors corrected: %x\n", nb_errors);
502#endif 481#endif
503 if (nb_errors < 0) { 482 if (nb_errors < 0) {
504 /* We return error, but have actually done the read. Not that 483 /* We return error, but have actually done the read. Not that
505 this can be told to user-space, via sys_read(), but at least 484 this can be told to user-space, via sys_read(), but at least
506 MTD-aware stuff can know about it by checking *retlen */ 485 MTD-aware stuff can know about it by checking *retlen */
507 ret = -EIO; 486 ret = -EIO;
508 }
509 } 487 }
488 }
510 489
511#ifdef PSYCHO_DEBUG 490#ifdef PSYCHO_DEBUG
512 printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 491 printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
513 (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], 492 (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
514 eccbuf[4], eccbuf[5]); 493 eccbuf[4], eccbuf[5]);
515#endif 494#endif
516 495
517 /* disable the ECC engine */ 496 /* disable the ECC engine */
518 WriteDOC(DOC_ECC_DIS, docptr , ECCConf); 497 WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
519 }
520 498
521 return ret; 499 return ret;
522} 500}
@@ -524,15 +502,8 @@ static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
524static int doc_write (struct mtd_info *mtd, loff_t to, size_t len, 502static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
525 size_t *retlen, const u_char *buf) 503 size_t *retlen, const u_char *buf)
526{ 504{
527 char eccbuf[6];
528 return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
529}
530
531static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
532 size_t *retlen, const u_char *buf, u_char *eccbuf,
533 struct nand_oobinfo *oobsel)
534{
535 int i,ret = 0; 505 int i,ret = 0;
506 char eccbuf[6];
536 volatile char dummy; 507 volatile char dummy;
537 struct DiskOnChip *this = mtd->priv; 508 struct DiskOnChip *this = mtd->priv;
538 void __iomem *docptr = this->virtadr; 509 void __iomem *docptr = this->virtadr;
@@ -573,15 +544,9 @@ static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
573 DoC_Address(docptr, 3, to, 0x00, 0x00); 544 DoC_Address(docptr, 3, to, 0x00, 0x00);
574 DoC_WaitReady(docptr); 545 DoC_WaitReady(docptr);
575 546
576 if (eccbuf) { 547 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
577 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ 548 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
578 WriteDOC (DOC_ECC_RESET, docptr, ECCConf); 549 WriteDOC (DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
579 WriteDOC (DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
580 } else {
581 /* disable the ECC engine */
582 WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
583 WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
584 }
585 550
586 /* Write the data via the internal pipeline through CDSN IO register, 551 /* Write the data via the internal pipeline through CDSN IO register,
587 see Pipelined Write Operations 11.2 */ 552 see Pipelined Write Operations 11.2 */
@@ -596,46 +561,44 @@ static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
596#endif 561#endif
597 WriteDOC(0x00, docptr, WritePipeTerm); 562 WriteDOC(0x00, docptr, WritePipeTerm);
598 563
599 if (eccbuf) { 564 /* Write ECC data to flash, the ECC info is generated by the DiskOnChip ECC logic
600 /* Write ECC data to flash, the ECC info is generated by the DiskOnChip ECC logic 565 see Reed-Solomon EDC/ECC 11.1 */
601 see Reed-Solomon EDC/ECC 11.1 */ 566 WriteDOC(0, docptr, NOP);
602 WriteDOC(0, docptr, NOP); 567 WriteDOC(0, docptr, NOP);
603 WriteDOC(0, docptr, NOP); 568 WriteDOC(0, docptr, NOP);
604 WriteDOC(0, docptr, NOP);
605 569
606 /* Read the ECC data through the DiskOnChip ECC logic */ 570 /* Read the ECC data through the DiskOnChip ECC logic */
607 for (i = 0; i < 6; i++) { 571 for (i = 0; i < 6; i++) {
608 eccbuf[i] = ReadDOC(docptr, ECCSyndrome0 + i); 572 eccbuf[i] = ReadDOC(docptr, ECCSyndrome0 + i);
609 } 573 }
610 574
611 /* disable the ECC engine */ 575 /* disable the ECC engine */
612 WriteDOC(DOC_ECC_DIS, docptr , ECCConf); 576 WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
613 577
614#ifndef USE_MEMCPY 578#ifndef USE_MEMCPY
615 /* Write the ECC data to flash */ 579 /* Write the ECC data to flash */
616 for (i = 0; i < 6; i++) { 580 for (i = 0; i < 6; i++) {
617 /* N.B. you have to increase the source address in this way or the 581 /* N.B. you have to increase the source address in this way or the
618 ECC logic will not work properly */ 582 ECC logic will not work properly */
619 WriteDOC(eccbuf[i], docptr, Mil_CDSN_IO + i); 583 WriteDOC(eccbuf[i], docptr, Mil_CDSN_IO + i);
620 } 584 }
621#else 585#else
622 memcpy_toio(docptr + DoC_Mil_CDSN_IO, eccbuf, 6); 586 memcpy_toio(docptr + DoC_Mil_CDSN_IO, eccbuf, 6);
623#endif 587#endif
624 588
625 /* write the block status BLOCK_USED (0x5555) at the end of ECC data 589 /* write the block status BLOCK_USED (0x5555) at the end of ECC data
626 FIXME: this is only a hack for programming the IPL area for LinuxBIOS 590 FIXME: this is only a hack for programming the IPL area for LinuxBIOS
627 and should be replaced with proper code in user-space utilities */ 591 and should be replaced with proper code in user-space utilities */
628 WriteDOC(0x55, docptr, Mil_CDSN_IO); 592 WriteDOC(0x55, docptr, Mil_CDSN_IO);
629 WriteDOC(0x55, docptr, Mil_CDSN_IO + 1); 593 WriteDOC(0x55, docptr, Mil_CDSN_IO + 1);
630 594
631 WriteDOC(0x00, docptr, WritePipeTerm); 595 WriteDOC(0x00, docptr, WritePipeTerm);
632 596
633#ifdef PSYCHO_DEBUG 597#ifdef PSYCHO_DEBUG
634 printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 598 printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
635 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], 599 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
636 eccbuf[4], eccbuf[5]); 600 eccbuf[4], eccbuf[5]);
637#endif 601#endif
638 }
639 602
640 /* Commit the Page Program command and wait for ready 603 /* Commit the Page Program command and wait for ready
641 see Software Requirement 11.4 item 1.*/ 604 see Software Requirement 11.4 item 1.*/
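With doc_read_ecc() folded away, the Millennium read path always runs the ECC engine against an on-stack buffer. Below is a condensed, illustrative sketch of the resulting flow, reusing the register helpers from the driver above; the pipelined-read details and early error paths are trimmed, and the function name is hypothetical:

static int doc_read_sketch(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct DiskOnChip *this = mtd->priv;
	void __iomem *docptr = this->virtadr;
	unsigned char syndrome[6], eccbuf[6];	/* always on the stack now */
	int i, ret = 0;

	/* engine is enabled unconditionally -- no more eccbuf == NULL leg */
	WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
	WriteDOC(DOC_ECC_EN, docptr, ECCConf);

	/* ... pipelined read of len data bytes into buf, then the six
	   spare-area ECC bytes into eccbuf (see the hunk above) ... */

	if (ReadDOC(docptr, ECCConf) & 0x80) {	/* engine flags an error */
		for (i = 0; i < 6; i++)
			syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
		if (doc_decode_ecc(buf, syndrome) < 0)
			ret = -EIO;		/* uncorrectable */
	}
	WriteDOC(DOC_ECC_DIS, docptr, ECCConf);	/* engine off again */
	*retlen = len;
	return ret;
}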
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 66cb1e50469a..92dbb47f2ac3 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -41,12 +41,6 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
41 size_t *retlen, u_char *buf); 41 size_t *retlen, u_char *buf);
42static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, 42static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
43 size_t *retlen, const u_char *buf); 43 size_t *retlen, const u_char *buf);
44static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
45 size_t *retlen, u_char *buf, u_char *eccbuf,
46 struct nand_oobinfo *oobsel);
47static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
48 size_t *retlen, const u_char *buf, u_char *eccbuf,
49 struct nand_oobinfo *oobsel);
50static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, 44static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
51 struct mtd_oob_ops *ops); 45 struct mtd_oob_ops *ops);
52static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, 46static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
@@ -595,18 +589,10 @@ static int doc_dumpblk(struct mtd_info *mtd, loff_t from)
595static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, 589static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
596 size_t *retlen, u_char *buf) 590 size_t *retlen, u_char *buf)
597{ 591{
598 /* Just a special case of doc_read_ecc */
599 return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
600}
601
602static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
603 size_t *retlen, u_char *buf, u_char *eccbuf,
604 struct nand_oobinfo *oobsel)
605{
606 int ret, i; 592 int ret, i;
607 volatile char dummy; 593 volatile char dummy;
608 loff_t fofs; 594 loff_t fofs;
609 unsigned char syndrome[6]; 595 unsigned char syndrome[6], eccbuf[6];
610 struct DiskOnChip *this = mtd->priv; 596 struct DiskOnChip *this = mtd->priv;
611 void __iomem * docptr = this->virtadr; 597 void __iomem * docptr = this->virtadr;
612 struct Nand *mychip = &this->chips[from >> (this->chipshift)]; 598 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
@@ -644,56 +630,51 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
644 WriteDOC(0, docptr, Mplus_FlashControl); 630 WriteDOC(0, docptr, Mplus_FlashControl);
645 DoC_WaitReady(docptr); 631 DoC_WaitReady(docptr);
646 632
647 if (eccbuf) { 633 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
648 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ 634 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
649 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); 635 WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
650 WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
651 } else {
652 /* disable the ECC engine */
653 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
654 }
655 636
656 /* Let the caller know we completed it */ 637 /* Let the caller know we completed it */
657 *retlen = len; 638 *retlen = len;
658 ret = 0; 639 ret = 0;
659 640
660 ReadDOC(docptr, Mplus_ReadPipeInit); 641 ReadDOC(docptr, Mplus_ReadPipeInit);
661 ReadDOC(docptr, Mplus_ReadPipeInit); 642 ReadDOC(docptr, Mplus_ReadPipeInit);
662 643
663 if (eccbuf) { 644 /* Read the data via the internal pipeline through CDSN IO
664 /* Read the data via the internal pipeline through CDSN IO 645 register, see Pipelined Read Operations 11.3 */
665 register, see Pipelined Read Operations 11.3 */ 646 MemReadDOC(docptr, buf, len);
666 MemReadDOC(docptr, buf, len);
667 647
668 /* Read the ECC data following raw data */ 648 /* Read the ECC data following raw data */
669 MemReadDOC(docptr, eccbuf, 4); 649 MemReadDOC(docptr, eccbuf, 4);
670 eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead); 650 eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead);
671 eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead); 651 eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead);
672 652
673 /* Flush the pipeline */ 653 /* Flush the pipeline */
674 dummy = ReadDOC(docptr, Mplus_ECCConf); 654 dummy = ReadDOC(docptr, Mplus_ECCConf);
675 dummy = ReadDOC(docptr, Mplus_ECCConf); 655 dummy = ReadDOC(docptr, Mplus_ECCConf);
676 656
677 /* Check the ECC Status */ 657 /* Check the ECC Status */
678 if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) { 658 if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) {
679 int nb_errors; 659 int nb_errors;
680 /* There was an ECC error */ 660 /* There was an ECC error */
681#ifdef ECC_DEBUG 661#ifdef ECC_DEBUG
682 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); 662 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
683#endif 663#endif
684 /* Read the ECC syndrome through the DiskOnChip ECC logic. 664 /* Read the ECC syndrome through the DiskOnChip ECC logic.
685 These syndromes will be all ZERO when there is no error */ 665 These syndromes will be all ZERO when there is no error */
686 for (i = 0; i < 6; i++) 666 for (i = 0; i < 6; i++)
687 syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); 667 syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
688 668
689 nb_errors = doc_decode_ecc(buf, syndrome); 669 nb_errors = doc_decode_ecc(buf, syndrome);
690#ifdef ECC_DEBUG 670#ifdef ECC_DEBUG
691 printk("ECC Errors corrected: %x\n", nb_errors); 671 printk("ECC Errors corrected: %x\n", nb_errors);
692#endif 672#endif
693 if (nb_errors < 0) { 673 if (nb_errors < 0) {
694 /* We return error, but have actually done the read. Not that 674 /* We return error, but have actually done the
695 this can be told to user-space, via sys_read(), but at least 675 read. Not that this can be told to user-space, via
696 MTD-aware stuff can know about it by checking *retlen */ 676 sys_read(), but at least MTD-aware stuff can know
677 about it by checking *retlen */
697#ifdef ECC_DEBUG 678#ifdef ECC_DEBUG
698 printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n", 679 printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n",
699 __FILE__, __LINE__, (int)from); 680 __FILE__, __LINE__, (int)from);
@@ -707,24 +688,16 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
707 eccbuf[3], eccbuf[4], eccbuf[5]); 688 eccbuf[3], eccbuf[4], eccbuf[5]);
708#endif 689#endif
709 ret = -EIO; 690 ret = -EIO;
710 }
711 } 691 }
692 }
712 693
713#ifdef PSYCHO_DEBUG 694#ifdef PSYCHO_DEBUG
714 printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 695 printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
715 (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], 696 (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
716 eccbuf[4], eccbuf[5]); 697 eccbuf[4], eccbuf[5]);
717#endif 698#endif
718 699 /* disable the ECC engine */
719 /* disable the ECC engine */ 700 WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
720 WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
721 } else {
722 /* Read the data via the internal pipeline through CDSN IO
723 register, see Pipelined Read Operations 11.3 */
724 MemReadDOC(docptr, buf, len-2);
725 buf[len-2] = ReadDOC(docptr, Mplus_LastDataRead);
726 buf[len-1] = ReadDOC(docptr, Mplus_LastDataRead);
727 }
728 701
729 /* Disable flash internally */ 702 /* Disable flash internally */
730 WriteDOC(0, docptr, Mplus_FlashSelect); 703 WriteDOC(0, docptr, Mplus_FlashSelect);
@@ -735,17 +708,10 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
735static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, 708static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
736 size_t *retlen, const u_char *buf) 709 size_t *retlen, const u_char *buf)
737{ 710{
738 char eccbuf[6];
739 return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
740}
741
742static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
743 size_t *retlen, const u_char *buf, u_char *eccbuf,
744 struct nand_oobinfo *oobsel)
745{
746 int i, before, ret = 0; 711 int i, before, ret = 0;
747 loff_t fto; 712 loff_t fto;
748 volatile char dummy; 713 volatile char dummy;
714 char eccbuf[6];
749 struct DiskOnChip *this = mtd->priv; 715 struct DiskOnChip *this = mtd->priv;
750 void __iomem * docptr = this->virtadr; 716 void __iomem * docptr = this->virtadr;
751 struct Nand *mychip = &this->chips[to >> (this->chipshift)]; 717 struct Nand *mychip = &this->chips[to >> (this->chipshift)];
@@ -795,46 +761,42 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
795 /* Disable the ECC engine */ 761 /* Disable the ECC engine */
796 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); 762 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
797 763
798 if (eccbuf) { 764 if (before) {
799 if (before) { 765 /* Write the block status BLOCK_USED (0x5555) */
800 /* Write the block status BLOCK_USED (0x5555) */ 766 WriteDOC(0x55, docptr, Mil_CDSN_IO);
801 WriteDOC(0x55, docptr, Mil_CDSN_IO); 767 WriteDOC(0x55, docptr, Mil_CDSN_IO);
802 WriteDOC(0x55, docptr, Mil_CDSN_IO);
803 }
804
805 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
806 WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
807 } 768 }
808 769
770 /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
771 WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
772
809 MemWriteDOC(docptr, (unsigned char *) buf, len); 773 MemWriteDOC(docptr, (unsigned char *) buf, len);
810 774
811 if (eccbuf) { 775 /* Write ECC data to flash, the ECC info is generated by
812 /* Write ECC data to flash, the ECC info is generated by 776 the DiskOnChip ECC logic see Reed-Solomon EDC/ECC 11.1 */
813 the DiskOnChip ECC logic see Reed-Solomon EDC/ECC 11.1 */ 777 DoC_Delay(docptr, 3);
814 DoC_Delay(docptr, 3);
815 778
816 /* Read the ECC data through the DiskOnChip ECC logic */ 779 /* Read the ECC data through the DiskOnChip ECC logic */
817 for (i = 0; i < 6; i++) 780 for (i = 0; i < 6; i++)
818 eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); 781 eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
819 782
820 /* disable the ECC engine */ 783 /* disable the ECC engine */
821 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); 784 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
822 785
823 /* Write the ECC data to flash */ 786 /* Write the ECC data to flash */
824 MemWriteDOC(docptr, eccbuf, 6); 787 MemWriteDOC(docptr, eccbuf, 6);
825 788
826 if (!before) { 789 if (!before) {
827 /* Write the block status BLOCK_USED (0x5555) */ 790 /* Write the block status BLOCK_USED (0x5555) */
828 WriteDOC(0x55, docptr, Mil_CDSN_IO+6); 791 WriteDOC(0x55, docptr, Mil_CDSN_IO+6);
829 WriteDOC(0x55, docptr, Mil_CDSN_IO+7); 792 WriteDOC(0x55, docptr, Mil_CDSN_IO+7);
830 } 793 }
831 794
832#ifdef PSYCHO_DEBUG 795#ifdef PSYCHO_DEBUG
833 printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", 796 printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
834 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], 797 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
835 eccbuf[4], eccbuf[5]); 798 eccbuf[4], eccbuf[5]);
836#endif 799#endif
837 }
838 800
839 WriteDOC(0x00, docptr, Mplus_WritePipeTerm); 801 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
840 WriteDOC(0x00, docptr, Mplus_WritePipeTerm); 802 WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
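The write side mirrors this: with DOC_ECC_RW set, the engine generates check bytes while the data streams out, and the driver reads the six results back from the syndrome registers to append them to the spare area. A condensed sketch of just that sequence, using the same register helpers as the hunks above (the BLOCK_USED marker placement still depends on the 'before' half-page test shown earlier):

	WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf); /* generate */
	MemWriteDOC(docptr, (unsigned char *)buf, len);           /* data out */
	for (i = 0; i < 6; i++)                                   /* collect ECC */
		eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
	WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);             /* engine off */
	MemWriteDOC(docptr, eccbuf, 6);                           /* ECC to spare */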
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 80a76654d963..62b861304e03 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -155,7 +155,7 @@ static u16 nand_read_word(struct mtd_info *mtd)
155/** 155/**
156 * nand_select_chip - [DEFAULT] control CE line 156 * nand_select_chip - [DEFAULT] control CE line
157 * @mtd: MTD device structure 157 * @mtd: MTD device structure
158 * @chip: chipnumber to select, -1 for deselect 158 * @chipnr: chipnumber to select, -1 for deselect
159 * 159 *
160 * Default select function for 1 chip devices. 160 * Default select function for 1 chip devices.
161 */ 161 */
@@ -542,7 +542,6 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
542 * Send command to NAND device. This is the version for the new large page 542 * Send command to NAND device. This is the version for the new large page
543 * devices We dont have the separate regions as we have in the small page 543 * devices We dont have the separate regions as we have in the small page
544 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. 544 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
545 *
546 */ 545 */
547static void nand_command_lp(struct mtd_info *mtd, unsigned int command, 546static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
548 int column, int page_addr) 547 int column, int page_addr)
@@ -656,7 +655,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
656 655
657/** 656/**
658 * nand_get_device - [GENERIC] Get chip for selected access 657 * nand_get_device - [GENERIC] Get chip for selected access
659 * @this: the nand chip descriptor 658 * @chip: the nand chip descriptor
660 * @mtd: MTD device structure 659 * @mtd: MTD device structure
661 * @new_state: the state which is requested 660 * @new_state: the state which is requested
662 * 661 *
@@ -696,13 +695,12 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
696/** 695/**
697 * nand_wait - [DEFAULT] wait until the command is done 696 * nand_wait - [DEFAULT] wait until the command is done
698 * @mtd: MTD device structure 697 * @mtd: MTD device structure
699 * @this: NAND chip structure 698 * @chip: NAND chip structure
700 * 699 *
701 * Wait for command done. This applies to erase and program only 700 * Wait for command done. This applies to erase and program only
702 * Erase can take up to 400ms and program up to 20ms according to 701 * Erase can take up to 400ms and program up to 20ms according to
703 * general NAND and SmartMedia specs 702 * general NAND and SmartMedia specs
704 * 703 */
705*/
706static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) 704static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
707{ 705{
708 706
@@ -896,6 +894,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
896/** 894/**
897 * nand_transfer_oob - [Internal] Transfer oob to client buffer 895 * nand_transfer_oob - [Internal] Transfer oob to client buffer
898 * @chip: nand chip structure 896 * @chip: nand chip structure
897 * @oob: oob destination address
899 * @ops: oob ops structure 898 * @ops: oob ops structure
900 */ 899 */
901static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, 900static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
@@ -946,6 +945,7 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
946 * 945 *
947 * @mtd: MTD device structure 946 * @mtd: MTD device structure
948 * @from: offset to read from 947 * @from: offset to read from
948 * @ops: oob ops structure
949 * 949 *
950 * Internal function. Called with chip held. 950 * Internal function. Called with chip held.
951 */ 951 */
@@ -1760,7 +1760,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
1760/** 1760/**
1761 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band 1761 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
1762 * @mtd: MTD device structure 1762 * @mtd: MTD device structure
1763 * @from: offset to read from 1763 * @to: offset to write to
1764 * @ops: oob operation description structure 1764 * @ops: oob operation description structure
1765 */ 1765 */
1766static int nand_write_oob(struct mtd_info *mtd, loff_t to, 1766static int nand_write_oob(struct mtd_info *mtd, loff_t to,
@@ -2055,7 +2055,7 @@ static void nand_sync(struct mtd_info *mtd)
2055/** 2055/**
2056 * nand_block_isbad - [MTD Interface] Check if block at offset is bad 2056 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
2057 * @mtd: MTD device structure 2057 * @mtd: MTD device structure
2058 * @ofs: offset relative to mtd start 2058 * @offs: offset relative to mtd start
2059 */ 2059 */
2060static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 2060static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2061{ 2061{
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 2a163e4084df..dd438ca47d9a 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -65,8 +65,7 @@ static const u_char nand_ecc_precalc_table[] = {
65}; 65};
66 66
67/** 67/**
68 * nand_calculate_ecc - [NAND Interface] Calculate 3 byte ECC code 68 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block
69 * for 256 byte block
70 * @mtd: MTD block structure 69 * @mtd: MTD block structure
71 * @dat: raw data 70 * @dat: raw data
72 * @ecc_code: buffer for ECC 71 * @ecc_code: buffer for ECC
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 21743658d150..fbeedc3184e9 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -237,11 +237,6 @@ static int __init sharpsl_nand_init(void)
237 } 237 }
238 } 238 }
239 239
240 if (machine_is_husky() || machine_is_borzoi() || machine_is_akita()) {
241 /* Need to use small eraseblock size for backward compatibility */
242 sharpsl_mtd->flags |= MTD_NO_VIRTBLOCKS;
243 }
244
245 add_mtd_partitions(sharpsl_mtd, sharpsl_partition_info, nr_partitions); 240 add_mtd_partitions(sharpsl_mtd, sharpsl_partition_info, nr_partitions);
246 241
247 /* Return happy */ 242 /* Return happy */
@@ -255,8 +250,6 @@ module_init(sharpsl_nand_init);
255 */ 250 */
256static void __exit sharpsl_nand_cleanup(void) 251static void __exit sharpsl_nand_cleanup(void)
257{ 252{
258 struct nand_chip *this = (struct nand_chip *)&sharpsl_mtd[1];
259
260 /* Release resources, unregister device */ 253 /* Release resources, unregister device */
261 nand_release(sharpsl_mtd); 254 nand_release(sharpsl_mtd);
262 255
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 8ab03b4a885e..2819de79442c 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1897,7 +1897,7 @@ vortex_timer(unsigned long data)
1897 printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo); 1897 printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
1898 } 1898 }
1899 1899
1900 disable_irq(dev->irq); 1900 disable_irq_lockdep(dev->irq);
1901 old_window = ioread16(ioaddr + EL3_CMD) >> 13; 1901 old_window = ioread16(ioaddr + EL3_CMD) >> 13;
1902 EL3WINDOW(4); 1902 EL3WINDOW(4);
1903 media_status = ioread16(ioaddr + Wn4_Media); 1903 media_status = ioread16(ioaddr + Wn4_Media);
@@ -1978,7 +1978,7 @@ leave_media_alone:
1978 dev->name, media_tbl[dev->if_port].name); 1978 dev->name, media_tbl[dev->if_port].name);
1979 1979
1980 EL3WINDOW(old_window); 1980 EL3WINDOW(old_window);
1981 enable_irq(dev->irq); 1981 enable_irq_lockdep(dev->irq);
1982 mod_timer(&vp->timer, RUN_AT(next_tick)); 1982 mod_timer(&vp->timer, RUN_AT(next_tick));
1983 if (vp->deferred) 1983 if (vp->deferred)
1984 iowrite16(FakeIntr, ioaddr + EL3_CMD); 1984 iowrite16(FakeIntr, ioaddr + EL3_CMD);
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 86be96af9c8f..d2935ae39814 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -249,7 +249,7 @@ void ei_tx_timeout(struct net_device *dev)
249 249
250 /* Ugly but a reset can be slow, yet must be protected */ 250 /* Ugly but a reset can be slow, yet must be protected */
251 251
252 disable_irq_nosync(dev->irq); 252 disable_irq_nosync_lockdep(dev->irq);
253 spin_lock(&ei_local->page_lock); 253 spin_lock(&ei_local->page_lock);
254 254
255 /* Try to restart the card. Perhaps the user has fixed something. */ 255 /* Try to restart the card. Perhaps the user has fixed something. */
@@ -257,7 +257,7 @@ void ei_tx_timeout(struct net_device *dev)
257 NS8390_init(dev, 1); 257 NS8390_init(dev, 1);
258 258
259 spin_unlock(&ei_local->page_lock); 259 spin_unlock(&ei_local->page_lock);
260 enable_irq(dev->irq); 260 enable_irq_lockdep(dev->irq);
261 netif_wake_queue(dev); 261 netif_wake_queue(dev);
262} 262}
263 263
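The disable_irq() to disable_irq_lockdep() substitutions in 3c59x and 8390 follow one pattern: a timer or watchdog path shuts out its own interrupt handler before touching hardware. The _lockdep variants additionally disable local interrupts, so lockdep's model matches reality and the handler's locks stop producing false deadlock reports. A minimal sketch, with a hypothetical example_watchdog():

static void example_watchdog(struct net_device *dev)
{
	disable_irq_lockdep(dev->irq);	/* handler cannot run, and lockdep
					   sees local irqs as off too */
	/* ... reset the chip, touch state the irq handler also locks ... */
	enable_irq_lockdep(dev->irq);	/* re-open the interrupt */
}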
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3c90003f4230..037d870712ff 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2735,21 +2735,21 @@ static void nv_do_nic_poll(unsigned long data)
2735 2735
2736 if (!using_multi_irqs(dev)) { 2736 if (!using_multi_irqs(dev)) {
2737 if (np->msi_flags & NV_MSI_X_ENABLED) 2737 if (np->msi_flags & NV_MSI_X_ENABLED)
2738 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 2738 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
2739 else 2739 else
2740 disable_irq(dev->irq); 2740 disable_irq_lockdep(dev->irq);
2741 mask = np->irqmask; 2741 mask = np->irqmask;
2742 } else { 2742 } else {
2743 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 2743 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
2744 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 2744 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
2745 mask |= NVREG_IRQ_RX_ALL; 2745 mask |= NVREG_IRQ_RX_ALL;
2746 } 2746 }
2747 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 2747 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
2748 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 2748 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
2749 mask |= NVREG_IRQ_TX_ALL; 2749 mask |= NVREG_IRQ_TX_ALL;
2750 } 2750 }
2751 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 2751 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
2752 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 2752 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
2753 mask |= NVREG_IRQ_OTHER; 2753 mask |= NVREG_IRQ_OTHER;
2754 } 2754 }
2755 } 2755 }
@@ -2761,23 +2761,23 @@ static void nv_do_nic_poll(unsigned long data)
2761 pci_push(base); 2761 pci_push(base);
2762 2762
2763 if (!using_multi_irqs(dev)) { 2763 if (!using_multi_irqs(dev)) {
2764 nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); 2764 nv_nic_irq(0, dev, NULL);
2765 if (np->msi_flags & NV_MSI_X_ENABLED) 2765 if (np->msi_flags & NV_MSI_X_ENABLED)
2766 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 2766 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
2767 else 2767 else
2768 enable_irq(dev->irq); 2768 enable_irq_lockdep(dev->irq);
2769 } else { 2769 } else {
2770 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 2770 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
2771 nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); 2771 nv_nic_irq_rx(0, dev, NULL);
2772 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 2772 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
2773 } 2773 }
2774 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 2774 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
2775 nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL); 2775 nv_nic_irq_tx(0, dev, NULL);
2776 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 2776 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
2777 } 2777 }
2778 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 2778 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
2779 nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL); 2779 nv_nic_irq_other(0, dev, NULL);
2780 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 2780 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
2781 } 2781 }
2782 } 2782 }
2783} 2783}
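Besides the lockdep conversions, the forcedeth hunk fixes the hand-rolled handler invocations: nv_nic_irq(0, dev, NULL) now passes the net_device itself as the dev_id cookie instead of re-casting the timer's data argument. A sketch of that calling convention, with hypothetical example_ names:

static void example_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;

	disable_irq_lockdep(dev->irq);
	/* dev_id must match what request_irq() registered -- the netdev */
	example_nic_irq(0, dev, NULL);
	enable_irq_lockdep(dev->irq);
}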
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index f2c0bf89f0c7..29e4b5aa6ead 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -242,12 +242,12 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
242 } 242 }
243 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev); 243 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
244 if (rc) { 244 if (rc) {
245 printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[1].line); 245 printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
246 goto err_free_irq; 246 goto err_free_irq;
247 } 247 }
248 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev); 248 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
249 if (rc) { 249 if (rc) {
250 printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[2].line); 250 printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
251 goto err_free_tx_irq; 251 goto err_free_tx_irq;
252 } 252 }
253 253
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e5e1b2962936..f645921aff8b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.61" 71#define DRV_MODULE_VERSION "3.62"
72#define DRV_MODULE_RELDATE "June 29, 2006" 72#define DRV_MODULE_RELDATE "June 30, 2006"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -3798,18 +3798,24 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3798 goto out_unlock; 3798 goto out_unlock;
3799 } 3799 }
3800 3800
3801 tcp_opt_len = ((skb->h.th->doff - 5) * 4); 3801 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3802 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); 3802 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3803 else {
3804 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3805 ip_tcp_len = (skb->nh.iph->ihl * 4) +
3806 sizeof(struct tcphdr);
3807
3808 skb->nh.iph->check = 0;
3809 skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3810 tcp_opt_len);
3811 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3812 }
3803 3813
3804 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 3814 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3805 TXD_FLAG_CPU_POST_DMA); 3815 TXD_FLAG_CPU_POST_DMA);
3806 3816
3807 skb->nh.iph->check = 0;
3808 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3809
3810 skb->h.th->check = 0; 3817 skb->h.th->check = 0;
3811 3818
3812 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3813 } 3819 }
3814 else if (skb->ip_summed == CHECKSUM_HW) 3820 else if (skb->ip_summed == CHECKSUM_HW)
3815 base_flags |= TXD_FLAG_TCPUDP_CSUM; 3821 base_flags |= TXD_FLAG_TCPUDP_CSUM;
@@ -7887,6 +7893,12 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
7887 return -EINVAL; 7893 return -EINVAL;
7888 return 0; 7894 return 0;
7889 } 7895 }
7896 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
7897 if (value)
7898 dev->features |= NETIF_F_TSO6;
7899 else
7900 dev->features &= ~NETIF_F_TSO6;
7901 }
7890 return ethtool_op_set_tso(dev, value); 7902 return ethtool_op_set_tso(dev, value);
7891} 7903}
7892#endif 7904#endif
@@ -11507,8 +11519,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11507 * Firmware TSO on older chips gives lower performance, so it 11519 * Firmware TSO on older chips gives lower performance, so it
11508 * is off by default, but can be enabled using ethtool. 11520 * is off by default, but can be enabled using ethtool.
11509 */ 11521 */
11510 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 11522 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11511 dev->features |= NETIF_F_TSO; 11523 dev->features |= NETIF_F_TSO;
11524 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
11525 dev->features |= NETIF_F_TSO6;
11526 }
11512 11527
11513#endif 11528#endif
11514 11529
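The subtle part of the tg3 TSO changes is the mss field encoding: for the second-generation hardware TSO engine, the header length the chip must replicate per segment is folded into bits 9 and up of the same descriptor field as the segment size. For IPv6 GSO that length is taken as the full linear header (skb_headlen minus the Ethernet header); for IPv4 it is the IP header plus TCP header and options, as computed in the hunk above. A hedged restatement of just that encoding:

	/* mss descriptor field: low bits = segment size,
	   bits 9+ = total L3+L4 header length the chip replicates */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
	else
		mss |= (ip_tcp_len + tcp_opt_len) << 9;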
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c b/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c
index b3ffcf501311..e386dcc32e8c 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c
@@ -32,7 +32,7 @@
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/version.h> 35#include <linux/utsrelease.h>
36 36
37 37
38static void bcm43xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 38static void bcm43xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index b764cfda6e84..dafaa5ff5aa6 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3095,6 +3095,14 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
3095} 3095}
3096 3096
3097 3097
3098/*
3099 * HostAP uses two layers of net devices, where the inner
3100 * layer gets called all the time from the outer layer.
3101 * This is a natural nesting, which needs a split lock type.
3102 */
3103static struct lock_class_key hostap_netdev_xmit_lock_key;
3104
3105
3098static struct net_device * 3106static struct net_device *
3099prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, 3107prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
3100 struct device *sdev) 3108 struct device *sdev)
@@ -3259,6 +3267,8 @@ while (0)
3259 SET_NETDEV_DEV(dev, sdev); 3267 SET_NETDEV_DEV(dev, sdev);
3260 if (ret >= 0) 3268 if (ret >= 0)
3261 ret = register_netdevice(dev); 3269 ret = register_netdevice(dev);
3270
3271 lockdep_set_class(&dev->_xmit_lock, &hostap_netdev_xmit_lock_key);
3262 rtnl_unlock(); 3272 rtnl_unlock();
3263 if (ret < 0) { 3273 if (ret < 0) {
3264 printk(KERN_WARNING "%s: register netdevice failed!\n", 3274 printk(KERN_WARNING "%s: register netdevice failed!\n",
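The hostap fix is the canonical use of lockdep_set_class(): stacked net devices mean the inner device's _xmit_lock is taken while the outer one is held, which lockdep would otherwise flag as recursive locking on a single lock class. Giving the inner lock its own class key declares the nesting intentional. A minimal sketch with a hypothetical key name:

static struct lock_class_key inner_xmit_lock_key;	/* one key per layer */

static int example_register_inner(struct net_device *dev)
{
	int ret = register_netdevice(dev);

	/* reclassify after registration, while still under rtnl_lock() */
	lockdep_set_class(&dev->_xmit_lock, &inner_xmit_lock_key);
	return ret;
}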
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 3a4a644c2686..212268881857 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -74,7 +74,7 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity)
74 74
75static void 75static void
76pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, u32 gsi, 76pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, u32 gsi,
77 int triggering, int polarity) 77 int triggering, int polarity, int shareable)
78{ 78{
79 int i = 0; 79 int i = 0;
80 int irq; 80 int irq;
@@ -95,6 +95,9 @@ pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, u32 gsi,
95 return; 95 return;
96 } 96 }
97 97
98 if (shareable)
99 res->irq_resource[i].flags |= IORESOURCE_IRQ_SHAREABLE;
100
98 res->irq_resource[i].start = irq; 101 res->irq_resource[i].start = irq;
99 res->irq_resource[i].end = irq; 102 res->irq_resource[i].end = irq;
100 pcibios_penalize_isa_irq(irq, 1); 103 pcibios_penalize_isa_irq(irq, 1);
@@ -194,7 +197,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
194 pnpacpi_parse_allocated_irqresource(res_table, 197 pnpacpi_parse_allocated_irqresource(res_table,
195 res->data.irq.interrupts[i], 198 res->data.irq.interrupts[i],
196 res->data.irq.triggering, 199 res->data.irq.triggering,
197 res->data.irq.polarity); 200 res->data.irq.polarity,
201 res->data.irq.sharable);
198 } 202 }
199 break; 203 break;
200 204
@@ -255,7 +259,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
255 pnpacpi_parse_allocated_irqresource(res_table, 259 pnpacpi_parse_allocated_irqresource(res_table,
256 res->data.extended_irq.interrupts[i], 260 res->data.extended_irq.interrupts[i],
257 res->data.extended_irq.triggering, 261 res->data.extended_irq.triggering,
258 res->data.extended_irq.polarity); 262 res->data.extended_irq.polarity,
263 res->data.extended_irq.sharable);
259 } 264 }
260 break; 265 break;
261 266
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 4138564402b8..985d1613baaa 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -383,6 +383,7 @@ void
383sclp_sync_wait(void) 383sclp_sync_wait(void)
384{ 384{
385 unsigned long psw_mask; 385 unsigned long psw_mask;
386 unsigned long flags;
386 unsigned long cr0, cr0_sync; 387 unsigned long cr0, cr0_sync;
387 u64 timeout; 388 u64 timeout;
388 389
@@ -395,9 +396,11 @@ sclp_sync_wait(void)
395 sclp_tod_from_jiffies(sclp_request_timer.expires - 396 sclp_tod_from_jiffies(sclp_request_timer.expires -
396 jiffies); 397 jiffies);
397 } 398 }
399 local_irq_save(flags);
398 /* Prevent bottom half from executing once we force interrupts open */ 400 /* Prevent bottom half from executing once we force interrupts open */
399 local_bh_disable(); 401 local_bh_disable();
400 /* Enable service-signal interruption, disable timer interrupts */ 402 /* Enable service-signal interruption, disable timer interrupts */
403 trace_hardirqs_on();
401 __ctl_store(cr0, 0, 0); 404 __ctl_store(cr0, 0, 0);
402 cr0_sync = cr0; 405 cr0_sync = cr0;
403 cr0_sync |= 0x00000200; 406 cr0_sync |= 0x00000200;
@@ -415,11 +418,10 @@ sclp_sync_wait(void)
415 barrier(); 418 barrier();
416 cpu_relax(); 419 cpu_relax();
417 } 420 }
418 /* Restore interrupt settings */ 421 local_irq_disable();
419 asm volatile ("SSM 0(%0)"
420 : : "a" (&psw_mask) : "memory");
421 __ctl_load(cr0, 0, 0); 422 __ctl_load(cr0, 0, 0);
422 __local_bh_enable(); 423 _local_bh_enable();
424 local_irq_restore(flags);
423} 425}
424 426
425EXPORT_SYMBOL(sclp_sync_wait); 427EXPORT_SYMBOL(sclp_sync_wait);
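The sclp change replaces a raw SSM-based restore, which lockdep cannot see, with the generic irq-flag helpers, and announces the hand-opened interrupt window with trace_hardirqs_on(). A condensed sketch of the fixed busy-wait, with the s390 control-register specifics elided:

	unsigned long flags;

	local_irq_save(flags);		/* remember the caller's irq state */
	local_bh_disable();		/* keep the bottom half out */
	trace_hardirqs_on();		/* about to open irqs by hand */
	/* ... enable service-signal interruption, spin until done ... */
	local_irq_disable();
	_local_bh_enable();		/* non-softirq-entry variant */
	local_irq_restore(flags);	/* back to the caller's state */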
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index a3423267467f..6fec90eab00e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -147,7 +147,7 @@ cio_tpi(void)
147 sch->driver->irq(&sch->dev); 147 sch->driver->irq(&sch->dev);
148 spin_unlock(&sch->lock); 148 spin_unlock(&sch->lock);
149 irq_exit (); 149 irq_exit ();
150 __local_bh_enable(); 150 _local_bh_enable();
151 return 1; 151 return 1;
152} 152}
153 153
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 36733b9823c6..8e8963f15731 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -84,6 +84,8 @@ static debug_info_t *qeth_dbf_qerr = NULL;
84 84
85DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf); 85DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf);
86 86
87static struct lock_class_key qdio_out_skb_queue_key;
88
87/** 89/**
88 * some more definitions and declarations 90 * some more definitions and declarations
89 */ 91 */
@@ -3229,6 +3231,9 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
3229 &card->qdio.out_qs[i]->qdio_bufs[j]; 3231 &card->qdio.out_qs[i]->qdio_bufs[j];
3230 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j]. 3232 skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
3231 skb_list); 3233 skb_list);
3234 lockdep_set_class(
3235 &card->qdio.out_qs[i]->bufs[j].skb_list.lock,
3236 &qdio_out_skb_queue_key);
3232 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list); 3237 INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
3233 } 3238 }
3234 } 3239 }
@@ -5272,6 +5277,7 @@ qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5272 struct sk_buff_head tmp_list; 5277 struct sk_buff_head tmp_list;
5273 5278
5274 skb_queue_head_init(&tmp_list); 5279 skb_queue_head_init(&tmp_list);
5280 lockdep_set_class(&tmp_list.lock, &qdio_out_skb_queue_key);
5275 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){ 5281 for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){
5276 while ((skb = skb_dequeue(&buf->skb_list))){ 5282 while ((skb = skb_dequeue(&buf->skb_list))){
5277 if (vlan_tx_tag_present(skb) && 5283 if (vlan_tx_tag_present(skb) &&
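qeth applies the same lock-class idea to data structures: every outbound buffer's skb_list lock joins one dedicated class, so lockdep groups these queue locks together and apart from ordinary sk_buff_head locks. A sketch of the init step the two hunks above effectively share, wrapped in a hypothetical helper:

static struct lock_class_key qdio_out_skb_queue_key;

static void example_init_out_list(struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, &qdio_out_skb_queue_key);
}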
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 432136f96e64..ffb3677e354f 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -378,6 +378,8 @@ s390_do_machine_check(struct pt_regs *regs)
378 struct mcck_struct *mcck; 378 struct mcck_struct *mcck;
379 int umode; 379 int umode;
380 380
381 lockdep_off();
382
381 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 383 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
382 mcck = &__get_cpu_var(cpu_mcck); 384 mcck = &__get_cpu_var(cpu_mcck);
383 umode = user_mode(regs); 385 umode = user_mode(regs);
@@ -482,6 +484,7 @@ s390_do_machine_check(struct pt_regs *regs)
482 mcck->warning = 1; 484 mcck->warning = 1;
483 set_thread_flag(TIF_MCCK_PENDING); 485 set_thread_flag(TIF_MCCK_PENDING);
484 } 486 }
487 lockdep_on();
485} 488}
486 489
487/* 490/*
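lockdep_off() and lockdep_on() simply suspend checking for the current task; they take no locks themselves. Machine-check handling runs in a context lockdep cannot model, so the handler brackets its whole body, roughly:

	lockdep_off();		/* lockdep blind from here ... */
	/* ... inspect mci, record mcck state, set TIF_MCCK_PENDING ... */
	lockdep_on();		/* ... to here; checking resumes */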
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 909731b99d26..8ec8da0beaa8 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -2168,9 +2168,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2168 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 2168 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2169 &adapter->status); 2169 &adapter->status);
2170 ZFCP_LOG_DEBUG("Doing exchange config data\n"); 2170 ZFCP_LOG_DEBUG("Doing exchange config data\n");
2171 write_lock(&adapter->erp_lock); 2171 write_lock_irq(&adapter->erp_lock);
2172 zfcp_erp_action_to_running(erp_action); 2172 zfcp_erp_action_to_running(erp_action);
2173 write_unlock(&adapter->erp_lock); 2173 write_unlock_irq(&adapter->erp_lock);
2174 zfcp_erp_timeout_init(erp_action); 2174 zfcp_erp_timeout_init(erp_action);
2175 if (zfcp_fsf_exchange_config_data(erp_action)) { 2175 if (zfcp_fsf_exchange_config_data(erp_action)) {
2176 retval = ZFCP_ERP_FAILED; 2176 retval = ZFCP_ERP_FAILED;
@@ -2236,9 +2236,9 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2236 adapter = erp_action->adapter; 2236 adapter = erp_action->adapter;
2237 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2237 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2238 2238
2239 write_lock(&adapter->erp_lock); 2239 write_lock_irq(&adapter->erp_lock);
2240 zfcp_erp_action_to_running(erp_action); 2240 zfcp_erp_action_to_running(erp_action);
2241 write_unlock(&adapter->erp_lock); 2241 write_unlock_irq(&adapter->erp_lock);
2242 2242
2243 zfcp_erp_timeout_init(erp_action); 2243 zfcp_erp_timeout_init(erp_action);
2244 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); 2244 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 345a191926a4..49ea5add4abc 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -427,6 +427,7 @@ int
427zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr) 427zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
428{ 428{
429 struct zfcp_fsf_req *fsf_req; 429 struct zfcp_fsf_req *fsf_req;
430 unsigned long flags;
430 431
431 /* invalid (per convention used in this driver) */ 432 /* invalid (per convention used in this driver) */
432 if (unlikely(!sbale_addr)) { 433 if (unlikely(!sbale_addr)) {
@@ -438,15 +439,15 @@ zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
438 fsf_req = (struct zfcp_fsf_req *) sbale_addr; 439 fsf_req = (struct zfcp_fsf_req *) sbale_addr;
439 440
440 /* serialize with zfcp_fsf_req_dismiss_all */ 441 /* serialize with zfcp_fsf_req_dismiss_all */
441 spin_lock(&adapter->fsf_req_list_lock); 442 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
442 if (list_empty(&adapter->fsf_req_list_head)) { 443 if (list_empty(&adapter->fsf_req_list_head)) {
443 spin_unlock(&adapter->fsf_req_list_lock); 444 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
444 return 0; 445 return 0;
445 } 446 }
446 list_del(&fsf_req->list); 447 list_del(&fsf_req->list);
447 atomic_dec(&adapter->fsf_reqs_active); 448 atomic_dec(&adapter->fsf_reqs_active);
448 spin_unlock(&adapter->fsf_req_list_lock); 449 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
449 450
450 if (unlikely(adapter != fsf_req->adapter)) { 451 if (unlikely(adapter != fsf_req->adapter)) {
451 ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, " 452 ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
452 "fsf_req->adapter=%p, adapter=%p)\n", 453 "fsf_req->adapter=%p, adapter=%p)\n",
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index eb7a6a4ded75..657a3ab75399 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -173,6 +173,7 @@ STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
173STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); 173STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
174STATIC void NCR_700_chip_setup(struct Scsi_Host *host); 174STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
175STATIC void NCR_700_chip_reset(struct Scsi_Host *host); 175STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
176STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); 177STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); 178STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); 179static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
@@ -182,10 +183,6 @@ STATIC struct device_attribute *NCR_700_dev_attrs[];
182 183
183STATIC struct scsi_transport_template *NCR_700_transport_template = NULL; 184STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
184 185
185struct NCR_700_sense {
186 unsigned char cmnd[MAX_COMMAND_SIZE];
187};
188
189static char *NCR_700_phase[] = { 186static char *NCR_700_phase[] = {
190 "", 187 "",
191 "after selection", 188 "after selection",
@@ -333,6 +330,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
333 tpnt->use_clustering = ENABLE_CLUSTERING; 330 tpnt->use_clustering = ENABLE_CLUSTERING;
334 tpnt->slave_configure = NCR_700_slave_configure; 331 tpnt->slave_configure = NCR_700_slave_configure;
335 tpnt->slave_destroy = NCR_700_slave_destroy; 332 tpnt->slave_destroy = NCR_700_slave_destroy;
333 tpnt->slave_alloc = NCR_700_slave_alloc;
336 tpnt->change_queue_depth = NCR_700_change_queue_depth; 334 tpnt->change_queue_depth = NCR_700_change_queue_depth;
337 tpnt->change_queue_type = NCR_700_change_queue_type; 335 tpnt->change_queue_type = NCR_700_change_queue_type;
338 336
@@ -611,9 +609,10 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
611 struct NCR_700_command_slot *slot = 609 struct NCR_700_command_slot *slot =
612 (struct NCR_700_command_slot *)SCp->host_scribble; 610 (struct NCR_700_command_slot *)SCp->host_scribble;
613 611
614 NCR_700_unmap(hostdata, SCp, slot); 612 dma_unmap_single(hostdata->dev, slot->pCmd,
613 sizeof(SCp->cmnd), DMA_TO_DEVICE);
615 if (slot->flags == NCR_700_FLAG_AUTOSENSE) { 614 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
616 struct NCR_700_sense *sense = SCp->device->hostdata; 615 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
617#ifdef NCR_700_DEBUG 616#ifdef NCR_700_DEBUG
618 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n", 617 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
619 SCp, SCp->cmnd[7], result); 618 SCp, SCp->cmnd[7], result);
@@ -624,10 +623,9 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
624 /* restore the old result if the request sense was 623 /* restore the old result if the request sense was
625 * successful */ 624 * successful */
626 if(result == 0) 625 if(result == 0)
627 result = sense->cmnd[7]; 626 result = cmnd[7];
628 } else 627 } else
629 dma_unmap_single(hostdata->dev, slot->pCmd, 628 NCR_700_unmap(hostdata, SCp, slot);
630 sizeof(SCp->cmnd), DMA_TO_DEVICE);
631 629
632 free_slot(slot, hostdata); 630 free_slot(slot, hostdata);
633#ifdef NCR_700_DEBUG 631#ifdef NCR_700_DEBUG
@@ -969,14 +967,15 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
969 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) { 967 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
970 struct NCR_700_command_slot *slot = 968 struct NCR_700_command_slot *slot =
971 (struct NCR_700_command_slot *)SCp->host_scribble; 969 (struct NCR_700_command_slot *)SCp->host_scribble;
972 if(SCp->cmnd[0] == REQUEST_SENSE) { 970 if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
973 /* OOPS: bad device, returning another 971 /* OOPS: bad device, returning another
974 * contingent allegiance condition */ 972 * contingent allegiance condition */
975 scmd_printk(KERN_ERR, SCp, 973 scmd_printk(KERN_ERR, SCp,
976 "broken device is looping in contingent allegiance: ignoring\n"); 974 "broken device is looping in contingent allegiance: ignoring\n");
977 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]); 975 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
978 } else { 976 } else {
979 struct NCR_700_sense *sense = SCp->device->hostdata; 977 char *cmnd =
978 NCR_700_get_sense_cmnd(SCp->device);
980#ifdef NCR_DEBUG 979#ifdef NCR_DEBUG
981 scsi_print_command(SCp); 980 scsi_print_command(SCp);
982 printk(" cmd %p has status %d, requesting sense\n", 981 printk(" cmd %p has status %d, requesting sense\n",
@@ -994,21 +993,21 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
994 sizeof(SCp->cmnd), 993 sizeof(SCp->cmnd),
995 DMA_TO_DEVICE); 994 DMA_TO_DEVICE);
996 995
997 sense->cmnd[0] = REQUEST_SENSE; 996 cmnd[0] = REQUEST_SENSE;
998 sense->cmnd[1] = (SCp->device->lun & 0x7) << 5; 997 cmnd[1] = (SCp->device->lun & 0x7) << 5;
999 sense->cmnd[2] = 0; 998 cmnd[2] = 0;
1000 sense->cmnd[3] = 0; 999 cmnd[3] = 0;
1001 sense->cmnd[4] = sizeof(SCp->sense_buffer); 1000 cmnd[4] = sizeof(SCp->sense_buffer);
1002 sense->cmnd[5] = 0; 1001 cmnd[5] = 0;
1003 /* Here's a quiet hack: the 1002 /* Here's a quiet hack: the
1004 * REQUEST_SENSE command is six bytes, 1003 * REQUEST_SENSE command is six bytes,
1005 * so store a flag indicating that 1004 * so store a flag indicating that
1006 * this was an internal sense request 1005 * this was an internal sense request
1007 * and the original status at the end 1006 * and the original status at the end
1008 * of the command */ 1007 * of the command */
1009 sense->cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC; 1008 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1010 sense->cmnd[7] = hostdata->status[0]; 1009 cmnd[7] = hostdata->status[0];
1011 slot->pCmd = dma_map_single(hostdata->dev, sense->cmnd, sizeof(sense->cmnd), DMA_TO_DEVICE); 1010 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1012 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE); 1011 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1013 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer)); 1012 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1014 slot->SG[0].pAddr = bS_to_host(slot->dma_handle); 1013 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
@@ -1530,7 +1529,7 @@ NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
1530 1529
1531 /* clear all the negotiated parameters */ 1530 /* clear all the negotiated parameters */
1532 __shost_for_each_device(SDp, host) 1531 __shost_for_each_device(SDp, host)
1533 SDp->hostdata = NULL; 1532 NCR_700_clear_flag(SDp, ~0);
1534 1533
1535 /* clear all the slots and their pending commands */ 1534 /* clear all the slots and their pending commands */
1536 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) { 1535 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
@@ -2035,7 +2034,17 @@ NCR_700_set_offset(struct scsi_target *STp, int offset)
2035 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION; 2034 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2036} 2035}
2037 2036
2037STATIC int
2038NCR_700_slave_alloc(struct scsi_device *SDp)
2039{
2040 SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2041 GFP_KERNEL);
2038 2042
2043 if (!SDp->hostdata)
2044 return -ENOMEM;
2045
2046 return 0;
2047}
2039 2048
2040STATIC int 2049STATIC int
2041NCR_700_slave_configure(struct scsi_device *SDp) 2050NCR_700_slave_configure(struct scsi_device *SDp)
@@ -2043,11 +2052,6 @@ NCR_700_slave_configure(struct scsi_device *SDp)
2043 struct NCR_700_Host_Parameters *hostdata = 2052 struct NCR_700_Host_Parameters *hostdata =
2044 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; 2053 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2045 2054
2046 SDp->hostdata = kmalloc(GFP_KERNEL, sizeof(struct NCR_700_sense));
2047
2048 if (!SDp->hostdata)
2049 return -ENOMEM;
2050
2051 /* to do here: allocate memory; build a queue_full list */ 2055 /* to do here: allocate memory; build a queue_full list */
2052 if(SDp->tagged_supported) { 2056 if(SDp->tagged_supported) {
2053 scsi_set_tag_type(SDp, MSG_ORDERED_TAG); 2057 scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 7f22a06fe5ec..97ebe71b701b 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -12,7 +12,7 @@
 #include <asm/io.h>
 
 #include <scsi/scsi_device.h>
-
+#include <scsi/scsi_cmnd.h>
 
 /* Turn on for general debugging---too verbose for normal use */
 #undef NCR_700_DEBUG
@@ -76,11 +76,16 @@ struct NCR_700_SG_List {
 	#define	SCRIPT_RETURN			0x90080000
 };
 
-/* We use device->hostdata to store negotiated parameters. This is
- * supposed to be a pointer to a device private area, but we cannot
- * really use it as such since it will never be freed, so just use the
- * 32 bits to cram the information. The SYNC negotiation sequence looks
- * like:
+struct NCR_700_Device_Parameters {
+	/* space for creating a request sense command. Really, except
+	 * for the annoying SCSI-2 requirement for LUN information in
+	 * cmnd[1], this could be in static storage */
+	unsigned char cmnd[MAX_COMMAND_SIZE];
+	__u8 depth;
+};
+
+
+/* The SYNC negotiation sequence looks like:
  *
 * If DEV_NEGOTIATED_SYNC not set, tack and SDTR message on to the
 * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
@@ -98,19 +103,26 @@ struct NCR_700_SG_List {
 #define NCR_700_DEV_BEGIN_SYNC_NEGOTIATION	(1<<17)
 #define NCR_700_DEV_PRINT_SYNC_NEGOTIATION	(1<<19)
 
+static inline char *NCR_700_get_sense_cmnd(struct scsi_device *SDp)
+{
+	struct NCR_700_Device_Parameters *hostdata = SDp->hostdata;
+
+	return hostdata->cmnd;
+}
+
 static inline void
 NCR_700_set_depth(struct scsi_device *SDp, __u8 depth)
 {
-	long l = (long)SDp->hostdata;
+	struct NCR_700_Device_Parameters *hostdata = SDp->hostdata;
 
-	l &= 0xffff00ff;
-	l |= 0xff00 & (depth << 8);
-	SDp->hostdata = (void *)l;
+	hostdata->depth = depth;
 }
 static inline __u8
 NCR_700_get_depth(struct scsi_device *SDp)
 {
-	return ((((unsigned long)SDp->hostdata) & 0xff00)>>8);
+	struct NCR_700_Device_Parameters *hostdata = SDp->hostdata;
+
+	return hostdata->depth;
 }
 static inline int
 NCR_700_is_flag_set(struct scsi_device *SDp, __u32 flag)
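The 53c700.h change above replaces the old trick of cramming negotiated state into the 32 bits of device->hostdata with a real, kzalloc()ed per-device structure. A minimal standalone sketch of the before/after accessor pattern (plain C; dev_params and the get_depth_* names are illustrative, not the driver's):

	#include <stdlib.h>

	/* after: a real per-device structure, allocated once in slave_alloc */
	struct dev_params {
		unsigned char cmnd[16];	/* scratch space for REQUEST SENSE */
		unsigned char depth;	/* negotiated queue depth */
	};

	/* before: depth crammed into bits 8..15 of the integer-cast pointer */
	static unsigned char get_depth_packed(void *hostdata)
	{
		return ((unsigned long)hostdata & 0xff00) >> 8;
	}

	/* after: ordinary field access through the allocated structure */
	static unsigned char get_depth_struct(void *hostdata)
	{
		return ((struct dev_params *)hostdata)->depth;
	}

The structure costs one small allocation per device, but it turns hostdata back into a real pointer, which is what the new NCR_700_get_sense_cmnd() accessor depends on.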
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 7cea514e810a..1cd3584ba7ff 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -92,31 +92,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
 	init->AdapterFibsSize = cpu_to_le32(fibsize);
 	init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
-	/*
-	 * number of 4k pages of host physical memory. The aacraid fw needs
-	 * this number to be less than 4gb worth of pages. num_physpages is in
-	 * system page units. New firmware doesn't have any issues with the
-	 * mapping system, but older Firmware did, and had *troubles* dealing
-	 * with the math overloading past 32 bits, thus we must limit this
-	 * field.
-	 *
-	 * This assumes the memory is mapped zero->n, which isnt
-	 * always true on real computers. It also has some slight problems
-	 * with the GART on x86-64. I've btw never tried DMA from PCI space
-	 * on this platform but don't be surprised if its problematic.
-	 * [AK: something is very very wrong when a driver tests this symbol.
-	 *  Someone should figure out what the comment writer really meant here and fix
-	 *  the code. Or just remove that bad code. ]
-	 */
-#ifndef CONFIG_IOMMU
-	if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
-		init->HostPhysMemPages =
-			cpu_to_le32(num_physpages << (PAGE_SHIFT-12));
-	} else
-#endif
-	{
-		init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
-	}
+	init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
 
 	init->InitFlags = 0;
 	if (dev->new_comm_interface) {
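The block deleted above reported the real host page count when it fit under the firmware limit and fell back to the cap otherwise; the replacement simply always reports the cap. What the old #ifndef CONFIG_IOMMU branch computed was, in effect, a clamp — roughly (standalone sketch, names illustrative):

	/* cap stands in for AAC_MAX_HOSTPHYSMEMPAGES; pages stands in for
	 * num_physpages rescaled from system pages to 4k units */
	static unsigned int host_phys_mem_pages(unsigned long pages,
						unsigned int cap)
	{
		return pages <= cap ? (unsigned int)pages : cap;
	}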
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index eb7745692682..df3346b5caf8 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -1487,6 +1487,7 @@ typedef enum {
 } ahd_queue_alg;
 
 void		ahd_set_tags(struct ahd_softc *ahd,
+			     struct scsi_cmnd *cmd,
 			     struct ahd_devinfo *devinfo,
 			     ahd_queue_alg alg);
 
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 801fc81d0b20..a1e8ca758594 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -1090,7 +1090,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
 
 		/* Notify XPT */
 		ahd_send_async(ahd, devinfo.channel, devinfo.target,
-			       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
+			       CAM_LUN_WILDCARD, AC_SENT_BDR);
 
 		/*
 		 * Allow the sequencer to continue with
@@ -3062,7 +3062,7 @@ ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
 		tinfo->curr.ppr_options = ppr_options;
 
 		ahd_send_async(ahd, devinfo->channel, devinfo->target,
-			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
+			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
 		if (bootverbose) {
 			if (offset != 0) {
 				int options;
@@ -3184,7 +3184,7 @@ ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
 
 		tinfo->curr.width = width;
 		ahd_send_async(ahd, devinfo->channel, devinfo->target,
-			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
+			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
 		if (bootverbose) {
 			printf("%s: target %d using %dbit transfers\n",
 			       ahd_name(ahd), devinfo->target,
@@ -3211,12 +3211,14 @@ ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
 * Update the current state of tagged queuing for a given target.
 */
 void
-ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
-	     ahd_queue_alg alg)
+ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+	     struct ahd_devinfo *devinfo, ahd_queue_alg alg)
 {
-	ahd_platform_set_tags(ahd, devinfo, alg);
+	struct scsi_device *sdev = cmd->device;
+
+	ahd_platform_set_tags(ahd, sdev, devinfo, alg);
 	ahd_send_async(ahd, devinfo->channel, devinfo->target,
-		       devinfo->lun, AC_TRANSFER_NEG, &alg);
+		       devinfo->lun, AC_TRANSFER_NEG);
 }
 
 static void
@@ -4746,7 +4748,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
 		printf("(%s:%c:%d:%d): refuses tagged commands. "
 		       "Performing non-tagged I/O\n", ahd_name(ahd),
 		       devinfo->channel, devinfo->target, devinfo->lun);
-		ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE);
+		ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE);
 		mask = ~0x23;
 	} else {
 		printf("(%s:%c:%d:%d): refuses %s tagged commands. "
@@ -4754,7 +4756,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
 		       ahd_name(ahd), devinfo->channel, devinfo->target,
 		       devinfo->lun, tag_type == MSG_ORDERED_TASK
 		       ? "ordered" : "head of queue");
-		ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC);
+		ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC);
 		mask = ~0x03;
 	}
 
@@ -5098,7 +5100,7 @@ ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
 
 	if (status != CAM_SEL_TIMEOUT)
 		ahd_send_async(ahd, devinfo->channel, devinfo->target,
-			       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
+			       CAM_LUN_WILDCARD, AC_SENT_BDR);
 
 	if (message != NULL && bootverbose)
 		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd),
@@ -7952,7 +7954,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
 #endif
 	/* Notify the XPT that a bus reset occurred */
 	ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
-		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
+		       CAM_LUN_WILDCARD, AC_BUS_RESET);
 
 	/*
	 * Revert to async/narrow transfers until we renegotiate.
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index e0ccdf362200..b244c7124179 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -484,7 +484,6 @@ ahd_linux_target_alloc(struct scsi_target *starget)
 	struct seeprom_config *sc = ahd->seep_config;
 	unsigned long flags;
 	struct scsi_target **ahd_targp = ahd_linux_target_in_softc(starget);
-	struct ahd_linux_target *targ = scsi_transport_target_data(starget);
 	struct ahd_devinfo devinfo;
 	struct ahd_initiator_tinfo *tinfo;
 	struct ahd_tmode_tstate *tstate;
@@ -495,7 +494,6 @@ ahd_linux_target_alloc(struct scsi_target *starget)
 	BUG_ON(*ahd_targp != NULL);
 
 	*ahd_targp = starget;
-	memset(targ, 0, sizeof(*targ));
 
 	if (sc) {
 		int flags = sc->device_flags[starget->id];
@@ -551,15 +549,11 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
 {
 	struct ahd_softc *ahd =
 		*((struct ahd_softc **)sdev->host->hostdata);
-	struct scsi_target *starget = sdev->sdev_target;
-	struct ahd_linux_target *targ = scsi_transport_target_data(starget);
 	struct ahd_linux_device *dev;
 
 	if (bootverbose)
 		printf("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);
 
-	BUG_ON(targ->sdev[sdev->lun] != NULL);
-
 	dev = scsi_transport_device_data(sdev);
 	memset(dev, 0, sizeof(*dev));
 
@@ -576,8 +570,6 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
	 */
 	dev->maxtags = 0;
 
-	targ->sdev[sdev->lun] = sdev;
-
 	return (0);
 }
 
@@ -599,23 +591,6 @@ ahd_linux_slave_configure(struct scsi_device *sdev)
 	return 0;
 }
 
-static void
-ahd_linux_slave_destroy(struct scsi_device *sdev)
-{
-	struct ahd_softc *ahd;
-	struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
-	struct ahd_linux_target *targ = scsi_transport_target_data(sdev->sdev_target);
-
-	ahd = *((struct ahd_softc **)sdev->host->hostdata);
-	if (bootverbose)
-		printf("%s: Slave Destroy %d\n", ahd_name(ahd), sdev->id);
-
-	BUG_ON(dev->active);
-
-	targ->sdev[sdev->lun] = NULL;
-
-}
-
 #if defined(__i386__)
 /*
 * Return the disk geometry for the given SCSI device.
@@ -822,7 +797,6 @@ struct scsi_host_template aic79xx_driver_template = {
 	.use_clustering		= ENABLE_CLUSTERING,
 	.slave_alloc		= ahd_linux_slave_alloc,
 	.slave_configure	= ahd_linux_slave_configure,
-	.slave_destroy		= ahd_linux_slave_destroy,
 	.target_alloc		= ahd_linux_target_alloc,
 	.target_destroy		= ahd_linux_target_destroy,
 };
@@ -1249,20 +1223,13 @@ void
 ahd_platform_free(struct ahd_softc *ahd)
 {
 	struct scsi_target *starget;
-	int i, j;
+	int i;
 
 	if (ahd->platform_data != NULL) {
 		/* destroy all of the device and target objects */
 		for (i = 0; i < AHD_NUM_TARGETS; i++) {
 			starget = ahd->platform_data->starget[i];
 			if (starget != NULL) {
-				for (j = 0; j < AHD_NUM_LUNS; j++) {
-					struct ahd_linux_target *targ =
-						scsi_transport_target_data(starget);
-					if (targ->sdev[j] == NULL)
-						continue;
-					targ->sdev[j] = NULL;
-				}
 				ahd->platform_data->starget[i] = NULL;
 			}
 		}
@@ -1318,20 +1285,13 @@ ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
 }
 
 void
-ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
-		      ahd_queue_alg alg)
+ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
+		      struct ahd_devinfo *devinfo, ahd_queue_alg alg)
 {
-	struct scsi_target *starget;
-	struct ahd_linux_target *targ;
 	struct ahd_linux_device *dev;
-	struct scsi_device *sdev;
 	int was_queuing;
 	int now_queuing;
 
-	starget = ahd->platform_data->starget[devinfo->target];
-	targ = scsi_transport_target_data(starget);
-	BUG_ON(targ == NULL);
-	sdev = targ->sdev[devinfo->lun];
 	if (sdev == NULL)
 		return;
 
@@ -1467,11 +1427,15 @@ ahd_linux_device_queue_depth(struct scsi_device *sdev)
 	tags = ahd_linux_user_tagdepth(ahd, &devinfo);
 	if (tags != 0 && sdev->tagged_supported != 0) {
 
-		ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED);
+		ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_TAGGED);
+		ahd_send_async(ahd, devinfo.channel, devinfo.target,
+			       devinfo.lun, AC_TRANSFER_NEG);
 		ahd_print_devinfo(ahd, &devinfo);
 		printf("Tagged Queuing enabled. Depth %d\n", tags);
 	} else {
-		ahd_set_tags(ahd, &devinfo, AHD_QUEUE_NONE);
+		ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE);
+		ahd_send_async(ahd, devinfo.channel, devinfo.target,
+			       devinfo.lun, AC_TRANSFER_NEG);
 	}
 }
 
@@ -1629,7 +1593,7 @@ ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
 
 void
 ahd_send_async(struct ahd_softc *ahd, char channel,
-	       u_int target, u_int lun, ac_code code, void *arg)
+	       u_int target, u_int lun, ac_code code)
 {
 	switch (code) {
 	case AC_TRANSFER_NEG:
@@ -1956,7 +1920,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
 			}
 			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
 			ahd_set_scsi_status(scb, SCSI_STATUS_OK);
-			ahd_platform_set_tags(ahd, &devinfo,
+			ahd_platform_set_tags(ahd, sdev, &devinfo,
 				     (dev->flags & AHD_DEV_Q_BASIC)
 				   ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
 			break;
@@ -1966,7 +1930,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
			 * as if the target returned BUSY SCSI status.
			 */
 			dev->openings = 1;
-			ahd_platform_set_tags(ahd, &devinfo,
+			ahd_platform_set_tags(ahd, sdev, &devinfo,
 				     (dev->flags & AHD_DEV_Q_BASIC)
 				   ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
 			ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
@@ -2778,8 +2742,6 @@ ahd_linux_init(void)
 	if (!ahd_linux_transport_template)
 		return -ENODEV;
 
-	scsi_transport_reserve_target(ahd_linux_transport_template,
-				      sizeof(struct ahd_linux_target));
 	scsi_transport_reserve_device(ahd_linux_transport_template,
 				      sizeof(struct ahd_linux_device));
 
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 92c6154575e7..9e871de23835 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -262,7 +262,6 @@ typedef enum {
 	AHD_DEV_PERIODIC_OTAG	 = 0x40, /* Send OTAG to prevent starvation */
 } ahd_linux_dev_flags;
 
-struct ahd_linux_target;
 struct ahd_linux_device {
 	TAILQ_ENTRY(ahd_linux_device) links;
 
@@ -342,12 +341,6 @@ struct ahd_linux_device {
 #define AHD_OTAG_THRESH	500
 };
 
-struct ahd_linux_target {
-	struct scsi_device	*sdev[AHD_NUM_LUNS];
-	struct ahd_transinfo	last_tinfo;
-	struct ahd_softc	*ahd;
-};
-
 /********************* Definitions Required by the Core ***********************/
 /*
 * Number of SG segments we require. So long as the S/G segments for
@@ -864,7 +857,7 @@ ahd_freeze_scb(struct scb *scb)
 	}
 }
 
-void	ahd_platform_set_tags(struct ahd_softc *ahd,
+void	ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
 			      struct ahd_devinfo *devinfo, ahd_queue_alg);
 int	ahd_platform_abort_scbs(struct ahd_softc *ahd, int target,
 				char channel, int lun, u_int tag,
@@ -873,7 +866,7 @@ irqreturn_t
 	ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs);
 void	ahd_done(struct ahd_softc*, struct scb*);
 void	ahd_send_async(struct ahd_softc *, char channel,
-		       u_int target, u_int lun, ac_code, void *);
+		       u_int target, u_int lun, ac_code);
 void	ahd_print_path(struct ahd_softc *, struct scb *);
 
 #ifdef CONFIG_PCI
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index 24fd59a230bf..c5f0ee591509 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -47,7 +47,7 @@ static int	copy_info(struct info_str *info, char *fmt, ...);
 static void	ahd_dump_target_state(struct ahd_softc *ahd,
 				      struct info_str *info,
 				      u_int our_id, char channel,
-				      u_int target_id, u_int target_offset);
+				      u_int target_id);
 static void	ahd_dump_device_state(struct info_str *info,
 				      struct scsi_device *sdev);
 static int	ahd_proc_write_seeprom(struct ahd_softc *ahd,
@@ -204,10 +204,8 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
 
 static void
 ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
-		      u_int our_id, char channel, u_int target_id,
-		      u_int target_offset)
+		      u_int our_id, char channel, u_int target_id)
 {
-	struct ahd_linux_target *targ;
 	struct scsi_target *starget;
 	struct ahd_initiator_tinfo *tinfo;
 	struct ahd_tmode_tstate *tstate;
@@ -218,10 +216,9 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
 	copy_info(info, "Target %d Negotiation Settings\n", target_id);
 	copy_info(info, "\tUser: ");
 	ahd_format_transinfo(info, &tinfo->user);
-	starget = ahd->platform_data->starget[target_offset];
+	starget = ahd->platform_data->starget[target_id];
 	if (starget == NULL)
 		return;
-	targ = scsi_transport_target_data(starget);
 
 	copy_info(info, "\tGoal: ");
 	ahd_format_transinfo(info, &tinfo->goal);
@@ -231,7 +228,7 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
 	for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
 		struct scsi_device *dev;
 
-		dev = targ->sdev[lun];
+		dev = scsi_device_lookup_by_target(starget, lun);
 
 		if (dev == NULL)
 			continue;
@@ -355,7 +352,7 @@ ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
 	copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n",
 		  ahd->scb_data.numscbs, AHD_NSEG);
 
-	max_targ = 15;
+	max_targ = 16;
 
 	if (ahd->seep_config == NULL)
 		copy_info(&info, "No Serial EEPROM\n");
@@ -373,12 +370,12 @@ ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
 	copy_info(&info, "\n");
 
 	if ((ahd->features & AHD_WIDE) == 0)
-		max_targ = 7;
+		max_targ = 8;
 
-	for (i = 0; i <= max_targ; i++) {
+	for (i = 0; i < max_targ; i++) {
 
 		ahd_dump_target_state(ahd, &info, ahd->our_id, 'A',
-				      /*target_id*/i, /*target_offset*/i);
+				      /*target_id*/i);
 	}
 	retval = info.pos > info.offset ? info.pos - info.offset : 0;
done:
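One behavioural detail of the lookup switch above: scsi_device_lookup_by_target() returns the device with its reference count raised, so a per-LUN walk normally pairs each successful lookup with scsi_device_put(). The shape of such a loop (illustrative fragment, kernel context assumed):

	for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
		struct scsi_device *dev =
			scsi_device_lookup_by_target(starget, lun);

		if (dev == NULL)
			continue;
		ahd_dump_device_state(info, dev);
		scsi_device_put(dev);	/* drop the lookup's reference */
	}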
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 412f8301b757..0ec41f34f462 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -2625,29 +2625,32 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	unsigned int base_io, tmport, error,n;
 	unsigned char host_id;
 	struct Scsi_Host *shpnt = NULL;
-	struct atp_unit atp_dev, *p;
+	struct atp_unit *atpdev, *p;
 	unsigned char setupdata[2][16];
 	int count = 0;
 
+	atpdev = kzalloc(sizeof(*atpdev), GFP_KERNEL);
+	if (!atpdev)
+		return -ENOMEM;
+
 	if (pci_enable_device(pdev))
-		return -EIO;
+		goto err_eio;
 
 	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 		printk(KERN_INFO "atp870u: use 32bit DMA mask.\n");
 	} else {
 		printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
-		return -EIO;
+		goto err_eio;
 	}
 
-	memset(&atp_dev, 0, sizeof atp_dev);
 	/*
	 * It's probably easier to weed out some revisions like
	 * this than via the PCI device table
	 */
 	if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) {
-		error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atp_dev.chip_ver);
-		if (atp_dev.chip_ver < 2)
-			return -EIO;
+		error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
+		if (atpdev->chip_ver < 2)
+			goto err_eio;
 	}
 
 	switch (ent->device) {
@@ -2656,15 +2659,15 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	case ATP880_DEVID1:
 	case ATP880_DEVID2:
 	case ATP885_DEVID:
-		atp_dev.chip_ver = 0x04;
+		atpdev->chip_ver = 0x04;
 	default:
 		break;
 	}
 	base_io = pci_resource_start(pdev, 0);
 	base_io &= 0xfffffff8;
 
 	if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) {
-		error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atp_dev.chip_ver);
+		error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
 		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803
 
 		host_id = inb(base_io + 0x39);
@@ -2672,17 +2675,17 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		printk(KERN_INFO " ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: %d"
			" IO:%x, IRQ:%d.\n", count, base_io, pdev->irq);
-		atp_dev.ioport[0] = base_io + 0x40;
-		atp_dev.pciport[0] = base_io + 0x28;
-		atp_dev.dev_id = ent->device;
-		atp_dev.host_id[0] = host_id;
+		atpdev->ioport[0] = base_io + 0x40;
+		atpdev->pciport[0] = base_io + 0x28;
+		atpdev->dev_id = ent->device;
+		atpdev->host_id[0] = host_id;
 
 		tmport = base_io + 0x22;
-		atp_dev.scam_on = inb(tmport);
+		atpdev->scam_on = inb(tmport);
 		tmport += 0x13;
-		atp_dev.global_map[0] = inb(tmport);
+		atpdev->global_map[0] = inb(tmport);
 		tmport += 0x07;
-		atp_dev.ultra_map[0] = inw(tmport);
+		atpdev->ultra_map[0] = inw(tmport);
 
 		n = 0x3f09;
next_fblk_880:
@@ -2695,57 +2698,57 @@ next_fblk_880:
 		if (inb(base_io + 0x30) == 0xff)
 			goto flash_ok_880;
 
-		atp_dev.sp[0][m++] = inb(base_io + 0x30);
-		atp_dev.sp[0][m++] = inb(base_io + 0x31);
-		atp_dev.sp[0][m++] = inb(base_io + 0x32);
-		atp_dev.sp[0][m++] = inb(base_io + 0x33);
+		atpdev->sp[0][m++] = inb(base_io + 0x30);
+		atpdev->sp[0][m++] = inb(base_io + 0x31);
+		atpdev->sp[0][m++] = inb(base_io + 0x32);
+		atpdev->sp[0][m++] = inb(base_io + 0x33);
 		outw(n, base_io + 0x34);
 		n += 0x0002;
-		atp_dev.sp[0][m++] = inb(base_io + 0x30);
-		atp_dev.sp[0][m++] = inb(base_io + 0x31);
-		atp_dev.sp[0][m++] = inb(base_io + 0x32);
-		atp_dev.sp[0][m++] = inb(base_io + 0x33);
+		atpdev->sp[0][m++] = inb(base_io + 0x30);
+		atpdev->sp[0][m++] = inb(base_io + 0x31);
+		atpdev->sp[0][m++] = inb(base_io + 0x32);
+		atpdev->sp[0][m++] = inb(base_io + 0x33);
 		outw(n, base_io + 0x34);
 		n += 0x0002;
-		atp_dev.sp[0][m++] = inb(base_io + 0x30);
-		atp_dev.sp[0][m++] = inb(base_io + 0x31);
-		atp_dev.sp[0][m++] = inb(base_io + 0x32);
-		atp_dev.sp[0][m++] = inb(base_io + 0x33);
+		atpdev->sp[0][m++] = inb(base_io + 0x30);
+		atpdev->sp[0][m++] = inb(base_io + 0x31);
+		atpdev->sp[0][m++] = inb(base_io + 0x32);
+		atpdev->sp[0][m++] = inb(base_io + 0x33);
 		outw(n, base_io + 0x34);
 		n += 0x0002;
-		atp_dev.sp[0][m++] = inb(base_io + 0x30);
-		atp_dev.sp[0][m++] = inb(base_io + 0x31);
-		atp_dev.sp[0][m++] = inb(base_io + 0x32);
-		atp_dev.sp[0][m++] = inb(base_io + 0x33);
+		atpdev->sp[0][m++] = inb(base_io + 0x30);
+		atpdev->sp[0][m++] = inb(base_io + 0x31);
+		atpdev->sp[0][m++] = inb(base_io + 0x32);
+		atpdev->sp[0][m++] = inb(base_io + 0x33);
 		n += 0x0018;
 		goto next_fblk_880;
flash_ok_880:
 		outw(0, base_io + 0x34);
-		atp_dev.ultra_map[0] = 0;
-		atp_dev.async[0] = 0;
+		atpdev->ultra_map[0] = 0;
+		atpdev->async[0] = 0;
 		for (k = 0; k < 16; k++) {
 			n = 1;
 			n = n << k;
-			if (atp_dev.sp[0][k] > 1) {
-				atp_dev.ultra_map[0] |= n;
+			if (atpdev->sp[0][k] > 1) {
+				atpdev->ultra_map[0] |= n;
 			} else {
-				if (atp_dev.sp[0][k] == 0)
-					atp_dev.async[0] |= n;
+				if (atpdev->sp[0][k] == 0)
+					atpdev->async[0] |= n;
 			}
 		}
-		atp_dev.async[0] = ~(atp_dev.async[0]);
-		outb(atp_dev.global_map[0], base_io + 0x35);
+		atpdev->async[0] = ~(atpdev->async[0]);
+		outb(atpdev->global_map[0], base_io + 0x35);
 
 		shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
 		if (!shpnt)
-			return -ENOMEM;
+			goto err_nomem;
 
 		p = (struct atp_unit *)&shpnt->hostdata;
 
-		atp_dev.host = shpnt;
-		atp_dev.pdev = pdev;
+		atpdev->host = shpnt;
+		atpdev->pdev = pdev;
 		pci_set_drvdata(pdev, p);
-		memcpy(p, &atp_dev, sizeof atp_dev);
+		memcpy(p, atpdev, sizeof(*atpdev));
 		if (atp870u_init_tables(shpnt) < 0) {
 			printk(KERN_ERR "Unable to allocate tables for Acard controller\n");
 			goto unregister;
@@ -2798,24 +2801,24 @@ flash_ok_880:
 		printk(KERN_INFO " ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%x, IRQ:%d.\n"
			, base_io, pdev->irq);
 
-		atp_dev.pdev = pdev;
-		atp_dev.dev_id = ent->device;
-		atp_dev.baseport = base_io;
-		atp_dev.ioport[0] = base_io + 0x80;
-		atp_dev.ioport[1] = base_io + 0xc0;
-		atp_dev.pciport[0] = base_io + 0x40;
-		atp_dev.pciport[1] = base_io + 0x50;
+		atpdev->pdev = pdev;
+		atpdev->dev_id = ent->device;
+		atpdev->baseport = base_io;
+		atpdev->ioport[0] = base_io + 0x80;
+		atpdev->ioport[1] = base_io + 0xc0;
+		atpdev->pciport[0] = base_io + 0x40;
+		atpdev->pciport[1] = base_io + 0x50;
 
 		shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
 		if (!shpnt)
-			return -ENOMEM;
+			goto err_nomem;
 
 		p = (struct atp_unit *)&shpnt->hostdata;
 
-		atp_dev.host = shpnt;
-		atp_dev.pdev = pdev;
+		atpdev->host = shpnt;
+		atpdev->pdev = pdev;
 		pci_set_drvdata(pdev, p);
-		memcpy(p, &atp_dev, sizeof(struct atp_unit));
+		memcpy(p, atpdev, sizeof(struct atp_unit));
 		if (atp870u_init_tables(shpnt) < 0)
 			goto unregister;
 
@@ -2974,33 +2977,33 @@ flash_ok_885:
 		printk(KERN_INFO " ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: %d "
			"IO:%x, IRQ:%d.\n", count, base_io, pdev->irq);
 
-		atp_dev.ioport[0] = base_io;
-		atp_dev.pciport[0] = base_io + 0x20;
-		atp_dev.dev_id = ent->device;
+		atpdev->ioport[0] = base_io;
+		atpdev->pciport[0] = base_io + 0x20;
+		atpdev->dev_id = ent->device;
 		host_id &= 0x07;
-		atp_dev.host_id[0] = host_id;
+		atpdev->host_id[0] = host_id;
 		tmport = base_io + 0x22;
-		atp_dev.scam_on = inb(tmport);
+		atpdev->scam_on = inb(tmport);
 		tmport += 0x0b;
-		atp_dev.global_map[0] = inb(tmport++);
-		atp_dev.ultra_map[0] = inw(tmport);
+		atpdev->global_map[0] = inb(tmport++);
+		atpdev->ultra_map[0] = inw(tmport);
 
-		if (atp_dev.ultra_map[0] == 0) {
-			atp_dev.scam_on = 0x00;
-			atp_dev.global_map[0] = 0x20;
-			atp_dev.ultra_map[0] = 0xffff;
+		if (atpdev->ultra_map[0] == 0) {
+			atpdev->scam_on = 0x00;
+			atpdev->global_map[0] = 0x20;
+			atpdev->ultra_map[0] = 0xffff;
 		}
 
 		shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
 		if (!shpnt)
-			return -ENOMEM;
+			goto err_nomem;
 
 		p = (struct atp_unit *)&shpnt->hostdata;
 
-		atp_dev.host = shpnt;
-		atp_dev.pdev = pdev;
+		atpdev->host = shpnt;
+		atpdev->pdev = pdev;
 		pci_set_drvdata(pdev, p);
-		memcpy(p, &atp_dev, sizeof atp_dev);
+		memcpy(p, atpdev, sizeof(*atpdev));
 		if (atp870u_init_tables(shpnt) < 0)
 			goto unregister;
 
@@ -3010,7 +3013,7 @@ flash_ok_885:
 	}
 
 	spin_lock_irqsave(shpnt->host_lock, flags);
-	if (atp_dev.chip_ver > 0x07) {	/* check if atp876 chip then enable terminator */
+	if (atpdev->chip_ver > 0x07) {	/* check if atp876 chip then enable terminator */
 		tmport = base_io + 0x3e;
 		outb(0x00, tmport);
 	}
@@ -3044,7 +3047,7 @@ flash_ok_885:
 	outb((inb(tmport) & 0xef), tmport);
 	tmport++;
 	outb((inb(tmport) | 0x20), tmport);
-	if (atp_dev.chip_ver == 4)
+	if (atpdev->chip_ver == 4)
 		shpnt->max_id = 16;
 	else
 		shpnt->max_id = 8;
@@ -3093,6 +3096,12 @@ unregister:
 	printk("atp870u_prob:unregister\n");
 	scsi_host_put(shpnt);
 	return -1;
+err_eio:
+	kfree(atpdev);
+	return -EIO;
+err_nomem:
+	kfree(atpdev);
+	return -ENOMEM;
 }
 
 /* The abort command does not leave the device in a clean state where
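The probe rewrite above trades a roughly kilobyte-sized on-stack struct atp_unit for one kzalloc(), which in turn forces every early return through labels that free it. A standalone model of that shape (userspace C; -5/-12 stand in for -EIO/-ENOMEM, and the success path is simplified):

	#include <stdlib.h>

	struct big_unit { char state[1024]; };

	static int probe_model(int enable_fails, int alloc2_fails)
	{
		struct big_unit *u = calloc(1, sizeof(*u)); /* kzalloc analogue */

		if (!u)
			return -12;		/* -ENOMEM */
		if (enable_fails)
			goto err_eio;		/* was: bare return -EIO */
		if (alloc2_fails)
			goto err_nomem;		/* was: bare return -ENOMEM */
		free(u);
		return 0;
	err_eio:
		free(u);
		return -5;			/* -EIO */
	err_nomem:
		free(u);
		return -12;			/* -ENOMEM */
	}

With a bare return, a heap-allocated unit would leak on every failure, which is why the conversion from stack to heap and the goto-unwind arrive in the same patch.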
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 944fc1203ebd..669ea4fff166 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -535,6 +535,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 				   struct ibmvscsi_host_data *hostdata)
 {
 	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+	int request_status;
 	int rc;
 
 	/* If we have exhausted our request limit, just fail this request.
@@ -542,9 +543,18 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
	 * (such as task management requests) that the mid layer may think we
	 * can handle more requests (can_queue) when we actually can't
	 */
-	if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
-	    (atomic_dec_if_positive(&hostdata->request_limit) < 0))
-		goto send_error;
+	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
+		request_status =
+			atomic_dec_if_positive(&hostdata->request_limit);
+		/* If request limit was -1 when we started, it is now even
+		 * less than that
+		 */
+		if (request_status < -1)
+			goto send_error;
+		/* Otherwise, if we have run out of requests */
+		else if (request_status < 0)
+			goto send_busy;
+	}
 
 	/* Copy the IU into the transfer area */
 	*evt_struct->xfer_iu = evt_struct->iu;
@@ -567,11 +577,23 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 
 	return 0;
 
- send_error:
+ send_busy:
 	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
 	free_event_struct(&hostdata->pool, evt_struct);
 	return SCSI_MLQUEUE_HOST_BUSY;
+
+ send_error:
+	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
+
+	if (evt_struct->cmnd != NULL) {
+		evt_struct->cmnd->result = DID_ERROR << 16;
+		evt_struct->cmnd_done(evt_struct->cmnd);
+	} else if (evt_struct->done)
+		evt_struct->done(evt_struct);
+
+	free_event_struct(&hostdata->pool, evt_struct);
+	return 0;
 }
 
 /**
@@ -1184,27 +1206,37 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 		return;
 	case 0xFF:	/* Hypervisor telling us the connection is closed */
 		scsi_block_requests(hostdata->host);
+		atomic_set(&hostdata->request_limit, 0);
 		if (crq->format == 0x06) {
 			/* We need to re-setup the interpartition connection */
 			printk(KERN_INFO
 			       "ibmvscsi: Re-enabling adapter!\n");
-			atomic_set(&hostdata->request_limit, -1);
 			purge_requests(hostdata, DID_REQUEUE);
-			if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
-							hostdata) == 0)
-				if (ibmvscsi_send_crq(hostdata,
-						      0xC001000000000000LL, 0))
+			if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
+							 hostdata) == 0) ||
+			    (ibmvscsi_send_crq(hostdata,
+					       0xC001000000000000LL, 0))) {
+				atomic_set(&hostdata->request_limit,
+					   -1);
 				printk(KERN_ERR
-				       "ibmvscsi: transmit error after"
+				       "ibmvscsi: error after"
 				       " enable\n");
+			}
 		} else {
 			printk(KERN_INFO
 			       "ibmvscsi: Virtual adapter failed rc %d!\n",
 			       crq->format);
 
-			atomic_set(&hostdata->request_limit, -1);
 			purge_requests(hostdata, DID_ERROR);
-			ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+			if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
+						      hostdata)) ||
+			    (ibmvscsi_send_crq(hostdata,
+					       0xC001000000000000LL, 0))) {
+				atomic_set(&hostdata->request_limit,
+					   -1);
+				printk(KERN_ERR
+				       "ibmvscsi: error after reset\n");
+			}
 		}
 		scsi_unblock_requests(hostdata->host);
 		return;
@@ -1467,6 +1499,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	struct Scsi_Host *host;
 	struct device *dev = &vdev->dev;
 	unsigned long wait_switch = 0;
+	int rc;
 
 	vdev->dev.driver_data = NULL;
 
@@ -1484,8 +1517,8 @@
 	atomic_set(&hostdata->request_limit, -1);
 	hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
 
-	if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
-				    max_requests) != 0) {
+	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
+	if (rc != 0 && rc != H_RESOURCE) {
 		printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
 		goto init_crq_failed;
 	}
@@ -1505,7 +1538,8 @@
	 * to fail if the other end is not acive. In that case we don't
	 * want to scan
	 */
-	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) {
+	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+	    || rc == H_RESOURCE) {
 		/*
		 * Wait around max init_timeout secs for the adapter to finish
		 * initializing. When we are done initializing, we will have a
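The three-way split above relies on the return convention of atomic_dec_if_positive(): it returns the decremented value but only stores it when the result stays non-negative. A standalone model of the semantics and the resulting classification (plain int stands in for atomic_t; the real helper is cmpxchg-based):

	/* Model of atomic_dec_if_positive(): returns v-1, stores it only
	 * if that result is still >= 0. */
	static int dec_if_positive(int *v)
	{
		int dec = *v - 1;

		if (dec >= 0)
			*v = dec;
		return dec;
	}

	/* request_limit == -1 (adapter dead) -> returns -2 -> send_error
	 * request_limit ==  0 (pool drained) -> returns -1 -> send_busy
	 * request_limit >=  1                -> returns >= 0 -> send it */

This is what lets the driver fail commands outright on a dead adapter while merely pushing back (SCSI_MLQUEUE_HOST_BUSY) when the request pool is temporarily exhausted.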
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 1a9992bdfef8..242b8873b333 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -208,6 +208,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 			    int max_requests)
 {
 	int rc;
+	int retrc;
 	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 
 	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
@@ -226,7 +227,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 	gather_partition_info();
 	set_adapter_info(hostdata);
 
-	rc = plpar_hcall_norets(H_REG_CRQ,
+	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
 				vdev->unit_address,
 				queue->msg_token, PAGE_SIZE);
 	if (rc == H_RESOURCE)
@@ -263,7 +264,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
 		     (unsigned long)hostdata);
 
-	return 0;
+	return retrc;
 
 req_irq_failed:
 	do {
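The retrc change above exists because rc gets reused by every later setup step: saving the first hcall's status lets ibmvscsi_init_crq_queue() report the benign H_RESOURCE case back to the rewritten probe path, which treats it as "partner already registered, send the init message anyway". In miniature (standalone sketch):

	static int init_model(int first_status)
	{
		int rc, retrc;

		retrc = rc = first_status;	/* 0, or the benign H_RESOURCE */
		rc = 0;				/* later steps keep reusing rc */
		return rc ? rc : retrc;		/* success still carries retrc */
	}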
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b4743a9ecc80..848fb2aa4ca3 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -2130,19 +2130,21 @@ iscsi_r2tpool_free(struct iscsi_session *session)
 
 static int
 iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
-		     uint32_t value)
+		     char *buf, int buflen)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	int value;
 
 	switch(param) {
 	case ISCSI_PARAM_MAX_RECV_DLENGTH: {
 		char *saveptr = tcp_conn->data;
 		gfp_t flags = GFP_KERNEL;
 
+		sscanf(buf, "%d", &value);
 		if (tcp_conn->data_size >= value) {
-			conn->max_recv_dlength = value;
+			iscsi_set_param(cls_conn, param, buf, buflen);
 			break;
 		}
 
@@ -2165,15 +2167,12 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
 		else
 			free_pages((unsigned long)saveptr,
 				   get_order(tcp_conn->data_size));
-		conn->max_recv_dlength = value;
+		iscsi_set_param(cls_conn, param, buf, buflen);
 		tcp_conn->data_size = value;
-	}
-		break;
-	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
-		conn->max_xmit_dlength = value;
 		break;
+	}
 	case ISCSI_PARAM_HDRDGST_EN:
-		conn->hdrdgst_en = value;
+		iscsi_set_param(cls_conn, param, buf, buflen);
 		tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
 		if (conn->hdrdgst_en) {
 			tcp_conn->hdr_size += sizeof(__u32);
@@ -2197,7 +2196,7 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
 		}
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
-		conn->datadgst_en = value;
+		iscsi_set_param(cls_conn, param, buf, buflen);
 		if (conn->datadgst_en) {
 			if (!tcp_conn->data_tx_tfm)
 				tcp_conn->data_tx_tfm =
@@ -2220,121 +2219,36 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
 		tcp_conn->sendpage = conn->datadgst_en ?
 			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
 		break;
-	case ISCSI_PARAM_INITIAL_R2T_EN:
-		session->initial_r2t_en = value;
-		break;
 	case ISCSI_PARAM_MAX_R2T:
+		sscanf(buf, "%d", &value);
 		if (session->max_r2t == roundup_pow_of_two(value))
 			break;
 		iscsi_r2tpool_free(session);
-		session->max_r2t = value;
+		iscsi_set_param(cls_conn, param, buf, buflen);
 		if (session->max_r2t & (session->max_r2t - 1))
 			session->max_r2t = roundup_pow_of_two(session->max_r2t);
 		if (iscsi_r2tpool_alloc(session))
 			return -ENOMEM;
 		break;
-	case ISCSI_PARAM_IMM_DATA_EN:
-		session->imm_data_en = value;
-		break;
-	case ISCSI_PARAM_FIRST_BURST:
-		session->first_burst = value;
-		break;
-	case ISCSI_PARAM_MAX_BURST:
-		session->max_burst = value;
-		break;
-	case ISCSI_PARAM_PDU_INORDER_EN:
-		session->pdu_inorder_en = value;
-		break;
-	case ISCSI_PARAM_DATASEQ_INORDER_EN:
-		session->dataseq_inorder_en = value;
-		break;
-	case ISCSI_PARAM_ERL:
-		session->erl = value;
-		break;
-	case ISCSI_PARAM_IFMARKER_EN:
-		BUG_ON(value);
-		session->ifmarker_en = value;
-		break;
-	case ISCSI_PARAM_OFMARKER_EN:
-		BUG_ON(value);
-		session->ofmarker_en = value;
-		break;
-	case ISCSI_PARAM_EXP_STATSN:
-		conn->exp_statsn = value;
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int
-iscsi_session_get_param(struct iscsi_cls_session *cls_session,
-			enum iscsi_param param, uint32_t *value)
-{
-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
-
-	switch(param) {
-	case ISCSI_PARAM_INITIAL_R2T_EN:
-		*value = session->initial_r2t_en;
-		break;
-	case ISCSI_PARAM_MAX_R2T:
-		*value = session->max_r2t;
-		break;
-	case ISCSI_PARAM_IMM_DATA_EN:
-		*value = session->imm_data_en;
-		break;
-	case ISCSI_PARAM_FIRST_BURST:
-		*value = session->first_burst;
-		break;
-	case ISCSI_PARAM_MAX_BURST:
-		*value = session->max_burst;
-		break;
-	case ISCSI_PARAM_PDU_INORDER_EN:
-		*value = session->pdu_inorder_en;
-		break;
-	case ISCSI_PARAM_DATASEQ_INORDER_EN:
-		*value = session->dataseq_inorder_en;
-		break;
-	case ISCSI_PARAM_ERL:
-		*value = session->erl;
-		break;
-	case ISCSI_PARAM_IFMARKER_EN:
-		*value = session->ifmarker_en;
-		break;
-	case ISCSI_PARAM_OFMARKER_EN:
-		*value = session->ofmarker_en;
-		break;
 	default:
-		return -EINVAL;
+		return iscsi_set_param(cls_conn, param, buf, buflen);
 	}
 
 	return 0;
 }
 
 static int
-iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
-		     enum iscsi_param param, uint32_t *value)
+iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+			 enum iscsi_param param, char *buf)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct inet_sock *inet;
+	struct ipv6_pinfo *np;
+	struct sock *sk;
+	int len;
 
 	switch(param) {
-	case ISCSI_PARAM_MAX_RECV_DLENGTH:
-		*value = conn->max_recv_dlength;
-		break;
-	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
-		*value = conn->max_xmit_dlength;
-		break;
-	case ISCSI_PARAM_HDRDGST_EN:
-		*value = conn->hdrdgst_en;
-		break;
-	case ISCSI_PARAM_DATADGST_EN:
-		*value = conn->datadgst_en;
-		break;
 	case ISCSI_PARAM_CONN_PORT:
 		mutex_lock(&conn->xmitmutex);
 		if (!tcp_conn->sock) {
@@ -2343,30 +2257,9 @@ iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
 		}
 
 		inet = inet_sk(tcp_conn->sock->sk);
-		*value = be16_to_cpu(inet->dport);
+		len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
 		mutex_unlock(&conn->xmitmutex);
-	case ISCSI_PARAM_EXP_STATSN:
-		*value = conn->exp_statsn;
 		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int
-iscsi_conn_get_str_param(struct iscsi_cls_conn *cls_conn,
-			 enum iscsi_param param, char *buf)
-{
-	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct sock *sk;
-	struct inet_sock *inet;
-	struct ipv6_pinfo *np;
-	int len = 0;
-
-	switch (param) {
 	case ISCSI_PARAM_CONN_ADDRESS:
 		mutex_lock(&conn->xmitmutex);
 		if (!tcp_conn->sock) {
@@ -2388,7 +2281,7 @@ iscsi_conn_get_str_param(struct iscsi_cls_conn *cls_conn,
 		mutex_unlock(&conn->xmitmutex);
 		break;
 	default:
-		return -EINVAL;
+		return iscsi_conn_get_param(cls_conn, param, buf);
 	}
 
 	return len;
@@ -2501,7 +2394,11 @@ static struct iscsi_transport iscsi_tcp_transport = {
 				  ISCSI_ERL |
 				  ISCSI_CONN_PORT |
 				  ISCSI_CONN_ADDRESS |
-				  ISCSI_EXP_STATSN,
+				  ISCSI_EXP_STATSN |
+				  ISCSI_PERSISTENT_PORT |
+				  ISCSI_PERSISTENT_ADDRESS |
+				  ISCSI_TARGET_NAME |
+				  ISCSI_TPGT,
 	.host_template		= &iscsi_sht,
 	.conndata_size		= sizeof(struct iscsi_conn),
 	.max_conn		= 1,
@@ -2514,8 +2411,7 @@ static struct iscsi_transport iscsi_tcp_transport = {
 	.bind_conn		= iscsi_tcp_conn_bind,
 	.destroy_conn		= iscsi_tcp_conn_destroy,
 	.set_param		= iscsi_conn_set_param,
-	.get_conn_param		= iscsi_conn_get_param,
-	.get_conn_str_param	= iscsi_conn_get_str_param,
+	.get_conn_param		= iscsi_tcp_conn_get_param,
 	.get_session_param	= iscsi_session_get_param,
 	.start_conn		= iscsi_conn_start,
 	.stop_conn		= iscsi_conn_stop,
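The rework above moves every iSCSI parameter onto a string interface: set_param takes a buffer and sscanf()s it, get_param sprintf()s back, and anything the transport doesn't special-case falls through to the library's iscsi_set_param()/iscsi_conn_get_param(). A standalone model of that layered dispatch (keys and fields are illustrative, not the real iSCSI enum):

	#include <stdio.h>

	struct conn_model { int max_recv_dlength; };

	/* library-level setter: parses the string form */
	static int lib_set(struct conn_model *c, int key, const char *buf)
	{
		if (key != 0)
			return -38;		/* -ENOSYS */
		sscanf(buf, "%d", &c->max_recv_dlength);
		return 0;
	}

	/* transport-level setter: handles its own side effects, then
	 * delegates the actual bookkeeping to the library */
	static int tcp_set(struct conn_model *c, int key, const char *buf)
	{
		switch (key) {
		case 0:
			/* resize the receive buffer here, then record */
			return lib_set(c, key, buf);
		default:
			return lib_set(c, key, buf);
		}
	}

	static int lib_get(const struct conn_model *c, int key, char *buf)
	{
		if (key != 0)
			return -22;		/* -EINVAL */
		return sprintf(buf, "%d\n", c->max_recv_dlength);
	}

Strings cost a parse per access but let one interface carry integers, target names, and addresses alike, which is why the separate get_conn_str_param hook disappears from the transport template.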
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 82caba464291..1c960ac1617f 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1001,7 +1001,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
 	struct ata_queued_cmd *qc;
 	unsigned int tag, preempted_tag;
 	u32 preempted_sactive, preempted_qc_active;
-	DECLARE_COMPLETION(wait);
+	DECLARE_COMPLETION_ONSTACK(wait);
 	unsigned long flags;
 	unsigned int err_mask;
 	int rc;
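DECLARE_COMPLETION() is meant for static or file-scope completions; for an automatic variable like wait here, the lockdep-aware DECLARE_COMPLETION_ONSTACK() variant initializes the object (and its lock class) at runtime, so the checker doesn't treat every call's stack slot as the same static key. Usage shape (kernel fragment, surrounding details elided):

	unsigned wait_model(void)
	{
		DECLARE_COMPLETION_ONSTACK(wait);	/* runtime init */

		/* stash &wait in the command's private data, issue it ... */
		wait_for_completion(&wait);	/* completed from the IRQ path */
		return 0;
	}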
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 2673a11a9495..7e6e031cc41b 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1287,13 +1287,18 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
 	if (scsi_add_host(shost, NULL))
 		goto add_host_fail;
 
+	if (!try_module_get(iscsit->owner))
+		goto cls_session_fail;
+
 	cls_session = iscsi_create_session(shost, iscsit, 0);
 	if (!cls_session)
-		goto cls_session_fail;
+		goto module_put;
 	*(unsigned long*)shost->hostdata = (unsigned long)cls_session;
 
 	return cls_session;
 
+module_put:
+	module_put(iscsit->owner);
cls_session_fail:
 	scsi_remove_host(shost);
add_host_fail:
@@ -1325,6 +1330,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 
 	iscsi_destroy_session(cls_session);
 	scsi_host_put(shost);
+	module_put(cls_session->transport->owner);
 }
 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
 
@@ -1697,6 +1703,185 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
1697} 1703}
1698EXPORT_SYMBOL_GPL(iscsi_conn_bind); 1704EXPORT_SYMBOL_GPL(iscsi_conn_bind);
1699 1705
1706
1707int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
1708 enum iscsi_param param, char *buf, int buflen)
1709{
1710 struct iscsi_conn *conn = cls_conn->dd_data;
1711 struct iscsi_session *session = conn->session;
1712 uint32_t value;
1713
 1714	switch (param) {
1715 case ISCSI_PARAM_MAX_RECV_DLENGTH:
1716 sscanf(buf, "%d", &conn->max_recv_dlength);
1717 break;
1718 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
1719 sscanf(buf, "%d", &conn->max_xmit_dlength);
1720 break;
1721 case ISCSI_PARAM_HDRDGST_EN:
1722 sscanf(buf, "%d", &conn->hdrdgst_en);
1723 break;
1724 case ISCSI_PARAM_DATADGST_EN:
1725 sscanf(buf, "%d", &conn->datadgst_en);
1726 break;
1727 case ISCSI_PARAM_INITIAL_R2T_EN:
1728 sscanf(buf, "%d", &session->initial_r2t_en);
1729 break;
1730 case ISCSI_PARAM_MAX_R2T:
1731 sscanf(buf, "%d", &session->max_r2t);
1732 break;
1733 case ISCSI_PARAM_IMM_DATA_EN:
1734 sscanf(buf, "%d", &session->imm_data_en);
1735 break;
1736 case ISCSI_PARAM_FIRST_BURST:
1737 sscanf(buf, "%d", &session->first_burst);
1738 break;
1739 case ISCSI_PARAM_MAX_BURST:
1740 sscanf(buf, "%d", &session->max_burst);
1741 break;
1742 case ISCSI_PARAM_PDU_INORDER_EN:
1743 sscanf(buf, "%d", &session->pdu_inorder_en);
1744 break;
1745 case ISCSI_PARAM_DATASEQ_INORDER_EN:
1746 sscanf(buf, "%d", &session->dataseq_inorder_en);
1747 break;
1748 case ISCSI_PARAM_ERL:
1749 sscanf(buf, "%d", &session->erl);
1750 break;
1751 case ISCSI_PARAM_IFMARKER_EN:
1752 sscanf(buf, "%d", &value);
1753 BUG_ON(value);
1754 break;
1755 case ISCSI_PARAM_OFMARKER_EN:
1756 sscanf(buf, "%d", &value);
1757 BUG_ON(value);
1758 break;
1759 case ISCSI_PARAM_EXP_STATSN:
1760 sscanf(buf, "%u", &conn->exp_statsn);
1761 break;
1762 case ISCSI_PARAM_TARGET_NAME:
 1763		/* the target name should not change between logins */
1764 if (session->targetname)
1765 break;
1766
1767 session->targetname = kstrdup(buf, GFP_KERNEL);
1768 if (!session->targetname)
1769 return -ENOMEM;
1770 break;
1771 case ISCSI_PARAM_TPGT:
1772 sscanf(buf, "%d", &session->tpgt);
1773 break;
1774 case ISCSI_PARAM_PERSISTENT_PORT:
1775 sscanf(buf, "%d", &conn->persistent_port);
1776 break;
1777 case ISCSI_PARAM_PERSISTENT_ADDRESS:
1778 /*
 1779		 * This is the address returned during discovery, so it should
 1780		 * not change between logins.
1781 */
1782 if (conn->persistent_address)
1783 break;
1784
1785 conn->persistent_address = kstrdup(buf, GFP_KERNEL);
1786 if (!conn->persistent_address)
1787 return -ENOMEM;
1788 break;
1789 default:
1790 return -ENOSYS;
1791 }
1792
1793 return 0;
1794}
1795EXPORT_SYMBOL_GPL(iscsi_set_param);
1796
1797int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
1798 enum iscsi_param param, char *buf)
1799{
1800 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1801 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1802 int len;
1803
 1804	switch (param) {
1805 case ISCSI_PARAM_INITIAL_R2T_EN:
1806 len = sprintf(buf, "%d\n", session->initial_r2t_en);
1807 break;
1808 case ISCSI_PARAM_MAX_R2T:
1809 len = sprintf(buf, "%hu\n", session->max_r2t);
1810 break;
1811 case ISCSI_PARAM_IMM_DATA_EN:
1812 len = sprintf(buf, "%d\n", session->imm_data_en);
1813 break;
1814 case ISCSI_PARAM_FIRST_BURST:
1815 len = sprintf(buf, "%u\n", session->first_burst);
1816 break;
1817 case ISCSI_PARAM_MAX_BURST:
1818 len = sprintf(buf, "%u\n", session->max_burst);
1819 break;
1820 case ISCSI_PARAM_PDU_INORDER_EN:
1821 len = sprintf(buf, "%d\n", session->pdu_inorder_en);
1822 break;
1823 case ISCSI_PARAM_DATASEQ_INORDER_EN:
1824 len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
1825 break;
1826 case ISCSI_PARAM_ERL:
1827 len = sprintf(buf, "%d\n", session->erl);
1828 break;
1829 case ISCSI_PARAM_TARGET_NAME:
1830 len = sprintf(buf, "%s\n", session->targetname);
1831 break;
1832 case ISCSI_PARAM_TPGT:
1833 len = sprintf(buf, "%d\n", session->tpgt);
1834 break;
1835 default:
1836 return -ENOSYS;
1837 }
1838
1839 return len;
1840}
1841EXPORT_SYMBOL_GPL(iscsi_session_get_param);
1842
1843int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
1844 enum iscsi_param param, char *buf)
1845{
1846 struct iscsi_conn *conn = cls_conn->dd_data;
1847 int len;
1848
 1849	switch (param) {
1850 case ISCSI_PARAM_MAX_RECV_DLENGTH:
1851 len = sprintf(buf, "%u\n", conn->max_recv_dlength);
1852 break;
1853 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
1854 len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
1855 break;
1856 case ISCSI_PARAM_HDRDGST_EN:
1857 len = sprintf(buf, "%d\n", conn->hdrdgst_en);
1858 break;
1859 case ISCSI_PARAM_DATADGST_EN:
1860 len = sprintf(buf, "%d\n", conn->datadgst_en);
1861 break;
1862 case ISCSI_PARAM_IFMARKER_EN:
1863 len = sprintf(buf, "%d\n", conn->ifmarker_en);
1864 break;
1865 case ISCSI_PARAM_OFMARKER_EN:
1866 len = sprintf(buf, "%d\n", conn->ofmarker_en);
1867 break;
1868 case ISCSI_PARAM_EXP_STATSN:
1869 len = sprintf(buf, "%u\n", conn->exp_statsn);
1870 break;
1871 case ISCSI_PARAM_PERSISTENT_PORT:
1872 len = sprintf(buf, "%d\n", conn->persistent_port);
1873 break;
1874 case ISCSI_PARAM_PERSISTENT_ADDRESS:
1875 len = sprintf(buf, "%s\n", conn->persistent_address);
1876 break;
1877 default:
1878 return -ENOSYS;
1879 }
1880
1881 return len;
1882}
1883EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
1884
1700MODULE_AUTHOR("Mike Christie"); 1885MODULE_AUTHOR("Mike Christie");
1701MODULE_DESCRIPTION("iSCSI library functions"); 1886MODULE_DESCRIPTION("iSCSI library functions");
1702MODULE_LICENSE("GPL"); 1887MODULE_LICENSE("GPL");
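The libiscsi hunks pin the transport module for the whole session lifetime:
try_module_get() before iscsi_create_session(), module_put() on the new
failure label, and a matching module_put() in iscsi_session_teardown(). The
ownership pattern reduces to the sketch below; my_create_object() and
my_destroy_object() are hypothetical stand-ins for the session calls:

	#include <linux/module.h>

	static void *my_setup(struct module *owner)
	{
		void *obj;

		if (!try_module_get(owner))	/* fails if owner is unloading */
			return NULL;

		obj = my_create_object();	/* hypothetical */
		if (!obj) {
			module_put(owner);	/* drop the reference on failure */
			return NULL;
		}
		return obj;	/* reference held until teardown */
	}

	static void my_teardown(void *obj, struct module *owner)
	{
		my_destroy_object(obj);	/* hypothetical */
		module_put(owner);	/* pairs with try_module_get() */
	}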
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 087c44539a16..f81691fcf177 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -174,7 +174,6 @@ struct lpfc_hba {
174 dma_addr_t slim2p_mapping; 174 dma_addr_t slim2p_mapping;
175 uint16_t pci_cfg_value; 175 uint16_t pci_cfg_value;
176 176
177 struct semaphore hba_can_block;
178 int32_t hba_state; 177 int32_t hba_state;
179 178
180#define LPFC_STATE_UNKNOWN 0 /* HBA state is unknown */ 179#define LPFC_STATE_UNKNOWN 0 /* HBA state is unknown */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 283b7d824c34..4126fd87956f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -821,7 +821,7 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
821 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 821 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
822 822
823 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm)); 823 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
824 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, 0, did, 824 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, NULL, did,
825 ELS_CMD_PLOGI); 825 ELS_CMD_PLOGI);
826 if (!elsiocb) 826 if (!elsiocb)
827 return 1; 827 return 1;
@@ -2791,8 +2791,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2791 2791
2792 ndlp = (struct lpfc_nodelist *) pmb->context2; 2792 ndlp = (struct lpfc_nodelist *) pmb->context2;
2793 xri = (uint16_t) ((unsigned long)(pmb->context1)); 2793 xri = (uint16_t) ((unsigned long)(pmb->context1));
2794 pmb->context1 = 0; 2794 pmb->context1 = NULL;
2795 pmb->context2 = 0; 2795 pmb->context2 = NULL;
2796 2796
2797 if (mb->mbxStatus) { 2797 if (mb->mbxStatus) {
2798 mempool_free( pmb, phba->mbox_mem_pool); 2798 mempool_free( pmb, phba->mbox_mem_pool);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5a28d9bf8e4d..81755a3f7c68 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -939,12 +939,12 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
939 "10-port ", "PCIe"}; 939 "10-port ", "PCIe"};
940 break; 940 break;
941 default: 941 default:
942 m = (typeof(m)){ 0 }; 942 m = (typeof(m)){ NULL };
943 break; 943 break;
944 } 944 }
945 break; 945 break;
946 default: 946 default:
947 m = (typeof(m)){ 0 }; 947 m = (typeof(m)){ NULL };
948 break; 948 break;
949 } 949 }
950 950
@@ -1451,7 +1451,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1451 goto out_put_host; 1451 goto out_put_host;
1452 1452
1453 host->unique_id = phba->brd_no; 1453 host->unique_id = phba->brd_no;
1454 init_MUTEX(&phba->hba_can_block);
1455 INIT_LIST_HEAD(&phba->ctrspbuflist); 1454 INIT_LIST_HEAD(&phba->ctrspbuflist);
1456 INIT_LIST_HEAD(&phba->rnidrspbuflist); 1455 INIT_LIST_HEAD(&phba->rnidrspbuflist);
1457 INIT_LIST_HEAD(&phba->freebufList); 1456 INIT_LIST_HEAD(&phba->freebufList);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7dc4c2e6bed2..aea1ee472f3d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -41,20 +41,6 @@
41#define LPFC_ABORT_WAIT 2 41#define LPFC_ABORT_WAIT 2
42 42
43 43
44static inline void
45lpfc_block_requests(struct lpfc_hba * phba)
46{
47 down(&phba->hba_can_block);
48 scsi_block_requests(phba->host);
49}
50
51static inline void
52lpfc_unblock_requests(struct lpfc_hba * phba)
53{
54 scsi_unblock_requests(phba->host);
55 up(&phba->hba_can_block);
56}
57
58/* 44/*
59 * This routine allocates a scsi buffer, which contains all the necessary 45 * This routine allocates a scsi buffer, which contains all the necessary
60 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 46 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
@@ -859,7 +845,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
859 unsigned int loop_count = 0; 845 unsigned int loop_count = 0;
860 int ret = SUCCESS; 846 int ret = SUCCESS;
861 847
862 lpfc_block_requests(phba);
863 spin_lock_irq(shost->host_lock); 848 spin_lock_irq(shost->host_lock);
864 849
865 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 850 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
@@ -945,7 +930,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
945 cmnd->device->lun, cmnd->serial_number); 930 cmnd->device->lun, cmnd->serial_number);
946 931
947 spin_unlock_irq(shost->host_lock); 932 spin_unlock_irq(shost->host_lock);
948 lpfc_unblock_requests(phba);
949 933
950 return ret; 934 return ret;
951} 935}
@@ -963,7 +947,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
963 int ret = FAILED; 947 int ret = FAILED;
964 int cnt, loopcnt; 948 int cnt, loopcnt;
965 949
966 lpfc_block_requests(phba);
967 spin_lock_irq(shost->host_lock); 950 spin_lock_irq(shost->host_lock);
968 /* 951 /*
969 * If target is not in a MAPPED state, delay the reset until 952 * If target is not in a MAPPED state, delay the reset until
@@ -1065,7 +1048,6 @@ out_free_scsi_buf:
1065 1048
1066out: 1049out:
1067 spin_unlock_irq(shost->host_lock); 1050 spin_unlock_irq(shost->host_lock);
1068 lpfc_unblock_requests(phba);
1069 return ret; 1051 return ret;
1070} 1052}
1071 1053
@@ -1080,7 +1062,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1080 int cnt, loopcnt; 1062 int cnt, loopcnt;
1081 struct lpfc_scsi_buf * lpfc_cmd; 1063 struct lpfc_scsi_buf * lpfc_cmd;
1082 1064
1083 lpfc_block_requests(phba);
1084 spin_lock_irq(shost->host_lock); 1065 spin_lock_irq(shost->host_lock);
1085 1066
1086 lpfc_cmd = lpfc_get_scsi_buf(phba); 1067 lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -1163,7 +1144,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1163 phba->brd_no, ret); 1144 phba->brd_no, ret);
1164out: 1145out:
1165 spin_unlock_irq(shost->host_lock); 1146 spin_unlock_irq(shost->host_lock);
1166 lpfc_unblock_requests(phba);
1167 return ret; 1147 return ret;
1168} 1148}
1169 1149
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index e5c017ccda59..a8c9627a15c4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.02.04 13 * Version : v00.00.03.01
14 * 14 *
15 * Authors: 15 * Authors:
16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> 16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
@@ -55,19 +55,25 @@ static struct pci_device_id megasas_pci_table[] = {
55 55
56 { 56 {
57 PCI_VENDOR_ID_LSI_LOGIC, 57 PCI_VENDOR_ID_LSI_LOGIC,
58 PCI_DEVICE_ID_LSI_SAS1064R, // xscale IOP 58 PCI_DEVICE_ID_LSI_SAS1064R, /* xscale IOP */
59 PCI_ANY_ID, 59 PCI_ANY_ID,
60 PCI_ANY_ID, 60 PCI_ANY_ID,
61 }, 61 },
62 { 62 {
63 PCI_VENDOR_ID_LSI_LOGIC, 63 PCI_VENDOR_ID_LSI_LOGIC,
64 PCI_DEVICE_ID_LSI_SAS1078R, // ppc IOP 64 PCI_DEVICE_ID_LSI_SAS1078R, /* ppc IOP */
65 PCI_ANY_ID, 65 PCI_ANY_ID,
66 PCI_ANY_ID, 66 PCI_ANY_ID,
67 }, 67 },
68 { 68 {
69 PCI_VENDOR_ID_LSI_LOGIC,
70 PCI_DEVICE_ID_LSI_VERDE_ZCR, /* xscale IOP, vega */
71 PCI_ANY_ID,
72 PCI_ANY_ID,
73 },
74 {
69 PCI_VENDOR_ID_DELL, 75 PCI_VENDOR_ID_DELL,
70 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP 76 PCI_DEVICE_ID_DELL_PERC5, /* xscale IOP */
71 PCI_ANY_ID, 77 PCI_ANY_ID,
72 PCI_ANY_ID, 78 PCI_ANY_ID,
73 }, 79 },
@@ -289,9 +295,14 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
289 * @regs: MFI register set 295 * @regs: MFI register set
290 */ 296 */
291static inline void 297static inline void
292megasas_disable_intr(struct megasas_register_set __iomem * regs) 298megasas_disable_intr(struct megasas_instance *instance)
293{ 299{
294 u32 mask = 0x1f; 300 u32 mask = 0x1f;
301 struct megasas_register_set __iomem *regs = instance->reg_set;
302
 303	if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078R)
304 mask = 0xffffffff;
305
295 writel(mask, &regs->outbound_intr_mask); 306 writel(mask, &regs->outbound_intr_mask);
296 307
297 /* Dummy readl to force pci flush */ 308 /* Dummy readl to force pci flush */
@@ -1260,7 +1271,7 @@ megasas_transition_to_ready(struct megasas_instance* instance)
1260 /* 1271 /*
1261 * Bring it to READY state; assuming max wait 2 secs 1272 * Bring it to READY state; assuming max wait 2 secs
1262 */ 1273 */
1263 megasas_disable_intr(instance->reg_set); 1274 megasas_disable_intr(instance);
1264 writel(MFI_INIT_READY, &instance->reg_set->inbound_doorbell); 1275 writel(MFI_INIT_READY, &instance->reg_set->inbound_doorbell);
1265 1276
1266 max_wait = 10; 1277 max_wait = 10;
@@ -1757,6 +1768,11 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1757 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); 1768 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
1758 1769
1759 /* 1770 /*
 1771	 * Disable interrupts before firing the init frame at the FW
1772 */
1773 megasas_disable_intr(instance);
1774
1775 /*
1760 * Issue the init frame in polled mode 1776 * Issue the init frame in polled mode
1761 */ 1777 */
1762 if (megasas_issue_polled(instance, cmd)) { 1778 if (megasas_issue_polled(instance, cmd)) {
@@ -2234,7 +2250,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2234 megasas_mgmt_info.max_index--; 2250 megasas_mgmt_info.max_index--;
2235 2251
2236 pci_set_drvdata(pdev, NULL); 2252 pci_set_drvdata(pdev, NULL);
2237 megasas_disable_intr(instance->reg_set); 2253 megasas_disable_intr(instance);
2238 free_irq(instance->pdev->irq, instance); 2254 free_irq(instance->pdev->irq, instance);
2239 2255
2240 megasas_release_mfi(instance); 2256 megasas_release_mfi(instance);
@@ -2364,7 +2380,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
2364 2380
2365 pci_set_drvdata(instance->pdev, NULL); 2381 pci_set_drvdata(instance->pdev, NULL);
2366 2382
2367 megasas_disable_intr(instance->reg_set); 2383 megasas_disable_intr(instance);
2368 2384
2369 free_irq(instance->pdev->irq, instance); 2385 free_irq(instance->pdev->irq, instance);
2370 2386
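megasas_disable_intr() now takes the instance instead of a bare register set
so the mask can depend on the controller: 0x1f by default, everything on
1078-class IOPs. The masking idiom restated as a sketch (mask values and
register names are as in the hunk above; treat the rest as illustrative):

	static inline void my_disable_intr(struct megasas_instance *instance)
	{
		u32 mask = 0x1f;	/* default: doorbell sources only */
		struct megasas_register_set __iomem *regs = instance->reg_set;

		if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078R)
			mask = 0xffffffff;	/* 1078: mask everything */

		writel(mask, &regs->outbound_intr_mask);
		readl(&regs->outbound_intr_mask);	/* flush posted write */
	}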
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 927d6ffef05f..3531a14222a7 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,16 @@
18/** 18/**
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.02.04" 21#define MEGASAS_VERSION "00.00.03.01"
22#define MEGASAS_RELDATE "Feb 03, 2006" 22#define MEGASAS_RELDATE "May 14, 2006"
23#define MEGASAS_EXT_VERSION "Fri Feb 03 14:31:44 PST 2006" 23#define MEGASAS_EXT_VERSION "Sun May 14 22:49:52 PDT 2006"
24
25/*
26 * Device IDs
27 */
28#define PCI_DEVICE_ID_LSI_SAS1078R 0x0060
29#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413
30
24/* 31/*
25 * ===================================== 32 * =====================================
26 * MegaRAID SAS MFI firmware definitions 33 * MegaRAID SAS MFI firmware definitions
@@ -554,7 +561,11 @@ struct megasas_ctrl_info {
554#define MFI_POLL_TIMEOUT_SECS 10 561#define MFI_POLL_TIMEOUT_SECS 10
555 562
556#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 563#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
557#define PCI_DEVICE_ID_LSI_SAS1078R 0x00000060 564
 565/*
 566 * Register set for both 1068 and 1078 controllers;
 567 * the structure is extended for the 1078 registers.
 568 */
558 569
559struct megasas_register_set { 570struct megasas_register_set {
560 u32 reserved_0[4]; /*0000h*/ 571 u32 reserved_0[4]; /*0000h*/
@@ -1150,10 +1161,10 @@ struct compat_megasas_iocpacket {
1150 struct compat_iovec sgl[MAX_IOCTL_SGE]; 1161 struct compat_iovec sgl[MAX_IOCTL_SGE];
1151} __attribute__ ((packed)); 1162} __attribute__ ((packed));
1152 1163
1164#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket)
1153#endif 1165#endif
1154 1166
1155#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket) 1167#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
1156#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket)
1157#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen) 1168#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
1158 1169
1159struct megasas_mgmt_info { 1170struct megasas_mgmt_info {
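Moving MEGASAS_IOC_FIRMWARE32 inside the CONFIG_COMPAT block matters because
_IOWR() encodes sizeof() of its argument type, and struct
compat_megasas_iocpacket only exists under CONFIG_COMPAT. A sketch of the
rule, with my_iocpacket and compat_my_iocpacket as hypothetical structures:

	#include <linux/ioctl.h>

	#ifdef CONFIG_COMPAT
	/* 32-bit number: built from the compat layout, so the encoded
	 * size matches what 32-bit userland passes in */
	#define MY_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_my_iocpacket)
	#endif

	/* native number: encodes the native structure's size */
	#define MY_IOC_FIRMWARE   _IOWR('M', 1, struct my_iocpacket)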
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index bd337a914298..bfb4f49e125d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -2866,8 +2866,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2866 */ 2866 */
2867 nsp32_do_bus_reset(data); 2867 nsp32_do_bus_reset(data);
2868 2868
2869 ret = request_irq(host->irq, do_nsp32_isr, 2869 ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data);
2870 IRQF_SHARED | IRQF_SAMPLE_RANDOM, "nsp32", data);
2871 if (ret < 0) { 2870 if (ret < 0) {
2872 nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 " 2871 nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 "
2873 "SCSI PCI controller. Interrupt: %d", host->irq); 2872 "SCSI PCI controller. Interrupt: %d", host->irq);
@@ -2886,12 +2885,19 @@ static int nsp32_detect(struct scsi_host_template *sht)
2886 } 2885 }
2887 2886
2888#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73)) 2887#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
2889 scsi_add_host (host, &PCIDEV->dev); 2888 ret = scsi_add_host(host, &PCIDEV->dev);
2889 if (ret) {
2890 nsp32_msg(KERN_ERR, "failed to add scsi host");
2891 goto free_region;
2892 }
2890 scsi_scan_host(host); 2893 scsi_scan_host(host);
2891#endif 2894#endif
2892 pci_set_drvdata(PCIDEV, host); 2895 pci_set_drvdata(PCIDEV, host);
2893 return DETECT_OK; 2896 return DETECT_OK;
2894 2897
2898 free_region:
2899 release_region(host->io_port, host->n_io_port);
2900
2895 free_irq: 2901 free_irq:
2896 free_irq(host->irq, data); 2902 free_irq(host->irq, data);
2897 2903
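The nsp32 hunk finally checks the scsi_add_host() return value and unwinds
through a new free_region label. The general shape of that goto ladder,
sketched with hypothetical acquire/release helpers:

	static int my_detect(void)
	{
		int ret;

		ret = my_request_region();	/* hypothetical */
		if (ret)
			return ret;

		ret = my_request_irq();		/* hypothetical */
		if (ret)
			goto free_region;

		ret = my_add_host();		/* hypothetical */
		if (ret)
			goto free_irq;

		return 0;

	 free_irq:
		my_free_irq();		/* undo in reverse acquisition order */
	 free_region:
		my_release_region();
		return ret;
	}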
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 7ff5851c040b..0d4c04e1f3de 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1623,7 +1623,7 @@ static int nsp_cs_probe(struct pcmcia_device *link)
1623 /* Interrupt handler */ 1623 /* Interrupt handler */
1624 link->irq.Handler = &nspintr; 1624 link->irq.Handler = &nspintr;
1625 link->irq.Instance = info; 1625 link->irq.Instance = info;
1626 link->irq.Attributes |= (IRQF_SHARED | IRQF_SAMPLE_RANDOM); 1626 link->irq.Attributes |= IRQF_SHARED;
1627 1627
1628 /* General socket configuration */ 1628 /* General socket configuration */
1629 link->conf.Attributes = CONF_ENABLE_IRQ; 1629 link->conf.Attributes = CONF_ENABLE_IRQ;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index b818b9bfe678..8953991462d7 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4209,7 +4209,7 @@ qla1280_setup(char *s)
4209} 4209}
4210 4210
4211 4211
4212static int 4212static int __init
4213qla1280_get_token(char *str) 4213qla1280_get_token(char *str)
4214{ 4214{
4215 char *sep; 4215 char *sep;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index e96d58ded57c..87f90c4f08e9 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -16,15 +16,16 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf, loff_t off,
16{ 16{
17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
18 struct device, kobj))); 18 struct device, kobj)));
19 char *rbuf = (char *)ha->fw_dump;
19 20
20 if (ha->fw_dump_reading == 0) 21 if (ha->fw_dump_reading == 0)
21 return 0; 22 return 0;
22 if (off > ha->fw_dump_buffer_len) 23 if (off > ha->fw_dump_len)
23 return 0; 24 return 0;
24 if (off + count > ha->fw_dump_buffer_len) 25 if (off + count > ha->fw_dump_len)
25 count = ha->fw_dump_buffer_len - off; 26 count = ha->fw_dump_len - off;
26 27
27 memcpy(buf, &ha->fw_dump_buffer[off], count); 28 memcpy(buf, &rbuf[off], count);
28 29
29 return (count); 30 return (count);
30} 31}
@@ -36,7 +37,6 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf, loff_t off,
36 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 37 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
37 struct device, kobj))); 38 struct device, kobj)));
38 int reading; 39 int reading;
39 uint32_t dump_size;
40 40
41 if (off != 0) 41 if (off != 0)
42 return (0); 42 return (0);
@@ -44,46 +44,27 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf, loff_t off,
44 reading = simple_strtol(buf, NULL, 10); 44 reading = simple_strtol(buf, NULL, 10);
45 switch (reading) { 45 switch (reading) {
46 case 0: 46 case 0:
47 if (ha->fw_dump_reading == 1) { 47 if (!ha->fw_dump_reading)
48 qla_printk(KERN_INFO, ha, 48 break;
49 "Firmware dump cleared on (%ld).\n", ha->host_no);
50 49
51 vfree(ha->fw_dump_buffer); 50 qla_printk(KERN_INFO, ha,
52 ha->fw_dump_buffer = NULL; 51 "Firmware dump cleared on (%ld).\n", ha->host_no);
53 ha->fw_dump_reading = 0; 52
54 ha->fw_dumped = 0; 53 ha->fw_dump_reading = 0;
55 } 54 ha->fw_dumped = 0;
56 break; 55 break;
57 case 1: 56 case 1:
58 if (ha->fw_dumped && !ha->fw_dump_reading) { 57 if (ha->fw_dumped && !ha->fw_dump_reading) {
59 ha->fw_dump_reading = 1; 58 ha->fw_dump_reading = 1;
60 59
61 if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
62 dump_size = FW_DUMP_SIZE_24XX;
63 else {
64 dump_size = FW_DUMP_SIZE_1M;
65 if (ha->fw_memory_size < 0x20000)
66 dump_size = FW_DUMP_SIZE_128K;
67 else if (ha->fw_memory_size < 0x80000)
68 dump_size = FW_DUMP_SIZE_512K;
69 }
70 ha->fw_dump_buffer = (char *)vmalloc(dump_size);
71 if (ha->fw_dump_buffer == NULL) {
72 qla_printk(KERN_WARNING, ha,
73 "Unable to allocate memory for firmware "
74 "dump buffer (%d).\n", dump_size);
75
76 ha->fw_dump_reading = 0;
77 return (count);
78 }
79 qla_printk(KERN_INFO, ha, 60 qla_printk(KERN_INFO, ha,
80 "Firmware dump ready for read on (%ld).\n", 61 "Raw firmware dump ready for read on (%ld).\n",
81 ha->host_no); 62 ha->host_no);
82 memset(ha->fw_dump_buffer, 0, dump_size);
83 ha->isp_ops.ascii_fw_dump(ha);
84 ha->fw_dump_buffer_len = strlen(ha->fw_dump_buffer);
85 } 63 }
86 break; 64 break;
65 case 2:
66 qla2x00_alloc_fw_dump(ha);
67 break;
87 } 68 }
88 return (count); 69 return (count);
89} 70}
@@ -313,9 +294,6 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj, char *buf, loff_t off,
313 if (!capable(CAP_SYS_ADMIN) || off != 0) 294 if (!capable(CAP_SYS_ADMIN) || off != 0)
314 return 0; 295 return 0;
315 296
316 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
317 return -ENOTSUPP;
318
319 /* Read NVRAM. */ 297 /* Read NVRAM. */
320 spin_lock_irqsave(&ha->hardware_lock, flags); 298 spin_lock_irqsave(&ha->hardware_lock, flags);
321 ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->vpd_base, ha->vpd_size); 299 ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->vpd_base, ha->vpd_size);
@@ -335,9 +313,6 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj, char *buf, loff_t off,
335 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size) 313 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
336 return 0; 314 return 0;
337 315
338 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
339 return -ENOTSUPP;
340
341 /* Write NVRAM. */ 316 /* Write NVRAM. */
342 spin_lock_irqsave(&ha->hardware_lock, flags); 317 spin_lock_irqsave(&ha->hardware_lock, flags);
343 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count); 318 ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
@@ -357,6 +332,53 @@ static struct bin_attribute sysfs_vpd_attr = {
357 .write = qla2x00_sysfs_write_vpd, 332 .write = qla2x00_sysfs_write_vpd,
358}; 333};
359 334
335static ssize_t
336qla2x00_sysfs_read_sfp(struct kobject *kobj, char *buf, loff_t off,
337 size_t count)
338{
339 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
340 struct device, kobj)));
341 uint16_t iter, addr, offset;
342 int rval;
343
344 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
345 return 0;
346
347 addr = 0xa0;
348 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
349 iter++, offset += SFP_BLOCK_SIZE) {
350 if (iter == 4) {
351 /* Skip to next device address. */
352 addr = 0xa2;
353 offset = 0;
354 }
355
356 rval = qla2x00_read_sfp(ha, ha->sfp_data_dma, addr, offset,
357 SFP_BLOCK_SIZE);
358 if (rval != QLA_SUCCESS) {
359 qla_printk(KERN_WARNING, ha,
360 "Unable to read SFP data (%x/%x/%x).\n", rval,
361 addr, offset);
362 count = 0;
363 break;
364 }
365 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
366 buf += SFP_BLOCK_SIZE;
367 }
368
369 return count;
370}
371
372static struct bin_attribute sysfs_sfp_attr = {
373 .attr = {
374 .name = "sfp",
375 .mode = S_IRUSR | S_IWUSR,
376 .owner = THIS_MODULE,
377 },
378 .size = SFP_DEV_SIZE * 2,
379 .read = qla2x00_sysfs_read_sfp,
380};
381
360void 382void
361qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha) 383qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
362{ 384{
@@ -367,7 +389,12 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
367 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); 389 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
368 sysfs_create_bin_file(&host->shost_gendev.kobj, 390 sysfs_create_bin_file(&host->shost_gendev.kobj,
369 &sysfs_optrom_ctl_attr); 391 &sysfs_optrom_ctl_attr);
370 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_vpd_attr); 392 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
393 sysfs_create_bin_file(&host->shost_gendev.kobj,
394 &sysfs_vpd_attr);
395 sysfs_create_bin_file(&host->shost_gendev.kobj,
396 &sysfs_sfp_attr);
397 }
371} 398}
372 399
373void 400void
@@ -380,7 +407,12 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
380 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr); 407 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
381 sysfs_remove_bin_file(&host->shost_gendev.kobj, 408 sysfs_remove_bin_file(&host->shost_gendev.kobj,
382 &sysfs_optrom_ctl_attr); 409 &sysfs_optrom_ctl_attr);
383 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_vpd_attr); 410 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
411 sysfs_remove_bin_file(&host->shost_gendev.kobj,
412 &sysfs_vpd_attr);
413 sysfs_remove_bin_file(&host->shost_gendev.kobj,
414 &sysfs_sfp_attr);
415 }
384 416
385 if (ha->beacon_blink_led == 1) 417 if (ha->beacon_blink_led == 1)
386 ha->isp_ops.beacon_off(ha); 418 ha->isp_ops.beacon_off(ha);
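qla2x00_alloc_sysfs_attr() and qla2x00_free_sysfs_attr() now register the vpd
and new sfp binary attributes only on ISP24xx/54xx hardware, so the create and
remove paths must test the same predicate. The symmetry, sketched with
hypothetical sysfs_common_attr/sysfs_optional_attr attributes:

	static void my_alloc_sysfs_attr(struct Scsi_Host *host, bool has_opt)
	{
		sysfs_create_bin_file(&host->shost_gendev.kobj,
		    &sysfs_common_attr);	/* hypothetical, always present */
		if (has_opt)
			sysfs_create_bin_file(&host->shost_gendev.kobj,
			    &sysfs_optional_attr);	/* hypothetical */
	}

	static void my_free_sysfs_attr(struct Scsi_Host *host, bool has_opt)
	{
		sysfs_remove_bin_file(&host->shost_gendev.kobj,
		    &sysfs_common_attr);
		if (has_opt)	/* mirror the create path exactly */
			sysfs_remove_bin_file(&host->shost_gendev.kobj,
			    &sysfs_optional_attr);
	}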
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 74e54713aa7c..f6ed6962bc2b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -8,7 +8,34 @@
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10 10
11static int qla_uprintf(char **, char *, ...); 11static inline void
12qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
13{
14 fw_dump->fw_major_version = htonl(ha->fw_major_version);
15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
16 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
17 fw_dump->fw_attributes = htonl(ha->fw_attributes);
18
19 fw_dump->vendor = htonl(ha->pdev->vendor);
20 fw_dump->device = htonl(ha->pdev->device);
21 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
22 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
23}
24
25static inline void *
26qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
27{
28 /* Request queue. */
29 memcpy(ptr, ha->request_ring, ha->request_q_length *
30 sizeof(request_t));
31
32 /* Response queue. */
33 ptr += ha->request_q_length * sizeof(request_t);
34 memcpy(ptr, ha->response_ring, ha->response_q_length *
35 sizeof(response_t));
36
37 return ptr + (ha->response_q_length * sizeof(response_t));
38}
12 39
13/** 40/**
14 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 41 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
@@ -49,10 +76,11 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
49 "request...\n", ha->fw_dump); 76 "request...\n", ha->fw_dump);
50 goto qla2300_fw_dump_failed; 77 goto qla2300_fw_dump_failed;
51 } 78 }
52 fw = ha->fw_dump; 79 fw = &ha->fw_dump->isp.isp23;
80 qla2xxx_prep_dump(ha, ha->fw_dump);
53 81
54 rval = QLA_SUCCESS; 82 rval = QLA_SUCCESS;
55 fw->hccr = RD_REG_WORD(&reg->hccr); 83 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
56 84
57 /* Pause RISC. */ 85 /* Pause RISC. */
58 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 86 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
@@ -73,85 +101,86 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
73 if (rval == QLA_SUCCESS) { 101 if (rval == QLA_SUCCESS) {
74 dmp_reg = (uint16_t __iomem *)(reg + 0); 102 dmp_reg = (uint16_t __iomem *)(reg + 0);
75 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 103 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
76 fw->pbiu_reg[cnt] = RD_REG_WORD(dmp_reg++); 104 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
77 105
78 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10); 106 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10);
79 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++) 107 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
80 fw->risc_host_reg[cnt] = RD_REG_WORD(dmp_reg++); 108 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
81 109
82 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x40); 110 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x40);
83 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 111 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
84 fw->mailbox_reg[cnt] = RD_REG_WORD(dmp_reg++); 112 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
85 113
86 WRT_REG_WORD(&reg->ctrl_status, 0x40); 114 WRT_REG_WORD(&reg->ctrl_status, 0x40);
87 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 115 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
88 for (cnt = 0; cnt < sizeof(fw->resp_dma_reg) / 2; cnt++) 116 for (cnt = 0; cnt < sizeof(fw->resp_dma_reg) / 2; cnt++)
89 fw->resp_dma_reg[cnt] = RD_REG_WORD(dmp_reg++); 117 fw->resp_dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
90 118
91 WRT_REG_WORD(&reg->ctrl_status, 0x50); 119 WRT_REG_WORD(&reg->ctrl_status, 0x50);
92 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 120 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
93 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) 121 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
94 fw->dma_reg[cnt] = RD_REG_WORD(dmp_reg++); 122 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
95 123
96 WRT_REG_WORD(&reg->ctrl_status, 0x00); 124 WRT_REG_WORD(&reg->ctrl_status, 0x00);
97 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0); 125 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0);
98 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 126 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
99 fw->risc_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++); 127 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
100 128
101 WRT_REG_WORD(&reg->pcr, 0x2000); 129 WRT_REG_WORD(&reg->pcr, 0x2000);
102 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 130 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
103 for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++) 131 for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++)
104 fw->risc_gp0_reg[cnt] = RD_REG_WORD(dmp_reg++); 132 fw->risc_gp0_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
105 133
106 WRT_REG_WORD(&reg->pcr, 0x2200); 134 WRT_REG_WORD(&reg->pcr, 0x2200);
107 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 135 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
108 for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++) 136 for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++)
109 fw->risc_gp1_reg[cnt] = RD_REG_WORD(dmp_reg++); 137 fw->risc_gp1_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
110 138
111 WRT_REG_WORD(&reg->pcr, 0x2400); 139 WRT_REG_WORD(&reg->pcr, 0x2400);
112 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 140 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
113 for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++) 141 for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++)
114 fw->risc_gp2_reg[cnt] = RD_REG_WORD(dmp_reg++); 142 fw->risc_gp2_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
115 143
116 WRT_REG_WORD(&reg->pcr, 0x2600); 144 WRT_REG_WORD(&reg->pcr, 0x2600);
117 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 145 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
118 for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++) 146 for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++)
119 fw->risc_gp3_reg[cnt] = RD_REG_WORD(dmp_reg++); 147 fw->risc_gp3_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
120 148
121 WRT_REG_WORD(&reg->pcr, 0x2800); 149 WRT_REG_WORD(&reg->pcr, 0x2800);
122 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 150 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
123 for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++) 151 for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++)
124 fw->risc_gp4_reg[cnt] = RD_REG_WORD(dmp_reg++); 152 fw->risc_gp4_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
125 153
126 WRT_REG_WORD(&reg->pcr, 0x2A00); 154 WRT_REG_WORD(&reg->pcr, 0x2A00);
127 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 155 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
128 for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++) 156 for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++)
129 fw->risc_gp5_reg[cnt] = RD_REG_WORD(dmp_reg++); 157 fw->risc_gp5_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
130 158
131 WRT_REG_WORD(&reg->pcr, 0x2C00); 159 WRT_REG_WORD(&reg->pcr, 0x2C00);
132 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 160 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
133 for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++) 161 for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++)
134 fw->risc_gp6_reg[cnt] = RD_REG_WORD(dmp_reg++); 162 fw->risc_gp6_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
135 163
136 WRT_REG_WORD(&reg->pcr, 0x2E00); 164 WRT_REG_WORD(&reg->pcr, 0x2E00);
137 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 165 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
138 for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++) 166 for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++)
139 fw->risc_gp7_reg[cnt] = RD_REG_WORD(dmp_reg++); 167 fw->risc_gp7_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
140 168
141 WRT_REG_WORD(&reg->ctrl_status, 0x10); 169 WRT_REG_WORD(&reg->ctrl_status, 0x10);
142 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 170 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
143 for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++) 171 for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++)
144 fw->frame_buf_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++); 172 fw->frame_buf_hdw_reg[cnt] =
173 htons(RD_REG_WORD(dmp_reg++));
145 174
146 WRT_REG_WORD(&reg->ctrl_status, 0x20); 175 WRT_REG_WORD(&reg->ctrl_status, 0x20);
147 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 176 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
148 for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++) 177 for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++)
149 fw->fpm_b0_reg[cnt] = RD_REG_WORD(dmp_reg++); 178 fw->fpm_b0_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
150 179
151 WRT_REG_WORD(&reg->ctrl_status, 0x30); 180 WRT_REG_WORD(&reg->ctrl_status, 0x30);
152 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 181 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
153 for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++) 182 for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++)
154 fw->fpm_b1_reg[cnt] = RD_REG_WORD(dmp_reg++); 183 fw->fpm_b1_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
155 184
156 /* Reset RISC. */ 185 /* Reset RISC. */
157 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); 186 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
@@ -226,7 +255,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
226 255
227 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 256 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
228 rval = mb0 & MBS_MASK; 257 rval = mb0 & MBS_MASK;
229 fw->risc_ram[cnt] = mb2; 258 fw->risc_ram[cnt] = htons(mb2);
230 } else { 259 } else {
231 rval = QLA_FUNCTION_FAILED; 260 rval = QLA_FUNCTION_FAILED;
232 } 261 }
@@ -285,7 +314,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
285 314
286 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 315 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
287 rval = mb0 & MBS_MASK; 316 rval = mb0 & MBS_MASK;
288 fw->stack_ram[cnt] = mb2; 317 fw->stack_ram[cnt] = htons(mb2);
289 } else { 318 } else {
290 rval = QLA_FUNCTION_FAILED; 319 rval = QLA_FUNCTION_FAILED;
291 } 320 }
@@ -345,12 +374,15 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
345 374
346 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 375 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
347 rval = mb0 & MBS_MASK; 376 rval = mb0 & MBS_MASK;
348 fw->data_ram[cnt] = mb2; 377 fw->data_ram[cnt] = htons(mb2);
349 } else { 378 } else {
350 rval = QLA_FUNCTION_FAILED; 379 rval = QLA_FUNCTION_FAILED;
351 } 380 }
352 } 381 }
353 382
383 if (rval == QLA_SUCCESS)
384 qla2xxx_copy_queues(ha, &fw->data_ram[cnt]);
385
354 if (rval != QLA_SUCCESS) { 386 if (rval != QLA_SUCCESS) {
355 qla_printk(KERN_WARNING, ha, 387 qla_printk(KERN_WARNING, ha,
356 "Failed to dump firmware (%x)!!!\n", rval); 388 "Failed to dump firmware (%x)!!!\n", rval);
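Every register and RAM word captured into the dump now passes through
htons()/htonl(), and qla2xxx_prep_dump() stamps the header the same way, so
the raw dump read back through sysfs is big-endian regardless of the host. The
convention as a sketch, with rd_reg16() as a hypothetical stand-in for
RD_REG_WORD():

	#include <asm/byteorder.h>	/* htons() */

	static void my_capture(uint16_t *dst, const uint16_t __iomem *src,
			       int nwords)
	{
		int i;

		for (i = 0; i < nwords; i++)
			dst[i] = htons(rd_reg16(&src[i]));	/* CPU -> BE16 */
	}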
@@ -369,193 +401,6 @@ qla2300_fw_dump_failed:
369} 401}
370 402
371/** 403/**
372 * qla2300_ascii_fw_dump() - Converts a binary firmware dump to ASCII.
373 * @ha: HA context
374 */
375void
376qla2300_ascii_fw_dump(scsi_qla_host_t *ha)
377{
378 uint32_t cnt;
379 char *uiter;
380 char fw_info[30];
381 struct qla2300_fw_dump *fw;
382 uint32_t data_ram_cnt;
383
384 uiter = ha->fw_dump_buffer;
385 fw = ha->fw_dump;
386
387 qla_uprintf(&uiter, "%s Firmware Version %s\n", ha->model_number,
388 ha->isp_ops.fw_version_str(ha, fw_info));
389
390 qla_uprintf(&uiter, "\n[==>BEG]\n");
391
392 qla_uprintf(&uiter, "HCCR Register:\n%04x\n\n", fw->hccr);
393
394 qla_uprintf(&uiter, "PBIU Registers:");
395 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
396 if (cnt % 8 == 0) {
397 qla_uprintf(&uiter, "\n");
398 }
399 qla_uprintf(&uiter, "%04x ", fw->pbiu_reg[cnt]);
400 }
401
402 qla_uprintf(&uiter, "\n\nReqQ-RspQ-Risc2Host Status registers:");
403 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
404 if (cnt % 8 == 0) {
405 qla_uprintf(&uiter, "\n");
406 }
407 qla_uprintf(&uiter, "%04x ", fw->risc_host_reg[cnt]);
408 }
409
410 qla_uprintf(&uiter, "\n\nMailbox Registers:");
411 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
412 if (cnt % 8 == 0) {
413 qla_uprintf(&uiter, "\n");
414 }
415 qla_uprintf(&uiter, "%04x ", fw->mailbox_reg[cnt]);
416 }
417
418 qla_uprintf(&uiter, "\n\nAuto Request Response DMA Registers:");
419 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
420 if (cnt % 8 == 0) {
421 qla_uprintf(&uiter, "\n");
422 }
423 qla_uprintf(&uiter, "%04x ", fw->resp_dma_reg[cnt]);
424 }
425
426 qla_uprintf(&uiter, "\n\nDMA Registers:");
427 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
428 if (cnt % 8 == 0) {
429 qla_uprintf(&uiter, "\n");
430 }
431 qla_uprintf(&uiter, "%04x ", fw->dma_reg[cnt]);
432 }
433
434 qla_uprintf(&uiter, "\n\nRISC Hardware Registers:");
435 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
436 if (cnt % 8 == 0) {
437 qla_uprintf(&uiter, "\n");
438 }
439 qla_uprintf(&uiter, "%04x ", fw->risc_hdw_reg[cnt]);
440 }
441
442 qla_uprintf(&uiter, "\n\nRISC GP0 Registers:");
443 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
444 if (cnt % 8 == 0) {
445 qla_uprintf(&uiter, "\n");
446 }
447 qla_uprintf(&uiter, "%04x ", fw->risc_gp0_reg[cnt]);
448 }
449
450 qla_uprintf(&uiter, "\n\nRISC GP1 Registers:");
451 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
452 if (cnt % 8 == 0) {
453 qla_uprintf(&uiter, "\n");
454 }
455 qla_uprintf(&uiter, "%04x ", fw->risc_gp1_reg[cnt]);
456 }
457
458 qla_uprintf(&uiter, "\n\nRISC GP2 Registers:");
459 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
460 if (cnt % 8 == 0) {
461 qla_uprintf(&uiter, "\n");
462 }
463 qla_uprintf(&uiter, "%04x ", fw->risc_gp2_reg[cnt]);
464 }
465
466 qla_uprintf(&uiter, "\n\nRISC GP3 Registers:");
467 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
468 if (cnt % 8 == 0) {
469 qla_uprintf(&uiter, "\n");
470 }
471 qla_uprintf(&uiter, "%04x ", fw->risc_gp3_reg[cnt]);
472 }
473
474 qla_uprintf(&uiter, "\n\nRISC GP4 Registers:");
475 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
476 if (cnt % 8 == 0) {
477 qla_uprintf(&uiter, "\n");
478 }
479 qla_uprintf(&uiter, "%04x ", fw->risc_gp4_reg[cnt]);
480 }
481
482 qla_uprintf(&uiter, "\n\nRISC GP5 Registers:");
483 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
484 if (cnt % 8 == 0) {
485 qla_uprintf(&uiter, "\n");
486 }
487 qla_uprintf(&uiter, "%04x ", fw->risc_gp5_reg[cnt]);
488 }
489
490 qla_uprintf(&uiter, "\n\nRISC GP6 Registers:");
491 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
492 if (cnt % 8 == 0) {
493 qla_uprintf(&uiter, "\n");
494 }
495 qla_uprintf(&uiter, "%04x ", fw->risc_gp6_reg[cnt]);
496 }
497
498 qla_uprintf(&uiter, "\n\nRISC GP7 Registers:");
499 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
500 if (cnt % 8 == 0) {
501 qla_uprintf(&uiter, "\n");
502 }
503 qla_uprintf(&uiter, "%04x ", fw->risc_gp7_reg[cnt]);
504 }
505
506 qla_uprintf(&uiter, "\n\nFrame Buffer Hardware Registers:");
507 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
508 if (cnt % 8 == 0) {
509 qla_uprintf(&uiter, "\n");
510 }
511 qla_uprintf(&uiter, "%04x ", fw->frame_buf_hdw_reg[cnt]);
512 }
513
514 qla_uprintf(&uiter, "\n\nFPM B0 Registers:");
515 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
516 if (cnt % 8 == 0) {
517 qla_uprintf(&uiter, "\n");
518 }
519 qla_uprintf(&uiter, "%04x ", fw->fpm_b0_reg[cnt]);
520 }
521
522 qla_uprintf(&uiter, "\n\nFPM B1 Registers:");
523 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
524 if (cnt % 8 == 0) {
525 qla_uprintf(&uiter, "\n");
526 }
527 qla_uprintf(&uiter, "%04x ", fw->fpm_b1_reg[cnt]);
528 }
529
530 qla_uprintf(&uiter, "\n\nCode RAM Dump:");
531 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
532 if (cnt % 8 == 0) {
533 qla_uprintf(&uiter, "\n%04x: ", cnt + 0x0800);
534 }
535 qla_uprintf(&uiter, "%04x ", fw->risc_ram[cnt]);
536 }
537
538 qla_uprintf(&uiter, "\n\nStack RAM Dump:");
539 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
540 if (cnt % 8 == 0) {
541 qla_uprintf(&uiter, "\n%05x: ", cnt + 0x10000);
542 }
543 qla_uprintf(&uiter, "%04x ", fw->stack_ram[cnt]);
544 }
545
546 qla_uprintf(&uiter, "\n\nData RAM Dump:");
547 data_ram_cnt = ha->fw_memory_size - 0x11000 + 1;
548 for (cnt = 0; cnt < data_ram_cnt; cnt++) {
549 if (cnt % 8 == 0) {
550 qla_uprintf(&uiter, "\n%05x: ", cnt + 0x11000);
551 }
552 qla_uprintf(&uiter, "%04x ", fw->data_ram[cnt]);
553 }
554
555 qla_uprintf(&uiter, "\n\n[<==END] ISP Debug Dump.");
556}
557
558/**
559 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. 404 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
560 * @ha: HA context 405 * @ha: HA context
561 * @hardware_locked: Called with the hardware_lock 406 * @hardware_locked: Called with the hardware_lock
@@ -591,10 +436,11 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
591 "request...\n", ha->fw_dump); 436 "request...\n", ha->fw_dump);
592 goto qla2100_fw_dump_failed; 437 goto qla2100_fw_dump_failed;
593 } 438 }
594 fw = ha->fw_dump; 439 fw = &ha->fw_dump->isp.isp21;
440 qla2xxx_prep_dump(ha, ha->fw_dump);
595 441
596 rval = QLA_SUCCESS; 442 rval = QLA_SUCCESS;
597 fw->hccr = RD_REG_WORD(&reg->hccr); 443 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
598 444
599 /* Pause RISC. */ 445 /* Pause RISC. */
600 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 446 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
@@ -608,79 +454,81 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
608 if (rval == QLA_SUCCESS) { 454 if (rval == QLA_SUCCESS) {
609 dmp_reg = (uint16_t __iomem *)(reg + 0); 455 dmp_reg = (uint16_t __iomem *)(reg + 0);
610 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 456 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
611 fw->pbiu_reg[cnt] = RD_REG_WORD(dmp_reg++); 457 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
612 458
613 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10); 459 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10);
614 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 460 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
615 if (cnt == 8) { 461 if (cnt == 8) {
616 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xe0); 462 dmp_reg = (uint16_t __iomem *)
463 ((uint8_t __iomem *)reg + 0xe0);
617 } 464 }
618 fw->mailbox_reg[cnt] = RD_REG_WORD(dmp_reg++); 465 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
619 } 466 }
620 467
621 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x20); 468 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x20);
622 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) 469 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
623 fw->dma_reg[cnt] = RD_REG_WORD(dmp_reg++); 470 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
624 471
625 WRT_REG_WORD(&reg->ctrl_status, 0x00); 472 WRT_REG_WORD(&reg->ctrl_status, 0x00);
626 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0); 473 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0);
627 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 474 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
628 fw->risc_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++); 475 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
629 476
630 WRT_REG_WORD(&reg->pcr, 0x2000); 477 WRT_REG_WORD(&reg->pcr, 0x2000);
631 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 478 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
632 for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++) 479 for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++)
633 fw->risc_gp0_reg[cnt] = RD_REG_WORD(dmp_reg++); 480 fw->risc_gp0_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
634 481
635 WRT_REG_WORD(&reg->pcr, 0x2100); 482 WRT_REG_WORD(&reg->pcr, 0x2100);
636 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 483 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
637 for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++) 484 for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++)
638 fw->risc_gp1_reg[cnt] = RD_REG_WORD(dmp_reg++); 485 fw->risc_gp1_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
639 486
640 WRT_REG_WORD(&reg->pcr, 0x2200); 487 WRT_REG_WORD(&reg->pcr, 0x2200);
641 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 488 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
642 for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++) 489 for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++)
643 fw->risc_gp2_reg[cnt] = RD_REG_WORD(dmp_reg++); 490 fw->risc_gp2_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
644 491
645 WRT_REG_WORD(&reg->pcr, 0x2300); 492 WRT_REG_WORD(&reg->pcr, 0x2300);
646 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 493 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
647 for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++) 494 for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++)
648 fw->risc_gp3_reg[cnt] = RD_REG_WORD(dmp_reg++); 495 fw->risc_gp3_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
649 496
650 WRT_REG_WORD(&reg->pcr, 0x2400); 497 WRT_REG_WORD(&reg->pcr, 0x2400);
651 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 498 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
652 for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++) 499 for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++)
653 fw->risc_gp4_reg[cnt] = RD_REG_WORD(dmp_reg++); 500 fw->risc_gp4_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
654 501
655 WRT_REG_WORD(&reg->pcr, 0x2500); 502 WRT_REG_WORD(&reg->pcr, 0x2500);
656 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 503 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
657 for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++) 504 for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++)
658 fw->risc_gp5_reg[cnt] = RD_REG_WORD(dmp_reg++); 505 fw->risc_gp5_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
659 506
660 WRT_REG_WORD(&reg->pcr, 0x2600); 507 WRT_REG_WORD(&reg->pcr, 0x2600);
661 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 508 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
662 for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++) 509 for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++)
663 fw->risc_gp6_reg[cnt] = RD_REG_WORD(dmp_reg++); 510 fw->risc_gp6_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
664 511
665 WRT_REG_WORD(&reg->pcr, 0x2700); 512 WRT_REG_WORD(&reg->pcr, 0x2700);
666 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 513 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
667 for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++) 514 for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++)
668 fw->risc_gp7_reg[cnt] = RD_REG_WORD(dmp_reg++); 515 fw->risc_gp7_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
669 516
670 WRT_REG_WORD(&reg->ctrl_status, 0x10); 517 WRT_REG_WORD(&reg->ctrl_status, 0x10);
671 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 518 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
672 for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++) 519 for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++)
673 fw->frame_buf_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++); 520 fw->frame_buf_hdw_reg[cnt] =
521 htons(RD_REG_WORD(dmp_reg++));
674 522
675 WRT_REG_WORD(&reg->ctrl_status, 0x20); 523 WRT_REG_WORD(&reg->ctrl_status, 0x20);
676 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 524 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
677 for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++) 525 for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++)
678 fw->fpm_b0_reg[cnt] = RD_REG_WORD(dmp_reg++); 526 fw->fpm_b0_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
679 527
680 WRT_REG_WORD(&reg->ctrl_status, 0x30); 528 WRT_REG_WORD(&reg->ctrl_status, 0x30);
681 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 529 dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
682 for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++) 530 for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++)
683 fw->fpm_b1_reg[cnt] = RD_REG_WORD(dmp_reg++); 531 fw->fpm_b1_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
684 532
685 /* Reset the ISP. */ 533 /* Reset the ISP. */
686 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); 534 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
@@ -755,12 +603,15 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
755 603
756 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 604 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
757 rval = mb0 & MBS_MASK; 605 rval = mb0 & MBS_MASK;
758 fw->risc_ram[cnt] = mb2; 606 fw->risc_ram[cnt] = htons(mb2);
759 } else { 607 } else {
760 rval = QLA_FUNCTION_FAILED; 608 rval = QLA_FUNCTION_FAILED;
761 } 609 }
762 } 610 }
763 611
612 if (rval == QLA_SUCCESS)
613 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
614
764 if (rval != QLA_SUCCESS) { 615 if (rval != QLA_SUCCESS) {
765 qla_printk(KERN_WARNING, ha, 616 qla_printk(KERN_WARNING, ha,
766 "Failed to dump firmware (%x)!!!\n", rval); 617 "Failed to dump firmware (%x)!!!\n", rval);
@@ -778,179 +629,6 @@ qla2100_fw_dump_failed:
778 spin_unlock_irqrestore(&ha->hardware_lock, flags); 629 spin_unlock_irqrestore(&ha->hardware_lock, flags);
779} 630}
780 631
781/**
782 * qla2100_ascii_fw_dump() - Converts a binary firmware dump to ASCII.
783 * @ha: HA context
784 */
785void
786qla2100_ascii_fw_dump(scsi_qla_host_t *ha)
787{
788 uint32_t cnt;
789 char *uiter;
790 char fw_info[30];
791 struct qla2100_fw_dump *fw;
792
793 uiter = ha->fw_dump_buffer;
794 fw = ha->fw_dump;
795
796 qla_uprintf(&uiter, "%s Firmware Version %s\n", ha->model_number,
797 ha->isp_ops.fw_version_str(ha, fw_info));
798
799 qla_uprintf(&uiter, "\n[==>BEG]\n");
800
801 qla_uprintf(&uiter, "HCCR Register:\n%04x\n\n", fw->hccr);
802
803 qla_uprintf(&uiter, "PBIU Registers:");
804 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
805 if (cnt % 8 == 0) {
806 qla_uprintf(&uiter, "\n");
807 }
808 qla_uprintf(&uiter, "%04x ", fw->pbiu_reg[cnt]);
809 }
810
811 qla_uprintf(&uiter, "\n\nMailbox Registers:");
812 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
813 if (cnt % 8 == 0) {
814 qla_uprintf(&uiter, "\n");
815 }
816 qla_uprintf(&uiter, "%04x ", fw->mailbox_reg[cnt]);
817 }
818
819 qla_uprintf(&uiter, "\n\nDMA Registers:");
820 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
821 if (cnt % 8 == 0) {
822 qla_uprintf(&uiter, "\n");
823 }
824 qla_uprintf(&uiter, "%04x ", fw->dma_reg[cnt]);
825 }
826
827 qla_uprintf(&uiter, "\n\nRISC Hardware Registers:");
828 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
829 if (cnt % 8 == 0) {
830 qla_uprintf(&uiter, "\n");
831 }
832 qla_uprintf(&uiter, "%04x ", fw->risc_hdw_reg[cnt]);
833 }
834
835 qla_uprintf(&uiter, "\n\nRISC GP0 Registers:");
836 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
837 if (cnt % 8 == 0) {
838 qla_uprintf(&uiter, "\n");
839 }
840 qla_uprintf(&uiter, "%04x ", fw->risc_gp0_reg[cnt]);
841 }
842
843 qla_uprintf(&uiter, "\n\nRISC GP1 Registers:");
844 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
845 if (cnt % 8 == 0) {
846 qla_uprintf(&uiter, "\n");
847 }
848 qla_uprintf(&uiter, "%04x ", fw->risc_gp1_reg[cnt]);
849 }
850
851 qla_uprintf(&uiter, "\n\nRISC GP2 Registers:");
852 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
853 if (cnt % 8 == 0) {
854 qla_uprintf(&uiter, "\n");
855 }
856 qla_uprintf(&uiter, "%04x ", fw->risc_gp2_reg[cnt]);
857 }
858
859 qla_uprintf(&uiter, "\n\nRISC GP3 Registers:");
860 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
861 if (cnt % 8 == 0) {
862 qla_uprintf(&uiter, "\n");
863 }
864 qla_uprintf(&uiter, "%04x ", fw->risc_gp3_reg[cnt]);
865 }
866
867 qla_uprintf(&uiter, "\n\nRISC GP4 Registers:");
868 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
869 if (cnt % 8 == 0) {
870 qla_uprintf(&uiter, "\n");
871 }
872 qla_uprintf(&uiter, "%04x ", fw->risc_gp4_reg[cnt]);
873 }
874
875 qla_uprintf(&uiter, "\n\nRISC GP5 Registers:");
876 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
877 if (cnt % 8 == 0) {
878 qla_uprintf(&uiter, "\n");
879 }
880 qla_uprintf(&uiter, "%04x ", fw->risc_gp5_reg[cnt]);
881 }
882
883 qla_uprintf(&uiter, "\n\nRISC GP6 Registers:");
884 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
885 if (cnt % 8 == 0) {
886 qla_uprintf(&uiter, "\n");
887 }
888 qla_uprintf(&uiter, "%04x ", fw->risc_gp6_reg[cnt]);
889 }
890
891 qla_uprintf(&uiter, "\n\nRISC GP7 Registers:");
892 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
893 if (cnt % 8 == 0) {
894 qla_uprintf(&uiter, "\n");
895 }
896 qla_uprintf(&uiter, "%04x ", fw->risc_gp7_reg[cnt]);
897 }
898
899 qla_uprintf(&uiter, "\n\nFrame Buffer Hardware Registers:");
900 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
901 if (cnt % 8 == 0) {
902 qla_uprintf(&uiter, "\n");
903 }
904 qla_uprintf(&uiter, "%04x ", fw->frame_buf_hdw_reg[cnt]);
905 }
906
907 qla_uprintf(&uiter, "\n\nFPM B0 Registers:");
908 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
909 if (cnt % 8 == 0) {
910 qla_uprintf(&uiter, "\n");
911 }
912 qla_uprintf(&uiter, "%04x ", fw->fpm_b0_reg[cnt]);
913 }
914
915 qla_uprintf(&uiter, "\n\nFPM B1 Registers:");
916 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
917 if (cnt % 8 == 0) {
918 qla_uprintf(&uiter, "\n");
919 }
920 qla_uprintf(&uiter, "%04x ", fw->fpm_b1_reg[cnt]);
921 }
922
923 qla_uprintf(&uiter, "\n\nRISC SRAM:");
924 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
925 if (cnt % 8 == 0) {
926 qla_uprintf(&uiter, "\n%04x: ", cnt + 0x1000);
927 }
928 qla_uprintf(&uiter, "%04x ", fw->risc_ram[cnt]);
929 }
930
931 qla_uprintf(&uiter, "\n\n[<==END] ISP Debug Dump.");
932
933 return;
934}
935
936static int
937qla_uprintf(char **uiter, char *fmt, ...)
938{
939 int iter, len;
940 char buf[128];
941 va_list args;
942
943 va_start(args, fmt);
944 len = vsprintf(buf, fmt, args);
945 va_end(args);
946
947 for (iter = 0; iter < len; iter++, *uiter += 1)
948 *uiter[0] = buf[iter];
949
950 return (len);
951}
952
953
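
The block deleted above removes qla2100_ascii_fw_dump() and its qla_uprintf() helper outright: with the dump now written in a fixed big-endian layout, rendering it as text no longer needs to happen in the kernel. A hypothetical userspace decoder (names assumed, not part of this patch) could reproduce the old word-per-word output like this:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

/* Print 16-bit dump words eight per row, undoing the kernel's htons(). */
static void print_words(const uint16_t *buf, size_t n, unsigned int base)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (i % 8 == 0)
			printf("\n%04x: ", (unsigned int)(base + i));
		printf("%04x ", (unsigned int)ntohs(buf[i]));
	}
	printf("\n");
}
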
954void 632void
955qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 633qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
956{ 634{
@@ -967,6 +645,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
967 unsigned long flags; 645 unsigned long flags;
968 struct qla24xx_fw_dump *fw; 646 struct qla24xx_fw_dump *fw;
969 uint32_t ext_mem_cnt; 647 uint32_t ext_mem_cnt;
648 void *eft;
970 649
971 risc_address = ext_mem_cnt = 0; 650 risc_address = ext_mem_cnt = 0;
972 memset(mb, 0, sizeof(mb)); 651 memset(mb, 0, sizeof(mb));
@@ -987,10 +666,11 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
987 "request...\n", ha->fw_dump); 666 "request...\n", ha->fw_dump);
988 goto qla24xx_fw_dump_failed; 667 goto qla24xx_fw_dump_failed;
989 } 668 }
990 fw = ha->fw_dump; 669 fw = &ha->fw_dump->isp.isp24;
670 qla2xxx_prep_dump(ha, ha->fw_dump);
991 671
992 rval = QLA_SUCCESS; 672 rval = QLA_SUCCESS;
993 fw->host_status = RD_REG_DWORD(&reg->host_status); 673 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
994 674
995 /* Pause RISC. */ 675 /* Pause RISC. */
996 if ((RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0) { 676 if ((RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0) {
@@ -1012,7 +692,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1012 /* Host interface registers. */ 692 /* Host interface registers. */
1013 dmp_reg = (uint32_t __iomem *)(reg + 0); 693 dmp_reg = (uint32_t __iomem *)(reg + 0);
1014 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) 694 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1015 fw->host_reg[cnt] = RD_REG_DWORD(dmp_reg++); 695 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1016 696
1017 /* Disable interrupts. */ 697 /* Disable interrupts. */
1018 WRT_REG_DWORD(&reg->ictrl, 0); 698 WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1024,470 +704,471 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1024 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 704 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1025 WRT_REG_DWORD(dmp_reg, 0xB0000000); 705 WRT_REG_DWORD(dmp_reg, 0xB0000000);
1026 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 706 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1027 fw->shadow_reg[0] = RD_REG_DWORD(dmp_reg); 707 fw->shadow_reg[0] = htonl(RD_REG_DWORD(dmp_reg));
1028 708
1029 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 709 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1030 WRT_REG_DWORD(dmp_reg, 0xB0100000); 710 WRT_REG_DWORD(dmp_reg, 0xB0100000);
1031 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 711 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1032 fw->shadow_reg[1] = RD_REG_DWORD(dmp_reg); 712 fw->shadow_reg[1] = htonl(RD_REG_DWORD(dmp_reg));
1033 713
1034 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 714 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1035 WRT_REG_DWORD(dmp_reg, 0xB0200000); 715 WRT_REG_DWORD(dmp_reg, 0xB0200000);
1036 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 716 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1037 fw->shadow_reg[2] = RD_REG_DWORD(dmp_reg); 717 fw->shadow_reg[2] = htonl(RD_REG_DWORD(dmp_reg));
1038 718
1039 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 719 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1040 WRT_REG_DWORD(dmp_reg, 0xB0300000); 720 WRT_REG_DWORD(dmp_reg, 0xB0300000);
1041 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 721 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1042 fw->shadow_reg[3] = RD_REG_DWORD(dmp_reg); 722 fw->shadow_reg[3] = htonl(RD_REG_DWORD(dmp_reg));
1043 723
1044 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 724 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1045 WRT_REG_DWORD(dmp_reg, 0xB0400000); 725 WRT_REG_DWORD(dmp_reg, 0xB0400000);
1046 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 726 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1047 fw->shadow_reg[4] = RD_REG_DWORD(dmp_reg); 727 fw->shadow_reg[4] = htonl(RD_REG_DWORD(dmp_reg));
1048 728
1049 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 729 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1050 WRT_REG_DWORD(dmp_reg, 0xB0500000); 730 WRT_REG_DWORD(dmp_reg, 0xB0500000);
1051 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 731 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1052 fw->shadow_reg[5] = RD_REG_DWORD(dmp_reg); 732 fw->shadow_reg[5] = htonl(RD_REG_DWORD(dmp_reg));
1053 733
1054 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0); 734 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
1055 WRT_REG_DWORD(dmp_reg, 0xB0600000); 735 WRT_REG_DWORD(dmp_reg, 0xB0600000);
1056 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC); 736 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
1057 fw->shadow_reg[6] = RD_REG_DWORD(dmp_reg); 737 fw->shadow_reg[6] = htonl(RD_REG_DWORD(dmp_reg));
1058 738
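
The shadow-register block above repeats one windowed-access idiom: write a selector dword at offset 0xF0, then read the selected value back through the data window at 0xFC; the selectors 0xB0000000, 0xB0100000, and so on differ only in bits 20 and up. A hedged sketch of the idiom, with plain volatile pointers standing in for RD_REG_DWORD/WRT_REG_DWORD:

#include <stdint.h>

/* 'sel' models the register at offset 0xF0, 'win' the one at 0xFC. */
static uint32_t read_shadow_reg(volatile uint32_t *sel,
    volatile uint32_t *win, unsigned int index)
{
	*sel = 0xB0000000u | ((uint32_t)index << 20);	/* pick shadow reg */
	return *win;					/* read through the window */
}

The register banks that follow use the same shape through iobase_addr and the data window at offset 0xC0.
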
1059 /* Mailbox registers. */ 739 /* Mailbox registers. */
1060 mbx_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80); 740 mbx_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
1061 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 741 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1062 fw->mailbox_reg[cnt] = RD_REG_WORD(mbx_reg++); 742 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1063 743
1064 /* Transfer sequence registers. */ 744 /* Transfer sequence registers. */
1065 iter_reg = fw->xseq_gp_reg; 745 iter_reg = fw->xseq_gp_reg;
1066 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00); 746 WRT_REG_DWORD(&reg->iobase_addr, 0xBF00);
1067 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 747 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1068 for (cnt = 0; cnt < 16; cnt++) 748 for (cnt = 0; cnt < 16; cnt++)
1069 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 749 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1070 750
1071 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10); 751 WRT_REG_DWORD(&reg->iobase_addr, 0xBF10);
1072 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 752 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1073 for (cnt = 0; cnt < 16; cnt++) 753 for (cnt = 0; cnt < 16; cnt++)
1074 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 754 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1075 755
1076 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20); 756 WRT_REG_DWORD(&reg->iobase_addr, 0xBF20);
1077 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 757 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1078 for (cnt = 0; cnt < 16; cnt++) 758 for (cnt = 0; cnt < 16; cnt++)
1079 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 759 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1080 760
1081 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30); 761 WRT_REG_DWORD(&reg->iobase_addr, 0xBF30);
1082 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 762 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1083 for (cnt = 0; cnt < 16; cnt++) 763 for (cnt = 0; cnt < 16; cnt++)
1084 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 764 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1085 765
1086 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40); 766 WRT_REG_DWORD(&reg->iobase_addr, 0xBF40);
1087 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 767 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1088 for (cnt = 0; cnt < 16; cnt++) 768 for (cnt = 0; cnt < 16; cnt++)
1089 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 769 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1090 770
1091 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50); 771 WRT_REG_DWORD(&reg->iobase_addr, 0xBF50);
1092 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 772 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1093 for (cnt = 0; cnt < 16; cnt++) 773 for (cnt = 0; cnt < 16; cnt++)
1094 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 774 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1095 775
1096 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60); 776 WRT_REG_DWORD(&reg->iobase_addr, 0xBF60);
1097 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 777 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1098 for (cnt = 0; cnt < 16; cnt++) 778 for (cnt = 0; cnt < 16; cnt++)
1099 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 779 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1100 780
1101 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70); 781 WRT_REG_DWORD(&reg->iobase_addr, 0xBF70);
1102 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 782 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1103 for (cnt = 0; cnt < 16; cnt++) 783 for (cnt = 0; cnt < 16; cnt++)
1104 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 784 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1105 785
1106 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0); 786 WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0);
1107 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 787 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1108 for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++) 788 for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++)
1109 fw->xseq_0_reg[cnt] = RD_REG_DWORD(dmp_reg++); 789 fw->xseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1110 790
1111 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0); 791 WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0);
1112 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 792 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1113 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++) 793 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++)
1114 fw->xseq_1_reg[cnt] = RD_REG_DWORD(dmp_reg++); 794 fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1115 795
1116 /* Receive sequence registers. */ 796 /* Receive sequence registers. */
1117 iter_reg = fw->rseq_gp_reg; 797 iter_reg = fw->rseq_gp_reg;
1118 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00); 798 WRT_REG_DWORD(&reg->iobase_addr, 0xFF00);
1119 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 799 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1120 for (cnt = 0; cnt < 16; cnt++) 800 for (cnt = 0; cnt < 16; cnt++)
1121 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 801 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1122 802
1123 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10); 803 WRT_REG_DWORD(&reg->iobase_addr, 0xFF10);
1124 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 804 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1125 for (cnt = 0; cnt < 16; cnt++) 805 for (cnt = 0; cnt < 16; cnt++)
1126 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 806 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1127 807
1128 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20); 808 WRT_REG_DWORD(&reg->iobase_addr, 0xFF20);
1129 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 809 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1130 for (cnt = 0; cnt < 16; cnt++) 810 for (cnt = 0; cnt < 16; cnt++)
1131 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 811 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1132 812
1133 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30); 813 WRT_REG_DWORD(&reg->iobase_addr, 0xFF30);
1134 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 814 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1135 for (cnt = 0; cnt < 16; cnt++) 815 for (cnt = 0; cnt < 16; cnt++)
1136 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 816 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1137 817
1138 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40); 818 WRT_REG_DWORD(&reg->iobase_addr, 0xFF40);
1139 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 819 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1140 for (cnt = 0; cnt < 16; cnt++) 820 for (cnt = 0; cnt < 16; cnt++)
1141 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 821 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1142 822
1143 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50); 823 WRT_REG_DWORD(&reg->iobase_addr, 0xFF50);
1144 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 824 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1145 for (cnt = 0; cnt < 16; cnt++) 825 for (cnt = 0; cnt < 16; cnt++)
1146 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 826 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1147 827
1148 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60); 828 WRT_REG_DWORD(&reg->iobase_addr, 0xFF60);
1149 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 829 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1150 for (cnt = 0; cnt < 16; cnt++) 830 for (cnt = 0; cnt < 16; cnt++)
1151 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 831 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1152 832
1153 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70); 833 WRT_REG_DWORD(&reg->iobase_addr, 0xFF70);
1154 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 834 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1155 for (cnt = 0; cnt < 16; cnt++) 835 for (cnt = 0; cnt < 16; cnt++)
1156 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 836 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1157 837
1158 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0); 838 WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0);
1159 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 839 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1160 for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++) 840 for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++)
1161 fw->rseq_0_reg[cnt] = RD_REG_DWORD(dmp_reg++); 841 fw->rseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1162 842
1163 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0); 843 WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0);
1164 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 844 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1165 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++) 845 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++)
1166 fw->rseq_1_reg[cnt] = RD_REG_DWORD(dmp_reg++); 846 fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1167 847
1168 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0); 848 WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0);
1169 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 849 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1170 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++) 850 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++)
1171 fw->rseq_2_reg[cnt] = RD_REG_DWORD(dmp_reg++); 851 fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1172 852
1173 /* Command DMA registers. */ 853 /* Command DMA registers. */
1174 WRT_REG_DWORD(&reg->iobase_addr, 0x7100); 854 WRT_REG_DWORD(&reg->iobase_addr, 0x7100);
1175 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 855 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1176 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++) 856 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++)
1177 fw->cmd_dma_reg[cnt] = RD_REG_DWORD(dmp_reg++); 857 fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1178 858
1179 /* Queues. */ 859 /* Queues. */
1180 iter_reg = fw->req0_dma_reg; 860 iter_reg = fw->req0_dma_reg;
1181 WRT_REG_DWORD(&reg->iobase_addr, 0x7200); 861 WRT_REG_DWORD(&reg->iobase_addr, 0x7200);
1182 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 862 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1183 for (cnt = 0; cnt < 8; cnt++) 863 for (cnt = 0; cnt < 8; cnt++)
1184 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 864 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1185 865
1186 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 866 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4);
1187 for (cnt = 0; cnt < 7; cnt++) 867 for (cnt = 0; cnt < 7; cnt++)
1188 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 868 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1189 869
1190 iter_reg = fw->resp0_dma_reg; 870 iter_reg = fw->resp0_dma_reg;
1191 WRT_REG_DWORD(&reg->iobase_addr, 0x7300); 871 WRT_REG_DWORD(&reg->iobase_addr, 0x7300);
1192 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 872 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1193 for (cnt = 0; cnt < 8; cnt++) 873 for (cnt = 0; cnt < 8; cnt++)
1194 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 874 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1195 875
1196 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 876 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4);
1197 for (cnt = 0; cnt < 7; cnt++) 877 for (cnt = 0; cnt < 7; cnt++)
1198 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 878 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1199 879
1200 iter_reg = fw->req1_dma_reg; 880 iter_reg = fw->req1_dma_reg;
1201 WRT_REG_DWORD(&reg->iobase_addr, 0x7400); 881 WRT_REG_DWORD(&reg->iobase_addr, 0x7400);
1202 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 882 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1203 for (cnt = 0; cnt < 8; cnt++) 883 for (cnt = 0; cnt < 8; cnt++)
1204 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 884 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1205 885
1206 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4); 886 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4);
1207 for (cnt = 0; cnt < 7; cnt++) 887 for (cnt = 0; cnt < 7; cnt++)
1208 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 888 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1209 889
1210 /* Transmit DMA registers. */ 890 /* Transmit DMA registers. */
1211 iter_reg = fw->xmt0_dma_reg; 891 iter_reg = fw->xmt0_dma_reg;
1212 WRT_REG_DWORD(&reg->iobase_addr, 0x7600); 892 WRT_REG_DWORD(&reg->iobase_addr, 0x7600);
1213 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 893 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1214 for (cnt = 0; cnt < 16; cnt++) 894 for (cnt = 0; cnt < 16; cnt++)
1215 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 895 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1216 896
1217 WRT_REG_DWORD(&reg->iobase_addr, 0x7610); 897 WRT_REG_DWORD(&reg->iobase_addr, 0x7610);
1218 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 898 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1219 for (cnt = 0; cnt < 16; cnt++) 899 for (cnt = 0; cnt < 16; cnt++)
1220 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 900 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1221 901
1222 iter_reg = fw->xmt1_dma_reg; 902 iter_reg = fw->xmt1_dma_reg;
1223 WRT_REG_DWORD(&reg->iobase_addr, 0x7620); 903 WRT_REG_DWORD(&reg->iobase_addr, 0x7620);
1224 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 904 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1225 for (cnt = 0; cnt < 16; cnt++) 905 for (cnt = 0; cnt < 16; cnt++)
1226 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 906 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1227 907
1228 WRT_REG_DWORD(&reg->iobase_addr, 0x7630); 908 WRT_REG_DWORD(&reg->iobase_addr, 0x7630);
1229 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 909 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1230 for (cnt = 0; cnt < 16; cnt++) 910 for (cnt = 0; cnt < 16; cnt++)
1231 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 911 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1232 912
1233 iter_reg = fw->xmt2_dma_reg; 913 iter_reg = fw->xmt2_dma_reg;
1234 WRT_REG_DWORD(&reg->iobase_addr, 0x7640); 914 WRT_REG_DWORD(&reg->iobase_addr, 0x7640);
1235 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 915 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1236 for (cnt = 0; cnt < 16; cnt++) 916 for (cnt = 0; cnt < 16; cnt++)
1237 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 917 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1238 918
1239 WRT_REG_DWORD(&reg->iobase_addr, 0x7650); 919 WRT_REG_DWORD(&reg->iobase_addr, 0x7650);
1240 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 920 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1241 for (cnt = 0; cnt < 16; cnt++) 921 for (cnt = 0; cnt < 16; cnt++)
1242 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 922 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1243 923
1244 iter_reg = fw->xmt3_dma_reg; 924 iter_reg = fw->xmt3_dma_reg;
1245 WRT_REG_DWORD(&reg->iobase_addr, 0x7660); 925 WRT_REG_DWORD(&reg->iobase_addr, 0x7660);
1246 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 926 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1247 for (cnt = 0; cnt < 16; cnt++) 927 for (cnt = 0; cnt < 16; cnt++)
1248 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 928 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1249 929
1250 WRT_REG_DWORD(&reg->iobase_addr, 0x7670); 930 WRT_REG_DWORD(&reg->iobase_addr, 0x7670);
1251 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 931 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1252 for (cnt = 0; cnt < 16; cnt++) 932 for (cnt = 0; cnt < 16; cnt++)
1253 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 933 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1254 934
1255 iter_reg = fw->xmt4_dma_reg; 935 iter_reg = fw->xmt4_dma_reg;
1256 WRT_REG_DWORD(&reg->iobase_addr, 0x7680); 936 WRT_REG_DWORD(&reg->iobase_addr, 0x7680);
1257 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 937 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1258 for (cnt = 0; cnt < 16; cnt++) 938 for (cnt = 0; cnt < 16; cnt++)
1259 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 939 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1260 940
1261 WRT_REG_DWORD(&reg->iobase_addr, 0x7690); 941 WRT_REG_DWORD(&reg->iobase_addr, 0x7690);
1262 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 942 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1263 for (cnt = 0; cnt < 16; cnt++) 943 for (cnt = 0; cnt < 16; cnt++)
1264 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 944 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1265 945
1266 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0); 946 WRT_REG_DWORD(&reg->iobase_addr, 0x76A0);
1267 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 947 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1268 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++) 948 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++)
1269 fw->xmt_data_dma_reg[cnt] = RD_REG_DWORD(dmp_reg++); 949 fw->xmt_data_dma_reg[cnt] =
950 htonl(RD_REG_DWORD(dmp_reg++));
1270 951
1271 /* Receive DMA registers. */ 952 /* Receive DMA registers. */
1272 iter_reg = fw->rcvt0_data_dma_reg; 953 iter_reg = fw->rcvt0_data_dma_reg;
1273 WRT_REG_DWORD(&reg->iobase_addr, 0x7700); 954 WRT_REG_DWORD(&reg->iobase_addr, 0x7700);
1274 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 955 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1275 for (cnt = 0; cnt < 16; cnt++) 956 for (cnt = 0; cnt < 16; cnt++)
1276 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 957 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1277 958
1278 WRT_REG_DWORD(&reg->iobase_addr, 0x7710); 959 WRT_REG_DWORD(&reg->iobase_addr, 0x7710);
1279 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 960 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1280 for (cnt = 0; cnt < 16; cnt++) 961 for (cnt = 0; cnt < 16; cnt++)
1281 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 962 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1282 963
1283 iter_reg = fw->rcvt1_data_dma_reg; 964 iter_reg = fw->rcvt1_data_dma_reg;
1284 WRT_REG_DWORD(&reg->iobase_addr, 0x7720); 965 WRT_REG_DWORD(&reg->iobase_addr, 0x7720);
1285 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 966 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1286 for (cnt = 0; cnt < 16; cnt++) 967 for (cnt = 0; cnt < 16; cnt++)
1287 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 968 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1288 969
1289 WRT_REG_DWORD(&reg->iobase_addr, 0x7730); 970 WRT_REG_DWORD(&reg->iobase_addr, 0x7730);
1290 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 971 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1291 for (cnt = 0; cnt < 16; cnt++) 972 for (cnt = 0; cnt < 16; cnt++)
1292 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 973 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1293 974
1294 /* RISC registers. */ 975 /* RISC registers. */
1295 iter_reg = fw->risc_gp_reg; 976 iter_reg = fw->risc_gp_reg;
1296 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00); 977 WRT_REG_DWORD(&reg->iobase_addr, 0x0F00);
1297 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 978 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1298 for (cnt = 0; cnt < 16; cnt++) 979 for (cnt = 0; cnt < 16; cnt++)
1299 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 980 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1300 981
1301 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10); 982 WRT_REG_DWORD(&reg->iobase_addr, 0x0F10);
1302 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 983 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1303 for (cnt = 0; cnt < 16; cnt++) 984 for (cnt = 0; cnt < 16; cnt++)
1304 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 985 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1305 986
1306 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20); 987 WRT_REG_DWORD(&reg->iobase_addr, 0x0F20);
1307 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 988 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1308 for (cnt = 0; cnt < 16; cnt++) 989 for (cnt = 0; cnt < 16; cnt++)
1309 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 990 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1310 991
1311 WRT_REG_DWORD(&reg->iobase_addr, 0x0F30); 992 WRT_REG_DWORD(&reg->iobase_addr, 0x0F30);
1312 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 993 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1313 for (cnt = 0; cnt < 16; cnt++) 994 for (cnt = 0; cnt < 16; cnt++)
1314 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 995 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1315 996
1316 WRT_REG_DWORD(&reg->iobase_addr, 0x0F40); 997 WRT_REG_DWORD(&reg->iobase_addr, 0x0F40);
1317 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 998 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1318 for (cnt = 0; cnt < 16; cnt++) 999 for (cnt = 0; cnt < 16; cnt++)
1319 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1000 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1320 1001
1321 WRT_REG_DWORD(&reg->iobase_addr, 0x0F50); 1002 WRT_REG_DWORD(&reg->iobase_addr, 0x0F50);
1322 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1003 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1323 for (cnt = 0; cnt < 16; cnt++) 1004 for (cnt = 0; cnt < 16; cnt++)
1324 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1005 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1325 1006
1326 WRT_REG_DWORD(&reg->iobase_addr, 0x0F60); 1007 WRT_REG_DWORD(&reg->iobase_addr, 0x0F60);
1327 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1008 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1328 for (cnt = 0; cnt < 16; cnt++) 1009 for (cnt = 0; cnt < 16; cnt++)
1329 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1010 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1330 1011
1331 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); 1012 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1332 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1013 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1333 for (cnt = 0; cnt < 16; cnt++) 1014 for (cnt = 0; cnt < 16; cnt++)
1334 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1015 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1335 1016
1336 /* Local memory controller registers. */ 1017 /* Local memory controller registers. */
1337 iter_reg = fw->lmc_reg; 1018 iter_reg = fw->lmc_reg;
1338 WRT_REG_DWORD(&reg->iobase_addr, 0x3000); 1019 WRT_REG_DWORD(&reg->iobase_addr, 0x3000);
1339 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1020 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1340 for (cnt = 0; cnt < 16; cnt++) 1021 for (cnt = 0; cnt < 16; cnt++)
1341 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1022 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1342 1023
1343 WRT_REG_DWORD(&reg->iobase_addr, 0x3010); 1024 WRT_REG_DWORD(&reg->iobase_addr, 0x3010);
1344 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1025 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1345 for (cnt = 0; cnt < 16; cnt++) 1026 for (cnt = 0; cnt < 16; cnt++)
1346 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1027 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1347 1028
1348 WRT_REG_DWORD(&reg->iobase_addr, 0x3020); 1029 WRT_REG_DWORD(&reg->iobase_addr, 0x3020);
1349 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1030 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1350 for (cnt = 0; cnt < 16; cnt++) 1031 for (cnt = 0; cnt < 16; cnt++)
1351 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1032 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1352 1033
1353 WRT_REG_DWORD(&reg->iobase_addr, 0x3030); 1034 WRT_REG_DWORD(&reg->iobase_addr, 0x3030);
1354 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1035 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1355 for (cnt = 0; cnt < 16; cnt++) 1036 for (cnt = 0; cnt < 16; cnt++)
1356 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1037 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1357 1038
1358 WRT_REG_DWORD(&reg->iobase_addr, 0x3040); 1039 WRT_REG_DWORD(&reg->iobase_addr, 0x3040);
1359 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1040 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1360 for (cnt = 0; cnt < 16; cnt++) 1041 for (cnt = 0; cnt < 16; cnt++)
1361 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1042 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1362 1043
1363 WRT_REG_DWORD(&reg->iobase_addr, 0x3050); 1044 WRT_REG_DWORD(&reg->iobase_addr, 0x3050);
1364 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1045 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1365 for (cnt = 0; cnt < 16; cnt++) 1046 for (cnt = 0; cnt < 16; cnt++)
1366 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1047 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1367 1048
1368 WRT_REG_DWORD(&reg->iobase_addr, 0x3060); 1049 WRT_REG_DWORD(&reg->iobase_addr, 0x3060);
1369 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1050 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1370 for (cnt = 0; cnt < 16; cnt++) 1051 for (cnt = 0; cnt < 16; cnt++)
1371 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1052 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1372 1053
1373 /* Fibre Protocol Module registers. */ 1054 /* Fibre Protocol Module registers. */
1374 iter_reg = fw->fpm_hdw_reg; 1055 iter_reg = fw->fpm_hdw_reg;
1375 WRT_REG_DWORD(&reg->iobase_addr, 0x4000); 1056 WRT_REG_DWORD(&reg->iobase_addr, 0x4000);
1376 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1057 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1377 for (cnt = 0; cnt < 16; cnt++) 1058 for (cnt = 0; cnt < 16; cnt++)
1378 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1059 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1379 1060
1380 WRT_REG_DWORD(&reg->iobase_addr, 0x4010); 1061 WRT_REG_DWORD(&reg->iobase_addr, 0x4010);
1381 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1062 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1382 for (cnt = 0; cnt < 16; cnt++) 1063 for (cnt = 0; cnt < 16; cnt++)
1383 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1064 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1384 1065
1385 WRT_REG_DWORD(&reg->iobase_addr, 0x4020); 1066 WRT_REG_DWORD(&reg->iobase_addr, 0x4020);
1386 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1067 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1387 for (cnt = 0; cnt < 16; cnt++) 1068 for (cnt = 0; cnt < 16; cnt++)
1388 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1069 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1389 1070
1390 WRT_REG_DWORD(&reg->iobase_addr, 0x4030); 1071 WRT_REG_DWORD(&reg->iobase_addr, 0x4030);
1391 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1072 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1392 for (cnt = 0; cnt < 16; cnt++) 1073 for (cnt = 0; cnt < 16; cnt++)
1393 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1074 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1394 1075
1395 WRT_REG_DWORD(&reg->iobase_addr, 0x4040); 1076 WRT_REG_DWORD(&reg->iobase_addr, 0x4040);
1396 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1077 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1397 for (cnt = 0; cnt < 16; cnt++) 1078 for (cnt = 0; cnt < 16; cnt++)
1398 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1079 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1399 1080
1400 WRT_REG_DWORD(&reg->iobase_addr, 0x4050); 1081 WRT_REG_DWORD(&reg->iobase_addr, 0x4050);
1401 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1082 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1402 for (cnt = 0; cnt < 16; cnt++) 1083 for (cnt = 0; cnt < 16; cnt++)
1403 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1084 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1404 1085
1405 WRT_REG_DWORD(&reg->iobase_addr, 0x4060); 1086 WRT_REG_DWORD(&reg->iobase_addr, 0x4060);
1406 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1087 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1407 for (cnt = 0; cnt < 16; cnt++) 1088 for (cnt = 0; cnt < 16; cnt++)
1408 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1089 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1409 1090
1410 WRT_REG_DWORD(&reg->iobase_addr, 0x4070); 1091 WRT_REG_DWORD(&reg->iobase_addr, 0x4070);
1411 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1092 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1412 for (cnt = 0; cnt < 16; cnt++) 1093 for (cnt = 0; cnt < 16; cnt++)
1413 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1094 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1414 1095
1415 WRT_REG_DWORD(&reg->iobase_addr, 0x4080); 1096 WRT_REG_DWORD(&reg->iobase_addr, 0x4080);
1416 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1097 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1417 for (cnt = 0; cnt < 16; cnt++) 1098 for (cnt = 0; cnt < 16; cnt++)
1418 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1099 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1419 1100
1420 WRT_REG_DWORD(&reg->iobase_addr, 0x4090); 1101 WRT_REG_DWORD(&reg->iobase_addr, 0x4090);
1421 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1102 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1422 for (cnt = 0; cnt < 16; cnt++) 1103 for (cnt = 0; cnt < 16; cnt++)
1423 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1104 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1424 1105
1425 WRT_REG_DWORD(&reg->iobase_addr, 0x40A0); 1106 WRT_REG_DWORD(&reg->iobase_addr, 0x40A0);
1426 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1107 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1427 for (cnt = 0; cnt < 16; cnt++) 1108 for (cnt = 0; cnt < 16; cnt++)
1428 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1109 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1429 1110
1430 WRT_REG_DWORD(&reg->iobase_addr, 0x40B0); 1111 WRT_REG_DWORD(&reg->iobase_addr, 0x40B0);
1431 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1112 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1432 for (cnt = 0; cnt < 16; cnt++) 1113 for (cnt = 0; cnt < 16; cnt++)
1433 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1114 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1434 1115
1435 /* Frame Buffer registers. */ 1116 /* Frame Buffer registers. */
1436 iter_reg = fw->fb_hdw_reg; 1117 iter_reg = fw->fb_hdw_reg;
1437 WRT_REG_DWORD(&reg->iobase_addr, 0x6000); 1118 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
1438 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1119 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1439 for (cnt = 0; cnt < 16; cnt++) 1120 for (cnt = 0; cnt < 16; cnt++)
1440 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1121 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1441 1122
1442 WRT_REG_DWORD(&reg->iobase_addr, 0x6010); 1123 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
1443 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1124 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1444 for (cnt = 0; cnt < 16; cnt++) 1125 for (cnt = 0; cnt < 16; cnt++)
1445 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1126 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1446 1127
1447 WRT_REG_DWORD(&reg->iobase_addr, 0x6020); 1128 WRT_REG_DWORD(&reg->iobase_addr, 0x6020);
1448 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1129 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1449 for (cnt = 0; cnt < 16; cnt++) 1130 for (cnt = 0; cnt < 16; cnt++)
1450 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1131 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1451 1132
1452 WRT_REG_DWORD(&reg->iobase_addr, 0x6030); 1133 WRT_REG_DWORD(&reg->iobase_addr, 0x6030);
1453 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1134 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1454 for (cnt = 0; cnt < 16; cnt++) 1135 for (cnt = 0; cnt < 16; cnt++)
1455 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1136 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1456 1137
1457 WRT_REG_DWORD(&reg->iobase_addr, 0x6040); 1138 WRT_REG_DWORD(&reg->iobase_addr, 0x6040);
1458 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1139 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1459 for (cnt = 0; cnt < 16; cnt++) 1140 for (cnt = 0; cnt < 16; cnt++)
1460 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1141 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1461 1142
1462 WRT_REG_DWORD(&reg->iobase_addr, 0x6100); 1143 WRT_REG_DWORD(&reg->iobase_addr, 0x6100);
1463 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1144 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1464 for (cnt = 0; cnt < 16; cnt++) 1145 for (cnt = 0; cnt < 16; cnt++)
1465 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1146 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1466 1147
1467 WRT_REG_DWORD(&reg->iobase_addr, 0x6130); 1148 WRT_REG_DWORD(&reg->iobase_addr, 0x6130);
1468 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1149 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1469 for (cnt = 0; cnt < 16; cnt++) 1150 for (cnt = 0; cnt < 16; cnt++)
1470 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1151 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1471 1152
1472 WRT_REG_DWORD(&reg->iobase_addr, 0x6150); 1153 WRT_REG_DWORD(&reg->iobase_addr, 0x6150);
1473 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1154 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1474 for (cnt = 0; cnt < 16; cnt++) 1155 for (cnt = 0; cnt < 16; cnt++)
1475 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1156 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1476 1157
1477 WRT_REG_DWORD(&reg->iobase_addr, 0x6170); 1158 WRT_REG_DWORD(&reg->iobase_addr, 0x6170);
1478 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1159 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1479 for (cnt = 0; cnt < 16; cnt++) 1160 for (cnt = 0; cnt < 16; cnt++)
1480 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1161 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1481 1162
1482 WRT_REG_DWORD(&reg->iobase_addr, 0x6190); 1163 WRT_REG_DWORD(&reg->iobase_addr, 0x6190);
1483 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1164 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1484 for (cnt = 0; cnt < 16; cnt++) 1165 for (cnt = 0; cnt < 16; cnt++)
1485 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1166 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1486 1167
1487 WRT_REG_DWORD(&reg->iobase_addr, 0x61B0); 1168 WRT_REG_DWORD(&reg->iobase_addr, 0x61B0);
1488 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0); 1169 dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
1489 for (cnt = 0; cnt < 16; cnt++) 1170 for (cnt = 0; cnt < 16; cnt++)
1490 *iter_reg++ = RD_REG_DWORD(dmp_reg++); 1171 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1491 1172
1492 /* Reset RISC. */ 1173 /* Reset RISC. */
1493 WRT_REG_DWORD(&reg->ctrl_status, 1174 WRT_REG_DWORD(&reg->ctrl_status,
@@ -1577,7 +1258,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1577 1258
1578 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 1259 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1579 rval = mb[0] & MBS_MASK; 1260 rval = mb[0] & MBS_MASK;
1580 fw->code_ram[cnt] = (mb[3] << 16) | mb[2]; 1261 fw->code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
1581 } else { 1262 } else {
1582 rval = QLA_FUNCTION_FAILED; 1263 rval = QLA_FUNCTION_FAILED;
1583 } 1264 }
@@ -1627,12 +1308,18 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1627 1308
1628 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 1309 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1629 rval = mb[0] & MBS_MASK; 1310 rval = mb[0] & MBS_MASK;
1630 fw->ext_mem[cnt] = (mb[3] << 16) | mb[2]; 1311 fw->ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]);
1631 } else { 1312 } else {
1632 rval = QLA_FUNCTION_FAILED; 1313 rval = QLA_FUNCTION_FAILED;
1633 } 1314 }
1634 } 1315 }
1635 1316
1317 if (rval == QLA_SUCCESS) {
1318 eft = qla2xxx_copy_queues(ha, &fw->ext_mem[cnt]);
1319 if (ha->eft)
1320 memcpy(eft, ha->eft, ntohl(ha->fw_dump->eft_size));
1321 }
1322
1636 if (rval != QLA_SUCCESS) { 1323 if (rval != QLA_SUCCESS) {
1637 qla_printk(KERN_WARNING, ha, 1324 qla_printk(KERN_WARNING, ha,
1638 "Failed to dump firmware (%x)!!!\n", rval); 1325 "Failed to dump firmware (%x)!!!\n", rval);
@@ -1650,252 +1337,6 @@ qla24xx_fw_dump_failed:
1650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1337 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1651} 1338}
1652 1339
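
The new tail logic above extends the ISP24xx dump the same way as the ISP2100 one: after external memory, qla2xxx_copy_queues() appends the request and response rings and returns the next free position, and if an extended firmware trace buffer exists (ha->eft) it is copied in last. Reading eft_size back with ntohl() implies the dump header fields are kept big-endian as well. A small sketch of that append step, assuming 'next' plays the role of qla2xxx_copy_queues()'s return value:

#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

struct eft_hdr {
	uint32_t eft_size;	/* byte count, stored big-endian */
};

/* Copy the EFT buffer after the queues; returns the next free byte. */
static void *append_eft(void *next, const void *eft_buf,
    const struct eft_hdr *hdr)
{
	uint32_t len = ntohl(hdr->eft_size);

	if (eft_buf) {
		memcpy(next, eft_buf, len);
		next = (char *)next + len;
	}
	return next;
}
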
1653void
1654qla24xx_ascii_fw_dump(scsi_qla_host_t *ha)
1655{
1656 uint32_t cnt;
1657 char *uiter;
1658 struct qla24xx_fw_dump *fw;
1659 uint32_t ext_mem_cnt;
1660
1661 uiter = ha->fw_dump_buffer;
1662 fw = ha->fw_dump;
1663
1664 qla_uprintf(&uiter, "ISP FW Version %d.%02d.%02d Attributes %04x\n",
1665 ha->fw_major_version, ha->fw_minor_version,
1666 ha->fw_subminor_version, ha->fw_attributes);
1667
1668 qla_uprintf(&uiter, "\nR2H Status Register\n%04x\n", fw->host_status);
1669
1670 qla_uprintf(&uiter, "\nHost Interface Registers");
1671 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++) {
1672 if (cnt % 8 == 0)
1673 qla_uprintf(&uiter, "\n");
1674
1675 qla_uprintf(&uiter, "%08x ", fw->host_reg[cnt]);
1676 }
1677
1678 qla_uprintf(&uiter, "\n\nShadow Registers");
1679 for (cnt = 0; cnt < sizeof(fw->shadow_reg) / 4; cnt++) {
1680 if (cnt % 8 == 0)
1681 qla_uprintf(&uiter, "\n");
1682
1683 qla_uprintf(&uiter, "%08x ", fw->shadow_reg[cnt]);
1684 }
1685
1686 qla_uprintf(&uiter, "\n\nMailbox Registers");
1687 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) {
1688 if (cnt % 8 == 0)
1689 qla_uprintf(&uiter, "\n");
1690
1691 qla_uprintf(&uiter, "%08x ", fw->mailbox_reg[cnt]);
1692 }
1693
1694 qla_uprintf(&uiter, "\n\nXSEQ GP Registers");
1695 for (cnt = 0; cnt < sizeof(fw->xseq_gp_reg) / 4; cnt++) {
1696 if (cnt % 8 == 0)
1697 qla_uprintf(&uiter, "\n");
1698
1699 qla_uprintf(&uiter, "%08x ", fw->xseq_gp_reg[cnt]);
1700 }
1701
1702 qla_uprintf(&uiter, "\n\nXSEQ-0 Registers");
1703 for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++) {
1704 if (cnt % 8 == 0)
1705 qla_uprintf(&uiter, "\n");
1706
1707 qla_uprintf(&uiter, "%08x ", fw->xseq_0_reg[cnt]);
1708 }
1709
1710 qla_uprintf(&uiter, "\n\nXSEQ-1 Registers");
1711 for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++) {
1712 if (cnt % 8 == 0)
1713 qla_uprintf(&uiter, "\n");
1714
1715 qla_uprintf(&uiter, "%08x ", fw->xseq_1_reg[cnt]);
1716 }
1717
1718 qla_uprintf(&uiter, "\n\nRSEQ GP Registers");
1719 for (cnt = 0; cnt < sizeof(fw->rseq_gp_reg) / 4; cnt++) {
1720 if (cnt % 8 == 0)
1721 qla_uprintf(&uiter, "\n");
1722
1723 qla_uprintf(&uiter, "%08x ", fw->rseq_gp_reg[cnt]);
1724 }
1725
1726 qla_uprintf(&uiter, "\n\nRSEQ-0 Registers");
1727 for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++) {
1728 if (cnt % 8 == 0)
1729 qla_uprintf(&uiter, "\n");
1730
1731 qla_uprintf(&uiter, "%08x ", fw->rseq_0_reg[cnt]);
1732 }
1733
1734 qla_uprintf(&uiter, "\n\nRSEQ-1 Registers");
1735 for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++) {
1736 if (cnt % 8 == 0)
1737 qla_uprintf(&uiter, "\n");
1738
1739 qla_uprintf(&uiter, "%08x ", fw->rseq_1_reg[cnt]);
1740 }
1741
1742 qla_uprintf(&uiter, "\n\nRSEQ-2 Registers");
1743 for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++) {
1744 if (cnt % 8 == 0)
1745 qla_uprintf(&uiter, "\n");
1746
1747 qla_uprintf(&uiter, "%08x ", fw->rseq_2_reg[cnt]);
1748 }
1749
1750 qla_uprintf(&uiter, "\n\nCommand DMA Registers");
1751 for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++) {
1752 if (cnt % 8 == 0)
1753 qla_uprintf(&uiter, "\n");
1754
1755 qla_uprintf(&uiter, "%08x ", fw->cmd_dma_reg[cnt]);
1756 }
1757
1758 qla_uprintf(&uiter, "\n\nRequest0 Queue DMA Channel Registers");
1759 for (cnt = 0; cnt < sizeof(fw->req0_dma_reg) / 4; cnt++) {
1760 if (cnt % 8 == 0)
1761 qla_uprintf(&uiter, "\n");
1762
1763 qla_uprintf(&uiter, "%08x ", fw->req0_dma_reg[cnt]);
1764 }
1765
1766 qla_uprintf(&uiter, "\n\nResponse0 Queue DMA Channel Registers");
1767 for (cnt = 0; cnt < sizeof(fw->resp0_dma_reg) / 4; cnt++) {
1768 if (cnt % 8 == 0)
1769 qla_uprintf(&uiter, "\n");
1770
1771 qla_uprintf(&uiter, "%08x ", fw->resp0_dma_reg[cnt]);
1772 }
1773
1774 qla_uprintf(&uiter, "\n\nRequest1 Queue DMA Channel Registers");
1775 for (cnt = 0; cnt < sizeof(fw->req1_dma_reg) / 4; cnt++) {
1776 if (cnt % 8 == 0)
1777 qla_uprintf(&uiter, "\n");
1778
1779 qla_uprintf(&uiter, "%08x ", fw->req1_dma_reg[cnt]);
1780 }
1781
1782 qla_uprintf(&uiter, "\n\nXMT0 Data DMA Registers");
1783 for (cnt = 0; cnt < sizeof(fw->xmt0_dma_reg) / 4; cnt++) {
1784 if (cnt % 8 == 0)
1785 qla_uprintf(&uiter, "\n");
1786
1787 qla_uprintf(&uiter, "%08x ", fw->xmt0_dma_reg[cnt]);
1788 }
1789
1790 qla_uprintf(&uiter, "\n\nXMT1 Data DMA Registers");
1791 for (cnt = 0; cnt < sizeof(fw->xmt1_dma_reg) / 4; cnt++) {
1792 if (cnt % 8 == 0)
1793 qla_uprintf(&uiter, "\n");
1794
1795 qla_uprintf(&uiter, "%08x ", fw->xmt1_dma_reg[cnt]);
1796 }
1797
1798 qla_uprintf(&uiter, "\n\nXMT2 Data DMA Registers");
1799 for (cnt = 0; cnt < sizeof(fw->xmt2_dma_reg) / 4; cnt++) {
1800 if (cnt % 8 == 0)
1801 qla_uprintf(&uiter, "\n");
1802
1803 qla_uprintf(&uiter, "%08x ", fw->xmt2_dma_reg[cnt]);
1804 }
1805
1806 qla_uprintf(&uiter, "\n\nXMT3 Data DMA Registers");
1807 for (cnt = 0; cnt < sizeof(fw->xmt3_dma_reg) / 4; cnt++) {
1808 if (cnt % 8 == 0)
1809 qla_uprintf(&uiter, "\n");
1810
1811 qla_uprintf(&uiter, "%08x ", fw->xmt3_dma_reg[cnt]);
1812 }
1813
1814 qla_uprintf(&uiter, "\n\nXMT4 Data DMA Registers");
1815 for (cnt = 0; cnt < sizeof(fw->xmt4_dma_reg) / 4; cnt++) {
1816 if (cnt % 8 == 0)
1817 qla_uprintf(&uiter, "\n");
1818
1819 qla_uprintf(&uiter, "%08x ", fw->xmt4_dma_reg[cnt]);
1820 }
1821
1822 qla_uprintf(&uiter, "\n\nXMT Data DMA Common Registers");
1823 for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++) {
1824 if (cnt % 8 == 0)
1825 qla_uprintf(&uiter, "\n");
1826
1827 qla_uprintf(&uiter, "%08x ", fw->xmt_data_dma_reg[cnt]);
1828 }
1829
1830 qla_uprintf(&uiter, "\n\nRCV Thread 0 Data DMA Registers");
1831 for (cnt = 0; cnt < sizeof(fw->rcvt0_data_dma_reg) / 4; cnt++) {
1832 if (cnt % 8 == 0)
1833 qla_uprintf(&uiter, "\n");
1834
1835 qla_uprintf(&uiter, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
1836 }
1837
1838 qla_uprintf(&uiter, "\n\nRCV Thread 1 Data DMA Registers");
1839 for (cnt = 0; cnt < sizeof(fw->rcvt1_data_dma_reg) / 4; cnt++) {
1840 if (cnt % 8 == 0)
1841 qla_uprintf(&uiter, "\n");
1842
1843 qla_uprintf(&uiter, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
1844 }
1845
1846 qla_uprintf(&uiter, "\n\nRISC GP Registers");
1847 for (cnt = 0; cnt < sizeof(fw->risc_gp_reg) / 4; cnt++) {
1848 if (cnt % 8 == 0)
1849 qla_uprintf(&uiter, "\n");
1850
1851 qla_uprintf(&uiter, "%08x ", fw->risc_gp_reg[cnt]);
1852 }
1853
1854 qla_uprintf(&uiter, "\n\nLMC Registers");
1855 for (cnt = 0; cnt < sizeof(fw->lmc_reg) / 4; cnt++) {
1856 if (cnt % 8 == 0)
1857 qla_uprintf(&uiter, "\n");
1858
1859 qla_uprintf(&uiter, "%08x ", fw->lmc_reg[cnt]);
1860 }
1861
1862 qla_uprintf(&uiter, "\n\nFPM Hardware Registers");
1863 for (cnt = 0; cnt < sizeof(fw->fpm_hdw_reg) / 4; cnt++) {
1864 if (cnt % 8 == 0)
1865 qla_uprintf(&uiter, "\n");
1866
1867 qla_uprintf(&uiter, "%08x ", fw->fpm_hdw_reg[cnt]);
1868 }
1869
1870 qla_uprintf(&uiter, "\n\nFB Hardware Registers");
1871 for (cnt = 0; cnt < sizeof(fw->fb_hdw_reg) / 4; cnt++) {
1872 if (cnt % 8 == 0)
1873 qla_uprintf(&uiter, "\n");
1874
1875 qla_uprintf(&uiter, "%08x ", fw->fb_hdw_reg[cnt]);
1876 }
1877
1878 qla_uprintf(&uiter, "\n\nCode RAM");
1879 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
1880 if (cnt % 8 == 0) {
1881 qla_uprintf(&uiter, "\n%08x: ", cnt + 0x20000);
1882 }
1883 qla_uprintf(&uiter, "%08x ", fw->code_ram[cnt]);
1884 }
1885
1886 qla_uprintf(&uiter, "\n\nExternal Memory");
1887 ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
1888 for (cnt = 0; cnt < ext_mem_cnt; cnt++) {
1889 if (cnt % 8 == 0) {
1890 qla_uprintf(&uiter, "\n%08x: ", cnt + 0x100000);
1891 }
1892 qla_uprintf(&uiter, "%08x ", fw->ext_mem[cnt]);
1893 }
1894
1895 qla_uprintf(&uiter, "\n[<==END] ISP Debug Dump");
1896}
1897
1898
1899/****************************************************************************/ 1340/****************************************************************************/
1900/* Driver Debug Functions. */ 1341/* Driver Debug Functions. */
1901/****************************************************************************/ 1342/****************************************************************************/
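
With both per-ISP ASCII converters gone, what remains user-visible is the binary dump prefixed by the unified header introduced in qla_dbg.h below; the firmware version line the old converter printed can be recovered from that header instead. A hedged consumer-side sketch, assuming the header words are stored big-endian as the eft_size handling above suggests:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Mirrors the leading fields of struct qla2xxx_fw_dump (qla_dbg.h). */
struct unified_dump_hdr {
	uint8_t  signature[4];
	uint32_t version;
	uint32_t fw_major_version;
	uint32_t fw_minor_version;
	uint32_t fw_subminor_version;
	uint32_t fw_attributes;
};

static void show_fw_version(const struct unified_dump_hdr *h)
{
	printf("ISP FW Version %u.%02u.%02u Attributes %04x\n",
	    (unsigned int)ntohl(h->fw_major_version),
	    (unsigned int)ntohl(h->fw_minor_version),
	    (unsigned int)ntohl(h->fw_subminor_version),
	    (unsigned int)ntohl(h->fw_attributes));
}
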
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index ab6afeaa2f2c..533425338e05 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -37,134 +37,86 @@
37/* 37/*
38* Macros used for debugging the driver. 38* Macros used for debugging the driver.
39*/ 39*/
40#undef ENTER_TRACE
41#if defined(ENTER_TRACE)
42#define ENTER(x) do { printk("qla2100 : Entering %s()\n", x); } while (0)
43#define LEAVE(x) do { printk("qla2100 : Leaving %s()\n", x); } while (0)
44#define ENTER_INTR(x) do { printk("qla2100 : Entering %s()\n", x); } while (0)
45#define LEAVE_INTR(x) do { printk("qla2100 : Leaving %s()\n", x); } while (0)
46#else
47#define ENTER(x) do {} while (0)
48#define LEAVE(x) do {} while (0)
49#define ENTER_INTR(x) do {} while (0)
50#define LEAVE_INTR(x) do {} while (0)
51#endif
52 40
53#if DEBUG_QLA2100 41#define DEBUG(x) do { if (extended_error_logging) { x; } } while (0)
54#define DEBUG(x) do {x;} while (0);
55#else
56#define DEBUG(x) do {} while (0);
57#endif
58 42
59#if defined(QL_DEBUG_LEVEL_1) 43#if defined(QL_DEBUG_LEVEL_1)
60#define DEBUG1(x) do {x;} while (0); 44#define DEBUG1(x) do {x;} while (0)
61#else 45#else
62#define DEBUG1(x) do {} while (0); 46#define DEBUG1(x) do {} while (0)
63#endif 47#endif
64 48
65#if defined(QL_DEBUG_LEVEL_2) 49#define DEBUG2(x) do { if (extended_error_logging) { x; } } while (0)
66#define DEBUG2(x) do {x;} while (0); 50#define DEBUG2_3(x) do { if (extended_error_logging) { x; } } while (0)
67#define DEBUG2_3(x) do {x;} while (0); 51#define DEBUG2_3_11(x) do { if (extended_error_logging) { x; } } while (0)
68#define DEBUG2_3_11(x) do {x;} while (0); 52#define DEBUG2_9_10(x) do { if (extended_error_logging) { x; } } while (0)
69#define DEBUG2_9_10(x) do {x;} while (0); 53#define DEBUG2_11(x) do { if (extended_error_logging) { x; } } while (0)
70#define DEBUG2_11(x) do {x;} while (0); 54#define DEBUG2_13(x) do { if (extended_error_logging) { x; } } while (0)
71#define DEBUG2_13(x) do {x;} while (0);
72#else
73#define DEBUG2(x) do {} while (0);
74#endif
75 55
76#if defined(QL_DEBUG_LEVEL_3) 56#if defined(QL_DEBUG_LEVEL_3)
77#define DEBUG3(x) do {x;} while (0); 57#define DEBUG3(x) do {x;} while (0)
78#define DEBUG2_3(x) do {x;} while (0); 58#define DEBUG3_11(x) do {x;} while (0)
79#define DEBUG2_3_11(x) do {x;} while (0);
80#define DEBUG3_11(x) do {x;} while (0);
81#else 59#else
82#define DEBUG3(x) do {} while (0); 60#define DEBUG3(x) do {} while (0)
83 #if !defined(QL_DEBUG_LEVEL_2)
84 #define DEBUG2_3(x) do {} while (0);
85 #endif
86#endif 61#endif
87 62
88#if defined(QL_DEBUG_LEVEL_4) 63#if defined(QL_DEBUG_LEVEL_4)
89#define DEBUG4(x) do {x;} while (0); 64#define DEBUG4(x) do {x;} while (0)
90#else 65#else
91#define DEBUG4(x) do {} while (0); 66#define DEBUG4(x) do {} while (0)
92#endif 67#endif
93 68
94#if defined(QL_DEBUG_LEVEL_5) 69#if defined(QL_DEBUG_LEVEL_5)
95#define DEBUG5(x) do {x;} while (0); 70#define DEBUG5(x) do {x;} while (0)
96#else 71#else
97#define DEBUG5(x) do {} while (0); 72#define DEBUG5(x) do {} while (0)
98#endif 73#endif
99 74
100#if defined(QL_DEBUG_LEVEL_7) 75#if defined(QL_DEBUG_LEVEL_7)
101#define DEBUG7(x) do {x;} while (0); 76#define DEBUG7(x) do {x;} while (0)
102#else 77#else
103#define DEBUG7(x) do {} while (0); 78#define DEBUG7(x) do {} while (0)
104#endif 79#endif
105 80
106#if defined(QL_DEBUG_LEVEL_9) 81#if defined(QL_DEBUG_LEVEL_9)
107#define DEBUG9(x) do {x;} while (0); 82#define DEBUG9(x) do {x;} while (0)
108#define DEBUG9_10(x) do {x;} while (0); 83#define DEBUG9_10(x) do {x;} while (0)
109#define DEBUG2_9_10(x) do {x;} while (0);
110#else 84#else
111#define DEBUG9(x) do {} while (0); 85#define DEBUG9(x) do {} while (0)
112#endif 86#endif
113 87
114#if defined(QL_DEBUG_LEVEL_10) 88#if defined(QL_DEBUG_LEVEL_10)
115#define DEBUG10(x) do {x;} while (0); 89#define DEBUG10(x) do {x;} while (0)
116#define DEBUG2_9_10(x) do {x;} while (0); 90#define DEBUG9_10(x) do {x;} while (0)
117#define DEBUG9_10(x) do {x;} while (0);
118#else 91#else
119#define DEBUG10(x) do {} while (0); 92#define DEBUG10(x) do {} while (0)
120 #if !defined(DEBUG2_9_10)
121 #define DEBUG2_9_10(x) do {} while (0);
122 #endif
123 #if !defined(DEBUG9_10) 93 #if !defined(DEBUG9_10)
124 #define DEBUG9_10(x) do {} while (0); 94 #define DEBUG9_10(x) do {} while (0)
125 #endif 95 #endif
126#endif 96#endif
127 97
128#if defined(QL_DEBUG_LEVEL_11) 98#if defined(QL_DEBUG_LEVEL_11)
129#define DEBUG11(x) do{x;} while(0); 99#define DEBUG11(x) do{x;} while(0)
130#if !defined(DEBUG2_11)
131#define DEBUG2_11(x) do{x;} while(0);
132#endif
133#if !defined(DEBUG2_3_11)
134#define DEBUG2_3_11(x) do{x;} while(0);
135#endif
136#if !defined(DEBUG3_11) 100#if !defined(DEBUG3_11)
137#define DEBUG3_11(x) do{x;} while(0); 101#define DEBUG3_11(x) do{x;} while(0)
138#endif 102#endif
139#else 103#else
140#define DEBUG11(x) do{} while(0); 104#define DEBUG11(x) do{} while(0)
141 #if !defined(QL_DEBUG_LEVEL_2)
142 #define DEBUG2_11(x) do{} while(0);
143 #if !defined(QL_DEBUG_LEVEL_3)
144 #define DEBUG2_3_11(x) do{} while(0);
145 #endif
146 #endif
147 #if !defined(QL_DEBUG_LEVEL_3) 105 #if !defined(QL_DEBUG_LEVEL_3)
148 #define DEBUG3_11(x) do{} while(0); 106 #define DEBUG3_11(x) do{} while(0)
149 #endif 107 #endif
150#endif 108#endif
151 109
152#if defined(QL_DEBUG_LEVEL_12) 110#if defined(QL_DEBUG_LEVEL_12)
153#define DEBUG12(x) do {x;} while (0); 111#define DEBUG12(x) do {x;} while (0)
154#else 112#else
155#define DEBUG12(x) do {} while (0); 113#define DEBUG12(x) do {} while (0)
156#endif 114#endif
157 115
158#if defined(QL_DEBUG_LEVEL_13) 116#if defined(QL_DEBUG_LEVEL_13)
159#define DEBUG13(x) do {x;} while (0) 117#define DEBUG13(x) do {x;} while (0)
160#if !defined(DEBUG2_13)
161#define DEBUG2_13(x) do {x;} while(0)
162#endif
163#else 118#else
164#define DEBUG13(x) do {} while (0) 119#define DEBUG13(x) do {} while (0)
165#if !defined(QL_DEBUG_LEVEL_2)
166#define DEBUG2_13(x) do {} while(0)
167#endif
168#endif 120#endif
169 121
170#if defined(QL_DEBUG_LEVEL_14) 122#if defined(QL_DEBUG_LEVEL_14)
@@ -176,9 +128,6 @@
176/* 128/*
177 * Firmware Dump structure definition 129 * Firmware Dump structure definition
178 */ 130 */
179#define FW_DUMP_SIZE_128K 0xBC000
180#define FW_DUMP_SIZE_512K 0x2FC000
181#define FW_DUMP_SIZE_1M 0x5FC000
182 131
183struct qla2300_fw_dump { 132struct qla2300_fw_dump {
184 uint16_t hccr; 133 uint16_t hccr;
@@ -224,8 +173,6 @@ struct qla2100_fw_dump {
224 uint16_t risc_ram[0xf000]; 173 uint16_t risc_ram[0xf000];
225}; 174};
226 175
227#define FW_DUMP_SIZE_24XX 0x2B0000
228
229struct qla24xx_fw_dump { 176struct qla24xx_fw_dump {
230 uint32_t host_status; 177 uint32_t host_status;
231 uint32_t host_reg[32]; 178 uint32_t host_reg[32];
@@ -257,3 +204,39 @@ struct qla24xx_fw_dump {
257 uint32_t code_ram[0x2000]; 204 uint32_t code_ram[0x2000];
258 uint32_t ext_mem[1]; 205 uint32_t ext_mem[1];
259}; 206};
207
208#define EFT_NUM_BUFFERS 4
209#define EFT_BYTES_PER_BUFFER 0x4000
210#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
211
212struct qla2xxx_fw_dump {
213 uint8_t signature[4];
214 uint32_t version;
215
216 uint32_t fw_major_version;
217 uint32_t fw_minor_version;
218 uint32_t fw_subminor_version;
219 uint32_t fw_attributes;
220
221 uint32_t vendor;
222 uint32_t device;
223 uint32_t subsystem_vendor;
224 uint32_t subsystem_device;
225
226 uint32_t fixed_size;
227 uint32_t mem_size;
228 uint32_t req_q_size;
229 uint32_t rsp_q_size;
230
231 uint32_t eft_size;
232 uint32_t eft_addr_l;
233 uint32_t eft_addr_h;
234
235 uint32_t header_size;
236
237 union {
238 struct qla2100_fw_dump isp21;
239 struct qla2300_fw_dump isp23;
240 struct qla24xx_fw_dump isp24;
241 } isp;
242};
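
The qla_dbg.h hunk above makes three related changes. Every DEBUG wrapper loses its trailing semicolon, so "do { x; } while (0);" becomes "do { x; } while (0)" and the call site supplies the terminator. The whole DEBUG2* family collapses into a single definition gated at run time on the new extended_error_logging flag, replacing the maze of nested #if !defined() fallbacks. And the fixed FW_DUMP_SIZE_* constants give way to a self-describing qla2xxx_fw_dump header. The semicolon change is the classic do/while(0) hygiene fix; a minimal sketch of the failure it prevents (BAD_DEBUG/GOOD_DEBUG are illustrative names, not from the driver):

    #define BAD_DEBUG(x)  do { x; } while (0);  /* old: ';' baked into the macro */
    #define GOOD_DEBUG(x) do { x; } while (0)   /* new: caller adds the ';' */

    /*
     * if (cond)
     *         BAD_DEBUG(printk("hit\n"));   expands to: do { ... } while (0);;
     * else                                  the extra empty statement orphans
     *         printk("miss\n");             the else -> compile error.
     *
     * GOOD_DEBUG expands to exactly one statement, so the if/else pairs up.
     */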
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6734453ea28a..139ea0e27fd7 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -608,7 +608,9 @@ typedef struct {
608#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */ 608#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
609#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */ 609#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */
610#define MBC_GET_TIMEOUT_PARAMS 0x22 /* Get FW timeouts. */ 610#define MBC_GET_TIMEOUT_PARAMS 0x22 /* Get FW timeouts. */
611#define MBC_TRACE_CONTROL 0x27 /* Trace control command. */
611#define MBC_GEN_SYSTEM_ERROR 0x2a /* Generate System Error. */ 612#define MBC_GEN_SYSTEM_ERROR 0x2a /* Generate System Error. */
613#define MBC_READ_SFP 0x31 /* Read SFP Data. */
612#define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */ 614#define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */
613#define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */ 615#define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */
614#define MBC_MID_GET_VP_DATABASE 0x49 /* MID Get VP Database. */ 616#define MBC_MID_GET_VP_DATABASE 0x49 /* MID Get VP Database. */
@@ -618,6 +620,9 @@ typedef struct {
618#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ 620#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
619#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */ 621#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
620 622
623#define TC_ENABLE 4
624#define TC_DISABLE 5
625
621/* Firmware return data sizes */ 626/* Firmware return data sizes */
622#define FCAL_MAP_SIZE 128 627#define FCAL_MAP_SIZE 128
623 628
@@ -1997,7 +2002,6 @@ struct isp_operations {
1997 uint32_t); 2002 uint32_t);
1998 2003
1999 void (*fw_dump) (struct scsi_qla_host *, int); 2004 void (*fw_dump) (struct scsi_qla_host *, int);
2000 void (*ascii_fw_dump) (struct scsi_qla_host *);
2001 2005
2002 int (*beacon_on) (struct scsi_qla_host *); 2006 int (*beacon_on) (struct scsi_qla_host *);
2003 int (*beacon_off) (struct scsi_qla_host *); 2007 int (*beacon_off) (struct scsi_qla_host *);
@@ -2041,6 +2045,7 @@ typedef struct scsi_qla_host {
2041 uint32_t enable_led_scheme :1; 2045 uint32_t enable_led_scheme :1;
2042 uint32_t msi_enabled :1; 2046 uint32_t msi_enabled :1;
2043 uint32_t msix_enabled :1; 2047 uint32_t msix_enabled :1;
2048 uint32_t disable_serdes :1;
2044 } flags; 2049 } flags;
2045 2050
2046 atomic_t loop_state; 2051 atomic_t loop_state;
@@ -2238,6 +2243,11 @@ typedef struct scsi_qla_host {
2238 struct sns_cmd_pkt *sns_cmd; 2243 struct sns_cmd_pkt *sns_cmd;
2239 dma_addr_t sns_cmd_dma; 2244 dma_addr_t sns_cmd_dma;
2240 2245
2246#define SFP_DEV_SIZE 256
2247#define SFP_BLOCK_SIZE 64
2248 void *sfp_data;
2249 dma_addr_t sfp_data_dma;
2250
2241 struct task_struct *dpc_thread; 2251 struct task_struct *dpc_thread;
2242 uint8_t dpc_active; /* DPC routine is active */ 2252 uint8_t dpc_active; /* DPC routine is active */
2243 2253
@@ -2303,11 +2313,12 @@ typedef struct scsi_qla_host {
2303 uint16_t fw_seriallink_options24[4]; 2313 uint16_t fw_seriallink_options24[4];
2304 2314
2305 /* Firmware dump information. */ 2315 /* Firmware dump information. */
2306 void *fw_dump; 2316 struct qla2xxx_fw_dump *fw_dump;
2317 uint32_t fw_dump_len;
2307 int fw_dumped; 2318 int fw_dumped;
2308 int fw_dump_reading; 2319 int fw_dump_reading;
2309 char *fw_dump_buffer; 2320 dma_addr_t eft_dma;
2310 int fw_dump_buffer_len; 2321 void *eft;
2311 2322
2312 uint8_t host_str[16]; 2323 uint8_t host_str[16];
2313 uint32_t pci_attr; 2324 uint32_t pci_attr;
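
With fw_dump now a typed struct qla2xxx_fw_dump * paired with fw_dump_len, and the intermediate ASCII fw_dump_buffer gone, a consumer can hand the binary blob out directly and bound every copy by the recorded length. A hedged sketch of such a reader (the function name and signature are illustrative, not the driver's real attribute hook):

    static ssize_t fw_dump_read_sketch(scsi_qla_host_t *ha, char *buf,
        loff_t off, size_t count)
    {
            /* Nothing captured yet, or a read past the end: report EOF. */
            if (!ha->fw_dumped || !ha->fw_dump || off >= ha->fw_dump_len)
                    return 0;
            /* Clamp the copy to the recorded dump length. */
            if (off + count > ha->fw_dump_len)
                    count = ha->fw_dump_len - off;
            memcpy(buf, (char *)ha->fw_dump + off, count);
            return count;
    }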
diff --git a/drivers/scsi/qla2xxx/qla_devtbl.h b/drivers/scsi/qla2xxx/qla_devtbl.h
index a8fc0ffc7fc5..dd435410dfa2 100644
--- a/drivers/scsi/qla2xxx/qla_devtbl.h
+++ b/drivers/scsi/qla2xxx/qla_devtbl.h
@@ -1,4 +1,4 @@
1#define QLA_MODEL_NAMES 0x4A 1#define QLA_MODEL_NAMES 0x57
2 2
3/* 3/*
4 * Adapter model names and descriptions. 4 * Adapter model names and descriptions.
@@ -76,6 +76,19 @@ static char *qla2x00_model_name[QLA_MODEL_NAMES*2] = {
76 "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */ 76 "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */
77 "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */ 77 "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */
78 "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */ 78 "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */
79 " ", " ", /* 0x148 */ 79 "HP AE369A", "PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x148 */
80 "QLA2340", "Sun 133MHz PCI-X to 2Gb FC, Single Channel", /* 0x149 */ 80 "QLA2340", "Sun 133MHz PCI-X to 2Gb FC, Single Channel", /* 0x149 */
81 " ", " ", /* 0x14a */
82 " ", " ", /* 0x14b */
83 "QMC2432M", "IBM eServer BC 4Gb FC Expansion Card CFFE", /* 0x14c */
84 "QMC2422M", "IBM eServer BC 4Gb FC Expansion Card CFFX", /* 0x14d */
85 "QLE220", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x14e */
86 " ", " ", /* 0x14f */
87 " ", " ", /* 0x150 */
88 " ", " ", /* 0x151 */
89 "QME2462", "PCI-Express to 4Gb FC, Dual Channel Mezz HBA", /* 0x152 */
90 "QMH2462", "PCI-Express to 4Gb FC, Dual Channel Mezz HBA", /* 0x153 */
91 " ", " ", /* 0x154 */
92 "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x155 */
93 "QLE220", "PCI-Express to 4Gb FC, Single Channel", /* 0x156 */
81}; 94};
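
qla_devtbl.h grows QLA_MODEL_NAMES from 0x4A to 0x57 entries. The table stores a name/description pair per board ID, and the " " rows keep unassigned IDs aligned (the /* 0x1xx */ comments suggest the index is the low byte of the board ID). A hedged lookup sketch over the pair layout (the helper name is hypothetical):

    static void model_lookup_sketch(uint8_t index, char **name, char **desc)
    {
            /* Entry i occupies slots 2*i (name) and 2*i+1 (description). */
            if (index < QLA_MODEL_NAMES) {
                    *name = qla2x00_model_name[index * 2];
                    *desc = qla2x00_model_name[index * 2 + 1];
            } else {
                    *name = NULL;
                    *desc = NULL;
            }
    }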
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 3af478663be7..a0a722cf4237 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -141,7 +141,7 @@ struct nvram_24xx {
141 * BIT 2 = Enable Memory Map BIOS 141 * BIT 2 = Enable Memory Map BIOS
142 * BIT 3 = Enable Selectable Boot 142 * BIT 3 = Enable Selectable Boot
143 * BIT 4 = Disable RISC code load 143 * BIT 4 = Disable RISC code load
144 * BIT 5 = 144 * BIT 5 = Disable Serdes
145 * BIT 6 = 145 * BIT 6 =
146 * BIT 7 = 146 * BIT 7 =
147 * 147 *
@@ -278,7 +278,7 @@ struct init_cb_24xx {
278 uint16_t response_q_length; 278 uint16_t response_q_length;
279 uint16_t request_q_length; 279 uint16_t request_q_length;
280 280
281 uint16_t link_down_timeout; /* Milliseconds. */ 281 uint16_t link_down_on_nos; /* Milliseconds. */
282 282
283 uint16_t prio_request_q_length; 283 uint16_t prio_request_q_length;
284 284
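
The qla_fw.h hunks document NVRAM host_p BIT 5 as "Disable Serdes" and rename init_cb_24xx's link_down_timeout to link_down_on_nos; both pair with code further down. For reference, the decode of the new bit, exactly as it appears in the qla24xx_nvram_config() hunk below:

    /* NVRAM host_p BIT 5 -> ha->flags.disable_serdes; when set,
     * qla2x00_initialize_adapter() bails out before loading firmware. */
    ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;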
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 164d53ccbfd0..8311ac2b93a8 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -31,13 +31,9 @@ extern void qla2x00_update_fw_options(struct scsi_qla_host *);
31extern void qla24xx_update_fw_options(scsi_qla_host_t *); 31extern void qla24xx_update_fw_options(scsi_qla_host_t *);
32extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *); 32extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
33extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); 33extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
34extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *);
35
36extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
37 34
38extern int qla2x00_loop_resync(scsi_qla_host_t *); 35extern int qla2x00_loop_resync(scsi_qla_host_t *);
39 36
40extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
41extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); 37extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
42extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); 38extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
43 39
@@ -51,6 +47,8 @@ extern int qla2x00_abort_isp(scsi_qla_host_t *);
51extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); 47extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
52extern void qla2x00_reg_remote_port(scsi_qla_host_t *, fc_port_t *); 48extern void qla2x00_reg_remote_port(scsi_qla_host_t *, fc_port_t *);
53 49
50extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
51
54/* 52/*
55 * Global Data in qla_os.c source file. 53 * Global Data in qla_os.c source file.
56 */ 54 */
@@ -61,6 +59,8 @@ extern int qlport_down_retry;
61extern int ql2xplogiabsentdevice; 59extern int ql2xplogiabsentdevice;
62extern int ql2xloginretrycount; 60extern int ql2xloginretrycount;
63extern int ql2xfdmienable; 61extern int ql2xfdmienable;
62extern int ql2xallocfwdump;
63extern int extended_error_logging;
64 64
65extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *); 65extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
66 66
@@ -80,8 +80,6 @@ extern void qla2xxx_wake_dpc(scsi_qla_host_t *);
80/* 80/*
81 * Global Function Prototypes in qla_iocb.c source file. 81 * Global Function Prototypes in qla_iocb.c source file.
82 */ 82 */
83extern void qla2x00_isp_cmd(scsi_qla_host_t *);
84
85extern uint16_t qla2x00_calc_iocbs_32(uint16_t); 83extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
86extern uint16_t qla2x00_calc_iocbs_64(uint16_t); 84extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
87extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); 85extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -204,6 +202,12 @@ qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
204extern int 202extern int
205qla2x00_stop_firmware(scsi_qla_host_t *); 203qla2x00_stop_firmware(scsi_qla_host_t *);
206 204
205extern int
206qla2x00_trace_control(scsi_qla_host_t *, uint16_t, dma_addr_t, uint16_t);
207
208extern int
209qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t);
210
207/* 211/*
208 * Global Function Prototypes in qla_isr.c source file. 212 * Global Function Prototypes in qla_isr.c source file.
209 */ 213 */
@@ -254,9 +258,6 @@ extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
254extern void qla2100_fw_dump(scsi_qla_host_t *, int); 258extern void qla2100_fw_dump(scsi_qla_host_t *, int);
255extern void qla2300_fw_dump(scsi_qla_host_t *, int); 259extern void qla2300_fw_dump(scsi_qla_host_t *, int);
256extern void qla24xx_fw_dump(scsi_qla_host_t *, int); 260extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
257extern void qla2100_ascii_fw_dump(scsi_qla_host_t *);
258extern void qla2300_ascii_fw_dump(scsi_qla_host_t *);
259extern void qla24xx_ascii_fw_dump(scsi_qla_host_t *);
260extern void qla2x00_dump_regs(scsi_qla_host_t *); 261extern void qla2x00_dump_regs(scsi_qla_host_t *);
261extern void qla2x00_dump_buffer(uint8_t *, uint32_t); 262extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
262extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *); 263extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *);
@@ -280,13 +281,6 @@ extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
280extern int qla2x00_fdmi_register(scsi_qla_host_t *); 281extern int qla2x00_fdmi_register(scsi_qla_host_t *);
281 282
282/* 283/*
283 * Global Function Prototypes in qla_xioctl.c source file.
284 */
285#define qla2x00_enqueue_aen(ha, cmd, mode) do { } while (0)
286#define qla2x00_alloc_ioctl_mem(ha) (0)
287#define qla2x00_free_ioctl_mem(ha) do { } while (0)
288
289/*
290 * Global Function Prototypes in qla_attr.c source file. 284 * Global Function Prototypes in qla_attr.c source file.
291 */ 285 */
292struct class_device_attribute; 286struct class_device_attribute;
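
qla_gbl.h now exports two new mailbox helpers, qla2x00_trace_control() and qla2x00_read_sfp(), alongside the relocated qla2x00_alloc_fw_dump(). Trace-control usage is visible in the qla_init.c hunk below; for the SFP side, a hedged sketch of pulling a full device page through the SFP_BLOCK_SIZE bounce buffer (the prototype above does not name its three uint16_t parameters, so device address/offset/length is an assumption here, as is the helper's name):

    static int sfp_fill_sketch(scsi_qla_host_t *ha, uint16_t dev_addr,
        uint8_t *out)
    {
            uint16_t off;
            int rval = QLA_SUCCESS;

            for (off = 0; off < SFP_DEV_SIZE && rval == QLA_SUCCESS;
                off += SFP_BLOCK_SIZE) {
                    rval = qla2x00_read_sfp(ha, ha->sfp_data_dma, dev_addr,
                        off, SFP_BLOCK_SIZE);
                    if (rval == QLA_SUCCESS)
                            /* Copy each block out before the next read
                             * reuses the shared DMA bounce buffer. */
                            memcpy(out + off, ha->sfp_data, SFP_BLOCK_SIZE);
            }
            return rval;
    }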
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3d4487eac9b7..9758dba95542 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -39,6 +39,8 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
39 39
40static int qla2x00_restart_isp(scsi_qla_host_t *); 40static int qla2x00_restart_isp(scsi_qla_host_t *);
41 41
42static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev);
43
42/****************************************************************************/ 44/****************************************************************************/
43/* QLogic ISP2x00 Hardware Support Functions. */ 45/* QLogic ISP2x00 Hardware Support Functions. */
44/****************************************************************************/ 46/****************************************************************************/
@@ -89,6 +91,17 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
89 91
90 ha->isp_ops.nvram_config(ha); 92 ha->isp_ops.nvram_config(ha);
91 93
94 if (ha->flags.disable_serdes) {
95 /* Mask HBA via NVRAM settings? */
96 qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
97 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
98 ha->port_name[0], ha->port_name[1],
99 ha->port_name[2], ha->port_name[3],
100 ha->port_name[4], ha->port_name[5],
101 ha->port_name[6], ha->port_name[7]);
102 return QLA_FUNCTION_FAILED;
103 }
104
92 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 105 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
93 106
94 retry = 10; 107 retry = 10;
@@ -770,29 +783,104 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
770 return rval; 783 return rval;
771} 784}
772 785
773static void 786void
774qla2x00_alloc_fw_dump(scsi_qla_host_t *ha) 787qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
775{ 788{
776 uint32_t dump_size = 0; 789 int rval;
790 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
791 eft_size;
792 dma_addr_t eft_dma;
793 void *eft;
794
795 if (ha->fw_dump) {
796 qla_printk(KERN_WARNING, ha,
797 "Firmware dump previously allocated.\n");
798 return;
799 }
777 800
778 ha->fw_dumped = 0; 801 ha->fw_dumped = 0;
802 fixed_size = mem_size = eft_size = 0;
779 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 803 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
780 dump_size = sizeof(struct qla2100_fw_dump); 804 fixed_size = sizeof(struct qla2100_fw_dump);
781 } else if (IS_QLA23XX(ha)) { 805 } else if (IS_QLA23XX(ha)) {
782 dump_size = sizeof(struct qla2300_fw_dump); 806 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
783 dump_size += (ha->fw_memory_size - 0x11000) * sizeof(uint16_t); 807 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
784 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 808 sizeof(uint16_t);
785 dump_size = sizeof(struct qla24xx_fw_dump); 809 } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
786 dump_size += (ha->fw_memory_size - 0x100000) * sizeof(uint32_t); 810 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
811 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
812 sizeof(uint32_t);
813
814 /* Allocate memory for Extended Trace Buffer. */
815 eft = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &eft_dma,
816 GFP_KERNEL);
817 if (!eft) {
818 qla_printk(KERN_WARNING, ha, "Unable to allocate "
819 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
820 goto cont_alloc;
821 }
822
823 rval = qla2x00_trace_control(ha, TC_ENABLE, eft_dma,
824 EFT_NUM_BUFFERS);
825 if (rval) {
826 qla_printk(KERN_WARNING, ha, "Unable to initialize "
827 "EFT (%d).\n", rval);
828 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, eft,
829 eft_dma);
830 goto cont_alloc;
831 }
832
833 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
834 EFT_SIZE / 1024);
835
836 eft_size = EFT_SIZE;
837 memset(eft, 0, eft_size);
838 ha->eft_dma = eft_dma;
839 ha->eft = eft;
787 } 840 }
841cont_alloc:
842 req_q_size = ha->request_q_length * sizeof(request_t);
843 rsp_q_size = ha->response_q_length * sizeof(response_t);
844
845 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
846 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
847 eft_size;
788 848
789 ha->fw_dump = vmalloc(dump_size); 849 ha->fw_dump = vmalloc(dump_size);
790 if (ha->fw_dump) 850 if (!ha->fw_dump) {
791 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware "
792 "dump...\n", dump_size / 1024);
793 else
794 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " 851 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
795 "firmware dump!!!\n", dump_size / 1024); 852 "firmware dump!!!\n", dump_size / 1024);
853
854 if (ha->eft) {
855 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
856 ha->eft_dma);
857 ha->eft = NULL;
858 ha->eft_dma = 0;
859 }
860 return;
861 }
862
863 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
864 dump_size / 1024);
865
866 ha->fw_dump_len = dump_size;
867 ha->fw_dump->signature[0] = 'Q';
868 ha->fw_dump->signature[1] = 'L';
869 ha->fw_dump->signature[2] = 'G';
870 ha->fw_dump->signature[3] = 'C';
871 ha->fw_dump->version = __constant_htonl(1);
872
873 ha->fw_dump->fixed_size = htonl(fixed_size);
874 ha->fw_dump->mem_size = htonl(mem_size);
875 ha->fw_dump->req_q_size = htonl(req_q_size);
876 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
877
878 ha->fw_dump->eft_size = htonl(eft_size);
879 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
880 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
881
882 ha->fw_dump->header_size =
883 htonl(offsetof(struct qla2xxx_fw_dump, isp));
796} 884}
797 885
798/** 886/**
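
The rewritten qla2x00_alloc_fw_dump() above sizes one vmalloc'ed blob as header + fixed registers + firmware memory + request/response queues + EFT, and records each section length big-endian in the new header, so a post-mortem tool can walk the dump without knowing the ISP type. The allocation also moved out of qla2x00_resize_request_q() and is now invoked from qla2x00_setup_chip() only when the new ql2xallocfwdump module parameter allows it. A hedged sketch of the matching teardown, mirroring the allocation in reverse (the driver's real free path is not part of this hunk, and the function name is hypothetical):

    static void free_fw_dump_sketch(scsi_qla_host_t *ha)
    {
            if (ha->eft) {
                    /* Stop the firmware writing into the buffer first. */
                    qla2x00_trace_control(ha, TC_DISABLE, ha->eft_dma,
                        EFT_NUM_BUFFERS);
                    dma_free_coherent(&ha->pdev->dev, EFT_SIZE, ha->eft,
                        ha->eft_dma);
                    ha->eft = NULL;
                    ha->eft_dma = 0;
            }
            vfree(ha->fw_dump);
            ha->fw_dump = NULL;
            ha->fw_dump_len = 0;
    }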
@@ -810,8 +898,6 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
810 dma_addr_t request_dma; 898 dma_addr_t request_dma;
811 request_t *request_ring; 899 request_t *request_ring;
812 900
813 qla2x00_alloc_fw_dump(ha);
814
815 /* Valid only on recent ISPs. */ 901 /* Valid only on recent ISPs. */
816 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 902 if (IS_QLA2100(ha) || IS_QLA2200(ha))
817 return; 903 return;
@@ -883,6 +969,9 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
883 &ha->fw_subminor_version, 969 &ha->fw_subminor_version,
884 &ha->fw_attributes, &ha->fw_memory_size); 970 &ha->fw_attributes, &ha->fw_memory_size);
885 qla2x00_resize_request_q(ha); 971 qla2x00_resize_request_q(ha);
972
973 if (ql2xallocfwdump)
974 qla2x00_alloc_fw_dump(ha);
886 } 975 }
887 } else { 976 } else {
888 DEBUG2(printk(KERN_INFO 977 DEBUG2(printk(KERN_INFO
@@ -1186,8 +1275,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1186 rval = QLA_FUNCTION_FAILED; 1275 rval = QLA_FUNCTION_FAILED;
1187 1276
1188 if (atomic_read(&ha->loop_down_timer) && 1277 if (atomic_read(&ha->loop_down_timer) &&
1189 (fw_state >= FSTATE_LOSS_OF_SYNC || 1278 fw_state != FSTATE_READY) {
1190 fw_state == FSTATE_WAIT_AL_PA)) {
1191 /* Loop down. Timeout on min_wait for states 1279 /* Loop down. Timeout on min_wait for states
1192 * other than Wait for Login. 1280 * other than Wait for Login.
1193 */ 1281 */
@@ -1555,6 +1643,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1555 /* 1643 /*
1556 * Set host adapter parameters. 1644 * Set host adapter parameters.
1557 */ 1645 */
1646 if (nv->host_p[0] & BIT_7)
1647 extended_error_logging = 1;
1558 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); 1648 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
1559 /* Always load RISC code on non ISP2[12]00 chips. */ 1649 /* Always load RISC code on non ISP2[12]00 chips. */
1560 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) 1650 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -1563,6 +1653,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1563 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0); 1653 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
1564 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0); 1654 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
1565 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0; 1655 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
1656 ha->flags.disable_serdes = 0;
1566 1657
1567 ha->operating_mode = 1658 ha->operating_mode =
1568 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4; 1659 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -1701,7 +1792,7 @@ qla2x00_rport_del(void *data)
1701 * 1792 *
1702 * Returns a pointer to the allocated fcport, or NULL, if none available. 1793 * Returns a pointer to the allocated fcport, or NULL, if none available.
1703 */ 1794 */
1704fc_port_t * 1795static fc_port_t *
1705qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags) 1796qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1706{ 1797{
1707 fc_port_t *fcport; 1798 fc_port_t *fcport;
@@ -2497,7 +2588,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2497 * Context: 2588 * Context:
2498 * Kernel context. 2589 * Kernel context.
2499 */ 2590 */
2500int 2591static int
2501qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev) 2592qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2502{ 2593{
2503 int rval; 2594 int rval;
@@ -3048,14 +3139,14 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3048 ha->isp_abort_cnt--; 3139 ha->isp_abort_cnt--;
3049 DEBUG(printk("qla%ld: ISP abort - " 3140 DEBUG(printk("qla%ld: ISP abort - "
3050 "retry remaining %d\n", 3141 "retry remaining %d\n",
3051 ha->host_no, ha->isp_abort_cnt);) 3142 ha->host_no, ha->isp_abort_cnt));
3052 status = 1; 3143 status = 1;
3053 } 3144 }
3054 } else { 3145 } else {
3055 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3146 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3056 DEBUG(printk("qla2x00(%ld): ISP error recovery " 3147 DEBUG(printk("qla2x00(%ld): ISP error recovery "
3057 "- retrying (%d) more times\n", 3148 "- retrying (%d) more times\n",
3058 ha->host_no, ha->isp_abort_cnt);) 3149 ha->host_no, ha->isp_abort_cnt));
3059 set_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3150 set_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3060 status = 1; 3151 status = 1;
3061 } 3152 }
@@ -3069,7 +3160,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3069 } else { 3160 } else {
3070 DEBUG(printk(KERN_INFO 3161 DEBUG(printk(KERN_INFO
3071 "qla2x00_abort_isp(%ld): exiting.\n", 3162 "qla2x00_abort_isp(%ld): exiting.\n",
3072 ha->host_no);) 3163 ha->host_no));
3073 } 3164 }
3074 3165
3075 return(status); 3166 return(status);
@@ -3145,7 +3236,7 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
3145 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3236 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
3146 if (!(status = qla2x00_fw_ready(ha))) { 3237 if (!(status = qla2x00_fw_ready(ha))) {
3147 DEBUG(printk("%s(): Start configure loop, " 3238 DEBUG(printk("%s(): Start configure loop, "
3148 "status = %d\n", __func__, status);) 3239 "status = %d\n", __func__, status));
3149 3240
3150 /* Issue a marker after FW becomes ready. */ 3241 /* Issue a marker after FW becomes ready. */
3151 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 3242 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
@@ -3169,7 +3260,7 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
3169 3260
3170 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 3261 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
3171 __func__, 3262 __func__,
3172 status);) 3263 status));
3173 } 3264 }
3174 return (status); 3265 return (status);
3175} 3266}
@@ -3289,7 +3380,6 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3289 nv->node_name[6] = 0x55; 3380 nv->node_name[6] = 0x55;
3290 nv->node_name[7] = 0x86; 3381 nv->node_name[7] = 0x86;
3291 nv->login_retry_count = __constant_cpu_to_le16(8); 3382 nv->login_retry_count = __constant_cpu_to_le16(8);
3292 nv->link_down_timeout = __constant_cpu_to_le16(200);
3293 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 3383 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
3294 nv->login_timeout = __constant_cpu_to_le16(0); 3384 nv->login_timeout = __constant_cpu_to_le16(0);
3295 nv->firmware_options_1 = 3385 nv->firmware_options_1 =
@@ -3318,7 +3408,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3318 *dptr1++ = *dptr2++; 3408 *dptr1++ = *dptr2++;
3319 3409
3320 icb->login_retry_count = nv->login_retry_count; 3410 icb->login_retry_count = nv->login_retry_count;
3321 icb->link_down_timeout = nv->link_down_timeout; 3411 icb->link_down_on_nos = nv->link_down_on_nos;
3322 3412
3323 /* Copy 2nd segment. */ 3413 /* Copy 2nd segment. */
3324 dptr1 = (uint8_t *)&icb->interrupt_delay_timer; 3414 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
@@ -3373,6 +3463,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3373 ha->flags.enable_lip_full_login = 1; 3463 ha->flags.enable_lip_full_login = 1;
3374 ha->flags.enable_target_reset = 1; 3464 ha->flags.enable_target_reset = 1;
3375 ha->flags.enable_led_scheme = 0; 3465 ha->flags.enable_led_scheme = 0;
3466 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
3376 3467
3377 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & 3468 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
3378 (BIT_6 | BIT_5 | BIT_4)) >> 4; 3469 (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -3472,7 +3563,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3472 return (rval); 3563 return (rval);
3473} 3564}
3474 3565
3475int 3566static int
3476qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr) 3567qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3477{ 3568{
3478 int rval; 3569 int rval;
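
The many ");)" to "));" edits in this file, and in qla_isr.c and qla_mbx.c below, are the call-site half of the qla_dbg.h macro cleanup: once the wrappers stopped supplying their own semicolon, the terminator belongs after the closing parenthesis rather than inside the argument list. Expansion of one representative call:

    /* old:  DEBUG(printk("exiting.\n", ha->host_no);)
     *  ->   do { printk("exiting.\n", ha->host_no);; } while (0);
     *                                              ^^ stray empty statement
     * new:  DEBUG(printk("exiting.\n", ha->host_no));
     *  ->   do { printk("exiting.\n", ha->host_no); } while (0);
     */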
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8c769cfaa14c..2b60a27eff0b 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -15,6 +15,7 @@ static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
15static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *); 15static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
16static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *); 16static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
17static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha); 17static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
18static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
18 19
19/** 20/**
20 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 21 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -574,7 +575,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
574 * 575 *
575 * Note: The caller must hold the hardware lock before calling this routine. 576 * Note: The caller must hold the hardware lock before calling this routine.
576 */ 577 */
577void 578static void
578qla2x00_isp_cmd(scsi_qla_host_t *ha) 579qla2x00_isp_cmd(scsi_qla_host_t *ha)
579{ 580{
580 device_reg_t __iomem *reg = ha->iobase; 581 device_reg_t __iomem *reg = ha->iobase;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b28ac0a27e25..795bf15b1b8f 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -395,10 +395,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
395 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 395 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
396 396
397 ha->flags.management_server_logged_in = 0; 397 ha->flags.management_server_logged_in = 0;
398
399 /* Update AEN queue. */
400 qla2x00_enqueue_aen(ha, MBA_LIP_OCCURRED, NULL);
401
402 break; 398 break;
403 399
404 case MBA_LOOP_UP: /* Loop Up Event */ 400 case MBA_LOOP_UP: /* Loop Up Event */
@@ -418,9 +414,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
418 link_speed); 414 link_speed);
419 415
420 ha->flags.management_server_logged_in = 0; 416 ha->flags.management_server_logged_in = 0;
421
422 /* Update AEN queue. */
423 qla2x00_enqueue_aen(ha, MBA_LOOP_UP, NULL);
424 break; 417 break;
425 418
426 case MBA_LOOP_DOWN: /* Loop Down Event */ 419 case MBA_LOOP_DOWN: /* Loop Down Event */
@@ -439,9 +432,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
439 ha->link_data_rate = LDR_UNKNOWN; 432 ha->link_data_rate = LDR_UNKNOWN;
440 if (ql2xfdmienable) 433 if (ql2xfdmienable)
441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 434 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
442
443 /* Update AEN queue. */
444 qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL);
445 break; 435 break;
446 436
447 case MBA_LIP_RESET: /* LIP reset occurred */ 437 case MBA_LIP_RESET: /* LIP reset occurred */
@@ -460,10 +450,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
460 450
461 ha->operating_mode = LOOP; 451 ha->operating_mode = LOOP;
462 ha->flags.management_server_logged_in = 0; 452 ha->flags.management_server_logged_in = 0;
463
464 /* Update AEN queue. */
465 qla2x00_enqueue_aen(ha, MBA_LIP_RESET, NULL);
466
467 break; 453 break;
468 454
469 case MBA_POINT_TO_POINT: /* Point-to-Point */ 455 case MBA_POINT_TO_POINT: /* Point-to-Point */
@@ -545,9 +531,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
545 531
546 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 532 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
547 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 533 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
548
549 /* Update AEN queue. */
550 qla2x00_enqueue_aen(ha, MBA_PORT_UPDATE, NULL);
551 break; 534 break;
552 535
553 case MBA_RSCN_UPDATE: /* State Change Registration */ 536 case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -584,9 +567,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
584 567
585 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 568 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
586 set_bit(RSCN_UPDATE, &ha->dpc_flags); 569 set_bit(RSCN_UPDATE, &ha->dpc_flags);
587
588 /* Update AEN queue. */
589 qla2x00_enqueue_aen(ha, MBA_RSCN_UPDATE, &mb[0]);
590 break; 570 break;
591 571
592 /* case MBA_RIO_RESPONSE: */ 572 /* case MBA_RIO_RESPONSE: */
@@ -1452,8 +1432,8 @@ qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
1452 DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n", 1432 DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
1453 __func__, ha->host_no, pkt, pkt->handle)); 1433 __func__, ha->host_no, pkt, pkt->handle));
1454 1434
1455 DEBUG9(printk("%s: ct pkt dump:\n", __func__);) 1435 DEBUG9(printk("%s: ct pkt dump:\n", __func__));
1456 DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx));) 1436 DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));
1457 1437
1458 /* Validate handle. */ 1438 /* Validate handle. */
1459 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1439 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
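
The MBA_* handler deletions above remove only dead weight: qla2x00_enqueue_aen() was never a real function. The qla_gbl.h hunk earlier in this patch deletes its definition, which compiled every one of these "calls" into an empty statement:

    /* From the qla_gbl.h section removed above: */
    #define qla2x00_enqueue_aen(ha, cmd, mode) do { } while (0)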
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d6cb3bd1a29a..879f281e2ea2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -13,13 +13,13 @@ qla2x00_mbx_sem_timeout(unsigned long data)
13{ 13{
14 struct semaphore *sem_ptr = (struct semaphore *)data; 14 struct semaphore *sem_ptr = (struct semaphore *)data;
15 15
16 DEBUG11(printk("qla2x00_sem_timeout: entered.\n");) 16 DEBUG11(printk("qla2x00_sem_timeout: entered.\n"));
17 17
18 if (sem_ptr != NULL) { 18 if (sem_ptr != NULL) {
19 up(sem_ptr); 19 up(sem_ptr);
20 } 20 }
21 21
22 DEBUG11(printk("qla2x00_mbx_sem_timeout: exiting.\n");) 22 DEBUG11(printk("qla2x00_mbx_sem_timeout: exiting.\n"));
23} 23}
24 24
25/* 25/*
@@ -61,7 +61,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
61 rval = QLA_SUCCESS; 61 rval = QLA_SUCCESS;
62 abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 62 abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
63 63
64 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 64 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
65 65
66 /* 66 /*
67 * Wait for active mailbox commands to finish by waiting at most tov 67 * Wait for active mailbox commands to finish by waiting at most tov
@@ -72,7 +72,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
72 if (qla2x00_down_timeout(&ha->mbx_cmd_sem, mcp->tov * HZ)) { 72 if (qla2x00_down_timeout(&ha->mbx_cmd_sem, mcp->tov * HZ)) {
73 /* Timeout occurred. Return error. */ 73 /* Timeout occurred. Return error. */
74 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 74 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
75 "Exiting.\n", __func__, ha->host_no);) 75 "Exiting.\n", __func__, ha->host_no));
76 return QLA_FUNCTION_TIMEOUT; 76 return QLA_FUNCTION_TIMEOUT;
77 } 77 }
78 } 78 }
@@ -86,7 +86,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
86 spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags); 86 spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags);
87 87
88 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 88 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
89 ha->host_no, mcp->mb[0]);) 89 ha->host_no, mcp->mb[0]));
90 90
91 spin_lock_irqsave(&ha->hardware_lock, flags); 91 spin_lock_irqsave(&ha->hardware_lock, flags);
92 92
@@ -131,14 +131,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
131 131
132 /* Unlock mbx registers and wait for interrupt */ 132 /* Unlock mbx registers and wait for interrupt */
133 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " 133 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
134 "jiffies=%lx.\n", __func__, ha->host_no, jiffies);) 134 "jiffies=%lx.\n", __func__, ha->host_no, jiffies));
135 135
136 /* Wait for mbx cmd completion until timeout */ 136 /* Wait for mbx cmd completion until timeout */
137 137
138 if (!abort_active && io_lock_on) { 138 if (!abort_active && io_lock_on) {
139 /* sleep on completion semaphore */ 139 /* sleep on completion semaphore */
140 DEBUG11(printk("%s(%ld): INTERRUPT MODE. Initializing timer.\n", 140 DEBUG11(printk("%s(%ld): INTERRUPT MODE. Initializing timer.\n",
141 __func__, ha->host_no);) 141 __func__, ha->host_no));
142 142
143 init_timer(&tmp_intr_timer); 143 init_timer(&tmp_intr_timer);
144 tmp_intr_timer.data = (unsigned long)&ha->mbx_intr_sem; 144 tmp_intr_timer.data = (unsigned long)&ha->mbx_intr_sem;
@@ -147,11 +147,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
147 (void (*)(unsigned long))qla2x00_mbx_sem_timeout; 147 (void (*)(unsigned long))qla2x00_mbx_sem_timeout;
148 148
149 DEBUG11(printk("%s(%ld): Adding timer.\n", __func__, 149 DEBUG11(printk("%s(%ld): Adding timer.\n", __func__,
150 ha->host_no);) 150 ha->host_no));
151 add_timer(&tmp_intr_timer); 151 add_timer(&tmp_intr_timer);
152 152
153 DEBUG11(printk("%s(%ld): going to unlock & sleep. " 153 DEBUG11(printk("%s(%ld): going to unlock & sleep. "
154 "time=0x%lx.\n", __func__, ha->host_no, jiffies);) 154 "time=0x%lx.\n", __func__, ha->host_no, jiffies));
155 155
156 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 156 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
157 157
@@ -170,14 +170,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
170 down(&ha->mbx_intr_sem); 170 down(&ha->mbx_intr_sem);
171 171
172 DEBUG11(printk("%s(%ld): waking up. time=0x%lx\n", __func__, 172 DEBUG11(printk("%s(%ld): waking up. time=0x%lx\n", __func__,
173 ha->host_no, jiffies);) 173 ha->host_no, jiffies));
174 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 174 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
175 175
176 /* delete the timer */ 176 /* delete the timer */
177 del_timer(&tmp_intr_timer); 177 del_timer(&tmp_intr_timer);
178 } else { 178 } else {
179 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 179 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
180 ha->host_no, command);) 180 ha->host_no, command));
181 181
182 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 182 if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
183 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); 183 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
@@ -209,7 +209,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
209 uint16_t *iptr2; 209 uint16_t *iptr2;
210 210
211 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, 211 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
212 ha->host_no, command);) 212 ha->host_no, command));
213 213
214 /* Got interrupt. Clear the flag. */ 214 /* Got interrupt. Clear the flag. */
215 ha->flags.mbox_int = 0; 215 ha->flags.mbox_int = 0;
@@ -266,7 +266,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
266 266
267 if (!abort_active) { 267 if (!abort_active) {
268 DEBUG11(printk("%s(%ld): checking for additional resp " 268 DEBUG11(printk("%s(%ld): checking for additional resp "
269 "interrupt.\n", __func__, ha->host_no);) 269 "interrupt.\n", __func__, ha->host_no));
270 270
271 /* polling mode for non isp_abort commands. */ 271 /* polling mode for non isp_abort commands. */
272 qla2x00_poll(ha); 272 qla2x00_poll(ha);
@@ -277,9 +277,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
277 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) { 277 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
278 /* not in dpc. schedule it for dpc to take over. */ 278 /* not in dpc. schedule it for dpc to take over. */
279 DEBUG(printk("%s(%ld): timeout schedule " 279 DEBUG(printk("%s(%ld): timeout schedule "
280 "isp_abort_needed.\n", __func__, ha->host_no);) 280 "isp_abort_needed.\n", __func__, ha->host_no));
281 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 281 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
282 "isp_abort_needed.\n", __func__, ha->host_no);) 282 "isp_abort_needed.\n", __func__, ha->host_no));
283 qla_printk(KERN_WARNING, ha, 283 qla_printk(KERN_WARNING, ha,
284 "Mailbox command timeout occured. Scheduling ISP " 284 "Mailbox command timeout occured. Scheduling ISP "
285 "abort.\n"); 285 "abort.\n");
@@ -288,9 +288,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
288 } else if (!abort_active) { 288 } else if (!abort_active) {
289 /* call abort directly since we are in the DPC thread */ 289 /* call abort directly since we are in the DPC thread */
290 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 290 DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
291 __func__, ha->host_no);) 291 __func__, ha->host_no));
292 DEBUG2_3_11(printk("%s(%ld): timeout calling " 292 DEBUG2_3_11(printk("%s(%ld): timeout calling "
293 "abort_isp\n", __func__, ha->host_no);) 293 "abort_isp\n", __func__, ha->host_no));
294 qla_printk(KERN_WARNING, ha, 294 qla_printk(KERN_WARNING, ha,
295 "Mailbox command timeout occured. Issuing ISP " 295 "Mailbox command timeout occured. Issuing ISP "
296 "abort.\n"); 296 "abort.\n");
@@ -303,9 +303,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
303 } 303 }
304 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 304 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
305 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__, 305 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__,
306 ha->host_no);) 306 ha->host_no));
307 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n", 307 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n",
308 __func__, ha->host_no);) 308 __func__, ha->host_no));
309 } 309 }
310 } 310 }
311 311
@@ -316,9 +316,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
316 if (rval) { 316 if (rval) {
317 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 317 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
318 "mbx2=%x, cmd=%x ****\n", __func__, ha->host_no, 318 "mbx2=%x, cmd=%x ****\n", __func__, ha->host_no,
319 mcp->mb[0], mcp->mb[1], mcp->mb[2], command);) 319 mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
320 } else { 320 } else {
321 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 321 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
322 } 322 }
323 323
324 return rval; 324 return rval;
@@ -394,7 +394,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
394 mbx_cmd_t mc; 394 mbx_cmd_t mc;
395 mbx_cmd_t *mcp = &mc; 395 mbx_cmd_t *mcp = &mc;
396 396
397 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 397 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
398 398
399 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 399 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
400 mcp->out_mb = MBX_0; 400 mcp->out_mb = MBX_0;
@@ -424,10 +424,10 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
424 } else { 424 } else {
425 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 425 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
426 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 426 DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
427 __func__, ha->host_no, mcp->mb[1]);) 427 __func__, ha->host_no, mcp->mb[1]));
428 } else { 428 } else {
429 DEBUG11(printk("%s(%ld): done.\n", __func__, 429 DEBUG11(printk("%s(%ld): done.\n", __func__,
430 ha->host_no);) 430 ha->host_no));
431 } 431 }
432 } 432 }
433 433
@@ -611,7 +611,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
611 mbx_cmd_t mc; 611 mbx_cmd_t mc;
612 mbx_cmd_t *mcp = &mc; 612 mbx_cmd_t *mcp = &mc;
613 613
614 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", ha->host_no);) 614 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", ha->host_no));
615 615
616 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 616 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
617 mcp->mb[1] = 0xAAAA; 617 mcp->mb[1] = 0xAAAA;
@@ -639,11 +639,11 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
639 if (rval != QLA_SUCCESS) { 639 if (rval != QLA_SUCCESS) {
640 /*EMPTY*/ 640 /*EMPTY*/
641 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", 641 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
642 ha->host_no, rval);) 642 ha->host_no, rval));
643 } else { 643 } else {
644 /*EMPTY*/ 644 /*EMPTY*/
645 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", 645 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
646 ha->host_no);) 646 ha->host_no));
647 } 647 }
648 648
649 return rval; 649 return rval;
@@ -671,7 +671,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
671 mbx_cmd_t mc; 671 mbx_cmd_t mc;
672 mbx_cmd_t *mcp = &mc; 672 mbx_cmd_t *mcp = &mc;
673 673
674 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 674 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
675 675
676 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 676 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
677 mcp->out_mb = MBX_0; 677 mcp->out_mb = MBX_0;
@@ -694,9 +694,9 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
694 if (rval != QLA_SUCCESS) { 694 if (rval != QLA_SUCCESS) {
695 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 695 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
696 ha->host_no, rval, (IS_QLA24XX(ha) || IS_QLA54XX(ha) ? 696 ha->host_no, rval, (IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
697 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));) 697 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])));
698 } else { 698 } else {
699 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 699 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
700 } 700 }
701 701
702 return rval; 702 return rval;
@@ -743,9 +743,9 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void* buffer, dma_addr_t phys_addr,
743 if (rval != QLA_SUCCESS) { 743 if (rval != QLA_SUCCESS) {
744 /*EMPTY*/ 744 /*EMPTY*/
745 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 745 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
746 ha->host_no, rval);) 746 ha->host_no, rval));
747 DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 747 DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
748 ha->host_no, rval);) 748 ha->host_no, rval));
749 } else { 749 } else {
750 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 750 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
751 751
@@ -781,7 +781,7 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
781 mbx_cmd_t mc; 781 mbx_cmd_t mc;
782 mbx_cmd_t *mcp = &mc; 782 mbx_cmd_t *mcp = &mc;
783 783
784 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no);) 784 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no));
785 785
786 fcport = sp->fcport; 786 fcport = sp->fcport;
787 787
@@ -813,11 +813,11 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
813 813
814 if (rval != QLA_SUCCESS) { 814 if (rval != QLA_SUCCESS) {
815 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 815 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
816 ha->host_no, rval);) 816 ha->host_no, rval));
817 } else { 817 } else {
818 sp->flags |= SRB_ABORT_PENDING; 818 sp->flags |= SRB_ABORT_PENDING;
819 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 819 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
820 ha->host_no);) 820 ha->host_no));
821 } 821 }
822 822
823 return rval; 823 return rval;
@@ -848,7 +848,7 @@ qla2x00_abort_target(fc_port_t *fcport)
848 if (fcport == NULL) 848 if (fcport == NULL)
849 return 0; 849 return 0;
850 850
851 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no);) 851 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
852 852
853 ha = fcport->ha; 853 ha = fcport->ha;
854 mcp->mb[0] = MBC_ABORT_TARGET; 854 mcp->mb[0] = MBC_ABORT_TARGET;
@@ -872,11 +872,11 @@ qla2x00_abort_target(fc_port_t *fcport)
872 872
873 if (rval != QLA_SUCCESS) { 873 if (rval != QLA_SUCCESS) {
874 DEBUG2_3_11(printk("qla2x00_abort_target(%ld): failed=%x.\n", 874 DEBUG2_3_11(printk("qla2x00_abort_target(%ld): failed=%x.\n",
875 ha->host_no, rval);) 875 ha->host_no, rval));
876 } else { 876 } else {
877 /*EMPTY*/ 877 /*EMPTY*/
878 DEBUG11(printk("qla2x00_abort_target(%ld): done.\n", 878 DEBUG11(printk("qla2x00_abort_target(%ld): done.\n",
879 ha->host_no);) 879 ha->host_no));
880 } 880 }
881 881
882 return rval; 882 return rval;
@@ -912,7 +912,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
912 mbx_cmd_t *mcp = &mc; 912 mbx_cmd_t *mcp = &mc;
913 913
914 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", 914 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
915 ha->host_no);) 915 ha->host_no));
916 916
917 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 917 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
918 mcp->out_mb = MBX_0; 918 mcp->out_mb = MBX_0;
@@ -933,11 +933,11 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
933 if (rval != QLA_SUCCESS) { 933 if (rval != QLA_SUCCESS) {
934 /*EMPTY*/ 934 /*EMPTY*/
935 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 935 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
936 ha->host_no, rval);) 936 ha->host_no, rval));
937 } else { 937 } else {
938 /*EMPTY*/ 938 /*EMPTY*/
939 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 939 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
940 ha->host_no);) 940 ha->host_no));
941 } 941 }
942 942
943 return rval; 943 return rval;
@@ -968,7 +968,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
968 mbx_cmd_t *mcp = &mc; 968 mbx_cmd_t *mcp = &mc;
969 969
970 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", 970 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
971 ha->host_no);) 971 ha->host_no));
972 972
973 mcp->mb[0] = MBC_GET_RETRY_COUNT; 973 mcp->mb[0] = MBC_GET_RETRY_COUNT;
974 mcp->out_mb = MBX_0; 974 mcp->out_mb = MBX_0;
@@ -980,7 +980,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
980 if (rval != QLA_SUCCESS) { 980 if (rval != QLA_SUCCESS) {
981 /*EMPTY*/ 981 /*EMPTY*/
982 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", 982 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
983 ha->host_no, mcp->mb[0]);) 983 ha->host_no, mcp->mb[0]));
984 } else { 984 } else {
985 /* Convert returned data and check our values. */ 985 /* Convert returned data and check our values. */
986 *r_a_tov = mcp->mb[3] / 2; 986 *r_a_tov = mcp->mb[3] / 2;
@@ -992,7 +992,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
992 } 992 }
993 993
994 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d " 994 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
995 "ratov=%d.\n", ha->host_no, mcp->mb[3], ratov);) 995 "ratov=%d.\n", ha->host_no, mcp->mb[3], ratov));
996 } 996 }
997 997
998 return rval; 998 return rval;
@@ -1023,7 +1023,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1023 mbx_cmd_t *mcp = &mc; 1023 mbx_cmd_t *mcp = &mc;
1024 1024
1025 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1025 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
1026 ha->host_no);) 1026 ha->host_no));
1027 1027
1028 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 1028 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1029 mcp->mb[2] = MSW(ha->init_cb_dma); 1029 mcp->mb[2] = MSW(ha->init_cb_dma);
@@ -1043,11 +1043,11 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1043 /*EMPTY*/ 1043 /*EMPTY*/
1044 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " 1044 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
1045 "mb0=%x.\n", 1045 "mb0=%x.\n",
1046 ha->host_no, rval, mcp->mb[0]);) 1046 ha->host_no, rval, mcp->mb[0]));
1047 } else { 1047 } else {
1048 /*EMPTY*/ 1048 /*EMPTY*/
1049 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", 1049 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
1050 ha->host_no);) 1050 ha->host_no));
1051 } 1051 }
1052 1052
1053 return rval; 1053 return rval;
@@ -1079,7 +1079,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1079 struct port_database_24xx *pd24; 1079 struct port_database_24xx *pd24;
1080 dma_addr_t pd_dma; 1080 dma_addr_t pd_dma;
1081 1081
1082 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1082 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1083 1083
1084 pd24 = NULL; 1084 pd24 = NULL;
1085 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1085 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1220,7 +1220,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *dptr)
1220 mbx_cmd_t *mcp = &mc; 1220 mbx_cmd_t *mcp = &mc;
1221 1221
1222 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", 1222 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
1223 ha->host_no);) 1223 ha->host_no));
1224 1224
1225 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1225 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1226 mcp->out_mb = MBX_0; 1226 mcp->out_mb = MBX_0;
@@ -1235,11 +1235,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *dptr)
1235 if (rval != QLA_SUCCESS) { 1235 if (rval != QLA_SUCCESS) {
1236 /*EMPTY*/ 1236 /*EMPTY*/
1237 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " 1237 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
1238 "failed=%x.\n", ha->host_no, rval);) 1238 "failed=%x.\n", ha->host_no, rval));
1239 } else { 1239 } else {
1240 /*EMPTY*/ 1240 /*EMPTY*/
1241 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", 1241 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
1242 ha->host_no);) 1242 ha->host_no));
1243 } 1243 }
1244 1244
1245 return rval; 1245 return rval;
@@ -1272,7 +1272,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1272 mbx_cmd_t *mcp = &mc; 1272 mbx_cmd_t *mcp = &mc;
1273 1273
1274 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", 1274 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
1275 ha->host_no);) 1275 ha->host_no));
1276 1276
1277 mcp->mb[0] = MBC_GET_PORT_NAME; 1277 mcp->mb[0] = MBC_GET_PORT_NAME;
1278 mcp->out_mb = MBX_1|MBX_0; 1278 mcp->out_mb = MBX_1|MBX_0;
@@ -1292,7 +1292,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1292 if (rval != QLA_SUCCESS) { 1292 if (rval != QLA_SUCCESS) {
1293 /*EMPTY*/ 1293 /*EMPTY*/
1294 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", 1294 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
1295 ha->host_no, rval);) 1295 ha->host_no, rval));
1296 } else { 1296 } else {
1297 if (name != NULL) { 1297 if (name != NULL) {
1298 /* This function returns name in big endian. */ 1298 /* This function returns name in big endian. */
@@ -1307,7 +1307,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1307 } 1307 }
1308 1308
1309 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1309 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
1310 ha->host_no);) 1310 ha->host_no));
1311 } 1311 }
1312 1312
1313 return rval; 1313 return rval;
@@ -1335,7 +1335,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
1335 mbx_cmd_t mc; 1335 mbx_cmd_t mc;
1336 mbx_cmd_t *mcp = &mc; 1336 mbx_cmd_t *mcp = &mc;
1337 1337
1338 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1338 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1339 1339
1340 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1340 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
1341 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1341 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
@@ -1364,10 +1364,10 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
1364 if (rval != QLA_SUCCESS) { 1364 if (rval != QLA_SUCCESS) {
1365 /*EMPTY*/ 1365 /*EMPTY*/
1366 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", 1366 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
1367 __func__, ha->host_no, rval);) 1367 __func__, ha->host_no, rval));
1368 } else { 1368 } else {
1369 /*EMPTY*/ 1369 /*EMPTY*/
1370 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 1370 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
1371 } 1371 }
1372 1372
1373 return rval; 1373 return rval;
@@ -1400,10 +1400,10 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1400 mbx_cmd_t *mcp = &mc; 1400 mbx_cmd_t *mcp = &mc;
1401 1401
1402 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", 1402 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n",
1403 ha->host_no);) 1403 ha->host_no));
1404 1404
1405 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " 1405 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total "
1406 "tov=%d.\n", ha->retry_count, ha->login_timeout, mcp->tov);) 1406 "tov=%d.\n", ha->retry_count, ha->login_timeout, mcp->tov));
1407 1407
1408 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 1408 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1409 mcp->mb[1] = cmd_size; 1409 mcp->mb[1] = cmd_size;
@@ -1421,12 +1421,12 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1421 if (rval != QLA_SUCCESS) { 1421 if (rval != QLA_SUCCESS) {
1422 /*EMPTY*/ 1422 /*EMPTY*/
1423 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1423 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1424 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]);) 1424 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1425 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1425 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1426 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]);) 1426 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1427 } else { 1427 } else {
1428 /*EMPTY*/ 1428 /*EMPTY*/
1429 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", ha->host_no);) 1429 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", ha->host_no));
1430 } 1430 }
1431 1431
1432 return rval; 1432 return rval;
@@ -1442,7 +1442,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1442 dma_addr_t lg_dma; 1442 dma_addr_t lg_dma;
1443 uint32_t iop[2]; 1443 uint32_t iop[2];
1444 1444
1445 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1445 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1446 1446
1447 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1447 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1448 if (lg == NULL) { 1448 if (lg == NULL) {
@@ -1458,13 +1458,15 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1458 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); 1458 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1459 if (opt & BIT_0) 1459 if (opt & BIT_0)
1460 lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI); 1460 lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
1461 if (opt & BIT_1)
1462 lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
1461 lg->port_id[0] = al_pa; 1463 lg->port_id[0] = al_pa;
1462 lg->port_id[1] = area; 1464 lg->port_id[1] = area;
1463 lg->port_id[2] = domain; 1465 lg->port_id[2] = domain;
1464 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1466 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
1465 if (rval != QLA_SUCCESS) { 1467 if (rval != QLA_SUCCESS) {
1466 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1468 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
1467 "(%x).\n", __func__, ha->host_no, rval);) 1469 "(%x).\n", __func__, ha->host_no, rval));
1468 } else if (lg->entry_status != 0) { 1470 } else if (lg->entry_status != 0) {
1469 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1471 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1470 "-- error status (%x).\n", __func__, ha->host_no, 1472 "-- error status (%x).\n", __func__, ha->host_no,
@@ -1505,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1505 break; 1507 break;
1506 } 1508 }
1507 } else { 1509 } else {
1508 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 1510 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
1509 1511
1510 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1512 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1511 1513
@@ -1559,7 +1561,7 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1559 mbx_cmd_t mc; 1561 mbx_cmd_t mc;
1560 mbx_cmd_t *mcp = &mc; 1562 mbx_cmd_t *mcp = &mc;
1561 1563
1562 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", ha->host_no);) 1564 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", ha->host_no));
1563 1565
1564 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1566 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1565 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1567 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1604,11 +1606,11 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1604 /*EMPTY*/ 1606 /*EMPTY*/
1605 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " 1607 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x "
1606 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", ha->host_no, rval, 1608 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", ha->host_no, rval,
1607 mcp->mb[0], mcp->mb[1], mcp->mb[2]);) 1609 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
1608 } else { 1610 } else {
1609 /*EMPTY*/ 1611 /*EMPTY*/
1610 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", 1612 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n",
1611 ha->host_no);) 1613 ha->host_no));
1612 } 1614 }
1613 1615
1614 return rval; 1616 return rval;
@@ -1643,7 +1645,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1643 fcport->d_id.b.domain, fcport->d_id.b.area, 1645 fcport->d_id.b.domain, fcport->d_id.b.area,
1644 fcport->d_id.b.al_pa, mb_ret, opt); 1646 fcport->d_id.b.al_pa, mb_ret, opt);
1645 1647
1646 DEBUG3(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1648 DEBUG3(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1647 1649
1648 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 1650 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1649 if (HAS_EXTENDED_IDS(ha)) 1651 if (HAS_EXTENDED_IDS(ha))
@@ -1677,13 +1679,13 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1677 1679
1678 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1680 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1679 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1681 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval,
1680 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);) 1682 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1681 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1683 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1682 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1684 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval,
1683 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);) 1685 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1684 } else { 1686 } else {
1685 /*EMPTY*/ 1687 /*EMPTY*/
1686 DEBUG3(printk("%s(%ld): done.\n", __func__, ha->host_no);) 1688 DEBUG3(printk("%s(%ld): done.\n", __func__, ha->host_no));
1687 } 1689 }
1688 1690
1689 return (rval); 1691 return (rval);
@@ -1697,7 +1699,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1697 struct logio_entry_24xx *lg; 1699 struct logio_entry_24xx *lg;
1698 dma_addr_t lg_dma; 1700 dma_addr_t lg_dma;
1699 1701
1700 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 1702 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
1701 1703
1702 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1704 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1703 if (lg == NULL) { 1705 if (lg == NULL) {
@@ -1718,7 +1720,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1718 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1720 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
1719 if (rval != QLA_SUCCESS) { 1721 if (rval != QLA_SUCCESS) {
1720 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1722 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
1721 "(%x).\n", __func__, ha->host_no, rval);) 1723 "(%x).\n", __func__, ha->host_no, rval));
1722 } else if (lg->entry_status != 0) { 1724 } else if (lg->entry_status != 0) {
1723 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1725 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1724 "-- error status (%x).\n", __func__, ha->host_no, 1726 "-- error status (%x).\n", __func__, ha->host_no,
@@ -1729,10 +1731,10 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1729 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1731 "-- completion status (%x) ioparam=%x/%x.\n", __func__,
1730 ha->host_no, le16_to_cpu(lg->comp_status), 1732 ha->host_no, le16_to_cpu(lg->comp_status),
1731 le32_to_cpu(lg->io_parameter[0]), 1733 le32_to_cpu(lg->io_parameter[0]),
1732 le32_to_cpu(lg->io_parameter[1]));) 1734 le32_to_cpu(lg->io_parameter[1])));
1733 } else { 1735 } else {
1734 /*EMPTY*/ 1736 /*EMPTY*/
1735 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 1737 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
1736 } 1738 }
1737 1739
1738 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1740 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1765,7 +1767,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1765 mbx_cmd_t *mcp = &mc; 1767 mbx_cmd_t *mcp = &mc;
1766 1768
1767 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", 1769 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n",
1768 ha->host_no);) 1770 ha->host_no));
1769 1771
1770 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 1772 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1771 mcp->out_mb = MBX_1|MBX_0; 1773 mcp->out_mb = MBX_1|MBX_0;
@@ -1785,11 +1787,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1785 if (rval != QLA_SUCCESS) { 1787 if (rval != QLA_SUCCESS) {
1786 /*EMPTY*/ 1788 /*EMPTY*/
1787 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " 1789 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x "
1788 "mbx1=%x.\n", ha->host_no, rval, mcp->mb[1]);) 1790 "mbx1=%x.\n", ha->host_no, rval, mcp->mb[1]));
1789 } else { 1791 } else {
1790 /*EMPTY*/ 1792 /*EMPTY*/
1791 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", 1793 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n",
1792 ha->host_no);) 1794 ha->host_no));
1793 } 1795 }
1794 1796
1795 return rval; 1797 return rval;
@@ -1818,7 +1820,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
1818 mbx_cmd_t *mcp = &mc; 1820 mbx_cmd_t *mcp = &mc;
1819 1821
1820 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1822 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1821 ha->host_no);) 1823 ha->host_no));
1822 1824
1823 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1825 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1824 mcp->mb[1] = 0; 1826 mcp->mb[1] = 0;
@@ -1833,11 +1835,11 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
1833 if (rval != QLA_SUCCESS) { 1835 if (rval != QLA_SUCCESS) {
1834 /*EMPTY*/ 1836 /*EMPTY*/
1835 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", 1837 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n",
1836 ha->host_no, rval);) 1838 ha->host_no, rval));
1837 } else { 1839 } else {
1838 /*EMPTY*/ 1840 /*EMPTY*/
1839 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", 1841 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n",
1840 ha->host_no);) 1842 ha->host_no));
1841 } 1843 }
1842 1844
1843 return rval; 1845 return rval;
@@ -1864,7 +1866,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1864 mbx_cmd_t *mcp = &mc; 1866 mbx_cmd_t *mcp = &mc;
1865 1867
1866 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", 1868 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n",
1867 ha->host_no);) 1869 ha->host_no));
1868 1870
1869 if (id_list == NULL) 1871 if (id_list == NULL)
1870 return QLA_FUNCTION_FAILED; 1872 return QLA_FUNCTION_FAILED;
@@ -1893,11 +1895,11 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1893 if (rval != QLA_SUCCESS) { 1895 if (rval != QLA_SUCCESS) {
1894 /*EMPTY*/ 1896 /*EMPTY*/
1895 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", 1897 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n",
1896 ha->host_no, rval);) 1898 ha->host_no, rval));
1897 } else { 1899 } else {
1898 *entries = mcp->mb[1]; 1900 *entries = mcp->mb[1];
1899 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", 1901 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n",
1900 ha->host_no);) 1902 ha->host_no));
1901 } 1903 }
1902 1904
1903 return rval; 1905 return rval;
@@ -1936,7 +1938,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1936 if (rval != QLA_SUCCESS) { 1938 if (rval != QLA_SUCCESS) {
1937 /*EMPTY*/ 1939 /*EMPTY*/
1938 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__, 1940 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
1939 ha->host_no, mcp->mb[0]);) 1941 ha->host_no, mcp->mb[0]));
1940 } else { 1942 } else {
1941 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " 1943 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
1942 "mb7=%x mb10=%x.\n", __func__, ha->host_no, 1944 "mb7=%x mb10=%x.\n", __func__, ha->host_no,
@@ -2045,7 +2047,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2045 link_stat_t *stat_buf; 2047 link_stat_t *stat_buf;
2046 dma_addr_t stat_buf_dma; 2048 dma_addr_t stat_buf_dma;
2047 2049
2048 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 2050 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2049 2051
2050 stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma); 2052 stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma);
2051 if (stat_buf == NULL) { 2053 if (stat_buf == NULL) {
@@ -2083,7 +2085,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2083 if (rval == QLA_SUCCESS) { 2085 if (rval == QLA_SUCCESS) {
2084 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2086 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2085 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2087 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2086 __func__, ha->host_no, mcp->mb[0]);) 2088 __func__, ha->host_no, mcp->mb[0]));
2087 status[0] = mcp->mb[0]; 2089 status[0] = mcp->mb[0];
2088 rval = BIT_1; 2090 rval = BIT_1;
2089 } else { 2091 } else {
@@ -2108,12 +2110,12 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2108 stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt, 2110 stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt,
2109 stat_buf->prim_seq_err_cnt, 2111 stat_buf->prim_seq_err_cnt,
2110 stat_buf->inval_xmit_word_cnt, 2112 stat_buf->inval_xmit_word_cnt,
2111 stat_buf->inval_crc_cnt);) 2113 stat_buf->inval_crc_cnt));
2112 } 2114 }
2113 } else { 2115 } else {
2114 /* Failed. */ 2116 /* Failed. */
2115 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2117 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2116 ha->host_no, rval);) 2118 ha->host_no, rval));
2117 rval = BIT_1; 2119 rval = BIT_1;
2118 } 2120 }
2119 2121
@@ -2132,7 +2134,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
2132 uint32_t *sbuf, *siter; 2134 uint32_t *sbuf, *siter;
2133 dma_addr_t sbuf_dma; 2135 dma_addr_t sbuf_dma;
2134 2136
2135 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 2137 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2136 2138
2137 if (dwords > (DMA_POOL_SIZE / 4)) { 2139 if (dwords > (DMA_POOL_SIZE / 4)) {
2138 DEBUG2_3_11(printk("%s(%ld): Unable to retrieve %d DWORDs " 2140 DEBUG2_3_11(printk("%s(%ld): Unable to retrieve %d DWORDs "
@@ -2196,7 +2198,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2196 dma_addr_t abt_dma; 2198 dma_addr_t abt_dma;
2197 uint32_t handle; 2199 uint32_t handle;
2198 2200
2199 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);) 2201 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2200 2202
2201 fcport = sp->fcport; 2203 fcport = sp->fcport;
2202 2204
@@ -2229,7 +2231,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2229 rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0); 2231 rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0);
2230 if (rval != QLA_SUCCESS) { 2232 if (rval != QLA_SUCCESS) {
2231 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", 2233 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
2232 __func__, ha->host_no, rval);) 2234 __func__, ha->host_no, rval));
2233 } else if (abt->entry_status != 0) { 2235 } else if (abt->entry_status != 0) {
2234 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2236 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2235 "-- error status (%x).\n", __func__, ha->host_no, 2237 "-- error status (%x).\n", __func__, ha->host_no,
@@ -2238,10 +2240,10 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2238 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { 2240 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2239 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2241 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2240 "-- completion status (%x).\n", __func__, ha->host_no, 2242 "-- completion status (%x).\n", __func__, ha->host_no,
2241 le16_to_cpu(abt->nport_handle));) 2243 le16_to_cpu(abt->nport_handle)));
2242 rval = QLA_FUNCTION_FAILED; 2244 rval = QLA_FUNCTION_FAILED;
2243 } else { 2245 } else {
2244 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 2246 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2245 sp->flags |= SRB_ABORT_PENDING; 2247 sp->flags |= SRB_ABORT_PENDING;
2246 } 2248 }
2247 2249
@@ -2268,7 +2270,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2268 if (fcport == NULL) 2270 if (fcport == NULL)
2269 return 0; 2271 return 0;
2270 2272
2271 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no);) 2273 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
2272 2274
2273 ha = fcport->ha; 2275 ha = fcport->ha;
2274 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2276 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
@@ -2290,7 +2292,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2290 rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0); 2292 rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0);
2291 if (rval != QLA_SUCCESS) { 2293 if (rval != QLA_SUCCESS) {
2292 DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB " 2294 DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB "
2293 "(%x).\n", __func__, ha->host_no, rval);) 2295 "(%x).\n", __func__, ha->host_no, rval));
2294 goto atarget_done; 2296 goto atarget_done;
2295 } else if (tsk->p.sts.entry_status != 0) { 2297 } else if (tsk->p.sts.entry_status != 0) {
2296 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2298 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
@@ -2302,7 +2304,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2302 __constant_cpu_to_le16(CS_COMPLETE)) { 2304 __constant_cpu_to_le16(CS_COMPLETE)) {
2303 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2305 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2304 "-- completion status (%x).\n", __func__, 2306 "-- completion status (%x).\n", __func__,
2305 ha->host_no, le16_to_cpu(tsk->p.sts.comp_status));) 2307 ha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
2306 rval = QLA_FUNCTION_FAILED; 2308 rval = QLA_FUNCTION_FAILED;
2307 goto atarget_done; 2309 goto atarget_done;
2308 } 2310 }
@@ -2311,9 +2313,9 @@ qla24xx_abort_target(fc_port_t *fcport)
2311 rval = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID); 2313 rval = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID);
2312 if (rval != QLA_SUCCESS) { 2314 if (rval != QLA_SUCCESS) {
2313 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 2315 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
2314 "(%x).\n", __func__, ha->host_no, rval);) 2316 "(%x).\n", __func__, ha->host_no, rval));
2315 } else { 2317 } else {
2316 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no);) 2318 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2317 } 2319 }
2318 2320
2319atarget_done: 2321atarget_done:
@@ -2460,3 +2462,81 @@ qla2x00_stop_firmware(scsi_qla_host_t *ha)
2460 2462
2461 return rval; 2463 return rval;
2462} 2464}
2465
2466int
2467qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
2468 uint16_t buffers)
2469{
2470 int rval;
2471 mbx_cmd_t mc;
2472 mbx_cmd_t *mcp = &mc;
2473
2474 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
2475 return QLA_FUNCTION_FAILED;
2476
2477 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2478
2479 mcp->mb[0] = MBC_TRACE_CONTROL;
2480 mcp->mb[1] = ctrl;
2481 mcp->out_mb = MBX_1|MBX_0;
2482 mcp->in_mb = MBX_1|MBX_0;
2483 if (ctrl == TC_ENABLE) {
2484 mcp->mb[2] = LSW(eft_dma);
2485 mcp->mb[3] = MSW(eft_dma);
2486 mcp->mb[4] = LSW(MSD(eft_dma));
2487 mcp->mb[5] = MSW(MSD(eft_dma));
2488 mcp->mb[6] = buffers;
2489 mcp->mb[7] = buffers;
2490 mcp->out_mb |= MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2;
2491 }
2492 mcp->tov = 30;
2493 mcp->flags = 0;
2494 rval = qla2x00_mailbox_command(ha, mcp);
2495
2496 if (rval != QLA_SUCCESS) {
2497 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2498 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2499 } else {
2500 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2501 }
2502
2503 return rval;
2504}
2505
2506int
2507qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2508 uint16_t off, uint16_t count)
2509{
2510 int rval;
2511 mbx_cmd_t mc;
2512 mbx_cmd_t *mcp = &mc;
2513
2514 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
2515 return QLA_FUNCTION_FAILED;
2516
2517 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2518
2519 mcp->mb[0] = MBC_READ_SFP;
2520 mcp->mb[1] = addr;
2521 mcp->mb[2] = MSW(sfp_dma);
2522 mcp->mb[3] = LSW(sfp_dma);
2523 mcp->mb[6] = MSW(MSD(sfp_dma));
2524 mcp->mb[7] = LSW(MSD(sfp_dma));
2525 mcp->mb[8] = count;
2526 mcp->mb[9] = off;
2527 mcp->mb[10] = 0;
2528 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2529 mcp->in_mb = MBX_0;
2530 mcp->tov = 30;
2531 mcp->flags = 0;
2532 rval = qla2x00_mailbox_command(ha, mcp);
2533
2534 if (rval != QLA_SUCCESS) {
2535 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2536 ha->host_no, rval, mcp->mb[0]));
2537 } else {
2538 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
2539 }
2540
2541 return rval;
2542}
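
Both new mailbox routines above hand the firmware a 64-bit DMA address by splitting it across four 16-bit mailbox registers; note that the register order differs per command (trace control loads mb2/mb3 with the LSW/MSW of the low half, read-SFP loads mb2/mb3 with MSW/LSW). The standalone sketch below shows the decomposition, assuming the stock qla2xxx helper macros (LSW/MSW select the low/high 16 bits of a 32-bit value, MSD the high 32 bits of a 64-bit value); the program and the sample address are illustrative only, not driver code.

	#include <stdio.h>
	#include <stdint.h>

	#define LSW(x)	((uint16_t)(x))				/* bits 15..0  */
	#define MSW(x)	((uint16_t)(((uint32_t)(x)) >> 16))	/* bits 31..16 */
	#define MSD(x)	((uint32_t)(((uint64_t)(x)) >> 32))	/* bits 63..32 */

	int main(void)
	{
		uint64_t eft_dma = 0x1122334455667788ULL;	/* hypothetical bus address */

		/* qla2x00_trace_control() packing of the EFT buffer address: */
		printf("mb2=%04x mb3=%04x mb4=%04x mb5=%04x\n",
		    LSW(eft_dma), MSW(eft_dma),
		    LSW(MSD(eft_dma)), MSW(MSD(eft_dma)));
		/* prints: mb2=7788 mb3=5566 mb4=3344 mb5=1122 */
		return 0;
	}
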
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 93062593ebe7..ec7ebb6037e6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -39,14 +39,14 @@ MODULE_PARM_DESC(ql2xlogintimeout,
39int qlport_down_retry = 30; 39int qlport_down_retry = 30;
40module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR); 40module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
41MODULE_PARM_DESC(qlport_down_retry, 41MODULE_PARM_DESC(qlport_down_retry,
42 "Maximum number of command retries to a port that returns" 42 "Maximum number of command retries to a port that returns "
43 "a PORT-DOWN status."); 43 "a PORT-DOWN status.");
44 44
45int ql2xplogiabsentdevice; 45int ql2xplogiabsentdevice;
46module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR); 46module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
47MODULE_PARM_DESC(ql2xplogiabsentdevice, 47MODULE_PARM_DESC(ql2xplogiabsentdevice,
48 "Option to enable PLOGI to devices that are not present after " 48 "Option to enable PLOGI to devices that are not present after "
49 "a Fabric scan. This is needed for several broken switches." 49 "a Fabric scan. This is needed for several broken switches. "
50 "Default is 0 - no PLOGI. 1 - perfom PLOGI."); 50 "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
51 51
52int ql2xloginretrycount = 0; 52int ql2xloginretrycount = 0;
@@ -54,6 +54,19 @@ module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
54MODULE_PARM_DESC(ql2xloginretrycount, 54MODULE_PARM_DESC(ql2xloginretrycount,
55 "Specify an alternate value for the NVRAM login retry count."); 55 "Specify an alternate value for the NVRAM login retry count.");
56 56
57int ql2xallocfwdump = 1;
58module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
59MODULE_PARM_DESC(ql2xallocfwdump,
60 "Option to enable allocation of memory for a firmware dump "
61 "during HBA initialization. Memory allocation requirements "
62 "vary by ISP type. Default is 1 - allocate memory.");
63
64int extended_error_logging;
65module_param(extended_error_logging, int, S_IRUGO|S_IRUSR);
66MODULE_PARM_DESC(extended_error_logging,
67 "Option to enable extended error logging, "
68 "Default is 0 - no logging. 1 - log errors.");
69
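
Both new options are plain integer module parameters, so a hypothetical load that turns on the extra logging and skips the firmware-dump allocation would look like:

	modprobe qla2xxx extended_error_logging=1 ql2xallocfwdump=0

Since they are declared with S_IRUGO, their current values are also readable under /sys/module/qla2xxx/parameters/ on a running system.
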
57static void qla2x00_free_device(scsi_qla_host_t *); 70static void qla2x00_free_device(scsi_qla_host_t *);
58 71
59static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha); 72static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
@@ -624,7 +637,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
624 637
625 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 638 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n",
626 __func__, ha->host_no, sp, serial)); 639 __func__, ha->host_no, sp, serial));
627 DEBUG3(qla2x00_print_scsi_cmd(cmd);) 640 DEBUG3(qla2x00_print_scsi_cmd(cmd));
628 641
629 spin_unlock_irqrestore(&ha->hardware_lock, flags); 642 spin_unlock_irqrestore(&ha->hardware_lock, flags);
630 if (ha->isp_ops.abort_command(ha, sp)) { 643 if (ha->isp_ops.abort_command(ha, sp)) {
@@ -766,7 +779,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
766#endif 779#endif
767 } else { 780 } else {
768 DEBUG2(printk(KERN_INFO 781 DEBUG2(printk(KERN_INFO
769 "%s failed: loop not ready\n",__func__);) 782 "%s failed: loop not ready\n",__func__));
770 } 783 }
771 784
772 if (ret == FAILED) { 785 if (ret == FAILED) {
@@ -1021,12 +1034,12 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
1021 /* Empty */ 1034 /* Empty */
1022 DEBUG2_3(printk("%s(%ld): **** FAILED ****\n", 1035 DEBUG2_3(printk("%s(%ld): **** FAILED ****\n",
1023 __func__, 1036 __func__,
1024 ha->host_no);) 1037 ha->host_no));
1025 } else { 1038 } else {
1026 /* Empty */ 1039 /* Empty */
1027 DEBUG3(printk("%s(%ld): exiting normally.\n", 1040 DEBUG3(printk("%s(%ld): exiting normally.\n",
1028 __func__, 1041 __func__,
1029 ha->host_no);) 1042 ha->host_no));
1030 } 1043 }
1031 1044
1032 return(status); 1045 return(status);
@@ -1324,7 +1337,8 @@ qla24xx_disable_intrs(scsi_qla_host_t *ha)
1324/* 1337/*
1325 * PCI driver interface 1338 * PCI driver interface
1326 */ 1339 */
1327static int qla2x00_probe_one(struct pci_dev *pdev) 1340static int __devinit
1341qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1328{ 1342{
1329 int ret = -ENODEV; 1343 int ret = -ENODEV;
1330 device_reg_t __iomem *reg; 1344 device_reg_t __iomem *reg;
@@ -1405,7 +1419,6 @@ static int qla2x00_probe_one(struct pci_dev *pdev)
1405 ha->isp_ops.read_nvram = qla2x00_read_nvram_data; 1419 ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
1406 ha->isp_ops.write_nvram = qla2x00_write_nvram_data; 1420 ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
1407 ha->isp_ops.fw_dump = qla2100_fw_dump; 1421 ha->isp_ops.fw_dump = qla2100_fw_dump;
1408 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump;
1409 ha->isp_ops.read_optrom = qla2x00_read_optrom_data; 1422 ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
1410 ha->isp_ops.write_optrom = qla2x00_write_optrom_data; 1423 ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
1411 if (IS_QLA2100(ha)) { 1424 if (IS_QLA2100(ha)) {
@@ -1432,7 +1445,6 @@ static int qla2x00_probe_one(struct pci_dev *pdev)
1432 ha->isp_ops.pci_config = qla2300_pci_config; 1445 ha->isp_ops.pci_config = qla2300_pci_config;
1433 ha->isp_ops.intr_handler = qla2300_intr_handler; 1446 ha->isp_ops.intr_handler = qla2300_intr_handler;
1434 ha->isp_ops.fw_dump = qla2300_fw_dump; 1447 ha->isp_ops.fw_dump = qla2300_fw_dump;
1435 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump;
1436 ha->isp_ops.beacon_on = qla2x00_beacon_on; 1448 ha->isp_ops.beacon_on = qla2x00_beacon_on;
1437 ha->isp_ops.beacon_off = qla2x00_beacon_off; 1449 ha->isp_ops.beacon_off = qla2x00_beacon_off;
1438 ha->isp_ops.beacon_blink = qla2x00_beacon_blink; 1450 ha->isp_ops.beacon_blink = qla2x00_beacon_blink;
@@ -1469,7 +1481,6 @@ static int qla2x00_probe_one(struct pci_dev *pdev)
1469 ha->isp_ops.read_nvram = qla24xx_read_nvram_data; 1481 ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
1470 ha->isp_ops.write_nvram = qla24xx_write_nvram_data; 1482 ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
1471 ha->isp_ops.fw_dump = qla24xx_fw_dump; 1483 ha->isp_ops.fw_dump = qla24xx_fw_dump;
1472 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump;
1473 ha->isp_ops.read_optrom = qla24xx_read_optrom_data; 1484 ha->isp_ops.read_optrom = qla24xx_read_optrom_data;
1474 ha->isp_ops.write_optrom = qla24xx_write_optrom_data; 1485 ha->isp_ops.write_optrom = qla24xx_write_optrom_data;
1475 ha->isp_ops.beacon_on = qla24xx_beacon_on; 1486 ha->isp_ops.beacon_on = qla24xx_beacon_on;
@@ -1640,7 +1651,8 @@ probe_out:
1640 return ret; 1651 return ret;
1641} 1652}
1642 1653
1643static void qla2x00_remove_one(struct pci_dev *pdev) 1654static void __devexit
1655qla2x00_remove_one(struct pci_dev *pdev)
1644{ 1656{
1645 scsi_qla_host_t *ha; 1657 scsi_qla_host_t *ha;
1646 1658
@@ -1678,6 +1690,9 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1678 kthread_stop(t); 1690 kthread_stop(t);
1679 } 1691 }
1680 1692
1693 if (ha->eft)
1694 qla2x00_trace_control(ha, TC_DISABLE, 0, 0);
1695
1681 /* Stop currently executing firmware. */ 1696 /* Stop currently executing firmware. */
1682 qla2x00_stop_firmware(ha); 1697 qla2x00_stop_firmware(ha);
1683 1698
@@ -1899,17 +1914,6 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
1899 } 1914 }
1900 memset(ha->init_cb, 0, ha->init_cb_size); 1915 memset(ha->init_cb, 0, ha->init_cb_size);
1901 1916
1902 /* Allocate ioctl related memory. */
1903 if (qla2x00_alloc_ioctl_mem(ha)) {
1904 qla_printk(KERN_WARNING, ha,
1905 "Memory Allocation failed - ioctl_mem\n");
1906
1907 qla2x00_mem_free(ha);
1908 msleep(100);
1909
1910 continue;
1911 }
1912
1913 if (qla2x00_allocate_sp_pool(ha)) { 1917 if (qla2x00_allocate_sp_pool(ha)) {
1914 qla_printk(KERN_WARNING, ha, 1918 qla_printk(KERN_WARNING, ha,
1915 "Memory Allocation failed - " 1919 "Memory Allocation failed - "
@@ -1972,6 +1976,26 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
1972 continue; 1976 continue;
1973 } 1977 }
1974 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt)); 1978 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
1979
1980 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
1981 /*
1982 * Get consistent memory allocated for SFP
1983 * block.
1984 */
1985 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool,
1986 GFP_KERNEL, &ha->sfp_data_dma);
1987 if (ha->sfp_data == NULL) {
1988 qla_printk(KERN_WARNING, ha,
1989 "Memory Allocation failed - "
1990 "sfp_data\n");
1991
1992 qla2x00_mem_free(ha);
1993 msleep(100);
1994
1995 continue;
1996 }
1997 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
1998 }
1975 } 1999 }
1976 2000
1977 /* Done all allocations without any error. */ 2001 /* Done all allocations without any error. */
@@ -2006,12 +2030,16 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2006 return; 2030 return;
2007 } 2031 }
2008 2032
2009 /* free ioctl memory */
2010 qla2x00_free_ioctl_mem(ha);
2011
2012 /* free sp pool */ 2033 /* free sp pool */
2013 qla2x00_free_sp_pool(ha); 2034 qla2x00_free_sp_pool(ha);
2014 2035
2036 if (ha->fw_dump) {
2037 if (ha->eft)
2038 dma_free_coherent(&ha->pdev->dev,
2039 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2040 vfree(ha->fw_dump);
2041 }
2042
2015 if (ha->sns_cmd) 2043 if (ha->sns_cmd)
2016 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2044 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2017 ha->sns_cmd, ha->sns_cmd_dma); 2045 ha->sns_cmd, ha->sns_cmd_dma);
@@ -2020,6 +2048,9 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2020 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 2048 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2021 ha->ct_sns, ha->ct_sns_dma); 2049 ha->ct_sns, ha->ct_sns_dma);
2022 2050
2051 if (ha->sfp_data)
2052 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
2053
2023 if (ha->ms_iocb) 2054 if (ha->ms_iocb)
2024 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2055 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2025 2056
@@ -2043,6 +2074,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2043 (ha->request_q_length + 1) * sizeof(request_t), 2074 (ha->request_q_length + 1) * sizeof(request_t),
2044 ha->request_ring, ha->request_dma); 2075 ha->request_ring, ha->request_dma);
2045 2076
2077 ha->eft = NULL;
2078 ha->eft_dma = 0;
2046 ha->sns_cmd = NULL; 2079 ha->sns_cmd = NULL;
2047 ha->sns_cmd_dma = 0; 2080 ha->sns_cmd_dma = 0;
2048 ha->ct_sns = NULL; 2081 ha->ct_sns = NULL;
@@ -2071,13 +2104,9 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2071 } 2104 }
2072 INIT_LIST_HEAD(&ha->fcports); 2105 INIT_LIST_HEAD(&ha->fcports);
2073 2106
2074 vfree(ha->fw_dump);
2075 vfree(ha->fw_dump_buffer);
2076
2077 ha->fw_dump = NULL; 2107 ha->fw_dump = NULL;
2078 ha->fw_dumped = 0; 2108 ha->fw_dumped = 0;
2079 ha->fw_dump_reading = 0; 2109 ha->fw_dump_reading = 0;
2080 ha->fw_dump_buffer = NULL;
2081 2110
2082 vfree(ha->optrom_buffer); 2111 vfree(ha->optrom_buffer);
2083} 2112}
@@ -2617,40 +2646,16 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
2617}; 2646};
2618MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 2647MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
2619 2648
2620static int __devinit
2621qla2xxx_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2622{
2623 return qla2x00_probe_one(pdev);
2624}
2625
2626static void __devexit
2627qla2xxx_remove_one(struct pci_dev *pdev)
2628{
2629 qla2x00_remove_one(pdev);
2630}
2631
2632static struct pci_driver qla2xxx_pci_driver = { 2649static struct pci_driver qla2xxx_pci_driver = {
2633 .name = QLA2XXX_DRIVER_NAME, 2650 .name = QLA2XXX_DRIVER_NAME,
2634 .driver = { 2651 .driver = {
2635 .owner = THIS_MODULE, 2652 .owner = THIS_MODULE,
2636 }, 2653 },
2637 .id_table = qla2xxx_pci_tbl, 2654 .id_table = qla2xxx_pci_tbl,
2638 .probe = qla2xxx_probe_one, 2655 .probe = qla2x00_probe_one,
2639 .remove = __devexit_p(qla2xxx_remove_one), 2656 .remove = __devexit_p(qla2x00_remove_one),
2640}; 2657};
2641 2658
2642static inline int
2643qla2x00_pci_module_init(void)
2644{
2645 return pci_module_init(&qla2xxx_pci_driver);
2646}
2647
2648static inline void
2649qla2x00_pci_module_exit(void)
2650{
2651 pci_unregister_driver(&qla2xxx_pci_driver);
2652}
2653
2654/** 2659/**
2655 * qla2x00_module_init - Module initialization. 2660 * qla2x00_module_init - Module initialization.
2656 **/ 2661 **/
@@ -2670,16 +2675,16 @@ qla2x00_module_init(void)
2670 2675
2671 /* Derive version string. */ 2676 /* Derive version string. */
2672 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 2677 strcpy(qla2x00_version_str, QLA2XXX_VERSION);
2673#if DEBUG_QLA2100 2678 if (extended_error_logging)
2674 strcat(qla2x00_version_str, "-debug"); 2679 strcat(qla2x00_version_str, "-debug");
2675#endif 2680
2676 qla2xxx_transport_template = 2681 qla2xxx_transport_template =
2677 fc_attach_transport(&qla2xxx_transport_functions); 2682 fc_attach_transport(&qla2xxx_transport_functions);
2678 if (!qla2xxx_transport_template) 2683 if (!qla2xxx_transport_template)
2679 return -ENODEV; 2684 return -ENODEV;
2680 2685
2681 printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n"); 2686 printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n");
2682 ret = qla2x00_pci_module_init(); 2687 ret = pci_register_driver(&qla2xxx_pci_driver);
2683 if (ret) { 2688 if (ret) {
2684 kmem_cache_destroy(srb_cachep); 2689 kmem_cache_destroy(srb_cachep);
2685 fc_release_transport(qla2xxx_transport_template); 2690 fc_release_transport(qla2xxx_transport_template);
@@ -2693,7 +2698,7 @@ qla2x00_module_init(void)
2693static void __exit 2698static void __exit
2694qla2x00_module_exit(void) 2699qla2x00_module_exit(void)
2695{ 2700{
2696 qla2x00_pci_module_exit(); 2701 pci_unregister_driver(&qla2xxx_pci_driver);
2697 qla2x00_release_firmware(); 2702 qla2x00_release_firmware();
2698 kmem_cache_destroy(srb_cachep); 2703 kmem_cache_destroy(srb_cachep);
2699 fc_release_transport(qla2xxx_transport_template); 2704 fc_release_transport(qla2xxx_transport_template);
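
The probe/remove rework above removes the qla2xxx_probe_one()/qla2xxx_remove_one() shims and the pci_module_init() wrappers, registering the real entry points directly. A minimal sketch of the resulting pattern, using the standard PCI driver API of this kernel generation (the driver name and device IDs here are illustrative, not the qla2xxx table):

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/pci.h>

	static struct pci_device_id my_pci_tbl[] = {
		{ PCI_DEVICE(0x1077, 0x2422) },	/* example: QLogic ISP2422 */
		{ 0 },
	};
	MODULE_DEVICE_TABLE(pci, my_pci_tbl);

	static int __devinit my_probe_one(struct pci_dev *pdev,
	    const struct pci_device_id *id)
	{
		/* enable the device, map BARs, register the SCSI host, ... */
		return 0;
	}

	static void __devexit my_remove_one(struct pci_dev *pdev)
	{
		/* tear down in reverse order of probe */
	}

	static struct pci_driver my_pci_driver = {
		.name		= "my_driver",
		.id_table	= my_pci_tbl,
		.probe		= my_probe_one,
		.remove		= __devexit_p(my_remove_one),
	};

	static int __init my_module_init(void)
	{
		return pci_register_driver(&my_pci_driver);
	}

	static void __exit my_module_exit(void)
	{
		pci_unregister_driver(&my_pci_driver);
	}

	module_init(my_module_init);
	module_exit(my_module_exit);
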
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 6b315521bd89..d2d683440659 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.01.05-k2" 10#define QLA2XXX_VERSION "8.01.05-k3"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index e1168860045c..9c63b00773c4 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -50,18 +50,22 @@
50#include "scsi_logging.h" 50#include "scsi_logging.h"
51#include "scsi_debug.h" 51#include "scsi_debug.h"
52 52
53#define SCSI_DEBUG_VERSION "1.75" 53#define SCSI_DEBUG_VERSION "1.79"
54static const char * scsi_debug_version_date = "20050113"; 54static const char * scsi_debug_version_date = "20060604";
55 55
56/* Additional Sense Code (ASC) used */ 56/* Additional Sense Code (ASC) used */
57#define NO_ADDED_SENSE 0x0 57#define NO_ADDITIONAL_SENSE 0x0
58#define LOGICAL_UNIT_NOT_READY 0x4
58#define UNRECOVERED_READ_ERR 0x11 59#define UNRECOVERED_READ_ERR 0x11
60#define PARAMETER_LIST_LENGTH_ERR 0x1a
59#define INVALID_OPCODE 0x20 61#define INVALID_OPCODE 0x20
60#define ADDR_OUT_OF_RANGE 0x21 62#define ADDR_OUT_OF_RANGE 0x21
61#define INVALID_FIELD_IN_CDB 0x24 63#define INVALID_FIELD_IN_CDB 0x24
64#define INVALID_FIELD_IN_PARAM_LIST 0x26
62#define POWERON_RESET 0x29 65#define POWERON_RESET 0x29
63#define SAVING_PARAMS_UNSUP 0x39 66#define SAVING_PARAMS_UNSUP 0x39
64#define THRESHHOLD_EXCEEDED 0x5d 67#define THRESHOLD_EXCEEDED 0x5d
68#define LOW_POWER_COND_ON 0x5e
65 69
66#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */ 70#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
67 71
@@ -80,6 +84,8 @@ static const char * scsi_debug_version_date = "20050113";
80#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ 84#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
81#define DEF_PTYPE 0 85#define DEF_PTYPE 0
82#define DEF_D_SENSE 0 86#define DEF_D_SENSE 0
87#define DEF_NO_LUN_0 0
88#define DEF_VIRTUAL_GB 0
83 89
84/* bit mask values for scsi_debug_opts */ 90/* bit mask values for scsi_debug_opts */
85#define SCSI_DEBUG_OPT_NOISE 1 91#define SCSI_DEBUG_OPT_NOISE 1
@@ -106,6 +112,7 @@ static const char * scsi_debug_version_date = "20050113";
106/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1) 112/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
107 * or "peripheral device" addressing (value 0) */ 113 * or "peripheral device" addressing (value 0) */
108#define SAM2_LUN_ADDRESS_METHOD 0 114#define SAM2_LUN_ADDRESS_METHOD 0
115#define SAM2_WLUN_REPORT_LUNS 0xc101
109 116
110static int scsi_debug_add_host = DEF_NUM_HOST; 117static int scsi_debug_add_host = DEF_NUM_HOST;
111static int scsi_debug_delay = DEF_DELAY; 118static int scsi_debug_delay = DEF_DELAY;
@@ -118,13 +125,16 @@ static int scsi_debug_opts = DEF_OPTS;
118static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; 125static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
119static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */ 126static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
120static int scsi_debug_dsense = DEF_D_SENSE; 127static int scsi_debug_dsense = DEF_D_SENSE;
128static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
129static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
121 130
122static int scsi_debug_cmnd_count = 0; 131static int scsi_debug_cmnd_count = 0;
123 132
124#define DEV_READONLY(TGT) (0) 133#define DEV_READONLY(TGT) (0)
125#define DEV_REMOVEABLE(TGT) (0) 134#define DEV_REMOVEABLE(TGT) (0)
126 135
127static unsigned long sdebug_store_size; /* in bytes */ 136static unsigned int sdebug_store_size; /* in bytes */
137static unsigned int sdebug_store_sectors;
128static sector_t sdebug_capacity; /* in sectors */ 138static sector_t sdebug_capacity; /* in sectors */
129 139
130/* old BIOS stuff, kernel may get rid of them but some mode sense pages 140/* old BIOS stuff, kernel may get rid of them but some mode sense pages
@@ -149,7 +159,9 @@ struct sdebug_dev_info {
149 unsigned int target; 159 unsigned int target;
150 unsigned int lun; 160 unsigned int lun;
151 struct sdebug_host_info *sdbg_host; 161 struct sdebug_host_info *sdbg_host;
162 unsigned int wlun;
152 char reset; 163 char reset;
164 char stopped;
153 char used; 165 char used;
154}; 166};
155 167
@@ -193,11 +205,11 @@ static struct scsi_host_template sdebug_driver_template = {
193 .bios_param = scsi_debug_biosparam, 205 .bios_param = scsi_debug_biosparam,
194 .can_queue = SCSI_DEBUG_CANQUEUE, 206 .can_queue = SCSI_DEBUG_CANQUEUE,
195 .this_id = 7, 207 .this_id = 7,
196 .sg_tablesize = 64, 208 .sg_tablesize = 256,
197 .cmd_per_lun = 3, 209 .cmd_per_lun = 16,
198 .max_sectors = 4096, 210 .max_sectors = 0xffff,
199 .unchecked_isa_dma = 0, 211 .unchecked_isa_dma = 0,
200 .use_clustering = DISABLE_CLUSTERING, 212 .use_clustering = ENABLE_CLUSTERING,
201 .module = THIS_MODULE, 213 .module = THIS_MODULE,
202}; 214};
203 215
@@ -225,19 +237,32 @@ static struct device_driver sdebug_driverfs_driver = {
225static const int check_condition_result = 237static const int check_condition_result =
226 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 238 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
227 239
240static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
241 0, 0, 0x2, 0x4b};
242static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
243 0, 0, 0x0, 0x0};
244
228/* function declarations */ 245/* function declarations */
229static int resp_inquiry(struct scsi_cmnd * SCpnt, int target, 246static int resp_inquiry(struct scsi_cmnd * SCpnt, int target,
230 struct sdebug_dev_info * devip); 247 struct sdebug_dev_info * devip);
231static int resp_requests(struct scsi_cmnd * SCpnt, 248static int resp_requests(struct scsi_cmnd * SCpnt,
232 struct sdebug_dev_info * devip); 249 struct sdebug_dev_info * devip);
250static int resp_start_stop(struct scsi_cmnd * scp,
251 struct sdebug_dev_info * devip);
233static int resp_readcap(struct scsi_cmnd * SCpnt, 252static int resp_readcap(struct scsi_cmnd * SCpnt,
234 struct sdebug_dev_info * devip); 253 struct sdebug_dev_info * devip);
235static int resp_mode_sense(struct scsi_cmnd * SCpnt, int target, 254static int resp_readcap16(struct scsi_cmnd * SCpnt,
255 struct sdebug_dev_info * devip);
256static int resp_mode_sense(struct scsi_cmnd * scp, int target,
236 struct sdebug_dev_info * devip); 257 struct sdebug_dev_info * devip);
237static int resp_read(struct scsi_cmnd * SCpnt, int upper_blk, int block, 258static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
238 int num, struct sdebug_dev_info * devip); 259 struct sdebug_dev_info * devip);
239static int resp_write(struct scsi_cmnd * SCpnt, int upper_blk, int block, 260static int resp_log_sense(struct scsi_cmnd * scp,
240 int num, struct sdebug_dev_info * devip); 261 struct sdebug_dev_info * devip);
262static int resp_read(struct scsi_cmnd * SCpnt, unsigned long long lba,
263 unsigned int num, struct sdebug_dev_info * devip);
264static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba,
265 unsigned int num, struct sdebug_dev_info * devip);
241static int resp_report_luns(struct scsi_cmnd * SCpnt, 266static int resp_report_luns(struct scsi_cmnd * SCpnt,
242 struct sdebug_dev_info * devip); 267 struct sdebug_dev_info * devip);
243static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, 268static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
@@ -248,8 +273,8 @@ static void timer_intr_handler(unsigned long);
248static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev); 273static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev);
249static void mk_sense_buffer(struct sdebug_dev_info * devip, int key, 274static void mk_sense_buffer(struct sdebug_dev_info * devip, int key,
250 int asc, int asq); 275 int asc, int asq);
251static int check_reset(struct scsi_cmnd * SCpnt, 276static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
252 struct sdebug_dev_info * devip); 277 struct sdebug_dev_info * devip);
253static int schedule_resp(struct scsi_cmnd * cmnd, 278static int schedule_resp(struct scsi_cmnd * cmnd,
254 struct sdebug_dev_info * devip, 279 struct sdebug_dev_info * devip,
255 done_funct_t done, int scsi_result, int delta_jiff); 280 done_funct_t done, int scsi_result, int delta_jiff);
@@ -257,8 +282,10 @@ static void __init sdebug_build_parts(unsigned char * ramp);
257static void __init init_all_queued(void); 282static void __init init_all_queued(void);
258static void stop_all_queued(void); 283static void stop_all_queued(void);
259static int stop_queued_cmnd(struct scsi_cmnd * cmnd); 284static int stop_queued_cmnd(struct scsi_cmnd * cmnd);
260static int inquiry_evpd_83(unsigned char * arr, int dev_id_num, 285static int inquiry_evpd_83(unsigned char * arr, int target_dev_id,
261 const char * dev_id_str, int dev_id_str_len); 286 int dev_id_num, const char * dev_id_str,
287 int dev_id_str_len);
288static int inquiry_evpd_88(unsigned char * arr, int target_dev_id);
262static void do_create_driverfs_files(void); 289static void do_create_driverfs_files(void);
263static void do_remove_driverfs_files(void); 290static void do_remove_driverfs_files(void);
264 291
@@ -274,18 +301,22 @@ static
274int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done) 301int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
275{ 302{
276 unsigned char *cmd = (unsigned char *) SCpnt->cmnd; 303 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
277 int block, upper_blk, num, k; 304 int len, k, j;
305 unsigned int num;
306 unsigned long long lba;
278 int errsts = 0; 307 int errsts = 0;
279 int target = scmd_id(SCpnt); 308 int target = SCpnt->device->id;
280 struct sdebug_dev_info * devip = NULL; 309 struct sdebug_dev_info * devip = NULL;
281 int inj_recovered = 0; 310 int inj_recovered = 0;
311 int delay_override = 0;
282 312
283 if (done == NULL) 313 if (done == NULL)
284 return 0; /* assume mid level reprocessing command */ 314 return 0; /* assume mid level reprocessing command */
285 315
316 SCpnt->resid = 0;
286 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { 317 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
287 printk(KERN_INFO "scsi_debug: cmd "); 318 printk(KERN_INFO "scsi_debug: cmd ");
288 for (k = 0, num = SCpnt->cmd_len; k < num; ++k) 319 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
289 printk("%02x ", (int)cmd[k]); 320 printk("%02x ", (int)cmd[k]);
290 printk("\n"); 321 printk("\n");
291 } 322 }
@@ -296,7 +327,8 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
296 DID_NO_CONNECT << 16, 0); 327 DID_NO_CONNECT << 16, 0);
297 } 328 }
298 329
299 if (SCpnt->device->lun >= scsi_debug_max_luns) 330 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
331 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
300 return schedule_resp(SCpnt, NULL, done, 332 return schedule_resp(SCpnt, NULL, done,
301 DID_NO_CONNECT << 16, 0); 333 DID_NO_CONNECT << 16, 0);
302 devip = devInfoReg(SCpnt->device); 334 devip = devInfoReg(SCpnt->device);
@@ -315,118 +347,150 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
315 inj_recovered = 1; /* to reads and writes below */ 347 inj_recovered = 1; /* to reads and writes below */
316 } 348 }
317 349
350 if (devip->wlun) {
351 switch (*cmd) {
352 case INQUIRY:
353 case REQUEST_SENSE:
354 case TEST_UNIT_READY:
355 case REPORT_LUNS:
356 break; /* only allowable wlun commands */
357 default:
358 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
359 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
360 "not supported for wlun\n", *cmd);
361 mk_sense_buffer(devip, ILLEGAL_REQUEST,
362 INVALID_OPCODE, 0);
363 errsts = check_condition_result;
364 return schedule_resp(SCpnt, devip, done, errsts,
365 0);
366 }
367 }
368
318 switch (*cmd) { 369 switch (*cmd) {
319 case INQUIRY: /* mandatory, ignore unit attention */ 370 case INQUIRY: /* mandatory, ignore unit attention */
371 delay_override = 1;
320 errsts = resp_inquiry(SCpnt, target, devip); 372 errsts = resp_inquiry(SCpnt, target, devip);
321 break; 373 break;
322 case REQUEST_SENSE: /* mandatory, ignore unit attention */ 374 case REQUEST_SENSE: /* mandatory, ignore unit attention */
375 delay_override = 1;
323 errsts = resp_requests(SCpnt, devip); 376 errsts = resp_requests(SCpnt, devip);
324 break; 377 break;
325 case REZERO_UNIT: /* actually this is REWIND for SSC */ 378 case REZERO_UNIT: /* actually this is REWIND for SSC */
326 case START_STOP: 379 case START_STOP:
327 errsts = check_reset(SCpnt, devip); 380 errsts = resp_start_stop(SCpnt, devip);
328 break; 381 break;
329 case ALLOW_MEDIUM_REMOVAL: 382 case ALLOW_MEDIUM_REMOVAL:
330 if ((errsts = check_reset(SCpnt, devip))) 383 if ((errsts = check_readiness(SCpnt, 1, devip)))
331 break; 384 break;
332 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 385 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
333 printk(KERN_INFO "scsi_debug: Medium removal %s\n", 386 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
334 cmd[4] ? "inhibited" : "enabled"); 387 cmd[4] ? "inhibited" : "enabled");
335 break; 388 break;
336 case SEND_DIAGNOSTIC: /* mandatory */ 389 case SEND_DIAGNOSTIC: /* mandatory */
337 errsts = check_reset(SCpnt, devip); 390 errsts = check_readiness(SCpnt, 1, devip);
338 break; 391 break;
339 case TEST_UNIT_READY: /* mandatory */ 392 case TEST_UNIT_READY: /* mandatory */
340 errsts = check_reset(SCpnt, devip); 393 delay_override = 1;
394 errsts = check_readiness(SCpnt, 0, devip);
341 break; 395 break;
342 case RESERVE: 396 case RESERVE:
343 errsts = check_reset(SCpnt, devip); 397 errsts = check_readiness(SCpnt, 1, devip);
344 break; 398 break;
345 case RESERVE_10: 399 case RESERVE_10:
346 errsts = check_reset(SCpnt, devip); 400 errsts = check_readiness(SCpnt, 1, devip);
347 break; 401 break;
348 case RELEASE: 402 case RELEASE:
349 errsts = check_reset(SCpnt, devip); 403 errsts = check_readiness(SCpnt, 1, devip);
350 break; 404 break;
351 case RELEASE_10: 405 case RELEASE_10:
352 errsts = check_reset(SCpnt, devip); 406 errsts = check_readiness(SCpnt, 1, devip);
353 break; 407 break;
354 case READ_CAPACITY: 408 case READ_CAPACITY:
355 errsts = resp_readcap(SCpnt, devip); 409 errsts = resp_readcap(SCpnt, devip);
356 break; 410 break;
411 case SERVICE_ACTION_IN:
412 if (SAI_READ_CAPACITY_16 != cmd[1]) {
413 mk_sense_buffer(devip, ILLEGAL_REQUEST,
414 INVALID_OPCODE, 0);
415 errsts = check_condition_result;
416 break;
417 }
418 errsts = resp_readcap16(SCpnt, devip);
419 break;
357 case READ_16: 420 case READ_16:
358 case READ_12: 421 case READ_12:
359 case READ_10: 422 case READ_10:
360 case READ_6: 423 case READ_6:
361 if ((errsts = check_reset(SCpnt, devip))) 424 if ((errsts = check_readiness(SCpnt, 0, devip)))
362 break; 425 break;
363 upper_blk = 0;
364 if ((*cmd) == READ_16) { 426 if ((*cmd) == READ_16) {
365 upper_blk = cmd[5] + (cmd[4] << 8) + 427 for (lba = 0, j = 0; j < 8; ++j) {
366 (cmd[3] << 16) + (cmd[2] << 24); 428 if (j > 0)
367 block = cmd[9] + (cmd[8] << 8) + 429 lba <<= 8;
368 (cmd[7] << 16) + (cmd[6] << 24); 430 lba += cmd[2 + j];
431 }
369 num = cmd[13] + (cmd[12] << 8) + 432 num = cmd[13] + (cmd[12] << 8) +
370 (cmd[11] << 16) + (cmd[10] << 24); 433 (cmd[11] << 16) + (cmd[10] << 24);
371 } else if ((*cmd) == READ_12) { 434 } else if ((*cmd) == READ_12) {
372 block = cmd[5] + (cmd[4] << 8) + 435 lba = cmd[5] + (cmd[4] << 8) +
373 (cmd[3] << 16) + (cmd[2] << 24); 436 (cmd[3] << 16) + (cmd[2] << 24);
374 num = cmd[9] + (cmd[8] << 8) + 437 num = cmd[9] + (cmd[8] << 8) +
375 (cmd[7] << 16) + (cmd[6] << 24); 438 (cmd[7] << 16) + (cmd[6] << 24);
376 } else if ((*cmd) == READ_10) { 439 } else if ((*cmd) == READ_10) {
377 block = cmd[5] + (cmd[4] << 8) + 440 lba = cmd[5] + (cmd[4] << 8) +
378 (cmd[3] << 16) + (cmd[2] << 24); 441 (cmd[3] << 16) + (cmd[2] << 24);
379 num = cmd[8] + (cmd[7] << 8); 442 num = cmd[8] + (cmd[7] << 8);
380 } else { 443 } else { /* READ (6) */
381 block = cmd[3] + (cmd[2] << 8) + 444 lba = cmd[3] + (cmd[2] << 8) +
382 ((cmd[1] & 0x1f) << 16); 445 ((cmd[1] & 0x1f) << 16);
383 num = cmd[4]; 446 num = (0 == cmd[4]) ? 256 : cmd[4];
384 } 447 }
385 errsts = resp_read(SCpnt, upper_blk, block, num, devip); 448 errsts = resp_read(SCpnt, lba, num, devip);
386 if (inj_recovered && (0 == errsts)) { 449 if (inj_recovered && (0 == errsts)) {
387 mk_sense_buffer(devip, RECOVERED_ERROR, 450 mk_sense_buffer(devip, RECOVERED_ERROR,
388 THRESHHOLD_EXCEEDED, 0); 451 THRESHOLD_EXCEEDED, 0);
389 errsts = check_condition_result; 452 errsts = check_condition_result;
390 } 453 }
391 break; 454 break;
392 case REPORT_LUNS: /* mandatory, ignore unit attention */ 455 case REPORT_LUNS: /* mandatory, ignore unit attention */
456 delay_override = 1;
393 errsts = resp_report_luns(SCpnt, devip); 457 errsts = resp_report_luns(SCpnt, devip);
394 break; 458 break;
395 case VERIFY: /* 10 byte SBC-2 command */ 459 case VERIFY: /* 10 byte SBC-2 command */
396 errsts = check_reset(SCpnt, devip); 460 errsts = check_readiness(SCpnt, 0, devip);
397 break; 461 break;
398 case WRITE_16: 462 case WRITE_16:
399 case WRITE_12: 463 case WRITE_12:
400 case WRITE_10: 464 case WRITE_10:
401 case WRITE_6: 465 case WRITE_6:
402 if ((errsts = check_reset(SCpnt, devip))) 466 if ((errsts = check_readiness(SCpnt, 0, devip)))
403 break; 467 break;
404 upper_blk = 0;
405 if ((*cmd) == WRITE_16) { 468 if ((*cmd) == WRITE_16) {
406 upper_blk = cmd[5] + (cmd[4] << 8) + 469 for (lba = 0, j = 0; j < 8; ++j) {
407 (cmd[3] << 16) + (cmd[2] << 24); 470 if (j > 0)
408 block = cmd[9] + (cmd[8] << 8) + 471 lba <<= 8;
409 (cmd[7] << 16) + (cmd[6] << 24); 472 lba += cmd[2 + j];
473 }
410 num = cmd[13] + (cmd[12] << 8) + 474 num = cmd[13] + (cmd[12] << 8) +
411 (cmd[11] << 16) + (cmd[10] << 24); 475 (cmd[11] << 16) + (cmd[10] << 24);
412 } else if ((*cmd) == WRITE_12) { 476 } else if ((*cmd) == WRITE_12) {
413 block = cmd[5] + (cmd[4] << 8) + 477 lba = cmd[5] + (cmd[4] << 8) +
414 (cmd[3] << 16) + (cmd[2] << 24); 478 (cmd[3] << 16) + (cmd[2] << 24);
415 num = cmd[9] + (cmd[8] << 8) + 479 num = cmd[9] + (cmd[8] << 8) +
416 (cmd[7] << 16) + (cmd[6] << 24); 480 (cmd[7] << 16) + (cmd[6] << 24);
417 } else if ((*cmd) == WRITE_10) { 481 } else if ((*cmd) == WRITE_10) {
418 block = cmd[5] + (cmd[4] << 8) + 482 lba = cmd[5] + (cmd[4] << 8) +
419 (cmd[3] << 16) + (cmd[2] << 24); 483 (cmd[3] << 16) + (cmd[2] << 24);
420 num = cmd[8] + (cmd[7] << 8); 484 num = cmd[8] + (cmd[7] << 8);
421 } else { 485 } else { /* WRITE (6) */
422 block = cmd[3] + (cmd[2] << 8) + 486 lba = cmd[3] + (cmd[2] << 8) +
423 ((cmd[1] & 0x1f) << 16); 487 ((cmd[1] & 0x1f) << 16);
424 num = cmd[4]; 488 num = (0 == cmd[4]) ? 256 : cmd[4];
425 } 489 }
426 errsts = resp_write(SCpnt, upper_blk, block, num, devip); 490 errsts = resp_write(SCpnt, lba, num, devip);
427 if (inj_recovered && (0 == errsts)) { 491 if (inj_recovered && (0 == errsts)) {
428 mk_sense_buffer(devip, RECOVERED_ERROR, 492 mk_sense_buffer(devip, RECOVERED_ERROR,
429 THRESHHOLD_EXCEEDED, 0); 493 THRESHOLD_EXCEEDED, 0);
430 errsts = check_condition_result; 494 errsts = check_condition_result;
431 } 495 }
432 break; 496 break;
@@ -434,20 +498,31 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
434 case MODE_SENSE_10: 498 case MODE_SENSE_10:
435 errsts = resp_mode_sense(SCpnt, target, devip); 499 errsts = resp_mode_sense(SCpnt, target, devip);
436 break; 500 break;
501 case MODE_SELECT:
502 errsts = resp_mode_select(SCpnt, 1, devip);
503 break;
504 case MODE_SELECT_10:
505 errsts = resp_mode_select(SCpnt, 0, devip);
506 break;
507 case LOG_SENSE:
508 errsts = resp_log_sense(SCpnt, devip);
509 break;
437 case SYNCHRONIZE_CACHE: 510 case SYNCHRONIZE_CACHE:
438 errsts = check_reset(SCpnt, devip); 511 delay_override = 1;
512 errsts = check_readiness(SCpnt, 0, devip);
439 break; 513 break;
440 default: 514 default:
441 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 515 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
442 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not " 516 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
443 "supported\n", *cmd); 517 "supported\n", *cmd);
444 if ((errsts = check_reset(SCpnt, devip))) 518 if ((errsts = check_readiness(SCpnt, 1, devip)))
445 break; /* Unit attention takes precedence */ 519 break; /* Unit attention takes precedence */
446 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); 520 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
447 errsts = check_condition_result; 521 errsts = check_condition_result;
448 break; 522 break;
449 } 523 }
450 return schedule_resp(SCpnt, devip, done, errsts, scsi_debug_delay); 524 return schedule_resp(SCpnt, devip, done, errsts,
525 (delay_override ? 0 : scsi_debug_delay));
451} 526}
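
The READ/WRITE rework above replaces the old upper_blk/block pair with a single 64-bit lba, assembled from the CDB one big-endian byte at a time. A standalone sketch of the same decode follows; the CDB bytes are made up for illustration:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* READ(16): bytes 2..9 hold the LBA, bytes 10..13 the block
		 * count, both big-endian, exactly as the loop above decodes. */
		unsigned char cmd[16] = { 0x88, 0,
			0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10,	/* LBA */
			0x00, 0x00, 0x00, 0x08,					/* count */
			0, 0 };
		unsigned long long lba = 0;
		unsigned int num, j;

		for (j = 0; j < 8; ++j)
			lba = (lba << 8) + cmd[2 + j];
		num = cmd[13] + (cmd[12] << 8) + (cmd[11] << 16) + (cmd[10] << 24);

		printf("lba=0x%llx num=%u\n", lba, num);
		/* prints: lba=0x100000010 num=8 */
		return 0;
	}

The companion change for the 6-byte CDBs, num = (0 == cmd[4]) ? 256 : cmd[4], encodes the SBC rule that a transfer length of zero in READ(6)/WRITE(6) means 256 blocks.
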
452 527
453static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 528static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
@@ -459,7 +534,8 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
459 /* return -ENOTTY; // correct return but upsets fdisk */ 534 /* return -ENOTTY; // correct return but upsets fdisk */
460} 535}
461 536
462static int check_reset(struct scsi_cmnd * SCpnt, struct sdebug_dev_info * devip) 537static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
538 struct sdebug_dev_info * devip)
463{ 539{
464 if (devip->reset) { 540 if (devip->reset) {
465 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 541 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
@@ -469,6 +545,14 @@ static int check_reset(struct scsi_cmnd * SCpnt, struct sdebug_dev_info * devip)
469 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0); 545 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
470 return check_condition_result; 546 return check_condition_result;
471 } 547 }
548 if ((0 == reset_only) && devip->stopped) {
549 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
550 printk(KERN_INFO "scsi_debug: Reporting Not "
551 "ready: initializing command required\n");
552 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
553 0x2);
554 return check_condition_result;
555 }
472 return 0; 556 return 0;
473} 557}
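
check_readiness() generalizes the old check_reset(): with reset_only=1 it only reports the power-on/reset unit attention, while reset_only=0 additionally fails commands with NOT READY while the device is stopped (devip->stopped is toggled by resp_start_stop() further down). A sketch of the calling pattern the handlers in this patch follow (resp_example is hypothetical):

    static int resp_example(struct scsi_cmnd *scp,
                            struct sdebug_dev_info *devip)
    {
            int errsts;

            /* 1 => reset_only: no media access, so only report the
             * unit attention; media-access opcodes pass 0 instead,
             * making a stopped unit fail with NOT READY */
            if ((errsts = check_readiness(scp, 1, devip)))
                    return errsts;
            /* ... build the response, then fill_from_dev_buffer() ... */
            return 0;
    }
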
474 558
@@ -492,7 +576,10 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
492 req_len = scp->request_bufflen; 576 req_len = scp->request_bufflen;
493 act_len = (req_len < arr_len) ? req_len : arr_len; 577 act_len = (req_len < arr_len) ? req_len : arr_len;
494 memcpy(scp->request_buffer, arr, act_len); 578 memcpy(scp->request_buffer, arr, act_len);
495 scp->resid = req_len - act_len; 579 if (scp->resid)
580 scp->resid -= act_len;
581 else
582 scp->resid = req_len - act_len;
496 return 0; 583 return 0;
497 } 584 }
498 sgpnt = (struct scatterlist *)scp->request_buffer; 585 sgpnt = (struct scatterlist *)scp->request_buffer;
@@ -515,7 +602,10 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
515 } 602 }
516 req_len += sgpnt->length; 603 req_len += sgpnt->length;
517 } 604 }
518 scp->resid = req_len - act_len; 605 if (scp->resid)
606 scp->resid -= act_len;
607 else
608 scp->resid = req_len - act_len;
519 return 0; 609 return 0;
520} 610}
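
The resid change allows fill_from_dev_buffer() to be called more than once for a single command (the wrap-around reads in resp_read() below do exactly that): the first call establishes the residual from the request length, and later calls only subtract what they actually copied. A worked example, assuming a 1024-byte request served by two 512-byte copies:

    /* 1st call: scp->resid == 0   -> resid = 1024 - 512 = 512
     * 2nd call: scp->resid == 512 -> resid = 512 - 512 = 0
     * (a single short copy of 400 bytes would leave resid == 624) */
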
521 611
@@ -566,12 +656,14 @@ static const char * inq_vendor_id = "Linux ";
566static const char * inq_product_id = "scsi_debug "; 656static const char * inq_product_id = "scsi_debug ";
567static const char * inq_product_rev = "0004"; 657static const char * inq_product_rev = "0004";
568 658
569static int inquiry_evpd_83(unsigned char * arr, int dev_id_num, 659static int inquiry_evpd_83(unsigned char * arr, int target_dev_id,
570 const char * dev_id_str, int dev_id_str_len) 660 int dev_id_num, const char * dev_id_str,
661 int dev_id_str_len)
571{ 662{
572 int num; 663 int num, port_a;
664 char b[32];
573 665
574 /* Two identification descriptors: */ 666 port_a = target_dev_id + 1;
575 /* T10 vendor identifier field format (faked) */ 667 /* T10 vendor identifier field format (faked) */
576 arr[0] = 0x2; /* ASCII */ 668 arr[0] = 0x2; /* ASCII */
577 arr[1] = 0x1; 669 arr[1] = 0x1;
@@ -582,25 +674,246 @@ static int inquiry_evpd_83(unsigned char * arr, int dev_id_num,
582 num = 8 + 16 + dev_id_str_len; 674 num = 8 + 16 + dev_id_str_len;
583 arr[3] = num; 675 arr[3] = num;
584 num += 4; 676 num += 4;
585 /* NAA IEEE registered identifier (faked) */ 677 if (dev_id_num >= 0) {
586 arr[num] = 0x1; /* binary */ 678 /* NAA-5, Logical unit identifier (binary) */
587 arr[num + 1] = 0x3; 679 arr[num++] = 0x1; /* binary (not necessarily sas) */
588 arr[num + 2] = 0x0; 680 arr[num++] = 0x3; /* PIV=0, lu, naa */
589 arr[num + 3] = 0x8; 681 arr[num++] = 0x0;
590 arr[num + 4] = 0x51; /* ieee company id=0x123456 (faked) */ 682 arr[num++] = 0x8;
591 arr[num + 5] = 0x23; 683 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
592 arr[num + 6] = 0x45; 684 arr[num++] = 0x33;
593 arr[num + 7] = 0x60; 685 arr[num++] = 0x33;
594 arr[num + 8] = (dev_id_num >> 24); 686 arr[num++] = 0x30;
595 arr[num + 9] = (dev_id_num >> 16) & 0xff; 687 arr[num++] = (dev_id_num >> 24);
596 arr[num + 10] = (dev_id_num >> 8) & 0xff; 688 arr[num++] = (dev_id_num >> 16) & 0xff;
597 arr[num + 11] = dev_id_num & 0xff; 689 arr[num++] = (dev_id_num >> 8) & 0xff;
598 return num + 12; 690 arr[num++] = dev_id_num & 0xff;
691 /* Target relative port number */
692 arr[num++] = 0x61; /* proto=sas, binary */
693 arr[num++] = 0x94; /* PIV=1, target port, rel port */
694 arr[num++] = 0x0; /* reserved */
695 arr[num++] = 0x4; /* length */
696 arr[num++] = 0x0; /* reserved */
697 arr[num++] = 0x0; /* reserved */
698 arr[num++] = 0x0;
699 arr[num++] = 0x1; /* relative port A */
700 }
701 /* NAA-5, Target port identifier */
702 arr[num++] = 0x61; /* proto=sas, binary */
703 arr[num++] = 0x93; /* piv=1, target port, naa */
704 arr[num++] = 0x0;
705 arr[num++] = 0x8;
706 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
707 arr[num++] = 0x22;
708 arr[num++] = 0x22;
709 arr[num++] = 0x20;
710 arr[num++] = (port_a >> 24);
711 arr[num++] = (port_a >> 16) & 0xff;
712 arr[num++] = (port_a >> 8) & 0xff;
713 arr[num++] = port_a & 0xff;
714 /* NAA-5, Target device identifier */
715 arr[num++] = 0x61; /* proto=sas, binary */
716 arr[num++] = 0xa3; /* piv=1, target device, naa */
717 arr[num++] = 0x0;
718 arr[num++] = 0x8;
719 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
720 arr[num++] = 0x22;
721 arr[num++] = 0x22;
722 arr[num++] = 0x20;
723 arr[num++] = (target_dev_id >> 24);
724 arr[num++] = (target_dev_id >> 16) & 0xff;
725 arr[num++] = (target_dev_id >> 8) & 0xff;
726 arr[num++] = target_dev_id & 0xff;
727 /* SCSI name string: Target device identifier */
728 arr[num++] = 0x63; /* proto=sas, UTF-8 */
729 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
730 arr[num++] = 0x0;
731 arr[num++] = 24;
732 memcpy(arr + num, "naa.52222220", 12);
733 num += 12;
734 snprintf(b, sizeof(b), "%08X", target_dev_id);
735 memcpy(arr + num, b, 8);
736 num += 8;
737 memset(arr + num, 0, 4);
738 num += 4;
739 return num;
740}
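
All of the NAA-5 identifiers above share one 8-byte layout: a 4-bit NAA field (5), a 24-bit IEEE company id, and a 36-bit vendor-specific id. The hard-coded prefixes encode fake company ids (0x333333 for the logical unit, 0x222222 for the ports and target device), with the generated number appended big-endian. An illustrative helper for the 0x222222 case (put_naa5 is not in the driver):

    /* NAA-5: [4-bit NAA=5][24-bit company id][36-bit vendor-specific id] */
    static void put_naa5(unsigned char *p, unsigned int id)
    {
            p[0] = 0x52;    /* NAA=5, company id 0x222222 starts here... */
            p[1] = 0x22;
            p[2] = 0x22;
            p[3] = 0x20;    /* ...low nibble is vendor id bits 35..32 (0) */
            p[4] = (id >> 24) & 0xff;
            p[5] = (id >> 16) & 0xff;
            p[6] = (id >> 8) & 0xff;
            p[7] = id & 0xff;
    }
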
741
742
743static unsigned char vpd84_data[] = {
744/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
745 0x22,0x22,0x22,0x0,0xbb,0x1,
746 0x22,0x22,0x22,0x0,0xbb,0x2,
747};
748
749static int inquiry_evpd_84(unsigned char * arr)
750{
751 memcpy(arr, vpd84_data, sizeof(vpd84_data));
752 return sizeof(vpd84_data);
753}
754
755static int inquiry_evpd_85(unsigned char * arr)
756{
757 int num = 0;
758 const char * na1 = "https://www.kernel.org/config";
759 const char * na2 = "http://www.kernel.org/log";
760 int plen, olen;
761
762 arr[num++] = 0x1; /* lu, storage config */
763 arr[num++] = 0x0; /* reserved */
764 arr[num++] = 0x0;
765 olen = strlen(na1);
766 plen = olen + 1;
767 if (plen % 4)
768 plen = ((plen / 4) + 1) * 4;
 769 arr[num++] = plen; /* length, null terminated, padded */
770 memcpy(arr + num, na1, olen);
771 memset(arr + num + olen, 0, plen - olen);
772 num += plen;
773
774 arr[num++] = 0x4; /* lu, logging */
775 arr[num++] = 0x0; /* reserved */
776 arr[num++] = 0x0;
777 olen = strlen(na2);
778 plen = olen + 1;
779 if (plen % 4)
780 plen = ((plen / 4) + 1) * 4;
781 arr[num++] = plen; /* length, null terminated, padded */
782 memcpy(arr + num, na2, olen);
783 memset(arr + num + olen, 0, plen - olen);
784 num += plen;
785
786 return num;
787}
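
The padding arithmetic in inquiry_evpd_85() rounds each null-terminated URL up to a multiple of four bytes, as the management network addresses page requires. The same rounding can be written in one line; for the 29-character na1 string, olen = 29 gives plen = 32 either way:

    plen = (olen + 1 + 3) & ~3;   /* round len+NUL up to a multiple of 4 */
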
788
789/* SCSI ports VPD page */
790static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
791{
792 int num = 0;
793 int port_a, port_b;
794
795 port_a = target_dev_id + 1;
796 port_b = port_a + 1;
797 arr[num++] = 0x0; /* reserved */
798 arr[num++] = 0x0; /* reserved */
799 arr[num++] = 0x0;
800 arr[num++] = 0x1; /* relative port 1 (primary) */
801 memset(arr + num, 0, 6);
802 num += 6;
803 arr[num++] = 0x0;
804 arr[num++] = 12; /* length tp descriptor */
805 /* naa-5 target port identifier (A) */
806 arr[num++] = 0x61; /* proto=sas, binary */
807 arr[num++] = 0x93; /* PIV=1, target port, NAA */
808 arr[num++] = 0x0; /* reserved */
809 arr[num++] = 0x8; /* length */
810 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
811 arr[num++] = 0x22;
812 arr[num++] = 0x22;
813 arr[num++] = 0x20;
814 arr[num++] = (port_a >> 24);
815 arr[num++] = (port_a >> 16) & 0xff;
816 arr[num++] = (port_a >> 8) & 0xff;
817 arr[num++] = port_a & 0xff;
818
819 arr[num++] = 0x0; /* reserved */
820 arr[num++] = 0x0; /* reserved */
821 arr[num++] = 0x0;
822 arr[num++] = 0x2; /* relative port 2 (secondary) */
823 memset(arr + num, 0, 6);
824 num += 6;
825 arr[num++] = 0x0;
826 arr[num++] = 12; /* length tp descriptor */
827 /* naa-5 target port identifier (B) */
828 arr[num++] = 0x61; /* proto=sas, binary */
829 arr[num++] = 0x93; /* PIV=1, target port, NAA */
830 arr[num++] = 0x0; /* reserved */
831 arr[num++] = 0x8; /* length */
832 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
833 arr[num++] = 0x22;
834 arr[num++] = 0x22;
835 arr[num++] = 0x20;
836 arr[num++] = (port_b >> 24);
837 arr[num++] = (port_b >> 16) & 0xff;
838 arr[num++] = (port_b >> 8) & 0xff;
839 arr[num++] = port_b & 0xff;
840
841 return num;
842}
843
844
845static unsigned char vpd89_data[] = {
846/* from 4th byte */ 0,0,0,0,
847'l','i','n','u','x',' ',' ',' ',
848'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
849'1','2','3','4',
8500x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
8510xec,0,0,0,
8520x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
8530,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
8540x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
8550x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
8560x53,0x41,
8570x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
8580x20,0x20,
8590x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
8600x10,0x80,
8610,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
8620x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
8630x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
8640,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
8650x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
8660x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
8670,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
8680,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8690,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8700,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8710x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
8720,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
8730xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
8740,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
8750,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8760,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8770,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8780,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8790,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8800,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8810,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8820,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8830,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8840,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8850,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
8860,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
887};
888
889static int inquiry_evpd_89(unsigned char * arr)
890{
891 memcpy(arr, vpd89_data, sizeof(vpd89_data));
892 return sizeof(vpd89_data);
893}
894
895
896static unsigned char vpdb0_data[] = {
897 /* from 4th byte */ 0,0,0,4,
898 0,0,0x4,0,
899 0,0,0,64,
900};
901
902static int inquiry_evpd_b0(unsigned char * arr)
903{
904 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
905 if (sdebug_store_sectors > 0x400) {
906 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
907 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
908 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
909 arr[7] = sdebug_store_sectors & 0xff;
910 }
911 return sizeof(vpdb0_data);
599} 912}
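
The Block limits page advertises the cap that resp_read()/resp_write() enforce further down: since resp_inquiry() passes &arr[4], the arr[4..7] patched here are bytes 8..11 of the full page, the MAXIMUM TRANSFER LENGTH in blocks, defaulting to 0x400 and raised to the ramdisk size when the store is larger. Reading it back out of a complete page buffer (page is assumed to point at byte 0 of the VPD page):

    unsigned int max_xfer = (page[8] << 24) | (page[9] << 16) |
                            (page[10] << 8) | page[11];   /* in blocks */
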
600 913
601 914
602#define SDEBUG_LONG_INQ_SZ 96 915#define SDEBUG_LONG_INQ_SZ 96
603#define SDEBUG_MAX_INQ_ARR_SZ 128 916#define SDEBUG_MAX_INQ_ARR_SZ 584
604 917
605static int resp_inquiry(struct scsi_cmnd * scp, int target, 918static int resp_inquiry(struct scsi_cmnd * scp, int target,
606 struct sdebug_dev_info * devip) 919 struct sdebug_dev_info * devip)
@@ -608,64 +921,113 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
608 unsigned char pq_pdt; 921 unsigned char pq_pdt;
609 unsigned char arr[SDEBUG_MAX_INQ_ARR_SZ]; 922 unsigned char arr[SDEBUG_MAX_INQ_ARR_SZ];
610 unsigned char *cmd = (unsigned char *)scp->cmnd; 923 unsigned char *cmd = (unsigned char *)scp->cmnd;
611 int alloc_len; 924 int alloc_len, n;
612 925
613 alloc_len = (cmd[3] << 8) + cmd[4]; 926 alloc_len = (cmd[3] << 8) + cmd[4];
614 memset(arr, 0, SDEBUG_MAX_INQ_ARR_SZ); 927 memset(arr, 0, SDEBUG_MAX_INQ_ARR_SZ);
615 pq_pdt = (scsi_debug_ptype & 0x1f); 928 if (devip->wlun)
929 pq_pdt = 0x1e; /* present, wlun */
930 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
931 pq_pdt = 0x7f; /* not present, no device type */
932 else
933 pq_pdt = (scsi_debug_ptype & 0x1f);
616 arr[0] = pq_pdt; 934 arr[0] = pq_pdt;
617 if (0x2 & cmd[1]) { /* CMDDT bit set */ 935 if (0x2 & cmd[1]) { /* CMDDT bit set */
618 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 936 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
619 0); 937 0);
620 return check_condition_result; 938 return check_condition_result;
621 } else if (0x1 & cmd[1]) { /* EVPD bit set */ 939 } else if (0x1 & cmd[1]) { /* EVPD bit set */
622 int dev_id_num, len; 940 int lu_id_num, target_dev_id, len;
623 char dev_id_str[6]; 941 char lu_id_str[6];
942 int host_no = devip->sdbg_host->shost->host_no;
624 943
625 dev_id_num = ((devip->sdbg_host->shost->host_no + 1) * 2000) + 944 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
626 (devip->target * 1000) + devip->lun; 945 (devip->target * 1000) + devip->lun);
627 len = scnprintf(dev_id_str, 6, "%d", dev_id_num); 946 target_dev_id = ((host_no + 1) * 2000) +
947 (devip->target * 1000) - 3;
948 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
628 if (0 == cmd[2]) { /* supported vital product data pages */ 949 if (0 == cmd[2]) { /* supported vital product data pages */
629 arr[3] = 3; 950 arr[1] = cmd[2]; /*sanity */
630 arr[4] = 0x0; /* this page */ 951 n = 4;
631 arr[5] = 0x80; /* unit serial number */ 952 arr[n++] = 0x0; /* this page */
632 arr[6] = 0x83; /* device identification */ 953 arr[n++] = 0x80; /* unit serial number */
954 arr[n++] = 0x83; /* device identification */
955 arr[n++] = 0x84; /* software interface ident. */
956 arr[n++] = 0x85; /* management network addresses */
957 arr[n++] = 0x86; /* extended inquiry */
958 arr[n++] = 0x87; /* mode page policy */
959 arr[n++] = 0x88; /* SCSI ports */
960 arr[n++] = 0x89; /* ATA information */
961 arr[n++] = 0xb0; /* Block limits (SBC) */
962 arr[3] = n - 4; /* number of supported VPD pages */
633 } else if (0x80 == cmd[2]) { /* unit serial number */ 963 } else if (0x80 == cmd[2]) { /* unit serial number */
634 arr[1] = 0x80; 964 arr[1] = cmd[2]; /*sanity */
635 arr[3] = len; 965 arr[3] = len;
636 memcpy(&arr[4], dev_id_str, len); 966 memcpy(&arr[4], lu_id_str, len);
637 } else if (0x83 == cmd[2]) { /* device identification */ 967 } else if (0x83 == cmd[2]) { /* device identification */
638 arr[1] = 0x83; 968 arr[1] = cmd[2]; /*sanity */
639 arr[3] = inquiry_evpd_83(&arr[4], dev_id_num, 969 arr[3] = inquiry_evpd_83(&arr[4], target_dev_id,
640 dev_id_str, len); 970 lu_id_num, lu_id_str, len);
971 } else if (0x84 == cmd[2]) { /* Software interface ident. */
972 arr[1] = cmd[2]; /*sanity */
973 arr[3] = inquiry_evpd_84(&arr[4]);
974 } else if (0x85 == cmd[2]) { /* Management network addresses */
975 arr[1] = cmd[2]; /*sanity */
976 arr[3] = inquiry_evpd_85(&arr[4]);
977 } else if (0x86 == cmd[2]) { /* extended inquiry */
978 arr[1] = cmd[2]; /*sanity */
979 arr[3] = 0x3c; /* number of following entries */
980 arr[4] = 0x0; /* no protection stuff */
981 arr[5] = 0x7; /* head of q, ordered + simple q's */
982 } else if (0x87 == cmd[2]) { /* mode page policy */
983 arr[1] = cmd[2]; /*sanity */
984 arr[3] = 0x8; /* number of following entries */
985 arr[4] = 0x2; /* disconnect-reconnect mp */
986 arr[6] = 0x80; /* mlus, shared */
987 arr[8] = 0x18; /* protocol specific lu */
988 arr[10] = 0x82; /* mlus, per initiator port */
989 } else if (0x88 == cmd[2]) { /* SCSI Ports */
990 arr[1] = cmd[2]; /*sanity */
991 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
992 } else if (0x89 == cmd[2]) { /* ATA information */
993 arr[1] = cmd[2]; /*sanity */
994 n = inquiry_evpd_89(&arr[4]);
995 arr[2] = (n >> 8);
996 arr[3] = (n & 0xff);
997 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
998 arr[1] = cmd[2]; /*sanity */
999 arr[3] = inquiry_evpd_b0(&arr[4]);
641 } else { 1000 } else {
642 /* Illegal request, invalid field in cdb */ 1001 /* Illegal request, invalid field in cdb */
643 mk_sense_buffer(devip, ILLEGAL_REQUEST, 1002 mk_sense_buffer(devip, ILLEGAL_REQUEST,
644 INVALID_FIELD_IN_CDB, 0); 1003 INVALID_FIELD_IN_CDB, 0);
645 return check_condition_result; 1004 return check_condition_result;
646 } 1005 }
1006 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
647 return fill_from_dev_buffer(scp, arr, 1007 return fill_from_dev_buffer(scp, arr,
648 min(alloc_len, SDEBUG_MAX_INQ_ARR_SZ)); 1008 min(len, SDEBUG_MAX_INQ_ARR_SZ));
649 } 1009 }
650 /* drops through here for a standard inquiry */ 1010 /* drops through here for a standard inquiry */
651 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */ 1011 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
652 arr[2] = scsi_debug_scsi_level; 1012 arr[2] = scsi_debug_scsi_level;
653 arr[3] = 2; /* response_data_format==2 */ 1013 arr[3] = 2; /* response_data_format==2 */
654 arr[4] = SDEBUG_LONG_INQ_SZ - 5; 1014 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
655 arr[6] = 0x1; /* claim: ADDR16 */ 1015 arr[6] = 0x10; /* claim: MultiP */
656 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ 1016 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
657 arr[7] = 0x3a; /* claim: WBUS16, SYNC, LINKED + CMDQUE */ 1017 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
658 memcpy(&arr[8], inq_vendor_id, 8); 1018 memcpy(&arr[8], inq_vendor_id, 8);
659 memcpy(&arr[16], inq_product_id, 16); 1019 memcpy(&arr[16], inq_product_id, 16);
660 memcpy(&arr[32], inq_product_rev, 4); 1020 memcpy(&arr[32], inq_product_rev, 4);
661 /* version descriptors (2 bytes each) follow */ 1021 /* version descriptors (2 bytes each) follow */
662 arr[58] = 0x0; arr[59] = 0x40; /* SAM-2 */ 1022 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
663 arr[60] = 0x3; arr[61] = 0x0; /* SPC-3 */ 1023 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
1024 n = 62;
664 if (scsi_debug_ptype == 0) { 1025 if (scsi_debug_ptype == 0) {
665 arr[62] = 0x1; arr[63] = 0x80; /* SBC */ 1026 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
666 } else if (scsi_debug_ptype == 1) { 1027 } else if (scsi_debug_ptype == 1) {
667 arr[62] = 0x2; arr[63] = 0x00; /* SSC */ 1028 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
668 } 1029 }
1030 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
669 return fill_from_dev_buffer(scp, arr, 1031 return fill_from_dev_buffer(scp, arr,
670 min(alloc_len, SDEBUG_LONG_INQ_SZ)); 1032 min(alloc_len, SDEBUG_LONG_INQ_SZ));
671} 1033}
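
For EVPD responses the function now honours both limits: the page's own length (arr[2..3] plus the 4-byte header) and the initiator's allocation length, clamped to the local buffer. As a worked example, a unit serial number page for a 5-digit lu_id has arr[3] = 5, so min(5 + 4, alloc_len) transfers 9 bytes when alloc_len is large enough, while an alloc_len of 4 returns only the page header.
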
@@ -676,46 +1038,141 @@ static int resp_requests(struct scsi_cmnd * scp,
676 unsigned char * sbuff; 1038 unsigned char * sbuff;
677 unsigned char *cmd = (unsigned char *)scp->cmnd; 1039 unsigned char *cmd = (unsigned char *)scp->cmnd;
678 unsigned char arr[SDEBUG_SENSE_LEN]; 1040 unsigned char arr[SDEBUG_SENSE_LEN];
1041 int want_dsense;
679 int len = 18; 1042 int len = 18;
680 1043
681 memset(arr, 0, SDEBUG_SENSE_LEN); 1044 memset(arr, 0, sizeof(arr));
682 if (devip->reset == 1) 1045 if (devip->reset == 1)
683 mk_sense_buffer(devip, 0, NO_ADDED_SENSE, 0); 1046 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
1047 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
684 sbuff = devip->sense_buff; 1048 sbuff = devip->sense_buff;
685 if ((cmd[1] & 1) && (! scsi_debug_dsense)) { 1049 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
686 /* DESC bit set and sense_buff in fixed format */ 1050 if (want_dsense) {
687 arr[0] = 0x72; 1051 arr[0] = 0x72;
688 arr[1] = sbuff[2]; /* sense key */ 1052 arr[1] = 0x0; /* NO_SENSE in sense_key */
689 arr[2] = sbuff[12]; /* asc */ 1053 arr[2] = THRESHOLD_EXCEEDED;
690 arr[3] = sbuff[13]; /* ascq */ 1054 arr[3] = 0xff; /* TEST set and MRIE==6 */
691 len = 8; 1055 } else {
692 } else 1056 arr[0] = 0x70;
1057 arr[2] = 0x0; /* NO_SENSE in sense_key */
1058 arr[7] = 0xa; /* 18 byte sense buffer */
1059 arr[12] = THRESHOLD_EXCEEDED;
1060 arr[13] = 0xff; /* TEST set and MRIE==6 */
1061 }
1062 } else if (devip->stopped) {
1063 if (want_dsense) {
1064 arr[0] = 0x72;
1065 arr[1] = 0x0; /* NO_SENSE in sense_key */
1066 arr[2] = LOW_POWER_COND_ON;
 1067 arr[3] = 0x0; /* ascq==0 for LOW POWER CONDITION ON */
1068 } else {
1069 arr[0] = 0x70;
1070 arr[2] = 0x0; /* NO_SENSE in sense_key */
1071 arr[7] = 0xa; /* 18 byte sense buffer */
1072 arr[12] = LOW_POWER_COND_ON;
 1073 arr[13] = 0x0; /* ascq==0 for LOW POWER CONDITION ON */
1074 }
1075 } else {
693 memcpy(arr, sbuff, SDEBUG_SENSE_LEN); 1076 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
694 mk_sense_buffer(devip, 0, NO_ADDED_SENSE, 0); 1077 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
1078 /* DESC bit set and sense_buff in fixed format */
1079 memset(arr, 0, sizeof(arr));
1080 arr[0] = 0x72;
1081 arr[1] = sbuff[2]; /* sense key */
1082 arr[2] = sbuff[12]; /* asc */
1083 arr[3] = sbuff[13]; /* ascq */
1084 len = 8;
1085 }
1086 }
1087 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
695 return fill_from_dev_buffer(scp, arr, len); 1088 return fill_from_dev_buffer(scp, arr, len);
696} 1089}
697 1090
1091static int resp_start_stop(struct scsi_cmnd * scp,
1092 struct sdebug_dev_info * devip)
1093{
1094 unsigned char *cmd = (unsigned char *)scp->cmnd;
1095 int power_cond, errsts, start;
1096
1097 if ((errsts = check_readiness(scp, 1, devip)))
1098 return errsts;
1099 power_cond = (cmd[4] & 0xf0) >> 4;
1100 if (power_cond) {
1101 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1102 0);
1103 return check_condition_result;
1104 }
1105 start = cmd[4] & 1;
1106 if (start == devip->stopped)
1107 devip->stopped = !start;
1108 return 0;
1109}
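
resp_start_stop() accepts only POWER CONDITION 0 and tracks the START bit; a cleared START bit sets devip->stopped, which is what later makes check_readiness(scp, 0, devip) fail media-access commands with NOT READY. The relevant CDB byte, for reference:

    /* START STOP UNIT, CDB byte 4:
     *   bits 7..4  POWER CONDITION (anything non-zero is rejected here)
     *   bit  0     START: 1 spins the unit up, 0 stops it
     * so devip->stopped always ends up as !START */
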
1110
698#define SDEBUG_READCAP_ARR_SZ 8 1111#define SDEBUG_READCAP_ARR_SZ 8
699static int resp_readcap(struct scsi_cmnd * scp, 1112static int resp_readcap(struct scsi_cmnd * scp,
700 struct sdebug_dev_info * devip) 1113 struct sdebug_dev_info * devip)
701{ 1114{
702 unsigned char arr[SDEBUG_READCAP_ARR_SZ]; 1115 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
703 unsigned long capac; 1116 unsigned int capac;
704 int errsts; 1117 int errsts;
705 1118
706 if ((errsts = check_reset(scp, devip))) 1119 if ((errsts = check_readiness(scp, 1, devip)))
707 return errsts; 1120 return errsts;
1121 /* following just in case virtual_gb changed */
1122 if (scsi_debug_virtual_gb > 0) {
1123 sdebug_capacity = 2048 * 1024;
1124 sdebug_capacity *= scsi_debug_virtual_gb;
1125 } else
1126 sdebug_capacity = sdebug_store_sectors;
708 memset(arr, 0, SDEBUG_READCAP_ARR_SZ); 1127 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
709 capac = (unsigned long)sdebug_capacity - 1; 1128 if (sdebug_capacity < 0xffffffff) {
710 arr[0] = (capac >> 24); 1129 capac = (unsigned int)sdebug_capacity - 1;
711 arr[1] = (capac >> 16) & 0xff; 1130 arr[0] = (capac >> 24);
712 arr[2] = (capac >> 8) & 0xff; 1131 arr[1] = (capac >> 16) & 0xff;
713 arr[3] = capac & 0xff; 1132 arr[2] = (capac >> 8) & 0xff;
1133 arr[3] = capac & 0xff;
1134 } else {
1135 arr[0] = 0xff;
1136 arr[1] = 0xff;
1137 arr[2] = 0xff;
1138 arr[3] = 0xff;
1139 }
714 arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff; 1140 arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
715 arr[7] = SECT_SIZE_PER(target) & 0xff; 1141 arr[7] = SECT_SIZE_PER(target) & 0xff;
716 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); 1142 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
717} 1143}
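
The 2048 * 1024 constant is the number of 512-byte sectors in a binary gigabyte: 2^30 / 2^9 = 2^21 = 2,097,152. Because virtual_gb lets the reported capacity exceed 32 bits, READ CAPACITY(10) now answers 0xffffffff in that case, signalling the initiator to retry with READ CAPACITY(16) below. In one hedged line (SECT_SIZE == 512 assumed):

    sdebug_capacity = (unsigned long long)2048 * 1024 * scsi_debug_virtual_gb;
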
718 1144
1145#define SDEBUG_READCAP16_ARR_SZ 32
1146static int resp_readcap16(struct scsi_cmnd * scp,
1147 struct sdebug_dev_info * devip)
1148{
1149 unsigned char *cmd = (unsigned char *)scp->cmnd;
1150 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1151 unsigned long long capac;
1152 int errsts, k, alloc_len;
1153
1154 if ((errsts = check_readiness(scp, 1, devip)))
1155 return errsts;
1156 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1157 + cmd[13]);
1158 /* following just in case virtual_gb changed */
1159 if (scsi_debug_virtual_gb > 0) {
1160 sdebug_capacity = 2048 * 1024;
1161 sdebug_capacity *= scsi_debug_virtual_gb;
1162 } else
1163 sdebug_capacity = sdebug_store_sectors;
1164 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1165 capac = sdebug_capacity - 1;
1166 for (k = 0; k < 8; ++k, capac >>= 8)
1167 arr[7 - k] = capac & 0xff;
1168 arr[8] = (SECT_SIZE_PER(target) >> 24) & 0xff;
1169 arr[9] = (SECT_SIZE_PER(target) >> 16) & 0xff;
1170 arr[10] = (SECT_SIZE_PER(target) >> 8) & 0xff;
1171 arr[11] = SECT_SIZE_PER(target) & 0xff;
1172 return fill_from_dev_buffer(scp, arr,
1173 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1174}
1175
719/* <<Following mode page info copied from ST318451LW>> */ 1176/* <<Following mode page info copied from ST318451LW>> */
720 1177
721static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target) 1178static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
@@ -771,27 +1228,98 @@ static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
771 1228
772static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target) 1229static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
773{ /* Control mode page for mode_sense */ 1230{ /* Control mode page for mode_sense */
774 unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 1231 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1232 0, 0, 0, 0};
1233 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
775 0, 0, 0x2, 0x4b}; 1234 0, 0, 0x2, 0x4b};
776 1235
777 if (scsi_debug_dsense) 1236 if (scsi_debug_dsense)
778 ctrl_m_pg[2] |= 0x4; 1237 ctrl_m_pg[2] |= 0x4;
1238 else
1239 ctrl_m_pg[2] &= ~0x4;
779 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg)); 1240 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
780 if (1 == pcontrol) 1241 if (1 == pcontrol)
781 memset(p + 2, 0, sizeof(ctrl_m_pg) - 2); 1242 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1243 else if (2 == pcontrol)
1244 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
782 return sizeof(ctrl_m_pg); 1245 return sizeof(ctrl_m_pg);
783} 1246}
784 1247
1248
785static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target) 1249static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
786{ /* Informational Exceptions control mode page for mode_sense */ 1250{ /* Informational Exceptions control mode page for mode_sense */
787 unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 1251 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
788 0, 0, 0x0, 0x0}; 1252 0, 0, 0x0, 0x0};
1253 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1254 0, 0, 0x0, 0x0};
1255
789 memcpy(p, iec_m_pg, sizeof(iec_m_pg)); 1256 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
790 if (1 == pcontrol) 1257 if (1 == pcontrol)
791 memset(p + 2, 0, sizeof(iec_m_pg) - 2); 1258 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1259 else if (2 == pcontrol)
1260 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
792 return sizeof(iec_m_pg); 1261 return sizeof(iec_m_pg);
793} 1262}
794 1263
1264static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1265{ /* SAS SSP mode page - short format for mode_sense */
1266 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1267 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1268
1269 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1270 if (1 == pcontrol)
1271 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1272 return sizeof(sas_sf_m_pg);
1273}
1274
1275
1276static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1277 int target_dev_id)
1278{ /* SAS phy control and discover mode page for mode_sense */
1279 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1280 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1281 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1282 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1283 0x2, 0, 0, 0, 0, 0, 0, 0,
1284 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1285 0, 0, 0, 0, 0, 0, 0, 0,
1286 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1287 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1288 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1289 0x3, 0, 0, 0, 0, 0, 0, 0,
1290 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1291 0, 0, 0, 0, 0, 0, 0, 0,
1292 };
1293 int port_a, port_b;
1294
1295 port_a = target_dev_id + 1;
1296 port_b = port_a + 1;
1297 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1298 p[20] = (port_a >> 24);
1299 p[21] = (port_a >> 16) & 0xff;
1300 p[22] = (port_a >> 8) & 0xff;
1301 p[23] = port_a & 0xff;
1302 p[48 + 20] = (port_b >> 24);
1303 p[48 + 21] = (port_b >> 16) & 0xff;
1304 p[48 + 22] = (port_b >> 8) & 0xff;
1305 p[48 + 23] = port_b & 0xff;
1306 if (1 == pcontrol)
1307 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1308 return sizeof(sas_pcd_m_pg);
1309}
1310
1311static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1312{ /* SAS SSP shared protocol specific port mode subpage */
1313 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1314 0, 0, 0, 0, 0, 0, 0, 0,
1315 };
1316
1317 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1318 if (1 == pcontrol)
1319 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1320 return sizeof(sas_sha_m_pg);
1321}
1322
795#define SDEBUG_MAX_MSENSE_SZ 256 1323#define SDEBUG_MAX_MSENSE_SZ 256
796 1324
797static int resp_mode_sense(struct scsi_cmnd * scp, int target, 1325static int resp_mode_sense(struct scsi_cmnd * scp, int target,
@@ -800,12 +1328,12 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
800 unsigned char dbd; 1328 unsigned char dbd;
801 int pcontrol, pcode, subpcode; 1329 int pcontrol, pcode, subpcode;
802 unsigned char dev_spec; 1330 unsigned char dev_spec;
803 int alloc_len, msense_6, offset, len, errsts; 1331 int alloc_len, msense_6, offset, len, errsts, target_dev_id;
804 unsigned char * ap; 1332 unsigned char * ap;
805 unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; 1333 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
806 unsigned char *cmd = (unsigned char *)scp->cmnd; 1334 unsigned char *cmd = (unsigned char *)scp->cmnd;
807 1335
808 if ((errsts = check_reset(scp, devip))) 1336 if ((errsts = check_readiness(scp, 1, devip)))
809 return errsts; 1337 return errsts;
810 dbd = cmd[1] & 0x8; 1338 dbd = cmd[1] & 0x8;
811 pcontrol = (cmd[2] & 0xc0) >> 6; 1339 pcontrol = (cmd[2] & 0xc0) >> 6;
@@ -819,6 +1347,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
819 0); 1347 0);
820 return check_condition_result; 1348 return check_condition_result;
821 } 1349 }
1350 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1351 (devip->target * 1000) - 3;
822 dev_spec = DEV_READONLY(target) ? 0x80 : 0x0; 1352 dev_spec = DEV_READONLY(target) ? 0x80 : 0x0;
823 if (msense_6) { 1353 if (msense_6) {
824 arr[2] = dev_spec; 1354 arr[2] = dev_spec;
@@ -829,7 +1359,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
829 } 1359 }
830 ap = arr + offset; 1360 ap = arr + offset;
831 1361
832 if (0 != subpcode) { /* TODO: Control Extension page */ 1362 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1363 /* TODO: Control Extension page */
833 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 1364 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
834 0); 1365 0);
835 return check_condition_result; 1366 return check_condition_result;
@@ -855,17 +1386,45 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
855 len = resp_ctrl_m_pg(ap, pcontrol, target); 1386 len = resp_ctrl_m_pg(ap, pcontrol, target);
856 offset += len; 1387 offset += len;
857 break; 1388 break;
1389 case 0x19: /* if spc==1 then sas phy, control+discover */
1390 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1391 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1392 INVALID_FIELD_IN_CDB, 0);
1393 return check_condition_result;
1394 }
1395 len = 0;
1396 if ((0x0 == subpcode) || (0xff == subpcode))
1397 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1398 if ((0x1 == subpcode) || (0xff == subpcode))
1399 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1400 target_dev_id);
1401 if ((0x2 == subpcode) || (0xff == subpcode))
1402 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1403 offset += len;
1404 break;
858 case 0x1c: /* Informational Exceptions Mode page, all devices */ 1405 case 0x1c: /* Informational Exceptions Mode page, all devices */
859 len = resp_iec_m_pg(ap, pcontrol, target); 1406 len = resp_iec_m_pg(ap, pcontrol, target);
860 offset += len; 1407 offset += len;
861 break; 1408 break;
862 case 0x3f: /* Read all Mode pages */ 1409 case 0x3f: /* Read all Mode pages */
863 len = resp_err_recov_pg(ap, pcontrol, target); 1410 if ((0 == subpcode) || (0xff == subpcode)) {
864 len += resp_disconnect_pg(ap + len, pcontrol, target); 1411 len = resp_err_recov_pg(ap, pcontrol, target);
865 len += resp_format_pg(ap + len, pcontrol, target); 1412 len += resp_disconnect_pg(ap + len, pcontrol, target);
866 len += resp_caching_pg(ap + len, pcontrol, target); 1413 len += resp_format_pg(ap + len, pcontrol, target);
867 len += resp_ctrl_m_pg(ap + len, pcontrol, target); 1414 len += resp_caching_pg(ap + len, pcontrol, target);
868 len += resp_iec_m_pg(ap + len, pcontrol, target); 1415 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1416 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1417 if (0xff == subpcode) {
1418 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1419 target, target_dev_id);
1420 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1421 }
1422 len += resp_iec_m_pg(ap + len, pcontrol, target);
1423 } else {
1424 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1425 INVALID_FIELD_IN_CDB, 0);
1426 return check_condition_result;
1427 }
869 offset += len; 1428 offset += len;
870 break; 1429 break;
871 default: 1430 default:
@@ -882,71 +1441,274 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
882 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset)); 1441 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
883} 1442}
884 1443
885static int resp_read(struct scsi_cmnd * SCpnt, int upper_blk, int block, 1444#define SDEBUG_MAX_MSELECT_SZ 512
886 int num, struct sdebug_dev_info * devip) 1445
1446static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1447 struct sdebug_dev_info * devip)
1448{
1449 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1450 int param_len, res, errsts, mpage;
1451 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1452 unsigned char *cmd = (unsigned char *)scp->cmnd;
1453
1454 if ((errsts = check_readiness(scp, 1, devip)))
1455 return errsts;
1456 memset(arr, 0, sizeof(arr));
1457 pf = cmd[1] & 0x10;
1458 sp = cmd[1] & 0x1;
1459 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1460 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1461 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1462 INVALID_FIELD_IN_CDB, 0);
1463 return check_condition_result;
1464 }
1465 res = fetch_to_dev_buffer(scp, arr, param_len);
1466 if (-1 == res)
1467 return (DID_ERROR << 16);
1468 else if ((res < param_len) &&
1469 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1470 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
 1471 "IO sent=%d bytes\n", param_len, res);
1472 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1473 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1474 if ((md_len > 2) || (0 != bd_len)) {
1475 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1476 INVALID_FIELD_IN_PARAM_LIST, 0);
1477 return check_condition_result;
1478 }
1479 off = bd_len + (mselect6 ? 4 : 8);
1480 mpage = arr[off] & 0x3f;
1481 ps = !!(arr[off] & 0x80);
1482 if (ps) {
1483 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1484 INVALID_FIELD_IN_PARAM_LIST, 0);
1485 return check_condition_result;
1486 }
1487 spf = !!(arr[off] & 0x40);
1488 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1489 (arr[off + 1] + 2);
1490 if ((pg_len + off) > param_len) {
1491 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1492 PARAMETER_LIST_LENGTH_ERR, 0);
1493 return check_condition_result;
1494 }
1495 switch (mpage) {
1496 case 0xa: /* Control Mode page */
1497 if (ctrl_m_pg[1] == arr[off + 1]) {
1498 memcpy(ctrl_m_pg + 2, arr + off + 2,
1499 sizeof(ctrl_m_pg) - 2);
1500 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1501 return 0;
1502 }
1503 break;
1504 case 0x1c: /* Informational Exceptions Mode page */
1505 if (iec_m_pg[1] == arr[off + 1]) {
1506 memcpy(iec_m_pg + 2, arr + off + 2,
1507 sizeof(iec_m_pg) - 2);
1508 return 0;
1509 }
1510 break;
1511 default:
1512 break;
1513 }
1514 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1515 INVALID_FIELD_IN_PARAM_LIST, 0);
1516 return check_condition_result;
1517}
1518
1519static int resp_temp_l_pg(unsigned char * arr)
1520{
1521 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1522 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1523 };
1524
1525 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1526 return sizeof(temp_l_pg);
1527}
1528
1529static int resp_ie_l_pg(unsigned char * arr)
1530{
1531 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1532 };
1533
1534 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1535 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1536 arr[4] = THRESHOLD_EXCEEDED;
1537 arr[5] = 0xff;
1538 }
1539 return sizeof(ie_l_pg);
1540}
1541
1542#define SDEBUG_MAX_LSENSE_SZ 512
1543
1544static int resp_log_sense(struct scsi_cmnd * scp,
1545 struct sdebug_dev_info * devip)
1546{
1547 int ppc, sp, pcontrol, pcode, alloc_len, errsts, len, n;
1548 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1549 unsigned char *cmd = (unsigned char *)scp->cmnd;
1550
1551 if ((errsts = check_readiness(scp, 1, devip)))
1552 return errsts;
1553 memset(arr, 0, sizeof(arr));
1554 ppc = cmd[1] & 0x2;
1555 sp = cmd[1] & 0x1;
1556 if (ppc || sp) {
1557 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1558 INVALID_FIELD_IN_CDB, 0);
1559 return check_condition_result;
1560 }
1561 pcontrol = (cmd[2] & 0xc0) >> 6;
1562 pcode = cmd[2] & 0x3f;
1563 alloc_len = (cmd[7] << 8) + cmd[8];
1564 arr[0] = pcode;
1565 switch (pcode) {
1566 case 0x0: /* Supported log pages log page */
1567 n = 4;
1568 arr[n++] = 0x0; /* this page */
1569 arr[n++] = 0xd; /* Temperature */
1570 arr[n++] = 0x2f; /* Informational exceptions */
1571 arr[3] = n - 4;
1572 break;
1573 case 0xd: /* Temperature log page */
1574 arr[3] = resp_temp_l_pg(arr + 4);
1575 break;
1576 case 0x2f: /* Informational exceptions log page */
1577 arr[3] = resp_ie_l_pg(arr + 4);
1578 break;
1579 default:
1580 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1581 INVALID_FIELD_IN_CDB, 0);
1582 return check_condition_result;
1583 }
1584 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1585 return fill_from_dev_buffer(scp, arr,
 1586 min(len, SDEBUG_MAX_LSENSE_SZ));
1587}
1588
1589static int resp_read(struct scsi_cmnd * SCpnt, unsigned long long lba,
1590 unsigned int num, struct sdebug_dev_info * devip)
887{ 1591{
888 unsigned long iflags; 1592 unsigned long iflags;
1593 unsigned int block, from_bottom;
1594 unsigned long long u;
889 int ret; 1595 int ret;
890 1596
891 if (upper_blk || (block + num > sdebug_capacity)) { 1597 if (lba + num > sdebug_capacity) {
892 mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 1598 mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE,
893 0); 1599 0);
894 return check_condition_result; 1600 return check_condition_result;
895 } 1601 }
1602 /* transfer length excessive (tie in to block limits VPD page) */
1603 if (num > sdebug_store_sectors) {
1604 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1605 0);
1606 return check_condition_result;
1607 }
896 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && 1608 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
897 (block <= OPT_MEDIUM_ERR_ADDR) && 1609 (lba <= OPT_MEDIUM_ERR_ADDR) &&
898 ((block + num) > OPT_MEDIUM_ERR_ADDR)) { 1610 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1611 /* claim unrecoverable read error */
899 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 1612 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR,
900 0); 1613 0);
901 /* claim unrecoverable read error */ 1614 /* set info field and valid bit for fixed descriptor */
1615 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1616 devip->sense_buff[0] |= 0x80; /* Valid bit */
1617 ret = OPT_MEDIUM_ERR_ADDR;
1618 devip->sense_buff[3] = (ret >> 24) & 0xff;
1619 devip->sense_buff[4] = (ret >> 16) & 0xff;
1620 devip->sense_buff[5] = (ret >> 8) & 0xff;
1621 devip->sense_buff[6] = ret & 0xff;
1622 }
902 return check_condition_result; 1623 return check_condition_result;
903 } 1624 }
904 read_lock_irqsave(&atomic_rw, iflags); 1625 read_lock_irqsave(&atomic_rw, iflags);
905 ret = fill_from_dev_buffer(SCpnt, fake_storep + (block * SECT_SIZE), 1626 if ((lba + num) <= sdebug_store_sectors)
906 num * SECT_SIZE); 1627 ret = fill_from_dev_buffer(SCpnt,
1628 fake_storep + (lba * SECT_SIZE),
1629 num * SECT_SIZE);
1630 else {
1631 /* modulo when one arg is 64 bits needs do_div() */
1632 u = lba;
1633 block = do_div(u, sdebug_store_sectors);
1634 from_bottom = 0;
1635 if ((block + num) > sdebug_store_sectors)
1636 from_bottom = (block + num) - sdebug_store_sectors;
1637 ret = fill_from_dev_buffer(SCpnt,
1638 fake_storep + (block * SECT_SIZE),
1639 (num - from_bottom) * SECT_SIZE);
1640 if ((0 == ret) && (from_bottom > 0))
1641 ret = fill_from_dev_buffer(SCpnt, fake_storep,
1642 from_bottom * SECT_SIZE);
1643 }
907 read_unlock_irqrestore(&atomic_rw, iflags); 1644 read_unlock_irqrestore(&atomic_rw, iflags);
908 return ret; 1645 return ret;
909} 1646}
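
do_div() is used because lba is 64-bit: a plain % with a 64-bit dividend would drag in libgcc helpers on 32-bit architectures, whereas do_div(u, base) divides u in place and returns the remainder. The wrap-around then splits a transfer that runs past the end of the ramdisk into a top piece plus a piece from sector 0. A worked example with an assumed store of 1000 sectors:

    /* read of 16 sectors at lba 995 against a 1000-sector store
     * (valid because the virtual capacity can exceed the store):
     *   u = 995; block = do_div(u, 1000);   -> block = 995, u = 0
     *   from_bottom = (995 + 16) - 1000 = 11
     * => copy sectors 995..999 from the top, then 0..10 from the start */
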
910 1647
911static int resp_write(struct scsi_cmnd * SCpnt, int upper_blk, int block, 1648static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba,
912 int num, struct sdebug_dev_info * devip) 1649 unsigned int num, struct sdebug_dev_info * devip)
913{ 1650{
914 unsigned long iflags; 1651 unsigned long iflags;
1652 unsigned int block, to_bottom;
1653 unsigned long long u;
915 int res; 1654 int res;
916 1655
917 if (upper_blk || (block + num > sdebug_capacity)) { 1656 if (lba + num > sdebug_capacity) {
918 mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 1657 mk_sense_buffer(devip, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE,
919 0); 1658 0);
920 return check_condition_result; 1659 return check_condition_result;
921 } 1660 }
1661 /* transfer length excessive (tie in to block limits VPD page) */
1662 if (num > sdebug_store_sectors) {
1663 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1664 0);
1665 return check_condition_result;
1666 }
922 1667
923 write_lock_irqsave(&atomic_rw, iflags); 1668 write_lock_irqsave(&atomic_rw, iflags);
924 res = fetch_to_dev_buffer(SCpnt, fake_storep + (block * SECT_SIZE), 1669 if ((lba + num) <= sdebug_store_sectors)
925 num * SECT_SIZE); 1670 res = fetch_to_dev_buffer(SCpnt,
1671 fake_storep + (lba * SECT_SIZE),
1672 num * SECT_SIZE);
1673 else {
1674 /* modulo when one arg is 64 bits needs do_div() */
1675 u = lba;
1676 block = do_div(u, sdebug_store_sectors);
1677 to_bottom = 0;
1678 if ((block + num) > sdebug_store_sectors)
1679 to_bottom = (block + num) - sdebug_store_sectors;
1680 res = fetch_to_dev_buffer(SCpnt,
1681 fake_storep + (block * SECT_SIZE),
1682 (num - to_bottom) * SECT_SIZE);
1683 if ((0 == res) && (to_bottom > 0))
1684 res = fetch_to_dev_buffer(SCpnt, fake_storep,
1685 to_bottom * SECT_SIZE);
1686 }
926 write_unlock_irqrestore(&atomic_rw, iflags); 1687 write_unlock_irqrestore(&atomic_rw, iflags);
927 if (-1 == res) 1688 if (-1 == res)
928 return (DID_ERROR << 16); 1689 return (DID_ERROR << 16);
929 else if ((res < (num * SECT_SIZE)) && 1690 else if ((res < (num * SECT_SIZE)) &&
930 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 1691 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
931 printk(KERN_INFO "scsi_debug: write: cdb indicated=%d, " 1692 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
 932 "IO sent=%d bytes\n", num * SECT_SIZE, res); 1693 "IO sent=%d bytes\n", num * SECT_SIZE, res);
933 return 0; 1694 return 0;
934} 1695}
935 1696
936#define SDEBUG_RLUN_ARR_SZ 128 1697#define SDEBUG_RLUN_ARR_SZ 256
937 1698
938static int resp_report_luns(struct scsi_cmnd * scp, 1699static int resp_report_luns(struct scsi_cmnd * scp,
939 struct sdebug_dev_info * devip) 1700 struct sdebug_dev_info * devip)
940{ 1701{
941 unsigned int alloc_len; 1702 unsigned int alloc_len;
942 int lun_cnt, i, upper; 1703 int lun_cnt, i, upper, num, n, wlun, lun;
943 unsigned char *cmd = (unsigned char *)scp->cmnd; 1704 unsigned char *cmd = (unsigned char *)scp->cmnd;
944 int select_report = (int)cmd[2]; 1705 int select_report = (int)cmd[2];
945 struct scsi_lun *one_lun; 1706 struct scsi_lun *one_lun;
946 unsigned char arr[SDEBUG_RLUN_ARR_SZ]; 1707 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
1708 unsigned char * max_addr;
947 1709
948 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); 1710 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
949 if ((alloc_len < 16) || (select_report > 2)) { 1711 if ((alloc_len < 4) || (select_report > 2)) {
950 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 1712 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
951 0); 1713 0);
952 return check_condition_result; 1714 return check_condition_result;
@@ -954,18 +1716,37 @@ static int resp_report_luns(struct scsi_cmnd * scp,
954 /* can produce response with up to 16k luns (lun 0 to lun 16383) */ 1716 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
955 memset(arr, 0, SDEBUG_RLUN_ARR_SZ); 1717 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
956 lun_cnt = scsi_debug_max_luns; 1718 lun_cnt = scsi_debug_max_luns;
957 arr[2] = ((sizeof(struct scsi_lun) * lun_cnt) >> 8) & 0xff; 1719 if (1 == select_report)
958 arr[3] = (sizeof(struct scsi_lun) * lun_cnt) & 0xff; 1720 lun_cnt = 0;
959 lun_cnt = min((int)((SDEBUG_RLUN_ARR_SZ - 8) / 1721 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
960 sizeof(struct scsi_lun)), lun_cnt); 1722 --lun_cnt;
1723 wlun = (select_report > 0) ? 1 : 0;
1724 num = lun_cnt + wlun;
1725 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
1726 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
1727 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
1728 sizeof(struct scsi_lun)), num);
1729 if (n < num) {
1730 wlun = 0;
1731 lun_cnt = n;
1732 }
961 one_lun = (struct scsi_lun *) &arr[8]; 1733 one_lun = (struct scsi_lun *) &arr[8];
962 for (i = 0; i < lun_cnt; i++) { 1734 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
963 upper = (i >> 8) & 0x3f; 1735 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
1736 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
1737 i++, lun++) {
1738 upper = (lun >> 8) & 0x3f;
964 if (upper) 1739 if (upper)
965 one_lun[i].scsi_lun[0] = 1740 one_lun[i].scsi_lun[0] =
966 (upper | (SAM2_LUN_ADDRESS_METHOD << 6)); 1741 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
967 one_lun[i].scsi_lun[1] = i & 0xff; 1742 one_lun[i].scsi_lun[1] = lun & 0xff;
1743 }
1744 if (wlun) {
1745 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
1746 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
1747 i++;
968 } 1748 }
1749 alloc_len = (unsigned char *)(one_lun + i) - arr;
969 return fill_from_dev_buffer(scp, arr, 1750 return fill_from_dev_buffer(scp, arr,
970 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ)); 1751 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
971} 1752}
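
Each REPORT LUNS entry is an 8-byte struct scsi_lun. LUNs below 256 use peripheral device addressing (byte 0 stays zero); larger ones switch to SAM-2 flat addressing with the method field in the top two bits of byte 0, and the well-known REPORT LUNS LUN (SAM2_WLUN_REPORT_LUNS, 0xc101) is appended when SELECT REPORT asks for well-known LUNs. The encoding used above, in isolation:

    upper = (lun >> 8) & 0x3f;
    one_lun[i].scsi_lun[0] = upper ?
            (upper | (SAM2_LUN_ADDRESS_METHOD << 6)) : 0;
    one_lun[i].scsi_lun[1] = lun & 0xff;    /* low 8 bits of the LUN */
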
@@ -1001,7 +1782,8 @@ static void timer_intr_handler(unsigned long indx)
1001static int scsi_debug_slave_alloc(struct scsi_device * sdp) 1782static int scsi_debug_slave_alloc(struct scsi_device * sdp)
1002{ 1783{
1003 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 1784 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
1004 sdev_printk(KERN_INFO, sdp, "scsi_debug: slave_alloc\n"); 1785 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
1786 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
1005 return 0; 1787 return 0;
1006} 1788}
1007 1789
@@ -1010,7 +1792,8 @@ static int scsi_debug_slave_configure(struct scsi_device * sdp)
1010 struct sdebug_dev_info * devip; 1792 struct sdebug_dev_info * devip;
1011 1793
1012 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 1794 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
1013 sdev_printk(KERN_INFO, sdp, "scsi_debug: slave_configure\n"); 1795 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
1796 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
1014 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) 1797 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
1015 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; 1798 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
1016 devip = devInfoReg(sdp); 1799 devip = devInfoReg(sdp);
@@ -1018,6 +1801,7 @@ static int scsi_debug_slave_configure(struct scsi_device * sdp)
1018 if (sdp->host->cmd_per_lun) 1801 if (sdp->host->cmd_per_lun)
1019 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING, 1802 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
1020 sdp->host->cmd_per_lun); 1803 sdp->host->cmd_per_lun);
1804 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
1021 return 0; 1805 return 0;
1022} 1806}
1023 1807
@@ -1027,7 +1811,8 @@ static void scsi_debug_slave_destroy(struct scsi_device * sdp)
1027 (struct sdebug_dev_info *)sdp->hostdata; 1811 (struct sdebug_dev_info *)sdp->hostdata;
1028 1812
1029 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 1813 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
1030 sdev_printk(KERN_INFO, sdp, "scsi_debug: slave_destroy\n"); 1814 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
1815 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
1031 if (devip) { 1816 if (devip) {
 1032 /* make this slot available for re-use */ 1817 /* make this slot available for re-use */
1033 devip->used = 0; 1818 devip->used = 0;
@@ -1084,6 +1869,8 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
1084 open_devip->sense_buff[0] = 0x70; 1869 open_devip->sense_buff[0] = 0x70;
1085 open_devip->sense_buff[7] = 0xa; 1870 open_devip->sense_buff[7] = 0xa;
1086 } 1871 }
1872 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
1873 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
1087 return open_devip; 1874 return open_devip;
1088 } 1875 }
1089 return NULL; 1876 return NULL;
@@ -1272,7 +2059,7 @@ static void __init sdebug_build_parts(unsigned char * ramp)
1272 printk(KERN_WARNING "scsi_debug:build_parts: reducing " 2059 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
1273 "partitions to %d\n", SDEBUG_MAX_PARTS); 2060 "partitions to %d\n", SDEBUG_MAX_PARTS);
1274 } 2061 }
1275 num_sectors = (int)(sdebug_store_size / SECT_SIZE); 2062 num_sectors = (int)sdebug_store_sectors;
1276 sectors_per_part = (num_sectors - sdebug_sectors_per) 2063 sectors_per_part = (num_sectors - sdebug_sectors_per)
1277 / scsi_debug_num_parts; 2064 / scsi_debug_num_parts;
1278 heads_by_sects = sdebug_heads * sdebug_sectors_per; 2065 heads_by_sects = sdebug_heads * sdebug_sectors_per;
@@ -1315,9 +2102,9 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
1315 if (scsi_result) { 2102 if (scsi_result) {
1316 struct scsi_device * sdp = cmnd->device; 2103 struct scsi_device * sdp = cmnd->device;
1317 2104
1318 sdev_printk(KERN_INFO, sdp, 2105 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
1319 "non-zero result=0x%x\n", 2106 "non-zero result=0x%x\n", sdp->host->host_no,
1320 scsi_result); 2107 sdp->channel, sdp->id, sdp->lun, scsi_result);
1321 } 2108 }
1322 } 2109 }
1323 if (cmnd && devip) { 2110 if (cmnd && devip) {
@@ -1364,21 +2151,19 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
1364 } 2151 }
1365} 2152}
1366 2153
1367/* Set 'perm' (4th argument) to 0 to disable module_param's definition 2154module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
1368 * of sysfs parameters (which module_param doesn't yet support). 2155module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
1369 * Sysfs parameters defined explicitly below. 2156module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
1370 */ 2157module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
1371module_param_named(add_host, scsi_debug_add_host, int, 0); /* perm=0644 */ 2158module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
1372module_param_named(delay, scsi_debug_delay, int, 0); /* perm=0644 */ 2159module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
1373module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, 0); 2160module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
1374module_param_named(dsense, scsi_debug_dsense, int, 0); 2161module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
1375module_param_named(every_nth, scsi_debug_every_nth, int, 0); 2162module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
1376module_param_named(max_luns, scsi_debug_max_luns, int, 0); 2163module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
1377module_param_named(num_parts, scsi_debug_num_parts, int, 0); 2164module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
1378module_param_named(num_tgts, scsi_debug_num_tgts, int, 0); 2165module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
1379module_param_named(opts, scsi_debug_opts, int, 0); /* perm=0644 */ 2166module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
1380module_param_named(ptype, scsi_debug_ptype, int, 0);
1381module_param_named(scsi_level, scsi_debug_scsi_level, int, 0);
1382 2167
1383MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2168MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
1384MODULE_DESCRIPTION("SCSI debug adapter driver"); 2169MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -1387,15 +2172,17 @@ MODULE_VERSION(SCSI_DEBUG_VERSION);
1387 2172
1388MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); 2173MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
1389MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)"); 2174MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
1390MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs"); 2175MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
1391MODULE_PARM_DESC(dsense, "use descriptor sense format(def: fixed)"); 2176MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
1392MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)"); 2177MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)");
1393MODULE_PARM_DESC(max_luns, "number of SCSI LUNs per target to simulate"); 2178MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2179MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
1394MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); 2180MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
1395MODULE_PARM_DESC(num_tgts, "number of SCSI targets per host to simulate"); 2181MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
1396MODULE_PARM_DESC(opts, "1->noise, 2->medium_error, 4->..."); 2182MODULE_PARM_DESC(opts, "1->noise, 2->medium_error, 4->... (def=0)");
1397MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 2183MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
1398MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2184MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2185MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
1399 2186
1400 2187
1401static char sdebug_info[256]; 2188static char sdebug_info[256];
@@ -1547,6 +2334,24 @@ static ssize_t sdebug_dsense_store(struct device_driver * ddp,
1547DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show, 2334DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
1548 sdebug_dsense_store); 2335 sdebug_dsense_store);
1549 2336
2337static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2338{
2339 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2340}
2341static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2342 const char * buf, size_t count)
2343{
2344 int n;
2345
2346 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2347 scsi_debug_no_lun_0 = n;
2348 return count;
2349 }
2350 return -EINVAL;
2351}
2352DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2353 sdebug_no_lun_0_store);
2354
1550static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf) 2355static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
1551{ 2356{
1552 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts); 2357 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
@@ -1622,6 +2427,29 @@ static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
1622} 2427}
1623DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL); 2428DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
1624 2429
2430static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
2431{
2432 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
2433}
2434static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
2435 const char * buf, size_t count)
2436{
2437 int n;
2438
2439 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2440 scsi_debug_virtual_gb = n;
2441 if (scsi_debug_virtual_gb > 0) {
2442 sdebug_capacity = 2048 * 1024;
2443 sdebug_capacity *= scsi_debug_virtual_gb;
2444 } else
2445 sdebug_capacity = sdebug_store_sectors;
2446 return count;
2447 }
2448 return -EINVAL;
2449}
2450DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
2451 sdebug_virtual_gb_store);
2452
1625static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf) 2453static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
1626{ 2454{
1627 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host); 2455 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
@@ -1691,14 +2519,19 @@ static void do_remove_driverfs_files(void)
1691 2519
1692static int __init scsi_debug_init(void) 2520static int __init scsi_debug_init(void)
1693{ 2521{
1694 unsigned long sz; 2522 unsigned int sz;
1695 int host_to_add; 2523 int host_to_add;
1696 int k; 2524 int k;
1697 2525
1698 if (scsi_debug_dev_size_mb < 1) 2526 if (scsi_debug_dev_size_mb < 1)
1699 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 2527 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
1700 sdebug_store_size = (unsigned long)scsi_debug_dev_size_mb * 1048576; 2528 sdebug_store_size = (unsigned int)scsi_debug_dev_size_mb * 1048576;
1701 sdebug_capacity = sdebug_store_size / SECT_SIZE; 2529 sdebug_store_sectors = sdebug_store_size / SECT_SIZE;
2530 if (scsi_debug_virtual_gb > 0) {
2531 sdebug_capacity = 2048 * 1024;
2532 sdebug_capacity *= scsi_debug_virtual_gb;
2533 } else
2534 sdebug_capacity = sdebug_store_sectors;
1702 2535
1703 /* play around with geometry, don't waste too much on track 0 */ 2536 /* play around with geometry, don't waste too much on track 0 */
1704 sdebug_heads = 8; 2537 sdebug_heads = 8;
@@ -1812,7 +2645,7 @@ static int sdebug_add_adapter(void)
1812 struct sdebug_dev_info *sdbg_devinfo; 2645 struct sdebug_dev_info *sdbg_devinfo;
1813 struct list_head *lh, *lh_sf; 2646 struct list_head *lh, *lh_sf;
1814 2647
1815 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL); 2648 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
1816 2649
1817 if (NULL == sdbg_host) { 2650 if (NULL == sdbg_host) {
1818 printk(KERN_ERR "%s: out of memory at line %d\n", 2651 printk(KERN_ERR "%s: out of memory at line %d\n",
@@ -1824,7 +2657,7 @@ static int sdebug_add_adapter(void)
1824 2657
1825 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns; 2658 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
1826 for (k = 0; k < devs_per_host; k++) { 2659 for (k = 0; k < devs_per_host; k++) {
1827 sdbg_devinfo = kzalloc(sizeof(*sdbg_devinfo), GFP_KERNEL); 2660 sdbg_devinfo = kzalloc(sizeof(*sdbg_devinfo),GFP_KERNEL);
1828 if (NULL == sdbg_devinfo) { 2661 if (NULL == sdbg_devinfo) {
1829 printk(KERN_ERR "%s: out of memory at line %d\n", 2662 printk(KERN_ERR "%s: out of memory at line %d\n",
1830 __FUNCTION__, __LINE__); 2663 __FUNCTION__, __LINE__);
@@ -1905,7 +2738,7 @@ static int sdebug_driver_probe(struct device * dev)
1905 hpnt->max_id = scsi_debug_num_tgts + 1; 2738 hpnt->max_id = scsi_debug_num_tgts + 1;
1906 else 2739 else
1907 hpnt->max_id = scsi_debug_num_tgts; 2740 hpnt->max_id = scsi_debug_num_tgts;
1908 hpnt->max_lun = scsi_debug_max_luns; 2741 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
1909 2742
1910 error = scsi_add_host(hpnt, &sdbg_host->dev); 2743 error = scsi_add_host(hpnt, &sdbg_host->dev);
1911 if (error) { 2744 if (error) {
@@ -1959,7 +2792,7 @@ static void sdebug_max_tgts_luns(void)
1959 hpnt->max_id = scsi_debug_num_tgts + 1; 2792 hpnt->max_id = scsi_debug_num_tgts + 1;
1960 else 2793 else
1961 hpnt->max_id = scsi_debug_num_tgts; 2794 hpnt->max_id = scsi_debug_num_tgts;
1962 hpnt->max_lun = scsi_debug_max_luns; 2795 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* scsi_debug_max_luns; */
1963 } 2796 }
1964 spin_unlock(&sdebug_host_list_lock); 2797 spin_unlock(&sdebug_host_list_lock);
1965} 2798}
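
Note on the scsi_debug changes above: the new virtual_gb knob decouples the advertised capacity from the ram backing store, with each simulated gigabyte being 2048 * 1024 sectors of 512 bytes. A minimal sketch of that arithmetic (helper name is illustrative, not from the patch):

        /* Sketch only: 1 GiB / 512 bytes-per-sector = 2048 * 1024 sectors. */
        static unsigned long sdebug_capacity_sectors(int virtual_gb,
                                                     unsigned long store_sectors)
        {
                if (virtual_gb > 0)
                        return 2048UL * 1024 * virtual_gb;
                return store_sectors;   /* fall back to the real ram store */
        }

This is also why sdebug_store_sectors is now tracked separately from sdebug_capacity: the first describes the store, the second what the host is told.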
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index fb5cb4c9ac65..3d0429bc14ab 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -162,7 +162,7 @@ static struct {
162 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, 162 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
163 {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, 163 {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN},
164 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ 164 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
165 {"HP", "OPEN-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP XP Arrays */ 165 {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
166 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, 166 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
167 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, 167 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
168 {"HP", "C1557A", NULL, BLIST_FORCELUN}, 168 {"HP", "C1557A", NULL, BLIST_FORCELUN},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6a7a60fc0a4e..6683d596234a 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1672,7 +1672,9 @@ int
1672scsi_reset_provider(struct scsi_device *dev, int flag) 1672scsi_reset_provider(struct scsi_device *dev, int flag)
1673{ 1673{
1674 struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL); 1674 struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
1675 struct Scsi_Host *shost = dev->host;
1675 struct request req; 1676 struct request req;
1677 unsigned long flags;
1676 int rtn; 1678 int rtn;
1677 1679
1678 scmd->request = &req; 1680 scmd->request = &req;
@@ -1699,6 +1701,10 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1699 */ 1701 */
1700 scmd->pid = 0; 1702 scmd->pid = 0;
1701 1703
1704 spin_lock_irqsave(shost->host_lock, flags);
1705 shost->tmf_in_progress = 1;
1706 spin_unlock_irqrestore(shost->host_lock, flags);
1707
1702 switch (flag) { 1708 switch (flag) {
1703 case SCSI_TRY_RESET_DEVICE: 1709 case SCSI_TRY_RESET_DEVICE:
1704 rtn = scsi_try_bus_device_reset(scmd); 1710 rtn = scsi_try_bus_device_reset(scmd);
@@ -1717,6 +1723,22 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1717 rtn = FAILED; 1723 rtn = FAILED;
1718 } 1724 }
1719 1725
1726 spin_lock_irqsave(shost->host_lock, flags);
1727 shost->tmf_in_progress = 0;
1728 spin_unlock_irqrestore(shost->host_lock, flags);
1729
1730 /*
1731 * be sure to wake up anyone who was sleeping or had their queue
1732 * suspended while we performed the TMF.
1733 */
1734 SCSI_LOG_ERROR_RECOVERY(3,
1735 printk("%s: waking up host to restart after TMF\n",
1736 __FUNCTION__));
1737
1738 wake_up(&shost->host_wait);
1739
1740 scsi_run_host_queues(shost);
1741
1720 scsi_next_command(scmd); 1742 scsi_next_command(scmd);
1721 return rtn; 1743 return rtn;
1722} 1744}
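
The scsi_error.c hunk brackets the reset in a tmf_in_progress window and then restarts anything that waited it out. The pattern in isolation, as a sketch (shost as in the hunk):

        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->tmf_in_progress = 1;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /* ... issue the device/bus/host reset ... */

        spin_lock_irqsave(shost->host_lock, flags);
        shost->tmf_in_progress = 0;
        spin_unlock_irqrestore(shost->host_lock, flags);

        wake_up(&shost->host_wait);     /* wake sleepers blocked on the host */
        scsi_run_host_queues(shost);    /* restart queues suspended for the TMF */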
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3d04a9f386ac..08af9aae7df3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -855,8 +855,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
855 * b) We can just use scsi_requeue_command() here. This would 855 * b) We can just use scsi_requeue_command() here. This would
856 * be used if we just wanted to retry, for example. 856 * be used if we just wanted to retry, for example.
857 */ 857 */
858void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, 858void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
859 unsigned int block_bytes)
860{ 859{
861 int result = cmd->result; 860 int result = cmd->result;
862 int this_count = cmd->bufflen; 861 int this_count = cmd->bufflen;
@@ -921,87 +920,70 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
921 * Next deal with any sectors which we were able to correctly 920 * Next deal with any sectors which we were able to correctly
922 * handle. 921 * handle.
923 */ 922 */
924 if (good_bytes >= 0) { 923 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
925 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n", 924 "%d bytes done.\n",
926 req->nr_sectors, good_bytes)); 925 req->nr_sectors, good_bytes));
927 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg)); 926 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
928 927
929 if (clear_errors) 928 if (clear_errors)
930 req->errors = 0; 929 req->errors = 0;
931 /*
932 * If multiple sectors are requested in one buffer, then
933 * they will have been finished off by the first command.
934 * If not, then we have a multi-buffer command.
935 *
936 * If block_bytes != 0, it means we had a medium error
937 * of some sort, and that we want to mark some number of
938 * sectors as not uptodate. Thus we want to inhibit
939 * requeueing right here - we will requeue down below
940 * when we handle the bad sectors.
941 */
942 930
943 /* 931 /* A number of bytes were successfully read. If there
944 * If the command completed without error, then either 932 * are leftovers and there is some kind of error
945 * finish off the rest of the command, or start a new one. 933 * (result != 0), retry the rest.
946 */ 934 */
947 if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL) 935 if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
948 return; 936 return;
949 } 937
950 /* 938 /* good_bytes = 0, or (inclusive) there were leftovers and
951 * Now, if we were good little boys and girls, Santa left us a request 939 * result = 0, so scsi_end_request couldn't retry.
952 * sense buffer. We can extract information from this, so we
953 * can choose a block to remap, etc.
954 */ 940 */
955 if (sense_valid && !sense_deferred) { 941 if (sense_valid && !sense_deferred) {
956 switch (sshdr.sense_key) { 942 switch (sshdr.sense_key) {
957 case UNIT_ATTENTION: 943 case UNIT_ATTENTION:
958 if (cmd->device->removable) { 944 if (cmd->device->removable) {
959 /* detected disc change. set a bit 945 /* Detected disc change. Set a bit
960 * and quietly refuse further access. 946 * and quietly refuse further access.
961 */ 947 */
962 cmd->device->changed = 1; 948 cmd->device->changed = 1;
963 scsi_end_request(cmd, 0, 949 scsi_end_request(cmd, 0, this_count, 1);
964 this_count, 1);
965 return; 950 return;
966 } else { 951 } else {
967 /* 952 /* Must have been a power glitch, or a
968 * Must have been a power glitch, or a 953 * bus reset. Could not have been a
969 * bus reset. Could not have been a 954 * media change, so we just retry the
970 * media change, so we just retry the 955 * request and see what happens.
971 * request and see what happens. 956 */
972 */
973 scsi_requeue_command(q, cmd); 957 scsi_requeue_command(q, cmd);
974 return; 958 return;
975 } 959 }
976 break; 960 break;
977 case ILLEGAL_REQUEST: 961 case ILLEGAL_REQUEST:
978 /* 962 /* If we had an ILLEGAL REQUEST returned, then
979 * If we had an ILLEGAL REQUEST returned, then we may 963 * we may have performed an unsupported
980 * have performed an unsupported command. The only 964 * command. The only thing this should be
981 * thing this should be would be a ten byte read where 965 * would be a ten byte read where only a six
982 * only a six byte read was supported. Also, on a 966 * byte read was supported. Also, on a system
983 * system where READ CAPACITY failed, we may have read 967 * where READ CAPACITY failed, we may have
984 * past the end of the disk. 968 * read past the end of the disk.
985 */ 969 */
986 if ((cmd->device->use_10_for_rw && 970 if ((cmd->device->use_10_for_rw &&
987 sshdr.asc == 0x20 && sshdr.ascq == 0x00) && 971 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
988 (cmd->cmnd[0] == READ_10 || 972 (cmd->cmnd[0] == READ_10 ||
989 cmd->cmnd[0] == WRITE_10)) { 973 cmd->cmnd[0] == WRITE_10)) {
990 cmd->device->use_10_for_rw = 0; 974 cmd->device->use_10_for_rw = 0;
991 /* 975 /* This will cause a retry with a
992 * This will cause a retry with a 6-byte 976 * 6-byte command.
993 * command.
994 */ 977 */
995 scsi_requeue_command(q, cmd); 978 scsi_requeue_command(q, cmd);
996 result = 0; 979 return;
997 } else { 980 } else {
998 scsi_end_request(cmd, 0, this_count, 1); 981 scsi_end_request(cmd, 0, this_count, 1);
999 return; 982 return;
1000 } 983 }
1001 break; 984 break;
1002 case NOT_READY: 985 case NOT_READY:
1003 /* 986 /* If the device is in the process of becoming
1004 * If the device is in the process of becoming
1005 * ready, or has a temporary blockage, retry. 987 * ready, or has a temporary blockage, retry.
1006 */ 988 */
1007 if (sshdr.asc == 0x04) { 989 if (sshdr.asc == 0x04) {
@@ -1021,7 +1003,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
1021 } 1003 }
1022 if (!(req->flags & REQ_QUIET)) { 1004 if (!(req->flags & REQ_QUIET)) {
1023 scmd_printk(KERN_INFO, cmd, 1005 scmd_printk(KERN_INFO, cmd,
1024 "Device not ready: "); 1006 "Device not ready: ");
1025 scsi_print_sense_hdr("", &sshdr); 1007 scsi_print_sense_hdr("", &sshdr);
1026 } 1008 }
1027 scsi_end_request(cmd, 0, this_count, 1); 1009 scsi_end_request(cmd, 0, this_count, 1);
@@ -1029,21 +1011,21 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
1029 case VOLUME_OVERFLOW: 1011 case VOLUME_OVERFLOW:
1030 if (!(req->flags & REQ_QUIET)) { 1012 if (!(req->flags & REQ_QUIET)) {
1031 scmd_printk(KERN_INFO, cmd, 1013 scmd_printk(KERN_INFO, cmd,
1032 "Volume overflow, CDB: "); 1014 "Volume overflow, CDB: ");
1033 __scsi_print_command(cmd->data_cmnd); 1015 __scsi_print_command(cmd->data_cmnd);
1034 scsi_print_sense("", cmd); 1016 scsi_print_sense("", cmd);
1035 } 1017 }
1036 scsi_end_request(cmd, 0, block_bytes, 1); 1018 /* See SSC3rXX or current. */
1019 scsi_end_request(cmd, 0, this_count, 1);
1037 return; 1020 return;
1038 default: 1021 default:
1039 break; 1022 break;
1040 } 1023 }
1041 } /* driver byte != 0 */ 1024 }
1042 if (host_byte(result) == DID_RESET) { 1025 if (host_byte(result) == DID_RESET) {
1043 /* 1026 /* Third party bus reset or reset for error recovery
1044 * Third party bus reset or reset for error 1027 * reasons. Just retry the request and see what
1045 * recovery reasons. Just retry the request 1028 * happens.
1046 * and see what happens.
1047 */ 1029 */
1048 scsi_requeue_command(q, cmd); 1030 scsi_requeue_command(q, cmd);
1049 return; 1031 return;
@@ -1051,21 +1033,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
1051 if (result) { 1033 if (result) {
1052 if (!(req->flags & REQ_QUIET)) { 1034 if (!(req->flags & REQ_QUIET)) {
1053 scmd_printk(KERN_INFO, cmd, 1035 scmd_printk(KERN_INFO, cmd,
1054 "SCSI error: return code = 0x%x\n", result); 1036 "SCSI error: return code = 0x%08x\n",
1055 1037 result);
1056 if (driver_byte(result) & DRIVER_SENSE) 1038 if (driver_byte(result) & DRIVER_SENSE)
1057 scsi_print_sense("", cmd); 1039 scsi_print_sense("", cmd);
1058 } 1040 }
1059 /*
1060 * Mark a single buffer as not uptodate. Queue the remainder.
1061 * We sometimes get this cruft in the event that a medium error
1062 * isn't properly reported.
1063 */
1064 block_bytes = req->hard_cur_sectors << 9;
1065 if (!block_bytes)
1066 block_bytes = req->data_len;
1067 scsi_end_request(cmd, 0, block_bytes, 1);
1068 } 1041 }
1042 scsi_end_request(cmd, 0, this_count, !result);
1069} 1043}
1070EXPORT_SYMBOL(scsi_io_completion); 1044EXPORT_SYMBOL(scsi_io_completion);
1071 1045
@@ -1169,7 +1143,7 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
1169 * successfully. Since this is a REQ_BLOCK_PC command the 1143 * successfully. Since this is a REQ_BLOCK_PC command the
1170 * caller should check the request's errors value 1144 * caller should check the request's errors value
1171 */ 1145 */
1172 scsi_io_completion(cmd, cmd->bufflen, 0); 1146 scsi_io_completion(cmd, cmd->bufflen);
1173} 1147}
1174 1148
1175static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) 1149static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
@@ -2050,6 +2024,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2050 switch (oldstate) { 2024 switch (oldstate) {
2051 case SDEV_CREATED: 2025 case SDEV_CREATED:
2052 case SDEV_RUNNING: 2026 case SDEV_RUNNING:
2027 case SDEV_QUIESCE:
2053 case SDEV_OFFLINE: 2028 case SDEV_OFFLINE:
2054 case SDEV_BLOCK: 2029 case SDEV_BLOCK:
2055 break; 2030 break;
@@ -2060,6 +2035,9 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2060 2035
2061 case SDEV_DEL: 2036 case SDEV_DEL:
2062 switch (oldstate) { 2037 switch (oldstate) {
2038 case SDEV_CREATED:
2039 case SDEV_RUNNING:
2040 case SDEV_OFFLINE:
2063 case SDEV_CANCEL: 2041 case SDEV_CANCEL:
2064 break; 2042 break;
2065 default: 2043 default:
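
With block_bytes gone, completion callers pass only the byte count that actually succeeded and scsi_io_completion derives any residual from the request itself. The call-site change, reduced to a sketch:

        /* before */
        scsi_io_completion(cmd, cmd->bufflen, 0);
        /* after: only the successfully transferred byte count */
        scsi_io_completion(cmd, cmd->bufflen);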
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 015c90cf3abc..e2fbe9a9d5a9 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -116,7 +116,7 @@ extern struct bus_type scsi_bus_type;
116 * classes. 116 * classes.
117 */ 117 */
118 118
119#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT (HZ*60) 119#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
120extern int scsi_internal_device_block(struct scsi_device *sdev); 120extern int scsi_internal_device_block(struct scsi_device *sdev);
121extern int scsi_internal_device_unblock(struct scsi_device *sdev); 121extern int scsi_internal_device_unblock(struct scsi_device *sdev);
122 122
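
Note the unit change above: the old value was jiffies (HZ*60, one minute), the new one is 600 seconds, so the cap also grows to ten minutes and any user arming a timer presumably converts at the point of use. A sketch of that conversion (consumer sites are outside this hunk):

        #include <linux/jiffies.h>

        /* SCSI_DEVICE_BLOCK_MAX_TIMEOUT is now seconds, not jiffies */
        unsigned long t = msecs_to_jiffies(SCSI_DEVICE_BLOCK_MAX_TIMEOUT * 1000);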
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
index d76e6e3d8ca5..e1edab45a37b 100644
--- a/drivers/scsi/scsi_sas_internal.h
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -2,7 +2,8 @@
2#define _SCSI_SAS_INTERNAL_H 2#define _SCSI_SAS_INTERNAL_H
3 3
4#define SAS_HOST_ATTRS 0 4#define SAS_HOST_ATTRS 0
5#define SAS_PORT_ATTRS 17 5#define SAS_PHY_ATTRS 17
6#define SAS_PORT_ATTRS 1
6#define SAS_RPORT_ATTRS 7 7#define SAS_RPORT_ATTRS 7
7#define SAS_END_DEV_ATTRS 3 8#define SAS_END_DEV_ATTRS 3
8#define SAS_EXPANDER_ATTRS 7 9#define SAS_EXPANDER_ATTRS 7
@@ -13,12 +14,14 @@ struct sas_internal {
13 struct sas_domain_function_template *dft; 14 struct sas_domain_function_template *dft;
14 15
15 struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS]; 16 struct class_device_attribute private_host_attrs[SAS_HOST_ATTRS];
16 struct class_device_attribute private_phy_attrs[SAS_PORT_ATTRS]; 17 struct class_device_attribute private_phy_attrs[SAS_PHY_ATTRS];
18 struct class_device_attribute private_port_attrs[SAS_PORT_ATTRS];
17 struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS]; 19 struct class_device_attribute private_rphy_attrs[SAS_RPORT_ATTRS];
18 struct class_device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS]; 20 struct class_device_attribute private_end_dev_attrs[SAS_END_DEV_ATTRS];
19 struct class_device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS]; 21 struct class_device_attribute private_expander_attrs[SAS_EXPANDER_ATTRS];
20 22
21 struct transport_container phy_attr_cont; 23 struct transport_container phy_attr_cont;
24 struct transport_container port_attr_cont;
22 struct transport_container rphy_attr_cont; 25 struct transport_container rphy_attr_cont;
23 struct transport_container end_dev_attr_cont; 26 struct transport_container end_dev_attr_cont;
24 struct transport_container expander_attr_cont; 27 struct transport_container expander_attr_cont;
@@ -28,7 +31,8 @@ struct sas_internal {
28 * needed by scsi_sysfs.c 31 * needed by scsi_sysfs.c
29 */ 32 */
30 struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1]; 33 struct class_device_attribute *host_attrs[SAS_HOST_ATTRS + 1];
31 struct class_device_attribute *phy_attrs[SAS_PORT_ATTRS + 1]; 34 struct class_device_attribute *phy_attrs[SAS_PHY_ATTRS + 1];
35 struct class_device_attribute *port_attrs[SAS_PORT_ATTRS + 1];
32 struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1]; 36 struct class_device_attribute *rphy_attrs[SAS_RPORT_ATTRS + 1];
33 struct class_device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1]; 37 struct class_device_attribute *end_dev_attrs[SAS_END_DEV_ATTRS + 1];
34 struct class_device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1]; 38 struct class_device_attribute *expander_attrs[SAS_EXPANDER_ATTRS + 1];
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 1341608e9e3b..1bd92b9b46d9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -809,6 +809,7 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
809 809
810static inline void scsi_destroy_sdev(struct scsi_device *sdev) 810static inline void scsi_destroy_sdev(struct scsi_device *sdev)
811{ 811{
812 scsi_device_set_state(sdev, SDEV_DEL);
812 if (sdev->host->hostt->slave_destroy) 813 if (sdev->host->hostt->slave_destroy)
813 sdev->host->hostt->slave_destroy(sdev); 814 sdev->host->hostt->slave_destroy(sdev);
814 transport_destroy_device(&sdev->sdev_gendev); 815 transport_destroy_device(&sdev->sdev_gendev);
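
scsi_destroy_sdev() now marks the device SDEV_DEL before tearing it down, which is why the scsi_lib.c hunks earlier legalize entering SDEV_DEL directly from CREATED, RUNNING, or OFFLINE (and SDEV_CANCEL from QUIESCE). The resulting teardown order, sketched:

        scsi_device_set_state(sdev, SDEV_DEL);  /* mark dead before teardown */
        if (sdev->host->hostt->slave_destroy)
                sdev->host->hostt->slave_destroy(sdev);
        transport_destroy_device(&sdev->sdev_gendev);
        /* ... remaining release unchanged ... */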
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index f2db7a41cf1d..b03aa85108e5 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -368,7 +368,7 @@ static DECLARE_TRANSPORT_CLASS(fc_rport_class,
368 * should insulate the loss of a remote port. 368 * should insulate the loss of a remote port.
369 * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT. 369 * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
370 */ 370 */
371static unsigned int fc_dev_loss_tmo = SCSI_DEVICE_BLOCK_MAX_TIMEOUT; 371static unsigned int fc_dev_loss_tmo = 60; /* seconds */
372 372
373module_param_named(dev_loss_tmo, fc_dev_loss_tmo, int, S_IRUGO|S_IWUSR); 373module_param_named(dev_loss_tmo, fc_dev_loss_tmo, int, S_IRUGO|S_IWUSR);
374MODULE_PARM_DESC(dev_loss_tmo, 374MODULE_PARM_DESC(dev_loss_tmo,
@@ -1284,7 +1284,9 @@ EXPORT_SYMBOL(fc_release_transport);
1284 * @work: Work to queue for execution. 1284 * @work: Work to queue for execution.
1285 * 1285 *
1286 * Return value: 1286 * Return value:
1287 * 0 on success / != 0 for error 1287 * 1 - work queued for execution
1288 * 0 - work is already queued
1289 * -EINVAL - work queue doesn't exist
1288 **/ 1290 **/
1289static int 1291static int
1290fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) 1292fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
@@ -1434,8 +1436,6 @@ fc_starget_delete(void *data)
1434 struct Scsi_Host *shost = rport_to_shost(rport); 1436 struct Scsi_Host *shost = rport_to_shost(rport);
1435 unsigned long flags; 1437 unsigned long flags;
1436 1438
1437 scsi_target_unblock(&rport->dev);
1438
1439 spin_lock_irqsave(shost->host_lock, flags); 1439 spin_lock_irqsave(shost->host_lock, flags);
1440 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { 1440 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
1441 spin_unlock_irqrestore(shost->host_lock, flags); 1441 spin_unlock_irqrestore(shost->host_lock, flags);
@@ -1476,7 +1476,8 @@ fc_rport_final_delete(void *data)
1476 transport_remove_device(dev); 1476 transport_remove_device(dev);
1477 device_del(dev); 1477 device_del(dev);
1478 transport_destroy_device(dev); 1478 transport_destroy_device(dev);
1479 put_device(&shost->shost_gendev); 1479 put_device(&shost->shost_gendev); /* for fc_host->rport list */
1480 put_device(dev); /* for self-reference */
1480} 1481}
1481 1482
1482 1483
@@ -1537,13 +1538,13 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1537 else 1538 else
1538 rport->scsi_target_id = -1; 1539 rport->scsi_target_id = -1;
1539 list_add_tail(&rport->peers, &fc_host->rports); 1540 list_add_tail(&rport->peers, &fc_host->rports);
1540 get_device(&shost->shost_gendev); 1541 get_device(&shost->shost_gendev); /* for fc_host->rport list */
1541 1542
1542 spin_unlock_irqrestore(shost->host_lock, flags); 1543 spin_unlock_irqrestore(shost->host_lock, flags);
1543 1544
1544 dev = &rport->dev; 1545 dev = &rport->dev;
1545 device_initialize(dev); 1546 device_initialize(dev); /* takes self reference */
1546 dev->parent = get_device(&shost->shost_gendev); 1547 dev->parent = get_device(&shost->shost_gendev); /* parent reference */
1547 dev->release = fc_rport_dev_release; 1548 dev->release = fc_rport_dev_release;
1548 sprintf(dev->bus_id, "rport-%d:%d-%d", 1549 sprintf(dev->bus_id, "rport-%d:%d-%d",
1549 shost->host_no, channel, rport->number); 1550 shost->host_no, channel, rport->number);
@@ -1567,10 +1568,9 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1567 1568
1568delete_rport: 1569delete_rport:
1569 transport_destroy_device(dev); 1570 transport_destroy_device(dev);
1570 put_device(dev->parent);
1571 spin_lock_irqsave(shost->host_lock, flags); 1571 spin_lock_irqsave(shost->host_lock, flags);
1572 list_del(&rport->peers); 1572 list_del(&rport->peers);
1573 put_device(&shost->shost_gendev); 1573 put_device(&shost->shost_gendev); /* for fc_host->rport list */
1574 spin_unlock_irqrestore(shost->host_lock, flags); 1574 spin_unlock_irqrestore(shost->host_lock, flags);
1575 put_device(dev->parent); 1575 put_device(dev->parent);
1576 kfree(rport); 1576 kfree(rport);
@@ -1707,6 +1707,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1707 1707
1708 spin_unlock_irqrestore(shost->host_lock, flags); 1708 spin_unlock_irqrestore(shost->host_lock, flags);
1709 1709
1710 scsi_target_unblock(&rport->dev);
1711
1710 return rport; 1712 return rport;
1711 } 1713 }
1712 } 1714 }
@@ -1762,9 +1764,10 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1762 /* initiate a scan of the target */ 1764 /* initiate a scan of the target */
1763 rport->flags |= FC_RPORT_SCAN_PENDING; 1765 rport->flags |= FC_RPORT_SCAN_PENDING;
1764 scsi_queue_work(shost, &rport->scan_work); 1766 scsi_queue_work(shost, &rport->scan_work);
1765 } 1767 spin_unlock_irqrestore(shost->host_lock, flags);
1766 1768 scsi_target_unblock(&rport->dev);
1767 spin_unlock_irqrestore(shost->host_lock, flags); 1769 } else
1770 spin_unlock_irqrestore(shost->host_lock, flags);
1768 1771
1769 return rport; 1772 return rport;
1770 } 1773 }
@@ -1938,6 +1941,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
1938 rport->flags |= FC_RPORT_SCAN_PENDING; 1941 rport->flags |= FC_RPORT_SCAN_PENDING;
1939 scsi_queue_work(shost, &rport->scan_work); 1942 scsi_queue_work(shost, &rport->scan_work);
1940 spin_unlock_irqrestore(shost->host_lock, flags); 1943 spin_unlock_irqrestore(shost->host_lock, flags);
1944 scsi_target_unblock(&rport->dev);
1941 } 1945 }
1942} 1946}
1943EXPORT_SYMBOL(fc_remote_port_rolechg); 1947EXPORT_SYMBOL(fc_remote_port_rolechg);
@@ -1970,8 +1974,9 @@ fc_timeout_deleted_rport(void *data)
1970 dev_printk(KERN_ERR, &rport->dev, 1974 dev_printk(KERN_ERR, &rport->dev,
1971 "blocked FC remote port time out: no longer" 1975 "blocked FC remote port time out: no longer"
1972 " a FCP target, removing starget\n"); 1976 " a FCP target, removing starget\n");
1973 fc_queue_work(shost, &rport->stgt_delete_work);
1974 spin_unlock_irqrestore(shost->host_lock, flags); 1977 spin_unlock_irqrestore(shost->host_lock, flags);
1978 scsi_target_unblock(&rport->dev);
1979 fc_queue_work(shost, &rport->stgt_delete_work);
1975 return; 1980 return;
1976 } 1981 }
1977 1982
@@ -2035,17 +2040,15 @@ fc_timeout_deleted_rport(void *data)
2035 * went away and didn't come back - we'll remove 2040 * went away and didn't come back - we'll remove
2036 * all attached scsi devices. 2041 * all attached scsi devices.
2037 */ 2042 */
2038 fc_queue_work(shost, &rport->stgt_delete_work);
2039
2040 spin_unlock_irqrestore(shost->host_lock, flags); 2043 spin_unlock_irqrestore(shost->host_lock, flags);
2044
2045 scsi_target_unblock(&rport->dev);
2046 fc_queue_work(shost, &rport->stgt_delete_work);
2041} 2047}
2042 2048
2043/** 2049/**
2044 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 2050 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
2045 * 2051 *
2046 * Will unblock the target (in case it went away and has now come back),
2047 * then invoke a scan.
2048 *
2049 * @data: remote port to be scanned. 2052 * @data: remote port to be scanned.
2050 **/ 2053 **/
2051static void 2054static void
@@ -2057,7 +2060,6 @@ fc_scsi_scan_rport(void *data)
2057 2060
2058 if ((rport->port_state == FC_PORTSTATE_ONLINE) && 2061 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
2059 (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 2062 (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
2060 scsi_target_unblock(&rport->dev);
2061 scsi_scan_target(&rport->dev, rport->channel, 2063 scsi_scan_target(&rport->dev, rport->channel,
2062 rport->scsi_target_id, SCAN_WILD_CARD, 1); 2064 rport->scsi_target_id, SCAN_WILD_CARD, 1);
2063 } 2065 }
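
A recurring shape in the fc transport hunks above: scsi_target_unblock() is consistently moved outside the host_lock critical section, and in the timeout paths ahead of queueing the starget delete work, presumably because unblocking walks devices and restarts queues, which is unsafe under a spinlock with interrupts off. The pattern:

        spin_lock_irqsave(shost->host_lock, flags);
        rport->flags |= FC_RPORT_SCAN_PENDING;
        scsi_queue_work(shost, &rport->scan_work);
        spin_unlock_irqrestore(shost->host_lock, flags);

        scsi_target_unblock(&rport->dev);       /* never under host_lock */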
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 5569fdcfd621..7b9e8fa1a4e0 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -228,14 +228,11 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
228static void iscsi_session_release(struct device *dev) 228static void iscsi_session_release(struct device *dev)
229{ 229{
230 struct iscsi_cls_session *session = iscsi_dev_to_session(dev); 230 struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
231 struct iscsi_transport *transport = session->transport;
232 struct Scsi_Host *shost; 231 struct Scsi_Host *shost;
233 232
234 shost = iscsi_session_to_shost(session); 233 shost = iscsi_session_to_shost(session);
235 scsi_host_put(shost); 234 scsi_host_put(shost);
236 kfree(session->targetname);
237 kfree(session); 235 kfree(session);
238 module_put(transport->owner);
239} 236}
240 237
241static int iscsi_is_session_dev(const struct device *dev) 238static int iscsi_is_session_dev(const struct device *dev)
@@ -251,10 +248,9 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
251 248
252 mutex_lock(&ihost->mutex); 249 mutex_lock(&ihost->mutex);
253 list_for_each_entry(session, &ihost->sessions, host_list) { 250 list_for_each_entry(session, &ihost->sessions, host_list) {
254 if ((channel == SCAN_WILD_CARD || 251 if ((channel == SCAN_WILD_CARD || channel == 0) &&
255 channel == session->channel) &&
256 (id == SCAN_WILD_CARD || id == session->target_id)) 252 (id == SCAN_WILD_CARD || id == session->target_id))
257 scsi_scan_target(&session->dev, session->channel, 253 scsi_scan_target(&session->dev, 0,
258 session->target_id, lun, 1); 254 session->target_id, lun, 1);
259 } 255 }
260 mutex_unlock(&ihost->mutex); 256 mutex_unlock(&ihost->mutex);
@@ -291,80 +287,92 @@ void iscsi_block_session(struct iscsi_cls_session *session)
291} 287}
292EXPORT_SYMBOL_GPL(iscsi_block_session); 288EXPORT_SYMBOL_GPL(iscsi_block_session);
293 289
294/**
295 * iscsi_create_session - create iscsi class session
296 * @shost: scsi host
297 * @transport: iscsi transport
298 *
299 * This can be called from a LLD or iscsi_transport.
300 **/
301struct iscsi_cls_session * 290struct iscsi_cls_session *
302iscsi_create_session(struct Scsi_Host *shost, 291iscsi_alloc_session(struct Scsi_Host *shost,
303 struct iscsi_transport *transport, int channel) 292 struct iscsi_transport *transport)
304{ 293{
305 struct iscsi_host *ihost;
306 struct iscsi_cls_session *session; 294 struct iscsi_cls_session *session;
307 int err;
308
309 if (!try_module_get(transport->owner))
310 return NULL;
311 295
312 session = kzalloc(sizeof(*session) + transport->sessiondata_size, 296 session = kzalloc(sizeof(*session) + transport->sessiondata_size,
313 GFP_KERNEL); 297 GFP_KERNEL);
314 if (!session) 298 if (!session)
315 goto module_put; 299 return NULL;
300
316 session->transport = transport; 301 session->transport = transport;
317 session->recovery_tmo = 120; 302 session->recovery_tmo = 120;
318 INIT_WORK(&session->recovery_work, session_recovery_timedout, session); 303 INIT_WORK(&session->recovery_work, session_recovery_timedout, session);
319 INIT_LIST_HEAD(&session->host_list); 304 INIT_LIST_HEAD(&session->host_list);
320 INIT_LIST_HEAD(&session->sess_list); 305 INIT_LIST_HEAD(&session->sess_list);
321 306
307 /* this is released in the dev's release function */
308 scsi_host_get(shost);
309 session->dev.parent = &shost->shost_gendev;
310 session->dev.release = iscsi_session_release;
311 device_initialize(&session->dev);
322 if (transport->sessiondata_size) 312 if (transport->sessiondata_size)
323 session->dd_data = &session[1]; 313 session->dd_data = &session[1];
314 return session;
315}
316EXPORT_SYMBOL_GPL(iscsi_alloc_session);
324 317
325 /* this is released in the dev's release function */ 318int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
326 scsi_host_get(shost); 319{
327 ihost = shost->shost_data; 320 struct Scsi_Host *shost = iscsi_session_to_shost(session);
321 struct iscsi_host *ihost;
322 int err;
328 323
324 ihost = shost->shost_data;
329 session->sid = iscsi_session_nr++; 325 session->sid = iscsi_session_nr++;
330 session->channel = channel; 326 session->target_id = target_id;
331 session->target_id = ihost->next_target_id++;
332 327
333 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u", 328 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
334 session->sid); 329 session->sid);
335 session->dev.parent = &shost->shost_gendev; 330 err = device_add(&session->dev);
336 session->dev.release = iscsi_session_release;
337 err = device_register(&session->dev);
338 if (err) { 331 if (err) {
339 dev_printk(KERN_ERR, &session->dev, "iscsi: could not " 332 dev_printk(KERN_ERR, &session->dev, "iscsi: could not "
340 "register session's dev\n"); 333 "register session's dev\n");
341 goto free_session; 334 goto release_host;
342 } 335 }
343 transport_register_device(&session->dev); 336 transport_register_device(&session->dev);
344 337
345 mutex_lock(&ihost->mutex); 338 mutex_lock(&ihost->mutex);
346 list_add(&session->host_list, &ihost->sessions); 339 list_add(&session->host_list, &ihost->sessions);
347 mutex_unlock(&ihost->mutex); 340 mutex_unlock(&ihost->mutex);
341 return 0;
348 342
349 return session; 343release_host:
350 344 scsi_host_put(shost);
351free_session: 345 return err;
352 kfree(session);
353module_put:
354 module_put(transport->owner);
355 return NULL;
356} 346}
357 347EXPORT_SYMBOL_GPL(iscsi_add_session);
358EXPORT_SYMBOL_GPL(iscsi_create_session);
359 348
360/** 349/**
361 * iscsi_destroy_session - destroy iscsi session 350 * iscsi_create_session - create iscsi class session
362 * @session: iscsi_session 351 * @shost: scsi host
352 * @transport: iscsi transport
363 * 353 *
364 * Can be called by a LLD or iscsi_transport. There must not be 354 * This can be called from a LLD or iscsi_transport.
365 * any running connections.
366 **/ 355 **/
367int iscsi_destroy_session(struct iscsi_cls_session *session) 356struct iscsi_cls_session *
357iscsi_create_session(struct Scsi_Host *shost,
358 struct iscsi_transport *transport,
359 unsigned int target_id)
360{
361 struct iscsi_cls_session *session;
362
363 session = iscsi_alloc_session(shost, transport);
364 if (!session)
365 return NULL;
366
367 if (iscsi_add_session(session, target_id)) {
368 iscsi_free_session(session);
369 return NULL;
370 }
371 return session;
372}
373EXPORT_SYMBOL_GPL(iscsi_create_session);
374
375void iscsi_remove_session(struct iscsi_cls_session *session)
368{ 376{
369 struct Scsi_Host *shost = iscsi_session_to_shost(session); 377 struct Scsi_Host *shost = iscsi_session_to_shost(session);
370 struct iscsi_host *ihost = shost->shost_data; 378 struct iscsi_host *ihost = shost->shost_data;
@@ -376,19 +384,88 @@ int iscsi_destroy_session(struct iscsi_cls_session *session)
376 list_del(&session->host_list); 384 list_del(&session->host_list);
377 mutex_unlock(&ihost->mutex); 385 mutex_unlock(&ihost->mutex);
378 386
387 scsi_remove_target(&session->dev);
388
379 transport_unregister_device(&session->dev); 389 transport_unregister_device(&session->dev);
380 device_unregister(&session->dev); 390 device_del(&session->dev);
381 return 0; 391}
392EXPORT_SYMBOL_GPL(iscsi_remove_session);
393
394void iscsi_free_session(struct iscsi_cls_session *session)
395{
396 put_device(&session->dev);
382} 397}
383 398
399EXPORT_SYMBOL_GPL(iscsi_free_session);
400
401/**
402 * iscsi_destroy_session - destroy iscsi session
403 * @session: iscsi_session
404 *
405 * Can be called by a LLD or iscsi_transport. There must not be
406 * any running connections.
407 **/
408int iscsi_destroy_session(struct iscsi_cls_session *session)
409{
410 iscsi_remove_session(session);
411 iscsi_free_session(session);
412 return 0;
413}
384EXPORT_SYMBOL_GPL(iscsi_destroy_session); 414EXPORT_SYMBOL_GPL(iscsi_destroy_session);
385 415
416static void mempool_zone_destroy(struct mempool_zone *zp)
417{
418 mempool_destroy(zp->pool);
419 kfree(zp);
420}
421
422static void*
423mempool_zone_alloc_skb(gfp_t gfp_mask, void *pool_data)
424{
425 struct mempool_zone *zone = pool_data;
426
427 return alloc_skb(zone->size, gfp_mask);
428}
429
430static void
431mempool_zone_free_skb(void *element, void *pool_data)
432{
433 kfree_skb(element);
434}
435
436static struct mempool_zone *
437mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
438{
439 struct mempool_zone *zp;
440
441 zp = kzalloc(sizeof(*zp), GFP_KERNEL);
442 if (!zp)
443 return NULL;
444
445 zp->size = size;
446 zp->hiwat = hiwat;
447 INIT_LIST_HEAD(&zp->freequeue);
448 spin_lock_init(&zp->freelock);
449 atomic_set(&zp->allocated, 0);
450
451 zp->pool = mempool_create(max, mempool_zone_alloc_skb,
452 mempool_zone_free_skb, zp);
453 if (!zp->pool) {
454 kfree(zp);
455 return NULL;
456 }
457
458 return zp;
459}
460
386static void iscsi_conn_release(struct device *dev) 461static void iscsi_conn_release(struct device *dev)
387{ 462{
388 struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev); 463 struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
389 struct device *parent = conn->dev.parent; 464 struct device *parent = conn->dev.parent;
390 465
391 kfree(conn->persistent_address); 466 mempool_zone_destroy(conn->z_pdu);
467 mempool_zone_destroy(conn->z_error);
468
392 kfree(conn); 469 kfree(conn);
393 put_device(parent); 470 put_device(parent);
394} 471}
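
iscsi_create_session() survives as a thin wrapper, so software transports are untouched while hardware iSCSI drivers can split allocation from registration. The resulting lifecycle, sketched from the wrapper above:

        session = iscsi_alloc_session(shost, transport);
        if (!session)
                return NULL;
        if (iscsi_add_session(session, target_id)) {    /* device_add + list */
                iscsi_free_session(session);            /* drops the dev ref */
                return NULL;
        }
        /* ... session in use ... */
        iscsi_remove_session(session);  /* remove target, unregister device */
        iscsi_free_session(session);    /* final put_device */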
@@ -398,6 +475,31 @@ static int iscsi_is_conn_dev(const struct device *dev)
398 return dev->release == iscsi_conn_release; 475 return dev->release == iscsi_conn_release;
399} 476}
400 477
478static int iscsi_create_event_pools(struct iscsi_cls_conn *conn)
479{
480 conn->z_pdu = mempool_zone_init(Z_MAX_PDU,
481 NLMSG_SPACE(sizeof(struct iscsi_uevent) +
482 sizeof(struct iscsi_hdr) +
483 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH),
484 Z_HIWAT_PDU);
485 if (!conn->z_pdu) {
486 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not allocate "
487 "pdu zone for new conn\n");
488 return -ENOMEM;
489 }
490
491 conn->z_error = mempool_zone_init(Z_MAX_ERROR,
492 NLMSG_SPACE(sizeof(struct iscsi_uevent)),
493 Z_HIWAT_ERROR);
494 if (!conn->z_error) {
495 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not allocate "
496 "error zone for new conn\n");
497 mempool_zone_destroy(conn->z_pdu);
498 return -ENOMEM;
499 }
500 return 0;
501}
502
401/** 503/**
402 * iscsi_create_conn - create iscsi class connection 504 * iscsi_create_conn - create iscsi class connection
403 * @session: iscsi cls session 505 * @session: iscsi cls session
@@ -430,9 +532,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
430 conn->transport = transport; 532 conn->transport = transport;
431 conn->cid = cid; 533 conn->cid = cid;
432 534
535 if (iscsi_create_event_pools(conn))
536 goto free_conn;
537
433 /* this is released in the dev's release function */ 538 /* this is released in the dev's release function */
434 if (!get_device(&session->dev)) 539 if (!get_device(&session->dev))
435 goto free_conn; 540 goto free_conn_pools;
436 541
437 snprintf(conn->dev.bus_id, BUS_ID_SIZE, "connection%d:%u", 542 snprintf(conn->dev.bus_id, BUS_ID_SIZE, "connection%d:%u",
438 session->sid, cid); 543 session->sid, cid);
@@ -449,6 +554,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
449 554
450release_parent_ref: 555release_parent_ref:
451 put_device(&session->dev); 556 put_device(&session->dev);
557free_conn_pools:
558
452free_conn: 559free_conn:
453 kfree(conn); 560 kfree(conn);
454 return NULL; 561 return NULL;
@@ -496,20 +603,6 @@ static inline struct list_head *skb_to_lh(struct sk_buff *skb)
496 return (struct list_head *)&skb->cb; 603 return (struct list_head *)&skb->cb;
497} 604}
498 605
499static void*
500mempool_zone_alloc_skb(gfp_t gfp_mask, void *pool_data)
501{
502 struct mempool_zone *zone = pool_data;
503
504 return alloc_skb(zone->size, gfp_mask);
505}
506
507static void
508mempool_zone_free_skb(void *element, void *pool_data)
509{
510 kfree_skb(element);
511}
512
513static void 606static void
514mempool_zone_complete(struct mempool_zone *zone) 607mempool_zone_complete(struct mempool_zone *zone)
515{ 608{
@@ -529,37 +622,6 @@ mempool_zone_complete(struct mempool_zone *zone)
529 spin_unlock_irqrestore(&zone->freelock, flags); 622 spin_unlock_irqrestore(&zone->freelock, flags);
530} 623}
531 624
532static struct mempool_zone *
533mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
534{
535 struct mempool_zone *zp;
536
537 zp = kzalloc(sizeof(*zp), GFP_KERNEL);
538 if (!zp)
539 return NULL;
540
541 zp->size = size;
542 zp->hiwat = hiwat;
543 INIT_LIST_HEAD(&zp->freequeue);
544 spin_lock_init(&zp->freelock);
545 atomic_set(&zp->allocated, 0);
546
547 zp->pool = mempool_create(max, mempool_zone_alloc_skb,
548 mempool_zone_free_skb, zp);
549 if (!zp->pool) {
550 kfree(zp);
551 return NULL;
552 }
553
554 return zp;
555}
556
557static void mempool_zone_destroy(struct mempool_zone *zp)
558{
559 mempool_destroy(zp->pool);
560 kfree(zp);
561}
562
563static struct sk_buff* 625static struct sk_buff*
564mempool_zone_get_skb(struct mempool_zone *zone) 626mempool_zone_get_skb(struct mempool_zone *zone)
565{ 627{
@@ -572,6 +634,27 @@ mempool_zone_get_skb(struct mempool_zone *zone)
572} 634}
573 635
574static int 636static int
637iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb)
638{
639 unsigned long flags;
640 int rc;
641
642 skb_get(skb);
643 rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL);
644 if (rc < 0) {
645 mempool_free(skb, zone->pool);
646 printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc);
647 return rc;
648 }
649
650 spin_lock_irqsave(&zone->freelock, flags);
651 INIT_LIST_HEAD(skb_to_lh(skb));
652 list_add(skb_to_lh(skb), &zone->freequeue);
653 spin_unlock_irqrestore(&zone->freelock, flags);
654 return 0;
655}
656
657static int
575iscsi_unicast_skb(struct mempool_zone *zone, struct sk_buff *skb, int pid) 658iscsi_unicast_skb(struct mempool_zone *zone, struct sk_buff *skb, int pid)
576{ 659{
577 unsigned long flags; 660 unsigned long flags;
@@ -666,7 +749,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
666 ev->r.connerror.cid = conn->cid; 749 ev->r.connerror.cid = conn->cid;
667 ev->r.connerror.sid = iscsi_conn_get_sid(conn); 750 ev->r.connerror.sid = iscsi_conn_get_sid(conn);
668 751
669 iscsi_unicast_skb(conn->z_error, skb, priv->daemon_pid); 752 iscsi_broadcast_skb(conn->z_error, skb);
670 753
671 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", 754 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
672 error); 755 error);
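
Error events switch from a unicast to priv->daemon_pid to a netlink broadcast on group 1, so an event raised while the daemon is down or restarting is no longer tied to a stale pid. The delivery core, reduced to its reference handling:

        skb_get(skb);   /* extra ref: netlink_broadcast() consumes one */
        rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL);
        if (rc < 0)
                mempool_free(skb, zone->pool);  /* delivery failed, recycle */
        /* on success the skb is parked on zone->freequeue, presumably
         * until mempool_zone_complete() sees the reader release it */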
@@ -767,6 +850,131 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
767 return err; 850 return err;
768} 851}
769 852
853/**
854 * iscsi_if_destroy_session_done - send session destr. completion event
855 * @conn: last connection for session
856 *
 857 * This is called by HW iscsi LLDs to notify userspace that its HW has
858 * removed a session.
859 **/
860int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
861{
862 struct iscsi_internal *priv;
863 struct iscsi_cls_session *session;
864 struct Scsi_Host *shost;
865 struct iscsi_uevent *ev;
866 struct sk_buff *skb;
867 struct nlmsghdr *nlh;
868 unsigned long flags;
869 int rc, len = NLMSG_SPACE(sizeof(*ev));
870
871 priv = iscsi_if_transport_lookup(conn->transport);
872 if (!priv)
873 return -EINVAL;
874
875 session = iscsi_dev_to_session(conn->dev.parent);
876 shost = iscsi_session_to_shost(session);
877
878 mempool_zone_complete(conn->z_pdu);
879
880 skb = mempool_zone_get_skb(conn->z_pdu);
881 if (!skb) {
882 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
883 "session creation event\n");
884 return -ENOMEM;
885 }
886
887 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
888 ev = NLMSG_DATA(nlh);
889 ev->transport_handle = iscsi_handle(conn->transport);
890 ev->type = ISCSI_KEVENT_DESTROY_SESSION;
891 ev->r.d_session.host_no = shost->host_no;
892 ev->r.d_session.sid = session->sid;
893
894 /*
895 * this will occur if the daemon is not up, so we just warn
896 * the user and when the daemon is restarted it will handle it
897 */
898 rc = iscsi_broadcast_skb(conn->z_pdu, skb);
899 if (rc < 0)
900 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
901 "session destruction event. Check iscsi daemon\n");
902
903 spin_lock_irqsave(&sesslock, flags);
904 list_del(&session->sess_list);
905 spin_unlock_irqrestore(&sesslock, flags);
906
907 spin_lock_irqsave(&connlock, flags);
908 conn->active = 0;
909 list_del(&conn->conn_list);
910 spin_unlock_irqrestore(&connlock, flags);
911
912 return rc;
913}
914EXPORT_SYMBOL_GPL(iscsi_if_destroy_session_done);
915
916/**
917 * iscsi_if_create_session_done - send session creation completion event
918 * @conn: leading connection for session
919 *
 920 * This is called by HW iscsi LLDs to notify userspace that its HW has
 921 * created a session or an existing session is back in the logged in state.
922 **/
923int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
924{
925 struct iscsi_internal *priv;
926 struct iscsi_cls_session *session;
927 struct Scsi_Host *shost;
928 struct iscsi_uevent *ev;
929 struct sk_buff *skb;
930 struct nlmsghdr *nlh;
931 unsigned long flags;
932 int rc, len = NLMSG_SPACE(sizeof(*ev));
933
934 priv = iscsi_if_transport_lookup(conn->transport);
935 if (!priv)
936 return -EINVAL;
937
938 session = iscsi_dev_to_session(conn->dev.parent);
939 shost = iscsi_session_to_shost(session);
940
941 mempool_zone_complete(conn->z_pdu);
942
943 skb = mempool_zone_get_skb(conn->z_pdu);
944 if (!skb) {
945 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
946 "session creation event\n");
947 return -ENOMEM;
948 }
949
950 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
951 ev = NLMSG_DATA(nlh);
952 ev->transport_handle = iscsi_handle(conn->transport);
953 ev->type = ISCSI_UEVENT_CREATE_SESSION;
954 ev->r.c_session_ret.host_no = shost->host_no;
955 ev->r.c_session_ret.sid = session->sid;
956
957 /*
958 * this will occur if the daemon is not up, so we just warn
959 * the user and when the daemon is restarted it will handle it
960 */
961 rc = iscsi_broadcast_skb(conn->z_pdu, skb);
962 if (rc < 0)
963 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
964 "session creation event. Check iscsi daemon\n");
965
966 spin_lock_irqsave(&sesslock, flags);
967 list_add(&session->sess_list, &sesslist);
968 spin_unlock_irqrestore(&sesslock, flags);
969
970 spin_lock_irqsave(&connlock, flags);
971 list_add(&conn->conn_list, &connlist);
972 conn->active = 1;
973 spin_unlock_irqrestore(&connlock, flags);
974 return rc;
975}
976EXPORT_SYMBOL_GPL(iscsi_if_create_session_done);
977
770static int 978static int
771iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 979iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
772{ 980{
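
Both notifiers added above are exported for hardware iSCSI LLDs. A sketch of the intended call site (driver function name illustrative):

        /* called from a HW LLD when firmware finishes login on the
         * leading connection for a session */
        static void example_hw_login_complete(struct iscsi_cls_conn *conn)
        {
                if (iscsi_if_create_session_done(conn) < 0)
                        printk(KERN_WARNING "iscsi daemon missed session "
                               "event; it resyncs on restart\n");
        }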
@@ -812,26 +1020,6 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
812 return -ENOMEM; 1020 return -ENOMEM;
813 } 1021 }
814 1022
815 conn->z_pdu = mempool_zone_init(Z_MAX_PDU,
816 NLMSG_SPACE(sizeof(struct iscsi_uevent) +
817 sizeof(struct iscsi_hdr) +
818 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH),
819 Z_HIWAT_PDU);
820 if (!conn->z_pdu) {
821 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not allocate "
822 "pdu zone for new conn\n");
823 goto destroy_conn;
824 }
825
826 conn->z_error = mempool_zone_init(Z_MAX_ERROR,
827 NLMSG_SPACE(sizeof(struct iscsi_uevent)),
828 Z_HIWAT_ERROR);
829 if (!conn->z_error) {
830 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not allocate "
831 "error zone for new conn\n");
832 goto free_pdu_pool;
833 }
834
835 ev->r.c_conn_ret.sid = session->sid; 1023 ev->r.c_conn_ret.sid = session->sid;
836 ev->r.c_conn_ret.cid = conn->cid; 1024 ev->r.c_conn_ret.cid = conn->cid;
837 1025
@@ -841,13 +1029,6 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
841 spin_unlock_irqrestore(&connlock, flags); 1029 spin_unlock_irqrestore(&connlock, flags);
842 1030
843 return 0; 1031 return 0;
844
845free_pdu_pool:
846 mempool_zone_destroy(conn->z_pdu);
847destroy_conn:
848 if (transport->destroy_conn)
849 transport->destroy_conn(conn->dd_data);
850 return -ENOMEM;
851} 1032}
852 1033
853static int 1034static int
@@ -855,7 +1036,6 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
855{ 1036{
856 unsigned long flags; 1037 unsigned long flags;
857 struct iscsi_cls_conn *conn; 1038 struct iscsi_cls_conn *conn;
858 struct mempool_zone *z_error, *z_pdu;
859 1039
860 conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid); 1040 conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
861 if (!conn) 1041 if (!conn)
@@ -865,35 +1045,18 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
865 list_del(&conn->conn_list); 1045 list_del(&conn->conn_list);
866 spin_unlock_irqrestore(&connlock, flags); 1046 spin_unlock_irqrestore(&connlock, flags);
867 1047
868 z_pdu = conn->z_pdu;
869 z_error = conn->z_error;
870
871 if (transport->destroy_conn) 1048 if (transport->destroy_conn)
872 transport->destroy_conn(conn); 1049 transport->destroy_conn(conn);
873
874 mempool_zone_destroy(z_pdu);
875 mempool_zone_destroy(z_error);
876
877 return 0; 1050 return 0;
878} 1051}
879 1052
880static void
881iscsi_copy_param(struct iscsi_uevent *ev, uint32_t *value, char *data)
882{
883 if (ev->u.set_param.len != sizeof(uint32_t))
884 BUG();
885 memcpy(value, data, min_t(uint32_t, sizeof(uint32_t),
886 ev->u.set_param.len));
887}
888
889static int 1053static int
890iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) 1054iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
891{ 1055{
892 char *data = (char*)ev + sizeof(*ev); 1056 char *data = (char*)ev + sizeof(*ev);
893 struct iscsi_cls_conn *conn; 1057 struct iscsi_cls_conn *conn;
894 struct iscsi_cls_session *session; 1058 struct iscsi_cls_session *session;
895 int err = 0; 1059 int err = 0, value = 0;
896 uint32_t value = 0;
897 1060
898 session = iscsi_session_lookup(ev->u.set_param.sid); 1061 session = iscsi_session_lookup(ev->u.set_param.sid);
899 conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); 1062 conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid);
@@ -902,42 +1065,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
902 1065
903 switch (ev->u.set_param.param) { 1066 switch (ev->u.set_param.param) {
904 case ISCSI_PARAM_SESS_RECOVERY_TMO: 1067 case ISCSI_PARAM_SESS_RECOVERY_TMO:
905 iscsi_copy_param(ev, &value, data); 1068 sscanf(data, "%d", &value);
906 if (value != 0) 1069 if (value != 0)
907 session->recovery_tmo = value; 1070 session->recovery_tmo = value;
908 break; 1071 break;
909 case ISCSI_PARAM_TARGET_NAME:
910 /* this should not change between logins */
911 if (session->targetname)
912 return 0;
913
914 session->targetname = kstrdup(data, GFP_KERNEL);
915 if (!session->targetname)
916 return -ENOMEM;
917 break;
918 case ISCSI_PARAM_TPGT:
919 iscsi_copy_param(ev, &value, data);
920 session->tpgt = value;
921 break;
922 case ISCSI_PARAM_PERSISTENT_PORT:
923 iscsi_copy_param(ev, &value, data);
924 conn->persistent_port = value;
925 break;
926 case ISCSI_PARAM_PERSISTENT_ADDRESS:
927 /*
928 * this is the address returned in discovery so it should
929 * not change between logins.
930 */
931 if (conn->persistent_address)
932 return 0;
933
934 conn->persistent_address = kstrdup(data, GFP_KERNEL);
935 if (!conn->persistent_address)
936 return -ENOMEM;
937 break;
938 default: 1072 default:
939 iscsi_copy_param(ev, &value, data); 1073 err = transport->set_param(conn, ev->u.set_param.param,
940 err = transport->set_param(conn, ev->u.set_param.param, value); 1074 data, ev->u.set_param.len);
941 } 1075 }
942 1076
943 return err; 1077 return err;
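
Only SESS_RECOVERY_TMO is still parsed in the transport class; every other parameter is handed to the transport as the raw string plus length, so a transport's set_param now parses for itself. A sketch (function name illustrative, signature per the call above):

        static int example_set_param(struct iscsi_cls_conn *conn,
                                     enum iscsi_param param,
                                     char *data, int buflen)
        {
                int value;

                switch (param) {
                case ISCSI_PARAM_MAX_R2T:
                        sscanf(data, "%d", &value);
                        /* ... apply value to the connection ... */
                        return 0;
                default:
                        return -ENOSYS;
                }
        }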
@@ -978,6 +1112,21 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
978} 1112}
979 1113
980static int 1114static int
1115iscsi_tgt_dscvr(struct iscsi_transport *transport,
1116 struct iscsi_uevent *ev)
1117{
1118 struct sockaddr *dst_addr;
1119
1120 if (!transport->tgt_dscvr)
1121 return -EINVAL;
1122
1123 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1124 return transport->tgt_dscvr(ev->u.tgt_dscvr.type,
1125 ev->u.tgt_dscvr.host_no,
1126 ev->u.tgt_dscvr.enable, dst_addr);
1127}
1128
1129static int
981iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1130iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
982{ 1131{
983 int err = 0; 1132 int err = 0;
@@ -1065,6 +1214,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1065 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: 1214 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
1066 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type); 1215 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
1067 break; 1216 break;
1217 case ISCSI_UEVENT_TGT_DSCVR:
1218 err = iscsi_tgt_dscvr(transport, ev);
1219 break;
1068 default: 1220 default:
1069 err = -EINVAL; 1221 err = -EINVAL;
1070 break; 1222 break;
@@ -1147,49 +1299,31 @@ struct class_device_attribute class_device_attr_##_prefix##_##_name = \
1147/* 1299/*
1148 * iSCSI connection attrs 1300 * iSCSI connection attrs
1149 */ 1301 */
1150#define iscsi_conn_int_attr_show(param, format) \ 1302#define iscsi_conn_attr_show(param) \
1151static ssize_t \
1152show_conn_int_param_##param(struct class_device *cdev, char *buf) \
1153{ \
1154 uint32_t value = 0; \
1155 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
1156 struct iscsi_transport *t = conn->transport; \
1157 \
1158 t->get_conn_param(conn, param, &value); \
1159 return snprintf(buf, 20, format"\n", value); \
1160}
1161
1162#define iscsi_conn_int_attr(field, param, format) \
1163 iscsi_conn_int_attr_show(param, format) \
1164static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_int_param_##param, \
1165 NULL);
1166
1167iscsi_conn_int_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH, "%u");
1168iscsi_conn_int_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH, "%u");
1169iscsi_conn_int_attr(header_digest, ISCSI_PARAM_HDRDGST_EN, "%d");
1170iscsi_conn_int_attr(data_digest, ISCSI_PARAM_DATADGST_EN, "%d");
1171iscsi_conn_int_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN, "%d");
1172iscsi_conn_int_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN, "%d");
1173iscsi_conn_int_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT, "%d");
1174iscsi_conn_int_attr(port, ISCSI_PARAM_CONN_PORT, "%d");
1175iscsi_conn_int_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN, "%u");
1176
1177#define iscsi_conn_str_attr_show(param) \
1178static ssize_t \ 1303static ssize_t \
1179show_conn_str_param_##param(struct class_device *cdev, char *buf) \ 1304show_conn_param_##param(struct class_device *cdev, char *buf) \
1180{ \ 1305{ \
1181 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \ 1306 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
1182 struct iscsi_transport *t = conn->transport; \ 1307 struct iscsi_transport *t = conn->transport; \
1183 return t->get_conn_str_param(conn, param, buf); \ 1308 return t->get_conn_param(conn, param, buf); \
1184} 1309}
1185 1310
1186#define iscsi_conn_str_attr(field, param) \ 1311#define iscsi_conn_attr(field, param) \
1187 iscsi_conn_str_attr_show(param) \ 1312 iscsi_conn_attr_show(param) \
1188static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_str_param_##param, \ 1313static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_param_##param, \
1189 NULL); 1314 NULL);
1190 1315
1191iscsi_conn_str_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); 1316iscsi_conn_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH);
1192iscsi_conn_str_attr(address, ISCSI_PARAM_CONN_ADDRESS); 1317iscsi_conn_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH);
1318iscsi_conn_attr(header_digest, ISCSI_PARAM_HDRDGST_EN);
1319iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN);
1320iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN);
1321iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN);
1322iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT);
1323iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
1324iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
1325iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
1326iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
1193 1327
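As context for this rework: the old macros generated separate int and string show functions, each calling a type-specific transport hook (get_conn_param() with a uint32_t out-parameter, or get_conn_str_param()). The new iscsi_conn_attr() relies on a single get_conn_param() callback that formats the value into buf and returns the byte count. A minimal sketch of an LLD-side implementation under that contract (the struct iscsi_conn field names are illustrative, not taken from this patch):

static int example_get_conn_param(struct iscsi_cls_conn *cls_conn,
				  enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	switch (param) {
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		/* integer parameters are now formatted by the LLD itself */
		return sprintf(buf, "%u\n", conn->max_recv_dlength);
	case ISCSI_PARAM_PERSISTENT_ADDRESS:
		/* string parameters go through the same entry point */
		return sprintf(buf, "%s\n", conn->persistent_address);
	default:
		return -ENOSYS;
	}
}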
1194#define iscsi_cdev_to_session(_cdev) \ 1328#define iscsi_cdev_to_session(_cdev) \
1195 iscsi_dev_to_session(_cdev->dev) 1329 iscsi_dev_to_session(_cdev->dev)
@@ -1197,61 +1331,36 @@ iscsi_conn_str_attr(address, ISCSI_PARAM_CONN_ADDRESS);
1197/* 1331/*
1198 * iSCSI session attrs 1332 * iSCSI session attrs
1199 */ 1333 */
1200#define iscsi_session_int_attr_show(param, format) \ 1334#define iscsi_session_attr_show(param) \
1201static ssize_t \
1202show_session_int_param_##param(struct class_device *cdev, char *buf) \
1203{ \
1204 uint32_t value = 0; \
1205 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
1206 struct iscsi_transport *t = session->transport; \
1207 \
1208 t->get_session_param(session, param, &value); \
1209 return snprintf(buf, 20, format"\n", value); \
1210}
1211
1212#define iscsi_session_int_attr(field, param, format) \
1213 iscsi_session_int_attr_show(param, format) \
1214static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_int_param_##param, \
1215 NULL);
1216
1217iscsi_session_int_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, "%d");
1218iscsi_session_int_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, "%hu");
1219iscsi_session_int_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, "%d");
1220iscsi_session_int_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, "%u");
1221iscsi_session_int_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, "%u");
1222iscsi_session_int_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, "%d");
1223iscsi_session_int_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, "%d");
1224iscsi_session_int_attr(erl, ISCSI_PARAM_ERL, "%d");
1225iscsi_session_int_attr(tpgt, ISCSI_PARAM_TPGT, "%d");
1226
1227#define iscsi_session_str_attr_show(param) \
1228static ssize_t \ 1335static ssize_t \
1229show_session_str_param_##param(struct class_device *cdev, char *buf) \ 1336show_session_param_##param(struct class_device *cdev, char *buf) \
1230{ \ 1337{ \
1231 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1338 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
1232 struct iscsi_transport *t = session->transport; \ 1339 struct iscsi_transport *t = session->transport; \
1233 return t->get_session_str_param(session, param, buf); \ 1340 return t->get_session_param(session, param, buf); \
1234} 1341}
1235 1342
1236#define iscsi_session_str_attr(field, param) \ 1343#define iscsi_session_attr(field, param) \
1237 iscsi_session_str_attr_show(param) \ 1344 iscsi_session_attr_show(param) \
1238static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_str_param_##param, \ 1345static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
1239 NULL); 1346 NULL);
1240 1347
1241iscsi_session_str_attr(targetname, ISCSI_PARAM_TARGET_NAME); 1348iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME);
1349iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN);
1350iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T);
1351iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN);
1352iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST);
1353iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST);
1354iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN);
1355iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN);
1356iscsi_session_attr(erl, ISCSI_PARAM_ERL);
1357iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT);
1242 1358
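To make the generated plumbing concrete, one invocation such as iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT) expands to roughly the following (per the ISCSI_CLASS_ATTR naming visible in the hunk header above):

static ssize_t
show_session_param_ISCSI_PARAM_TPGT(struct class_device *cdev, char *buf)
{
	struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
	struct iscsi_transport *t = session->transport;
	return t->get_session_param(session, ISCSI_PARAM_TPGT, buf);
}
/* ...defining class_device_attr_sess_tpgt, which
 * SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT) later wires into
 * priv->session_attrs[] */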
1243/*
1244 * Private session and conn attrs. userspace uses several iscsi values
1245 * to identify each session between reboots. Some of these values may not
1246 * be present in the iscsi_transport/LLD driver because userspace handles
1247 * login (and failback for login redirect), so for these types of drivers
1248 * the class manages the attrs and values for the iscsi_transport/LLD
1249 */
1250#define iscsi_priv_session_attr_show(field, format) \ 1359#define iscsi_priv_session_attr_show(field, format) \
1251static ssize_t \ 1360static ssize_t \
1252show_priv_session_##field(struct class_device *cdev, char *buf) \ 1361show_priv_session_##field(struct class_device *cdev, char *buf) \
1253{ \ 1362{ \
1254 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1363 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
1255 return sprintf(buf, format"\n", session->field); \ 1364 return sprintf(buf, format"\n", session->field); \
1256} 1365}
1257 1366
@@ -1259,31 +1368,15 @@ show_priv_session_##field(struct class_device *cdev, char *buf) \
1259 iscsi_priv_session_attr_show(field, format) \ 1368 iscsi_priv_session_attr_show(field, format) \
1260static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \ 1369static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \
1261 NULL) 1370 NULL)
1262iscsi_priv_session_attr(targetname, "%s");
1263iscsi_priv_session_attr(tpgt, "%d");
1264iscsi_priv_session_attr(recovery_tmo, "%d"); 1371iscsi_priv_session_attr(recovery_tmo, "%d");
1265 1372
1266#define iscsi_priv_conn_attr_show(field, format) \
1267static ssize_t \
1268show_priv_conn_##field(struct class_device *cdev, char *buf) \
1269{ \
1270 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
1271 return sprintf(buf, format"\n", conn->field); \
1272}
1273
1274#define iscsi_priv_conn_attr(field, format) \
1275 iscsi_priv_conn_attr_show(field, format) \
1276static ISCSI_CLASS_ATTR(priv_conn, field, S_IRUGO, show_priv_conn_##field, \
1277 NULL)
1278iscsi_priv_conn_attr(persistent_address, "%s");
1279iscsi_priv_conn_attr(persistent_port, "%d");
1280
1281#define SETUP_PRIV_SESSION_RD_ATTR(field) \ 1373#define SETUP_PRIV_SESSION_RD_ATTR(field) \
1282do { \ 1374do { \
1283 priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \ 1375 priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
1284 count++; \ 1376 count++; \
1285} while (0) 1377} while (0)
1286 1378
1379
1287#define SETUP_SESSION_RD_ATTR(field, param_flag) \ 1380#define SETUP_SESSION_RD_ATTR(field, param_flag) \
1288do { \ 1381do { \
1289 if (tt->param_mask & param_flag) { \ 1382 if (tt->param_mask & param_flag) { \
@@ -1292,12 +1385,6 @@ do { \
1292 } \ 1385 } \
1293} while (0) 1386} while (0)
1294 1387
1295#define SETUP_PRIV_CONN_RD_ATTR(field) \
1296do { \
1297 priv->conn_attrs[count] = &class_device_attr_priv_conn_##field; \
1298 count++; \
1299} while (0)
1300
1301#define SETUP_CONN_RD_ATTR(field, param_flag) \ 1388#define SETUP_CONN_RD_ATTR(field, param_flag) \
1302do { \ 1389do { \
1303 if (tt->param_mask & param_flag) { \ 1390 if (tt->param_mask & param_flag) { \
@@ -1388,6 +1475,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
1388 if (!priv) 1475 if (!priv)
1389 return NULL; 1476 return NULL;
1390 INIT_LIST_HEAD(&priv->list); 1477 INIT_LIST_HEAD(&priv->list);
1478 priv->daemon_pid = -1;
1391 priv->iscsi_transport = tt; 1479 priv->iscsi_transport = tt;
1392 priv->t.user_scan = iscsi_user_scan; 1480 priv->t.user_scan = iscsi_user_scan;
1393 1481
@@ -1424,16 +1512,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
1424 SETUP_CONN_RD_ATTR(address, ISCSI_CONN_ADDRESS); 1512 SETUP_CONN_RD_ATTR(address, ISCSI_CONN_ADDRESS);
1425 SETUP_CONN_RD_ATTR(port, ISCSI_CONN_PORT); 1513 SETUP_CONN_RD_ATTR(port, ISCSI_CONN_PORT);
1426 SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN); 1514 SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
1427 1515 SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
1428 if (tt->param_mask & ISCSI_PERSISTENT_ADDRESS) 1516 SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
1429 SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
1430 else
1431 SETUP_PRIV_CONN_RD_ATTR(persistent_address);
1432
1433 if (tt->param_mask & ISCSI_PERSISTENT_PORT)
1434 SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
1435 else
1436 SETUP_PRIV_CONN_RD_ATTR(persistent_port);
1437 1517
1438 BUG_ON(count > ISCSI_CONN_ATTRS); 1518 BUG_ON(count > ISCSI_CONN_ATTRS);
1439 priv->conn_attrs[count] = NULL; 1519 priv->conn_attrs[count] = NULL;
@@ -1453,18 +1533,10 @@ iscsi_register_transport(struct iscsi_transport *tt)
1453 SETUP_SESSION_RD_ATTR(data_pdu_in_order, ISCSI_PDU_INORDER_EN); 1533 SETUP_SESSION_RD_ATTR(data_pdu_in_order, ISCSI_PDU_INORDER_EN);
1454 SETUP_SESSION_RD_ATTR(data_seq_in_order, ISCSI_DATASEQ_INORDER_EN); 1534 SETUP_SESSION_RD_ATTR(data_seq_in_order, ISCSI_DATASEQ_INORDER_EN);
1455 SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL); 1535 SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
1536 SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
1537 SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
1456 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); 1538 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
1457 1539
1458 if (tt->param_mask & ISCSI_TARGET_NAME)
1459 SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
1460 else
1461 SETUP_PRIV_SESSION_RD_ATTR(targetname);
1462
1463 if (tt->param_mask & ISCSI_TPGT)
1464 SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
1465 else
1466 SETUP_PRIV_SESSION_RD_ATTR(tpgt);
1467
1468 BUG_ON(count > ISCSI_SESSION_ATTRS); 1540 BUG_ON(count > ISCSI_SESSION_ATTRS);
1469 priv->session_attrs[count] = NULL; 1541 priv->session_attrs[count] = NULL;
1470 1542
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1fe6b2d01853..dd075627e605 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -174,12 +174,29 @@ static int sas_host_match(struct attribute_container *cont,
174 174
175static int do_sas_phy_delete(struct device *dev, void *data) 175static int do_sas_phy_delete(struct device *dev, void *data)
176{ 176{
177 if (scsi_is_sas_phy(dev)) 177 int pass = (int)(unsigned long)data;
178
179 if (pass == 0 && scsi_is_sas_port(dev))
180 sas_port_delete(dev_to_sas_port(dev));
181 else if (pass == 1 && scsi_is_sas_phy(dev))
178 sas_phy_delete(dev_to_phy(dev)); 182 sas_phy_delete(dev_to_phy(dev));
179 return 0; 183 return 0;
180} 184}
181 185
182/** 186/**
 187 * sas_remove_children -- tear down a device's SAS data structures
188 * @dev: device belonging to the sas object
189 *
190 * Removes all SAS PHYs and remote PHYs for a given object
191 */
192void sas_remove_children(struct device *dev)
193{
194 device_for_each_child(dev, (void *)0, do_sas_phy_delete);
195 device_for_each_child(dev, (void *)1, do_sas_phy_delete);
196}
197EXPORT_SYMBOL(sas_remove_children);
198
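The two passes encode an ordering constraint rather than duplicated work: pass 0 removes sas_port children (unlinking their phys), and pass 1 removes the now-unlinked phys; as the BUG_ON() in sas_phy_delete() below shows, deleting a phy that is still listed on a port is a hard error. A driver tearing down only a subtree can reuse the helper directly, e.g. (sketch):

	/* remove all ports and phys below one expander rphy */
	sas_remove_children(&rphy->dev);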
199/**
183 * sas_remove_host -- tear down a Scsi_Host's SAS data structures 200 * sas_remove_host -- tear down a Scsi_Host's SAS data structures
184 * @shost: Scsi Host that is torn down 201 * @shost: Scsi Host that is torn down
185 * 202 *
@@ -188,13 +205,13 @@ static int do_sas_phy_delete(struct device *dev, void *data)
188 */ 205 */
189void sas_remove_host(struct Scsi_Host *shost) 206void sas_remove_host(struct Scsi_Host *shost)
190{ 207{
191 device_for_each_child(&shost->shost_gendev, NULL, do_sas_phy_delete); 208 sas_remove_children(&shost->shost_gendev);
192} 209}
193EXPORT_SYMBOL(sas_remove_host); 210EXPORT_SYMBOL(sas_remove_host);
194 211
195 212
196/* 213/*
197 * SAS Port attributes 214 * SAS Phy attributes
198 */ 215 */
199 216
200#define sas_phy_show_simple(field, name, format_string, cast) \ 217#define sas_phy_show_simple(field, name, format_string, cast) \
@@ -310,7 +327,7 @@ sas_phy_protocol_attr(identify.target_port_protocols,
310sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", 327sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
311 unsigned long long); 328 unsigned long long);
312sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); 329sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
313sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8); 330//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8);
314sas_phy_linkspeed_attr(negotiated_linkrate); 331sas_phy_linkspeed_attr(negotiated_linkrate);
315sas_phy_linkspeed_attr(minimum_linkrate_hw); 332sas_phy_linkspeed_attr(minimum_linkrate_hw);
316sas_phy_linkspeed_attr(minimum_linkrate); 333sas_phy_linkspeed_attr(minimum_linkrate);
@@ -378,9 +395,10 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
378 device_initialize(&phy->dev); 395 device_initialize(&phy->dev);
379 phy->dev.parent = get_device(parent); 396 phy->dev.parent = get_device(parent);
380 phy->dev.release = sas_phy_release; 397 phy->dev.release = sas_phy_release;
398 INIT_LIST_HEAD(&phy->port_siblings);
381 if (scsi_is_sas_expander_device(parent)) { 399 if (scsi_is_sas_expander_device(parent)) {
382 struct sas_rphy *rphy = dev_to_rphy(parent); 400 struct sas_rphy *rphy = dev_to_rphy(parent);
383 sprintf(phy->dev.bus_id, "phy-%d-%d:%d", shost->host_no, 401 sprintf(phy->dev.bus_id, "phy-%d:%d:%d", shost->host_no,
384 rphy->scsi_target_id, number); 402 rphy->scsi_target_id, number);
385 } else 403 } else
386 sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number); 404 sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number);
@@ -440,8 +458,8 @@ sas_phy_delete(struct sas_phy *phy)
440{ 458{
441 struct device *dev = &phy->dev; 459 struct device *dev = &phy->dev;
442 460
443 if (phy->rphy) 461 /* this happens if the phy is still part of a port when deleted */
444 sas_rphy_delete(phy->rphy); 462 BUG_ON(!list_empty(&phy->port_siblings));
445 463
446 transport_remove_device(dev); 464 transport_remove_device(dev);
447 device_del(dev); 465 device_del(dev);
@@ -464,6 +482,258 @@ int scsi_is_sas_phy(const struct device *dev)
464EXPORT_SYMBOL(scsi_is_sas_phy); 482EXPORT_SYMBOL(scsi_is_sas_phy);
465 483
466/* 484/*
485 * SAS Port attributes
486 */
487#define sas_port_show_simple(field, name, format_string, cast) \
488static ssize_t \
489show_sas_port_##name(struct class_device *cdev, char *buf) \
490{ \
491 struct sas_port *port = transport_class_to_sas_port(cdev); \
492 \
493 return snprintf(buf, 20, format_string, cast port->field); \
494}
495
496#define sas_port_simple_attr(field, name, format_string, type) \
497 sas_port_show_simple(field, name, format_string, (type)) \
498static CLASS_DEVICE_ATTR(name, S_IRUGO, show_sas_port_##name, NULL)
499
500sas_port_simple_attr(num_phys, num_phys, "%d\n", int);
501
502static DECLARE_TRANSPORT_CLASS(sas_port_class,
503 "sas_port", NULL, NULL, NULL);
504
505static int sas_port_match(struct attribute_container *cont, struct device *dev)
506{
507 struct Scsi_Host *shost;
508 struct sas_internal *i;
509
510 if (!scsi_is_sas_port(dev))
511 return 0;
512 shost = dev_to_shost(dev->parent);
513
514 if (!shost->transportt)
515 return 0;
516 if (shost->transportt->host_attrs.ac.class !=
517 &sas_host_class.class)
518 return 0;
519
520 i = to_sas_internal(shost->transportt);
521 return &i->port_attr_cont.ac == cont;
522}
523
524
525static void sas_port_release(struct device *dev)
526{
527 struct sas_port *port = dev_to_sas_port(dev);
528
529 BUG_ON(!list_empty(&port->phy_list));
530
531 put_device(dev->parent);
532 kfree(port);
533}
534
535static void sas_port_create_link(struct sas_port *port,
536 struct sas_phy *phy)
537{
538 sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, phy->dev.bus_id);
539 sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
540}
541
542static void sas_port_delete_link(struct sas_port *port,
543 struct sas_phy *phy)
544{
545 sysfs_remove_link(&port->dev.kobj, phy->dev.bus_id);
546 sysfs_remove_link(&phy->dev.kobj, "port");
547}
548
 549/**
 550 * sas_port_alloc - allocate and initialize a SAS port structure
551 * @parent: parent device
552 * @port_id: port number
553 *
554 * Allocates a SAS port structure. It will be added to the device tree
555 * below the device specified by @parent which must be either a Scsi_Host
556 * or a sas_expander_device.
557 *
558 * Returns %NULL on error
559 */
560struct sas_port *sas_port_alloc(struct device *parent, int port_id)
561{
562 struct Scsi_Host *shost = dev_to_shost(parent);
563 struct sas_port *port;
564
565 port = kzalloc(sizeof(*port), GFP_KERNEL);
566 if (!port)
567 return NULL;
568
569 port->port_identifier = port_id;
570
571 device_initialize(&port->dev);
572
573 port->dev.parent = get_device(parent);
574 port->dev.release = sas_port_release;
575
576 mutex_init(&port->phy_list_mutex);
577 INIT_LIST_HEAD(&port->phy_list);
578
579 if (scsi_is_sas_expander_device(parent)) {
580 struct sas_rphy *rphy = dev_to_rphy(parent);
581 sprintf(port->dev.bus_id, "port-%d:%d:%d", shost->host_no,
582 rphy->scsi_target_id, port->port_identifier);
583 } else
584 sprintf(port->dev.bus_id, "port-%d:%d", shost->host_no,
585 port->port_identifier);
586
587 transport_setup_device(&port->dev);
588
589 return port;
590}
591EXPORT_SYMBOL(sas_port_alloc);
592
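A minimal host-attached usage sketch of the allocation API (the error values and the origin of shost and the port number are illustrative):

	struct sas_port *port;

	port = sas_port_alloc(&shost->shost_gendev, 0);
	if (!port)
		return -ENOMEM;
	if (sas_port_add(port)) {
		/* never published, so sas_port_free(), not sas_port_delete() */
		sas_port_free(port);
		return -ENODEV;
	}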
593/**
594 * sas_port_add - add a SAS port to the device hierarchy
595 *
596 * @port: port to be added
597 *
598 * publishes a port to the rest of the system
599 */
600int sas_port_add(struct sas_port *port)
601{
602 int error;
603
604 /* No phys should be added until this is made visible */
605 BUG_ON(!list_empty(&port->phy_list));
606
607 error = device_add(&port->dev);
608
609 if (error)
610 return error;
611
612 transport_add_device(&port->dev);
613 transport_configure_device(&port->dev);
614
615 return 0;
616}
617EXPORT_SYMBOL(sas_port_add);
618
619/**
620 * sas_port_free -- free a SAS PORT
621 * @port: SAS PORT to free
622 *
623 * Frees the specified SAS PORT.
624 *
625 * Note:
626 * This function must only be called on a PORT that has not
 627 * successfully been added using sas_port_add().
628 */
629void sas_port_free(struct sas_port *port)
630{
631 transport_destroy_device(&port->dev);
632 put_device(&port->dev);
633}
634EXPORT_SYMBOL(sas_port_free);
635
636/**
637 * sas_port_delete -- remove SAS PORT
638 * @port: SAS PORT to remove
639 *
 640 * Removes the specified SAS PORT. If the SAS PORT has any
641 * associated phys, unlink them from the port as well.
642 */
643void sas_port_delete(struct sas_port *port)
644{
645 struct device *dev = &port->dev;
646 struct sas_phy *phy, *tmp_phy;
647
648 if (port->rphy) {
649 sas_rphy_delete(port->rphy);
650 port->rphy = NULL;
651 }
652
653 mutex_lock(&port->phy_list_mutex);
654 list_for_each_entry_safe(phy, tmp_phy, &port->phy_list,
655 port_siblings) {
656 sas_port_delete_link(port, phy);
657 list_del_init(&phy->port_siblings);
658 }
659 mutex_unlock(&port->phy_list_mutex);
660
661 transport_remove_device(dev);
662 device_del(dev);
663 transport_destroy_device(dev);
664 put_device(dev);
665}
666EXPORT_SYMBOL(sas_port_delete);
667
668/**
669 * scsi_is_sas_port -- check if a struct device represents a SAS port
670 * @dev: device to check
671 *
672 * Returns:
 673 * %1 if the device represents a SAS Port, %0 otherwise
674 */
675int scsi_is_sas_port(const struct device *dev)
676{
677 return dev->release == sas_port_release;
678}
679EXPORT_SYMBOL(scsi_is_sas_port);
680
681/**
682 * sas_port_add_phy - add another phy to a port to form a wide port
683 * @port: port to add the phy to
684 * @phy: phy to add
685 *
686 * When a port is initially created, it is empty (has no phys). All
 687 * ports must have at least one phy to operate, and all wide ports
 688 * must have at least two. The current code makes no distinction
689 * between ports and wide ports, but the only object that can be
690 * connected to a remote device is a port, so ports must be formed on
691 * all devices with phys if they're connected to anything.
692 */
693void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy)
694{
695 mutex_lock(&port->phy_list_mutex);
696 if (unlikely(!list_empty(&phy->port_siblings))) {
697 /* make sure we're already on this port */
698 struct sas_phy *tmp;
699
700 list_for_each_entry(tmp, &port->phy_list, port_siblings)
701 if (tmp == phy)
702 break;
703 /* If this trips, you added a phy that was already
704 * part of a different port */
705 if (unlikely(tmp != phy)) {
706 dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n", phy->dev.bus_id);
707 BUG();
708 }
709 } else {
710 sas_port_create_link(port, phy);
711 list_add_tail(&phy->port_siblings, &port->phy_list);
712 port->num_phys++;
713 }
714 mutex_unlock(&port->phy_list_mutex);
715}
716EXPORT_SYMBOL(sas_port_add_phy);
717
718/**
719 * sas_port_delete_phy - remove a phy from a port or wide port
720 * @port: port to remove the phy from
721 * @phy: phy to remove
722 *
723 * This operation is used for tearing down ports again. It must be
724 * done to every port or wide port before calling sas_port_delete.
725 */
726void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy)
727{
728 mutex_lock(&port->phy_list_mutex);
729 sas_port_delete_link(port, phy);
730 list_del_init(&phy->port_siblings);
731 port->num_phys--;
732 mutex_unlock(&port->phy_list_mutex);
733}
734EXPORT_SYMBOL(sas_port_delete_phy);
735
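Taken together, the intended lifecycle of a (wide) port looks roughly like the following sketch, per the doc comments above (locking and error handling elided):

	sas_port_add_phy(port, phy0);	/* narrow port, num_phys == 1 */
	sas_port_add_phy(port, phy1);	/* now a wide port, num_phys == 2 */
	/* ... I/O via port->rphy ... */
	sas_port_delete_phy(port, phy1);
	sas_port_delete_phy(port, phy0);
	sas_port_delete(port);		/* also removes port->rphy if set */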
736/*
467 * SAS remote PHY attributes. 737 * SAS remote PHY attributes.
468 */ 738 */
469 739
@@ -767,7 +1037,7 @@ static void sas_rphy_initialize(struct sas_rphy *rphy)
767 * Returns: 1037 * Returns:
768 * SAS PHY allocated or %NULL if the allocation failed. 1038 * SAS PHY allocated or %NULL if the allocation failed.
769 */ 1039 */
770struct sas_rphy *sas_end_device_alloc(struct sas_phy *parent) 1040struct sas_rphy *sas_end_device_alloc(struct sas_port *parent)
771{ 1041{
772 struct Scsi_Host *shost = dev_to_shost(&parent->dev); 1042 struct Scsi_Host *shost = dev_to_shost(&parent->dev);
773 struct sas_end_device *rdev; 1043 struct sas_end_device *rdev;
@@ -780,8 +1050,13 @@ struct sas_rphy *sas_end_device_alloc(struct sas_phy *parent)
780 device_initialize(&rdev->rphy.dev); 1050 device_initialize(&rdev->rphy.dev);
781 rdev->rphy.dev.parent = get_device(&parent->dev); 1051 rdev->rphy.dev.parent = get_device(&parent->dev);
782 rdev->rphy.dev.release = sas_end_device_release; 1052 rdev->rphy.dev.release = sas_end_device_release;
783 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d-%d", 1053 if (scsi_is_sas_expander_device(parent->dev.parent)) {
784 shost->host_no, parent->port_identifier, parent->number); 1054 struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent);
1055 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d:%d",
1056 shost->host_no, rphy->scsi_target_id, parent->port_identifier);
1057 } else
1058 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d",
1059 shost->host_no, parent->port_identifier);
785 rdev->rphy.identify.device_type = SAS_END_DEVICE; 1060 rdev->rphy.identify.device_type = SAS_END_DEVICE;
786 sas_rphy_initialize(&rdev->rphy); 1061 sas_rphy_initialize(&rdev->rphy);
787 transport_setup_device(&rdev->rphy.dev); 1062 transport_setup_device(&rdev->rphy.dev);
@@ -798,7 +1073,7 @@ EXPORT_SYMBOL(sas_end_device_alloc);
798 * Returns: 1073 * Returns:
799 * SAS PHY allocated or %NULL if the allocation failed. 1074 * SAS PHY allocated or %NULL if the allocation failed.
800 */ 1075 */
801struct sas_rphy *sas_expander_alloc(struct sas_phy *parent, 1076struct sas_rphy *sas_expander_alloc(struct sas_port *parent,
802 enum sas_device_type type) 1077 enum sas_device_type type)
803{ 1078{
804 struct Scsi_Host *shost = dev_to_shost(&parent->dev); 1079 struct Scsi_Host *shost = dev_to_shost(&parent->dev);
@@ -837,7 +1112,7 @@ EXPORT_SYMBOL(sas_expander_alloc);
837 */ 1112 */
838int sas_rphy_add(struct sas_rphy *rphy) 1113int sas_rphy_add(struct sas_rphy *rphy)
839{ 1114{
840 struct sas_phy *parent = dev_to_phy(rphy->dev.parent); 1115 struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
841 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); 1116 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
842 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); 1117 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
843 struct sas_identify *identify = &rphy->identify; 1118 struct sas_identify *identify = &rphy->identify;
@@ -910,7 +1185,7 @@ void
910sas_rphy_delete(struct sas_rphy *rphy) 1185sas_rphy_delete(struct sas_rphy *rphy)
911{ 1186{
912 struct device *dev = &rphy->dev; 1187 struct device *dev = &rphy->dev;
913 struct sas_phy *parent = dev_to_phy(dev->parent); 1188 struct sas_port *parent = dev_to_sas_port(dev->parent);
914 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); 1189 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
915 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); 1190 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
916 1191
@@ -920,7 +1195,7 @@ sas_rphy_delete(struct sas_rphy *rphy)
920 break; 1195 break;
921 case SAS_EDGE_EXPANDER_DEVICE: 1196 case SAS_EDGE_EXPANDER_DEVICE:
922 case SAS_FANOUT_EXPANDER_DEVICE: 1197 case SAS_FANOUT_EXPANDER_DEVICE:
923 device_for_each_child(dev, NULL, do_sas_phy_delete); 1198 sas_remove_children(dev);
924 break; 1199 break;
925 default: 1200 default:
926 break; 1201 break;
@@ -967,7 +1242,7 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
967 1242
968 mutex_lock(&sas_host->lock); 1243 mutex_lock(&sas_host->lock);
969 list_for_each_entry(rphy, &sas_host->rphy_list, list) { 1244 list_for_each_entry(rphy, &sas_host->rphy_list, list) {
970 struct sas_phy *parent = dev_to_phy(rphy->dev.parent); 1245 struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
971 1246
972 if (rphy->identify.device_type != SAS_END_DEVICE || 1247 if (rphy->identify.device_type != SAS_END_DEVICE ||
973 rphy->scsi_target_id == -1) 1248 rphy->scsi_target_id == -1)
@@ -1003,16 +1278,19 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1003#define SETUP_OPTIONAL_RPORT_ATTRIBUTE(field, func) \ 1278#define SETUP_OPTIONAL_RPORT_ATTRIBUTE(field, func) \
1004 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, i->f->func) 1279 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, i->f->func)
1005 1280
1006#define SETUP_PORT_ATTRIBUTE(field) \ 1281#define SETUP_PHY_ATTRIBUTE(field) \
1007 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1) 1282 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1)
1008 1283
1009#define SETUP_OPTIONAL_PORT_ATTRIBUTE(field, func) \ 1284#define SETUP_PORT_ATTRIBUTE(field) \
1285 SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
1286
1287#define SETUP_OPTIONAL_PHY_ATTRIBUTE(field, func) \
1010 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func) 1288 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func)
1011 1289
1012#define SETUP_PORT_ATTRIBUTE_WRONLY(field) \ 1290#define SETUP_PHY_ATTRIBUTE_WRONLY(field) \
1013 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, 1) 1291 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, 1)
1014 1292
1015#define SETUP_OPTIONAL_PORT_ATTRIBUTE_WRONLY(field, func) \ 1293#define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func) \
1016 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, i->f->func) 1294 SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, i->f->func)
1017 1295
1018#define SETUP_END_DEV_ATTRIBUTE(field) \ 1296#define SETUP_END_DEV_ATTRIBUTE(field) \
@@ -1048,6 +1326,11 @@ sas_attach_transport(struct sas_function_template *ft)
1048 i->phy_attr_cont.ac.match = sas_phy_match; 1326 i->phy_attr_cont.ac.match = sas_phy_match;
1049 transport_container_register(&i->phy_attr_cont); 1327 transport_container_register(&i->phy_attr_cont);
1050 1328
1329 i->port_attr_cont.ac.class = &sas_port_class.class;
1330 i->port_attr_cont.ac.attrs = &i->port_attrs[0];
1331 i->port_attr_cont.ac.match = sas_port_match;
1332 transport_container_register(&i->port_attr_cont);
1333
1051 i->rphy_attr_cont.ac.class = &sas_rphy_class.class; 1334 i->rphy_attr_cont.ac.class = &sas_rphy_class.class;
1052 i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0]; 1335 i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0];
1053 i->rphy_attr_cont.ac.match = sas_rphy_match; 1336 i->rphy_attr_cont.ac.match = sas_rphy_match;
@@ -1066,30 +1349,35 @@ sas_attach_transport(struct sas_function_template *ft)
1066 i->f = ft; 1349 i->f = ft;
1067 1350
1068 count = 0; 1351 count = 0;
1069 i->host_attrs[count] = NULL; 1353 i->host_attrs[count] = NULL;
1070 1354
1071 count = 0; 1355 count = 0;
1072 SETUP_PORT_ATTRIBUTE(initiator_port_protocols); 1356 SETUP_PHY_ATTRIBUTE(initiator_port_protocols);
1073 SETUP_PORT_ATTRIBUTE(target_port_protocols); 1357 SETUP_PHY_ATTRIBUTE(target_port_protocols);
1074 SETUP_PORT_ATTRIBUTE(device_type); 1358 SETUP_PHY_ATTRIBUTE(device_type);
1075 SETUP_PORT_ATTRIBUTE(sas_address); 1359 SETUP_PHY_ATTRIBUTE(sas_address);
1076 SETUP_PORT_ATTRIBUTE(phy_identifier); 1360 SETUP_PHY_ATTRIBUTE(phy_identifier);
1077 SETUP_PORT_ATTRIBUTE(port_identifier); 1361 //SETUP_PHY_ATTRIBUTE(port_identifier);
1078 SETUP_PORT_ATTRIBUTE(negotiated_linkrate); 1362 SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
1079 SETUP_PORT_ATTRIBUTE(minimum_linkrate_hw); 1363 SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
1080 SETUP_PORT_ATTRIBUTE(minimum_linkrate); 1364 SETUP_PHY_ATTRIBUTE(minimum_linkrate);
1081 SETUP_PORT_ATTRIBUTE(maximum_linkrate_hw); 1365 SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw);
1082 SETUP_PORT_ATTRIBUTE(maximum_linkrate); 1366 SETUP_PHY_ATTRIBUTE(maximum_linkrate);
1083 1367
1084 SETUP_PORT_ATTRIBUTE(invalid_dword_count); 1368 SETUP_PHY_ATTRIBUTE(invalid_dword_count);
1085 SETUP_PORT_ATTRIBUTE(running_disparity_error_count); 1369 SETUP_PHY_ATTRIBUTE(running_disparity_error_count);
1086 SETUP_PORT_ATTRIBUTE(loss_of_dword_sync_count); 1370 SETUP_PHY_ATTRIBUTE(loss_of_dword_sync_count);
1087 SETUP_PORT_ATTRIBUTE(phy_reset_problem_count); 1371 SETUP_PHY_ATTRIBUTE(phy_reset_problem_count);
1088 SETUP_OPTIONAL_PORT_ATTRIBUTE_WRONLY(link_reset, phy_reset); 1372 SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset);
1089 SETUP_OPTIONAL_PORT_ATTRIBUTE_WRONLY(hard_reset, phy_reset); 1373 SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset);
1090 i->phy_attrs[count] = NULL; 1374 i->phy_attrs[count] = NULL;
1091 1375
1092 count = 0; 1376 count = 0;
1377 SETUP_PORT_ATTRIBUTE(num_phys);
1378 i->port_attrs[count] = NULL;
1379
1380 count = 0;
1093 SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols); 1381 SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols);
1094 SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols); 1382 SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols);
1095 SETUP_RPORT_ATTRIBUTE(rphy_device_type); 1383 SETUP_RPORT_ATTRIBUTE(rphy_device_type);
@@ -1131,6 +1419,7 @@ void sas_release_transport(struct scsi_transport_template *t)
1131 1419
1132 transport_container_unregister(&i->t.host_attrs); 1420 transport_container_unregister(&i->t.host_attrs);
1133 transport_container_unregister(&i->phy_attr_cont); 1421 transport_container_unregister(&i->phy_attr_cont);
1422 transport_container_unregister(&i->port_attr_cont);
1134 transport_container_unregister(&i->rphy_attr_cont); 1423 transport_container_unregister(&i->rphy_attr_cont);
1135 transport_container_unregister(&i->end_dev_attr_cont); 1424 transport_container_unregister(&i->end_dev_attr_cont);
1136 transport_container_unregister(&i->expander_attr_cont); 1425 transport_container_unregister(&i->expander_attr_cont);
@@ -1149,9 +1438,12 @@ static __init int sas_transport_init(void)
1149 error = transport_class_register(&sas_phy_class); 1438 error = transport_class_register(&sas_phy_class);
1150 if (error) 1439 if (error)
1151 goto out_unregister_transport; 1440 goto out_unregister_transport;
1152 error = transport_class_register(&sas_rphy_class); 1441 error = transport_class_register(&sas_port_class);
1153 if (error) 1442 if (error)
1154 goto out_unregister_phy; 1443 goto out_unregister_phy;
1444 error = transport_class_register(&sas_rphy_class);
1445 if (error)
1446 goto out_unregister_port;
1155 error = transport_class_register(&sas_end_dev_class); 1447 error = transport_class_register(&sas_end_dev_class);
1156 if (error) 1448 if (error)
1157 goto out_unregister_rphy; 1449 goto out_unregister_rphy;
@@ -1165,6 +1457,8 @@ static __init int sas_transport_init(void)
1165 transport_class_unregister(&sas_end_dev_class); 1457 transport_class_unregister(&sas_end_dev_class);
1166 out_unregister_rphy: 1458 out_unregister_rphy:
1167 transport_class_unregister(&sas_rphy_class); 1459 transport_class_unregister(&sas_rphy_class);
1460 out_unregister_port:
1461 transport_class_unregister(&sas_port_class);
1168 out_unregister_phy: 1462 out_unregister_phy:
1169 transport_class_unregister(&sas_phy_class); 1463 transport_class_unregister(&sas_phy_class);
1170 out_unregister_transport: 1464 out_unregister_transport:
@@ -1178,6 +1472,7 @@ static void __exit sas_transport_exit(void)
1178{ 1472{
1179 transport_class_unregister(&sas_host_class); 1473 transport_class_unregister(&sas_host_class);
1180 transport_class_unregister(&sas_phy_class); 1474 transport_class_unregister(&sas_phy_class);
1475 transport_class_unregister(&sas_port_class);
1181 transport_class_unregister(&sas_rphy_class); 1476 transport_class_unregister(&sas_rphy_class);
1182 transport_class_unregister(&sas_end_dev_class); 1477 transport_class_unregister(&sas_end_dev_class);
1183 transport_class_unregister(&sas_expander_class); 1478 transport_class_unregister(&sas_expander_class);
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index b78354fc4b17..cd68a66c7bb3 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -57,6 +57,7 @@ EXPORT_SYMBOL(scsi_bios_ptable);
57int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) 57int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
58{ 58{
59 unsigned char *p; 59 unsigned char *p;
60 u64 capacity64 = capacity; /* Suppress gcc warning */
60 int ret; 61 int ret;
61 62
62 p = scsi_bios_ptable(bdev); 63 p = scsi_bios_ptable(bdev);
@@ -68,7 +69,7 @@ int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
68 (unsigned int *)ip + 0, (unsigned int *)ip + 1); 69 (unsigned int *)ip + 0, (unsigned int *)ip + 1);
69 kfree(p); 70 kfree(p);
70 71
71 if (ret == -1) { 72 if (ret == -1 && capacity64 < (1ULL << 32)) {
72 /* pick some standard mapping with at most 1024 cylinders, 73 /* pick some standard mapping with at most 1024 cylinders,
73 and at most 62 sectors per track - this works up to 74 and at most 62 sectors per track - this works up to
74 7905 MB */ 75 7905 MB */
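The new guard keeps the legacy CHS fallback away from devices whose sector count no longer fits in 32 bits; with 512-byte sectors that boundary is 2 TiB. A quick check of the arithmetic (illustrative only):

	u64 max_sectors = 1ULL << 32;		/* 4294967296 sectors */
	u64 max_bytes = max_sectors * 512;	/* 2199023255552 bytes = 2 TiB */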
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ea38757d12e5..3225d31449e1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -207,6 +207,23 @@ static ssize_t sd_store_cache_type(struct class_device *cdev, const char *buf,
207 return count; 207 return count;
208} 208}
209 209
210static ssize_t sd_store_allow_restart(struct class_device *cdev, const char *buf,
211 size_t count)
212{
213 struct scsi_disk *sdkp = to_scsi_disk(cdev);
214 struct scsi_device *sdp = sdkp->device;
215
216 if (!capable(CAP_SYS_ADMIN))
217 return -EACCES;
218
219 if (sdp->type != TYPE_DISK)
220 return -EINVAL;
221
222 sdp->allow_restart = simple_strtoul(buf, NULL, 10);
223
224 return count;
225}
226
210static ssize_t sd_show_cache_type(struct class_device *cdev, char *buf) 227static ssize_t sd_show_cache_type(struct class_device *cdev, char *buf)
211{ 228{
212 struct scsi_disk *sdkp = to_scsi_disk(cdev); 229 struct scsi_disk *sdkp = to_scsi_disk(cdev);
@@ -222,10 +239,19 @@ static ssize_t sd_show_fua(struct class_device *cdev, char *buf)
222 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA); 239 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
223} 240}
224 241
242static ssize_t sd_show_allow_restart(struct class_device *cdev, char *buf)
243{
244 struct scsi_disk *sdkp = to_scsi_disk(cdev);
245
246 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
247}
248
225static struct class_device_attribute sd_disk_attrs[] = { 249static struct class_device_attribute sd_disk_attrs[] = {
226 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 250 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
227 sd_store_cache_type), 251 sd_store_cache_type),
228 __ATTR(FUA, S_IRUGO, sd_show_fua, NULL), 252 __ATTR(FUA, S_IRUGO, sd_show_fua, NULL),
253 __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart,
254 sd_store_allow_restart),
229 __ATTR_NULL, 255 __ATTR_NULL,
230}; 256};
231 257
@@ -890,11 +916,10 @@ static struct block_device_operations sd_fops = {
890static void sd_rw_intr(struct scsi_cmnd * SCpnt) 916static void sd_rw_intr(struct scsi_cmnd * SCpnt)
891{ 917{
892 int result = SCpnt->result; 918 int result = SCpnt->result;
893 int this_count = SCpnt->request_bufflen; 919 unsigned int xfer_size = SCpnt->request_bufflen;
894 int good_bytes = (result == 0 ? this_count : 0); 920 unsigned int good_bytes = result ? 0 : xfer_size;
895 sector_t block_sectors = 1; 921 u64 start_lba = SCpnt->request->sector;
896 u64 first_err_block; 922 u64 bad_lba;
897 sector_t error_sector;
898 struct scsi_sense_hdr sshdr; 923 struct scsi_sense_hdr sshdr;
899 int sense_valid = 0; 924 int sense_valid = 0;
900 int sense_deferred = 0; 925 int sense_deferred = 0;
@@ -905,7 +930,6 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
905 if (sense_valid) 930 if (sense_valid)
906 sense_deferred = scsi_sense_is_deferred(&sshdr); 931 sense_deferred = scsi_sense_is_deferred(&sshdr);
907 } 932 }
908
909#ifdef CONFIG_SCSI_LOGGING 933#ifdef CONFIG_SCSI_LOGGING
910 SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: %s: res=0x%x\n", 934 SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: %s: res=0x%x\n",
911 SCpnt->request->rq_disk->disk_name, result)); 935 SCpnt->request->rq_disk->disk_name, result));
@@ -915,89 +939,72 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
915 sshdr.sense_key, sshdr.asc, sshdr.ascq)); 939 sshdr.sense_key, sshdr.asc, sshdr.ascq));
916 } 940 }
917#endif 941#endif
918 /* 942 if (driver_byte(result) != DRIVER_SENSE &&
919 Handle MEDIUM ERRORs that indicate partial success. Since this is a 943 (!sense_valid || sense_deferred))
920 relatively rare error condition, no care is taken to avoid 944 goto out;
921 unnecessary additional work such as memcpy's that could be avoided.
922 */
923 if (driver_byte(result) != 0 &&
924 sense_valid && !sense_deferred) {
925 switch (sshdr.sense_key) {
926 case MEDIUM_ERROR:
927 if (!blk_fs_request(SCpnt->request))
928 break;
929 info_valid = scsi_get_sense_info_fld(
930 SCpnt->sense_buffer, SCSI_SENSE_BUFFERSIZE,
931 &first_err_block);
932 /*
933 * May want to warn and skip if following cast results
934 * in actual truncation (if sector_t < 64 bits)
935 */
936 error_sector = (sector_t)first_err_block;
937 if (SCpnt->request->bio != NULL)
938 block_sectors = bio_sectors(SCpnt->request->bio);
939 switch (SCpnt->device->sector_size) {
940 case 1024:
941 error_sector <<= 1;
942 if (block_sectors < 2)
943 block_sectors = 2;
944 break;
945 case 2048:
946 error_sector <<= 2;
947 if (block_sectors < 4)
948 block_sectors = 4;
949 break;
950 case 4096:
951 error_sector <<=3;
952 if (block_sectors < 8)
953 block_sectors = 8;
954 break;
955 case 256:
956 error_sector >>= 1;
957 break;
958 default:
959 break;
960 }
961 945
962 error_sector &= ~(block_sectors - 1); 946 switch (sshdr.sense_key) {
963 good_bytes = (error_sector - SCpnt->request->sector) << 9; 947 case HARDWARE_ERROR:
964 if (good_bytes < 0 || good_bytes >= this_count) 948 case MEDIUM_ERROR:
965 good_bytes = 0; 949 if (!blk_fs_request(SCpnt->request))
950 goto out;
951 info_valid = scsi_get_sense_info_fld(SCpnt->sense_buffer,
952 SCSI_SENSE_BUFFERSIZE,
953 &bad_lba);
954 if (!info_valid)
955 goto out;
956 if (xfer_size <= SCpnt->device->sector_size)
957 goto out;
958 switch (SCpnt->device->sector_size) {
959 case 256:
960 start_lba <<= 1;
966 break; 961 break;
967 962 case 512:
968 case RECOVERED_ERROR: /* an error occurred, but it recovered */
969 case NO_SENSE: /* LLDD got sense data */
970 /*
971 * Inform the user, but make sure that it's not treated
972 * as a hard error.
973 */
974 scsi_print_sense("sd", SCpnt);
975 SCpnt->result = 0;
976 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
977 good_bytes = this_count;
978 break; 963 break;
979 964 case 1024:
980 case ILLEGAL_REQUEST: 965 start_lba >>= 1;
981 if (SCpnt->device->use_10_for_rw && 966 break;
982 (SCpnt->cmnd[0] == READ_10 || 967 case 2048:
983 SCpnt->cmnd[0] == WRITE_10)) 968 start_lba >>= 2;
984 SCpnt->device->use_10_for_rw = 0; 969 break;
985 if (SCpnt->device->use_10_for_ms && 970 case 4096:
986 (SCpnt->cmnd[0] == MODE_SENSE_10 || 971 start_lba >>= 3;
987 SCpnt->cmnd[0] == MODE_SELECT_10))
988 SCpnt->device->use_10_for_ms = 0;
989 break; 972 break;
990
991 default: 973 default:
974 /* Print something here with limiting frequency. */
975 goto out;
992 break; 976 break;
993 } 977 }
978 /* This computation should always be done in terms of
979 * the resolution of the device's medium.
980 */
981 good_bytes = (bad_lba - start_lba)*SCpnt->device->sector_size;
982 break;
983 case RECOVERED_ERROR:
984 case NO_SENSE:
985 /* Inform the user, but make sure that it's not treated
986 * as a hard error.
987 */
988 scsi_print_sense("sd", SCpnt);
989 SCpnt->result = 0;
990 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
991 good_bytes = xfer_size;
992 break;
993 case ILLEGAL_REQUEST:
994 if (SCpnt->device->use_10_for_rw &&
995 (SCpnt->cmnd[0] == READ_10 ||
996 SCpnt->cmnd[0] == WRITE_10))
997 SCpnt->device->use_10_for_rw = 0;
998 if (SCpnt->device->use_10_for_ms &&
999 (SCpnt->cmnd[0] == MODE_SENSE_10 ||
1000 SCpnt->cmnd[0] == MODE_SELECT_10))
1001 SCpnt->device->use_10_for_ms = 0;
1002 break;
1003 default:
1004 break;
994 } 1005 }
995 /* 1006 out:
996 * This calls the generic completion function, now that we know 1007 scsi_io_completion(SCpnt, good_bytes);
997 * how many actual sectors finished, and how many sectors we need
998 * to say have failed.
999 */
1000 scsi_io_completion(SCpnt, good_bytes, block_sectors << 9);
1001} 1008}
1002 1009
1003static int media_not_present(struct scsi_disk *sdkp, 1010static int media_not_present(struct scsi_disk *sdkp,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4e607d3065bc..65eef33846bb 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1401,6 +1401,7 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1401 Sg_device *sdp = NULL; 1401 Sg_device *sdp = NULL;
1402 struct cdev * cdev = NULL; 1402 struct cdev * cdev = NULL;
1403 int error, k; 1403 int error, k;
1404 unsigned long iflags;
1404 1405
1405 disk = alloc_disk(1); 1406 disk = alloc_disk(1);
1406 if (!disk) { 1407 if (!disk) {
@@ -1428,7 +1429,7 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1428 1429
1429 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1); 1430 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
1430 if (error) 1431 if (error)
1431 goto out; 1432 goto cdev_add_err;
1432 1433
1433 sdp->cdev = cdev; 1434 sdp->cdev = cdev;
1434 if (sg_sysfs_valid) { 1435 if (sg_sysfs_valid) {
@@ -1455,6 +1456,13 @@ sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1455 1456
1456 return 0; 1457 return 0;
1457 1458
1459cdev_add_err:
1460 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1461 kfree(sg_dev_arr[k]);
1462 sg_dev_arr[k] = NULL;
1463 sg_nr_dev--;
1464 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1465
1458out: 1466out:
1459 put_disk(disk); 1467 put_disk(disk);
1460 if (cdev) 1468 if (cdev)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ebf6579ed698..fd94408577e5 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,7 @@ static void rw_intr(struct scsi_cmnd * SCpnt)
292 * how many actual sectors finished, and how many sectors we need 292 * how many actual sectors finished, and how many sectors we need
293 * to say have failed. 293 * to say have failed.
294 */ 294 */
295 scsi_io_completion(SCpnt, good_bytes, block_sectors << 9); 295 scsi_io_completion(SCpnt, good_bytes);
296} 296}
297 297
298static int sr_init_command(struct scsi_cmnd * SCpnt) 298static int sr_init_command(struct scsi_cmnd * SCpnt)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index b5218fc0ac86..756ceb93ddc8 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3599,7 +3599,6 @@ static struct st_buffer *
3599 tb->use_sg = max_sg; 3599 tb->use_sg = max_sg;
3600 tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg); 3600 tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg);
3601 3601
3602 tb->in_use = 1;
3603 tb->dma = need_dma; 3602 tb->dma = need_dma;
3604 tb->buffer_size = got; 3603 tb->buffer_size = got;
3605 3604
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 411209048d74..05a5cae126ec 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -31,7 +31,6 @@ struct st_request {
31 31
32/* The tape buffer descriptor. */ 32/* The tape buffer descriptor. */
33struct st_buffer { 33struct st_buffer {
34 unsigned char in_use;
35 unsigned char dma; /* DMA-able buffer */ 34 unsigned char dma; /* DMA-able buffer */
36 unsigned char do_dio; /* direct i/o set up? */ 35 unsigned char do_dio; /* direct i/o set up? */
37 int buffer_size; 36 int buffer_size;
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 864ef859be56..a1d322f8a16c 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -134,7 +134,7 @@ afavlab_setup(struct serial_private *priv, struct pciserial_board *board,
134 * and Keystone have one Diva chip with 3 UARTs. Some later machines have 134 * and Keystone have one Diva chip with 3 UARTs. Some later machines have
135 * one Diva chip, but it has been expanded to 5 UARTs. 135 * one Diva chip, but it has been expanded to 5 UARTs.
136 */ 136 */
137static int __devinit pci_hp_diva_init(struct pci_dev *dev) 137static int pci_hp_diva_init(struct pci_dev *dev)
138{ 138{
139 int rc = 0; 139 int rc = 0;
140 140
@@ -194,7 +194,7 @@ pci_hp_diva_setup(struct serial_private *priv, struct pciserial_board *board,
194/* 194/*
195 * Added for EKF Intel i960 serial boards 195 * Added for EKF Intel i960 serial boards
196 */ 196 */
197static int __devinit pci_inteli960ni_init(struct pci_dev *dev) 197static int pci_inteli960ni_init(struct pci_dev *dev)
198{ 198{
199 unsigned long oldval; 199 unsigned long oldval;
200 200
@@ -216,7 +216,7 @@ static int __devinit pci_inteli960ni_init(struct pci_dev *dev)
 216 * seems to be mainly needed on cards using the PLX which also use I/O 216
217 * mapped memory. 217 * mapped memory.
218 */ 218 */
219static int __devinit pci_plx9050_init(struct pci_dev *dev) 219static int pci_plx9050_init(struct pci_dev *dev)
220{ 220{
221 u8 irq_config; 221 u8 irq_config;
222 void __iomem *p; 222 void __iomem *p;
@@ -314,7 +314,7 @@ sbs_setup(struct serial_private *priv, struct pciserial_board *board,
314/* global control register offset for SBS PMC-OctalPro */ 314/* global control register offset for SBS PMC-OctalPro */
315#define OCT_REG_CR_OFF 0x500 315#define OCT_REG_CR_OFF 0x500
316 316
317static int __devinit sbs_init(struct pci_dev *dev) 317static int sbs_init(struct pci_dev *dev)
318{ 318{
319 u8 __iomem *p; 319 u8 __iomem *p;
320 320
@@ -493,7 +493,7 @@ static const struct timedia_struct {
493 { 0, NULL } 493 { 0, NULL }
494}; 494};
495 495
496static int __devinit pci_timedia_init(struct pci_dev *dev) 496static int pci_timedia_init(struct pci_dev *dev)
497{ 497{
498 unsigned short *ids; 498 unsigned short *ids;
499 int i, j; 499 int i, j;
@@ -566,13 +566,13 @@ titan_400l_800l_setup(struct serial_private *priv,
566 return setup_port(priv, port, bar, offset, board->reg_shift); 566 return setup_port(priv, port, bar, offset, board->reg_shift);
567} 567}
568 568
569static int __devinit pci_xircom_init(struct pci_dev *dev) 569static int pci_xircom_init(struct pci_dev *dev)
570{ 570{
571 msleep(100); 571 msleep(100);
572 return 0; 572 return 0;
573} 573}
574 574
575static int __devinit pci_netmos_init(struct pci_dev *dev) 575static int pci_netmos_init(struct pci_dev *dev)
576{ 576{
577 /* subdevice 0x00PS means <P> parallel, <S> serial */ 577 /* subdevice 0x00PS means <P> parallel, <S> serial */
578 unsigned int num_serial = dev->subsystem_device & 0xf; 578 unsigned int num_serial = dev->subsystem_device & 0xf;
@@ -622,7 +622,7 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
622 */ 622 */
623static struct pci_serial_quirk pci_serial_quirks[] = { 623static struct pci_serial_quirk pci_serial_quirks[] = {
624 /* 624 /*
625 * AFAVLAB cards. 625 * AFAVLAB cards - these may be called via parport_serial
626 * It is not clear whether this applies to all products. 626 * It is not clear whether this applies to all products.
627 */ 627 */
628 { 628 {
@@ -754,7 +754,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
754 .exit = __devexit_p(sbs_exit), 754 .exit = __devexit_p(sbs_exit),
755 }, 755 },
756 /* 756 /*
757 * SIIG cards. 757 * SIIG cards - these may be called via parport_serial
758 */ 758 */
759 { 759 {
760 .vendor = PCI_VENDOR_ID_SIIG, 760 .vendor = PCI_VENDOR_ID_SIIG,
@@ -811,7 +811,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
811 .setup = pci_default_setup, 811 .setup = pci_default_setup,
812 }, 812 },
813 /* 813 /*
814 * Netmos cards 814 * Netmos cards - these may be called via parport_serial
815 */ 815 */
816 { 816 {
817 .vendor = PCI_VENDOR_ID_NETMOS, 817 .vendor = PCI_VENDOR_ID_NETMOS,
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 739bc84f91e9..632f62d6ec7e 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -431,6 +431,8 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
431#endif 431#endif
432 432
433 port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; 433 port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
434 if (pnp_irq_flags(dev, 0) & IORESOURCE_IRQ_SHAREABLE)
435 port.flags |= UPF_SHARE_IRQ;
434 port.uartclk = 1843200; 436 port.uartclk = 1843200;
435 port.dev = &dev->dev; 437 port.dev = &dev->dev;
436 438
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 48eb22d3a63e..7708e5dd3656 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -726,8 +726,7 @@ mpc52xx_uart_probe(struct platform_device *dev)
726 726
727 spin_lock_init(&port->lock); 727 spin_lock_init(&port->lock);
728 port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */ 728 port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */
729 port->fifosize = 255; /* Should be 512 ! But it can't be */ 729 port->fifosize = 512;
730 /* stored in a unsigned char */
731 port->iotype = UPIO_MEM; 730 port->iotype = UPIO_MEM;
732 port->flags = UPF_BOOT_AUTOCONF | 731 port->flags = UPF_BOOT_AUTOCONF |
733 ( uart_console(port) ? 0 : UPF_IOREMAP ); 732 ( uart_console(port) ? 0 : UPF_IOREMAP );
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 459c0231aef3..bfd2a22759eb 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -1443,8 +1443,8 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
1443 uap->flags &= ~PMACZILOG_FLAG_HAS_DMA; 1443 uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
1444 goto no_dma; 1444 goto no_dma;
1445 } 1445 }
1446 uap->tx_dma_irq = np->intrs[1].line; 1446 uap->tx_dma_irq = irq_of_parse_and_map(np, 1);
1447 uap->rx_dma_irq = np->intrs[2].line; 1447 uap->rx_dma_irq = irq_of_parse_and_map(np, 2);
1448 } 1448 }
1449no_dma: 1449no_dma:
1450 1450
@@ -1491,7 +1491,7 @@ no_dma:
1491 * Init remaining bits of "port" structure 1491 * Init remaining bits of "port" structure
1492 */ 1492 */
1493 uap->port.iotype = UPIO_MEM; 1493 uap->port.iotype = UPIO_MEM;
1494 uap->port.irq = np->intrs[0].line; 1494 uap->port.irq = irq_of_parse_and_map(np, 0);
1495 uap->port.uartclk = ZS_CLOCK; 1495 uap->port.uartclk = ZS_CLOCK;
1496 uap->port.fifosize = 1; 1496 uap->port.fifosize = 1;
1497 uap->port.ops = &pmz_pops; 1497 uap->port.ops = &pmz_pops;
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index c54af8774393..d5f636fbf29a 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -49,6 +49,12 @@
49 */ 49 */
50static DEFINE_MUTEX(port_mutex); 50static DEFINE_MUTEX(port_mutex);
51 51
52/*
53 * lockdep: port->lock is initialized in two places, but we
54 * want only one lock-class:
55 */
56static struct lock_class_key port_lock_key;
57
52#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) 58#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
53 59
54#define uart_users(state) ((state)->count + ((state)->info ? (state)->info->blocked_open : 0)) 60#define uart_users(state) ((state)->count + ((state)->info ? (state)->info->blocked_open : 0))
@@ -690,7 +696,8 @@ static int uart_set_info(struct uart_state *state,
690 (new_serial.baud_base != port->uartclk / 16) || 696 (new_serial.baud_base != port->uartclk / 16) ||
691 (close_delay != state->close_delay) || 697 (close_delay != state->close_delay) ||
692 (closing_wait != state->closing_wait) || 698 (closing_wait != state->closing_wait) ||
693 (new_serial.xmit_fifo_size != port->fifosize) || 699 (new_serial.xmit_fifo_size &&
700 new_serial.xmit_fifo_size != port->fifosize) ||
694 (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0)) 701 (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
695 goto exit; 702 goto exit;
696 port->flags = ((port->flags & ~UPF_USR_MASK) | 703 port->flags = ((port->flags & ~UPF_USR_MASK) |
@@ -795,7 +802,8 @@ static int uart_set_info(struct uart_state *state,
795 port->custom_divisor = new_serial.custom_divisor; 802 port->custom_divisor = new_serial.custom_divisor;
796 state->close_delay = close_delay; 803 state->close_delay = close_delay;
797 state->closing_wait = closing_wait; 804 state->closing_wait = closing_wait;
798 port->fifosize = new_serial.xmit_fifo_size; 805 if (new_serial.xmit_fifo_size)
806 port->fifosize = new_serial.xmit_fifo_size;
799 if (state->info->tty) 807 if (state->info->tty)
800 state->info->tty->low_latency = 808 state->info->tty->low_latency =
801 (port->flags & UPF_LOW_LATENCY) ? 1 : 0; 809 (port->flags & UPF_LOW_LATENCY) ? 1 : 0;
@@ -1865,6 +1873,7 @@ uart_set_options(struct uart_port *port, struct console *co,
1865 * early. 1873 * early.
1866 */ 1874 */
1867 spin_lock_init(&port->lock); 1875 spin_lock_init(&port->lock);
1876 lockdep_set_class(&port->lock, &port_lock_key);
1868 1877
1869 memset(&termios, 0, sizeof(struct termios)); 1878 memset(&termios, 0, sizeof(struct termios));
1870 1879
@@ -2247,8 +2256,10 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
2247 * If this port is a console, then the spinlock is already 2256 * If this port is a console, then the spinlock is already
2248 * initialised. 2257 * initialised.
2249 */ 2258 */
2250 if (!(uart_console(port) && (port->cons->flags & CON_ENABLED))) 2259 if (!(uart_console(port) && (port->cons->flags & CON_ENABLED))) {
2251 spin_lock_init(&port->lock); 2260 spin_lock_init(&port->lock);
2261 lockdep_set_class(&port->lock, &port_lock_key);
2262 }
2252 2263
2253 uart_configure_port(drv, state, port); 2264 uart_configure_port(drv, state, port);
2254 2265
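The pattern applied here is general: every spin_lock_init() call site registers its own lockdep class, so a lock initialized on two paths looks like two different locks to the validator and can produce spurious reports. Sharing one static key across all init sites collapses them into a single class (sketch; my_lock_key is an illustrative name):

	static struct lock_class_key my_lock_key;

	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &my_lock_key);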
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ed1cdf6ac8f3..146298ad7371 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -510,7 +510,7 @@ static void spi_complete(void *arg)
510 */ 510 */
511int spi_sync(struct spi_device *spi, struct spi_message *message) 511int spi_sync(struct spi_device *spi, struct spi_message *message)
512{ 512{
513 DECLARE_COMPLETION(done); 513 DECLARE_COMPLETION_ONSTACK(done);
514 int status; 514 int status;
515 515
516 message->complete = spi_complete; 516 message->complete = spi_complete;
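DECLARE_COMPLETION() uses a static initializer, which lockdep only accepts for static or global objects; DECLARE_COMPLETION_ONSTACK() initializes the completion at run time so each on-stack instance gets a valid lockdep key. Usage is otherwise unchanged (start_async_op() is a hypothetical helper):

	DECLARE_COMPLETION_ONSTACK(done);

	start_async_op(&done);		/* completes &done from another context */
	wait_for_completion(&done);	/* the stack frame outlives the wait */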
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index e47e3a8ed6e4..f48c3dbc367a 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -200,7 +200,7 @@ static void update_sb(struct super_block *sb)
200 if (!root) 200 if (!root)
201 return; 201 return;
202 202
203 mutex_lock(&root->d_inode->i_mutex); 203 mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_PARENT);
204 204
205 list_for_each_entry(bus, &root->d_subdirs, d_u.d_child) { 205 list_for_each_entry(bus, &root->d_subdirs, d_u.d_child) {
206 if (bus->d_inode) { 206 if (bus->d_inode) {
@@ -527,7 +527,7 @@ static void fs_remove_file (struct dentry *dentry)
527 if (!parent || !parent->d_inode) 527 if (!parent || !parent->d_inode)
528 return; 528 return;
529 529
530 mutex_lock(&parent->d_inode->i_mutex); 530 mutex_lock_nested(&parent->d_inode->i_mutex, I_MUTEX_PARENT);
531 if (usbfs_positive(dentry)) { 531 if (usbfs_positive(dentry)) {
532 if (dentry->d_inode) { 532 if (dentry->d_inode) {
533 if (S_ISDIR(dentry->d_inode->i_mode)) 533 if (S_ISDIR(dentry->d_inode->i_mode))
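mutex_lock_nested() changes nothing at run time; it only tells lockdep which level of a recognised nesting hierarchy (here the VFS's I_MUTEX_PARENT) the acquisition sits at, so taking a directory's i_mutex around operations on its children is not flagged as recursive locking. The generic shape is:

	mutex_lock(&parent->lock);
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);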
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
index bb22b7e82877..36db72579377 100644
--- a/drivers/usb/gadget/lh7a40x_udc.c
+++ b/drivers/usb/gadget/lh7a40x_udc.c
@@ -2143,7 +2143,7 @@ static int lh7a40x_udc_remove(struct platform_device *pdev)
2143 2143
2144static struct platform_driver udc_driver = { 2144static struct platform_driver udc_driver = {
2145 .probe = lh7a40x_udc_probe, 2145 .probe = lh7a40x_udc_probe,
2146 .remove = lh7a40x_udc_remove 2146 .remove = lh7a40x_udc_remove,
2147 /* FIXME power management support */ 2147 /* FIXME power management support */
2148 /* .suspend = ... disable UDC */ 2148 /* .suspend = ... disable UDC */
2149 /* .resume = ... re-enable UDC */ 2149 /* .resume = ... re-enable UDC */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 17de4c84db69..3badb48d662b 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1557,6 +1557,21 @@ config FB_S3C2410_DEBUG
1557 Turn on debugging messages. Note that you can set/unset at run time 1557 Turn on debugging messages. Note that you can set/unset at run time
1558 through sysfs 1558 through sysfs
1559 1559
1560config FB_PNX4008_DUM
1561 tristate "Display Update Module support on Philips PNX4008 board"
1562 depends on FB && ARCH_PNX4008
1563 ---help---
1564 Say Y here to enable support for PNX4008 Display Update Module (DUM)
1565
1566config FB_PNX4008_DUM_RGB
1567 tristate "RGB Framebuffer support on Philips PNX4008 board"
1568 depends on FB_PNX4008_DUM
1569 select FB_CFB_FILLRECT
1570 select FB_CFB_COPYAREA
1571 select FB_CFB_IMAGEBLIT
1572 ---help---
1573 Say Y here to enable support for PNX4008 RGB Framebuffer
1574
1560config FB_VIRTUAL 1575config FB_VIRTUAL
1561 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" 1576 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
1562 depends on FB 1577 depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index c335e9bc3b20..6283d015f8f5 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -94,6 +94,8 @@ obj-$(CONFIG_FB_TX3912) += tx3912fb.o
94obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o 94obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
95obj-$(CONFIG_FB_IMX) += imxfb.o 95obj-$(CONFIG_FB_IMX) += imxfb.o
96obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o 96obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
97obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/
98obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/
97 99
98# Platform or fallback drivers go here 100# Platform or fallback drivers go here
99obj-$(CONFIG_FB_VESA) += vesafb.o 101obj-$(CONFIG_FB_VESA) += vesafb.o
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index bfeb11bd4712..71ce1fa45cf4 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -97,14 +97,43 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
97 u_int transp, struct fb_info *info) 97 u_int transp, struct fb_info *info)
98{ 98{
99 struct offb_par *par = (struct offb_par *) info->par; 99 struct offb_par *par = (struct offb_par *) info->par;
100 int i, depth;
101 u32 *pal = info->pseudo_palette;
100 102
101 if (!par->cmap_adr || regno > 255) 103 depth = info->var.bits_per_pixel;
104 if (depth == 16)
105 depth = (info->var.green.length == 5) ? 15 : 16;
106
107 if (regno > 255 ||
108 (depth == 16 && regno > 63) ||
109 (depth == 15 && regno > 31))
102 return 1; 110 return 1;
103 111
112 if (regno < 16) {
113 switch (depth) {
114 case 15:
115 pal[regno] = (regno << 10) | (regno << 5) | regno;
116 break;
117 case 16:
118 pal[regno] = (regno << 11) | (regno << 5) | regno;
119 break;
120 case 24:
121 pal[regno] = (regno << 16) | (regno << 8) | regno;
122 break;
123 case 32:
124 i = (regno << 8) | regno;
125 pal[regno] = (i << 16) | i;
126 break;
127 }
128 }
129
104 red >>= 8; 130 red >>= 8;
105 green >>= 8; 131 green >>= 8;
106 blue >>= 8; 132 blue >>= 8;
107 133
134 if (!par->cmap_adr)
135 return 0;
136
108 switch (par->cmap_type) { 137 switch (par->cmap_type) {
109 case cmap_m64: 138 case cmap_m64:
110 writeb(regno, par->cmap_adr); 139 writeb(regno, par->cmap_adr);
@@ -141,20 +170,6 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
141 break; 170 break;
142 } 171 }
143 172
144 if (regno < 16)
145 switch (info->var.bits_per_pixel) {
146 case 16:
147 ((u16 *) (info->pseudo_palette))[regno] =
148 (regno << 10) | (regno << 5) | regno;
149 break;
150 case 32:
151 {
152 int i = (regno << 8) | regno;
153 ((u32 *) (info->pseudo_palette))[regno] =
154 (i << 16) | i;
155 break;
156 }
157 }
158 return 0; 173 return 0;
159} 174}
160 175
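At the truecolor depths the pseudo-palette entry for console colour regno is the index itself replicated into every channel field; the colour lookup table programmed through par->cmap_adr further down then supplies the actual intensities per channel. Worked example for regno = 7:

	/* depth 15: (7 << 10) | (7 << 5) | 7  = 0x1CE7
	 * depth 16: (7 << 11) | (7 << 5) | 7  = 0x38E7
	 * depth 24: (7 << 16) | (7 << 8) | 7  = 0x070707
	 * depth 32: i = (7 << 8) | 7 = 0x0707; (i << 16) | i = 0x07070707
	 */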
@@ -223,81 +238,9 @@ int __init offb_init(void)
223{ 238{
224 struct device_node *dp = NULL, *boot_disp = NULL; 239 struct device_node *dp = NULL, *boot_disp = NULL;
225 240
226#if defined(CONFIG_BOOTX_TEXT) && defined(CONFIG_PPC32)
227 struct device_node *macos_display = NULL;
228#endif
229 if (fb_get_options("offb", NULL)) 241 if (fb_get_options("offb", NULL))
230 return -ENODEV; 242 return -ENODEV;
231 243
232#if defined(CONFIG_BOOTX_TEXT) && defined(CONFIG_PPC32)
233 /* If we're booted from BootX... */
234 if (boot_infos != 0) {
235 unsigned long addr =
236 (unsigned long) boot_infos->dispDeviceBase;
237 u32 *addrp;
238 u64 daddr, dsize;
239 unsigned int flags;
240
241 /* find the device node corresponding to the macos display */
242 while ((dp = of_find_node_by_type(dp, "display"))) {
243 int i;
244
245 /*
246 * Look for an AAPL,address property first.
247 */
248 unsigned int na;
249 unsigned int *ap =
250 (unsigned int *)get_property(dp, "AAPL,address",
251 &na);
252 if (ap != 0) {
253 for (na /= sizeof(unsigned int); na > 0;
254 --na, ++ap)
255 if (*ap <= addr &&
256 addr < *ap + 0x1000000) {
257 macos_display = dp;
258 goto foundit;
259 }
260 }
261
262 /*
263 * See if the display address is in one of the address
264 * ranges for this display.
265 */
266 i = 0;
267 for (;;) {
268 addrp = of_get_address(dp, i++, &dsize, &flags);
269 if (addrp == NULL)
270 break;
271 if (!(flags & IORESOURCE_MEM))
272 continue;
273 daddr = of_translate_address(dp, addrp);
274 if (daddr == OF_BAD_ADDR)
275 continue;
276 if (daddr <= addr && addr < (daddr + dsize)) {
277 macos_display = dp;
278 goto foundit;
279 }
280 }
281 foundit:
282 if (macos_display) {
283 printk(KERN_INFO "MacOS display is %s\n",
284 dp->full_name);
285 break;
286 }
287 }
288
289 /* initialize it */
290 offb_init_fb(macos_display ? macos_display->
291 name : "MacOS display",
292 macos_display ? macos_display->
293 full_name : "MacOS display",
294 boot_infos->dispDeviceRect[2],
295 boot_infos->dispDeviceRect[3],
296 boot_infos->dispDeviceDepth,
297 boot_infos->dispDeviceRowBytes, addr, NULL);
298 }
299#endif /* defined(CONFIG_BOOTX_TEXT) && defined(CONFIG_PPC32) */
300
301 for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { 244 for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) {
302 if (get_property(dp, "linux,opened", NULL) && 245 if (get_property(dp, "linux,opened", NULL) &&
303 get_property(dp, "linux,boot-display", NULL)) { 246 get_property(dp, "linux,boot-display", NULL)) {
@@ -317,94 +260,93 @@ int __init offb_init(void)
317 260
318static void __init offb_init_nodriver(struct device_node *dp) 261static void __init offb_init_nodriver(struct device_node *dp)
319{ 262{
320 int *pp, i;
321 unsigned int len; 263 unsigned int len;
322 int width = 640, height = 480, depth = 8, pitch; 264 int i, width = 640, height = 480, depth = 8, pitch = 640;
323 unsigned int flags, rsize, *up; 265 unsigned int flags, rsize, addr_prop = 0;
324 u64 address = OF_BAD_ADDR; 266 unsigned long max_size = 0;
325 u32 *addrp; 267 u64 rstart, address = OF_BAD_ADDR;
268 u32 *pp, *addrp, *up;
326 u64 asize; 269 u64 asize;
327 270
328 if ((pp = (int *) get_property(dp, "depth", &len)) != NULL 271 pp = (u32 *)get_property(dp, "linux,bootx-depth", &len);
329 && len == sizeof(int)) 272 if (pp == NULL)
273 pp = (u32 *)get_property(dp, "depth", &len);
274 if (pp && len == sizeof(u32))
330 depth = *pp; 275 depth = *pp;
331 if ((pp = (int *) get_property(dp, "width", &len)) != NULL 276
332 && len == sizeof(int)) 277 pp = (u32 *)get_property(dp, "linux,bootx-width", &len);
278 if (pp == NULL)
279 pp = (u32 *)get_property(dp, "width", &len);
280 if (pp && len == sizeof(u32))
333 width = *pp; 281 width = *pp;
334 if ((pp = (int *) get_property(dp, "height", &len)) != NULL 282
335 && len == sizeof(int)) 283 pp = (u32 *)get_property(dp, "linux,bootx-height", &len);
284 if (pp == NULL)
285 pp = (u32 *)get_property(dp, "height", &len);
286 if (pp && len == sizeof(u32))
336 height = *pp; 287 height = *pp;
337 if ((pp = (int *) get_property(dp, "linebytes", &len)) != NULL 288
338 && len == sizeof(int)) { 289 pp = (u32 *)get_property(dp, "linux,bootx-linebytes", &len);
290 if (pp == NULL)
291 pp = (u32 *)get_property(dp, "linebytes", &len);
292 if (pp && len == sizeof(u32))
339 pitch = *pp; 293 pitch = *pp;
340 if (pitch == 1) 294 else
341 pitch = 0x1000; 295 pitch = width * ((depth + 7) / 8);
342 } else 296
343 pitch = width; 297 rsize = (unsigned long)pitch * (unsigned long)height;
344 298
345 rsize = (unsigned long)pitch * (unsigned long)height * 299 /* Ok, now we try to figure out the address of the framebuffer.
346 (unsigned long)(depth / 8); 300 *
347 301 * Unfortunately, Open Firmware doesn't provide a standard way to do
348 /* Try to match device to a PCI device in order to get a properly 302 * so. All we can do is a dodgy heuristic that happens to work in
349 * translated address rather then trying to decode the open firmware 303 * practice. On most machines, the "address" property contains what
350 * stuff in various incorrect ways 304 * we need, though not on Matrox cards found in IBM machines. What I've
351 */ 305 * found that appears to give good results is to go through the PCI
352#ifdef CONFIG_PCI 306 * ranges and pick one that is both big enough and if possible encloses
353 /* First try to locate the PCI device if any */ 307 * the "address" property. If none match, we pick the biggest
354 { 308 */
355 struct pci_dev *pdev = NULL; 309 up = (u32 *)get_property(dp, "linux,bootx-addr", &len);
356 310 if (up == NULL)
357 for_each_pci_dev(pdev) { 311 up = (u32 *)get_property(dp, "address", &len);
358 if (dp == pci_device_to_OF_node(pdev)) 312 if (up && len == sizeof(u32))
359 break; 313 addr_prop = *up;
360 } 314
361 if (pdev) { 315 for (i = 0; (addrp = of_get_address(dp, i, &asize, &flags))
362 for (i = 0; i < 6 && address == OF_BAD_ADDR; i++) { 316 != NULL; i++) {
363 if ((pci_resource_flags(pdev, i) & 317 int match_addrp = 0;
364 IORESOURCE_MEM) && 318
365 (pci_resource_len(pdev, i) >= rsize)) 319 if (!(flags & IORESOURCE_MEM))
366 address = pci_resource_start(pdev, i); 320 continue;
367 } 321 if (asize < rsize)
368 pci_dev_put(pdev); 322 continue;
369 } 323 rstart = of_translate_address(dp, addrp);
370 } 324 if (rstart == OF_BAD_ADDR)
371#endif /* CONFIG_PCI */ 325 continue;
372 326 if (addr_prop && (rstart <= addr_prop) &&
373 /* This one is dodgy, we may drop it ... */ 327 ((rstart + asize) >= (addr_prop + rsize)))
374 if (address == OF_BAD_ADDR && 328 match_addrp = 1;
375 (up = (unsigned *) get_property(dp, "address", &len)) != NULL && 329 if (match_addrp) {
376 len == sizeof(unsigned int)) 330 address = addr_prop;
377 address = (u64) * up; 331 break;
378
379 if (address == OF_BAD_ADDR) {
380 for (i = 0; (addrp = of_get_address(dp, i, &asize, &flags))
381 != NULL; i++) {
382 if (!(flags & IORESOURCE_MEM))
383 continue;
384 if (asize >= pitch * height * depth / 8)
385 break;
386 }
387 if (addrp == NULL) {
388 printk(KERN_ERR
389 "no framebuffer address found for %s\n",
390 dp->full_name);
391 return;
392 }
393 address = of_translate_address(dp, addrp);
394 if (address == OF_BAD_ADDR) {
395 printk(KERN_ERR
396 "can't translate framebuffer address for %s\n",
397 dp->full_name);
398 return;
399 } 332 }
333 if (rsize > max_size) {
334 max_size = rsize;
335 address = OF_BAD_ADDR;
336 }
400 337
338 if (address == OF_BAD_ADDR)
339 address = rstart;
340 }
341 if (address == OF_BAD_ADDR && addr_prop)
342 address = (u64)addr_prop;
343 if (address != OF_BAD_ADDR) {
401 /* kludge for valkyrie */ 344 /* kludge for valkyrie */
402 if (strcmp(dp->name, "valkyrie") == 0) 345 if (strcmp(dp->name, "valkyrie") == 0)
403 address += 0x1000; 346 address += 0x1000;
347 offb_init_fb(dp->name, dp->full_name, width, height, depth,
348 pitch, address, dp);
404 } 349 }
405 offb_init_fb(dp->name, dp->full_name, width, height, depth,
406 pitch, address, dp);
407
408} 350}
409 351
410static void __init offb_init_fb(const char *name, const char *full_name, 352static void __init offb_init_fb(const char *name, const char *full_name,
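An illustration of the selection rule in the comment above, with made-up numbers:

	/* rsize = pitch * height           = 2048 * 768 = 0x180000 bytes
	 * "address" property               = 0x9c008000
	 * range A: 0x9c000000 + 0x01000000 -> MEM, >= rsize, and encloses
	 *                                     [0x9c008000, 0x9c188000): chosen
	 * range B: 0x80000000 + 0x08000000 -> larger, but A already matched
	 * no enclosing range: the biggest qualifying MEM range is used;
	 * no qualifying range at all: fall back to the raw "address" value
	 */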
@@ -412,7 +354,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
412 int pitch, unsigned long address, 354 int pitch, unsigned long address,
413 struct device_node *dp) 355 struct device_node *dp)
414{ 356{
415 unsigned long res_size = pitch * height * depth / 8; 357 unsigned long res_size = pitch * height * (depth + 7) / 8;
416 struct offb_par *par = &default_par; 358 struct offb_par *par = &default_par;
417 unsigned long res_start = address; 359 unsigned long res_start = address;
418 struct fb_fix_screeninfo *fix; 360 struct fb_fix_screeninfo *fix;
@@ -426,7 +368,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
426 printk(KERN_INFO 368 printk(KERN_INFO
427 "Using unsupported %dx%d %s at %lx, depth=%d, pitch=%d\n", 369 "Using unsupported %dx%d %s at %lx, depth=%d, pitch=%d\n",
428 width, height, name, address, depth, pitch); 370 width, height, name, address, depth, pitch);
429 if (depth != 8 && depth != 16 && depth != 32) { 371 if (depth != 8 && depth != 15 && depth != 16 && depth != 32) {
430 printk(KERN_ERR "%s: can't use depth = %d\n", full_name, 372 printk(KERN_ERR "%s: can't use depth = %d\n", full_name,
431 depth); 373 depth);
432 release_mem_region(res_start, res_size); 374 release_mem_region(res_start, res_size);
@@ -502,7 +444,6 @@ static void __init offb_init_fb(const char *name, const char *full_name,
502 : */ FB_VISUAL_TRUECOLOR; 444 : */ FB_VISUAL_TRUECOLOR;
503 445
504 var->xoffset = var->yoffset = 0; 446 var->xoffset = var->yoffset = 0;
505 var->bits_per_pixel = depth;
506 switch (depth) { 447 switch (depth) {
507 case 8: 448 case 8:
508 var->bits_per_pixel = 8; 449 var->bits_per_pixel = 8;
@@ -515,7 +456,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
515 var->transp.offset = 0; 456 var->transp.offset = 0;
516 var->transp.length = 0; 457 var->transp.length = 0;
517 break; 458 break;
518 case 16: /* RGB 555 */ 459 case 15: /* RGB 555 */
519 var->bits_per_pixel = 16; 460 var->bits_per_pixel = 16;
520 var->red.offset = 10; 461 var->red.offset = 10;
521 var->red.length = 5; 462 var->red.length = 5;
@@ -526,6 +467,17 @@ static void __init offb_init_fb(const char *name, const char *full_name,
526 var->transp.offset = 0; 467 var->transp.offset = 0;
527 var->transp.length = 0; 468 var->transp.length = 0;
528 break; 469 break;
470 case 16: /* RGB 565 */
471 var->bits_per_pixel = 16;
472 var->red.offset = 11;
473 var->red.length = 5;
474 var->green.offset = 5;
475 var->green.length = 6;
476 var->blue.offset = 0;
477 var->blue.length = 5;
478 var->transp.offset = 0;
479 var->transp.length = 0;
480 break;
529 case 32: /* RGB 888 */ 481 case 32: /* RGB 888 */
530 var->bits_per_pixel = 32; 482 var->bits_per_pixel = 32;
531 var->red.offset = 16; 483 var->red.offset = 16;
diff --git a/drivers/video/pnx4008/Makefile b/drivers/video/pnx4008/Makefile
new file mode 100644
index 000000000000..636aaccf01fd
--- /dev/null
+++ b/drivers/video/pnx4008/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the new PNX4008 framebuffer device driver
3#
4
5obj-$(CONFIG_FB_PNX4008_DUM) += sdum.o
6obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnxrgbfb.o
7
diff --git a/drivers/video/pnx4008/dum.h b/drivers/video/pnx4008/dum.h
new file mode 100644
index 000000000000..d80a614d89ed
--- /dev/null
+++ b/drivers/video/pnx4008/dum.h
@@ -0,0 +1,211 @@
1/*
2 * linux/drivers/video/pnx4008/dum.h
3 *
4 * Internal header for SDUM
5 *
6 * 2005 (c) Koninklijke Philips N.V. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 */
11
12#ifndef __PNX008_DUM_H__
13#define __PNX008_DUM_H__
14
15#include <asm/arch/platform.h>
16
17#define PNX4008_DUMCONF_VA_BASE IO_ADDRESS(PNX4008_DUMCONF_BASE)
18#define PNX4008_DUM_MAIN_VA_BASE IO_ADDRESS(PNX4008_DUM_MAINCFG_BASE)
19
20/* DUM CFG ADDRESSES */
21#define DUM_CH_BASE_ADR (PNX4008_DUMCONF_VA_BASE + 0x00)
22#define DUM_CH_MIN_ADR (PNX4008_DUMCONF_VA_BASE + 0x00)
23#define DUM_CH_MAX_ADR (PNX4008_DUMCONF_VA_BASE + 0x04)
24#define DUM_CH_CONF_ADR (PNX4008_DUMCONF_VA_BASE + 0x08)
25#define DUM_CH_STAT_ADR (PNX4008_DUMCONF_VA_BASE + 0x0C)
26#define DUM_CH_CTRL_ADR (PNX4008_DUMCONF_VA_BASE + 0x10)
27
28#define CH_MARG (0x100 / sizeof(u32))
29#define DUM_CH_MIN(i) (*((volatile u32 *)DUM_CH_MIN_ADR + (i) * CH_MARG))
30#define DUM_CH_MAX(i) (*((volatile u32 *)DUM_CH_MAX_ADR + (i) * CH_MARG))
31#define DUM_CH_CONF(i) (*((volatile u32 *)DUM_CH_CONF_ADR + (i) * CH_MARG))
32#define DUM_CH_STAT(i) (*((volatile u32 *)DUM_CH_STAT_ADR + (i) * CH_MARG))
33#define DUM_CH_CTRL(i) (*((volatile u32 *)DUM_CH_CTRL_ADR + (i) * CH_MARG))
34
35#define DUM_CONF_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x00)
36#define DUM_CTRL_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x04)
37#define DUM_STAT_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x08)
38#define DUM_DECODE_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x0C)
39#define DUM_COM_BASE_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x10)
40#define DUM_SYNC_C_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x14)
41#define DUM_CLK_DIV_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x18)
42#define DUM_DIRTY_LOW_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x20)
43#define DUM_DIRTY_HIGH_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x24)
44#define DUM_FORMAT_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x28)
45#define DUM_WTCFG1_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x30)
46#define DUM_RTCFG1_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x34)
47#define DUM_WTCFG2_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x38)
48#define DUM_RTCFG2_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x3C)
49#define DUM_TCFG_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x40)
50#define DUM_OUTP_FORMAT1_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x44)
51#define DUM_OUTP_FORMAT2_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x48)
52#define DUM_SYNC_MODE_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x4C)
53#define DUM_SYNC_OUT_C_ADR (PNX4008_DUM_MAIN_VA_BASE + 0x50)
54
55#define DUM_CONF (*(volatile u32 *)(DUM_CONF_ADR))
56#define DUM_CTRL (*(volatile u32 *)(DUM_CTRL_ADR))
57#define DUM_STAT (*(volatile u32 *)(DUM_STAT_ADR))
58#define DUM_DECODE (*(volatile u32 *)(DUM_DECODE_ADR))
59#define DUM_COM_BASE (*(volatile u32 *)(DUM_COM_BASE_ADR))
60#define DUM_SYNC_C (*(volatile u32 *)(DUM_SYNC_C_ADR))
61#define DUM_CLK_DIV (*(volatile u32 *)(DUM_CLK_DIV_ADR))
62#define DUM_DIRTY_LOW (*(volatile u32 *)(DUM_DIRTY_LOW_ADR))
63#define DUM_DIRTY_HIGH (*(volatile u32 *)(DUM_DIRTY_HIGH_ADR))
64#define DUM_FORMAT (*(volatile u32 *)(DUM_FORMAT_ADR))
65#define DUM_WTCFG1 (*(volatile u32 *)(DUM_WTCFG1_ADR))
66#define DUM_RTCFG1 (*(volatile u32 *)(DUM_RTCFG1_ADR))
67#define DUM_WTCFG2 (*(volatile u32 *)(DUM_WTCFG2_ADR))
68#define DUM_RTCFG2 (*(volatile u32 *)(DUM_RTCFG2_ADR))
69#define DUM_TCFG (*(volatile u32 *)(DUM_TCFG_ADR))
70#define DUM_OUTP_FORMAT1 (*(volatile u32 *)(DUM_OUTP_FORMAT1_ADR))
71#define DUM_OUTP_FORMAT2 (*(volatile u32 *)(DUM_OUTP_FORMAT2_ADR))
72#define DUM_SYNC_MODE (*(volatile u32 *)(DUM_SYNC_MODE_ADR))
73#define DUM_SYNC_OUT_C (*(volatile u32 *)(DUM_SYNC_OUT_C_ADR))
74
75/* DUM SLAVE ADDRESSES */
76#define DUM_SLAVE_WRITE_ADR (PNX4008_DUM_MAINCFG_BASE + 0x0000000)
77#define DUM_SLAVE_READ1_I_ADR (PNX4008_DUM_MAINCFG_BASE + 0x1000000)
78#define DUM_SLAVE_READ1_R_ADR (PNX4008_DUM_MAINCFG_BASE + 0x1000004)
79#define DUM_SLAVE_READ2_I_ADR (PNX4008_DUM_MAINCFG_BASE + 0x1000008)
80#define DUM_SLAVE_READ2_R_ADR (PNX4008_DUM_MAINCFG_BASE + 0x100000C)
81
82#define DUM_SLAVE_WRITE_W ((volatile u32 *)(DUM_SLAVE_WRITE_ADR))
83#define DUM_SLAVE_WRITE_HW ((volatile u16 *)(DUM_SLAVE_WRITE_ADR))
84#define DUM_SLAVE_READ1_I ((volatile u8 *)(DUM_SLAVE_READ1_I_ADR))
85#define DUM_SLAVE_READ1_R ((volatile u16 *)(DUM_SLAVE_READ1_R_ADR))
86#define DUM_SLAVE_READ2_I ((volatile u8 *)(DUM_SLAVE_READ2_I_ADR))
87#define DUM_SLAVE_READ2_R ((volatile u16 *)(DUM_SLAVE_READ2_R_ADR))
88
89/* Sony display register addresses */
90#define DISP_0_REG (0x00)
91#define DISP_1_REG (0x01)
92#define DISP_CAL_REG (0x20)
93#define DISP_ID_REG (0x2A)
94#define DISP_XMIN_L_REG (0x30)
95#define DISP_XMIN_H_REG (0x31)
96#define DISP_YMIN_REG (0x32)
97#define DISP_XMAX_L_REG (0x34)
98#define DISP_XMAX_H_REG (0x35)
99#define DISP_YMAX_REG (0x36)
100#define DISP_SYNC_EN_REG (0x38)
101#define DISP_SYNC_RISE_L_REG (0x3C)
102#define DISP_SYNC_RISE_H_REG (0x3D)
103#define DISP_SYNC_FALL_L_REG (0x3E)
104#define DISP_SYNC_FALL_H_REG (0x3F)
105#define DISP_PIXEL_REG (0x0B)
106#define DISP_DUMMY1_REG (0x28)
107#define DISP_DUMMY2_REG (0x29)
108#define DISP_TIMING_REG (0x98)
109#define DISP_DUMP_REG (0x99)
110
111/* Sony display constants */
112#define SONY_ID1 (0x22)
113#define SONY_ID2 (0x23)
114
115/* Philips display register addresses */
116#define PH_DISP_ORIENT_REG (0x003)
117#define PH_DISP_YPOINT_REG (0x200)
118#define PH_DISP_XPOINT_REG (0x201)
119#define PH_DISP_PIXEL_REG (0x202)
120#define PH_DISP_YMIN_REG (0x406)
121#define PH_DISP_YMAX_REG (0x407)
122#define PH_DISP_XMIN_REG (0x408)
123#define PH_DISP_XMAX_REG (0x409)
124
125/* Misc constants */
126#define NO_VALID_DISPLAY_FOUND (0)
127#define DISPLAY2_IS_NOT_CONNECTED (0)
128
129/* register values */
130#define V_BAC_ENABLE (BIT(0))
131#define V_BAC_DISABLE_IDLE (BIT(1))
132#define V_BAC_DISABLE_TRIG (BIT(2))
133#define V_DUM_RESET (BIT(3))
134#define V_MUX_RESET (BIT(4))
135#define BAC_ENABLED (BIT(0))
136#define BAC_DISABLED 0
137
138/* Sony LCD commands */
139#define V_LCD_STANDBY_OFF ((BIT(25)) | (0 << 16) | DISP_0_REG)
140#define V_LCD_USE_9BIT_BUS ((BIT(25)) | (2 << 16) | DISP_1_REG)
141#define V_LCD_SYNC_RISE_L ((BIT(25)) | (0 << 16) | DISP_SYNC_RISE_L_REG)
142#define V_LCD_SYNC_RISE_H ((BIT(25)) | (0 << 16) | DISP_SYNC_RISE_H_REG)
143#define V_LCD_SYNC_FALL_L ((BIT(25)) | (160 << 16) | DISP_SYNC_FALL_L_REG)
144#define V_LCD_SYNC_FALL_H ((BIT(25)) | (0 << 16) | DISP_SYNC_FALL_H_REG)
145#define V_LCD_SYNC_ENABLE ((BIT(25)) | (128 << 16) | DISP_SYNC_EN_REG)
146#define V_LCD_DISPLAY_ON ((BIT(25)) | (64 << 16) | DISP_0_REG)
147
148enum {
149 PAD_NONE,
150 PAD_512,
151 PAD_1024
152};
153
154enum {
155 RGB888,
156 RGB666,
157 RGB565,
158 BGR565,
159 ARGB1555,
160 ABGR1555,
161 ARGB4444,
162 ABGR4444
163};
164
165struct dum_setup {
166 int sync_neg_edge;
167 int round_robin;
168 int mux_int;
169 int synced_dirty_flag_int;
170 int dirty_flag_int;
171 int error_int;
172 int pf_empty_int;
173 int sf_empty_int;
174 int bac_dis_int;
175 u32 dirty_base_adr;
176 u32 command_base_adr;
177 u32 sync_clk_div;
178 int sync_output;
179 u32 sync_restart_val;
180 u32 set_sync_high;
181 u32 set_sync_low;
182};
183
184struct dum_ch_setup {
185 int disp_no;
186 u32 xmin;
187 u32 ymin;
188 u32 xmax;
189 u32 ymax;
190 int xmirror;
191 int ymirror;
192 int rotate;
193 u32 minadr;
194 u32 maxadr;
195 u32 dirtybuffer;
196 int pad;
197 int format;
198 int hwdirty;
199 int slave_trans;
200};
201
202struct disp_window {
203 u32 xmin_l;
204 u32 xmin_h;
205 u32 ymin;
206 u32 xmax_l;
207 u32 xmax_h;
208 u32 ymax;
209};
210
211#endif /* #ifndef __PNX008_DUM_H__ */
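The macros above poke statically mapped registers through volatile dereferences, and the driver below uses them that way throughout. Purely as an illustration, the same read-modify-write expressed with the portable MMIO accessors (dum_ch_conf_set is hypothetical):

	#include <asm/io.h>

	static inline void dum_ch_conf_set(int i, u32 bits)
	{
		/* CH_MARG is 0x100/sizeof(u32): channel registers sit 0x100 bytes apart */
		void __iomem *reg = (void __iomem *)(DUM_CH_CONF_ADR + (i) * 0x100);

		writel(readl(reg) | bits, reg);
	}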
diff --git a/drivers/video/pnx4008/fbcommon.h b/drivers/video/pnx4008/fbcommon.h
new file mode 100644
index 000000000000..4ebc87dafafb
--- /dev/null
+++ b/drivers/video/pnx4008/fbcommon.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2005 Philips Semiconductors
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA, or http://www.gnu.org/licenses/gpl.html
18*/
19
20#define QCIF_W (176)
21#define QCIF_H (144)
22
23#define CIF_W (352)
24#define CIF_H (288)
25
26#define LCD_X_RES 208
27#define LCD_Y_RES 320
28#define LCD_X_PAD 256
29#define LCD_BBP 4 /* Bytes Per Pixel */
30
31#define DISP_MAX_X_SIZE (320)
32#define DISP_MAX_Y_SIZE (208)
33
34#define RETURNVAL_BASE (0x400)
35
36enum fb_ioctl_returntype {
37 ENORESOURCESLEFT = RETURNVAL_BASE,
38 ERESOURCESNOTFREED,
39 EPROCNOTOWNER,
40 EFBNOTOWNER,
41 ECOPYFAILED,
42 EIOREMAPFAILED,
43};
diff --git a/drivers/video/pnx4008/pnxrgbfb.c b/drivers/video/pnx4008/pnxrgbfb.c
new file mode 100644
index 000000000000..7d9453c91a42
--- /dev/null
+++ b/drivers/video/pnx4008/pnxrgbfb.c
@@ -0,0 +1,213 @@
1/*
2 * drivers/video/pnx4008/pnxrgbfb.c
3 *
4 * PNX4008's framebuffer support
5 *
6 * Author: Grigory Tolstolytkin <gtolstolytkin@ru.mvista.com>
  7 * Based on Philips Semiconductors' code
8 *
  9 * Copyright (c) 2005 MontaVista Software, Inc.
10 * Copyright (c) 2005 Philips Semiconductors
11 * This file is licensed under the terms of the GNU General Public License
12 * version 2. This program is licensed "as is" without any warranty of any
13 * kind, whether express or implied.
14 */
15
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/fb.h>
26#include <linux/init.h>
27#include <linux/platform_device.h>
28
29#include <asm/uaccess.h>
30#include "sdum.h"
31#include "fbcommon.h"
32
33static u32 colreg[16];
34
35static struct fb_var_screeninfo rgbfb_var __initdata = {
36 .xres = LCD_X_RES,
37 .yres = LCD_Y_RES,
38 .xres_virtual = LCD_X_RES,
39 .yres_virtual = LCD_Y_RES,
40 .bits_per_pixel = 32,
41 .red.offset = 16,
42 .red.length = 8,
43 .green.offset = 8,
44 .green.length = 8,
45 .blue.offset = 0,
46 .blue.length = 8,
47 .left_margin = 0,
48 .right_margin = 0,
49 .upper_margin = 0,
50 .lower_margin = 0,
51 .vmode = FB_VMODE_NONINTERLACED,
52};
53static struct fb_fix_screeninfo rgbfb_fix __initdata = {
54 .id = "RGBFB",
55 .line_length = LCD_X_RES * LCD_BBP,
56 .type = FB_TYPE_PACKED_PIXELS,
57 .visual = FB_VISUAL_TRUECOLOR,
58 .xpanstep = 0,
59 .ypanstep = 0,
60 .ywrapstep = 0,
61 .accel = FB_ACCEL_NONE,
62};
63
64static int channel_owned;
65
66static int no_cursor(struct fb_info *info, struct fb_cursor *cursor)
67{
68 return 0;
69}
70
71static int rgbfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
72 u_int transp, struct fb_info *info)
73{
74 if (regno > 15)
75 return 1;
76
77 colreg[regno] = ((red & 0xff00) << 8) | (green & 0xff00) |
78 ((blue & 0xff00) >> 8);
79 return 0;
80}
81
82static int rgbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
83{
84 return pnx4008_sdum_mmap(info, vma, NULL);
85}
86
87static struct fb_ops rgbfb_ops = {
88 .fb_mmap = rgbfb_mmap,
89 .fb_setcolreg = rgbfb_setcolreg,
90 .fb_fillrect = cfb_fillrect,
91 .fb_copyarea = cfb_copyarea,
92 .fb_imageblit = cfb_imageblit,
93};
94
95static int rgbfb_remove(struct platform_device *pdev)
96{
97 struct fb_info *info = platform_get_drvdata(pdev);
98
99 if (info) {
100 unregister_framebuffer(info);
101 fb_dealloc_cmap(&info->cmap);
102		platform_set_drvdata(pdev, NULL);
103		framebuffer_release(info);	/* also frees info: no separate kfree() */
104
105 }
106
107 pnx4008_free_dum_channel(channel_owned, pdev->id);
108 pnx4008_set_dum_exit_notification(pdev->id);
109
110 return 0;
111}
112
113static int __devinit rgbfb_probe(struct platform_device *pdev)
114{
115 struct fb_info *info;
116 struct dumchannel_uf chan_uf;
117 int ret;
118 char *option;
119
120 info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
121 if (!info) {
122 ret = -ENOMEM;
123 goto err;
124 }
125
126 pnx4008_get_fb_addresses(FB_TYPE_RGB, (void **)&info->screen_base,
127 (dma_addr_t *) &rgbfb_fix.smem_start,
128 &rgbfb_fix.smem_len);
129
130 if ((ret = pnx4008_alloc_dum_channel(pdev->id)) < 0)
131 goto err0;
132 else {
133 channel_owned = ret;
134 chan_uf.channelnr = channel_owned;
135 chan_uf.dirty = (u32 *) NULL;
136 chan_uf.source = (u32 *) rgbfb_fix.smem_start;
137 chan_uf.x_offset = 0;
138 chan_uf.y_offset = 0;
139 chan_uf.width = LCD_X_RES;
140 chan_uf.height = LCD_Y_RES;
141
142 if ((ret = pnx4008_put_dum_channel_uf(chan_uf, pdev->id))< 0)
143 goto err1;
144
145 if ((ret =
146 pnx4008_set_dum_channel_sync(channel_owned, CONF_SYNC_ON,
147 pdev->id)) < 0)
148 goto err1;
149
150 if ((ret =
151 pnx4008_set_dum_channel_dirty_detect(channel_owned,
152 CONF_DIRTYDETECTION_ON,
153 pdev->id)) < 0)
154 goto err1;
155 }
156
157	if (!fb_get_options("pnxrgbfb", &option) && option && !strcmp(option, "nocursor"))
158 rgbfb_ops.fb_cursor = no_cursor;
159
160 info->node = -1;
161 info->flags = FBINFO_FLAG_DEFAULT;
162 info->fbops = &rgbfb_ops;
163 info->fix = rgbfb_fix;
164 info->var = rgbfb_var;
165 info->screen_size = rgbfb_fix.smem_len;
166 info->pseudo_palette = info->par;
167 info->par = NULL;
168
169 ret = fb_alloc_cmap(&info->cmap, 256, 0);
170 if (ret < 0)
171 goto err2;
172
173 ret = register_framebuffer(info);
174 if (ret < 0)
175 goto err3;
176 platform_set_drvdata(pdev, info);
177
178 return 0;
179
180err3:
181	fb_dealloc_cmap(&info->cmap);
182err2:
183err1:
184	pnx4008_free_dum_channel(channel_owned, pdev->id);
185err0:
186	framebuffer_release(info);	/* frees info itself */
187
188err:
189 return ret;
190}
191
192static struct platform_driver rgbfb_driver = {
193 .driver = {
194 .name = "rgbfb",
195 },
196 .probe = rgbfb_probe,
197 .remove = rgbfb_remove,
198};
199
200static int __init rgbfb_init(void)
201{
202 return platform_driver_register(&rgbfb_driver);
203}
204
205static void __exit rgbfb_exit(void)
206{
207 platform_driver_unregister(&rgbfb_driver);
208}
209
210module_init(rgbfb_init);
211module_exit(rgbfb_exit);
212
213MODULE_LICENSE("GPL");
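framebuffer_alloc() hands back one allocation containing the fb_info plus the requested par bytes, and framebuffer_release() frees that same allocation, so the pair must balance with no separate kfree() of the fb_info. A minimal hedged unwind sketch (my_fb_probe is hypothetical):

	#include <linux/fb.h>
	#include <linux/platform_device.h>

	static int __devinit my_fb_probe(struct platform_device *pdev)
	{
		struct fb_info *info;
		int ret;

		info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
		if (!info)
			return -ENOMEM;

		ret = fb_alloc_cmap(&info->cmap, 256, 0);
		if (ret < 0)
			goto err_release;

		ret = register_framebuffer(info);
		if (ret < 0)
			goto err_cmap;

		platform_set_drvdata(pdev, info);
		return 0;

	err_cmap:
		fb_dealloc_cmap(&info->cmap);
	err_release:
		framebuffer_release(info);	/* frees info itself */
		return ret;
	}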
diff --git a/drivers/video/pnx4008/sdum.c b/drivers/video/pnx4008/sdum.c
new file mode 100644
index 000000000000..51f0ecc2a511
--- /dev/null
+++ b/drivers/video/pnx4008/sdum.c
@@ -0,0 +1,872 @@
1/*
2 * drivers/video/pnx4008/sdum.c
3 *
4 * Display Update Master support
5 *
6 * Authors: Grigory Tolstolytkin <gtolstolytkin@ru.mvista.com>
7 * Vitaly Wool <vitalywool@gmail.com>
  7 * Based on Philips Semiconductors' code
9 *
 10 * Copyright (c) 2005-2006 MontaVista Software, Inc.
11 * Copyright (c) 2005 Philips Semiconductors
12 * This file is licensed under the terms of the GNU General Public License
13 * version 2. This program is licensed "as is" without any warranty of any
14 * kind, whether express or implied.
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/errno.h>
20#include <linux/string.h>
21#include <linux/mm.h>
22#include <linux/tty.h>
23#include <linux/slab.h>
24#include <linux/vmalloc.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/platform_device.h>
28#include <linux/fb.h>
29#include <linux/init.h>
30#include <linux/dma-mapping.h>
31#include <linux/clk.h>
32#include <asm/uaccess.h>
33#include <asm/arch/gpio.h>
34
35#include "sdum.h"
36#include "fbcommon.h"
37#include "dum.h"
38
39/* Framebuffers we have */
40
41static struct pnx4008_fb_addr {
42 int fb_type;
43 long addr_offset;
44 long fb_length;
45} fb_addr[] = {
46 [0] = {
47 FB_TYPE_YUV, 0, 0xB0000
48 },
49 [1] = {
50 FB_TYPE_RGB, 0xB0000, 0x50000
51 },
52};
53
54static struct dum_data {
55 u32 lcd_phys_start;
56 u32 lcd_virt_start;
57 u32 slave_phys_base;
58 u32 *slave_virt_base;
59 int fb_owning_channel[MAX_DUM_CHANNELS];
60 struct dumchannel_uf chan_uf_store[MAX_DUM_CHANNELS];
61} dum_data;
62
 63/* Miscellaneous local helper functions */
64
65static u32 nof_pixels_dx(struct dum_ch_setup *ch_setup)
66{
67 return (ch_setup->xmax - ch_setup->xmin + 1);
68}
69
70static u32 nof_pixels_dy(struct dum_ch_setup *ch_setup)
71{
72 return (ch_setup->ymax - ch_setup->ymin + 1);
73}
74
75static u32 nof_pixels_dxy(struct dum_ch_setup *ch_setup)
76{
77 return (nof_pixels_dx(ch_setup) * nof_pixels_dy(ch_setup));
78}
79
80static u32 nof_bytes(struct dum_ch_setup *ch_setup)
81{
82 u32 r = nof_pixels_dxy(ch_setup);
83 switch (ch_setup->format) {
84 case RGB888:
85 case RGB666:
86 r *= 4;
87 break;
88
89 default:
90 r *= 2;
91 break;
92 }
93 return r;
94}
95
96static u32 build_command(int disp_no, u32 reg, u32 val)
97{
98 return ((disp_no << 26) | BIT(25) | (val << 16) | (disp_no << 10) |
99 (reg << 0));
100}
101
102static u32 build_double_index(int disp_no, u32 val)
103{
104 return ((disp_no << 26) | (val << 16) | (disp_no << 10) | (val << 0));
105}
106
107static void build_disp_window(struct dum_ch_setup * ch_setup, struct disp_window * dw)
108{
109 dw->ymin = ch_setup->ymin;
110 dw->ymax = ch_setup->ymax;
111 dw->xmin_l = ch_setup->xmin & 0xFF;
112 dw->xmin_h = (ch_setup->xmin & BIT(8)) >> 8;
113 dw->xmax_l = ch_setup->xmax & 0xFF;
114 dw->xmax_h = (ch_setup->xmax & BIT(8)) >> 8;
115}
116
117static int put_channel(struct dumchannel chan)
118{
119 int i = chan.channelnr;
120
121	if (i < 0 || i >= MAX_DUM_CHANNELS)
122 return -EINVAL;
123 else {
124 DUM_CH_MIN(i) = chan.dum_ch_min;
125 DUM_CH_MAX(i) = chan.dum_ch_max;
126 DUM_CH_CONF(i) = chan.dum_ch_conf;
127 DUM_CH_CTRL(i) = chan.dum_ch_ctrl;
128 }
129
130 return 0;
131}
132
133static void clear_channel(int channr)
134{
135 struct dumchannel chan;
136
137 chan.channelnr = channr;
138 chan.dum_ch_min = 0;
139 chan.dum_ch_max = 0;
140 chan.dum_ch_conf = 0;
141 chan.dum_ch_ctrl = 0;
142
143 put_channel(chan);
144}
145
146static int put_cmd_string(struct cmdstring cmds)
147{
148 u16 *cmd_str_virtaddr;
149 u32 *cmd_ptr0_virtaddr;
150 u32 cmd_str_physaddr;
151
152 int i = cmds.channelnr;
153
154	if (i < 0 || i >= MAX_DUM_CHANNELS)
155 return -EINVAL;
156 else if ((cmd_ptr0_virtaddr =
157 (int *)ioremap_nocache(DUM_COM_BASE,
158 sizeof(int) * MAX_DUM_CHANNELS)) ==
159 NULL)
160 return -EIOREMAPFAILED;
161 else {
162 cmd_str_physaddr = ioread32(&cmd_ptr0_virtaddr[cmds.channelnr]);
163 if ((cmd_str_virtaddr =
164 (u16 *) ioremap_nocache(cmd_str_physaddr,
165 sizeof(cmds))) == NULL) {
166 iounmap(cmd_ptr0_virtaddr);
167 return -EIOREMAPFAILED;
168 } else {
169 int t;
170 for (t = 0; t < 8; t++)
171 iowrite16(*((u16 *)&cmds.prestringlen + t),
172 cmd_str_virtaddr + t);
173
174 for (t = 0; t < cmds.prestringlen / 2; t++)
175 iowrite16(*((u16 *)&cmds.precmd + t),
176 cmd_str_virtaddr + t + 8);
177
178 for (t = 0; t < cmds.poststringlen / 2; t++)
179 iowrite16(*((u16 *)&cmds.postcmd + t),
180 cmd_str_virtaddr + t + 8 +
181 cmds.prestringlen / 2);
182
183 iounmap(cmd_ptr0_virtaddr);
184 iounmap(cmd_str_virtaddr);
185 }
186 }
187
188 return 0;
189}
190
191static u32 dum_ch_setup(int ch_no, struct dum_ch_setup * ch_setup)
192{
193 struct cmdstring cmds_c;
194 struct cmdstring *cmds = &cmds_c;
195 struct disp_window dw;
196 int standard;
197 u32 orientation = 0;
198 struct dumchannel chan = { 0 };
199 int ret;
200
201 if ((ch_setup->xmirror) || (ch_setup->ymirror) || (ch_setup->rotate)) {
202 standard = 0;
203
204 orientation = BIT(1); /* always set 9-bit-bus */
205 if (ch_setup->xmirror)
206 orientation |= BIT(4);
207 if (ch_setup->ymirror)
208 orientation |= BIT(3);
209 if (ch_setup->rotate)
210 orientation |= BIT(0);
211 } else
212 standard = 1;
213
214 cmds->channelnr = ch_no;
215
216 /* build command string header */
217 if (standard) {
218 cmds->prestringlen = 32;
219 cmds->poststringlen = 0;
220 } else {
221 cmds->prestringlen = 48;
222 cmds->poststringlen = 16;
223 }
224
225 cmds->format =
226 (u16) ((ch_setup->disp_no << 4) | (BIT(3)) | (ch_setup->format));
227 cmds->reserved = 0x0;
228 cmds->startaddr_low = (ch_setup->minadr & 0xFFFF);
229 cmds->startaddr_high = (ch_setup->minadr >> 16);
230
231 if ((ch_setup->minadr == 0) && (ch_setup->maxadr == 0)
232 && (ch_setup->xmin == 0)
233 && (ch_setup->ymin == 0) && (ch_setup->xmax == 0)
234 && (ch_setup->ymax == 0)) {
235 cmds->pixdatlen_low = 0;
236 cmds->pixdatlen_high = 0;
237 } else {
238 u32 nbytes = nof_bytes(ch_setup);
239 cmds->pixdatlen_low = (nbytes & 0xFFFF);
240 cmds->pixdatlen_high = (nbytes >> 16);
241 }
242
243 if (ch_setup->slave_trans)
244 cmds->pixdatlen_high |= BIT(15);
245
246 /* build pre-string */
247 build_disp_window(ch_setup, &dw);
248
249 if (standard) {
250 cmds->precmd[0] =
251 build_command(ch_setup->disp_no, DISP_XMIN_L_REG, 0x99);
252 cmds->precmd[1] =
253 build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
254 dw.xmin_l);
255 cmds->precmd[2] =
256 build_command(ch_setup->disp_no, DISP_XMIN_H_REG,
257 dw.xmin_h);
258 cmds->precmd[3] =
259 build_command(ch_setup->disp_no, DISP_YMIN_REG, dw.ymin);
260 cmds->precmd[4] =
261 build_command(ch_setup->disp_no, DISP_XMAX_L_REG,
262 dw.xmax_l);
263 cmds->precmd[5] =
264 build_command(ch_setup->disp_no, DISP_XMAX_H_REG,
265 dw.xmax_h);
266 cmds->precmd[6] =
267 build_command(ch_setup->disp_no, DISP_YMAX_REG, dw.ymax);
268 cmds->precmd[7] =
269 build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
270 } else {
271 if (dw.xmin_l == ch_no)
272 cmds->precmd[0] =
273 build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
274 0x99);
275 else
276 cmds->precmd[0] =
277 build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
278 ch_no);
279
280 cmds->precmd[1] =
281 build_command(ch_setup->disp_no, DISP_XMIN_L_REG,
282 dw.xmin_l);
283 cmds->precmd[2] =
284 build_command(ch_setup->disp_no, DISP_XMIN_H_REG,
285 dw.xmin_h);
286 cmds->precmd[3] =
287 build_command(ch_setup->disp_no, DISP_YMIN_REG, dw.ymin);
288 cmds->precmd[4] =
289 build_command(ch_setup->disp_no, DISP_XMAX_L_REG,
290 dw.xmax_l);
291 cmds->precmd[5] =
292 build_command(ch_setup->disp_no, DISP_XMAX_H_REG,
293 dw.xmax_h);
294 cmds->precmd[6] =
295 build_command(ch_setup->disp_no, DISP_YMAX_REG, dw.ymax);
296 cmds->precmd[7] =
297 build_command(ch_setup->disp_no, DISP_1_REG, orientation);
298 cmds->precmd[8] =
299 build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
300 cmds->precmd[9] =
301 build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
302 cmds->precmd[0xA] =
303 build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
304 cmds->precmd[0xB] =
305 build_double_index(ch_setup->disp_no, DISP_PIXEL_REG);
306 cmds->postcmd[0] =
307 build_command(ch_setup->disp_no, DISP_1_REG, BIT(1));
308 cmds->postcmd[1] =
309 build_command(ch_setup->disp_no, DISP_DUMMY1_REG, 1);
310 cmds->postcmd[2] =
311 build_command(ch_setup->disp_no, DISP_DUMMY1_REG, 2);
312 cmds->postcmd[3] =
313 build_command(ch_setup->disp_no, DISP_DUMMY1_REG, 3);
314 }
315
316 if ((ret = put_cmd_string(cmds_c)) != 0) {
317 return ret;
318 }
319
320 chan.channelnr = cmds->channelnr;
321 chan.dum_ch_min = ch_setup->dirtybuffer + ch_setup->minadr;
322 chan.dum_ch_max = ch_setup->dirtybuffer + ch_setup->maxadr;
323 chan.dum_ch_conf = 0x002;
324 chan.dum_ch_ctrl = 0x04;
325
326 put_channel(chan);
327
328 return 0;
329}
330
331static u32 display_open(int ch_no, int auto_update, u32 * dirty_buffer,
332 u32 * frame_buffer, u32 xpos, u32 ypos, u32 w, u32 h)
333{
334
335 struct dum_ch_setup k;
336 int ret;
337
338 /* keep width & height within display area */
339 if ((xpos + w) > DISP_MAX_X_SIZE)
340 w = DISP_MAX_X_SIZE - xpos;
341
342 if ((ypos + h) > DISP_MAX_Y_SIZE)
343 h = DISP_MAX_Y_SIZE - ypos;
344
345 /* assume 1 display only */
346 k.disp_no = 0;
347 k.xmin = xpos;
348 k.ymin = ypos;
349 k.xmax = xpos + (w - 1);
350 k.ymax = ypos + (h - 1);
351
352 /* adjust min and max values if necessary */
353 if (k.xmin > DISP_MAX_X_SIZE - 1)
354 k.xmin = DISP_MAX_X_SIZE - 1;
355 if (k.ymin > DISP_MAX_Y_SIZE - 1)
356 k.ymin = DISP_MAX_Y_SIZE - 1;
357
358 if (k.xmax > DISP_MAX_X_SIZE - 1)
359 k.xmax = DISP_MAX_X_SIZE - 1;
360 if (k.ymax > DISP_MAX_Y_SIZE - 1)
361 k.ymax = DISP_MAX_Y_SIZE - 1;
362
363 k.xmirror = 0;
364 k.ymirror = 0;
365 k.rotate = 0;
366 k.minadr = (u32) frame_buffer;
367 k.maxadr = (u32) frame_buffer + (((w - 1) << 10) | ((h << 2) - 2));
368 k.pad = PAD_1024;
369 k.dirtybuffer = (u32) dirty_buffer;
370 k.format = RGB888;
371 k.hwdirty = 0;
372 k.slave_trans = 0;
373
374 ret = dum_ch_setup(ch_no, &k);
375
376 return ret;
377}
378
379static void lcd_reset(void)
380{
381 u32 *dum_pio_base = (u32 *)IO_ADDRESS(PNX4008_PIO_BASE);
382
383 udelay(1);
384 iowrite32(BIT(19), &dum_pio_base[2]);
385 udelay(1);
386 iowrite32(BIT(19), &dum_pio_base[1]);
387 udelay(1);
388}
389
390static int dum_init(struct platform_device *pdev)
391{
392 struct clk *clk;
393
394 /* enable DUM clock */
395 clk = clk_get(&pdev->dev, "dum_ck");
396 if (IS_ERR(clk)) {
397 printk(KERN_ERR "pnx4008_dum: Unable to access DUM clock\n");
398 return PTR_ERR(clk);
399 }
400
401 clk_set_rate(clk, 1);
402 clk_put(clk);
403
404 DUM_CTRL = V_DUM_RESET;
405
406 /* set priority to "round-robin". All other params to "false" */
407 DUM_CONF = BIT(9);
408
409 /* Display 1 */
410 DUM_WTCFG1 = PNX4008_DUM_WT_CFG;
411 DUM_RTCFG1 = PNX4008_DUM_RT_CFG;
412 DUM_TCFG = PNX4008_DUM_T_CFG;
413
414 return 0;
415}
416
417static void dum_chan_init(void)
418{
419 int i = 0, ch = 0;
420 u32 *cmdptrs;
421 u32 *cmdstrings;
422
423 DUM_COM_BASE =
424 CMDSTRING_BASEADDR + BYTES_PER_CMDSTRING * NR_OF_CMDSTRINGS;
425
426 if ((cmdptrs =
427 (u32 *) ioremap_nocache(DUM_COM_BASE,
428 sizeof(u32) * NR_OF_CMDSTRINGS)) == NULL)
429 return;
430
431 for (ch = 0; ch < NR_OF_CMDSTRINGS; ch++)
432 iowrite32(CMDSTRING_BASEADDR + BYTES_PER_CMDSTRING * ch,
433 cmdptrs + ch);
434
435 for (ch = 0; ch < MAX_DUM_CHANNELS; ch++)
436 clear_channel(ch);
437
438 /* Clear the cmdstrings */
439 cmdstrings =
440 (u32 *)ioremap_nocache(*cmdptrs,
441 BYTES_PER_CMDSTRING * NR_OF_CMDSTRINGS);
442
443 if (!cmdstrings)
444 goto out;
445
446 for (i = 0; i < NR_OF_CMDSTRINGS * BYTES_PER_CMDSTRING / sizeof(u32);
447 i++)
448 iowrite32(0, cmdstrings + i);
449
450 iounmap((u32 *)cmdstrings);
451
452out:
453 iounmap((u32 *)cmdptrs);
454}
455
456static void lcd_init(void)
457{
458 lcd_reset();
459
460 DUM_OUTP_FORMAT1 = 0; /* RGB666 */
461
462 udelay(1);
463 iowrite32(V_LCD_STANDBY_OFF, dum_data.slave_virt_base);
464 udelay(1);
465 iowrite32(V_LCD_USE_9BIT_BUS, dum_data.slave_virt_base);
466 udelay(1);
467 iowrite32(V_LCD_SYNC_RISE_L, dum_data.slave_virt_base);
468 udelay(1);
469 iowrite32(V_LCD_SYNC_RISE_H, dum_data.slave_virt_base);
470 udelay(1);
471 iowrite32(V_LCD_SYNC_FALL_L, dum_data.slave_virt_base);
472 udelay(1);
473 iowrite32(V_LCD_SYNC_FALL_H, dum_data.slave_virt_base);
474 udelay(1);
475 iowrite32(V_LCD_SYNC_ENABLE, dum_data.slave_virt_base);
476 udelay(1);
477 iowrite32(V_LCD_DISPLAY_ON, dum_data.slave_virt_base);
478 udelay(1);
479}
480
481/* Interface exported to framebuffer drivers */
482
483int pnx4008_get_fb_addresses(int fb_type, void **virt_addr,
484 dma_addr_t *phys_addr, int *fb_length)
485{
486 int i;
487 int ret = -1;
488 for (i = 0; i < ARRAY_SIZE(fb_addr); i++)
489 if (fb_addr[i].fb_type == fb_type) {
490 *virt_addr = (void *)(dum_data.lcd_virt_start +
491 fb_addr[i].addr_offset);
492 *phys_addr =
493 dum_data.lcd_phys_start + fb_addr[i].addr_offset;
494 *fb_length = fb_addr[i].fb_length;
495 ret = 0;
496 break;
497 }
498
499 return ret;
500}
501
502EXPORT_SYMBOL(pnx4008_get_fb_addresses);
503
504int pnx4008_alloc_dum_channel(int dev_id)
505{
506 int i = 0;
507
508 while ((i < MAX_DUM_CHANNELS) && (dum_data.fb_owning_channel[i] != -1))
509 i++;
510
511 if (i == MAX_DUM_CHANNELS)
512 return -ENORESOURCESLEFT;
513 else {
514 dum_data.fb_owning_channel[i] = dev_id;
515 return i;
516 }
517}
518
519EXPORT_SYMBOL(pnx4008_alloc_dum_channel);
520
521int pnx4008_free_dum_channel(int channr, int dev_id)
522{
523	if (channr < 0 || channr >= MAX_DUM_CHANNELS)
524 return -EINVAL;
525 else if (dum_data.fb_owning_channel[channr] != dev_id)
526 return -EFBNOTOWNER;
527 else {
528 clear_channel(channr);
529 dum_data.fb_owning_channel[channr] = -1;
530 }
531
532 return 0;
533}
534
535EXPORT_SYMBOL(pnx4008_free_dum_channel);
536
537int pnx4008_put_dum_channel_uf(struct dumchannel_uf chan_uf, int dev_id)
538{
539 int i = chan_uf.channelnr;
540 int ret;
541
542	if (i < 0 || i >= MAX_DUM_CHANNELS)
543 return -EINVAL;
544 else if (dum_data.fb_owning_channel[i] != dev_id)
545 return -EFBNOTOWNER;
546 else if ((ret =
547 display_open(chan_uf.channelnr, 0, chan_uf.dirty,
548 chan_uf.source, chan_uf.y_offset,
549 chan_uf.x_offset, chan_uf.height,
550 chan_uf.width)) != 0)
551 return ret;
552 else {
553 dum_data.chan_uf_store[i].dirty = chan_uf.dirty;
554 dum_data.chan_uf_store[i].source = chan_uf.source;
555 dum_data.chan_uf_store[i].x_offset = chan_uf.x_offset;
556 dum_data.chan_uf_store[i].y_offset = chan_uf.y_offset;
557 dum_data.chan_uf_store[i].width = chan_uf.width;
558 dum_data.chan_uf_store[i].height = chan_uf.height;
559 }
560
561 return 0;
562}
563
564EXPORT_SYMBOL(pnx4008_put_dum_channel_uf);
565
566int pnx4008_set_dum_channel_sync(int channr, int val, int dev_id)
567{
568	if (channr < 0 || channr >= MAX_DUM_CHANNELS)
569 return -EINVAL;
570 else if (dum_data.fb_owning_channel[channr] != dev_id)
571 return -EFBNOTOWNER;
572 else {
573 if (val == CONF_SYNC_ON) {
574 DUM_CH_CONF(channr) |= CONF_SYNCENABLE;
575 DUM_CH_CONF(channr) |= DUM_CHANNEL_CFG_SYNC_MASK |
576 DUM_CHANNEL_CFG_SYNC_MASK_SET;
577 } else if (val == CONF_SYNC_OFF)
578 DUM_CH_CONF(channr) &= ~CONF_SYNCENABLE;
579 else
580 return -EINVAL;
581 }
582
583 return 0;
584}
585
586EXPORT_SYMBOL(pnx4008_set_dum_channel_sync);
587
588int pnx4008_set_dum_channel_dirty_detect(int channr, int val, int dev_id)
589{
590	if (channr < 0 || channr >= MAX_DUM_CHANNELS)
591 return -EINVAL;
592 else if (dum_data.fb_owning_channel[channr] != dev_id)
593 return -EFBNOTOWNER;
594 else {
595 if (val == CONF_DIRTYDETECTION_ON)
596 DUM_CH_CONF(channr) |= CONF_DIRTYENABLE;
597 else if (val == CONF_DIRTYDETECTION_OFF)
598 DUM_CH_CONF(channr) &= ~CONF_DIRTYENABLE;
599 else
600 return -EINVAL;
601 }
602
603 return 0;
604}
605
606EXPORT_SYMBOL(pnx4008_set_dum_channel_dirty_detect);
607
608#if 0 /* Functions not used currently, but likely to be used in future */
609
610static int get_channel(struct dumchannel *p_chan)
611{
612 int i = p_chan->channelnr;
613
614	if (i < 0 || i >= MAX_DUM_CHANNELS)
615 return -EINVAL;
616 else {
617 p_chan->dum_ch_min = DUM_CH_MIN(i);
618 p_chan->dum_ch_max = DUM_CH_MAX(i);
619 p_chan->dum_ch_conf = DUM_CH_CONF(i);
620 p_chan->dum_ch_stat = DUM_CH_STAT(i);
621 p_chan->dum_ch_ctrl = 0; /* WriteOnly control register */
622 }
623
624 return 0;
625}
626
627int pnx4008_get_dum_channel_uf(struct dumchannel_uf *p_chan_uf, int dev_id)
628{
629 int i = p_chan_uf->channelnr;
630
631	if (i < 0 || i >= MAX_DUM_CHANNELS)
632 return -EINVAL;
633 else if (dum_data.fb_owning_channel[i] != dev_id)
634 return -EFBNOTOWNER;
635 else {
636 p_chan_uf->dirty = dum_data.chan_uf_store[i].dirty;
637 p_chan_uf->source = dum_data.chan_uf_store[i].source;
638 p_chan_uf->x_offset = dum_data.chan_uf_store[i].x_offset;
639 p_chan_uf->y_offset = dum_data.chan_uf_store[i].y_offset;
640 p_chan_uf->width = dum_data.chan_uf_store[i].width;
641 p_chan_uf->height = dum_data.chan_uf_store[i].height;
642 }
643
644 return 0;
645}
646
647EXPORT_SYMBOL(pnx4008_get_dum_channel_uf);
648
649int pnx4008_get_dum_channel_config(int channr, int dev_id)
650{
651 int ret;
652 struct dumchannel chan;
653
654	if (channr < 0 || channr >= MAX_DUM_CHANNELS)
655 return -EINVAL;
656 else if (dum_data.fb_owning_channel[channr] != dev_id)
657 return -EFBNOTOWNER;
658 else {
659 chan.channelnr = channr;
660 if ((ret = get_channel(&chan)) != 0)
661 return ret;
662 }
663
664 return (chan.dum_ch_conf & DUM_CHANNEL_CFG_MASK);
665}
666
667EXPORT_SYMBOL(pnx4008_get_dum_channel_config);
668
669int pnx4008_force_update_dum_channel(int channr, int dev_id)
670{
671	if (channr < 0 || channr >= MAX_DUM_CHANNELS)
672 return -EINVAL;
673
674 else if (dum_data.fb_owning_channel[channr] != dev_id)
675 return -EFBNOTOWNER;
676 else
677 DUM_CH_CTRL(channr) = CTRL_SETDIRTY;
678
679 return 0;
680}
681
682EXPORT_SYMBOL(pnx4008_force_update_dum_channel);
683
684#endif
685
686int pnx4008_sdum_mmap(struct fb_info *info, struct vm_area_struct *vma,
687 struct device *dev)
688{
689 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
690
691 if (off < info->fix.smem_len) {
692 vma->vm_pgoff += 1;
693 return dma_mmap_writecombine(dev, vma,
694 (void *)dum_data.lcd_virt_start,
695 dum_data.lcd_phys_start,
696 FB_DMA_SIZE);
697 }
698 return -EINVAL;
699}
700
701EXPORT_SYMBOL(pnx4008_sdum_mmap);
702
703int pnx4008_set_dum_exit_notification(int dev_id)
704{
705 int i;
706
707 for (i = 0; i < MAX_DUM_CHANNELS; i++)
708 if (dum_data.fb_owning_channel[i] == dev_id)
709 return -ERESOURCESNOTFREED;
710
711 return 0;
712}
713
714EXPORT_SYMBOL(pnx4008_set_dum_exit_notification);
715
716/* Platform device driver for DUM */
717
718static int sdum_suspend(struct platform_device *pdev, pm_message_t state)
719{
720 int retval = 0;
721 struct clk *clk;
722
723 clk = clk_get(0, "dum_ck");
724 if (!IS_ERR(clk)) {
725 clk_set_rate(clk, 0);
726 clk_put(clk);
727 } else
728 retval = PTR_ERR(clk);
729
730 /* disable BAC */
731 DUM_CTRL = V_BAC_DISABLE_IDLE;
732
733 /* LCD standby & turn off display */
734 lcd_reset();
735
736 return retval;
737}
738
739static int sdum_resume(struct platform_device *pdev)
740{
741 int retval = 0;
742 struct clk *clk;
743
744 clk = clk_get(0, "dum_ck");
745 if (!IS_ERR(clk)) {
746 clk_set_rate(clk, 1);
747 clk_put(clk);
748 } else
749 retval = PTR_ERR(clk);
750
751 /* wait for BAC disable */
752 DUM_CTRL = V_BAC_DISABLE_TRIG;
753
754 while (DUM_CTRL & BAC_ENABLED)
755 udelay(10);
756
757 /* re-init LCD */
758 lcd_init();
759
760 /* enable BAC and reset MUX */
761 DUM_CTRL = V_BAC_ENABLE;
762 udelay(1);
763 DUM_CTRL = V_MUX_RESET;
764	return retval;
765}
766
767static int __devinit sdum_probe(struct platform_device *pdev)
768{
769 int ret = 0, i = 0;
770
771 /* map frame buffer */
772 dum_data.lcd_virt_start = (u32) dma_alloc_writecombine(&pdev->dev,
773 FB_DMA_SIZE,
774 &dum_data.lcd_phys_start,
775 GFP_KERNEL);
776
777 if (!dum_data.lcd_virt_start) {
778 ret = -ENOMEM;
779 goto out_3;
780 }
781
782 /* map slave registers */
783 dum_data.slave_phys_base = PNX4008_DUM_SLAVE_BASE;
784 dum_data.slave_virt_base =
785 (u32 *) ioremap_nocache(dum_data.slave_phys_base, sizeof(u32));
786
787 if (dum_data.slave_virt_base == NULL) {
788 ret = -ENOMEM;
789 goto out_2;
790 }
791
792 /* initialize DUM and LCD display */
793 ret = dum_init(pdev);
794 if (ret)
795 goto out_1;
796
797 dum_chan_init();
798 lcd_init();
799
800 DUM_CTRL = V_BAC_ENABLE;
801 udelay(1);
802 DUM_CTRL = V_MUX_RESET;
803
804 /* set decode address and sync clock divider */
805 DUM_DECODE = dum_data.lcd_phys_start & DUM_DECODE_MASK;
806 DUM_CLK_DIV = PNX4008_DUM_CLK_DIV;
807
808 for (i = 0; i < MAX_DUM_CHANNELS; i++)
809 dum_data.fb_owning_channel[i] = -1;
810
811	/* set up wakeup interrupt */
812 start_int_set_rising_edge(SE_DISP_SYNC_INT);
813 start_int_ack(SE_DISP_SYNC_INT);
814 start_int_umask(SE_DISP_SYNC_INT);
815
816 return 0;
817
818out_1:
819 iounmap((void *)dum_data.slave_virt_base);
820out_2:
821 dma_free_writecombine(&pdev->dev, FB_DMA_SIZE,
822 (void *)dum_data.lcd_virt_start,
823 dum_data.lcd_phys_start);
824out_3:
825 return ret;
826}
827
828static int sdum_remove(struct platform_device *pdev)
829{
830 struct clk *clk;
831
832 start_int_mask(SE_DISP_SYNC_INT);
833
834 clk = clk_get(0, "dum_ck");
835 if (!IS_ERR(clk)) {
836 clk_set_rate(clk, 0);
837 clk_put(clk);
838 }
839
840 iounmap((void *)dum_data.slave_virt_base);
841
842 dma_free_writecombine(&pdev->dev, FB_DMA_SIZE,
843 (void *)dum_data.lcd_virt_start,
844 dum_data.lcd_phys_start);
845
846 return 0;
847}
848
849static struct platform_driver sdum_driver = {
850 .driver = {
851 .name = "sdum",
852 },
853 .probe = sdum_probe,
854 .remove = sdum_remove,
855 .suspend = sdum_suspend,
856 .resume = sdum_resume,
857};
858
859int __init sdum_init(void)
860{
861 return platform_driver_register(&sdum_driver);
862}
863
864static void __exit sdum_exit(void)
865{
866 platform_driver_unregister(&sdum_driver);
867}
868
869module_init(sdum_init);
870module_exit(sdum_exit);
871
872MODULE_LICENSE("GPL");
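The exported pnx4008_* calls form a small ownership protocol around DUM channels; rgbfb_probe() above is the reference client. A condensed, hedged outline of the expected order (my_client_setup is hypothetical, error handling elided, constants from sdum.h below):

	static int my_client_setup(int dev_id)
	{
		struct dumchannel_uf uf = { 0 };
		void *virt;
		dma_addr_t phys;
		int len, ch;

		pnx4008_get_fb_addresses(FB_TYPE_RGB, &virt, &phys, &len);
		ch = pnx4008_alloc_dum_channel(dev_id);		/* claim a channel */
		uf.channelnr = ch;
		uf.source    = (u32 *)phys;			/* scan-out source */
		uf.width     = 208;				/* LCD_X_RES */
		uf.height    = 320;				/* LCD_Y_RES */
		pnx4008_put_dum_channel_uf(uf, dev_id);		/* program the window */
		pnx4008_set_dum_channel_sync(ch, CONF_SYNC_ON, dev_id);
		pnx4008_set_dum_channel_dirty_detect(ch, CONF_DIRTYDETECTION_ON, dev_id);
		return ch;	/* pnx4008_free_dum_channel(ch, dev_id) on teardown */
	}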
diff --git a/drivers/video/pnx4008/sdum.h b/drivers/video/pnx4008/sdum.h
new file mode 100644
index 000000000000..e8c5dcdd8813
--- /dev/null
+++ b/drivers/video/pnx4008/sdum.h
@@ -0,0 +1,139 @@
1/*
2 * Copyright (C) 2005 Philips Semiconductors
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA, or http://www.gnu.org/licenses/gpl.html
18*/
19
20#define MAX_DUM_CHANNELS 64
21
22#define RGB_MEM_WINDOW(x) (0x10000000 + (x)*0x00100000)
23
24#define QCIF_OFFSET(x) (((x) == 0) ? 0x00000: ((x) == 1) ? 0x30000: -1)
25#define CIF_OFFSET(x) (((x) == 0) ? 0x00000: ((x) == 1) ? 0x60000: -1)
26
27#define CTRL_SETDIRTY (0x00000001)
28#define CONF_DIRTYENABLE (0x00000020)
29#define CONF_SYNCENABLE (0x00000004)
30
31#define DIRTY_ENABLED(conf) ((conf) & 0x0020)
32#define SYNC_ENABLED(conf) ((conf) & 0x0004)
33
34/* Display 1 & 2 Write Timing Configuration */
35#define PNX4008_DUM_WT_CFG 0x00372000
36
37/* Display 1 & 2 Read Timing Configuration */
38#define PNX4008_DUM_RT_CFG 0x00003A47
39
40/* DUM Transit State Timing Configuration */
41#define PNX4008_DUM_T_CFG 0x1D /* 29 HCLK cycles */
42
43/* DUM Sync count clock divider */
44#define PNX4008_DUM_CLK_DIV 0x02DD
45
46/* Memory size for framebuffer, allocated through dma_alloc_writecombine().
47 * Must be PAGE aligned
48 */
49#define FB_DMA_SIZE (PAGE_ALIGN(SZ_1M + PAGE_SIZE))
50
51#define OFFSET_RGBBUFFER (0xB0000)
52#define OFFSET_YUVBUFFER (0x00000)
53
54#define YUVBUFFER (lcd_video_start + OFFSET_YUVBUFFER)
55#define RGBBUFFER (lcd_video_start + OFFSET_RGBBUFFER)
56
57#define CMDSTRING_BASEADDR (0x00C000) /* iram */
58#define BYTES_PER_CMDSTRING (0x80)
59#define NR_OF_CMDSTRINGS (64)
60
61#define MAX_NR_PRESTRINGS (0x40)
62#define MAX_NR_POSTSTRINGS (0x40)
63
64/* various mask definitions */
65#define DUM_CLK_ENABLE 0x01
66#define DUM_CLK_DISABLE 0
67#define DUM_DECODE_MASK 0x1FFFFFFF
68#define DUM_CHANNEL_CFG_MASK 0x01FF
69#define DUM_CHANNEL_CFG_SYNC_MASK 0xFFFE00FF
70#define DUM_CHANNEL_CFG_SYNC_MASK_SET 0x0CA00
71
72#define SDUM_RETURNVAL_BASE (0x500)
73
74#define CONF_SYNC_OFF (0x602)
75#define CONF_SYNC_ON (0x603)
76
77#define CONF_DIRTYDETECTION_OFF (0x600)
78#define CONF_DIRTYDETECTION_ON (0x601)
79
80/* Set the corresponding bit. */
81#define BIT(n) (0x1U << (n))
82
83struct dumchannel_uf {
84 int channelnr;
85 u32 *dirty;
86 u32 *source;
87 u32 x_offset;
88 u32 y_offset;
89 u32 width;
90 u32 height;
91};
92
93enum {
94 FB_TYPE_YUV,
95 FB_TYPE_RGB
96};
97
98struct cmdstring {
99 int channelnr;
100 uint16_t prestringlen;
101 uint16_t poststringlen;
102 uint16_t format;
103 uint16_t reserved;
104 uint16_t startaddr_low;
105 uint16_t startaddr_high;
106 uint16_t pixdatlen_low;
107 uint16_t pixdatlen_high;
108 u32 precmd[MAX_NR_PRESTRINGS];
109 u32 postcmd[MAX_NR_POSTSTRINGS];
110
111};
112
113struct dumchannel {
114 int channelnr;
115 int dum_ch_min;
116 int dum_ch_max;
117 int dum_ch_conf;
118 int dum_ch_stat;
119 int dum_ch_ctrl;
120};
121
122int pnx4008_alloc_dum_channel(int dev_id);
123int pnx4008_free_dum_channel(int channr, int dev_id);
124
125int pnx4008_get_dum_channel_uf(struct dumchannel_uf *pChan_uf, int dev_id);
126int pnx4008_put_dum_channel_uf(struct dumchannel_uf chan_uf, int dev_id);
127
128int pnx4008_set_dum_channel_sync(int channr, int val, int dev_id);
129int pnx4008_set_dum_channel_dirty_detect(int channr, int val, int dev_id);
130
131int pnx4008_force_update_dum_channel(int channr, int dev_id);
132
133int pnx4008_get_dum_channel_config(int channr, int dev_id);
134
135int pnx4008_sdum_mmap(struct fb_info *info, struct vm_area_struct *vma, struct device *dev);
136int pnx4008_set_dum_exit_notification(int dev_id);
137
138int pnx4008_get_fb_addresses(int fb_type, void **virt_addr,
139 dma_addr_t * phys_addr, int *fb_length);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d0434406eaeb..f42e64210ee5 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -84,7 +84,7 @@ static struct linux_binfmt elf_format = {
84 .min_coredump = ELF_EXEC_PAGESIZE 84 .min_coredump = ELF_EXEC_PAGESIZE
85}; 85};
86 86
87#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE) 87#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
88 88
89static int set_brk(unsigned long start, unsigned long end) 89static int set_brk(unsigned long start, unsigned long end)
90{ 90{
@@ -394,7 +394,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
394 * <= p_memsz so it's only necessary to check p_memsz. 394 * <= p_memsz so it's only necessary to check p_memsz.
395 */ 395 */
396 k = load_addr + eppnt->p_vaddr; 396 k = load_addr + eppnt->p_vaddr;
397 if (k > TASK_SIZE || 397 if (BAD_ADDR(k) ||
398 eppnt->p_filesz > eppnt->p_memsz || 398 eppnt->p_filesz > eppnt->p_memsz ||
399 eppnt->p_memsz > TASK_SIZE || 399 eppnt->p_memsz > TASK_SIZE ||
400 TASK_SIZE - eppnt->p_memsz < k) { 400 TASK_SIZE - eppnt->p_memsz < k) {
@@ -887,7 +887,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
887 * allowed task size. Note that p_filesz must always be 887 * allowed task size. Note that p_filesz must always be
888 * <= p_memsz so it is only necessary to check p_memsz. 888 * <= p_memsz so it is only necessary to check p_memsz.
889 */ 889 */
890 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz || 890 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
891 elf_ppnt->p_memsz > TASK_SIZE || 891 elf_ppnt->p_memsz > TASK_SIZE ||
892 TASK_SIZE - elf_ppnt->p_memsz < k) { 892 TASK_SIZE - elf_ppnt->p_memsz < k) {
893 /* set_brk can never work. Avoid overflows. */ 893 /* set_brk can never work. Avoid overflows. */
@@ -941,10 +941,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
941 interpreter, 941 interpreter,
942 &interp_load_addr); 942 &interp_load_addr);
943 if (BAD_ADDR(elf_entry)) { 943 if (BAD_ADDR(elf_entry)) {
944 printk(KERN_ERR "Unable to load interpreter %.128s\n",
945 elf_interpreter);
946 force_sig(SIGSEGV, current); 944 force_sig(SIGSEGV, current);
947 retval = -ENOEXEC; /* Nobody gets to see this, but.. */ 945 retval = IS_ERR((void *)elf_entry) ?
946 (int)elf_entry : -EINVAL;
948 goto out_free_dentry; 947 goto out_free_dentry;
949 } 948 }
950 reloc_func_desc = interp_load_addr; 949 reloc_func_desc = interp_load_addr;
@@ -955,8 +954,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
955 } else { 954 } else {
956 elf_entry = loc->elf_ex.e_entry; 955 elf_entry = loc->elf_ex.e_entry;
957 if (BAD_ADDR(elf_entry)) { 956 if (BAD_ADDR(elf_entry)) {
958 send_sig(SIGSEGV, current, 0); 957 force_sig(SIGSEGV, current);
959 retval = -ENOEXEC; /* Nobody gets to see this, but.. */ 958 retval = -EINVAL;
960 goto out_free_dentry; 959 goto out_free_dentry;
961 } 960 }
962 } 961 }
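
The binfmt_elf hunks above tighten BAD_ADDR() from '>' to '>=' and then use it consistently: a mapping that starts exactly at TASK_SIZE has zero usable bytes below the limit, so the boundary value itself must be rejected. A small standalone check of that boundary case (TASK_SIZE stubbed to the classic i386 3 GB split; in the kernel it is per-architecture):

#include <assert.h>

#define TASK_SIZE       0xC0000000UL                      /* assumed, i386-style */
#define BAD_ADDR_OLD(x) ((unsigned long)(x) >  TASK_SIZE)
#define BAD_ADDR_NEW(x) ((unsigned long)(x) >= TASK_SIZE)

int main(void)
{
	unsigned long k = TASK_SIZE;  /* the boundary case */

	assert(!BAD_ADDR_OLD(k));     /* the old check let this through */
	assert(BAD_ADDR_NEW(k));      /* the new check rejects it */
	return 0;
}
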
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 9633a490dab0..37534573960b 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -739,7 +739,7 @@ static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
739 if (!bo) 739 if (!bo)
740 return -ENOMEM; 740 return -ENOMEM;
741 741
742 mutex_lock(&bdev->bd_mutex); 742 mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION);
743 res = bd_claim(bdev, holder); 743 res = bd_claim(bdev, holder);
744 if (res || !add_bd_holder(bdev, bo)) 744 if (res || !add_bd_holder(bdev, bo))
745 free_bd_holder(bo); 745 free_bd_holder(bo);
@@ -764,7 +764,7 @@ static void bd_release_from_kobject(struct block_device *bdev,
764 if (!kobj) 764 if (!kobj)
765 return; 765 return;
766 766
767 mutex_lock(&bdev->bd_mutex); 767 mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION);
768 bd_release(bdev); 768 bd_release(bdev);
769 if ((bo = del_bd_holder(bdev, kobj))) 769 if ((bo = del_bd_holder(bdev, kobj)))
770 free_bd_holder(bo); 770 free_bd_holder(bo);
@@ -822,6 +822,22 @@ struct block_device *open_by_devnum(dev_t dev, unsigned mode)
822 822
823EXPORT_SYMBOL(open_by_devnum); 823EXPORT_SYMBOL(open_by_devnum);
824 824
825static int
826blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags);
827
828struct block_device *open_partition_by_devnum(dev_t dev, unsigned mode)
829{
830 struct block_device *bdev = bdget(dev);
831 int err = -ENOMEM;
832 int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY;
833 if (bdev)
834 err = blkdev_get_partition(bdev, mode, flags);
835 return err ? ERR_PTR(err) : bdev;
836}
837
838EXPORT_SYMBOL(open_partition_by_devnum);
839
840
825/* 841/*
826 * This routine checks whether a removable media has been changed, 842 * This routine checks whether a removable media has been changed,
827 * and invalidates all buffer-cache-entries in that case. This 843 * and invalidates all buffer-cache-entries in that case. This
@@ -868,7 +884,11 @@ void bd_set_size(struct block_device *bdev, loff_t size)
868} 884}
869EXPORT_SYMBOL(bd_set_size); 885EXPORT_SYMBOL(bd_set_size);
870 886
871static int do_open(struct block_device *bdev, struct file *file) 887static int
888blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags);
889
890static int
891do_open(struct block_device *bdev, struct file *file, unsigned int subclass)
872{ 892{
873 struct module *owner = NULL; 893 struct module *owner = NULL;
874 struct gendisk *disk; 894 struct gendisk *disk;
@@ -885,7 +905,8 @@ static int do_open(struct block_device *bdev, struct file *file)
885 } 905 }
886 owner = disk->fops->owner; 906 owner = disk->fops->owner;
887 907
888 mutex_lock(&bdev->bd_mutex); 908 mutex_lock_nested(&bdev->bd_mutex, subclass);
909
889 if (!bdev->bd_openers) { 910 if (!bdev->bd_openers) {
890 bdev->bd_disk = disk; 911 bdev->bd_disk = disk;
891 bdev->bd_contains = bdev; 912 bdev->bd_contains = bdev;
@@ -912,11 +933,11 @@ static int do_open(struct block_device *bdev, struct file *file)
912 ret = -ENOMEM; 933 ret = -ENOMEM;
913 if (!whole) 934 if (!whole)
914 goto out_first; 935 goto out_first;
915 ret = blkdev_get(whole, file->f_mode, file->f_flags); 936 ret = blkdev_get_whole(whole, file->f_mode, file->f_flags);
916 if (ret) 937 if (ret)
917 goto out_first; 938 goto out_first;
918 bdev->bd_contains = whole; 939 bdev->bd_contains = whole;
919 mutex_lock(&whole->bd_mutex); 940 mutex_lock_nested(&whole->bd_mutex, BD_MUTEX_WHOLE);
920 whole->bd_part_count++; 941 whole->bd_part_count++;
921 p = disk->part[part - 1]; 942 p = disk->part[part - 1];
922 bdev->bd_inode->i_data.backing_dev_info = 943 bdev->bd_inode->i_data.backing_dev_info =
@@ -944,7 +965,8 @@ static int do_open(struct block_device *bdev, struct file *file)
944 if (bdev->bd_invalidated) 965 if (bdev->bd_invalidated)
945 rescan_partitions(bdev->bd_disk, bdev); 966 rescan_partitions(bdev->bd_disk, bdev);
946 } else { 967 } else {
947 mutex_lock(&bdev->bd_contains->bd_mutex); 968 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
969 BD_MUTEX_PARTITION);
948 bdev->bd_contains->bd_part_count++; 970 bdev->bd_contains->bd_part_count++;
949 mutex_unlock(&bdev->bd_contains->bd_mutex); 971 mutex_unlock(&bdev->bd_contains->bd_mutex);
950 } 972 }
@@ -985,11 +1007,49 @@ int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
985 fake_file.f_dentry = &fake_dentry; 1007 fake_file.f_dentry = &fake_dentry;
986 fake_dentry.d_inode = bdev->bd_inode; 1008 fake_dentry.d_inode = bdev->bd_inode;
987 1009
988 return do_open(bdev, &fake_file); 1010 return do_open(bdev, &fake_file, BD_MUTEX_NORMAL);
989} 1011}
990 1012
991EXPORT_SYMBOL(blkdev_get); 1013EXPORT_SYMBOL(blkdev_get);
992 1014
1015static int
1016blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags)
1017{
1018 /*
1019 * This crockload is due to bad choice of ->open() type.
1020 * It will go away.
1021 * For now, block device ->open() routine must _not_
1022 * examine anything in 'inode' argument except ->i_rdev.
1023 */
1024 struct file fake_file = {};
1025 struct dentry fake_dentry = {};
1026 fake_file.f_mode = mode;
1027 fake_file.f_flags = flags;
1028 fake_file.f_dentry = &fake_dentry;
1029 fake_dentry.d_inode = bdev->bd_inode;
1030
1031 return do_open(bdev, &fake_file, BD_MUTEX_WHOLE);
1032}
1033
1034static int
1035blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags)
1036{
1037 /*
1038 * This crockload is due to bad choice of ->open() type.
1039 * It will go away.
1040 * For now, block device ->open() routine must _not_
1041 * examine anything in 'inode' argument except ->i_rdev.
1042 */
1043 struct file fake_file = {};
1044 struct dentry fake_dentry = {};
1045 fake_file.f_mode = mode;
1046 fake_file.f_flags = flags;
1047 fake_file.f_dentry = &fake_dentry;
1048 fake_dentry.d_inode = bdev->bd_inode;
1049
1050 return do_open(bdev, &fake_file, BD_MUTEX_PARTITION);
1051}
1052
993static int blkdev_open(struct inode * inode, struct file * filp) 1053static int blkdev_open(struct inode * inode, struct file * filp)
994{ 1054{
995 struct block_device *bdev; 1055 struct block_device *bdev;
@@ -1005,7 +1065,7 @@ static int blkdev_open(struct inode * inode, struct file * filp)
1005 1065
1006 bdev = bd_acquire(inode); 1066 bdev = bd_acquire(inode);
1007 1067
1008 res = do_open(bdev, filp); 1068 res = do_open(bdev, filp, BD_MUTEX_NORMAL);
1009 if (res) 1069 if (res)
1010 return res; 1070 return res;
1011 1071
@@ -1019,13 +1079,13 @@ static int blkdev_open(struct inode * inode, struct file * filp)
1019 return res; 1079 return res;
1020} 1080}
1021 1081
1022int blkdev_put(struct block_device *bdev) 1082static int __blkdev_put(struct block_device *bdev, unsigned int subclass)
1023{ 1083{
1024 int ret = 0; 1084 int ret = 0;
1025 struct inode *bd_inode = bdev->bd_inode; 1085 struct inode *bd_inode = bdev->bd_inode;
1026 struct gendisk *disk = bdev->bd_disk; 1086 struct gendisk *disk = bdev->bd_disk;
1027 1087
1028 mutex_lock(&bdev->bd_mutex); 1088 mutex_lock_nested(&bdev->bd_mutex, subclass);
1029 lock_kernel(); 1089 lock_kernel();
1030 if (!--bdev->bd_openers) { 1090 if (!--bdev->bd_openers) {
1031 sync_blockdev(bdev); 1091 sync_blockdev(bdev);
@@ -1035,7 +1095,8 @@ int blkdev_put(struct block_device *bdev)
1035 if (disk->fops->release) 1095 if (disk->fops->release)
1036 ret = disk->fops->release(bd_inode, NULL); 1096 ret = disk->fops->release(bd_inode, NULL);
1037 } else { 1097 } else {
1038 mutex_lock(&bdev->bd_contains->bd_mutex); 1098 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
1099 subclass + 1);
1039 bdev->bd_contains->bd_part_count--; 1100 bdev->bd_contains->bd_part_count--;
1040 mutex_unlock(&bdev->bd_contains->bd_mutex); 1101 mutex_unlock(&bdev->bd_contains->bd_mutex);
1041 } 1102 }
@@ -1051,9 +1112,8 @@ int blkdev_put(struct block_device *bdev)
1051 } 1112 }
1052 bdev->bd_disk = NULL; 1113 bdev->bd_disk = NULL;
1053 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; 1114 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
1054 if (bdev != bdev->bd_contains) { 1115 if (bdev != bdev->bd_contains)
1055 blkdev_put(bdev->bd_contains); 1116 __blkdev_put(bdev->bd_contains, subclass + 1);
1056 }
1057 bdev->bd_contains = NULL; 1117 bdev->bd_contains = NULL;
1058 } 1118 }
1059 unlock_kernel(); 1119 unlock_kernel();
@@ -1062,8 +1122,20 @@ int blkdev_put(struct block_device *bdev)
1062 return ret; 1122 return ret;
1063} 1123}
1064 1124
1125int blkdev_put(struct block_device *bdev)
1126{
1127 return __blkdev_put(bdev, BD_MUTEX_NORMAL);
1128}
1129
1065EXPORT_SYMBOL(blkdev_put); 1130EXPORT_SYMBOL(blkdev_put);
1066 1131
1132int blkdev_put_partition(struct block_device *bdev)
1133{
1134 return __blkdev_put(bdev, BD_MUTEX_PARTITION);
1135}
1136
1137EXPORT_SYMBOL(blkdev_put_partition);
1138
1067static int blkdev_close(struct inode * inode, struct file * filp) 1139static int blkdev_close(struct inode * inode, struct file * filp)
1068{ 1140{
1069 struct block_device *bdev = I_BDEV(filp->f_mapping->host); 1141 struct block_device *bdev = I_BDEV(filp->f_mapping->host);
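
The block_dev.c changes are lockdep annotations: every bd_mutex is one lock class, but a partition's bd_mutex and its whole disk's bd_mutex are legitimately held together, so the open/put paths are split into *_whole/*_partition variants that pass BD_MUTEX_WHOLE/BD_MUTEX_PARTITION subclasses. The underlying ordering rule, partition lock before whole-disk lock, looks like this as a minimal pthread sketch (pthread mutexes standing in for bd_mutex; the subclasses only describe this order to the validator, they do not enforce it):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t whole_bd_mutex     = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t partition_bd_mutex = PTHREAD_MUTEX_INITIALIZER;

static void open_partition(void)
{
	/* kernel: mutex_lock_nested(&part->bd_mutex, BD_MUTEX_PARTITION) */
	pthread_mutex_lock(&partition_bd_mutex);
	/* kernel: mutex_lock_nested(&whole->bd_mutex, BD_MUTEX_WHOLE) */
	pthread_mutex_lock(&whole_bd_mutex);
	puts("partition open: holding partition lock, then whole-disk lock");
	pthread_mutex_unlock(&whole_bd_mutex);
	pthread_mutex_unlock(&partition_bd_mutex);
}

int main(void)
{
	open_partition();
	return 0;
}
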
diff --git a/fs/dcache.c b/fs/dcache.c
index c6e3535be192..1b4a3a34ec57 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -38,7 +38,7 @@ int sysctl_vfs_cache_pressure __read_mostly = 100;
38EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); 38EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
39 39
40 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); 40 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
41static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; 41static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
42 42
43EXPORT_SYMBOL(dcache_lock); 43EXPORT_SYMBOL(dcache_lock);
44 44
@@ -1339,10 +1339,10 @@ void d_move(struct dentry * dentry, struct dentry * target)
1339 */ 1339 */
1340 if (target < dentry) { 1340 if (target < dentry) {
1341 spin_lock(&target->d_lock); 1341 spin_lock(&target->d_lock);
1342 spin_lock(&dentry->d_lock); 1342 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1343 } else { 1343 } else {
1344 spin_lock(&dentry->d_lock); 1344 spin_lock(&dentry->d_lock);
1345 spin_lock(&target->d_lock); 1345 spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
1346 } 1346 }
1347 1347
1348 /* Move the dentry to the target hash queue, if on different bucket */ 1348 /* Move the dentry to the target hash queue, if on different bucket */
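
d_move() must hold two d_locks of the same class at once, so it orders the acquisitions by address and marks the second one with spin_lock_nested(..., DENTRY_D_LOCK_NESTED) so lockdep knows it is deliberate. A userspace sketch of the address-ordering trick (the kernel compares dentry addresses the same way; the *_nested annotation itself has no pthread equivalent):

#include <pthread.h>
#include <stdio.h>

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a > b) {                    /* normalize to address order */
		pthread_mutex_t *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(a);          /* lower address first ... */
	pthread_mutex_lock(b);          /* ... so two CPUs can never deadlock ABBA */
}

int main(void)
{
	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t y = PTHREAD_MUTEX_INITIALIZER;

	lock_pair(&x, &y);
	pthread_mutex_unlock(&x);
	pthread_mutex_unlock(&y);

	lock_pair(&y, &x);              /* reversed arguments, same internal order */
	pthread_mutex_unlock(&x);
	pthread_mutex_unlock(&y);
	puts("both argument orders resolve to one acquisition order");
	return 0;
}
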
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 538fb0418fba..5981e17f46f0 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -220,7 +220,8 @@ static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
220 if (dio->end_io && dio->result) 220 if (dio->end_io && dio->result)
221 dio->end_io(dio->iocb, offset, bytes, dio->map_bh.b_private); 221 dio->end_io(dio->iocb, offset, bytes, dio->map_bh.b_private);
222 if (dio->lock_type == DIO_LOCKING) 222 if (dio->lock_type == DIO_LOCKING)
223 up_read(&dio->inode->i_alloc_sem); 223 /* lockdep: non-owner release */
224 up_read_non_owner(&dio->inode->i_alloc_sem);
224} 225}
225 226
226/* 227/*
@@ -1261,7 +1262,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1261 } 1262 }
1262 1263
1263 if (dio_lock_type == DIO_LOCKING) 1264 if (dio_lock_type == DIO_LOCKING)
1264 down_read(&inode->i_alloc_sem); 1265 /* lockdep: a non-owner will release it */
1266 down_read_non_owner(&inode->i_alloc_sem);
1265 } 1267 }
1266 1268
1267 /* 1269 /*
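
The direct-io change documents a real quirk for lockdep: i_alloc_sem is read-acquired by the submitter but may be released from AIO completion, i.e. by a task that never took it. POSIX rwlocks forbid exactly that, so this sketch uses a counting semaphore, which, like an rwsem released through up_read_non_owner(), does not care which thread posts it:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t i_alloc_sem;   /* stand-in for the inode's rwsem */

static void *dio_complete_thread(void *arg)
{
	(void)arg;
	puts("completion context: releasing the lock the submitter took");
	sem_post(&i_alloc_sem);         /* kernel: up_read_non_owner() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&i_alloc_sem, 0, 1);
	sem_wait(&i_alloc_sem);         /* kernel: down_read_non_owner() */
	pthread_create(&t, NULL, dio_complete_thread, NULL);
	pthread_join(t, NULL);
	sem_wait(&i_alloc_sem);         /* the lock is free again */
	sem_post(&i_alloc_sem);
	sem_destroy(&i_alloc_sem);
	return 0;
}
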
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 9c677bbd0b08..19ffb043abbc 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -120,7 +120,7 @@ struct epoll_filefd {
120 */ 120 */
121struct wake_task_node { 121struct wake_task_node {
122 struct list_head llink; 122 struct list_head llink;
123 task_t *task; 123 struct task_struct *task;
124 wait_queue_head_t *wq; 124 wait_queue_head_t *wq;
125}; 125};
126 126
@@ -413,7 +413,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
413{ 413{
414 int wake_nests = 0; 414 int wake_nests = 0;
415 unsigned long flags; 415 unsigned long flags;
416 task_t *this_task = current; 416 struct task_struct *this_task = current;
417 struct list_head *lsthead = &psw->wake_task_list, *lnk; 417 struct list_head *lsthead = &psw->wake_task_list, *lnk;
418 struct wake_task_node *tncur; 418 struct wake_task_node *tncur;
419 struct wake_task_node tnode; 419 struct wake_task_node tnode;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 9f43879d6d68..f2702cda9779 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1157,7 +1157,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
1157 struct buffer_head tmp_bh; 1157 struct buffer_head tmp_bh;
1158 struct buffer_head *bh; 1158 struct buffer_head *bh;
1159 1159
1160 mutex_lock(&inode->i_mutex); 1160 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
1161 while (towrite > 0) { 1161 while (towrite > 0) {
1162 tocopy = sb->s_blocksize - offset < towrite ? 1162 tocopy = sb->s_blocksize - offset < towrite ?
1163 sb->s_blocksize - offset : towrite; 1163 sb->s_blocksize - offset : towrite;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index f2dd71336612..813d589cc6c0 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2614,7 +2614,7 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
2614 struct buffer_head *bh; 2614 struct buffer_head *bh;
2615 handle_t *handle = journal_current_handle(); 2615 handle_t *handle = journal_current_handle();
2616 2616
2617 mutex_lock(&inode->i_mutex); 2617 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2618 while (towrite > 0) { 2618 while (towrite > 0) {
2619 tocopy = sb->s_blocksize - offset < towrite ? 2619 tocopy = sb->s_blocksize - offset < towrite ?
2620 sb->s_blocksize - offset : towrite; 2620 sb->s_blocksize - offset : towrite;
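
ext2 and ext3 here (and reiserfs/ufs further down) annotate the quota file's i_mutex with the I_MUTEX_QUOTA subclass: the caller may already hold some other inode's i_mutex, and the subclass tells the validator this is a distinct, deliberate nesting level rather than same-class recursion. A standalone sketch of the subclass idea; the mutex type, lock function and subclass numbering below are stubs, not the kernel's:

#include <stdio.h>

enum { I_MUTEX_NORMAL, I_MUTEX_PARENT, I_MUTEX_CHILD, I_MUTEX_QUOTA };

struct mutex { int locked; };

static void mutex_lock_nested(struct mutex *m, int subclass)
{
	/* real lockdep records (class, subclass) in the held-lock stack */
	m->locked = 1;
	printf("acquired inode mutex at subclass %d\n", subclass);
}

int main(void)
{
	struct mutex inode_mutex = {0}, quota_inode_mutex = {0};

	mutex_lock_nested(&inode_mutex, I_MUTEX_NORMAL);
	/* holding one i_mutex while taking the quota file's i_mutex is legal;
	 * the distinct subclass keeps the validator from flagging it as
	 * same-class recursion: */
	mutex_lock_nested(&quota_inode_mutex, I_MUTEX_QUOTA);
	return 0;
}
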
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 9c2077e7e081..0ae3cd10702c 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -345,10 +345,8 @@ int jffs2_init_acl(struct inode *inode, struct inode *dir)
345 return rc; 345 return rc;
346} 346}
347 347
348void jffs2_clear_acl(struct inode *inode) 348void jffs2_clear_acl(struct jffs2_inode_info *f)
349{ 349{
350 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
351
352 if (f->i_acl_access && f->i_acl_access != JFFS2_ACL_NOT_CACHED) { 350 if (f->i_acl_access && f->i_acl_access != JFFS2_ACL_NOT_CACHED) {
353 posix_acl_release(f->i_acl_access); 351 posix_acl_release(f->i_acl_access);
354 f->i_acl_access = JFFS2_ACL_NOT_CACHED; 352 f->i_acl_access = JFFS2_ACL_NOT_CACHED;
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 8893bd1a6ba7..fa327dbd3171 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -30,7 +30,7 @@ struct jffs2_acl_header {
30extern int jffs2_permission(struct inode *, int, struct nameidata *); 30extern int jffs2_permission(struct inode *, int, struct nameidata *);
31extern int jffs2_acl_chmod(struct inode *); 31extern int jffs2_acl_chmod(struct inode *);
32extern int jffs2_init_acl(struct inode *, struct inode *); 32extern int jffs2_init_acl(struct inode *, struct inode *);
33extern void jffs2_clear_acl(struct inode *); 33extern void jffs2_clear_acl(struct jffs2_inode_info *);
34 34
35extern struct xattr_handler jffs2_acl_access_xattr_handler; 35extern struct xattr_handler jffs2_acl_access_xattr_handler;
36extern struct xattr_handler jffs2_acl_default_xattr_handler; 36extern struct xattr_handler jffs2_acl_default_xattr_handler;
@@ -40,6 +40,6 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
40#define jffs2_permission NULL 40#define jffs2_permission NULL
41#define jffs2_acl_chmod(inode) (0) 41#define jffs2_acl_chmod(inode) (0)
42#define jffs2_init_acl(inode,dir) (0) 42#define jffs2_init_acl(inode,dir) (0)
43#define jffs2_clear_acl(inode) 43#define jffs2_clear_acl(f)
44 44
45#endif /* CONFIG_JFFS2_FS_POSIX_ACL */ 45#endif /* CONFIG_JFFS2_FS_POSIX_ACL */
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 8310c95478e9..33f291005012 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -190,7 +190,7 @@ void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
190 kmem_cache_free(tmp_dnode_info_slab, x); 190 kmem_cache_free(tmp_dnode_info_slab, x);
191} 191}
192 192
193struct jffs2_raw_node_ref *jffs2_alloc_refblock(void) 193static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
194{ 194{
195 struct jffs2_raw_node_ref *ret; 195 struct jffs2_raw_node_ref *ret;
196 196
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index f752baa8d399..cae92c14116d 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -426,8 +426,6 @@ char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
426/* scan.c */ 426/* scan.c */
427int jffs2_scan_medium(struct jffs2_sb_info *c); 427int jffs2_scan_medium(struct jffs2_sb_info *c);
428void jffs2_rotate_lists(struct jffs2_sb_info *c); 428void jffs2_rotate_lists(struct jffs2_sb_info *c);
429int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
430 uint32_t ofs, uint32_t len);
431struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino); 429struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
432int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); 430int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
433int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size); 431int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index cc1899268c43..266423b2709d 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -968,6 +968,7 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
968 struct jffs2_full_dirent *fd, *fds; 968 struct jffs2_full_dirent *fd, *fds;
969 int deleted; 969 int deleted;
970 970
971 jffs2_clear_acl(f);
971 jffs2_xattr_delete_inode(c, f->inocache); 972 jffs2_xattr_delete_inode(c, f->inocache);
972 down(&f->sem); 973 down(&f->sem);
973 deleted = f->inocache && !f->inocache->nlink; 974 deleted = f->inocache && !f->inocache->nlink;
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 2bfdc33752d3..e2413466ddd5 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -274,8 +274,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
274 return ret; 274 return ret;
275} 275}
276 276
277int jffs2_fill_scan_buf (struct jffs2_sb_info *c, void *buf, 277static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
278 uint32_t ofs, uint32_t len) 278 uint32_t ofs, uint32_t len)
279{ 279{
280 int ret; 280 int ret;
281 size_t retlen; 281 size_t retlen;
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 18e66dbf23b4..25bc1ae08648 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -50,9 +50,10 @@
50 * is used to write xdatum to medium. xd->version will be incremented. 50 * is used to write xdatum to medium. xd->version will be incremented.
51 * create_xattr_datum(c, xprefix, xname, xvalue, xsize) 51 * create_xattr_datum(c, xprefix, xname, xvalue, xsize)
52 * is used to create new xdatum and write to medium. 52 * is used to create new xdatum and write to medium.
53 * delete_xattr_datum(c, xd) 53 * unrefer_xattr_datum(c, xd)
54 * is used to delete a xdatum. It marks xd JFFS2_XFLAGS_DEAD, and allows 54 * is used to delete an xdatum. When nothing refers to this xdatum any more,
55 * GC to reclaim those physical nodes. 55 * JFFS2_XFLAGS_DEAD is set on xd->flags and it is chained onto xattr_dead_list
56 * or released immediately. In the first case, the garbage collector releases it later.
56 * -------------------------------------------------- */ 57 * -------------------------------------------------- */
57static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize) 58static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize)
58{ 59{
@@ -394,22 +395,24 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
394 return xd; 395 return xd;
395} 396}
396 397
397static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) 398static void unrefer_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
398{ 399{
399 /* must be called under down_write(xattr_sem) */ 400 /* must be called under down_write(xattr_sem) */
400 BUG_ON(atomic_read(&xd->refcnt)); 401 if (atomic_dec_and_lock(&xd->refcnt, &c->erase_completion_lock)) {
402 uint32_t xid = xd->xid, version = xd->version;
401 403
402 unload_xattr_datum(c, xd); 404 unload_xattr_datum(c, xd);
403 xd->flags |= JFFS2_XFLAGS_DEAD; 405 xd->flags |= JFFS2_XFLAGS_DEAD;
404 spin_lock(&c->erase_completion_lock); 406 if (xd->node == (void *)xd) {
405 if (xd->node == (void *)xd) { 407 BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID));
406 BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID)); 408 jffs2_free_xattr_datum(xd);
407 jffs2_free_xattr_datum(xd); 409 } else {
408 } else { 410 list_add(&xd->xindex, &c->xattr_dead_list);
409 list_add(&xd->xindex, &c->xattr_dead_list); 411 }
412 spin_unlock(&c->erase_completion_lock);
413
414 dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", xid, version);
410 } 415 }
411 spin_unlock(&c->erase_completion_lock);
412 dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", xd->xid, xd->version);
413} 416}
414 417
415/* -------- xref related functions ------------------ 418/* -------- xref related functions ------------------
@@ -580,8 +583,7 @@ static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *re
580 dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n", 583 dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n",
581 ref->ino, ref->xid, ref->xseqno); 584 ref->ino, ref->xid, ref->xseqno);
582 585
583 if (atomic_dec_and_test(&xd->refcnt)) 586 unrefer_xattr_datum(c, xd);
584 delete_xattr_datum(c, xd);
585} 587}
586 588
587void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) 589void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
@@ -1119,8 +1121,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
1119 ref->next = c->xref_dead_list; 1121 ref->next = c->xref_dead_list;
1120 c->xref_dead_list = ref; 1122 c->xref_dead_list = ref;
1121 spin_unlock(&c->erase_completion_lock); 1123 spin_unlock(&c->erase_completion_lock);
1122 if (atomic_dec_and_test(&xd->refcnt)) 1124 unrefer_xattr_datum(c, xd);
1123 delete_xattr_datum(c, xd);
1124 } else { 1125 } else {
1125 ref->ic = ic; 1126 ref->ic = ic;
1126 ref->xd = xd; 1127 ref->xd = xd;
@@ -1156,8 +1157,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
1156 down_write(&c->xattr_sem); 1157 down_write(&c->xattr_sem);
1157 if (rc) { 1158 if (rc) {
1158 JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request); 1159 JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
1159 if (atomic_dec_and_test(&xd->refcnt)) 1160 unrefer_xattr_datum(c, xd);
1160 delete_xattr_datum(c, xd);
1161 up_write(&c->xattr_sem); 1161 up_write(&c->xattr_sem);
1162 return rc; 1162 return rc;
1163 } 1163 }
@@ -1170,8 +1170,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
1170 ic->xref = ref; 1170 ic->xref = ref;
1171 } 1171 }
1172 rc = PTR_ERR(newref); 1172 rc = PTR_ERR(newref);
1173 if (atomic_dec_and_test(&xd->refcnt)) 1173 unrefer_xattr_datum(c, xd);
1174 delete_xattr_datum(c, xd);
1175 } else if (ref) { 1174 } else if (ref) {
1176 delete_xattr_ref(c, ref); 1175 delete_xattr_ref(c, ref);
1177 } 1176 }
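
The jffs2 rework folds the repeated "drop a reference, tear down on zero" sequence into unrefer_xattr_datum(), built on atomic_dec_and_lock(): the spinlock is taken only when the count actually hits zero, so reaching zero and holding the lock happen as one atomic step, and the fast path never touches the lock. A runnable approximation, with C11 atomics and a pthread mutex standing in for atomic_t and erase_completion_lock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int atomic_dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	/* fast path: while the counter stays above zero, never take the lock */
	int old = atomic_load(cnt);

	while (old > 1)
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return 0;

	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) == 1)
		return 1;               /* hit zero: caller now holds the lock */
	pthread_mutex_unlock(lock);
	return 0;
}

int main(void)
{
	atomic_int refcnt = 2;
	pthread_mutex_t erase_completion_lock = PTHREAD_MUTEX_INITIALIZER;

	if (!atomic_dec_and_lock(&refcnt, &erase_completion_lock))
		puts("still referenced: nothing to free");
	if (atomic_dec_and_lock(&refcnt, &erase_completion_lock)) {
		puts("last reference dropped: free under the lock");
		pthread_mutex_unlock(&erase_completion_lock);
	}
	return 0;
}
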
diff --git a/fs/namei.c b/fs/namei.c
index c784e8bb57a3..c9750d755aff 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1423,7 +1423,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
1423 struct dentry *p; 1423 struct dentry *p;
1424 1424
1425 if (p1 == p2) { 1425 if (p1 == p2) {
1426 mutex_lock(&p1->d_inode->i_mutex); 1426 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
1427 return NULL; 1427 return NULL;
1428 } 1428 }
1429 1429
@@ -1431,22 +1431,22 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
1431 1431
1432 for (p = p1; p->d_parent != p; p = p->d_parent) { 1432 for (p = p1; p->d_parent != p; p = p->d_parent) {
1433 if (p->d_parent == p2) { 1433 if (p->d_parent == p2) {
1434 mutex_lock(&p2->d_inode->i_mutex); 1434 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
1435 mutex_lock(&p1->d_inode->i_mutex); 1435 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
1436 return p; 1436 return p;
1437 } 1437 }
1438 } 1438 }
1439 1439
1440 for (p = p2; p->d_parent != p; p = p->d_parent) { 1440 for (p = p2; p->d_parent != p; p = p->d_parent) {
1441 if (p->d_parent == p1) { 1441 if (p->d_parent == p1) {
1442 mutex_lock(&p1->d_inode->i_mutex); 1442 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
1443 mutex_lock(&p2->d_inode->i_mutex); 1443 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
1444 return p; 1444 return p;
1445 } 1445 }
1446 } 1446 }
1447 1447
1448 mutex_lock(&p1->d_inode->i_mutex); 1448 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
1449 mutex_lock(&p2->d_inode->i_mutex); 1449 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
1450 return NULL; 1450 return NULL;
1451} 1451}
1452 1452
@@ -1751,7 +1751,7 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
1751{ 1751{
1752 struct dentry *dentry = ERR_PTR(-EEXIST); 1752 struct dentry *dentry = ERR_PTR(-EEXIST);
1753 1753
1754 mutex_lock(&nd->dentry->d_inode->i_mutex); 1754 mutex_lock_nested(&nd->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
1755 /* 1755 /*
1756 * Yucky last component or no last component at all? 1756 * Yucky last component or no last component at all?
1757 * (foo/., foo/.., /////) 1757 * (foo/., foo/.., /////)
@@ -2008,7 +2008,7 @@ static long do_rmdir(int dfd, const char __user *pathname)
2008 error = -EBUSY; 2008 error = -EBUSY;
2009 goto exit1; 2009 goto exit1;
2010 } 2010 }
2011 mutex_lock(&nd.dentry->d_inode->i_mutex); 2011 mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2012 dentry = lookup_hash(&nd); 2012 dentry = lookup_hash(&nd);
2013 error = PTR_ERR(dentry); 2013 error = PTR_ERR(dentry);
2014 if (!IS_ERR(dentry)) { 2014 if (!IS_ERR(dentry)) {
@@ -2082,7 +2082,7 @@ static long do_unlinkat(int dfd, const char __user *pathname)
2082 error = -EISDIR; 2082 error = -EISDIR;
2083 if (nd.last_type != LAST_NORM) 2083 if (nd.last_type != LAST_NORM)
2084 goto exit1; 2084 goto exit1;
2085 mutex_lock(&nd.dentry->d_inode->i_mutex); 2085 mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
2086 dentry = lookup_hash(&nd); 2086 dentry = lookup_hash(&nd);
2087 error = PTR_ERR(dentry); 2087 error = PTR_ERR(dentry);
2088 if (!IS_ERR(dentry)) { 2088 if (!IS_ERR(dentry)) {
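
lock_rename() encodes the VFS rule the hunks above annotate: when two directories are related, the ancestor's i_mutex is taken first (I_MUTEX_PARENT) and the descendant's second (I_MUTEX_CHILD); for unrelated directories any fixed global order will do. The ancestor walk it relies on is easy to show standalone (struct dentry below is a two-field stub, with the root pointing at itself exactly as in the kernel):

#include <stdio.h>

struct dentry {
	const char    *name;
	struct dentry *d_parent;    /* the root points to itself */
};

/* returns 1 if 'anc' is an ancestor of 'd'; the kernel walks d_parent the same way */
static int is_ancestor(struct dentry *anc, struct dentry *d)
{
	struct dentry *p;

	for (p = d; p->d_parent != p; p = p->d_parent)
		if (p->d_parent == anc)
			return 1;
	return 0;
}

int main(void)
{
	struct dentry root = { "/",   &root };
	struct dentry a    = { "a",   &root };
	struct dentry b    = { "a/b", &a    };

	if (is_ancestor(&a, &b))
		printf("lock %s first (PARENT), then %s (CHILD)\n",
		       a.name, b.name);
	return 0;
}
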
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 4c86b7e1d1eb..d313f356e66a 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -367,6 +367,12 @@ static void ntfs_destroy_extent_inode(ntfs_inode *ni)
367 kmem_cache_free(ntfs_inode_cache, ni); 367 kmem_cache_free(ntfs_inode_cache, ni);
368} 368}
369 369
370/*
371 * The attribute runlist lock has separate locking rules from the
372 * normal runlist lock, so split the two lock-classes:
373 */
374static struct lock_class_key attr_list_rl_lock_class;
375
370/** 376/**
371 * __ntfs_init_inode - initialize ntfs specific part of an inode 377 * __ntfs_init_inode - initialize ntfs specific part of an inode
372 * @sb: super block of mounted volume 378 * @sb: super block of mounted volume
@@ -394,6 +400,8 @@ void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
394 ni->attr_list_size = 0; 400 ni->attr_list_size = 0;
395 ni->attr_list = NULL; 401 ni->attr_list = NULL;
396 ntfs_init_runlist(&ni->attr_list_rl); 402 ntfs_init_runlist(&ni->attr_list_rl);
403 lockdep_set_class(&ni->attr_list_rl.lock,
404 &attr_list_rl_lock_class);
397 ni->itype.index.bmp_ino = NULL; 405 ni->itype.index.bmp_ino = NULL;
398 ni->itype.index.block_size = 0; 406 ni->itype.index.block_size = 0;
399 ni->itype.index.vcn_size = 0; 407 ni->itype.index.vcn_size = 0;
@@ -405,6 +413,13 @@ void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
405 ni->ext.base_ntfs_ino = NULL; 413 ni->ext.base_ntfs_ino = NULL;
406} 414}
407 415
416/*
417 * Extent inodes get MFT-mapped in a nested way, while the base inode
418 * is still mapped. Teach this nesting to the lock validator by creating
419 * a separate class for nested inode's mrec_lock's:
420 */
421static struct lock_class_key extent_inode_mrec_lock_key;
422
408inline ntfs_inode *ntfs_new_extent_inode(struct super_block *sb, 423inline ntfs_inode *ntfs_new_extent_inode(struct super_block *sb,
409 unsigned long mft_no) 424 unsigned long mft_no)
410{ 425{
@@ -413,6 +428,7 @@ inline ntfs_inode *ntfs_new_extent_inode(struct super_block *sb,
413 ntfs_debug("Entering."); 428 ntfs_debug("Entering.");
414 if (likely(ni != NULL)) { 429 if (likely(ni != NULL)) {
415 __ntfs_init_inode(sb, ni); 430 __ntfs_init_inode(sb, ni);
431 lockdep_set_class(&ni->mrec_lock, &extent_inode_mrec_lock_key);
416 ni->mft_no = mft_no; 432 ni->mft_no = mft_no;
417 ni->type = AT_UNUSED; 433 ni->type = AT_UNUSED;
418 ni->name = NULL; 434 ni->name = NULL;
@@ -1722,6 +1738,15 @@ err_out:
1722 return err; 1738 return err;
1723} 1739}
1724 1740
1741/*
1742 * The MFT inode has special locking, so teach the lock validator
1743 * about this by splitting off the locking rules of the MFT from
1744 * the locking rules of other inodes. The MFT inode can never be
1745 * accessed from the VFS side (or even internally), only by the
1746 * map_mft functions.
1747 */
1748static struct lock_class_key mft_ni_runlist_lock_key, mft_ni_mrec_lock_key;
1749
1725/** 1750/**
1726 * ntfs_read_inode_mount - special read_inode for mount time use only 1751 * ntfs_read_inode_mount - special read_inode for mount time use only
1727 * @vi: inode to read 1752 * @vi: inode to read
@@ -2148,6 +2173,14 @@ int ntfs_read_inode_mount(struct inode *vi)
2148 ntfs_attr_put_search_ctx(ctx); 2173 ntfs_attr_put_search_ctx(ctx);
2149 ntfs_debug("Done."); 2174 ntfs_debug("Done.");
2150 ntfs_free(m); 2175 ntfs_free(m);
2176
2177 /*
2178 * Split the locking rules of the MFT inode from the
2179 * locking rules of other inodes:
2180 */
2181 lockdep_set_class(&ni->runlist.lock, &mft_ni_runlist_lock_key);
2182 lockdep_set_class(&ni->mrec_lock, &mft_ni_mrec_lock_key);
2183
2151 return 0; 2184 return 0;
2152 2185
2153em_put_err_out: 2186em_put_err_out:
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 0e14acea3f8b..74e0ee8fce72 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1724,6 +1724,14 @@ upcase_failed:
1724 return FALSE; 1724 return FALSE;
1725} 1725}
1726 1726
1727/*
1728 * The lcn and mft bitmap inodes are NTFS-internal inodes with
1729 * their own special locking rules:
1730 */
1731static struct lock_class_key
1732 lcnbmp_runlist_lock_key, lcnbmp_mrec_lock_key,
1733 mftbmp_runlist_lock_key, mftbmp_mrec_lock_key;
1734
1727/** 1735/**
1728 * load_system_files - open the system files using normal functions 1736 * load_system_files - open the system files using normal functions
1729 * @vol: ntfs super block describing device whose system files to load 1737 * @vol: ntfs super block describing device whose system files to load
@@ -1780,6 +1788,10 @@ static BOOL load_system_files(ntfs_volume *vol)
1780 ntfs_error(sb, "Failed to load $MFT/$BITMAP attribute."); 1788 ntfs_error(sb, "Failed to load $MFT/$BITMAP attribute.");
1781 goto iput_mirr_err_out; 1789 goto iput_mirr_err_out;
1782 } 1790 }
1791 lockdep_set_class(&NTFS_I(vol->mftbmp_ino)->runlist.lock,
1792 &mftbmp_runlist_lock_key);
1793 lockdep_set_class(&NTFS_I(vol->mftbmp_ino)->mrec_lock,
1794 &mftbmp_mrec_lock_key);
1783 /* Read upcase table and setup @vol->upcase and @vol->upcase_len. */ 1795 /* Read upcase table and setup @vol->upcase and @vol->upcase_len. */
1784 if (!load_and_init_upcase(vol)) 1796 if (!load_and_init_upcase(vol))
1785 goto iput_mftbmp_err_out; 1797 goto iput_mftbmp_err_out;
@@ -1802,6 +1814,11 @@ static BOOL load_system_files(ntfs_volume *vol)
1802 iput(vol->lcnbmp_ino); 1814 iput(vol->lcnbmp_ino);
1803 goto bitmap_failed; 1815 goto bitmap_failed;
1804 } 1816 }
1817 lockdep_set_class(&NTFS_I(vol->lcnbmp_ino)->runlist.lock,
1818 &lcnbmp_runlist_lock_key);
1819 lockdep_set_class(&NTFS_I(vol->lcnbmp_ino)->mrec_lock,
1820 &lcnbmp_mrec_lock_key);
1821
1805 NInoSetSparseDisabled(NTFS_I(vol->lcnbmp_ino)); 1822 NInoSetSparseDisabled(NTFS_I(vol->lcnbmp_ino));
1806 if ((vol->nr_clusters + 7) >> 3 > i_size_read(vol->lcnbmp_ino)) { 1823 if ((vol->nr_clusters + 7) >> 3 > i_size_read(vol->lcnbmp_ino)) {
1807 iput(vol->lcnbmp_ino); 1824 iput(vol->lcnbmp_ino);
@@ -2743,6 +2760,17 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2743 struct inode *tmp_ino; 2760 struct inode *tmp_ino;
2744 int blocksize, result; 2761 int blocksize, result;
2745 2762
2763 /*
2764 * We do a pretty difficult piece of bootstrap by reading the
2765 * MFT (and other metadata) from disk into memory. We'll only
2766 * release this metadata during umount, so the locking patterns
2767 * observed during bootstrap do not count. So turn off the
2768 * observation of locking patterns (strictly for this context
2769 * only) while mounting NTFS. [The validator is still active
2770 * otherwise, even for this context: it will for example record
2771 * lock class registrations.]
2772 */
2773 lockdep_off();
2746 ntfs_debug("Entering."); 2774 ntfs_debug("Entering.");
2747#ifndef NTFS_RW 2775#ifndef NTFS_RW
2748 sb->s_flags |= MS_RDONLY; 2776 sb->s_flags |= MS_RDONLY;
@@ -2754,6 +2782,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2754 if (!silent) 2782 if (!silent)
2755 ntfs_error(sb, "Allocation of NTFS volume structure " 2783 ntfs_error(sb, "Allocation of NTFS volume structure "
2756 "failed. Aborting mount..."); 2784 "failed. Aborting mount...");
2785 lockdep_on();
2757 return -ENOMEM; 2786 return -ENOMEM;
2758 } 2787 }
2759 /* Initialize ntfs_volume structure. */ 2788 /* Initialize ntfs_volume structure. */
@@ -2940,6 +2969,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2940 mutex_unlock(&ntfs_lock); 2969 mutex_unlock(&ntfs_lock);
2941 sb->s_export_op = &ntfs_export_ops; 2970 sb->s_export_op = &ntfs_export_ops;
2942 lock_kernel(); 2971 lock_kernel();
2972 lockdep_on();
2943 return 0; 2973 return 0;
2944 } 2974 }
2945 ntfs_error(sb, "Failed to allocate root directory."); 2975 ntfs_error(sb, "Failed to allocate root directory.");
@@ -3059,6 +3089,7 @@ err_out_now:
3059 sb->s_fs_info = NULL; 3089 sb->s_fs_info = NULL;
3060 kfree(vol); 3090 kfree(vol);
3061 ntfs_debug("Failed, returning -EINVAL."); 3091 ntfs_debug("Failed, returning -EINVAL.");
3092 lockdep_on();
3062 return -EINVAL; 3093 return -EINVAL;
3063} 3094}
3064 3095
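
The NTFS hunks all lean on lockdep_set_class(): lockdep groups locks into classes by their initialization site, and assigning a distinct static lock_class_key moves one particular instance (the MFT inode, the bitmap inodes, extent inodes) into its own class, so its special ordering rules never taint ordinary inodes. A stub sketch of what re-keying an instance means; none of these types are the kernel's:

#include <stdio.h>

struct lock_class_key { const char *name; };

struct mutex {
	int                          locked;
	const struct lock_class_key *class;   /* lockdep: which rules apply */
};

static void lockdep_set_class(struct mutex *m, const struct lock_class_key *key)
{
	m->class = key;    /* real lockdep re-registers the lock under this class */
}

static const struct lock_class_key inode_class  = { "ni->mrec_lock" };
static const struct lock_class_key mft_ni_class = { "mft_ni_mrec_lock" };

int main(void)
{
	struct mutex ordinary = { 0, &inode_class };
	struct mutex mft      = { 0, &inode_class };

	/* the MFT inode's lock obeys different rules: split it off */
	lockdep_set_class(&mft, &mft_ni_class);
	printf("ordinary inode class: %s\n", ordinary.class->name);
	printf("MFT inode class:      %s\n", mft.class->name);
	return 0;
}
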
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index af69f28277b6..4616ed50ffcd 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -107,7 +107,7 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount *
107{ 107{
108 struct vm_list_struct *vml; 108 struct vm_list_struct *vml;
109 struct vm_area_struct *vma; 109 struct vm_area_struct *vma;
110 struct task_struct *task = proc_task(inode); 110 struct task_struct *task = get_proc_task(inode);
111 struct mm_struct *mm = get_task_mm(task); 111 struct mm_struct *mm = get_task_mm(task);
112 int result = -ENOENT; 112 int result = -ENOENT;
113 113
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 28eb3c886034..5567328f1041 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2203,7 +2203,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
2203 size_t towrite = len; 2203 size_t towrite = len;
2204 struct buffer_head tmp_bh, *bh; 2204 struct buffer_head tmp_bh, *bh;
2205 2205
2206 mutex_lock(&inode->i_mutex); 2206 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2207 while (towrite > 0) { 2207 while (towrite > 0) {
2208 tocopy = sb->s_blocksize - offset < towrite ? 2208 tocopy = sb->s_blocksize - offset < towrite ?
2209 sb->s_blocksize - offset : towrite; 2209 sb->s_blocksize - offset : towrite;
diff --git a/fs/super.c b/fs/super.c
index 9b780c42d845..6d4e8174b6db 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -53,7 +53,7 @@ DEFINE_SPINLOCK(sb_lock);
53 * Allocates and initializes a new &struct super_block. alloc_super() 53 * Allocates and initializes a new &struct super_block. alloc_super()
54 * returns a pointer to a new superblock or %NULL if allocation failed. 54 * returns a pointer to a new superblock or %NULL if allocation failed.
55 */ 55 */
56static struct super_block *alloc_super(void) 56static struct super_block *alloc_super(struct file_system_type *type)
57{ 57{
58 struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER); 58 struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
59 static struct super_operations default_op; 59 static struct super_operations default_op;
@@ -72,6 +72,13 @@ static struct super_block *alloc_super(void)
72 INIT_LIST_HEAD(&s->s_inodes); 72 INIT_LIST_HEAD(&s->s_inodes);
73 init_rwsem(&s->s_umount); 73 init_rwsem(&s->s_umount);
74 mutex_init(&s->s_lock); 74 mutex_init(&s->s_lock);
75 lockdep_set_class(&s->s_umount, &type->s_umount_key);
76 /*
77 * The locking rules for s_lock are up to the
78 * filesystem. For example ext3fs has different
79 * lock ordering than usbfs:
80 */
81 lockdep_set_class(&s->s_lock, &type->s_lock_key);
75 down_write(&s->s_umount); 82 down_write(&s->s_umount);
76 s->s_count = S_BIAS; 83 s->s_count = S_BIAS;
77 atomic_set(&s->s_active, 1); 84 atomic_set(&s->s_active, 1);
@@ -295,7 +302,7 @@ retry:
295 } 302 }
296 if (!s) { 303 if (!s) {
297 spin_unlock(&sb_lock); 304 spin_unlock(&sb_lock);
298 s = alloc_super(); 305 s = alloc_super(type);
299 if (!s) 306 if (!s)
300 return ERR_PTR(-ENOMEM); 307 return ERR_PTR(-ENOMEM);
301 goto retry; 308 goto retry;
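
alloc_super() now keys s_umount and s_lock with lock_class_keys embedded in the static struct file_system_type, so every filesystem type automatically gets its own lock classes; as the added comment says, ext3's s_lock ordering must never be mixed up with usbfs's. A stub sketch of the one-key-per-type layout (names and types below are illustrative only, and only s_lock is shown):

#include <stdio.h>

struct lock_class_key { int dummy; };

struct file_system_type {
	const char            *name;
	struct lock_class_key  s_lock_key;   /* one lock class per fs type */
};

static struct file_system_type ext3_fs = { "ext3" };
static struct file_system_type usbfs   = { "usbfs" };

static void alloc_super(struct file_system_type *type)
{
	/* kernel: lockdep_set_class(&s->s_lock, &type->s_lock_key); */
	printf("superblock for %s: s_lock keyed at %p\n",
	       type->name, (void *)&type->s_lock_key);
}

int main(void)
{
	alloc_super(&ext3_fs);   /* ext3's s_lock rules ... */
	alloc_super(&usbfs);     /* ... never mix with usbfs's */
	return 0;
}
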
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 19a99726e58d..992ee0b87cc3 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1326,7 +1326,7 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type,
1326 size_t towrite = len; 1326 size_t towrite = len;
1327 struct buffer_head *bh; 1327 struct buffer_head *bh;
1328 1328
1329 mutex_lock(&inode->i_mutex); 1329 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
1330 while (towrite > 0) { 1330 while (towrite > 0) {
1331 tocopy = sb->s_blocksize - offset < towrite ? 1331 tocopy = sb->s_blocksize - offset < towrite ?
1332 sb->s_blocksize - offset : towrite; 1332 sb->s_blocksize - offset : towrite;
diff --git a/include/Kbuild b/include/Kbuild
new file mode 100644
index 000000000000..cb2534800b19
--- /dev/null
+++ b/include/Kbuild
@@ -0,0 +1,2 @@
1header-y += asm-generic/ linux/ scsi/ sound/ mtd/ rdma/ video/
2header-y += asm-$(ARCH)/
diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h
index 4bb38068f40d..f1ac6109556e 100644
--- a/include/acpi/acmacros.h
+++ b/include/acpi/acmacros.h
@@ -726,7 +726,7 @@
726 726
727#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__) 727#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
728#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__) 728#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
729#define ACPI_FREE(a) acpi_os_free(a) 729#define ACPI_FREE(a) kfree(a)
730#define ACPI_MEM_TRACKING(a) 730#define ACPI_MEM_TRACKING(a)
731 731
732#else 732#else
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 89bc4a16c2e8..0cd63bce0ae4 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -143,8 +143,6 @@ void acpi_os_release_mutex(acpi_mutex handle);
143 */ 143 */
144void *acpi_os_allocate(acpi_size size); 144void *acpi_os_allocate(acpi_size size);
145 145
146void acpi_os_free(void *memory);
147
148acpi_status 146acpi_status
149acpi_os_map_memory(acpi_physical_address physical_address, 147acpi_os_map_memory(acpi_physical_address physical_address,
150 acpi_size size, void __iomem ** logical_address); 148 acpi_size size, void __iomem ** logical_address);
diff --git a/include/asm-alpha/Kbuild b/include/asm-alpha/Kbuild
new file mode 100644
index 000000000000..e57fd57538b8
--- /dev/null
+++ b/include/asm-alpha/Kbuild
@@ -0,0 +1,5 @@
1include include/asm-generic/Kbuild.asm
2
3unifdef-y += console.h fpu.h sysinfo.h
4
5header-y += gentrap.h regdef.h pal.h reg.h
diff --git a/include/asm-alpha/rwsem.h b/include/asm-alpha/rwsem.h
index fafdd4f7010a..1570c0b54336 100644
--- a/include/asm-alpha/rwsem.h
+++ b/include/asm-alpha/rwsem.h
@@ -36,20 +36,11 @@ struct rw_semaphore {
36#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 36#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
37 spinlock_t wait_lock; 37 spinlock_t wait_lock;
38 struct list_head wait_list; 38 struct list_head wait_list;
39#if RWSEM_DEBUG
40 int debug;
41#endif
42}; 39};
43 40
44#if RWSEM_DEBUG
45#define __RWSEM_DEBUG_INIT , 0
46#else
47#define __RWSEM_DEBUG_INIT /* */
48#endif
49
50#define __RWSEM_INITIALIZER(name) \ 41#define __RWSEM_INITIALIZER(name) \
51 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 42 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
52 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT } 43 LIST_HEAD_INIT((name).wait_list) }
53 44
54#define DECLARE_RWSEM(name) \ 45#define DECLARE_RWSEM(name) \
55 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 46 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -59,9 +50,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
59 sem->count = RWSEM_UNLOCKED_VALUE; 50 sem->count = RWSEM_UNLOCKED_VALUE;
60 spin_lock_init(&sem->wait_lock); 51 spin_lock_init(&sem->wait_lock);
61 INIT_LIST_HEAD(&sem->wait_list); 52 INIT_LIST_HEAD(&sem->wait_list);
62#if RWSEM_DEBUG
63 sem->debug = 0;
64#endif
65} 53}
66 54
67static inline void __down_read(struct rw_semaphore *sem) 55static inline void __down_read(struct rw_semaphore *sem)
diff --git a/include/asm-arm/Kbuild b/include/asm-arm/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-arm/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-arm26/Kbuild b/include/asm-arm26/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-arm26/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-cris/Kbuild b/include/asm-cris/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-cris/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-frv/Kbuild b/include/asm-frv/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-frv/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
new file mode 100644
index 000000000000..70594b275a6e
--- /dev/null
+++ b/include/asm-generic/Kbuild
@@ -0,0 +1,3 @@
1header-y += atomic.h errno-base.h errno.h fcntl.h ioctl.h ipc.h mman.h \
2 signal.h statfs.h
3unifdef-y := resource.h siginfo.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
new file mode 100644
index 000000000000..d8d0bcecd23f
--- /dev/null
+++ b/include/asm-generic/Kbuild.asm
@@ -0,0 +1,11 @@
1unifdef-y += a.out.h auxvec.h byteorder.h errno.h fcntl.h ioctl.h \
2 ioctls.h ipcbuf.h irq.h mman.h msgbuf.h param.h poll.h \
3 posix_types.h ptrace.h resource.h sembuf.h shmbuf.h shmparam.h \
4 sigcontext.h siginfo.h signal.h socket.h sockios.h stat.h \
5 statfs.h termbits.h termios.h timex.h types.h unistd.h user.h
6
7# These really shouldn't be exported
8unifdef-y += atomic.h io.h
9
10# These probably shouldn't be exported
11unifdef-y += elf.h page.h
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 6f178563e336..09204e40d663 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -24,7 +24,9 @@ typedef u64 cputime64_t;
24 24
25#define cputime64_zero (0ULL) 25#define cputime64_zero (0ULL)
26#define cputime64_add(__a, __b) ((__a) + (__b)) 26#define cputime64_add(__a, __b) ((__a) + (__b))
27#define cputime64_sub(__a, __b) ((__a) - (__b))
27#define cputime64_to_jiffies64(__ct) (__ct) 28#define cputime64_to_jiffies64(__ct) (__ct)
29#define jiffies64_to_cputime64(__jif) (__jif)
28#define cputime_to_cputime64(__ct) ((u64) __ct) 30#define cputime_to_cputime64(__ct) ((u64) __ct)
29 31
30 32
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index 5cf8b7ce0c45..254a126ede5c 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -10,15 +10,10 @@
10#ifndef _ASM_GENERIC_MUTEX_NULL_H 10#ifndef _ASM_GENERIC_MUTEX_NULL_H
11#define _ASM_GENERIC_MUTEX_NULL_H 11#define _ASM_GENERIC_MUTEX_NULL_H
12 12
13/* extra parameter only needed for mutex debugging: */ 13#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
14#ifndef __IP__ 14#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count)
15# define __IP__ 15#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
16#endif 16#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
17 17#define __mutex_slowpath_needs_to_unlock() 1
18#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count __RET_IP__)
19#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count __RET_IP__)
20#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count __RET_IP__)
21#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
22#define __mutex_slowpath_needs_to_unlock() 1
23 18
24#endif 19#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index c74521157461..e160e04290fb 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -7,6 +7,8 @@
7 7
8extern unsigned long __per_cpu_offset[NR_CPUS]; 8extern unsigned long __per_cpu_offset[NR_CPUS];
9 9
10#define per_cpu_offset(x) (__per_cpu_offset[x])
11
10/* Separate out the type, so (int[3], foo) works. */ 12/* Separate out the type, so (int[3], foo) works. */
11#define DEFINE_PER_CPU(type, name) \ 13#define DEFINE_PER_CPU(type, name) \
12 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name 14 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
diff --git a/include/asm-h8300/Kbuild b/include/asm-h8300/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-h8300/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
new file mode 100644
index 000000000000..c064a8e9170f
--- /dev/null
+++ b/include/asm-i386/Kbuild
@@ -0,0 +1,5 @@
1include include/asm-generic/Kbuild.asm
2
3header-y += boot.h cpufeature.h debugreg.h ldt.h setup.h ucontext.h
4
5unifdef-y += mtrr.h vm86.h
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
new file mode 100644
index 000000000000..e1bdb97c07fa
--- /dev/null
+++ b/include/asm-i386/irqflags.h
@@ -0,0 +1,127 @@
1/*
2 * include/asm-i386/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15static inline unsigned long __raw_local_save_flags(void)
16{
17 unsigned long flags;
18
19 __asm__ __volatile__(
20 "pushfl ; popl %0"
21 : "=g" (flags)
22 : /* no input */
23 );
24
25 return flags;
26}
27
28#define raw_local_save_flags(flags) \
29 do { (flags) = __raw_local_save_flags(); } while (0)
30
31static inline void raw_local_irq_restore(unsigned long flags)
32{
33 __asm__ __volatile__(
34 "pushl %0 ; popfl"
35 : /* no output */
36 :"g" (flags)
37 :"memory", "cc"
38 );
39}
40
41static inline void raw_local_irq_disable(void)
42{
43 __asm__ __volatile__("cli" : : : "memory");
44}
45
46static inline void raw_local_irq_enable(void)
47{
48 __asm__ __volatile__("sti" : : : "memory");
49}
50
51/*
52 * Used in the idle loop; sti takes one instruction cycle
53 * to complete:
54 */
55static inline void raw_safe_halt(void)
56{
57 __asm__ __volatile__("sti; hlt" : : : "memory");
58}
59
60/*
61 * Used when interrupts are already enabled or to
62 * shutdown the processor:
63 */
64static inline void halt(void)
65{
66 __asm__ __volatile__("hlt": : :"memory");
67}
68
69static inline int raw_irqs_disabled_flags(unsigned long flags)
70{
71 return !(flags & (1 << 9));
72}
73
74static inline int raw_irqs_disabled(void)
75{
76 unsigned long flags = __raw_local_save_flags();
77
78 return raw_irqs_disabled_flags(flags);
79}
80
81/*
82 * For spinlocks, etc:
83 */
84static inline unsigned long __raw_local_irq_save(void)
85{
86 unsigned long flags = __raw_local_save_flags();
87
88 raw_local_irq_disable();
89
90 return flags;
91}
92
93#define raw_local_irq_save(flags) \
94 do { (flags) = __raw_local_irq_save(); } while (0)
95
96#endif /* __ASSEMBLY__ */
97
98/*
99 * Do the CPU's IRQ-state tracing from assembly code. We call a
100 * C function, so save all the C-clobbered registers:
101 */
102#ifdef CONFIG_TRACE_IRQFLAGS
103
104# define TRACE_IRQS_ON \
105 pushl %eax; \
106 pushl %ecx; \
107 pushl %edx; \
108 call trace_hardirqs_on; \
109 popl %edx; \
110 popl %ecx; \
111 popl %eax;
112
113# define TRACE_IRQS_OFF \
114 pushl %eax; \
115 pushl %ecx; \
116 pushl %edx; \
117 call trace_hardirqs_off; \
118 popl %edx; \
119 popl %ecx; \
120 popl %eax;
121
122#else
123# define TRACE_IRQS_ON
124# define TRACE_IRQS_OFF
125#endif
126
127#endif
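
The new irqflags.h wraps the EFLAGS-based primitives behind raw_local_irq_*() so generic code can wrap and trace them. The only piece that can be exercised outside ring 0 is the predicate: bit 9 of EFLAGS is IF, and raw_irqs_disabled_flags() simply tests for IF clear. A userspace check (the flag values are made up for the test; cli/sti themselves are privileged and cannot run here):

#include <assert.h>

#define X86_EFLAGS_IF (1UL << 9)

static int raw_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

int main(void)
{
	assert(raw_irqs_disabled_flags(0x002UL));                  /* IF clear */
	assert(!raw_irqs_disabled_flags(0x002UL | X86_EFLAGS_IF)); /* IF set   */
	return 0;
}
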
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index be4ab859238e..2f07601562e7 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -40,6 +40,7 @@
40 40
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/spinlock.h> 42#include <linux/spinlock.h>
43#include <linux/lockdep.h>
43 44
44struct rwsem_waiter; 45struct rwsem_waiter;
45 46
@@ -61,36 +62,34 @@ struct rw_semaphore {
61#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 62#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
62 spinlock_t wait_lock; 63 spinlock_t wait_lock;
63 struct list_head wait_list; 64 struct list_head wait_list;
64#if RWSEM_DEBUG 65#ifdef CONFIG_DEBUG_LOCK_ALLOC
65 int debug; 66 struct lockdep_map dep_map;
66#endif 67#endif
67}; 68};
68 69
69/* 70#ifdef CONFIG_DEBUG_LOCK_ALLOC
70 * initialisation 71# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
71 */
72#if RWSEM_DEBUG
73#define __RWSEM_DEBUG_INIT , 0
74#else 72#else
75#define __RWSEM_DEBUG_INIT /* */ 73# define __RWSEM_DEP_MAP_INIT(lockname)
76#endif 74#endif
77 75
76
78#define __RWSEM_INITIALIZER(name) \ 77#define __RWSEM_INITIALIZER(name) \
79{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ 78{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
80 __RWSEM_DEBUG_INIT } 79 __RWSEM_DEP_MAP_INIT(name) }
81 80
82#define DECLARE_RWSEM(name) \ 81#define DECLARE_RWSEM(name) \
83 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
84 83
85static inline void init_rwsem(struct rw_semaphore *sem) 84extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
86{ 85 struct lock_class_key *key);
87 sem->count = RWSEM_UNLOCKED_VALUE; 86
88 spin_lock_init(&sem->wait_lock); 87#define init_rwsem(sem) \
89 INIT_LIST_HEAD(&sem->wait_list); 88do { \
90#if RWSEM_DEBUG 89 static struct lock_class_key __key; \
91 sem->debug = 0; 90 \
92#endif 91 __init_rwsem((sem), #sem, &__key); \
93} 92} while (0)
94 93
95/* 94/*
96 * lock for reading 95 * lock for reading
@@ -143,7 +142,7 @@ LOCK_PREFIX " cmpxchgl %2,%0\n\t"
143/* 142/*
144 * lock for writing 143 * lock for writing
145 */ 144 */
146static inline void __down_write(struct rw_semaphore *sem) 145static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
147{ 146{
148 int tmp; 147 int tmp;
149 148
@@ -167,6 +166,11 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
167 : "memory", "cc"); 166 : "memory", "cc");
168} 167}
169 168
169static inline void __down_write(struct rw_semaphore *sem)
170{
171 __down_write_nested(sem, 0);
172}
173
170/* 174/*
171 * trylock for writing -- returns 1 if successful, 0 if contention 175 * trylock for writing -- returns 1 if successful, 0 if contention
172 */ 176 */
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 04ba30234c48..87c40f830653 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -31,6 +31,11 @@
31 "jmp 1b\n" \ 31 "jmp 1b\n" \
32 "3:\n\t" 32 "3:\n\t"
33 33
34/*
35 * NOTE: there's an irqs-on section here, which normally would have to be
36 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
37 * __raw_spin_lock_string_flags().
38 */
34#define __raw_spin_lock_string_flags \ 39#define __raw_spin_lock_string_flags \
35 "\n1:\t" \ 40 "\n1:\t" \
36 "lock ; decb %0\n\t" \ 41 "lock ; decb %0\n\t" \
@@ -63,6 +68,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
63 "=m" (lock->slock) : : "memory"); 68 "=m" (lock->slock) : : "memory");
64} 69}
65 70
71/*
72 * It is easier for the lock validator if interrupts are not re-enabled
73 * in the middle of a lock-acquire. This is a performance feature anyway
74 * so we turn it off:
75 */
76#ifndef CONFIG_PROVE_LOCKING
66static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 77static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
67{ 78{
68 alternative_smp( 79 alternative_smp(
@@ -70,6 +81,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
70 __raw_spin_lock_string_up, 81 __raw_spin_lock_string_up,
71 "=m" (lock->slock) : "r" (flags) : "memory"); 82 "=m" (lock->slock) : "r" (flags) : "memory");
72} 83}
84#endif
73 85
74static inline int __raw_spin_trylock(raw_spinlock_t *lock) 86static inline int __raw_spin_trylock(raw_spinlock_t *lock)
75{ 87{
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index cab0180567f9..db398d88b1d9 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -456,25 +456,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
456 456
457#define set_wmb(var, value) do { var = value; wmb(); } while (0) 457#define set_wmb(var, value) do { var = value; wmb(); } while (0)
458 458
459/* interrupt control.. */ 459#include <linux/irqflags.h>
460#define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
461#define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
462#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
463#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
464/* used in the idle loop; sti takes one instruction cycle to complete */
465#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
466/* used when interrupts are already enabled or to shutdown the processor */
467#define halt() __asm__ __volatile__("hlt": : :"memory")
468
469#define irqs_disabled() \
470({ \
471 unsigned long flags; \
472 local_save_flags(flags); \
473 !(flags & (1<<9)); \
474})
475
476/* For spinlocks etc */
477#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
478 460
479/* 461/*
480 * disable hlt during certain critical i/o operations 462 * disable hlt during certain critical i/o operations
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
new file mode 100644
index 000000000000..85d6f8005eb4
--- /dev/null
+++ b/include/asm-ia64/Kbuild
@@ -0,0 +1,7 @@
1include include/asm-generic/Kbuild.asm
2
3header-y += break.h fpu.h fpswa.h gcc_intrin.h ia64regs.h \
4 intel_intrin.h intrinsics.h perfmon_default_smpl.h \
5 ptrace_offsets.h rse.h setup.h ucontext.h
6
7unifdef-y += perfmon.h
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index 8acb00190d5a..79479e2c6966 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -14,8 +14,6 @@
14#define NR_IRQS 256 14#define NR_IRQS 256
15#define NR_IRQ_VECTORS NR_IRQS 15#define NR_IRQ_VECTORS NR_IRQS
16 16
17#define IRQF_PERCPU 0x02000000
18
19static __inline__ int 17static __inline__ int
20irq_canonicalize (int irq) 18irq_canonicalize (int irq)
21{ 19{
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index 24d898b650c5..fbe5cf3ab8dc 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -36,6 +36,7 @@
36#ifdef CONFIG_SMP 36#ifdef CONFIG_SMP
37 37
38extern unsigned long __per_cpu_offset[NR_CPUS]; 38extern unsigned long __per_cpu_offset[NR_CPUS];
39#define per_cpu_offset(x) (__per_cpu_offset(x))
39 40
40/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */ 41/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
41DECLARE_PER_CPU(unsigned long, local_per_cpu_offset); 42DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
index 1327c91ea39c..2d1640cc240a 100644
--- a/include/asm-ia64/rwsem.h
+++ b/include/asm-ia64/rwsem.h
@@ -33,9 +33,6 @@ struct rw_semaphore {
33 signed long count; 33 signed long count;
34 spinlock_t wait_lock; 34 spinlock_t wait_lock;
35 struct list_head wait_list; 35 struct list_head wait_list;
36#if RWSEM_DEBUG
37 int debug;
38#endif
39}; 36};
40 37
41#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000) 38#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
@@ -45,19 +42,9 @@ struct rw_semaphore {
45#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS 42#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
46#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 43#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
47 44
48/*
49 * initialization
50 */
51#if RWSEM_DEBUG
52#define __RWSEM_DEBUG_INIT , 0
53#else
54#define __RWSEM_DEBUG_INIT /* */
55#endif
56
57#define __RWSEM_INITIALIZER(name) \ 45#define __RWSEM_INITIALIZER(name) \
58 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 46 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
59 LIST_HEAD_INIT((name).wait_list) \ 47 LIST_HEAD_INIT((name).wait_list) }
60 __RWSEM_DEBUG_INIT }
61 48
62#define DECLARE_RWSEM(name) \ 49#define DECLARE_RWSEM(name) \
63 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 50 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -73,9 +60,6 @@ init_rwsem (struct rw_semaphore *sem)
73 sem->count = RWSEM_UNLOCKED_VALUE; 60 sem->count = RWSEM_UNLOCKED_VALUE;
74 spin_lock_init(&sem->wait_lock); 61 spin_lock_init(&sem->wait_lock);
75 INIT_LIST_HEAD(&sem->wait_list); 62 INIT_LIST_HEAD(&sem->wait_list);
76#if RWSEM_DEBUG
77 sem->debug = 0;
78#endif
79} 63}
80 64
81/* 65/*
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 8bc9869e5765..8adcde0934ca 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -68,7 +68,7 @@ struct thread_info {
68#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) 68#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
69 69
70#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 70#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
71#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) 71#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
72#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) 72#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
73 73
74#endif /* !__ASSEMBLY */ 74#endif /* !__ASSEMBLY */
diff --git a/include/asm-m32r/Kbuild b/include/asm-m32r/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-m32r/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 66c4742f09e7..311cebf44eff 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -18,7 +18,7 @@
18 * switch_to(prev, next) should switch from task `prev' to `next' 18 * switch_to(prev, next) should switch from task `prev' to `next'
19 * `prev' will never be the same as `next'. 19 * `prev' will never be the same as `next'.
20 * 20 *
21 * `next' and `prev' should be task_t, but it isn't always defined 21 * `next' and `prev' should be struct task_struct, but it isn't always defined
22 */ 22 */
23 23
24#define switch_to(prev, next, last) do { \ 24#define switch_to(prev, next, last) do { \
diff --git a/include/asm-m68k/Kbuild b/include/asm-m68k/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-m68k/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-m68knommu/Kbuild b/include/asm-m68knommu/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-m68knommu/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-mips/Kbuild b/include/asm-mips/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-mips/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-parisc/Kbuild b/include/asm-parisc/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-parisc/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-powerpc/Kbuild b/include/asm-powerpc/Kbuild
new file mode 100644
index 000000000000..ac61d7eb6021
--- /dev/null
+++ b/include/asm-powerpc/Kbuild
@@ -0,0 +1,10 @@
1include include/asm-generic/Kbuild.asm
2
3unifdef-y += a.out.h asm-compat.h bootx.h byteorder.h cputable.h elf.h \
4 nvram.h param.h posix_types.h ptrace.h seccomp.h signal.h \
5 termios.h types.h unistd.h
6
7header-y += auxvec.h ioctls.h mman.h sembuf.h siginfo.h stat.h errno.h \
8 ipcbuf.h msgbuf.h shmbuf.h socket.h termbits.h fcntl.h ipc.h \
9 poll.h shmparam.h sockios.h ucontext.h ioctl.h linkage.h \
10 resource.h sigcontext.h statfs.h
diff --git a/include/asm-powerpc/i8259.h b/include/asm-powerpc/i8259.h
index 0392159e16e4..c80e113052cd 100644
--- a/include/asm-powerpc/i8259.h
+++ b/include/asm-powerpc/i8259.h
@@ -4,11 +4,13 @@
4 4
5#include <linux/irq.h> 5#include <linux/irq.h>
6 6
7extern struct hw_interrupt_type i8259_pic; 7#ifdef CONFIG_PPC_MERGE
8 8extern void i8259_init(struct device_node *node, unsigned long intack_addr);
9extern unsigned int i8259_irq(struct pt_regs *regs);
10#else
9extern void i8259_init(unsigned long intack_addr, int offset); 11extern void i8259_init(unsigned long intack_addr, int offset);
10extern int i8259_irq(struct pt_regs *regs); 12extern int i8259_irq(struct pt_regs *regs);
11extern int i8259_irq_cascade(struct pt_regs *regs, void *unused); 13#endif
12 14
13#endif /* __KERNEL__ */ 15#endif /* __KERNEL__ */
14#endif /* _ASM_POWERPC_I8259_H */ 16#endif /* _ASM_POWERPC_I8259_H */
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index eb5f33e1977a..e05754752028 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -9,26 +9,14 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#include <linux/config.h>
12#include <linux/threads.h> 13#include <linux/threads.h>
14#include <linux/list.h>
15#include <linux/radix-tree.h>
13 16
14#include <asm/types.h> 17#include <asm/types.h>
15#include <asm/atomic.h> 18#include <asm/atomic.h>
16 19
17/* this number is used when no interrupt has been assigned */
18#define NO_IRQ (-1)
19
20/*
21 * These constants are used for passing information about interrupt
22 * signal polarity and level/edge sensing to the low-level PIC chip
23 * drivers.
24 */
25#define IRQ_SENSE_MASK 0x1
26#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
27#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
28
29#define IRQ_POLARITY_MASK 0x2
30#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
31#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
32 20
33#define get_irq_desc(irq) (&irq_desc[(irq)]) 21#define get_irq_desc(irq) (&irq_desc[(irq)])
34 22
@@ -36,50 +24,325 @@
36#define for_each_irq(i) \ 24#define for_each_irq(i) \
37 for ((i) = 0; (i) < NR_IRQS; ++(i)) 25 for ((i) = 0; (i) < NR_IRQS; ++(i))
38 26
39#ifdef CONFIG_PPC64 27extern atomic_t ppc_n_lost_interrupts;
40 28
41/* 29#ifdef CONFIG_PPC_MERGE
42 * Maximum number of interrupt sources that we can handle. 30
31/* This number is used when no interrupt has been assigned */
32#define NO_IRQ (0)
33
34/* This is a special irq number to return from get_irq() to tell that
35 * no interrupt happened _and_ ignore it (don't count it as bad). Some
36 * platforms like iSeries rely on that.
43 */ 37 */
38#define NO_IRQ_IGNORE ((unsigned int)-1)
39
40/* Total number of virqs in the platform (make it a CONFIG_* option?) */
44#define NR_IRQS 512 41#define NR_IRQS 512
45 42
46/* Interrupt numbers are virtual in case they are sparsely 43/* Number of irqs reserved for the legacy controller */
47 * distributed by the hardware. 44#define NUM_ISA_INTERRUPTS 16
45
46/* This type is the placeholder for a hardware interrupt number. It has to
47 * be big enough to enclose whatever representation is used by a given
48 * platform.
49 */
50typedef unsigned long irq_hw_number_t;
51
52/* Interrupt controller "host" data structure. This could be defined as an
53 * irq domain controller. That is, it handles the mapping between hardware
54 * and virtual interrupt numbers for a given interrupt domain. The host
55 * structure is generally created by the PIC code for a given PIC instance
56 * (though a host can cover more than one PIC if they have a flat number
57 * model). It's the host callbacks that are responsible for setting the
58 * irq_chip on a given irq_desc after it's been mapped.
59 *
60 * The host code and data structures are fairly agnostic to the fact that
61 * we use an open firmware device-tree. We do have references to struct
62 * device_node in two places: in irq_find_host() to find the host matching
63 * a given interrupt controller node, and of course as an argument to its
64 * counterpart host->ops->match() callback. However, those are treated as
65 * generic pointers by the core and the fact that it's actually a device-node
66 * pointer is purely a convention between callers and implementation. This
67 * code could thus be used on other architectures by replacing those two
68 * by some sort of arch-specific void * "token" used to identify interrupt
69 * controllers.
48 */ 70 */
49extern unsigned int virt_irq_to_real_map[NR_IRQS]; 71struct irq_host;
72struct radix_tree_root;
50 73
51/* The maximum virtual IRQ number that we support. This 74/* Functions below are provided by the host and called whenever a new mapping
52 * can be set by the platform and will be reduced by the 75 * is created or an old mapping is disposed. The host can then proceed to
53 * value of __irq_offset_value. It defaults to and is 76 * whatever internal data structure management is required. It also needs
54 * capped by (NR_IRQS - 1). 77 * to set up the irq_desc when returning from map().
55 */ 78 */
56extern unsigned int virt_irq_max; 79struct irq_host_ops {
80 /* Match an interrupt controller device node to a host, returns
81 * 1 on a match
82 */
83 int (*match)(struct irq_host *h, struct device_node *node);
84
85 /* Create or update a mapping between a virtual irq number and a hw
86 * irq number. This can be called several times for the same mapping
87 * but with different flags, though unmap shall always be called
88 * before the virq->hw mapping is changed.
89 */
90 int (*map)(struct irq_host *h, unsigned int virq,
91 irq_hw_number_t hw, unsigned int flags);
92
93 /* Dispose of such a mapping */
94 void (*unmap)(struct irq_host *h, unsigned int virq);
95
96 /* Translate device-tree interrupt specifier from raw format coming
97 * from the firmware to a irq_hw_number_t (interrupt line number) and
98 * trigger flags that can be passed to irq_create_mapping().
99 * If no translation is provided, raw format is assumed to be one cell
100 * for interrupt line and default sense.
101 */
102 int (*xlate)(struct irq_host *h, struct device_node *ctrler,
103 u32 *intspec, unsigned int intsize,
104 irq_hw_number_t *out_hwirq, unsigned int *out_flags);
105};
106
107struct irq_host {
108 struct list_head link;
109
110 /* type of reverse mapping technique */
111 unsigned int revmap_type;
112#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
113#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
114#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
115#define IRQ_HOST_MAP_TREE 3 /* radix tree */
116 union {
117 struct {
118 unsigned int size;
119 unsigned int *revmap;
120 } linear;
121 struct radix_tree_root tree;
122 } revmap_data;
123 struct irq_host_ops *ops;
124 void *host_data;
125 irq_hw_number_t inval_irq;
126};
127
128/* The main irq map itself is an array of NR_IRQS entries containing the
129 * associated host and irq number. An entry with a host of NULL is free.
130 * An entry can be allocated if it's free; the allocator then always sets
131 * hwirq first to the host's invalid irq number and then fills ops.
132 */
133struct irq_map_entry {
134 irq_hw_number_t hwirq;
135 struct irq_host *host;
136};
137
138extern struct irq_map_entry irq_map[NR_IRQS];
139
57 140
58/* Create a mapping for a real_irq if it doesn't already exist. 141/***
59 * Return the virtual irq as a convenience. 142 * irq_alloc_host - Allocate a new irq_host data structure
143 * @node: device-tree node of the interrupt controller
144 * @revmap_type: type of reverse mapping to use
145 * @revmap_arg: for IRQ_HOST_MAP_LINEAR only: size of the linear map
146 * @ops: map/unmap host callbacks
147 * @inval_irq: provide a hw number in that host space that is always invalid
148 *
149 * Allocates and initializes an irq_host structure. Note that in the case of
150 * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
151 * for all legacy interrupts except 0 (which is always the invalid irq for
152 * a legacy controller). For an IRQ_HOST_MAP_LINEAR, the map is allocated by
153 * this call as well. For an IRQ_HOST_MAP_TREE, the radix tree will be allocated
154 * later during boot automatically (the reverse mapping will use the slow path
155 * until that happens).
156 */
157extern struct irq_host *irq_alloc_host(unsigned int revmap_type,
158 unsigned int revmap_arg,
159 struct irq_host_ops *ops,
160 irq_hw_number_t inval_irq);
161
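
As a rough sketch of how a PIC driver plugs into this API: define an irq_host_ops and allocate a host for it. Everything prefixed my_pic_ below is invented for illustration, as is the one-cell, level-sensitive interrupt specifier:

    static struct irq_chip my_pic_chip;     /* chip callbacks omitted in this sketch */

    static int my_pic_host_map(struct irq_host *h, unsigned int virq,
                               irq_hw_number_t hw, unsigned int flags)
    {
            /* Hook the flow handler and chip onto the freshly mapped virq. */
            set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
            return 0;
    }

    static int my_pic_host_xlate(struct irq_host *h, struct device_node *ct,
                                 u32 *intspec, unsigned int intsize,
                                 irq_hw_number_t *out_hwirq,
                                 unsigned int *out_flags)
    {
            /* Assume a one-cell specifier: just the hw line, level-low. */
            *out_hwirq = intspec[0];
            *out_flags = IRQ_TYPE_LEVEL_LOW;
            return 0;
    }

    static struct irq_host_ops my_pic_host_ops = {
            .map   = my_pic_host_map,
            .xlate = my_pic_host_xlate,
    };

    static void __init my_pic_init(void)
    {
            /* 32 sources, linear reverse map, hw irq 0 deemed invalid. */
            struct irq_host *host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 32,
                                                   &my_pic_host_ops, 0);
            BUG_ON(host == NULL);
    }
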
162
163/***
164 * irq_find_host - Locates a host for a given device node
165 * @node: device-tree node of the interrupt controller
166 */
167extern struct irq_host *irq_find_host(struct device_node *node);
168
169
170/***
171 * irq_set_default_host - Set a "default" host
172 * @host: default host pointer
173 *
174 * For convenience, it's possible to set a "default" host that will be used
175 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
176 * platforms that want to manipulate a few hard-coded interrupt numbers that
177 * aren't properly represented in the device-tree.
178 */
179extern void irq_set_default_host(struct irq_host *host);
180
181
182/***
183 * irq_set_virq_count - Set the maximum number of virt irqs
184 * @count: number of linux virtual irqs, capped with NR_IRQS
185 *
186 * This is mainly for use by platforms like iSeries that want to program
187 * the virtual irq number in the controller to avoid the reverse mapping.
188 */
189extern void irq_set_virq_count(unsigned int count);
190
191
192/***
193 * irq_create_mapping - Map a hardware interrupt into linux virq space
194 * @host: host owning this hardware interrupt or NULL for default host
195 * @hwirq: hardware irq number in that host space
196 * @flags: flags passed to the controller. Contains the trigger type among
197 * others. Use IRQ_TYPE_* defined in include/linux/irq.h
198 *
199 * Only one mapping per hardware interrupt is permitted. Returns a linux
200 * virq number. The flags can be used to provide sense information to the
201 * controller (typically extracted from the device-tree). If no information
202 * is passed, the controller defaults will apply (for example, XICS can only
203 * do edge, so flags are irrelevant for some pSeries-specific irqs).
204 *
205 * The device-tree generally contains the trigger info in an encoding that is
206 * specific to a given type of controller. In that case, you can directly use
207 * host->ops->xlate() to translate that.
208 *
209 * It is recommended that new PICs that don't have existing OF bindings choose
210 * to use a representation of triggers identical to Linux's.
211 */
212extern unsigned int irq_create_mapping(struct irq_host *host,
213 irq_hw_number_t hwirq,
214 unsigned int flags);
215
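
On the consumer side, assuming a host pointer obtained as above, the flow is sketched below; my_isr and the hardware line number are placeholders, and note that handlers in this tree still take a pt_regs argument:

    static irqreturn_t my_isr(int irq, void *dev_id, struct pt_regs *regs)
    {
            return IRQ_HANDLED;
    }

    static int my_map_and_request(struct irq_host *host)
    {
            /* Map hardware line 3 into the linux virq space, passing the
             * trigger type down to the controller via ops->map(). */
            unsigned int virq = irq_create_mapping(host, 3, IRQ_TYPE_LEVEL_LOW);

            if (virq == NO_IRQ)
                    return -ENODEV;
            return request_irq(virq, my_isr, 0, "my-pic-demo", NULL);
    }
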
216
217/***
218 * irq_dispose_mapping - Unmap an interrupt
219 * @virq: linux virq number of the interrupt to unmap
220 */
221extern void irq_dispose_mapping(unsigned int virq);
222
223/***
224 * irq_find_mapping - Find a linux virq from an hw irq number.
225 * @host: host owning this hardware interrupt
226 * @hwirq: hardware irq number in that host space
227 *
228 * This is a slow path, for use by generic code. It's expected that an
229 * irq controller implementation directly calls the appropriate low level
230 * mapping function.
60 */ 231 */
61int virt_irq_create_mapping(unsigned int real_irq); 232extern unsigned int irq_find_mapping(struct irq_host *host,
62void virt_irq_init(void); 233 irq_hw_number_t hwirq);
63 234
64static inline unsigned int virt_irq_to_real(unsigned int virt_irq) 235
236/***
237 * irq_radix_revmap - Find a linux virq from a hw irq number.
238 * @host: host owning this hardware interrupt
239 * @hwirq: hardware irq number in that host space
240 *
241 * This is a fast path, for use by irq controller code that uses radix tree
242 * revmaps
243 */
244extern unsigned int irq_radix_revmap(struct irq_host *host,
245 irq_hw_number_t hwirq);
246
247/***
248 * irq_linear_revmap - Find a linux virq from a hw irq number.
249 * @host: host owning this hardware interrupt
250 * @hwirq: hardware irq number in that host space
251 *
252 * This is a fast path, for use by irq controller code that uses linear
253 * revmaps. It falls back to the slow path if the revmap doesn't exist
254 * yet and will create the revmap entry with appropriate locking.
255 */
256
257extern unsigned int irq_linear_revmap(struct irq_host *host,
258 irq_hw_number_t hwirq);
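
To make the fast path concrete, here is a hedged sketch of a get_irq-style path using the linear revmap; the my_pic structure and MY_PIC_ACK register are invented for illustration:

    struct my_pic {
            void __iomem *regs;
            struct irq_host *irqhost;
    };
    #define MY_PIC_ACK 0x20         /* hypothetical ack register offset */

    static unsigned int my_pic_get_irq(struct my_pic *pic)
    {
            irq_hw_number_t hwirq = in_be32(pic->regs + MY_PIC_ACK);

            /* Fast hwirq -> virq lookup; transparently falls back to the
             * slow path and fills the revmap entry on first use. */
            return irq_linear_revmap(pic->irqhost, hwirq);
    }
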
259
260
261
262/***
263 * irq_alloc_virt - Allocate virtual irq numbers
264 * @host: host owning these new virtual irqs
265 * @count: number of consecutive numbers to allocate
266 * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
267 *
268 * This is a low level function that is used internally by irq_create_mapping()
269 * and that can be used by some irq controller implementations for things
270 * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
271 */
272extern unsigned int irq_alloc_virt(struct irq_host *host,
273 unsigned int count,
274 unsigned int hint);
275
276/***
277 * irq_free_virt - Free virtual irq numbers
278 * @virq: virtual irq number of the first interrupt to free
279 * @count: number of interrupts to free
280 *
281 * This function is the opposite of irq_alloc_virt. It will not clear reverse
282 * maps; this should be done beforehand by unmapping the interrupt. In fact,
283 * all interrupts covered by the range being freed should have been unmapped
284 * prior to calling this.
285 */
286extern void irq_free_virt(unsigned int virq, unsigned int count);
287
288
289/* -- OF helpers -- */
290
291/* irq_create_of_mapping - Map a hardware interrupt into linux virq space
292 * @controller: Device node of the interrupt controller
293 * @intspec: Interrupt specifier from the device-tree
294 * @intsize: Size of the interrupt specifier from the device-tree
295 *
296 * This function is identical to irq_create_mapping except that it takes
297 * as input information straight from the device-tree (typically the results
298 * of the of_irq_map_*() functions).
299 */
300extern unsigned int irq_create_of_mapping(struct device_node *controller,
301 u32 *intspec, unsigned int intsize);
302
303
304/* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
305 * @device: Device node of the device whose interrupt is to be mapped
306 * @index: Index of the interrupt to map
307 *
308 * This function is a wrapper that chains of_irq_map_one() and
309 * irq_create_of_mapping() to make things easier for callers
310 */
311extern unsigned int irq_of_parse_and_map(struct device_node *dev, int index);
312
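
For most device drivers the two steps collapse into this single helper; a short sketch, where np and my_isr are placeholders supplied by the caller:

    static int my_device_setup_irq(struct device_node *np)
    {
            /* Walks the interrupt tree for the node and creates the virq
             * mapping in one go. */
            unsigned int virq = irq_of_parse_and_map(np, 0);

            if (virq == NO_IRQ)
                    return -ENODEV;
            return request_irq(virq, my_isr, 0, "my-device", NULL);
    }
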
313/* -- End OF helpers -- */
314
315/***
316 * irq_early_init - Init irq remapping subsystem
317 */
318extern void irq_early_init(void);
319
320static __inline__ int irq_canonicalize(int irq)
65{ 321{
66 return virt_irq_to_real_map[virt_irq]; 322 return irq;
67} 323}
68 324
69extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq); 325
326#else /* CONFIG_PPC_MERGE */
327
328/* This number is used when no interrupt has been assigned */
329#define NO_IRQ (-1)
330#define NO_IRQ_IGNORE (-2)
331
70 332
71/* 333/*
72 * List of interrupt controllers. 334 * These constants are used for passing information about interrupt
335 * signal polarity and level/edge sensing to the low-level PIC chip
336 * drivers.
73 */ 337 */
74#define IC_INVALID 0 338#define IRQ_SENSE_MASK 0x1
75#define IC_OPEN_PIC 1 339#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
76#define IC_PPC_XIC 2 340#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
77#define IC_CELL_PIC 3
78#define IC_ISERIES 4
79 341
80extern u64 ppc64_interrupt_controller; 342#define IRQ_POLARITY_MASK 0x2
343#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
344#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
81 345
82#else /* 32-bit */
83 346
84#if defined(CONFIG_40x) 347#if defined(CONFIG_40x)
85#include <asm/ibm4xx.h> 348#include <asm/ibm4xx.h>
@@ -512,16 +775,11 @@ extern u64 ppc64_interrupt_controller;
512 775
513#endif /* CONFIG_8260 */ 776#endif /* CONFIG_8260 */
514 777
515#endif 778#endif /* Whatever way too big #ifdef */
516 779
517#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 780#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
518/* pedantic: these are long because they are used with set_bit --RR */ 781/* pedantic: these are long because they are used with set_bit --RR */
519extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 782extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
520extern atomic_t ppc_n_lost_interrupts;
521
522#define virt_irq_create_mapping(x) (x)
523
524#endif
525 783
526/* 784/*
527 * Because many systems have two overlapping name spaces for 785
@@ -560,6 +818,7 @@ static __inline__ int irq_canonicalize(int irq)
560 irq = 9; 818 irq = 9;
561 return irq; 819 return irq;
562} 820}
821#endif /* CONFIG_PPC_MERGE */
563 822
564extern int distribute_irqs; 823extern int distribute_irqs;
565 824
@@ -579,9 +838,8 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
579 838
580extern void irq_ctx_init(void); 839extern void irq_ctx_init(void);
581extern void call_do_softirq(struct thread_info *tp); 840extern void call_do_softirq(struct thread_info *tp);
582extern int call___do_IRQ(int irq, struct pt_regs *regs, 841extern int call_handle_irq(int irq, void *p1, void *p2,
583 struct thread_info *tp); 842 struct thread_info *tp, void *func);
584
585#else 843#else
586#define irq_ctx_init() 844#define irq_ctx_init()
587 845
diff --git a/include/asm-powerpc/irqflags.h b/include/asm-powerpc/irqflags.h
new file mode 100644
index 000000000000..7970cbaeaa54
--- /dev/null
+++ b/include/asm-powerpc/irqflags.h
@@ -0,0 +1,31 @@
1/*
2 * include/asm-powerpc/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() macros from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13/*
14 * Get definitions for raw_local_save_flags(x), etc.
15 */
16#include <asm-powerpc/hw_irq.h>
17
18/*
19 * Do the CPU's IRQ-state tracing from assembly code. We call a
20 * C function, so save all the C-clobbered registers:
21 */
22#ifdef CONFIG_TRACE_IRQFLAGS
23
24#error No support on PowerPC yet for CONFIG_TRACE_IRQFLAGS
25
26#else
27# define TRACE_IRQS_ON
28# define TRACE_IRQS_OFF
29#endif
30
31#endif
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index eba133d149a7..c17c13742401 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -97,7 +97,7 @@ struct machdep_calls {
97 void (*show_percpuinfo)(struct seq_file *m, int i); 97 void (*show_percpuinfo)(struct seq_file *m, int i);
98 98
99 void (*init_IRQ)(void); 99 void (*init_IRQ)(void);
100 int (*get_irq)(struct pt_regs *); 100 unsigned int (*get_irq)(struct pt_regs *);
101#ifdef CONFIG_KEXEC 101#ifdef CONFIG_KEXEC
102 void (*kexec_cpu_down)(int crash_shutdown, int secondary); 102 void (*kexec_cpu_down)(int crash_shutdown, int secondary);
103#endif 103#endif
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index f0d22ac34b96..eb241c99c457 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -114,9 +114,6 @@
114#define MPIC_VEC_TIMER_1 248 114#define MPIC_VEC_TIMER_1 248
115#define MPIC_VEC_TIMER_0 247 115#define MPIC_VEC_TIMER_0 247
116 116
117/* Type definition of the cascade handler */
118typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data);
119
120#ifdef CONFIG_MPIC_BROKEN_U3 117#ifdef CONFIG_MPIC_BROKEN_U3
121/* Fixup table entry */ 118/* Fixup table entry */
122struct mpic_irq_fixup 119struct mpic_irq_fixup
@@ -132,10 +129,19 @@ struct mpic_irq_fixup
132/* The instance data of a given MPIC */ 129/* The instance data of a given MPIC */
133struct mpic 130struct mpic
134{ 131{
132 /* The device node of the interrupt controller */
133 struct device_node *of_node;
134
135 /* The remapper for this MPIC */
136 struct irq_host *irqhost;
137
135 /* The "linux" controller struct */ 138 /* The "linux" controller struct */
136 hw_irq_controller hc_irq; 139 struct irq_chip hc_irq;
140#ifdef CONFIG_MPIC_BROKEN_U3
141 struct irq_chip hc_ht_irq;
142#endif
137#ifdef CONFIG_SMP 143#ifdef CONFIG_SMP
138 hw_irq_controller hc_ipi; 144 struct irq_chip hc_ipi;
139#endif 145#endif
140 const char *name; 146 const char *name;
141 /* Flags */ 147 /* Flags */
@@ -144,20 +150,12 @@ struct mpic
144 unsigned int isu_size; 150 unsigned int isu_size;
145 unsigned int isu_shift; 151 unsigned int isu_shift;
146 unsigned int isu_mask; 152 unsigned int isu_mask;
147 /* Offset of irq vector numbers */
148 unsigned int irq_offset;
149 unsigned int irq_count; 153 unsigned int irq_count;
150 /* Offset of ipi vector numbers */
151 unsigned int ipi_offset;
152 /* Number of sources */ 154 /* Number of sources */
153 unsigned int num_sources; 155 unsigned int num_sources;
154 /* Number of CPUs */ 156 /* Number of CPUs */
155 unsigned int num_cpus; 157 unsigned int num_cpus;
156 /* cascade handler */ 158 /* default senses array */
157 mpic_cascade_t cascade;
158 void *cascade_data;
159 unsigned int cascade_vec;
160 /* senses array */
161 unsigned char *senses; 159 unsigned char *senses;
162 unsigned int senses_count; 160 unsigned int senses_count;
163 161
@@ -213,14 +211,11 @@ struct mpic
213 * The values in the array start at the first source of the MPIC, 211 * The values in the array start at the first source of the MPIC,
214 * that is senses[0] correspond to linux irq "irq_offset". 212 * that is senses[0] correspond to linux irq "irq_offset".
215 */ 213 */
216extern struct mpic *mpic_alloc(unsigned long phys_addr, 214extern struct mpic *mpic_alloc(struct device_node *node,
215 unsigned long phys_addr,
217 unsigned int flags, 216 unsigned int flags,
218 unsigned int isu_size, 217 unsigned int isu_size,
219 unsigned int irq_offset,
220 unsigned int irq_count, 218 unsigned int irq_count,
221 unsigned int ipi_offset,
222 unsigned char *senses,
223 unsigned int senses_num,
224 const char *name); 219 const char *name);
225 220
226/* Assign ISUs, to call before mpic_init() 221/* Assign ISUs, to call before mpic_init()
@@ -232,22 +227,27 @@ extern struct mpic *mpic_alloc(unsigned long phys_addr,
232extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, 227extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
233 unsigned long phys_addr); 228 unsigned long phys_addr);
234 229
230/* Set default sense codes
231 *
232 * @mpic: controller
233 * @senses: array of sense codes
234 * @count: size of above array
235 *
236 * Optionally provide an array (indexed on hardware interrupt numbers
237 * for this MPIC) of default sense codes for the chip. Those are Linux
238 * sense codes (IRQ_TYPE_*).
239 *
240 * The driver takes ownership of the pointer; don't dispose of it or
241 * anything like that. __init only.
242 */
243extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count);
244
245
235/* Initialize the controller. After this has been called, none of the above 246/* Initialize the controller. After this has been called, none of the above
236 * should be called again for this mpic 247 * should be called again for this mpic
237 */ 248 */
238extern void mpic_init(struct mpic *mpic); 249extern void mpic_init(struct mpic *mpic);
239 250
240/* Setup a cascade. Currently, only one cascade is supported this
241 * way, though you can always do a normal request_irq() and add
242 * other cascades this way. You should call this _after_ having
243 * added all the ISUs
244 *
245 * @irq_no: "linux" irq number of the cascade (that is offset'ed vector)
246 * @handler: cascade handler function
247 */
248extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t hanlder,
249 void *data);
250
251/* 251/*
252 * All of the following functions must only be used after the 252 * All of the following functions must only be used after the
253 * ISUs have been assigned and the controller fully initialized 253 * ISUs have been assigned and the controller fully initialized
@@ -284,9 +284,9 @@ extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
284void smp_mpic_message_pass(int target, int msg); 284void smp_mpic_message_pass(int target, int msg);
285 285
286/* Fetch interrupt from a given mpic */ 286/* Fetch interrupt from a given mpic */
287extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); 287extern unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
288/* This one gets to the primary mpic */ 288/* This one gets to the primary mpic */
289extern int mpic_get_irq(struct pt_regs *regs); 289extern unsigned int mpic_get_irq(struct pt_regs *regs);
290 290
291/* Set the EPIC clock ratio */ 291/* Set the EPIC clock ratio */
292void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio); 292void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
@@ -294,8 +294,5 @@ void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
294/* Enable/Disable EPIC serial interrupt mode */ 294/* Enable/Disable EPIC serial interrupt mode */
295void mpic_set_serial_int(struct mpic *mpic, int enable); 295void mpic_set_serial_int(struct mpic *mpic, int enable);
296 296
297/* global mpic for pSeries */
298extern struct mpic *pSeries_mpic;
299
300#endif /* __KERNEL__ */ 297#endif /* __KERNEL__ */
301#endif /* _ASM_POWERPC_MPIC_H */ 298#endif /* _ASM_POWERPC_MPIC_H */
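
Putting the reworked mpic interface together, a platform's init_IRQ path would now look roughly like the sketch below; the device-node lookup, register address, and sense table contents are illustrative assumptions, not from this patch:

    static void __init my_platform_init_IRQ(void)
    {
            struct device_node *np = of_find_node_by_type(NULL, "open-pic");
            /* static, since mpic keeps ownership of the senses pointer */
            static u8 senses[2] = {
                    IRQ_TYPE_LEVEL_LOW,     /* hwirq 0, assumed */
                    IRQ_TYPE_EDGE_RISING,   /* hwirq 1, assumed */
            };
            struct mpic *mpic;

            if (np == NULL)
                    return;

            /* Note the new signature: the device node replaces the old
             * irq_offset/ipi_offset/senses arguments; the irq_host
             * remapper now owns the number space. */
            mpic = mpic_alloc(np, 0x80040000, MPIC_PRIMARY, 0, 0, " MPIC ");
            BUG_ON(mpic == NULL);
            mpic_set_default_senses(mpic, senses, 2);
            mpic_init(mpic);
    }
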
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index faa1fc703053..2f2e3024fa61 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -14,6 +14,7 @@
14 14
15#define __per_cpu_offset(cpu) (paca[cpu].data_offset) 15#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
16#define __my_cpu_offset() get_paca()->data_offset 16#define __my_cpu_offset() get_paca()->data_offset
17#define per_cpu_offset(x) (__per_cpu_offset(x))
17 18
18/* Separate out the type, so (int[3], foo) works. */ 19/* Separate out the type, so (int[3], foo) works. */
19#define DEFINE_PER_CPU(type, name) \ 20#define DEFINE_PER_CPU(type, name) \
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index 010d186d095b..b095a285c84b 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -64,11 +64,6 @@ struct boot_param_header
64typedef u32 phandle; 64typedef u32 phandle;
65typedef u32 ihandle; 65typedef u32 ihandle;
66 66
67struct interrupt_info {
68 int line;
69 int sense; /* +ve/-ve logic, edge or level, etc. */
70};
71
72struct property { 67struct property {
73 char *name; 68 char *name;
74 int length; 69 int length;
@@ -81,8 +76,6 @@ struct device_node {
81 char *type; 76 char *type;
82 phandle node; 77 phandle node;
83 phandle linux_phandle; 78 phandle linux_phandle;
84 int n_intrs;
85 struct interrupt_info *intrs;
86 char *full_name; 79 char *full_name;
87 80
88 struct property *properties; 81 struct property *properties;
@@ -167,8 +160,8 @@ extern void unflatten_device_tree(void);
167extern void early_init_devtree(void *); 160extern void early_init_devtree(void *);
168extern int device_is_compatible(struct device_node *device, const char *); 161extern int device_is_compatible(struct device_node *device, const char *);
169extern int machine_is_compatible(const char *compat); 162extern int machine_is_compatible(const char *compat);
170extern unsigned char *get_property(struct device_node *node, const char *name, 163extern void *get_property(struct device_node *node, const char *name,
171 int *lenp); 164 int *lenp);
172extern void print_properties(struct device_node *node); 165extern void print_properties(struct device_node *node);
173extern int prom_n_addr_cells(struct device_node* np); 166extern int prom_n_addr_cells(struct device_node* np);
174extern int prom_n_size_cells(struct device_node* np); 167extern int prom_n_size_cells(struct device_node* np);
@@ -204,6 +197,15 @@ extern int release_OF_resource(struct device_node* node, int index);
204 */ 197 */
205 198
206 199
200/* Helper to read a big number */
201static inline u64 of_read_number(u32 *cell, int size)
202{
203 u64 r = 0;
204 while (size--)
205 r = (r << 32) | *(cell++);
206 return r;
207}
208
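
of_read_number() simply folds consecutive big-endian 32-bit cells into one 64-bit value. For example, reading a two-cell address and two-cell size out of a raw "reg" property; the 2+2 cell layout is an assumption about the bus, not something this patch mandates:

    static int my_read_reg(struct device_node *np, u64 *addr, u64 *size)
    {
            /* Assumes #address-cells = #size-cells = 2 for this bus. */
            u32 *reg = get_property(np, "reg", NULL);

            if (reg == NULL)
                    return -ENODEV;
            *addr = of_read_number(reg, 2);         /* cells 0-1 */
            *size = of_read_number(reg + 2, 2);     /* cells 2-3 */
            return 0;
    }

This also leans on the get_property() prototype change above: with a void * return, no cast is needed.
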
207/* Translate an OF address block into a CPU physical address 209/* Translate an OF address block into a CPU physical address
208 */ 210 */
209#define OF_BAD_ADDR ((u64)-1) 211#define OF_BAD_ADDR ((u64)-1)
@@ -240,5 +242,83 @@ extern void kdump_move_device_tree(void);
240/* CPU OF node matching */ 242/* CPU OF node matching */
241struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); 243struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
242 244
245
246/*
247 * OF interrupt mapping
248 */
249
250/* This structure is returned when an interrupt is mapped. The controller
251 * field needs to be put() after use
252 */
253
254#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */
255
256struct of_irq {
257 struct device_node *controller; /* Interrupt controller node */
258 u32 size; /* Specifier size */
259 u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
260};
261
262/***
263 * of_irq_map_init - Initialize the irq remapper
264 * @flags: flags defining workarounds to enable
265 *
266 * Some machines have bugs in the device-tree which require certain workarounds
267 * to be applied. Call this before any interrupt mapping attempts to enable
268 * those workarounds.
269 */
270#define OF_IMAP_OLDWORLD_MAC 0x00000001
271#define OF_IMAP_NO_PHANDLE 0x00000002
272
273extern void of_irq_map_init(unsigned int flags);
274
275/***
276 * of_irq_map_raw - Low level interrupt tree parsing
277 * @parent: the device interrupt parent
278 * @intspec: interrupt specifier ("interrupts" property of the device)
279 * @addr: address specifier (start of "reg" property of the device)
280 * @out_irq: structure of_irq filled by this function
281 *
282 * Returns 0 on success and a negative number on error
283 *
284 * This function is a low-level interrupt tree walking function. It
285 * can be used to do a partial walk with synthesized reg and interrupts
286 * properties, for example when resolving PCI interrupts when no device
287 * node exists for the parent.
288 *
289 */
290
291extern int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 *addr,
292 struct of_irq *out_irq);
293
294
295/***
296 * of_irq_map_one - Resolve an interrupt for a device
297 * @device: the device whose interrupt is to be resolved
298 * @index: index of the interrupt to resolve
299 * @out_irq: structure of_irq filled by this function
300 *
301 * This function resolves an interrupt, walking the tree, for a given
302 * device-tree node. It's the high-level counterpart to of_irq_map_raw().
303 * It also implements the workarounds for OldWorld Macs.
304 */
305extern int of_irq_map_one(struct device_node *device, int index,
306 struct of_irq *out_irq);
307
308/***
309 * of_irq_map_pci - Resolve the interrupt for a PCI device
310 * @pdev: the device whose interrupt is to be resolved
311 * @out_irq: structure of_irq filled by this function
312 *
313 * This function resolves the PCI interrupt for a given PCI device. If a
314 * device-node exists for a given pci_dev, it will use normal OF tree
315 * walking. If not, it will implement standard swizzling and walk up the
316 * PCI tree until a device-node is found, at which point it will finish
317 * resolving using the OF tree walking.
318 */
319struct pci_dev;
320extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
321
322
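
The two halves chain naturally for PCI: of_irq_map_pci() produces the specifier, and irq_create_of_mapping() (from asm-powerpc/irq.h above) turns it into a virq. A hedged sketch of an irq fixup path:

    static void __devinit my_pci_irq_fixup(struct pci_dev *pdev)
    {
            struct of_irq oirq;

            if (of_irq_map_pci(pdev, &oirq))
                    return;         /* no interrupt routed to this device */

            pdev->irq = irq_create_of_mapping(oirq.controller,
                                              oirq.specifier, oirq.size);
            /* Per the comment above, the controller node needs a put(). */
            of_node_put(oirq.controller);
    }
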
243#endif /* __KERNEL__ */ 323#endif /* __KERNEL__ */
244#endif /* _POWERPC_PROM_H */ 324#endif /* _POWERPC_PROM_H */
diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h
index 2c2fe9647595..e929145e1e46 100644
--- a/include/asm-powerpc/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -28,24 +28,11 @@ struct rw_semaphore {
28#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 28#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
29 spinlock_t wait_lock; 29 spinlock_t wait_lock;
30 struct list_head wait_list; 30 struct list_head wait_list;
31#if RWSEM_DEBUG
32 int debug;
33#endif
34}; 31};
35 32
36/*
37 * initialisation
38 */
39#if RWSEM_DEBUG
40#define __RWSEM_DEBUG_INIT , 0
41#else
42#define __RWSEM_DEBUG_INIT /* */
43#endif
44
45#define __RWSEM_INITIALIZER(name) \ 33#define __RWSEM_INITIALIZER(name) \
46 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 34 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
47 LIST_HEAD_INIT((name).wait_list) \ 35 LIST_HEAD_INIT((name).wait_list) }
48 __RWSEM_DEBUG_INIT }
49 36
50#define DECLARE_RWSEM(name) \ 37#define DECLARE_RWSEM(name) \
51 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 38 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -60,9 +47,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
60 sem->count = RWSEM_UNLOCKED_VALUE; 47 sem->count = RWSEM_UNLOCKED_VALUE;
61 spin_lock_init(&sem->wait_lock); 48 spin_lock_init(&sem->wait_lock);
62 INIT_LIST_HEAD(&sem->wait_list); 49 INIT_LIST_HEAD(&sem->wait_list);
63#if RWSEM_DEBUG
64 sem->debug = 0;
65#endif
66} 50}
67 51
68/* 52/*
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 9609d3ee8798..c02d105d8294 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -117,6 +117,7 @@ struct spu {
117 struct list_head sched_list; 117 struct list_head sched_list;
118 int number; 118 int number;
119 int nid; 119 int nid;
120 unsigned int irqs[3];
120 u32 isrc; 121 u32 isrc;
121 u32 node; 122 u32 node;
122 u64 flags; 123 u64 flags;
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
new file mode 100644
index 000000000000..ed8955f49e47
--- /dev/null
+++ b/include/asm-s390/Kbuild
@@ -0,0 +1,4 @@
1include include/asm-generic/Kbuild.asm
2
3unifdef-y += cmb.h debug.h
4header-y += dasd.h qeth.h tape390.h ucontext.h vtoc.h z90crypt.h
diff --git a/include/asm-s390/irqflags.h b/include/asm-s390/irqflags.h
new file mode 100644
index 000000000000..65f4db627e7a
--- /dev/null
+++ b/include/asm-s390/irqflags.h
@@ -0,0 +1,50 @@
1/*
2 * include/asm-s390/irqflags.h
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#ifndef __ASM_IRQFLAGS_H
9#define __ASM_IRQFLAGS_H
10
11#ifdef __KERNEL__
12
13/* interrupt control.. */
14#define raw_local_irq_enable() ({ \
15 unsigned long __dummy; \
16 __asm__ __volatile__ ( \
17 "stosm 0(%1),0x03" \
18 : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
19 })
20
21#define raw_local_irq_disable() ({ \
22 unsigned long __flags; \
23 __asm__ __volatile__ ( \
24 "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
25 __flags; \
26 })
27
28#define raw_local_save_flags(x) \
29 __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )
30
31#define raw_local_irq_restore(x) \
32 __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory")
33
34#define raw_irqs_disabled() \
35({ \
36 unsigned long flags; \
37 local_save_flags(flags); \
38 !((flags >> __FLAG_SHIFT) & 3); \
39})
40
41static inline int raw_irqs_disabled_flags(unsigned long flags)
42{
43 return !((flags >> __FLAG_SHIFT) & 3);
44}
45
46/* For spinlocks etc */
47#define raw_local_irq_save(x) ((x) = raw_local_irq_disable())
48
49#endif /* __KERNEL__ */
50#endif /* __ASM_IRQFLAGS_H */
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index d9a8cca9b653..28b3517e787c 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -42,6 +42,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
42#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) 42#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
43#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) 43#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
44#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) 44#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
45#define per_cpu_offset(x) (__per_cpu_offset[x])
45 46
46/* A macro to avoid #include hell... */ 47/* A macro to avoid #include hell... */
47#define percpu_modcopy(pcpudst, src, size) \ 48#define percpu_modcopy(pcpudst, src, size) \
diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
index 0422a085dd56..13ec16965150 100644
--- a/include/asm-s390/rwsem.h
+++ b/include/asm-s390/rwsem.h
@@ -61,6 +61,9 @@ struct rw_semaphore {
61 signed long count; 61 signed long count;
62 spinlock_t wait_lock; 62 spinlock_t wait_lock;
63 struct list_head wait_list; 63 struct list_head wait_list;
64#ifdef CONFIG_DEBUG_LOCK_ALLOC
65 struct lockdep_map dep_map;
66#endif
64}; 67};
65 68
66#ifndef __s390x__ 69#ifndef __s390x__
@@ -80,8 +83,16 @@ struct rw_semaphore {
80/* 83/*
81 * initialisation 84 * initialisation
82 */ 85 */
86
87#ifdef CONFIG_DEBUG_LOCK_ALLOC
88# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
89#else
90# define __RWSEM_DEP_MAP_INIT(lockname)
91#endif
92
83#define __RWSEM_INITIALIZER(name) \ 93#define __RWSEM_INITIALIZER(name) \
84{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) } 94{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
95 __RWSEM_DEP_MAP_INIT(name) }
85 96
86#define DECLARE_RWSEM(name) \ 97#define DECLARE_RWSEM(name) \
87 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 98 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -93,6 +104,17 @@ static inline void init_rwsem(struct rw_semaphore *sem)
93 INIT_LIST_HEAD(&sem->wait_list); 104 INIT_LIST_HEAD(&sem->wait_list);
94} 105}
95 106
107extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
108 struct lock_class_key *key);
109
110#define init_rwsem(sem) \
111do { \
112 static struct lock_class_key __key; \
113 \
114 __init_rwsem((sem), #sem, &__key); \
115} while (0)
116
117
96/* 118/*
97 * lock for reading 119 * lock for reading
98 */ 120 */
@@ -155,7 +177,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
155/* 177/*
156 * lock for writing 178 * lock for writing
157 */ 179 */
158static inline void __down_write(struct rw_semaphore *sem) 180static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
159{ 181{
160 signed long old, new, tmp; 182 signed long old, new, tmp;
161 183
@@ -181,6 +203,11 @@ static inline void __down_write(struct rw_semaphore *sem)
181 rwsem_down_write_failed(sem); 203 rwsem_down_write_failed(sem);
182} 204}
183 205
206static inline void __down_write(struct rw_semaphore *sem)
207{
208 __down_write_nested(sem, 0);
209}
210
184/* 211/*
185 * trylock for writing -- returns 1 if successful, 0 if contention 212 * trylock for writing -- returns 1 if successful, 0 if contention
186 */ 213 */
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 702cf436698c..32cdc69f39f4 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -37,7 +37,8 @@ struct semaphore {
37 37
38static inline void sema_init (struct semaphore *sem, int val) 38static inline void sema_init (struct semaphore *sem, int val)
39{ 39{
40 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val); 40 atomic_set(&sem->count, val);
41 init_waitqueue_head(&sem->wait);
41} 42}
42 43
43static inline void init_MUTEX (struct semaphore *sem) 44static inline void init_MUTEX (struct semaphore *sem)
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 71a0732cd518..9ab186ffde23 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -301,34 +301,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
301#define set_mb(var, value) do { var = value; mb(); } while (0) 301#define set_mb(var, value) do { var = value; mb(); } while (0)
302#define set_wmb(var, value) do { var = value; wmb(); } while (0) 302#define set_wmb(var, value) do { var = value; wmb(); } while (0)
303 303
304/* interrupt control.. */
305#define local_irq_enable() ({ \
306 unsigned long __dummy; \
307 __asm__ __volatile__ ( \
308 "stosm 0(%1),0x03" \
309 : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
310 })
311
312#define local_irq_disable() ({ \
313 unsigned long __flags; \
314 __asm__ __volatile__ ( \
315 "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
316 __flags; \
317 })
318
319#define local_save_flags(x) \
320 __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )
321
322#define local_irq_restore(x) \
323 __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory")
324
325#define irqs_disabled() \
326({ \
327 unsigned long flags; \
328 local_save_flags(flags); \
329 !((flags >> __FLAG_SHIFT) & 3); \
330})
331
332#ifdef __s390x__ 304#ifdef __s390x__
333 305
334#define __ctl_load(array, low, high) ({ \ 306#define __ctl_load(array, low, high) ({ \
@@ -442,8 +414,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
442 }) 414 })
443#endif /* __s390x__ */ 415#endif /* __s390x__ */
444 416
445/* For spinlocks etc */ 417#include <linux/irqflags.h>
446#define local_irq_save(x) ((x) = local_irq_disable())
447 418
448/* 419/*
449 * Use to set psw mask except for the first byte which 420 * Use to set psw mask except for the first byte which
@@ -482,4 +453,3 @@ extern void (*_machine_power_off)(void);
482#endif /* __KERNEL__ */ 453#endif /* __KERNEL__ */
483 454
484#endif 455#endif
485
diff --git a/include/asm-sh/Kbuild b/include/asm-sh/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-sh/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-sh/rwsem.h b/include/asm-sh/rwsem.h
index 0262d3d1e5e0..9d2aea5e8488 100644
--- a/include/asm-sh/rwsem.h
+++ b/include/asm-sh/rwsem.h
@@ -25,24 +25,11 @@ struct rw_semaphore {
25#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 25#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
26 spinlock_t wait_lock; 26 spinlock_t wait_lock;
27 struct list_head wait_list; 27 struct list_head wait_list;
28#if RWSEM_DEBUG
29 int debug;
30#endif
31}; 28};
32 29
33/*
34 * initialisation
35 */
36#if RWSEM_DEBUG
37#define __RWSEM_DEBUG_INIT , 0
38#else
39#define __RWSEM_DEBUG_INIT /* */
40#endif
41
42#define __RWSEM_INITIALIZER(name) \ 30#define __RWSEM_INITIALIZER(name) \
43 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 31 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
44 LIST_HEAD_INIT((name).wait_list) \ 32 LIST_HEAD_INIT((name).wait_list) }
45 __RWSEM_DEBUG_INIT }
46 33
47#define DECLARE_RWSEM(name) \ 34#define DECLARE_RWSEM(name) \
48 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 35 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -57,9 +44,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
57 sem->count = RWSEM_UNLOCKED_VALUE; 44 sem->count = RWSEM_UNLOCKED_VALUE;
58 spin_lock_init(&sem->wait_lock); 45 spin_lock_init(&sem->wait_lock);
59 INIT_LIST_HEAD(&sem->wait_list); 46 INIT_LIST_HEAD(&sem->wait_list);
60#if RWSEM_DEBUG
61 sem->debug = 0;
62#endif
63} 47}
64 48
65/* 49/*
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index b752e5cbb830..ce2e60664a86 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -12,7 +12,7 @@
12 */ 12 */
13 13
14#define switch_to(prev, next, last) do { \ 14#define switch_to(prev, next, last) do { \
15 task_t *__last; \ 15 struct task_struct *__last; \
16 register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \ 16 register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
17 register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \ 17 register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
18 register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \ 18 register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
diff --git a/include/asm-sh64/Kbuild b/include/asm-sh64/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-sh64/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-sparc/Kbuild b/include/asm-sparc/Kbuild
new file mode 100644
index 000000000000..e2a57fd7abfa
--- /dev/null
+++ b/include/asm-sparc/Kbuild
@@ -0,0 +1,6 @@
1include include/asm-generic/Kbuild.asm
2
3unifdef-y += fbio.h perfctr.h psr.h
4header-y += apc.h asi.h auxio.h bpp.h head.h ipc.h jsflash.h \
5 openpromio.h pbm.h pconf.h pgtsun4.h reg.h traps.h \
6 turbosparc.h vfc_ioctls.h winmacro.h
diff --git a/include/asm-sparc64/Kbuild b/include/asm-sparc64/Kbuild
new file mode 100644
index 000000000000..c78d44bb195f
--- /dev/null
+++ b/include/asm-sparc64/Kbuild
@@ -0,0 +1,10 @@
1include include/asm-generic/Kbuild.asm
2
3ALTARCH := sparc
4ARCHDEF := defined __sparc__ && defined __arch64__
5ALTARCHDEF := defined __sparc__ && !defined __arch64__
6
7unifdef-y := fbio.h perfctr.h
8header-y += apb.h asi.h bbc.h bpp.h display7seg.h envctrl.h floppy.h \
9 ipc.h kdebug.h mostek.h openprom.h openpromio.h parport.h \
10 pconf.h psrcompat.h pstate.h reg.h uctx.h utrap.h watchdog.h
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index a6ece06b83db..ced8cbde046d 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -11,6 +11,7 @@ extern unsigned long __per_cpu_base;
11extern unsigned long __per_cpu_shift; 11extern unsigned long __per_cpu_shift;
12#define __per_cpu_offset(__cpu) \ 12#define __per_cpu_offset(__cpu) \
13 (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) 13 (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
14#define per_cpu_offset(x) (__per_cpu_offset(x))
14 15
15/* Separate out the type, so (int[3], foo) works. */ 16/* Separate out the type, so (int[3], foo) works. */
16#define DEFINE_PER_CPU(type, name) \ 17#define DEFINE_PER_CPU(type, name) \
diff --git a/include/asm-um/Kbuild b/include/asm-um/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-um/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-v850/Kbuild b/include/asm-v850/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-v850/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
new file mode 100644
index 000000000000..dc4d101e8a16
--- /dev/null
+++ b/include/asm-x86_64/Kbuild
@@ -0,0 +1,11 @@
1include include/asm-generic/Kbuild.asm
2
3ALTARCH := i386
4ARCHDEF := defined __x86_64__
5ALTARCHDEF := defined __i386__
6
7header-y += boot.h bootsetup.h cpufeature.h debugreg.h ldt.h \
8 msr.h prctl.h setup.h sigcontext32.h ucontext.h \
9 vsyscall32.h
10
11unifdef-y += mce.h mtrr.h vsyscall.h
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
new file mode 100644
index 000000000000..cce6937e87c0
--- /dev/null
+++ b/include/asm-x86_64/irqflags.h
@@ -0,0 +1,141 @@
+/*
+ * include/asm-x86_64/irqflags.h
+ *
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() functions from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+/*
+ * Interrupt control:
+ */
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__(
+		"# __raw_save_flags\n\t"
+		"pushfq ; popq %q0"
+		: "=g" (flags)
+		: /* no input */
+		: "memory"
+	);
+
+	return flags;
+}
+
+#define raw_local_save_flags(flags) \
+		do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	__asm__ __volatile__(
+		"pushq %0 ; popfq"
+		: /* no output */
+		:"g" (flags)
+		:"memory", "cc"
+	);
+}
+
+#ifdef CONFIG_X86_VSMP
+
+/*
+ * Interrupt control for the VSMP architecture:
+ */
+
+static inline void raw_local_irq_disable(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (1<<9)) || (flags & (1 << 18));
+}
+
+#else /* CONFIG_X86_VSMP */
+
+static inline void raw_local_irq_disable(void)
+{
+	__asm__ __volatile__("cli" : : : "memory");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	__asm__ __volatile__("sti" : : : "memory");
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (1 << 9));
+}
+
+#endif
+
+/*
+ * For spinlocks, etc.:
+ */
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_disable();
+
+	return flags;
+}
+
+#define raw_local_irq_save(flags) \
+		do { (flags) = __raw_local_irq_save(); } while (0)
+
+static inline int raw_irqs_disabled(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	return raw_irqs_disabled_flags(flags);
+}
+
+/*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+static inline void raw_safe_halt(void)
+{
+	__asm__ __volatile__("sti; hlt" : : : "memory");
+}
+
+/*
+ * Used when interrupts are already enabled or to
+ * shut down the processor:
+ */
+static inline void halt(void)
+{
+	__asm__ __volatile__("hlt": : :"memory");
+}
+
+#else /* __ASSEMBLY__: */
+# ifdef CONFIG_TRACE_IRQFLAGS
+#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk
+#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk
+# else
+#  define TRACE_IRQS_ON
+#  define TRACE_IRQS_OFF
+# endif
+#endif
+
+#endif
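
As an illustration of the wrappers above (not part of the patch; the
example_* function is hypothetical), a minimal sketch of a caller that
saves, disables and restores the local IRQ flags through this API:

static inline void example_touch_percpu_state(void)
{
	unsigned long flags;

	/* raw_local_irq_save(): __raw_local_save_flags() + raw_local_irq_disable() */
	raw_local_irq_save(flags);
	/* ... short critical section on local-CPU state ... */
	raw_local_irq_restore(flags);
}
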
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index cd52c7f33bca..2b0c088e2957 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -49,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str,
 	return atomic_notifier_call_chain(&die_chain, val, &args);
 }

-extern int printk_address(unsigned long address);
+extern void printk_address(unsigned long address);
 extern void die(const char *,struct pt_regs *,long);
 extern void __die(const char *,struct pt_regs *,long);
 extern void show_registers(struct pt_regs *regs);
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 549eb929b2c0..08dd9f9dda81 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -14,6 +14,8 @@
 #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
 #define __my_cpu_offset() read_pda(data_offset)

+#define per_cpu_offset(x) (__per_cpu_offset(x))
+
 /* Separate out the type, so (int[3], foo) works. */
 #define DEFINE_PER_CPU(type, name) \
 	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 68e559f3631c..f67f2873a922 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -244,43 +244,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,

 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

-/* interrupt control.. */
-#define local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
-#define local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
-
-#ifdef CONFIG_X86_VSMP
-/* Interrupt control for VSMP architecture */
-#define local_irq_disable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
-#define local_irq_enable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
-
-#define irqs_disabled()					\
-({							\
-	unsigned long flags;				\
-	local_save_flags(flags);			\
-	(flags & (1<<18)) || !(flags & (1<<9));		\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
-#else /* CONFIG_X86_VSMP */
-#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	!(flags & (1<<9));		\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
-#endif
-
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt()			__asm__ __volatile__("hlt": : :"memory")
+#include <linux/irqflags.h>

 void cpu_idle_wait(void);

diff --git a/include/asm-xtensa/Kbuild b/include/asm-xtensa/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-xtensa/Kbuild
@@ -0,0 +1 @@
+include include/asm-generic/Kbuild.asm
diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h
index abcd86dc5ab9..0aad3a587551 100644
--- a/include/asm-xtensa/rwsem.h
+++ b/include/asm-xtensa/rwsem.h
@@ -31,24 +31,11 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#if RWSEM_DEBUG
-	int			debug;
-#endif
 };

-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT	, 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-
 #define __RWSEM_INITIALIZER(name) \
 	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-	  LIST_HEAD_INIT((name).wait_list) \
-	  __RWSEM_DEBUG_INIT }
+	  LIST_HEAD_INIT((name).wait_list) }

 #define DECLARE_RWSEM(name)		\
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -63,9 +50,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
 	sem->count = RWSEM_UNLOCKED_VALUE;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }

 /*
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
new file mode 100644
index 000000000000..2b8a7d68fae3
--- /dev/null
+++ b/include/linux/Kbuild
@@ -0,0 +1,63 @@
+header-y := byteorder/ dvb/ hdlc/ isdn/ nfsd/ raid/ sunrpc/ tc_act/ \
+	netfilter/ netfilter_arp/ netfilter_bridge/ netfilter_ipv4/ \
+	netfilter_ipv6/
+
+header-y += affs_fs.h affs_hardblocks.h aio_abi.h a.out.h arcfb.h \
+	atmapi.h atmbr2684.h atmclip.h atm_eni.h atm_he.h \
+	atm_idt77105.h atmioc.h atmlec.h atmmpc.h atm_nicstar.h \
+	atmppp.h atmsap.h atmsvc.h atm_zatm.h auto_fs4.h auxvec.h \
+	awe_voice.h ax25.h b1lli.h baycom.h bfs_fs.h blkpg.h \
+	bpqether.h cdk.h chio.h coda_psdev.h coff.h comstats.h \
+	consolemap.h cycx_cfm.h dm-ioctl.h dn.h dqblk_v1.h \
+	dqblk_v2.h dqblk_xfs.h efs_fs_sb.h elf-fdpic.h elf.h elf-em.h \
+	fadvise.h fd.h fdreg.h ftape-header-segment.h ftape-vendors.h \
+	fuse.h futex.h genetlink.h gen_stats.h gigaset_dev.h hdsmart.h \
+	hpfs_fs.h hysdn_if.h i2c-dev.h i8k.h icmp.h \
+	if_arcnet.h if_arp.h if_bonding.h if_cablemodem.h if_fc.h \
+	if_fddi.h if.h if_hippi.h if_infiniband.h if_packet.h \
+	if_plip.h if_ppp.h if_slip.h if_strip.h if_tunnel.h in6.h \
+	in_route.h ioctl.h ip.h ipmi_msgdefs.h ip_mp_alg.h ipsec.h \
+	ipx.h irda.h isdn_divertif.h iso_fs.h ite_gpio.h ixjuser.h \
+	jffs2.h keyctl.h limits.h major.h matroxfb.h meye.h minix_fs.h \
+	mmtimer.h mqueue.h mtio.h ncp_no.h netfilter_arp.h netrom.h \
+	nfs2.h nfs4_mount.h nfs_mount.h openprom_fs.h param.h \
+	pci_ids.h pci_regs.h personality.h pfkeyv2.h pg.h pkt_cls.h \
+	pkt_sched.h posix_types.h ppdev.h prctl.h ps2esdi.h qic117.h \
+	qnxtypes.h quotaio_v1.h quotaio_v2.h radeonfb.h raw.h \
+	resource.h rose.h sctp.h smbno.h snmp.h sockios.h som.h \
+	sound.h stddef.h synclink.h telephony.h termios.h ticable.h \
+	times.h tiocl.h tipc.h toshiba.h ultrasound.h un.h utime.h \
+	utsname.h video_decoder.h video_encoder.h videotext.h vt.h \
+	wavefront.h wireless.h xattr.h x25.h zorro_ids.h
+
+unifdef-y += acct.h adb.h adfs_fs.h agpgart.h apm_bios.h atalk.h \
+	atmarp.h atmdev.h atm.h atm_tcp.h audit.h auto_fs.h binfmts.h \
+	capability.h capi.h cciss_ioctl.h cdrom.h cm4000_cs.h \
+	cn_proc.h coda.h connector.h cramfs_fs.h cuda.h cyclades.h \
+	dccp.h dirent.h divert.h elfcore.h errno.h errqueue.h \
+	ethtool.h eventpoll.h ext2_fs.h ext3_fs.h fb.h fcntl.h \
+	filter.h flat.h fs.h ftape.h gameport.h generic_serial.h \
+	genhd.h hayesesp.h hdlcdrv.h hdlc.h hdreg.h hiddev.h hpet.h \
+	i2c.h i2o-dev.h icmpv6.h if_bridge.h if_ec.h \
+	if_eql.h if_ether.h if_frad.h if_ltalk.h if_pppox.h \
+	if_shaper.h if_tr.h if_tun.h if_vlan.h if_wanpipe.h igmp.h \
+	inet_diag.h in.h inotify.h input.h ipc.h ipmi.h ipv6.h \
+	ipv6_route.h isdn.h isdnif.h isdn_ppp.h isicom.h jbd.h \
+	joystick.h kdev_t.h kd.h kernelcapi.h kernel.h keyboard.h \
+	llc.h loop.h lp.h mempolicy.h mii.h mman.h mroute.h msdos_fs.h \
+	msg.h nbd.h ncp_fs.h ncp.h ncp_mount.h netdevice.h \
+	netfilter_bridge.h netfilter_decnet.h netfilter.h \
+	netfilter_ipv4.h netfilter_ipv6.h netfilter_logging.h net.h \
+	netlink.h nfs3.h nfs4.h nfsacl.h nfs_fs.h nfs.h nfs_idmap.h \
+	n_r3964.h nubus.h nvram.h parport.h patchkey.h pci.h pktcdvd.h \
+	pmu.h poll.h ppp_defs.h ppp-comp.h ptrace.h qnx4_fs.h quota.h \
+	random.h reboot.h reiserfs_fs.h reiserfs_xattr.h romfs_fs.h \
+	route.h rtc.h rtnetlink.h scc.h sched.h sdla.h \
+	selinux_netlink.h sem.h serial_core.h serial.h serio.h shm.h \
+	signal.h smb_fs.h smb.h smb_mount.h socket.h sonet.h sonypi.h \
+	soundcard.h stat.h sysctl.h tcp.h time.h timex.h tty.h types.h \
+	udf_fs_i.h udp.h uinput.h uio.h unistd.h usb_ch9.h \
+	usbdevice_fs.h user.h videodev2.h videodev.h wait.h \
+	wanrouter.h watchdog.h xfrm.h zftape.h
+
+objhdr-y := version.h
diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild
new file mode 100644
index 000000000000..84a57d4fb212
--- /dev/null
+++ b/include/linux/byteorder/Kbuild
@@ -0,0 +1,2 @@
+unifdef-y += generic.h swabb.h swab.h
+header-y += big_endian.h little_endian.h pdp_endian.h
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 90663ad217f9..251c41e3ddd5 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -21,6 +21,18 @@ struct completion {
 #define DECLARE_COMPLETION(work) \
 	struct completion work = COMPLETION_INITIALIZER(work)

+/*
+ * Lockdep needs to run a non-constant initializer for on-stack
+ * completions - so we use the _ONSTACK() variant for those that
+ * are on the kernel stack:
+ */
+#ifdef CONFIG_LOCKDEP
+# define DECLARE_COMPLETION_ONSTACK(work) \
+	struct completion work = ({ init_completion(&work); work; })
+#else
+# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+#endif
+
 static inline void init_completion(struct completion *x)
 {
 	x->done = 0;
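
For illustration (not part of the patch; example_wait_for_event() and the
asynchronous completer are hypothetical), the on-stack variant is used like
this:

static int example_wait_for_event(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	/* hand &done to some asynchronous context ... */
	example_start_async_work(&done);	/* hypothetical; eventually calls complete(&done) */
	wait_for_completion(&done);
	return 0;
}
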
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 0dd1610a94a9..471781ffeab1 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -114,6 +114,18 @@ struct dentry {
 	unsigned char d_iname[DNAME_INLINE_LEN_MIN];	/* small names */
 };

+/*
+ * dentry->d_lock spinlock nesting subclasses:
+ *
+ * 0: normal
+ * 1: nested
+ */
+enum dentry_d_lock_class
+{
+	DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
+	DENTRY_D_LOCK_NESTED
+};
+
 struct dentry_operations {
 	int (*d_revalidate)(struct dentry *, struct nameidata *);
 	int (*d_hash) (struct dentry *, struct qstr *);
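
A minimal sketch of how the subclasses above might be consumed (not part of
the patch; example_lock_pair() is hypothetical) - taking two d_locks in a
known order, with the inner one annotated as nested:

static void example_lock_pair(struct dentry *parent, struct dentry *dentry)
{
	spin_lock(&parent->d_lock);		/* DENTRY_D_LOCK_NORMAL */
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	/* ... */
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
}
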
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
new file mode 100644
index 000000000000..6a7047851e48
--- /dev/null
+++ b/include/linux/debug_locks.h
@@ -0,0 +1,69 @@
+#ifndef __LINUX_DEBUG_LOCKING_H
+#define __LINUX_DEBUG_LOCKING_H
+
+extern int debug_locks;
+extern int debug_locks_silent;
+
+/*
+ * Generic 'turn off all lock debugging' function:
+ */
+extern int debug_locks_off(void);
+
+/*
+ * In the debug case we carry the caller's instruction pointer into
+ * other functions, but we don't want the function argument overhead
+ * in the nondebug case - hence these macros:
+ */
+#define _RET_IP_		(unsigned long)__builtin_return_address(0)
+#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+#define DEBUG_LOCKS_WARN_ON(c)						\
+({									\
+	int __ret = 0;							\
+									\
+	if (unlikely(c)) {						\
+		if (debug_locks_off())					\
+			WARN_ON(1);					\
+		__ret = 1;						\
+	}								\
+	__ret;								\
+})
+
+#ifdef CONFIG_SMP
+# define SMP_DEBUG_LOCKS_WARN_ON(c)	DEBUG_LOCKS_WARN_ON(c)
+#else
+# define SMP_DEBUG_LOCKS_WARN_ON(c)	do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+  extern void locking_selftest(void);
+#else
+# define locking_selftest()	do { } while (0)
+#endif
+
+#ifdef CONFIG_LOCKDEP
+extern void debug_show_all_locks(void);
+extern void debug_show_held_locks(struct task_struct *task);
+extern void debug_check_no_locks_freed(const void *from, unsigned long len);
+extern void debug_check_no_locks_held(struct task_struct *task);
+#else
+static inline void debug_show_all_locks(void)
+{
+}
+
+static inline void debug_show_held_locks(struct task_struct *task)
+{
+}
+
+static inline void
+debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+}
+
+static inline void
+debug_check_no_locks_held(struct task_struct *task)
+{
+}
+#endif
+
+#endif
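
For illustration (not part of the patch; example_check() is hypothetical),
DEBUG_LOCKS_WARN_ON() warns once, turns further lock debugging off, and
reports whether it fired:

static int example_check(int nesting_depth)
{
	if (DEBUG_LOCKS_WARN_ON(nesting_depth < 0))
		return -EINVAL;	/* state corrupt; lock debugging is now disabled */
	return 0;
}
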
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 272010a6078a..c94d8f1d62e5 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -44,7 +44,7 @@ enum dma_event {
 };

 /**
- * typedef dma_cookie_t
+ * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
  */
@@ -80,14 +80,14 @@ struct dma_chan_percpu {

 /**
  * struct dma_chan - devices supply DMA channels, clients use them
- * @client: ptr to the client user of this chan, will be NULL when unused
- * @device: ptr to the dma device who supplies this channel, always !NULL
+ * @client: ptr to the client user of this chan, will be %NULL when unused
+ * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
- * @chan_id:
- * @class_dev:
+ * @chan_id: channel ID for sysfs
+ * @class_dev: class device for sysfs
  * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref:
- * @rcu:
+ * @slow_ref: indicates that the DMA channel is free
+ * @rcu: the DMA channel's RCU head
  * @client_node: used to add this to the client chan list
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
@@ -162,10 +162,17 @@ struct dma_client {
  * @chancnt: how many DMA channels are supported
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
- * @refcount:
- * @done:
- * @dev_id:
- * Other func ptrs: used to make use of this device's capabilities
+ * @refcount: reference count
+ * @done: IO completion struct
+ * @dev_id: unique device ID
+ * @device_alloc_chan_resources: allocate resources and return the
+ *	number of allocated descriptors
+ * @device_free_chan_resources: release DMA channel's resources
+ * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
+ * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
+ * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
+ * @device_memcpy_complete: poll the status of an IOAT DMA transaction
+ * @device_memcpy_issue_pending: push appended descriptors to hardware
  */
 struct dma_device {

@@ -211,7 +218,7 @@ void dma_async_client_chan_request(struct dma_client *client,
  * Both @dest and @src must be mappable to a bus address according to the
  * DMA mapping API rules for streaming mappings.
  * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages)
+ * user space pages).
  */
 static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len)
@@ -225,7 +232,7 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 }

 /**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
  * @chan: DMA channel to offload copy to
  * @page: destination page
  * @offset: offset in page to copy to
@@ -250,18 +257,18 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
 }

 /**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
  * @chan: DMA channel to offload copy to
- * @dest_page: destination page
+ * @dest_pg: destination page
  * @dest_off: offset in page to copy to
- * @src_page: source page
+ * @src_pg: source page
  * @src_off: offset in page to copy from
  * @len: length
  *
  * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
  * address according to the DMA mapping API rules for streaming mappings.
  * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages)
+ * (kernel memory or locked user space pages).
  */
 static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
 	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
@@ -278,7 +285,7 @@ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,

 /**
  * dma_async_memcpy_issue_pending - flush pending copies to HW
- * @chan:
+ * @chan: target DMA channel
  *
  * This allows drivers to push copies to HW in batches,
  * reducing MMIO writes where possible.
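
A sketch of the async memcpy API documented above (not part of the patch;
example_copy() is hypothetical, and a real caller would rarely busy-wait):

static int example_copy(struct dma_chan *chan, void *dst, void *src, size_t len)
{
	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (cookie < 0)
		return cookie;	/* negative cookies are error codes */

	dma_async_memcpy_issue_pending(chan);
	while (dma_async_memcpy_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();
	return 0;
}
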
diff --git a/include/linux/dvb/Kbuild b/include/linux/dvb/Kbuild
new file mode 100644
index 000000000000..63973af72fd5
--- /dev/null
+++ b/include/linux/dvb/Kbuild
@@ -0,0 +1,2 @@
+header-y += ca.h frontend.h net.h osd.h version.h
+unifdef-y := audio.h dmx.h video.h
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e04a5cfe874f..134b32068246 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -436,6 +436,21 @@ struct block_device {
 };

 /*
+ * bdev->bd_mutex nesting subclasses for the lock validator:
+ *
+ * 0: normal
+ * 1: 'whole'
+ * 2: 'partition'
+ */
+enum bdev_bd_mutex_lock_class
+{
+	BD_MUTEX_NORMAL,
+	BD_MUTEX_WHOLE,
+	BD_MUTEX_PARTITION
+};
+
+
+/*
  * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
  * radix trees
  */
@@ -543,6 +558,25 @@ struct inode {
 };

 /*
+ * inode->i_mutex nesting subclasses for the lock validator:
+ *
+ * 0: the object of the current VFS operation
+ * 1: parent
+ * 2: child/target
+ * 3: quota file
+ *
+ * The locking order between these classes is
+ * parent -> child -> normal -> quota
+ */
+enum inode_i_mutex_lock_class
+{
+	I_MUTEX_NORMAL,
+	I_MUTEX_PARENT,
+	I_MUTEX_CHILD,
+	I_MUTEX_QUOTA
+};
+
+/*
  * NOTE: in a 32bit arch with a preemptable kernel and
  * an UP compile the i_size_read/write must be atomic
  * with respect to the local cpu (unlike with preempt disabled),
@@ -1276,6 +1310,8 @@ struct file_system_type {
 	struct module *owner;
 	struct file_system_type * next;
 	struct list_head fs_supers;
+	struct lock_class_key s_lock_key;
+	struct lock_class_key s_umount_key;
 };

 extern int get_sb_bdev(struct file_system_type *fs_type,
@@ -1404,6 +1440,7 @@ extern void bd_set_size(struct block_device *, loff_t size);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern struct block_device *open_by_devnum(dev_t, unsigned);
+extern struct block_device *open_partition_by_devnum(dev_t, unsigned);
 extern const struct file_operations def_blk_fops;
 extern const struct address_space_operations def_blk_aops;
 extern const struct file_operations def_chr_fops;
@@ -1414,6 +1451,7 @@ extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
 extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
 extern int blkdev_get(struct block_device *, mode_t, unsigned);
 extern int blkdev_put(struct block_device *);
+extern int blkdev_put_partition(struct block_device *);
 extern int bd_claim(struct block_device *, void *);
 extern void bd_release(struct block_device *);
 #ifdef CONFIG_SYSFS
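
A minimal sketch of the i_mutex annotation these subclasses enable (not part
of the patch; example_lock_parent_child() is hypothetical):

static void example_lock_parent_child(struct inode *dir, struct inode *inode)
{
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
	/* ... VFS operation touching both dir and inode ... */
	mutex_unlock(&inode->i_mutex);
	mutex_unlock(&dir->i_mutex);
}
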
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 114ae583cca9..50d8b5744cf6 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -3,6 +3,7 @@

 #include <linux/preempt.h>
 #include <linux/smp_lock.h>
+#include <linux/lockdep.h>
 #include <asm/hardirq.h>
 #include <asm/system.h>

@@ -86,9 +87,6 @@ extern void synchronize_irq(unsigned int irq);
 # define synchronize_irq(irq)	barrier()
 #endif

-#define nmi_enter()		irq_enter()
-#define nmi_exit()		sub_preempt_count(HARDIRQ_OFFSET)
-
 struct task_struct;

 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
@@ -97,12 +95,35 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif

+/*
+ * It is safe to do non-atomic ops on ->hardirq_context,
+ * because NMI handlers may not preempt and the ops are
+ * always balanced, so the interrupted value of ->hardirq_context
+ * will always be restored.
+ */
 #define irq_enter()					\
 	do {						\
 		account_system_vtime(current);		\
 		add_preempt_count(HARDIRQ_OFFSET);	\
+		trace_hardirq_enter();			\
+	} while (0)
+
+/*
+ * Exit irq context without processing softirqs:
+ */
+#define __irq_exit()					\
+	do {						\
+		trace_hardirq_exit();			\
+		account_system_vtime(current);		\
+		sub_preempt_count(HARDIRQ_OFFSET);	\
 	} while (0)

+/*
+ * Exit irq context and process softirqs if needed:
+ */
 extern void irq_exit(void);

+#define nmi_enter()		do { lockdep_off(); irq_enter(); } while (0)
+#define nmi_exit()		do { __irq_exit(); lockdep_on(); } while (0)
+
 #endif /* LINUX_HARDIRQ_H */
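
For illustration (not part of the patch; example_do_nmi() is hypothetical),
an arch NMI path now brackets its work so lockdep is kept out of NMI
context:

static void example_do_nmi(void)
{
	nmi_enter();	/* lockdep_off() + irq_enter() */
	/* ... NMI handling; must not rely on lockdep-tracked locks ... */
	nmi_exit();	/* __irq_exit() (no softirq processing) + lockdep_on() */
}
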
diff --git a/include/linux/hdlc/Kbuild b/include/linux/hdlc/Kbuild
new file mode 100644
index 000000000000..1fb26448faa9
--- /dev/null
+++ b/include/linux/hdlc/Kbuild
@@ -0,0 +1 @@
+header-y += ioctl.h
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 07d7305f131e..e4bccbcc2750 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -91,6 +91,7 @@ struct hrtimer_base {
 	ktime_t			(*get_softirq_time)(void);
 	struct hrtimer		*curr_timer;
 	ktime_t			softirq_time;
+	struct lock_class_key lock_key;
 };

 /*
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 285316c836b5..dc7abef10965 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1359,7 +1359,7 @@ extern struct semaphore ide_cfg_sem;
  * ide_drive_t->hwif: constant, no locking
  */

-#define local_irq_set(flags)	do { local_save_flags((flags)); local_irq_enable(); } while (0)
+#define local_irq_set(flags)	do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0)

 extern struct bus_type ide_bus_type;

diff --git a/include/linux/idr.h b/include/linux/idr.h
index f559a719dbe8..826803449db7 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -66,7 +66,7 @@ struct idr {
 	.id_free	= NULL,					\
 	.layers		= 0,					\
 	.id_free_cnt	= 0,					\
-	.lock		= SPIN_LOCK_UNLOCKED,			\
+	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
 }
 #define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

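
For illustration (not part of the patch; the example_* names are
hypothetical), a static IDR now picks up a named lock class from the
initializer:

static DEFINE_IDR(example_idr);	/* .lock gets the "example_idr.lock" name */

static int example_store(void *ptr)
{
	int id;

	if (!idr_pre_get(&example_idr, GFP_KERNEL))
		return -ENOMEM;
	if (idr_get_new(&example_idr, ptr, &id))
		return -EAGAIN;
	return id;
}
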
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 3a256957fb56..60aac2cea0cf 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -3,6 +3,8 @@
3 3
4#include <linux/file.h> 4#include <linux/file.h>
5#include <linux/rcupdate.h> 5#include <linux/rcupdate.h>
6#include <linux/irqflags.h>
7#include <linux/lockdep.h>
6 8
7#define INIT_FDTABLE \ 9#define INIT_FDTABLE \
8{ \ 10{ \
@@ -21,7 +23,7 @@
 	.count		= ATOMIC_INIT(1),		\
 	.fdt		= &init_files.fdtab,		\
 	.fdtab		= INIT_FDTABLE,			\
-	.file_lock	= SPIN_LOCK_UNLOCKED,		\
+	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock), \
 	.next_fd	= 0,				\
 	.close_on_exec_init = { { 0, } },		\
 	.open_fds_init	= { { 0, } },			\
@@ -36,7 +38,7 @@
 	.user_id	= 0,				\
 	.next		= NULL,				\
 	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \
-	.ctx_lock	= SPIN_LOCK_UNLOCKED,		\
+	.ctx_lock	= __SPIN_LOCK_UNLOCKED(name.ctx_lock), \
 	.reqs_active	= 0U,				\
 	.max_reqs	= ~0U,				\
 }
@@ -48,7 +50,7 @@
 	.mm_users	= ATOMIC_INIT(2),		\
 	.mm_count	= ATOMIC_INIT(1),		\
 	.mmap_sem	= __RWSEM_INITIALIZER(name.mmap_sem), \
-	.page_table_lock = SPIN_LOCK_UNLOCKED,		\
+	.page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
 	.mmlist		= LIST_HEAD_INIT(name.mmlist),	\
 	.cpu_vm_mask	= CPU_MASK_ALL,			\
 }
@@ -69,7 +71,7 @@
 #define INIT_SIGHAND(sighand) {					\
 	.count		= ATOMIC_INIT(1),			\
 	.action		= { { { .sa_handler = NULL, } }, },	\
-	.siglock	= SPIN_LOCK_UNLOCKED,			\
+	.siglock	= __SPIN_LOCK_UNLOCKED(sighand.siglock),	\
 }

 extern struct group_info init_groups;
@@ -119,12 +121,13 @@ extern struct group_info init_groups;
 	.list = LIST_HEAD_INIT(tsk.pending.list),		\
 	.signal = {{0}}},					\
 	.blocked	= {{0}},				\
-	.alloc_lock	= SPIN_LOCK_UNLOCKED,			\
+	.alloc_lock	= __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),	\
 	.journal_info	= NULL,					\
 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),	\
 	.fs_excl	= ATOMIC_INIT(0),			\
 	.pi_lock	= SPIN_LOCK_UNLOCKED,			\
-	INIT_RT_MUTEXES(tsk)					\
+	INIT_TRACE_IRQFLAGS					\
+	INIT_LOCKDEP						\
 }

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index da3e0dbe61d4..d5afee95fd43 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -10,6 +10,7 @@
 #include <linux/irqreturn.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
+#include <linux/irqflags.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -80,12 +81,64 @@ extern int request_irq(unsigned int,
 			unsigned long, const char *, void *);
 extern void free_irq(unsigned int, void *);

+/*
+ * On lockdep we don't want to enable hardirqs in hardirq
+ * context. Use local_irq_enable_in_hardirq() to annotate
+ * kernel code that has to do this nevertheless (pretty much
+ * the only valid case is for old/broken hardware that is
+ * insanely slow).
+ *
+ * NOTE: in theory this might break fragile code that relies
+ * on hardirq delivery - in practice we don't seem to have such
+ * places left. So the only effect should be slightly increased
+ * irqs-off latencies.
+ */
+#ifdef CONFIG_LOCKDEP
+# define local_irq_enable_in_hardirq()	do { } while (0)
+#else
+# define local_irq_enable_in_hardirq()	local_irq_enable()
+#endif

 #ifdef CONFIG_GENERIC_HARDIRQS
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);

+/*
+ * Special lockdep variants of irq disabling/enabling.
+ * These should be used for locking constructs that know that
+ * a particular irq context is disabled, and which are the only
+ * irq-context users of a lock, so that it's safe to take the
+ * lock in the irq-disabled section without disabling hardirqs.
+ *
+ * On !CONFIG_LOCKDEP they are equivalent to the normal
+ * irq disable/enable methods.
+ */
+static inline void disable_irq_nosync_lockdep(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void disable_irq_lockdep(unsigned int irq)
+{
+	disable_irq(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void enable_irq_lockdep(unsigned int irq)
+{
+#ifdef CONFIG_LOCKDEP
+	local_irq_enable();
+#endif
+	enable_irq(irq);
+}
+
 /* IRQ wakeup (PM) control: */
 extern int set_irq_wake(unsigned int irq, unsigned int on);

@@ -99,7 +152,19 @@ static inline int disable_irq_wake(unsigned int irq)
 	return set_irq_wake(irq, 0);
 }

-#endif
+#else /* !CONFIG_GENERIC_HARDIRQS */
+/*
+ * NOTE: non-genirq architectures, if they want to support the lock
+ * validator need to define the methods below in their asm/irq.h
+ * files, under an #ifdef CONFIG_LOCKDEP section.
+ */
+# ifndef CONFIG_LOCKDEP
+#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
+#  define disable_irq_lockdep(irq)		disable_irq(irq)
+#  define enable_irq_lockdep(irq)		enable_irq(irq)
+# endif
+
+#endif /* CONFIG_GENERIC_HARDIRQS */

 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
@@ -135,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
 #define save_and_cli(x)	save_and_cli(&x)
 #endif /* CONFIG_SMP */

-/* SoftIRQ primitives.  */
-#define local_bh_disable() \
-		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
-		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);

 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes
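
A sketch of the lockdep irq variants above (not part of the patch;
example_quiesce() is hypothetical): a driver whose lock is only ever taken
from one IRQ handler can quiesce that IRQ and tell lockdep that local
interrupts are conceptually off too:

static void example_quiesce(unsigned int irq)
{
	disable_irq_lockdep(irq);	/* disable_irq() + local_irq_disable() under lockdep */
	/* ... touch state otherwise raced against by this IRQ ... */
	enable_irq_lockdep(irq);
}
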
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 87a9fc039b47..5612dfeeae50 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -55,6 +55,7 @@ struct resource_list {
 #define IORESOURCE_IRQ_LOWEDGE		(1<<1)
 #define IORESOURCE_IRQ_HIGHLEVEL	(1<<2)
 #define IORESOURCE_IRQ_LOWLEVEL	(1<<3)
+#define IORESOURCE_IRQ_SHAREABLE	(1<<4)

 /* ISA PnP DMA specific bits (IORESOURCE_BITS) */
 #define IORESOURCE_DMA_TYPE_MASK	(3<<0)
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
new file mode 100644
index 000000000000..412e025bc5c7
--- /dev/null
+++ b/include/linux/irqflags.h
@@ -0,0 +1,96 @@
+/*
+ * include/linux/irqflags.h
+ *
+ * IRQ flags tracing: follow the state of the hardirq and softirq flags and
+ * provide callbacks for transitions between ON and OFF states.
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() macros from the lowlevel headers.
+ */
+#ifndef _LINUX_TRACE_IRQFLAGS_H
+#define _LINUX_TRACE_IRQFLAGS_H
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+  extern void trace_hardirqs_on(void);
+  extern void trace_hardirqs_off(void);
+  extern void trace_softirqs_on(unsigned long ip);
+  extern void trace_softirqs_off(unsigned long ip);
+# define trace_hardirq_context(p)	((p)->hardirq_context)
+# define trace_softirq_context(p)	((p)->softirq_context)
+# define trace_hardirqs_enabled(p)	((p)->hardirqs_enabled)
+# define trace_softirqs_enabled(p)	((p)->softirqs_enabled)
+# define trace_hardirq_enter()	do { current->hardirq_context++; } while (0)
+# define trace_hardirq_exit()	do { current->hardirq_context--; } while (0)
+# define trace_softirq_enter()	do { current->softirq_context++; } while (0)
+# define trace_softirq_exit()	do { current->softirq_context--; } while (0)
+# define INIT_TRACE_IRQFLAGS	.softirqs_enabled = 1,
+#else
+# define trace_hardirqs_on()		do { } while (0)
+# define trace_hardirqs_off()		do { } while (0)
+# define trace_softirqs_on(ip)		do { } while (0)
+# define trace_softirqs_off(ip)		do { } while (0)
+# define trace_hardirq_context(p)	0
+# define trace_softirq_context(p)	0
+# define trace_hardirqs_enabled(p)	0
+# define trace_softirqs_enabled(p)	0
+# define trace_hardirq_enter()		do { } while (0)
+# define trace_hardirq_exit()		do { } while (0)
+# define trace_softirq_enter()		do { } while (0)
+# define trace_softirq_exit()		do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+
+#include <asm/irqflags.h>
+
+#define local_irq_enable() \
+	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
+#define local_irq_disable() \
+	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+#define local_irq_save(flags) \
+	do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
+
+#define local_irq_restore(flags)				\
+	do {							\
+		if (raw_irqs_disabled_flags(flags)) {		\
+			raw_local_irq_restore(flags);		\
+			trace_hardirqs_off();			\
+		} else {					\
+			trace_hardirqs_on();			\
+			raw_local_irq_restore(flags);		\
+		}						\
+	} while (0)
+#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
+/*
+ * The local_irq_*() APIs are equal to the raw_local_irq*()
+ * if !TRACE_IRQFLAGS.
+ */
+# define raw_local_irq_disable()	local_irq_disable()
+# define raw_local_irq_enable()		local_irq_enable()
+# define raw_local_irq_save(flags)	local_irq_save(flags)
+# define raw_local_irq_restore(flags)	local_irq_restore(flags)
+#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+#define safe_halt()						\
+	do {							\
+		trace_hardirqs_on();				\
+		raw_safe_halt();				\
+	} while (0)
+
+#define local_save_flags(flags)	raw_local_save_flags(flags)
+
+#define irqs_disabled()						\
+({								\
+	unsigned long flags;					\
+								\
+	raw_local_save_flags(flags);				\
+	raw_irqs_disabled_flags(flags);				\
+})
+
+#define irqs_disabled_flags(flags)	raw_irqs_disabled_flags(flags)
+#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#endif
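
A sketch of the resulting semantics (not part of the patch;
example_critical_section() is hypothetical) - with TRACE_IRQFLAGS_SUPPORT
the wrappers keep the trace callbacks ordered around the raw operations:

static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* raw save+disable first, then trace "off" */
	/* ... */
	local_irq_restore(flags);	/* traces "on" only if flags would re-enable */
}
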
diff --git a/include/linux/isdn/Kbuild b/include/linux/isdn/Kbuild
new file mode 100644
index 000000000000..991cdb29ab2e
--- /dev/null
+++ b/include/linux/isdn/Kbuild
@@ -0,0 +1 @@
+header-y += capicmd.h
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 54e2549f96ba..849043ce4ed6 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -57,10 +57,25 @@ do { \
 #define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr)
 #endif

-#define print_symbol(fmt, addr) \
-do { \
-	__check_printsym_format(fmt, ""); \
-	__print_symbol(fmt, addr); \
+static inline void print_symbol(const char *fmt, unsigned long addr)
+{
+	__check_printsym_format(fmt, "");
+	__print_symbol(fmt, (unsigned long)
+		       __builtin_extract_return_addr((void *)addr));
+}
+
+#ifndef CONFIG_64BIT
+#define print_ip_sym(ip)		\
+do {					\
+	printk("[<%08lx>]", ip);	\
+	print_symbol(" %s\n", ip);	\
 } while(0)
+#else
+#define print_ip_sym(ip)		\
+do {					\
+	printk("[<%016lx>]", ip);	\
+	print_symbol(" %s\n", ip);	\
+} while(0)
+#endif

 #endif /*_LINUX_KALLSYMS_H*/
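
For illustration (not part of the patch; example_report_caller() is
hypothetical), print_ip_sym() pairs an address with its symbolic name:

static void example_report_caller(void)
{
	unsigned long ip = _RET_IP_;	/* from <linux/debug_locks.h> */

	print_ip_sym(ip);	/* e.g. "[<ffffffff8023a1b2>] some_caller+0x12/0x40" */
}
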
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
new file mode 100644
index 000000000000..316e0fb8d7b1
--- /dev/null
+++ b/include/linux/lockdep.h
@@ -0,0 +1,353 @@
+/*
+ * Runtime locking correctness validator
+ *
+ *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * see Documentation/lockdep-design.txt for more details.
+ */
+#ifndef __LINUX_LOCKDEP_H
+#define __LINUX_LOCKDEP_H
+
+#include <linux/linkage.h>
+#include <linux/list.h>
+#include <linux/debug_locks.h>
+#include <linux/stacktrace.h>
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit
+{
+	LOCK_USED = 0,
+	LOCK_USED_IN_HARDIRQ,
+	LOCK_USED_IN_SOFTIRQ,
+	LOCK_ENABLED_SOFTIRQS,
+	LOCK_ENABLED_HARDIRQS,
+	LOCK_USED_IN_HARDIRQ_READ,
+	LOCK_USED_IN_SOFTIRQ_READ,
+	LOCK_ENABLED_SOFTIRQS_READ,
+	LOCK_ENABLED_HARDIRQS_READ,
+	LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define LOCKF_USED			(1 << LOCK_USED)
+#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
+#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
+#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
+
+#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
+#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
+#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
+
+#define LOCKF_ENABLED_IRQS_READ \
+		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
+#define MAX_LOCKDEP_SUBCLASSES		8UL
+
+/*
+ * Lock-classes are keyed via unique addresses, by embedding the
+ * lockclass-key into the kernel (or module) .data section. (For
+ * static locks we use the lock address itself as the key.)
+ */
+struct lockdep_subclass_key {
+	char __one_byte;
+} __attribute__ ((__packed__));
+
+struct lock_class_key {
+	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
+};
+
+/*
+ * The lock-class itself:
+ */
+struct lock_class {
+	/*
+	 * class-hash:
+	 */
+	struct list_head		hash_entry;
+
+	/*
+	 * global list of all lock-classes:
+	 */
+	struct list_head		lock_entry;
+
+	struct lockdep_subclass_key	*key;
+	unsigned int			subclass;
+
+	/*
+	 * IRQ/softirq usage tracking bits:
+	 */
+	unsigned long			usage_mask;
+	struct stack_trace		usage_traces[LOCK_USAGE_STATES];
+
+	/*
+	 * These fields represent a directed graph of lock dependencies,
+	 * to every node we attach a list of "forward" and a list of
+	 * "backward" graph nodes.
+	 */
+	struct list_head		locks_after, locks_before;
+
+	/*
+	 * Generation counter, when doing certain classes of graph walking,
+	 * to ensure that we check one node only once:
+	 */
+	unsigned int			version;
+
+	/*
+	 * Statistics counter:
+	 */
+	unsigned long			ops;
+
+	const char			*name;
+	int				name_version;
+};
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+	struct lock_class_key		*key;
+	struct lock_class		*class[MAX_LOCKDEP_SUBCLASSES];
+	const char			*name;
+};
+
+/*
+ * Every lock has a list of other locks that were taken after it.
+ * We only grow the list, never remove from it:
+ */
+struct lock_list {
+	struct list_head		entry;
+	struct lock_class		*class;
+	struct stack_trace		trace;
+};
+
+/*
+ * We record lock dependency chains, so that we can cache them:
+ */
+struct lock_chain {
+	struct list_head		entry;
+	u64				chain_key;
+};
+
+struct held_lock {
+	/*
+	 * One-way hash of the dependency chain up to this point. We
+	 * hash the hashes step by step as the dependency chain grows.
+	 *
+	 * We use it for dependency-caching and we skip detection
+	 * passes and dependency-updates if there is a cache-hit, so
+	 * it is absolutely critical for 100% coverage of the validator
+	 * to have a unique key value for every unique dependency path
+	 * that can occur in the system, to make a unique hash value
+	 * as likely as possible - hence the 64-bit width.
+	 *
+	 * The task struct holds the current hash value (initialized
+	 * with zero), here we store the previous hash value:
+	 */
+	u64				prev_chain_key;
+	struct lock_class		*class;
+	unsigned long			acquire_ip;
+	struct lockdep_map		*instance;
+
+	/*
+	 * The lock-stack is unified in that the lock chains of interrupt
+	 * contexts nest on top of process context chains, but we 'separate'
+	 * the hashes by starting with 0 if we cross into an interrupt
+	 * context, and we also do not add cross-context lock
+	 * dependencies - the lock usage graph walking covers that area
+	 * anyway, and we'd just unnecessarily increase the number of
+	 * dependencies otherwise. [Note: hardirq and softirq contexts
+	 * are separated from each other too.]
+	 *
+	 * The following field is used to detect when we cross into an
+	 * interrupt context:
+	 */
+	int				irq_context;
+	int				trylock;
+	int				read;
+	int				check;
+	int				hardirqs_off;
+};
+
+/*
+ * Initialization, self-test and debugging-output methods:
+ */
+extern void lockdep_init(void);
+extern void lockdep_info(void);
+extern void lockdep_reset(void);
+extern void lockdep_reset_lock(struct lockdep_map *lock);
+extern void lockdep_free_key_range(void *start, unsigned long size);
+
+extern void lockdep_off(void);
+extern void lockdep_on(void);
+extern int lockdep_internal(void);
+
+/*
+ * These methods are used by specific locking variants (spinlocks,
+ * rwlocks, mutexes and rwsems) to pass init/acquire/release events
+ * to lockdep:
+ */
+
+extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
+			     struct lock_class_key *key);
+
+/*
+ * Reinitialize a lock key - for cases where there is special locking or
+ * special initialization of locks so that the validator gets the scope
+ * of dependencies wrong: they are either too broad (they need a class-split)
+ * or they are too narrow (they suffer from a false class-split):
+ */
+#define lockdep_set_class(lock, key) \
+		lockdep_init_map(&(lock)->dep_map, #key, key)
+#define lockdep_set_class_and_name(lock, key, name) \
+		lockdep_init_map(&(lock)->dep_map, name, key)
+
+/*
+ * Acquire a lock.
+ *
+ * Values for "read":
+ *
+ *   0: exclusive (write) acquire
+ *   1: read-acquire (no recursion allowed)
+ *   2: read-acquire with same-instance recursion allowed
+ *
+ * Values for check:
+ *
+ *   0: disabled
+ *   1: simple checks (freeing, held-at-exit-time, etc.)
+ *   2: full validation
+ */
+extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+			 int trylock, int read, int check, unsigned long ip);
+
+extern void lock_release(struct lockdep_map *lock, int nested,
+			 unsigned long ip);
+
+# define INIT_LOCKDEP				.lockdep_recursion = 0,
+
+#else /* !LOCKDEP */
+
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+
+static inline int lockdep_internal(void)
+{
+	return 0;
+}
+
+# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
+# define lock_release(l, n, i)			do { } while (0)
+# define lockdep_init()				do { } while (0)
+# define lockdep_info()				do { } while (0)
+# define lockdep_init_map(lock, name, key)	do { (void)(key); } while (0)
+# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
+# define lockdep_set_class_and_name(lock, key, name) \
+		do { (void)(key); } while (0)
+# define INIT_LOCKDEP
+# define lockdep_reset()		do { debug_locks = 1; } while (0)
+# define lockdep_free_key_range(start, size)	do { } while (0)
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+#endif /* !LOCKDEP */
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+extern void early_init_irq_lock_class(void);
+#else
+# define early_init_irq_lock_class()		do { } while (0)
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void early_boot_irqs_off(void);
+extern void early_boot_irqs_on(void);
+#else
+# define early_boot_irqs_off()			do { } while (0)
+# define early_boot_irqs_on()			do { } while (0)
+#endif
+
+/*
+ * For trivial one-depth nesting of a lock-class, the following
+ * global define can be used. (Subsystems with multiple levels
+ * of nesting should define their own lock-nesting subclasses.)
+ */
+#define SINGLE_DEPTH_NESTING			1
+
+/*
+ * Map the dependency ops to NOP or to real lockdep ops, depending
+ * on the per lock-class debug mode:
+ */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+# else
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define spin_release(l, n, i)			lock_release(l, n, i)
+#else
+# define spin_acquire(l, s, t, i)		do { } while (0)
+# define spin_release(l, n, i)			do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
+# else
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
+# endif
+# define rwlock_release(l, n, i)		lock_release(l, n, i)
+#else
+# define rwlock_acquire(l, s, t, i)		do { } while (0)
+# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
+# define rwlock_release(l, n, i)		do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+# else
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define mutex_release(l, n, i)			lock_release(l, n, i)
+#else
+# define mutex_acquire(l, s, t, i)		do { } while (0)
+# define mutex_release(l, n, i)			do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
+# else
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
+# endif
+# define rwsem_release(l, n, i)			lock_release(l, n, i)
+#else
+# define rwsem_acquire(l, s, t, i)		do { } while (0)
+# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
+# define rwsem_release(l, n, i)			do { } while (0)
+#endif
+
+#endif /* __LINUX_LOCKDEP_H */
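
A minimal sketch of the class-split annotation described above (not part of
the patch; the example_* names are hypothetical). The key must live in
static storage, since its address is what identifies the class:

static struct lock_class_key example_lock_key;

static void example_init_lock(spinlock_t *lock)
{
	spin_lock_init(lock);
	/* give this instance its own class instead of the default one */
	lockdep_set_class(lock, &example_lock_key);
}
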
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 75179529e399..990957e0929f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -14,6 +14,7 @@
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
 #include <linux/mutex.h>
+#include <linux/debug_locks.h>

 struct mempolicy;
 struct anon_vma;
@@ -1034,13 +1035,6 @@ static inline void vm_stat_account(struct mm_struct *mm,
 }
 #endif /* CONFIG_PROC_FS */

-static inline void
-debug_check_no_locks_freed(const void *from, unsigned long len)
-{
-	mutex_debug_check_no_locks_freed(from, len);
-	rt_mutex_debug_check_no_locks_freed(from, len);
-}
-
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 27e748eb72b0..656b588a9f96 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -150,6 +150,10 @@ struct zone {
 	unsigned long		lowmem_reserve[MAX_NR_ZONES];

 #ifdef CONFIG_NUMA
+	/*
+	 * zone reclaim becomes active if more unmapped pages exist.
+	 */
+	unsigned long		min_unmapped_ratio;
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
@@ -414,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
 			void __user *, size_t *, loff_t *);
 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
 			void __user *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+			struct file *, void __user *, size_t *, loff_t *);

 #include <linux/topology.h>
 /* Returns the number of the current Node. */
diff --git a/include/linux/module.h b/include/linux/module.h
index 9e9dc7c24d95..d06c74fb8c26 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -358,6 +358,7 @@ static inline int module_is_live(struct module *mod)
 /* Is this address in a module? (second is with no locks, for oops) */
 struct module *module_text_address(unsigned long addr);
 struct module *__module_text_address(unsigned long addr);
+int is_module_address(unsigned long addr);

 /* Returns module and fills in value, defined and namebuf, or NULL if
    symnum out of range. */
@@ -496,6 +497,11 @@ static inline struct module *__module_text_address(unsigned long addr)
496 return NULL; 497 return NULL;
497} 498}
498 499
500static inline int is_module_address(unsigned long addr)
501{
502 return 0;
503}
504
499/* Get/put a kernel symbol (calls should be symmetric) */ 505/* Get/put a kernel symbol (calls should be symmetric) */
500#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) 506#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
501#define symbol_put(x) do { } while(0) 507#define symbol_put(x) do { } while(0)
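
is_module_address() gives oops and debug paths a lock-free predicate for "does this address live in module space?", with the !CONFIG_MODULES stub above pinned to 0. A self-contained sketch of that contract, where a single fixed range stands in for the kernel's walk over loaded modules (the addresses are invented):

#include <stdio.h>

/* Hypothetical module text/data region. */
static unsigned long mod_start = 0xa0000000UL;
static unsigned long mod_end   = 0xa0004000UL;

/* Nonzero if addr lies inside a module, 0 otherwise -- the same
 * yes/no contract as the declaration above. */
static int is_module_address(unsigned long addr)
{
	return addr >= mod_start && addr < mod_end;
}

int main(void)
{
	unsigned long addr = 0xa0001000UL;

	printf("%#lx %s a module address\n", addr,
	       is_module_address(addr) ? "is" : "is not");
	return 0;
}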
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 7a7fbe87fef0..1221b7c44158 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -19,21 +19,21 @@
19 19
20/** 20/**
21 * struct nand_bbt_descr - bad block table descriptor 21 * struct nand_bbt_descr - bad block table descriptor
22 * @param options options for this descriptor 22 * @options: options for this descriptor
23 * @param pages the page(s) where we find the bbt, used with 23 * @pages: the page(s) where we find the bbt, used with
24 * option BBT_ABSPAGE when bbt is searched, 24 * option BBT_ABSPAGE when bbt is searched,
25 * then we store the found bbts pages here. 25 * then we store the found bbts pages here.
 26 * It's an array and supports up to 8 chips now 26 * It's an array and supports up to 8 chips now
27 * @param offs offset of the pattern in the oob area of the page 27 * @offs: offset of the pattern in the oob area of the page
28 * @param veroffs offset of the bbt version counter in the oob are of the page 28 * @veroffs: offset of the bbt version counter in the oob area of the page
29 * @param version version read from the bbt page during scan 29 * @version: version read from the bbt page during scan
30 * @param len length of the pattern, if 0 no pattern check is performed 30 * @len: length of the pattern, if 0 no pattern check is performed
31 * @param maxblocks maximum number of blocks to search for a bbt. This number of 31 * @maxblocks: maximum number of blocks to search for a bbt. This
32 * blocks is reserved at the end of the device 32 * number of blocks is reserved at the end of the device
33 * where the tables are written. 33 * where the tables are written.
34 * @param reserved_block_code if non-0, this pattern denotes a reserved 34 * @reserved_block_code: if non-0, this pattern denotes a reserved
35 * (rather than bad) block in the stored bbt 35 * (rather than bad) block in the stored bbt
36 * @param pattern pattern to identify bad block table or factory marked 36 * @pattern: pattern to identify bad block table or factory marked
37 * good / bad blocks, can be NULL, if len = 0 37 * good / bad blocks, can be NULL, if len = 0
38 * 38 *
39 * Descriptor for the bad block table marker and the descriptor for the 39 * Descriptor for the bad block table marker and the descriptor for the
@@ -93,12 +93,15 @@ struct nand_bbt_descr {
93#define ONENAND_BADBLOCK_POS 0 93#define ONENAND_BADBLOCK_POS 0
94 94
95/** 95/**
96 * struct bbt_info - [GENERIC] Bad Block Table data structure 96 * struct bbm_info - [GENERIC] Bad Block Table data structure
97 * @param bbt_erase_shift [INTERN] number of address bits in a bbt entry 97 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
98 * @param badblockpos [INTERN] position of the bad block marker in the oob area 98 * @badblockpos: [INTERN] position of the bad block marker in the oob area
99 * @param bbt [INTERN] bad block table pointer 99 * @options: options for this descriptor
100 * @param badblock_pattern [REPLACEABLE] bad block scan pattern used for initial bad block scan 100 * @bbt: [INTERN] bad block table pointer
101 * @param priv [OPTIONAL] pointer to private bbm date 101 * @isbad_bbt: function to determine if a block is bad
102 * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for
103 * initial bad block scan
 104 * @priv: [OPTIONAL] pointer to private bbm data
102 */ 105 */
103struct bbm_info { 106struct bbm_info {
104 int bbt_erase_shift; 107 int bbt_erase_shift;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9b7a2b525d63..94a443d45258 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -77,11 +77,11 @@ typedef enum {
77 * 77 *
78 * @len: number of bytes to write/read. When a data buffer is given 78 * @len: number of bytes to write/read. When a data buffer is given
79 * (datbuf != NULL) this is the number of data bytes. When 79 * (datbuf != NULL) this is the number of data bytes. When
80 + no data buffer is available this is the number of oob bytes. 80 * no data buffer is available this is the number of oob bytes.
81 * 81 *
82 * @retlen: number of bytes written/read. When a data buffer is given 82 * @retlen: number of bytes written/read. When a data buffer is given
83 * (datbuf != NULL) this is the number of data bytes. When 83 * (datbuf != NULL) this is the number of data bytes. When
84 + no data buffer is available this is the number of oob bytes. 84 * no data buffer is available this is the number of oob bytes.
85 * 85 *
86 * @ooblen: number of oob bytes per page 86 * @ooblen: number of oob bytes per page
87 * @ooboffs: offset of oob data in the oob area (only relevant when 87 * @ooboffs: offset of oob data in the oob area (only relevant when
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 66559272ebcb..0b4cd2fa64aa 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -202,7 +202,7 @@ typedef enum {
202struct nand_chip; 202struct nand_chip;
203 203
204/** 204/**
205 * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independend devices 205 * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
206 * @lock: protection lock 206 * @lock: protection lock
207 * @active: the mtd device which holds the controller currently 207 * @active: the mtd device which holds the controller currently
208 * @wq: wait queue to sleep on if a NAND operation is in progress 208 * @wq: wait queue to sleep on if a NAND operation is in progress
@@ -223,12 +223,15 @@ struct nand_hw_control {
223 * @total: total number of ecc bytes per page 223 * @total: total number of ecc bytes per page
224 * @prepad: padding information for syndrome based ecc generators 224 * @prepad: padding information for syndrome based ecc generators
225 * @postpad: padding information for syndrome based ecc generators 225 * @postpad: padding information for syndrome based ecc generators
226 * @layout: ECC layout control struct pointer
226 * @hwctl: function to control hardware ecc generator. Must only 227 * @hwctl: function to control hardware ecc generator. Must only
 227 * be provided if a hardware ECC is available 228 * be provided if a hardware ECC is available
228 * @calculate: function for ecc calculation or readback from ecc hardware 229 * @calculate: function for ecc calculation or readback from ecc hardware
229 * @correct: function for ecc correction, matching to ecc generator (sw/hw) 230 * @correct: function for ecc correction, matching to ecc generator (sw/hw)
230 * @read_page: function to read a page according to the ecc generator requirements 231 * @read_page: function to read a page according to the ecc generator requirements
231 * @write_page: function to write a page according to the ecc generator requirements 232 * @write_page: function to write a page according to the ecc generator requirements
233 * @read_oob: function to read chip OOB data
234 * @write_oob: function to write chip OOB data
232 */ 235 */
233struct nand_ecc_ctrl { 236struct nand_ecc_ctrl {
234 nand_ecc_modes_t mode; 237 nand_ecc_modes_t mode;
@@ -300,11 +303,15 @@ struct nand_buffers {
 300 * @cmdfunc: [REPLACEABLE] hardware-specific function for writing commands to the chip 303 * @cmdfunc: [REPLACEABLE] hardware-specific function for writing commands to the chip
 301 * @waitfunc: [REPLACEABLE] hardware-specific function for wait on ready 304 * @waitfunc: [REPLACEABLE] hardware-specific function for wait on ready
 302 * @ecc: [BOARDSPECIFIC] ecc control structure 305 * @ecc: [BOARDSPECIFIC] ecc control structure
306 * @buffers: buffer structure for read/write
307 * @hwcontrol: platform-specific hardware control structure
308 * @ops: oob operation operands
303 * @erase_cmd: [INTERN] erase command write function, selectable due to AND support 309 * @erase_cmd: [INTERN] erase command write function, selectable due to AND support
304 * @scan_bbt: [REPLACEABLE] function to scan bad block table 310 * @scan_bbt: [REPLACEABLE] function to scan bad block table
 305 * @chip_delay: [BOARDSPECIFIC] chip-dependent delay for transferring data from array to read regs (tR) 311 * @chip_delay: [BOARDSPECIFIC] chip-dependent delay for transferring data from array to read regs (tR)
306 * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress 312 * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress
307 * @state: [INTERN] the current state of the NAND device 313 * @state: [INTERN] the current state of the NAND device
314 * @oob_poi: poison value buffer
308 * @page_shift: [INTERN] number of address bits in a page (column address bits) 315 * @page_shift: [INTERN] number of address bits in a page (column address bits)
309 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock 316 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
310 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry 317 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
@@ -400,7 +407,6 @@ struct nand_chip {
400 407
401/** 408/**
402 * struct nand_flash_dev - NAND Flash Device ID Structure 409 * struct nand_flash_dev - NAND Flash Device ID Structure
403 *
404 * @name: Identify the device type 410 * @name: Identify the device type
405 * @id: device ID code 411 * @id: device ID code
406 * @pagesize: Pagesize in bytes. Either 256 or 512 or 0 412 * @pagesize: Pagesize in bytes. Either 256 or 512 or 0
@@ -519,9 +525,8 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
519 525
520/** 526/**
521 * struct platform_nand_chip - chip level device structure 527 * struct platform_nand_chip - chip level device structure
522 *
523 * @nr_chips: max. number of chips to scan for 528 * @nr_chips: max. number of chips to scan for
524 * @chip_offs: chip number offset 529 * @chip_offset: chip number offset
525 * @nr_partitions: number of partitions pointed to by partitions (or zero) 530 * @nr_partitions: number of partitions pointed to by partitions (or zero)
526 * @partitions: mtd partition list 531 * @partitions: mtd partition list
527 * @chip_delay: R/B delay value in us 532 * @chip_delay: R/B delay value in us
@@ -542,11 +547,10 @@ struct platform_nand_chip {
542 547
543/** 548/**
544 * struct platform_nand_ctrl - controller level device structure 549 * struct platform_nand_ctrl - controller level device structure
545 *
546 * @hwcontrol: platform specific hardware control structure 550 * @hwcontrol: platform specific hardware control structure
547 * @dev_ready: platform specific function to read ready/busy pin 551 * @dev_ready: platform specific function to read ready/busy pin
548 * @select_chip: platform specific chip select function 552 * @select_chip: platform specific chip select function
549 * @priv_data: private data to transport driver specific settings 553 * @priv: private data to transport driver specific settings
550 * 554 *
551 * All fields are optional and depend on the hardware driver requirements 555 * All fields are optional and depend on the hardware driver requirements
552 */ 556 */
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 9ce9a48db444..1f4972155249 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -23,7 +23,7 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
23/* Free resources held by the OneNAND device */ 23/* Free resources held by the OneNAND device */
24extern void onenand_release(struct mtd_info *mtd); 24extern void onenand_release(struct mtd_info *mtd);
25 25
26/** 26/*
27 * onenand_state_t - chip states 27 * onenand_state_t - chip states
28 * Enumeration for OneNAND flash chip state 28 * Enumeration for OneNAND flash chip state
29 */ 29 */
@@ -42,9 +42,9 @@ typedef enum {
42 42
43/** 43/**
44 * struct onenand_bufferram - OneNAND BufferRAM Data 44 * struct onenand_bufferram - OneNAND BufferRAM Data
45 * @param block block address in BufferRAM 45 * @block: block address in BufferRAM
46 * @param page page address in BufferRAM 46 * @page: page address in BufferRAM
47 * @param valid valid flag 47 * @valid: valid flag
48 */ 48 */
49struct onenand_bufferram { 49struct onenand_bufferram {
50 int block; 50 int block;
@@ -54,32 +54,43 @@ struct onenand_bufferram {
54 54
55/** 55/**
56 * struct onenand_chip - OneNAND Private Flash Chip Data 56 * struct onenand_chip - OneNAND Private Flash Chip Data
57 * @param base [BOARDSPECIFIC] address to access OneNAND 57 * @base: [BOARDSPECIFIC] address to access OneNAND
58 * @param chipsize [INTERN] the size of one chip for multichip arrays 58 * @chipsize: [INTERN] the size of one chip for multichip arrays
59 * @param device_id [INTERN] device ID 59 * @device_id: [INTERN] device ID
60 * @param verstion_id [INTERN] version ID 60 * @density_mask: chip density, used for DDP devices
61 * @param options [BOARDSPECIFIC] various chip options. They can partly be set to inform onenand_scan about 61 * @verstion_id: [INTERN] version ID
62 * @param erase_shift [INTERN] number of address bits in a block 62 * @options: [BOARDSPECIFIC] various chip options. They can
63 * @param page_shift [INTERN] number of address bits in a page 63 * partly be set to inform onenand_scan about
64 * @param ppb_shift [INTERN] number of address bits in a pages per block 64 * @erase_shift: [INTERN] number of address bits in a block
65 * @param page_mask [INTERN] a page per block mask 65 * @page_shift: [INTERN] number of address bits in a page
 66 * @param bufferam_index [INTERN] BufferRAM index 66 * @ppb_shift: [INTERN] number of address bits in pages per block
67 * @param bufferam [INTERN] BufferRAM info 67 * @page_mask: [INTERN] a page per block mask
68 * @param readw [REPLACEABLE] hardware specific function for read short 68 * @bufferram_index: [INTERN] BufferRAM index
69 * @param writew [REPLACEABLE] hardware specific function for write short 69 * @bufferram: [INTERN] BufferRAM info
70 * @param command [REPLACEABLE] hardware specific function for writing commands to the chip 70 * @readw: [REPLACEABLE] hardware specific function for read short
71 * @param wait [REPLACEABLE] hardware specific function for wait on ready 71 * @writew: [REPLACEABLE] hardware specific function for write short
72 * @param read_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area 72 * @command: [REPLACEABLE] hardware specific function for writing
73 * @param write_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area 73 * commands to the chip
74 * @param read_word [REPLACEABLE] hardware specific function for read register of OneNAND 74 * @wait: [REPLACEABLE] hardware specific function for wait on ready
75 * @param write_word [REPLACEABLE] hardware specific function for write register of OneNAND 75 * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
76 * @param scan_bbt [REPLACEALBE] hardware specific function for scaning Bad block Table 76 * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
77 * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip 77 * @read_word: [REPLACEABLE] hardware specific function for read
78 * @param wq [INTERN] wait queue to sleep on if a OneNAND operation is in progress 78 * register of OneNAND
79 * @param state [INTERN] the current state of the OneNAND device 79 * @write_word: [REPLACEABLE] hardware specific function for write
80 * @param ecclayout [REPLACEABLE] the default ecc placement scheme 80 * register of OneNAND
81 * @param bbm [REPLACEABLE] pointer to Bad Block Management 81 * @mmcontrol: sync burst read function
82 * @param priv [OPTIONAL] pointer to private chip date 82 * @block_markbad: function to mark a block as bad
 83 * @scan_bbt: [REPLACEABLE] hardware specific function for scanning
84 * Bad block Table
85 * @chip_lock: [INTERN] spinlock used to protect access to this
86 * structure and the chip
87 * @wq: [INTERN] wait queue to sleep on if a OneNAND
88 * operation is in progress
89 * @state: [INTERN] the current state of the OneNAND device
90 * @page_buf: data buffer
91 * @ecclayout: [REPLACEABLE] the default ecc placement scheme
92 * @bbm: [REPLACEABLE] pointer to Bad Block Management
 93 * @priv: [OPTIONAL] pointer to private chip data
83 */ 94 */
84struct onenand_chip { 95struct onenand_chip {
85 void __iomem *base; 96 void __iomem *base;
@@ -147,9 +158,9 @@ struct onenand_chip {
147#define ONENAND_MFR_SAMSUNG 0xec 158#define ONENAND_MFR_SAMSUNG 0xec
148 159
149/** 160/**
150 * struct nand_manufacturers - NAND Flash Manufacturer ID Structure 161 * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure
151 * @param name: Manufacturer name 162 * @name: Manufacturer name
152 * @param id: manufacturer ID code of device. 163 * @id: manufacturer ID code of device.
153*/ 164*/
154struct onenand_manufacturers { 165struct onenand_manufacturers {
155 int id; 166 int id;
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 8b5769f00467..2537285e1064 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -2,22 +2,22 @@
2#define __LINUX_MUTEX_DEBUG_H 2#define __LINUX_MUTEX_DEBUG_H
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <linux/lockdep.h>
5 6
6/* 7/*
7 * Mutexes - debugging helpers: 8 * Mutexes - debugging helpers:
8 */ 9 */
9 10
10#define __DEBUG_MUTEX_INITIALIZER(lockname) \ 11#define __DEBUG_MUTEX_INITIALIZER(lockname) \
11 , .held_list = LIST_HEAD_INIT(lockname.held_list), \ 12 , .magic = &lockname
12 .name = #lockname , .magic = &lockname
13 13
14#define mutex_init(sem) __mutex_init(sem, __FUNCTION__) 14#define mutex_init(mutex) \
15do { \
16 static struct lock_class_key __key; \
17 \
18 __mutex_init((mutex), #mutex, &__key); \
19} while (0)
15 20
16extern void FASTCALL(mutex_destroy(struct mutex *lock)); 21extern void FASTCALL(mutex_destroy(struct mutex *lock));
17 22
18extern void mutex_debug_show_all_locks(void);
19extern void mutex_debug_show_held_locks(struct task_struct *filter);
20extern void mutex_debug_check_no_locks_held(struct task_struct *task);
21extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);
22
23#endif 23#endif
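
The reworked mutex_init() above is the heart of lockdep's classing scheme: the macro plants a static struct lock_class_key inside its own expansion, so every textual init site becomes a distinct lock class, while all mutexes initialised by the same line (say, in a loop) share one. A userspace mock of the trick (the struct and macro mimic the shape, not the kernel internals):

#include <stdio.h>

struct lock_class_key { char dummy; };
struct mutex { struct lock_class_key *key; };

/* Each expansion gets its own static key; its address
 * identifies the init call site. */
#define mutex_init(m)					\
do {							\
	static struct lock_class_key __key;		\
	(m)->key = &__key;				\
} while (0)

int main(void)
{
	struct mutex a, b, arr[2];
	int i;

	mutex_init(&a);			/* site 1 */
	mutex_init(&b);			/* site 2: different class than a */
	for (i = 0; i < 2; i++)
		mutex_init(&arr[i]);	/* site 3: arr[0], arr[1] share a class */

	printf("a=%p b=%p arr0=%p arr1=%p\n", (void *)a.key, (void *)b.key,
	       (void *)arr[0].key, (void *)arr[1].key);
	return 0;
}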
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f1ac507fa20d..27c48daa3183 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/spinlock_types.h> 14#include <linux/spinlock_types.h>
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/lockdep.h>
16 17
17#include <asm/atomic.h> 18#include <asm/atomic.h>
18 19
@@ -50,11 +51,12 @@ struct mutex {
50 struct list_head wait_list; 51 struct list_head wait_list;
51#ifdef CONFIG_DEBUG_MUTEXES 52#ifdef CONFIG_DEBUG_MUTEXES
52 struct thread_info *owner; 53 struct thread_info *owner;
53 struct list_head held_list;
54 unsigned long acquire_ip;
55 const char *name; 54 const char *name;
56 void *magic; 55 void *magic;
57#endif 56#endif
57#ifdef CONFIG_DEBUG_LOCK_ALLOC
58 struct lockdep_map dep_map;
59#endif
58}; 60};
59 61
60/* 62/*
@@ -74,24 +76,34 @@ struct mutex_waiter {
74# include <linux/mutex-debug.h> 76# include <linux/mutex-debug.h>
75#else 77#else
76# define __DEBUG_MUTEX_INITIALIZER(lockname) 78# define __DEBUG_MUTEX_INITIALIZER(lockname)
77# define mutex_init(mutex) __mutex_init(mutex, NULL) 79# define mutex_init(mutex) \
80do { \
81 static struct lock_class_key __key; \
82 \
83 __mutex_init((mutex), #mutex, &__key); \
84} while (0)
78# define mutex_destroy(mutex) do { } while (0) 85# define mutex_destroy(mutex) do { } while (0)
79# define mutex_debug_show_all_locks() do { } while (0) 86#endif
80# define mutex_debug_show_held_locks(p) do { } while (0) 87
81# define mutex_debug_check_no_locks_held(task) do { } while (0) 88#ifdef CONFIG_DEBUG_LOCK_ALLOC
82# define mutex_debug_check_no_locks_freed(from, len) do { } while (0) 89# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
90 , .dep_map = { .name = #lockname }
91#else
92# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
83#endif 93#endif
84 94
85#define __MUTEX_INITIALIZER(lockname) \ 95#define __MUTEX_INITIALIZER(lockname) \
86 { .count = ATOMIC_INIT(1) \ 96 { .count = ATOMIC_INIT(1) \
87 , .wait_lock = SPIN_LOCK_UNLOCKED \ 97 , .wait_lock = SPIN_LOCK_UNLOCKED \
88 , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ 98 , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
89 __DEBUG_MUTEX_INITIALIZER(lockname) } 99 __DEBUG_MUTEX_INITIALIZER(lockname) \
100 __DEP_MAP_MUTEX_INITIALIZER(lockname) }
90 101
91#define DEFINE_MUTEX(mutexname) \ 102#define DEFINE_MUTEX(mutexname) \
92 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) 103 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
93 104
94extern void fastcall __mutex_init(struct mutex *lock, const char *name); 105extern void __mutex_init(struct mutex *lock, const char *name,
106 struct lock_class_key *key);
95 107
96/*** 108/***
97 * mutex_is_locked - is the mutex locked 109 * mutex_is_locked - is the mutex locked
@@ -110,6 +122,13 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
110 */ 122 */
111extern void fastcall mutex_lock(struct mutex *lock); 123extern void fastcall mutex_lock(struct mutex *lock);
112extern int fastcall mutex_lock_interruptible(struct mutex *lock); 124extern int fastcall mutex_lock_interruptible(struct mutex *lock);
125
126#ifdef CONFIG_DEBUG_LOCK_ALLOC
127extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
128#else
129# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
130#endif
131
113/* 132/*
114 * NOTE: mutex_trylock() follows the spin_trylock() convention, 133 * NOTE: mutex_trylock() follows the spin_trylock() convention,
115 * not the down_trylock() convention! 134 * not the down_trylock() convention!
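
mutex_lock_nested() covers the one legitimate case lockdep would otherwise flag: taking two locks of the same class, e.g. a parent and a child object of one type. The subclass argument marks the inner acquisition as intentional, and the #else branch shows it decays to plain mutex_lock() without CONFIG_DEBUG_LOCK_ALLOC. A pthread-backed sketch of that usage (the object type is invented; SINGLE_DEPTH_NESTING follows the kernel's conventional subclass of 1):

#include <pthread.h>
#include <stdio.h>

/* Lockdep-off mapping, as in the #else branch above. */
#define mutex_lock(m)			pthread_mutex_lock(m)
#define mutex_unlock(m)			pthread_mutex_unlock(m)
#define mutex_lock_nested(m, sub)	mutex_lock(m)
#define SINGLE_DEPTH_NESTING		1

struct node { pthread_mutex_t lock; };

/* Parent and child share a lock class; annotate the second
 * acquisition so a checker knows the nesting is deliberate. */
static void lock_pair(struct node *parent, struct node *child)
{
	mutex_lock(&parent->lock);
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
}

int main(void)
{
	struct node dir  = { PTHREAD_MUTEX_INITIALIZER };
	struct node file = { PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&dir, &file);
	printf("parent and child locked in order\n");
	mutex_unlock(&file.lock);
	mutex_unlock(&dir.lock);
	return 0;
}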
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
new file mode 100644
index 000000000000..d06311acd448
--- /dev/null
+++ b/include/linux/netfilter/Kbuild
@@ -0,0 +1,11 @@
1header-y := nf_conntrack_sctp.h nf_conntrack_tuple_common.h \
2 nfnetlink_conntrack.h nfnetlink_log.h nfnetlink_queue.h \
3 xt_CLASSIFY.h xt_comment.h xt_connbytes.h xt_connmark.h \
4 xt_CONNMARK.h xt_conntrack.h xt_dccp.h xt_esp.h \
5 xt_helper.h xt_length.h xt_limit.h xt_mac.h xt_mark.h \
6 xt_MARK.h xt_multiport.h xt_NFQUEUE.h xt_pkttype.h \
7 xt_policy.h xt_realm.h xt_sctp.h xt_state.h xt_string.h \
8 xt_tcpmss.h xt_tcpudp.h
9
10unifdef-y := nf_conntrack_common.h nf_conntrack_ftp.h \
11 nf_conntrack_tcp.h nfnetlink.h x_tables.h xt_physdev.h
diff --git a/include/linux/netfilter_arp/Kbuild b/include/linux/netfilter_arp/Kbuild
new file mode 100644
index 000000000000..198ec5e7b17d
--- /dev/null
+++ b/include/linux/netfilter_arp/Kbuild
@@ -0,0 +1,2 @@
1header-y := arpt_mangle.h
2unifdef-y := arp_tables.h
diff --git a/include/linux/netfilter_bridge/Kbuild b/include/linux/netfilter_bridge/Kbuild
new file mode 100644
index 000000000000..5b1aba6abbad
--- /dev/null
+++ b/include/linux/netfilter_bridge/Kbuild
@@ -0,0 +1,4 @@
1header-y += ebt_among.h ebt_arp.h ebt_arpreply.h ebt_ip.h ebt_limit.h \
2 ebt_log.h ebt_mark_m.h ebt_mark_t.h ebt_nat.h ebt_pkttype.h \
3 ebt_redirect.h ebt_stp.h ebt_ulog.h ebt_vlan.h
4unifdef-y := ebtables.h ebt_802_3.h
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
new file mode 100644
index 000000000000..04e4d2721689
--- /dev/null
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -0,0 +1,21 @@
1
2header-y := ip_conntrack_helper.h ip_conntrack_helper_h323_asn1.h \
3 ip_conntrack_helper_h323_types.h ip_conntrack_protocol.h \
4 ip_conntrack_sctp.h ip_conntrack_tcp.h ip_conntrack_tftp.h \
5 ip_nat_pptp.h ipt_addrtype.h ipt_ah.h \
6 ipt_CLASSIFY.h ipt_CLUSTERIP.h ipt_comment.h \
7 ipt_connbytes.h ipt_connmark.h ipt_CONNMARK.h \
8 ipt_conntrack.h ipt_dccp.h ipt_dscp.h ipt_DSCP.h ipt_ecn.h \
9 ipt_ECN.h ipt_esp.h ipt_hashlimit.h ipt_helper.h \
10 ipt_iprange.h ipt_length.h ipt_limit.h ipt_LOG.h ipt_mac.h \
11 ipt_mark.h ipt_MARK.h ipt_multiport.h ipt_NFQUEUE.h \
12 ipt_owner.h ipt_physdev.h ipt_pkttype.h ipt_policy.h \
13 ipt_realm.h ipt_recent.h ipt_REJECT.h ipt_SAME.h \
14 ipt_sctp.h ipt_state.h ipt_string.h ipt_tcpmss.h \
15 ipt_TCPMSS.h ipt_tos.h ipt_TOS.h ipt_ttl.h ipt_TTL.h \
16 ipt_ULOG.h
17
18unifdef-y := ip_conntrack.h ip_conntrack_h323.h ip_conntrack_irc.h \
19 ip_conntrack_pptp.h ip_conntrack_proto_gre.h \
20 ip_conntrack_tuple.h ip_nat.h ip_nat_rule.h ip_queue.h \
21 ip_tables.h
diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild
new file mode 100644
index 000000000000..913ddbf55b4b
--- /dev/null
+++ b/include/linux/netfilter_ipv6/Kbuild
@@ -0,0 +1,6 @@
1header-y += ip6t_HL.h ip6t_LOG.h ip6t_MARK.h ip6t_REJECT.h ip6t_ah.h \
2 ip6t_esp.h ip6t_frag.h ip6t_hl.h ip6t_ipv6header.h \
3 ip6t_length.h ip6t_limit.h ip6t_mac.h ip6t_mark.h \
4 ip6t_multiport.h ip6t_opts.h ip6t_owner.h ip6t_policy.h \
5 ip6t_physdev.h ip6t_rt.h
6unifdef-y := ip6_tables.h
diff --git a/include/linux/nfsd/Kbuild b/include/linux/nfsd/Kbuild
new file mode 100644
index 000000000000..c8c545665885
--- /dev/null
+++ b/include/linux/nfsd/Kbuild
@@ -0,0 +1,2 @@
1unifdef-y := const.h export.h stats.h syscall.h nfsfh.h debug.h auth.h
2
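
These new Kbuild fragments feed the header-export machinery: files listed under header-y are copied out to userspace as-is, while unifdef-y files are first passed through unifdef so their #ifdef __KERNEL__ regions never reach /usr/include. A toy header showing why a file lands in the unifdef-y list (contents invented for illustration):

/* example.h -- would be listed as unifdef-y */
#ifndef _EXAMPLE_H
#define _EXAMPLE_H

/* Survives export: userspace needs the wire format. */
struct example_req {
	unsigned int flags;
};

#ifdef __KERNEL__
/* Stripped during headers_install: kernel-only helper. */
int example_handle(struct example_req *req);
#endif

#endif /* _EXAMPLE_H */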
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 51dbab9710c7..7ff386a6ae87 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -65,7 +65,7 @@ struct raw_notifier_head {
65 } while (0) 65 } while (0)
66 66
67#define ATOMIC_NOTIFIER_INIT(name) { \ 67#define ATOMIC_NOTIFIER_INIT(name) { \
68 .lock = SPIN_LOCK_UNLOCKED, \ 68 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
69 .head = NULL } 69 .head = NULL }
70#define BLOCKING_NOTIFIER_INIT(name) { \ 70#define BLOCKING_NOTIFIER_INIT(name) { \
71 .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ 71 .rwsem = __RWSEM_INITIALIZER((name).rwsem), \
diff --git a/include/linux/poison.h b/include/linux/poison.h
index a5347c02432e..3e628f990fdf 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -44,6 +44,11 @@
44 44
45/********** drivers/atm/ **********/ 45/********** drivers/atm/ **********/
46#define ATM_POISON_FREE 0x12 46#define ATM_POISON_FREE 0x12
47#define ATM_POISON 0xdeadbeef
48
49/********** net/ **********/
50#define NEIGHBOR_DEAD 0xdeadbeef
51#define NETFILTER_LINK_POISON 0xdead57ac
47 52
48/********** kernel/mutexes **********/ 53/********** kernel/mutexes **********/
49#define MUTEX_DEBUG_INIT 0x11 54#define MUTEX_DEBUG_INIT 0x11
diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild
new file mode 100644
index 000000000000..73fa27a8d552
--- /dev/null
+++ b/include/linux/raid/Kbuild
@@ -0,0 +1 @@
1header-y += md_p.h md_u.h
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index fa4a3b82ba70..5d41dee82f80 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -29,8 +29,6 @@ struct rt_mutex {
29 struct task_struct *owner; 29 struct task_struct *owner;
30#ifdef CONFIG_DEBUG_RT_MUTEXES 30#ifdef CONFIG_DEBUG_RT_MUTEXES
31 int save_state; 31 int save_state;
32 struct list_head held_list_entry;
33 unsigned long acquire_ip;
34 const char *name, *file; 32 const char *name, *file;
35 int line; 33 int line;
36 void *magic; 34 void *magic;
@@ -98,14 +96,6 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
98 96
99extern void rt_mutex_unlock(struct rt_mutex *lock); 97extern void rt_mutex_unlock(struct rt_mutex *lock);
100 98
101#ifdef CONFIG_DEBUG_RT_MUTEXES
102# define INIT_RT_MUTEX_DEBUG(tsk) \
103 .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \
104 .held_list_lock = SPIN_LOCK_UNLOCKED
105#else
106# define INIT_RT_MUTEX_DEBUG(tsk)
107#endif
108
109#ifdef CONFIG_RT_MUTEXES 99#ifdef CONFIG_RT_MUTEXES
110# define INIT_RT_MUTEXES(tsk) \ 100# define INIT_RT_MUTEXES(tsk) \
111 .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \ 101 .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index f30f805080ae..ae1fcadd598e 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -32,30 +32,37 @@ struct rw_semaphore {
32 __s32 activity; 32 __s32 activity;
33 spinlock_t wait_lock; 33 spinlock_t wait_lock;
34 struct list_head wait_list; 34 struct list_head wait_list;
35#if RWSEM_DEBUG 35#ifdef CONFIG_DEBUG_LOCK_ALLOC
36 int debug; 36 struct lockdep_map dep_map;
37#endif 37#endif
38}; 38};
39 39
40/* 40#ifdef CONFIG_DEBUG_LOCK_ALLOC
41 * initialisation 41# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
42 */
43#if RWSEM_DEBUG
44#define __RWSEM_DEBUG_INIT , 0
45#else 42#else
46#define __RWSEM_DEBUG_INIT /* */ 43# define __RWSEM_DEP_MAP_INIT(lockname)
47#endif 44#endif
48 45
49#define __RWSEM_INITIALIZER(name) \ 46#define __RWSEM_INITIALIZER(name) \
50{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT } 47{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
51 48
52#define DECLARE_RWSEM(name) \ 49#define DECLARE_RWSEM(name) \
53 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 50 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
54 51
55extern void FASTCALL(init_rwsem(struct rw_semaphore *sem)); 52extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
53 struct lock_class_key *key);
54
55#define init_rwsem(sem) \
56do { \
57 static struct lock_class_key __key; \
58 \
59 __init_rwsem((sem), #sem, &__key); \
60} while (0)
61
56extern void FASTCALL(__down_read(struct rw_semaphore *sem)); 62extern void FASTCALL(__down_read(struct rw_semaphore *sem));
57extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem)); 63extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
58extern void FASTCALL(__down_write(struct rw_semaphore *sem)); 64extern void FASTCALL(__down_write(struct rw_semaphore *sem));
65extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass));
59extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem)); 66extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
60extern void FASTCALL(__up_read(struct rw_semaphore *sem)); 67extern void FASTCALL(__up_read(struct rw_semaphore *sem));
61extern void FASTCALL(__up_write(struct rw_semaphore *sem)); 68extern void FASTCALL(__up_write(struct rw_semaphore *sem));
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f99fe90732ab..658afb37c3f5 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -9,8 +9,6 @@
9 9
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11 11
12#define RWSEM_DEBUG 0
13
14#ifdef __KERNEL__ 12#ifdef __KERNEL__
15 13
16#include <linux/types.h> 14#include <linux/types.h>
@@ -26,89 +24,58 @@ struct rw_semaphore;
26#include <asm/rwsem.h> /* use an arch-specific implementation */ 24#include <asm/rwsem.h> /* use an arch-specific implementation */
27#endif 25#endif
28 26
29#ifndef rwsemtrace
30#if RWSEM_DEBUG
31extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str));
32#else
33#define rwsemtrace(SEM,FMT)
34#endif
35#endif
36
37/* 27/*
38 * lock for reading 28 * lock for reading
39 */ 29 */
40static inline void down_read(struct rw_semaphore *sem) 30extern void down_read(struct rw_semaphore *sem);
41{
42 might_sleep();
43 rwsemtrace(sem,"Entering down_read");
44 __down_read(sem);
45 rwsemtrace(sem,"Leaving down_read");
46}
47 31
48/* 32/*
49 * trylock for reading -- returns 1 if successful, 0 if contention 33 * trylock for reading -- returns 1 if successful, 0 if contention
50 */ 34 */
51static inline int down_read_trylock(struct rw_semaphore *sem) 35extern int down_read_trylock(struct rw_semaphore *sem);
52{
53 int ret;
54 rwsemtrace(sem,"Entering down_read_trylock");
55 ret = __down_read_trylock(sem);
56 rwsemtrace(sem,"Leaving down_read_trylock");
57 return ret;
58}
59 36
60/* 37/*
61 * lock for writing 38 * lock for writing
62 */ 39 */
63static inline void down_write(struct rw_semaphore *sem) 40extern void down_write(struct rw_semaphore *sem);
64{
65 might_sleep();
66 rwsemtrace(sem,"Entering down_write");
67 __down_write(sem);
68 rwsemtrace(sem,"Leaving down_write");
69}
70 41
71/* 42/*
72 * trylock for writing -- returns 1 if successful, 0 if contention 43 * trylock for writing -- returns 1 if successful, 0 if contention
73 */ 44 */
74static inline int down_write_trylock(struct rw_semaphore *sem) 45extern int down_write_trylock(struct rw_semaphore *sem);
75{
76 int ret;
77 rwsemtrace(sem,"Entering down_write_trylock");
78 ret = __down_write_trylock(sem);
79 rwsemtrace(sem,"Leaving down_write_trylock");
80 return ret;
81}
82 46
83/* 47/*
84 * release a read lock 48 * release a read lock
85 */ 49 */
86static inline void up_read(struct rw_semaphore *sem) 50extern void up_read(struct rw_semaphore *sem);
87{
88 rwsemtrace(sem,"Entering up_read");
89 __up_read(sem);
90 rwsemtrace(sem,"Leaving up_read");
91}
92 51
93/* 52/*
94 * release a write lock 53 * release a write lock
95 */ 54 */
96static inline void up_write(struct rw_semaphore *sem) 55extern void up_write(struct rw_semaphore *sem);
97{
98 rwsemtrace(sem,"Entering up_write");
99 __up_write(sem);
100 rwsemtrace(sem,"Leaving up_write");
101}
102 56
103/* 57/*
104 * downgrade write lock to read lock 58 * downgrade write lock to read lock
105 */ 59 */
106static inline void downgrade_write(struct rw_semaphore *sem) 60extern void downgrade_write(struct rw_semaphore *sem);
107{ 61
108 rwsemtrace(sem,"Entering downgrade_write"); 62#ifdef CONFIG_DEBUG_LOCK_ALLOC
109 __downgrade_write(sem); 63/*
110 rwsemtrace(sem,"Leaving downgrade_write"); 64 * nested locking:
111} 65 */
66extern void down_read_nested(struct rw_semaphore *sem, int subclass);
67extern void down_write_nested(struct rw_semaphore *sem, int subclass);
68/*
69 * Take/release a lock when not the owner will release it:
70 */
71extern void down_read_non_owner(struct rw_semaphore *sem);
72extern void up_read_non_owner(struct rw_semaphore *sem);
73#else
74# define down_read_nested(sem, subclass) down_read(sem)
75# define down_write_nested(sem, subclass) down_write(sem)
76# define down_read_non_owner(sem) down_read(sem)
77# define up_read_non_owner(sem) up_read(sem)
78#endif
112 79
113#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
114#endif /* _LINUX_RWSEM_H */ 81#endif /* _LINUX_RWSEM_H */
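
Uninlining the rwsem API also makes room for two annotation flavours: down_read_nested()/down_write_nested() for deliberate same-class nesting, and the *_non_owner() pair for when one context takes a read lock and another releases it, a pattern lockdep must not tie to the acquiring task. As the #else branch shows, all of them fall back to the plain calls when lockdep is off. A sketch of the non-owner hand-off, using a POSIX semaphore as the stand-in because posting from a different thread is well defined (the helper names mirror the declarations above, not their implementation):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t sem;

#define down_read_non_owner(s)	sem_wait(s)
#define up_read_non_owner(s)	sem_post(s)

static void *worker(void *arg)
{
	(void)arg;
	up_read_non_owner(&sem);	/* release on behalf of main() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&sem, 0, 1);
	down_read_non_owner(&sem);	/* acquired here... */
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(&t, NULL);		/* ...released by the worker */
	printf("lock handed off across threads\n");
	return 0;
}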
diff --git a/include/linux/sched.h b/include/linux/sched.h
index aaf723308ed4..1c876e27ff93 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);
184extern rwlock_t tasklist_lock; 184extern rwlock_t tasklist_lock;
185extern spinlock_t mmlist_lock; 185extern spinlock_t mmlist_lock;
186 186
187typedef struct task_struct task_t; 187struct task_struct;
188 188
189extern void sched_init(void); 189extern void sched_init(void);
190extern void sched_init_smp(void); 190extern void sched_init_smp(void);
191extern void init_idle(task_t *idle, int cpu); 191extern void init_idle(struct task_struct *idle, int cpu);
192 192
193extern cpumask_t nohz_cpu_mask; 193extern cpumask_t nohz_cpu_mask;
194 194
@@ -383,7 +383,7 @@ struct signal_struct {
383 wait_queue_head_t wait_chldexit; /* for wait4() */ 383 wait_queue_head_t wait_chldexit; /* for wait4() */
384 384
385 /* current thread group signal load-balancing target: */ 385 /* current thread group signal load-balancing target: */
386 task_t *curr_target; 386 struct task_struct *curr_target;
387 387
388 /* shared signal handling: */ 388 /* shared signal handling: */
389 struct sigpending shared_pending; 389 struct sigpending shared_pending;
@@ -534,7 +534,6 @@ extern struct user_struct *find_user(uid_t);
534extern struct user_struct root_user; 534extern struct user_struct root_user;
535#define INIT_USER (&root_user) 535#define INIT_USER (&root_user)
536 536
537typedef struct prio_array prio_array_t;
538struct backing_dev_info; 537struct backing_dev_info;
539struct reclaim_state; 538struct reclaim_state;
540 539
@@ -699,7 +698,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
699 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) 698 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
700 699
701#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK 700#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
702extern void prefetch_stack(struct task_struct*); 701extern void prefetch_stack(struct task_struct *t);
703#else 702#else
704static inline void prefetch_stack(struct task_struct *t) { } 703static inline void prefetch_stack(struct task_struct *t) { }
705#endif 704#endif
@@ -715,6 +714,8 @@ enum sleep_type {
715 SLEEP_INTERRUPTED, 714 SLEEP_INTERRUPTED,
716}; 715};
717 716
717struct prio_array;
718
718struct task_struct { 719struct task_struct {
719 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 720 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
720 struct thread_info *thread_info; 721 struct thread_info *thread_info;
@@ -732,7 +733,7 @@ struct task_struct {
732 int load_weight; /* for niceness load balancing purposes */ 733 int load_weight; /* for niceness load balancing purposes */
733 int prio, static_prio, normal_prio; 734 int prio, static_prio, normal_prio;
734 struct list_head run_list; 735 struct list_head run_list;
735 prio_array_t *array; 736 struct prio_array *array;
736 737
737 unsigned short ioprio; 738 unsigned short ioprio;
738 unsigned int btrace_seq; 739 unsigned int btrace_seq;
@@ -865,16 +866,34 @@ struct task_struct {
865 struct plist_head pi_waiters; 866 struct plist_head pi_waiters;
866 /* Deadlock detection and priority inheritance handling */ 867 /* Deadlock detection and priority inheritance handling */
867 struct rt_mutex_waiter *pi_blocked_on; 868 struct rt_mutex_waiter *pi_blocked_on;
868# ifdef CONFIG_DEBUG_RT_MUTEXES
869 spinlock_t held_list_lock;
870 struct list_head held_list_head;
871# endif
872#endif 869#endif
873 870
874#ifdef CONFIG_DEBUG_MUTEXES 871#ifdef CONFIG_DEBUG_MUTEXES
875 /* mutex deadlock detection */ 872 /* mutex deadlock detection */
876 struct mutex_waiter *blocked_on; 873 struct mutex_waiter *blocked_on;
877#endif 874#endif
875#ifdef CONFIG_TRACE_IRQFLAGS
876 unsigned int irq_events;
877 int hardirqs_enabled;
878 unsigned long hardirq_enable_ip;
879 unsigned int hardirq_enable_event;
880 unsigned long hardirq_disable_ip;
881 unsigned int hardirq_disable_event;
882 int softirqs_enabled;
883 unsigned long softirq_disable_ip;
884 unsigned int softirq_disable_event;
885 unsigned long softirq_enable_ip;
886 unsigned int softirq_enable_event;
887 int hardirq_context;
888 int softirq_context;
889#endif
890#ifdef CONFIG_LOCKDEP
891# define MAX_LOCK_DEPTH 30UL
892 u64 curr_chain_key;
893 int lockdep_depth;
894 struct held_lock held_locks[MAX_LOCK_DEPTH];
895 unsigned int lockdep_recursion;
896#endif
878 897
879/* journalling filesystem info */ 898/* journalling filesystem info */
880 void *journal_info; 899 void *journal_info;
@@ -1013,9 +1032,9 @@ static inline void put_task_struct(struct task_struct *t)
1013#define used_math() tsk_used_math(current) 1032#define used_math() tsk_used_math(current)
1014 1033
1015#ifdef CONFIG_SMP 1034#ifdef CONFIG_SMP
1016extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); 1035extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
1017#else 1036#else
1018static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) 1037static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1019{ 1038{
1020 if (!cpu_isset(0, new_mask)) 1039 if (!cpu_isset(0, new_mask))
1021 return -EINVAL; 1040 return -EINVAL;
@@ -1024,7 +1043,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
1024#endif 1043#endif
1025 1044
1026extern unsigned long long sched_clock(void); 1045extern unsigned long long sched_clock(void);
1027extern unsigned long long current_sched_time(const task_t *current_task); 1046extern unsigned long long
1047current_sched_time(const struct task_struct *current_task);
1028 1048
1029/* sched_exec is called by processes performing an exec */ 1049/* sched_exec is called by processes performing an exec */
1030#ifdef CONFIG_SMP 1050#ifdef CONFIG_SMP
@@ -1042,27 +1062,27 @@ static inline void idle_task_exit(void) {}
1042extern void sched_idle_next(void); 1062extern void sched_idle_next(void);
1043 1063
1044#ifdef CONFIG_RT_MUTEXES 1064#ifdef CONFIG_RT_MUTEXES
1045extern int rt_mutex_getprio(task_t *p); 1065extern int rt_mutex_getprio(struct task_struct *p);
1046extern void rt_mutex_setprio(task_t *p, int prio); 1066extern void rt_mutex_setprio(struct task_struct *p, int prio);
1047extern void rt_mutex_adjust_pi(task_t *p); 1067extern void rt_mutex_adjust_pi(struct task_struct *p);
1048#else 1068#else
1049static inline int rt_mutex_getprio(task_t *p) 1069static inline int rt_mutex_getprio(struct task_struct *p)
1050{ 1070{
1051 return p->normal_prio; 1071 return p->normal_prio;
1052} 1072}
1053# define rt_mutex_adjust_pi(p) do { } while (0) 1073# define rt_mutex_adjust_pi(p) do { } while (0)
1054#endif 1074#endif
1055 1075
1056extern void set_user_nice(task_t *p, long nice); 1076extern void set_user_nice(struct task_struct *p, long nice);
1057extern int task_prio(const task_t *p); 1077extern int task_prio(const struct task_struct *p);
1058extern int task_nice(const task_t *p); 1078extern int task_nice(const struct task_struct *p);
1059extern int can_nice(const task_t *p, const int nice); 1079extern int can_nice(const struct task_struct *p, const int nice);
1060extern int task_curr(const task_t *p); 1080extern int task_curr(const struct task_struct *p);
1061extern int idle_cpu(int cpu); 1081extern int idle_cpu(int cpu);
1062extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); 1082extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
1063extern task_t *idle_task(int cpu); 1083extern struct task_struct *idle_task(int cpu);
1064extern task_t *curr_task(int cpu); 1084extern struct task_struct *curr_task(int cpu);
1065extern void set_curr_task(int cpu, task_t *p); 1085extern void set_curr_task(int cpu, struct task_struct *p);
1066 1086
1067void yield(void); 1087void yield(void);
1068 1088
@@ -1119,8 +1139,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
1119#else 1139#else
1120 static inline void kick_process(struct task_struct *tsk) { } 1140 static inline void kick_process(struct task_struct *tsk) { }
1121#endif 1141#endif
1122extern void FASTCALL(sched_fork(task_t * p, int clone_flags)); 1142extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
1123extern void FASTCALL(sched_exit(task_t * p)); 1143extern void FASTCALL(sched_exit(struct task_struct * p));
1124 1144
1125extern int in_group_p(gid_t); 1145extern int in_group_p(gid_t);
1126extern int in_egroup_p(gid_t); 1146extern int in_egroup_p(gid_t);
@@ -1225,17 +1245,17 @@ extern NORET_TYPE void do_group_exit(int);
1225extern void daemonize(const char *, ...); 1245extern void daemonize(const char *, ...);
1226extern int allow_signal(int); 1246extern int allow_signal(int);
1227extern int disallow_signal(int); 1247extern int disallow_signal(int);
1228extern task_t *child_reaper; 1248extern struct task_struct *child_reaper;
1229 1249
1230extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); 1250extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
1231extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); 1251extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
1232task_t *fork_idle(int); 1252struct task_struct *fork_idle(int);
1233 1253
1234extern void set_task_comm(struct task_struct *tsk, char *from); 1254extern void set_task_comm(struct task_struct *tsk, char *from);
1235extern void get_task_comm(char *to, struct task_struct *tsk); 1255extern void get_task_comm(char *to, struct task_struct *tsk);
1236 1256
1237#ifdef CONFIG_SMP 1257#ifdef CONFIG_SMP
1238extern void wait_task_inactive(task_t * p); 1258extern void wait_task_inactive(struct task_struct * p);
1239#else 1259#else
1240#define wait_task_inactive(p) do { } while (0) 1260#define wait_task_inactive(p) do { } while (0)
1241#endif 1261#endif
@@ -1261,13 +1281,13 @@ extern void wait_task_inactive(task_t * p);
1261/* de_thread depends on thread_group_leader not being a pid based check */ 1281/* de_thread depends on thread_group_leader not being a pid based check */
1262#define thread_group_leader(p) (p == p->group_leader) 1282#define thread_group_leader(p) (p == p->group_leader)
1263 1283
1264static inline task_t *next_thread(const task_t *p) 1284static inline struct task_struct *next_thread(const struct task_struct *p)
1265{ 1285{
1266 return list_entry(rcu_dereference(p->thread_group.next), 1286 return list_entry(rcu_dereference(p->thread_group.next),
1267 task_t, thread_group); 1287 struct task_struct, thread_group);
1268} 1288}
1269 1289
1270static inline int thread_group_empty(task_t *p) 1290static inline int thread_group_empty(struct task_struct *p)
1271{ 1291{
1272 return list_empty(&p->thread_group); 1292 return list_empty(&p->thread_group);
1273} 1293}
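
The lockdep block grafted onto task_struct is plain per-task bookkeeping: a fixed stack of at most MAX_LOCK_DEPTH (30) currently held locks, a depth counter, a recursion guard, and curr_chain_key, a running hash naming the current acquisition chain. A compact mock of that bookkeeping (the types and the hash are simplified stand-ins):

#include <stdio.h>

#define MAX_LOCK_DEPTH 30

struct held_lock { const void *lock; };

struct task_like {
	unsigned long long curr_chain_key;	/* hash of the held chain */
	int lockdep_depth;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
};

static int push_lock(struct task_like *t, const void *lock)
{
	if (t->lockdep_depth >= MAX_LOCK_DEPTH)
		return -1;	/* the kernel would warn: depth overflow */
	t->held_locks[t->lockdep_depth++].lock = lock;
	/* Toy chain hash; the real one folds in lock class keys. */
	t->curr_chain_key = t->curr_chain_key * 31 +
			    (unsigned long long)(unsigned long)lock;
	return 0;
}

static void pop_lock(struct task_like *t)
{
	if (t->lockdep_depth > 0)
		t->lockdep_depth--;
}

int main(void)
{
	struct task_like t = { 0 };
	int a, b;

	push_lock(&t, &a);
	push_lock(&t, &b);
	printf("depth=%d chain=%llx\n", t.lockdep_depth, t.curr_chain_key);
	pop_lock(&t);
	pop_lock(&t);
	return 0;
}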
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 7bc5c7c12b54..46000936f8f1 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -38,9 +38,17 @@ typedef struct {
38 * These macros triggered gcc-3.x compile-time problems. We think these are 38 * These macros triggered gcc-3.x compile-time problems. We think these are
39 * OK now. Be cautious. 39 * OK now. Be cautious.
40 */ 40 */
41#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED } 41#define __SEQLOCK_UNLOCKED(lockname) \
42#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0) 42 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
43 43
44#define SEQLOCK_UNLOCKED \
45 __SEQLOCK_UNLOCKED(old_style_seqlock_init)
46
47#define seqlock_init(x) \
48 do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0)
49
50#define DEFINE_SEQLOCK(x) \
51 seqlock_t x = __SEQLOCK_UNLOCKED(x)
44 52
45/* Lock out other writers and update the count. 53/* Lock out other writers and update the count.
46 * Acts like a normal spin_lock/unlock. 54 * Acts like a normal spin_lock/unlock.
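
DEFINE_SEQLOCK() and the per-lock __SEQLOCK_UNLOCKED() initializer sit on top of the usual seqlock protocol: a writer bumps the sequence to odd before updating and back to even afterwards, and a reader retries whenever it saw an odd count or the count changed under it. A single-file model of that protocol, deliberately omitting the writer spinlock and memory barriers the real implementation needs:

#include <stdio.h>

struct seqlock_like {
	unsigned int seq;	/* odd while a write is in flight */
	int data;		/* the protected value */
};

static void write_begin(struct seqlock_like *sl) { sl->seq++; /* -> odd */ }
static void write_end(struct seqlock_like *sl)   { sl->seq++; /* -> even */ }

static int read_value(struct seqlock_like *sl)
{
	unsigned int start;
	int v;

	do {
		start = sl->seq;
		v = sl->data;
	} while ((start & 1) || sl->seq != start);	/* retry torn reads */

	return v;
}

int main(void)
{
	struct seqlock_like sl = { 0, 41 };

	write_begin(&sl);
	sl.data = 42;
	write_end(&sl);
	printf("read %d at seq %u\n", read_value(&sl), sl.seq);
	return 0;
}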
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index fc1104a2cfa9..058cba70818a 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -216,10 +216,11 @@ struct uart_port {
216 unsigned char __iomem *membase; /* read/write[bwl] */ 216 unsigned char __iomem *membase; /* read/write[bwl] */
217 unsigned int irq; /* irq number */ 217 unsigned int irq; /* irq number */
218 unsigned int uartclk; /* base uart clock */ 218 unsigned int uartclk; /* base uart clock */
219 unsigned char fifosize; /* tx fifo size */ 219 unsigned int fifosize; /* tx fifo size */
220 unsigned char x_char; /* xon/xoff char */ 220 unsigned char x_char; /* xon/xoff char */
221 unsigned char regshift; /* reg offset shift */ 221 unsigned char regshift; /* reg offset shift */
222 unsigned char iotype; /* io access style */ 222 unsigned char iotype; /* io access style */
223 unsigned char unused1;
223 224
224#define UPIO_PORT (0) 225#define UPIO_PORT (0)
225#define UPIO_HUB6 (1) 226#define UPIO_HUB6 (1)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 57d7d4965f9a..3597b4f14389 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -604,9 +604,12 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
604 return list_->qlen; 604 return list_->qlen;
605} 605}
606 606
607extern struct lock_class_key skb_queue_lock_key;
608
607static inline void skb_queue_head_init(struct sk_buff_head *list) 609static inline void skb_queue_head_init(struct sk_buff_head *list)
608{ 610{
609 spin_lock_init(&list->lock); 611 spin_lock_init(&list->lock);
612 lockdep_set_class(&list->lock, &skb_queue_lock_key);
610 list->prev = list->next = (struct sk_buff *)list; 613 list->prev = list->next = (struct sk_buff *)list;
611 list->qlen = 0; 614 list->qlen = 0;
612} 615}
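
This hunk is the flip side of per-call-site lock classes: skb_queue_head_init() is inlined into every driver, so spin_lock_init() alone would mint a separate class per driver and explode the class space. Re-keying each queue's lock against the single shared skb_queue_lock_key folds every sk_buff queue back into one class. A mock of that reclassification step (all types here are simplified stand-ins):

#include <stdio.h>

struct lock_class_key { char dummy; };
struct spinlock_like  { struct lock_class_key *class; };

/* One key shared by every queue, like the extern above. */
static struct lock_class_key skb_queue_lock_key;

static void lockdep_set_class(struct spinlock_like *l,
			      struct lock_class_key *k)
{
	l->class = k;	/* override whatever class init assigned */
}

struct queue_like { struct spinlock_like lock; };

static void queue_head_init(struct queue_like *q)
{
	/* In real code each inlined copy of this would be a
	 * different call site, hence a different key... */
	static struct lock_class_key init_site_key;

	q->lock.class = &init_site_key;
	/* ...which the shared key then overrides. */
	lockdep_set_class(&q->lock, &skb_queue_lock_key);
}

int main(void)
{
	struct queue_like a, b;

	queue_head_init(&a);
	queue_head_init(&b);
	printf("same class: %s\n",
	       a.lock.class == b.lock.class ? "yes" : "no");
	return 0;
}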
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index ae23beef9cc9..31473db92d3b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
82/* 82/*
83 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): 83 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
84 */ 84 */
85#if defined(CONFIG_SMP) 85#ifdef CONFIG_SMP
86# include <asm/spinlock.h> 86# include <asm/spinlock.h>
87#else 87#else
88# include <linux/spinlock_up.h> 88# include <linux/spinlock_up.h>
89#endif 89#endif
90 90
91#define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) 91#ifdef CONFIG_DEBUG_SPINLOCK
92#define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0) 92 extern void __spin_lock_init(spinlock_t *lock, const char *name,
93 struct lock_class_key *key);
94# define spin_lock_init(lock) \
95do { \
96 static struct lock_class_key __key; \
97 \
98 __spin_lock_init((lock), #lock, &__key); \
99} while (0)
100
101#else
102# define spin_lock_init(lock) \
103 do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
104#endif
105
106#ifdef CONFIG_DEBUG_SPINLOCK
107 extern void __rwlock_init(rwlock_t *lock, const char *name,
108 struct lock_class_key *key);
109# define rwlock_init(lock) \
110do { \
111 static struct lock_class_key __key; \
112 \
113 __rwlock_init((lock), #lock, &__key); \
114} while (0)
115#else
116# define rwlock_init(lock) \
117 do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
118#endif
93 119
94#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) 120#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
95 121
@@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
113#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 139#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
114 extern int _raw_spin_trylock(spinlock_t *lock); 140 extern int _raw_spin_trylock(spinlock_t *lock);
115 extern void _raw_spin_unlock(spinlock_t *lock); 141 extern void _raw_spin_unlock(spinlock_t *lock);
116
117 extern void _raw_read_lock(rwlock_t *lock); 142 extern void _raw_read_lock(rwlock_t *lock);
118 extern int _raw_read_trylock(rwlock_t *lock); 143 extern int _raw_read_trylock(rwlock_t *lock);
119 extern void _raw_read_unlock(rwlock_t *lock); 144 extern void _raw_read_unlock(rwlock_t *lock);
@@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
121 extern int _raw_write_trylock(rwlock_t *lock); 146 extern int _raw_write_trylock(rwlock_t *lock);
122 extern void _raw_write_unlock(rwlock_t *lock); 147 extern void _raw_write_unlock(rwlock_t *lock);
123#else 148#else
124# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
125# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
126# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) 149# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
127# define _raw_spin_lock_flags(lock, flags) \ 150# define _raw_spin_lock_flags(lock, flags) \
128 __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) 151 __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
152# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
153# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
129# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) 154# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
130# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
131# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
132# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
133# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) 155# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
156# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
157# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
134# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) 158# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
159# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
135#endif 160#endif
136 161
137#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) 162#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
@@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
147#define write_trylock(lock) __cond_lock(_write_trylock(lock)) 172#define write_trylock(lock) __cond_lock(_write_trylock(lock))
148 173
149#define spin_lock(lock) _spin_lock(lock) 174#define spin_lock(lock) _spin_lock(lock)
175
176#ifdef CONFIG_DEBUG_LOCK_ALLOC
177# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
178#else
179# define spin_lock_nested(lock, subclass) _spin_lock(lock)
180#endif
181
150#define write_lock(lock) _write_lock(lock) 182#define write_lock(lock) _write_lock(lock)
151#define read_lock(lock) _read_lock(lock) 183#define read_lock(lock) _read_lock(lock)
152 184
@@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
172/* 204/*
173 * We inline the unlock functions in the nondebug case: 205 * We inline the unlock functions in the nondebug case:
174 */ 206 */
175#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) 207#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
208 !defined(CONFIG_SMP)
176# define spin_unlock(lock) _spin_unlock(lock) 209# define spin_unlock(lock) _spin_unlock(lock)
177# define read_unlock(lock) _read_unlock(lock) 210# define read_unlock(lock) _read_unlock(lock)
178# define write_unlock(lock) _write_unlock(lock) 211# define write_unlock(lock) _write_unlock(lock)
179#else
180# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
181# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
182# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
183#endif
184
185#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
186# define spin_unlock_irq(lock) _spin_unlock_irq(lock) 212# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
187# define read_unlock_irq(lock) _read_unlock_irq(lock) 213# define read_unlock_irq(lock) _read_unlock_irq(lock)
188# define write_unlock_irq(lock) _write_unlock_irq(lock) 214# define write_unlock_irq(lock) _write_unlock_irq(lock)
189#else 215#else
216# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
217# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
218# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
190# define spin_unlock_irq(lock) \ 219# define spin_unlock_irq(lock) \
191 do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0) 220 do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
192# define read_unlock_irq(lock) \ 221# define read_unlock_irq(lock) \
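
spin_lock_nested() is the spinlock counterpart of mutex_lock_nested(): when two locks of one class must nest, for instance both endpoints of a transfer, the subclass marks the inner acquisition as intended, and the non-debug build maps it straight to _spin_lock(). A pthread-backed sketch of the canonical pattern, ordering by address so the code is deadlock-free even without a checker (the account type is invented):

#include <pthread.h>
#include <stdio.h>

#define spin_lock(l)			pthread_mutex_lock(l)
#define spin_unlock(l)			pthread_mutex_unlock(l)
#define spin_lock_nested(l, sub)	spin_lock(l)	/* non-debug fallback */

struct account { pthread_mutex_t lock; long balance; };

static void transfer(struct account *a, struct account *b, long amount)
{
	/* Lock in address order so concurrent transfers cannot
	 * deadlock; annotate the second same-class lock. */
	struct account *first  = a < b ? a : b;
	struct account *second = a < b ? b : a;

	spin_lock(&first->lock);
	spin_lock_nested(&second->lock, 1);
	a->balance -= amount;
	b->balance += amount;
	spin_unlock(&second->lock);
	spin_unlock(&first->lock);
}

int main(void)
{
	struct account x = { PTHREAD_MUTEX_INITIALIZER, 100 };
	struct account y = { PTHREAD_MUTEX_INITIALIZER, 0 };

	transfer(&x, &y, 25);
	printf("x=%ld y=%ld\n", x.balance, y.balance);
	return 0;
}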
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 78e6989ffb54..b2c4f8299464 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr);
 #define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
 
 void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(spinlock_t);
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+							__acquires(spinlock_t);
 void __lockfunc _read_lock(rwlock_t *lock)		__acquires(rwlock_t);
 void __lockfunc _write_lock(rwlock_t *lock)		__acquires(rwlock_t);
 void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index cd81cee566f4..67faa044c5f5 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -49,6 +49,7 @@
 	do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
 
 #define _spin_lock(lock)			__LOCK(lock)
+#define _spin_lock_nested(lock, subclass)	__LOCK(lock)
 #define _read_lock(lock)			__LOCK(lock)
 #define _write_lock(lock)			__LOCK(lock)
 #define _spin_lock_bh(lock)			__LOCK_BH(lock)
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 9cb51e070390..dc5fb69e4de9 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,6 +9,8 @@
  * Released under the General Public License (GPL).
  */
 
+#include <linux/lockdep.h>
+
 #if defined(CONFIG_SMP)
 # include <asm/spinlock_types.h>
 #else
@@ -24,6 +26,9 @@ typedef struct {
 	unsigned int magic, owner_cpu;
 	void *owner;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } spinlock_t;
 
 #define SPINLOCK_MAGIC		0xdead4ead
@@ -37,31 +42,53 @@ typedef struct {
 	unsigned int magic, owner_cpu;
 	void *owner;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } rwlock_t;
 
 #define RWLOCK_MAGIC		0xdeaf1eed
 
 #define SPINLOCK_OWNER_INIT	((void *)-1L)
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
 #ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_LOCK_UNLOCKED \
+# define __SPIN_LOCK_UNLOCKED(lockname) \
 	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
 				.magic = SPINLOCK_MAGIC,		\
 				.owner = SPINLOCK_OWNER_INIT,		\
-				.owner_cpu = -1 }
-#define RW_LOCK_UNLOCKED \
+				.owner_cpu = -1,			\
+				SPIN_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED(lockname) \
 	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
 				.magic = RWLOCK_MAGIC,			\
 				.owner = SPINLOCK_OWNER_INIT,		\
-				.owner_cpu = -1 }
+				.owner_cpu = -1,			\
+				RW_DEP_MAP_INIT(lockname) }
 #else
-# define SPIN_LOCK_UNLOCKED \
-	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
-#define RW_LOCK_UNLOCKED \
-	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED }
+# define __SPIN_LOCK_UNLOCKED(lockname) \
+	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
+				SPIN_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED(lockname) \
+	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
+				RW_DEP_MAP_INIT(lockname) }
 #endif
 
-#define DEFINE_SPINLOCK(x)	spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x)	rwlock_t x = RW_LOCK_UNLOCKED
+#define SPIN_LOCK_UNLOCKED	__SPIN_LOCK_UNLOCKED(old_style_spin_init)
+#define RW_LOCK_UNLOCKED	__RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
 
 #endif /* __LINUX_SPINLOCK_TYPES_H */
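The practical effect of the reworked initializers: DEFINE_SPINLOCK() now
stringifies the variable name into the lock's lockdep class, while the
old-style initializers still compile but lump every such lock into one shared
class, which weakens lockdep's reports. A short sketch:

    #include <linux/spinlock.h>

    /* gets its own lockdep class, named "my_driver_lock" */
    static DEFINE_SPINLOCK(my_driver_lock);

    /* legal, but shares the single "old_style_spin_init" class
     * with every other lock initialized this way */
    static spinlock_t legacy_lock = SPIN_LOCK_UNLOCKED;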
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198e..27644af20b7c 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -12,10 +12,14 @@
  * Released under the General Public License (GPL).
  */
 
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if defined(CONFIG_DEBUG_SPINLOCK) || \
+	defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 typedef struct {
 	volatile unsigned int slock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
@@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t;
 
 typedef struct {
 	/* no debug version on UP */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED { }
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 31accf2f0b13..ea54c4c9a4ec 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,7 +18,6 @@
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-
 #define __raw_spin_is_locked(x)		((x)->slock == 0)
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
new file mode 100644
index 000000000000..9cc81e572224
--- /dev/null
+++ b/include/linux/stacktrace.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_STACKTRACE_H
+#define __LINUX_STACKTRACE_H
+
+#ifdef CONFIG_STACKTRACE
+struct stack_trace {
+	unsigned int nr_entries, max_entries;
+	unsigned long *entries;
+};
+
+extern void save_stack_trace(struct stack_trace *trace,
+			     struct task_struct *task, int all_contexts,
+			     unsigned int skip);
+
+extern void print_stack_trace(struct stack_trace *trace, int spaces);
+#else
+# define save_stack_trace(trace, task, all, skip)	do { } while (0)
+# define print_stack_trace(trace)			do { } while (0)
+#endif
+
+#endif
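A minimal sketch of how the new interface is meant to be called (the wrapper
function here is hypothetical); the caller supplies the entry storage:

    #include <linux/sched.h>
    #include <linux/stacktrace.h>

    #define MAX_TRACE_ENTRIES 16

    static void dump_current_stack(void)
    {
    	unsigned long entries[MAX_TRACE_ENTRIES];
    	struct stack_trace trace = {
    		.nr_entries  = 0,
    		.max_entries = MAX_TRACE_ENTRIES,
    		.entries     = entries,
    	};

    	/* trace the current task, current context only, skip no frames */
    	save_stack_trace(&trace, current, 0, 0);
    	print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
    }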
diff --git a/include/linux/sunrpc/Kbuild b/include/linux/sunrpc/Kbuild
new file mode 100644
index 000000000000..0d1d768a27bf
--- /dev/null
+++ b/include/linux/sunrpc/Kbuild
@@ -0,0 +1 @@
+unifdef-y := debug.h
diff --git a/include/linux/swap.h b/include/linux/swap.h
index cf6ca6e377bd..5e59184c9096 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -189,6 +189,7 @@ extern long vm_total_pages;
 
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
+extern int sysctl_min_unmapped_ratio;
 extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 #else
 #define zone_reclaim_mode 0
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 46e4d8f2771f..e4b1a4d4dcf3 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -188,7 +188,7 @@ enum
 	VM_DROP_PAGECACHE=29,	/* int: nuke lots of pagecache */
 	VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
 	VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
-	VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
+	VM_MIN_UNMAPPED=32,	/* Set min percent of unmapped pages */
 	VM_PANIC_ON_OOM=33,	/* panic at out-of-memory */
 	VM_VDSO_ENABLED=34,	/* map VDSO into new processes? */
 };
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild
new file mode 100644
index 000000000000..5251a505b2f1
--- /dev/null
+++ b/include/linux/tc_act/Kbuild
@@ -0,0 +1 @@
+header-y += tc_gact.h tc_ipt.h tc_mirred.h tc_pedit.h
diff --git a/include/linux/tc_ematch/Kbuild b/include/linux/tc_ematch/Kbuild
new file mode 100644
index 000000000000..381e93018df6
--- /dev/null
+++ b/include/linux/tc_ematch/Kbuild
@@ -0,0 +1 @@
+headers-y := tc_em_cmp.h tc_em_meta.h tc_em_nbyte.h tc_em_text.h
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index dc7c621e4647..46919f9f5eb3 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -1,4 +1,4 @@
-#include <linux/version.h>
+#include <linux/utsrelease.h>
 #include <linux/module.h>
 
 /* Simply sanity version stamp for modules. */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 544e855c7c02..794be7af58ae 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -68,7 +68,7 @@ struct task_struct;
 	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
 
 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
-	.lock		= SPIN_LOCK_UNLOCKED,				\
+	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
 	.task_list	= { &(name).task_list, &(name).task_list } }
 
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
@@ -77,9 +77,15 @@ struct task_struct;
 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
 	{ .flags = word, .bit_nr = bit, }
 
+/*
+ * lockdep: we want one lock-class for all waitqueue locks.
+ */
+extern struct lock_class_key waitqueue_lock_key;
+
 static inline void init_waitqueue_head(wait_queue_head_t *q)
 {
 	spin_lock_init(&q->lock);
+	lockdep_set_class(&q->lock, &waitqueue_lock_key);
 	INIT_LIST_HEAD(&q->task_list);
 }
 
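The same pattern is available to any code that initializes many locks of one
kind dynamically and wants them treated as a single lockdep class. A sketch
with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    /* one static key = one lockdep class for all these locks */
    static struct lock_class_key my_table_lock_key;

    struct my_table {
    	spinlock_t lock;
    };

    static void my_table_init(struct my_table *t)
    {
    	spin_lock_init(&t->lock);
    	lockdep_set_class(&t->lock, &my_table_lock_key);
    }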
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 957c21c16d62..9bca3539a1e5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -63,6 +63,8 @@ extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
 extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+	struct work_struct *work, unsigned long delay);
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
diff --git a/include/mtd/Kbuild b/include/mtd/Kbuild
new file mode 100644
index 000000000000..e1da2a5b2a57
--- /dev/null
+++ b/include/mtd/Kbuild
@@ -0,0 +1,2 @@
+unifdef-y := mtd-abi.h
+header-y := inftl-user.h jffs2-user.h mtd-user.h nftl-user.h
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index 31329fce1ff5..1da3f7fa7993 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -133,7 +133,7 @@ struct nand_ecclayout {
 };
 
 /**
- * struct mtd_ecc_stats - error correction status
+ * struct mtd_ecc_stats - error correction stats
  *
  * @corrected:	number of corrected bits
  * @failed:	number of uncorrectable errors
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 5ba72d95280c..2fec827c8801 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -67,6 +67,9 @@ struct unix_skb_parms {
 #define unix_state_rlock(s)	spin_lock(&unix_sk(s)->lock)
 #define unix_state_runlock(s)	spin_unlock(&unix_sk(s)->lock)
 #define unix_state_wlock(s)	spin_lock(&unix_sk(s)->lock)
+#define unix_state_wlock_nested(s) \
+	spin_lock_nested(&unix_sk(s)->lock, \
+	SINGLE_DEPTH_NESTING)
 #define unix_state_wunlock(s)	spin_unlock(&unix_sk(s)->lock)
 
 #ifdef __KERNEL__
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 7cd528e9d668..69374cd1a857 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -182,14 +182,26 @@ typedef struct {
 
 typedef struct ax25_route {
 	struct ax25_route	*next;
-	atomic_t		ref;
+	atomic_t		refcount;
 	ax25_address		callsign;
 	struct net_device	*dev;
 	ax25_digi		*digipeat;
 	char			ip_mode;
-	struct timer_list	timer;
 } ax25_route;
 
+static inline void ax25_hold_route(ax25_route *ax25_rt)
+{
+	atomic_inc(&ax25_rt->refcount);
+}
+
+extern void __ax25_put_route(ax25_route *ax25_rt);
+
+static inline void ax25_put_route(ax25_route *ax25_rt)
+{
+	if (atomic_dec_and_test(&ax25_rt->refcount))
+		__ax25_put_route(ax25_rt);
+}
+
 typedef struct {
 	char			slave;			/* slave_mode?   */
 	struct timer_list	slave_timer;		/* timeout timer */
@@ -348,17 +360,11 @@ extern int ax25_check_iframes_acked(ax25_cb *, unsigned short);
 extern void ax25_rt_device_down(struct net_device *);
 extern int ax25_rt_ioctl(unsigned int, void __user *);
 extern struct file_operations ax25_route_fops;
+extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
 extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
-extern ax25_route *ax25_rt_find_route(ax25_route *, ax25_address *,
-	struct net_device *);
 extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
 extern void ax25_rt_free(void);
 
-static inline void ax25_put_route(ax25_route *ax25_rt)
-{
-	atomic_dec(&ax25_rt->ref);
-}
-
 /* ax25_std_in.c */
 extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
 
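A sketch of the resulting hold/put discipline (the surrounding lookup code is
hypothetical). The point of moving from a bare atomic_dec() to
atomic_dec_and_test() is that the final put can now actually free the route
via __ax25_put_route():

    static void use_route(ax25_address *dest, struct net_device *dev)
    {
    	ax25_route *rt;

    	/* ax25_get_route() is assumed to return the route already held */
    	rt = ax25_get_route(dest, dev);
    	if (!rt)
    		return;

    	/* ... use rt->digipeat, rt->dev, ... */

    	ax25_put_route(rt);	/* last put frees via __ax25_put_route() */
    }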
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 911ceb5cd263..771d17783c18 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -175,6 +175,6 @@ extern int hci_sock_cleanup(void);
 extern int bt_sysfs_init(void);
 extern void bt_sysfs_cleanup(void);
 
-extern struct class bt_class;
+extern struct class *bt_class;
 
 #endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index b06a2d2f63d2..b2bdb1aa0429 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -54,7 +54,8 @@
 /* HCI device quirks */
 enum {
 	HCI_QUIRK_RESET_ON_INIT,
-	HCI_QUIRK_RAW_DEVICE
+	HCI_QUIRK_RAW_DEVICE,
+	HCI_QUIRK_FIXUP_BUFFER_SIZE
 };
 
 /* HCI device flags */
@@ -100,9 +101,10 @@ enum {
 #define HCIINQUIRY	_IOR('H', 240, int)
 
 /* HCI timeouts */
-#define HCI_CONN_TIMEOUT	(HZ * 40)
-#define HCI_DISCONN_TIMEOUT	(HZ * 2)
-#define HCI_CONN_IDLE_TIMEOUT	(HZ * 60)
+#define HCI_CONNECT_TIMEOUT	(40000)	/* 40 seconds */
+#define HCI_DISCONN_TIMEOUT	(2000)	/* 2 seconds */
+#define HCI_IDLE_TIMEOUT	(6000)	/* 6 seconds */
+#define HCI_INIT_TIMEOUT	(10000)	/* 10 seconds */
 
 /* HCI Packet types */
 #define HCI_COMMAND_PKT		0x01
@@ -144,7 +146,7 @@ enum {
 #define LMP_TACCURACY	0x10
 #define LMP_RSWITCH	0x20
 #define LMP_HOLD	0x40
-#define LMP_SNIF	0x80
+#define LMP_SNIFF	0x80
 
 #define LMP_PARK	0x01
 #define LMP_RSSI	0x02
@@ -159,13 +161,21 @@ enum {
 #define LMP_PSCHEME	0x02
 #define LMP_PCONTROL	0x04
 
+#define LMP_SNIFF_SUBR	0x02
+
+/* Connection modes */
+#define HCI_CM_ACTIVE	0x0000
+#define HCI_CM_HOLD	0x0001
+#define HCI_CM_SNIFF	0x0002
+#define HCI_CM_PARK	0x0003
+
 /* Link policies */
 #define HCI_LP_RSWITCH	0x0001
 #define HCI_LP_HOLD	0x0002
 #define HCI_LP_SNIFF	0x0004
 #define HCI_LP_PARK	0x0008
 
-/* Link mode */
+/* Link modes */
 #define HCI_LM_ACCEPT	0x8000
 #define HCI_LM_MASTER	0x0001
 #define HCI_LM_AUTH	0x0002
@@ -191,7 +201,7 @@ struct hci_rp_read_loc_version {
 } __attribute__ ((packed));
 
 #define OCF_READ_LOCAL_FEATURES	0x0003
-struct hci_rp_read_loc_features {
+struct hci_rp_read_local_features {
 	__u8     status;
 	__u8     features[8];
 } __attribute__ ((packed));
@@ -375,17 +385,32 @@ struct hci_cp_change_conn_link_key {
 } __attribute__ ((packed));
 
 #define OCF_READ_REMOTE_FEATURES	0x001B
-struct hci_cp_read_rmt_features {
+struct hci_cp_read_remote_features {
 	__le16   handle;
 } __attribute__ ((packed));
 
 #define OCF_READ_REMOTE_VERSION		0x001D
-struct hci_cp_read_rmt_version {
+struct hci_cp_read_remote_version {
 	__le16   handle;
 } __attribute__ ((packed));
 
 /* Link Policy */
 #define OGF_LINK_POLICY	0x02
+
+#define OCF_SNIFF_MODE		0x0003
+struct hci_cp_sniff_mode {
+	__le16   handle;
+	__le16   max_interval;
+	__le16   min_interval;
+	__le16   attempt;
+	__le16   timeout;
+} __attribute__ ((packed));
+
+#define OCF_EXIT_SNIFF_MODE	0x0004
+struct hci_cp_exit_sniff_mode {
+	__le16   handle;
+} __attribute__ ((packed));
+
 #define OCF_ROLE_DISCOVERY	0x0009
 struct hci_cp_role_discovery {
 	__le16   handle;
@@ -406,7 +431,7 @@ struct hci_rp_read_link_policy {
 	__le16   policy;
 } __attribute__ ((packed));
 
-#define OCF_SWITCH_ROLE	0x000B
+#define OCF_SWITCH_ROLE		0x000B
 struct hci_cp_switch_role {
 	bdaddr_t bdaddr;
 	__u8     role;
@@ -422,6 +447,14 @@ struct hci_rp_write_link_policy {
 	__le16   handle;
 } __attribute__ ((packed));
 
+#define OCF_SNIFF_SUBRATE	0x0011
+struct hci_cp_sniff_subrate {
+	__le16   handle;
+	__le16   max_latency;
+	__le16   min_remote_timeout;
+	__le16   min_local_timeout;
+} __attribute__ ((packed));
+
 /* Status params */
 #define OGF_STATUS_PARAM	0x05
 
@@ -581,15 +614,15 @@ struct hci_ev_link_key_notify {
 	__u8     key_type;
 } __attribute__ ((packed));
 
-#define HCI_EV_RMT_FEATURES	0x0B
-struct hci_ev_rmt_features {
+#define HCI_EV_REMOTE_FEATURES	0x0B
+struct hci_ev_remote_features {
 	__u8     status;
 	__le16   handle;
 	__u8     features[8];
 } __attribute__ ((packed));
 
-#define HCI_EV_RMT_VERSION	0x0C
-struct hci_ev_rmt_version {
+#define HCI_EV_REMOTE_VERSION	0x0C
+struct hci_ev_remote_version {
 	__u8     status;
 	__le16   handle;
 	__u8     lmp_ver;
@@ -610,6 +643,16 @@ struct hci_ev_pscan_rep_mode {
 	__u8     pscan_rep_mode;
 } __attribute__ ((packed));
 
+#define HCI_EV_SNIFF_SUBRATE	0x2E
+struct hci_ev_sniff_subrate {
+	__u8     status;
+	__le16   handle;
+	__le16   max_tx_latency;
+	__le16   max_rx_latency;
+	__le16   max_remote_timeout;
+	__le16   max_local_timeout;
+} __attribute__ ((packed));
+
 /* Internal events generated by Bluetooth stack */
 #define HCI_EV_STACK_INTERNAL	0xFD
 struct hci_ev_stack_internal {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index bb9f81dc8723..d84855fe7336 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -31,10 +31,7 @@
 #define HCI_PROTO_L2CAP	0
 #define HCI_PROTO_SCO	1
 
-#define HCI_INIT_TIMEOUT (HZ * 10)
-
 /* HCI Core structures */
-
 struct inquiry_data {
 	bdaddr_t	bdaddr;
 	__u8		pscan_rep_mode;
@@ -81,6 +78,10 @@ struct hci_dev {
 	__u16		link_policy;
 	__u16		link_mode;
 
+	__u32		idle_timeout;
+	__u16		sniff_min_interval;
+	__u16		sniff_max_interval;
+
 	unsigned long	quirks;
 
 	atomic_t	cmd_cnt;
@@ -123,7 +124,8 @@ struct hci_dev {
 
 	atomic_t	promisc;
 
-	struct class_device class_dev;
+	struct device	*parent;
+	struct device	dev;
 
 	struct module	*owner;
 
@@ -145,18 +147,24 @@ struct hci_conn {
 	bdaddr_t	dst;
 	__u16		handle;
 	__u16		state;
+	__u8		mode;
 	__u8		type;
 	__u8		out;
 	__u8		dev_class[3];
+	__u8		features[8];
+	__u16		interval;
+	__u16		link_policy;
 	__u32		link_mode;
+	__u8		power_save;
 	unsigned long	pend;
 
 	unsigned int	sent;
 
 	struct sk_buff_head data_q;
 
-	struct timer_list timer;
-
+	struct timer_list disc_timer;
+	struct timer_list idle_timer;
+
 	struct hci_dev	*hdev;
 	void		*l2cap_data;
 	void		*sco_data;
@@ -211,7 +219,8 @@ void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
 enum {
 	HCI_CONN_AUTH_PEND,
 	HCI_CONN_ENCRYPT_PEND,
-	HCI_CONN_RSWITCH_PEND
+	HCI_CONN_RSWITCH_PEND,
+	HCI_CONN_MODE_CHANGE_PEND,
 };
 
 static inline void hci_conn_hash_init(struct hci_dev *hdev)
@@ -286,31 +295,27 @@ int hci_conn_encrypt(struct hci_conn *conn);
 int hci_conn_change_link_key(struct hci_conn *conn);
 int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
 
-static inline void hci_conn_set_timer(struct hci_conn *conn, unsigned long timeout)
-{
-	mod_timer(&conn->timer, jiffies + timeout);
-}
-
-static inline void hci_conn_del_timer(struct hci_conn *conn)
-{
-	del_timer(&conn->timer);
-}
+void hci_conn_enter_active_mode(struct hci_conn *conn);
+void hci_conn_enter_sniff_mode(struct hci_conn *conn);
 
 static inline void hci_conn_hold(struct hci_conn *conn)
 {
 	atomic_inc(&conn->refcnt);
-	hci_conn_del_timer(conn);
+	del_timer(&conn->disc_timer);
 }
 
 static inline void hci_conn_put(struct hci_conn *conn)
 {
 	if (atomic_dec_and_test(&conn->refcnt)) {
+		unsigned long timeo;
 		if (conn->type == ACL_LINK) {
-			unsigned long timeo = (conn->out) ?
-				HCI_DISCONN_TIMEOUT : HCI_DISCONN_TIMEOUT * 2;
-			hci_conn_set_timer(conn, timeo);
+			timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
+			if (!conn->out)
+				timeo *= 2;
+			del_timer(&conn->idle_timer);
 		} else
-			hci_conn_set_timer(conn, HZ / 100);
+			timeo = msecs_to_jiffies(10);
+		mod_timer(&conn->disc_timer, jiffies + timeo);
 	}
 }
 
@@ -408,11 +413,13 @@ static inline int hci_recv_frame(struct sk_buff *skb)
 int hci_register_sysfs(struct hci_dev *hdev);
 void hci_unregister_sysfs(struct hci_dev *hdev);
 
-#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->class_dev.dev = (pdev))
+#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
 
 /* ----- LMP capabilities ----- */
-#define lmp_rswitch_capable(dev) (dev->features[0] & LMP_RSWITCH)
-#define lmp_encrypt_capable(dev) (dev->features[0] & LMP_ENCRYPT)
+#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
+#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
+#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
+#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
 
 /* ----- HCI protocols ----- */
 struct hci_proto {
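Worth noting: the connection timers now run on millisecond constants converted
at the call site, so disconnect behavior no longer varies with HZ. Roughly,
for the disconnect path above (a sketch of the arithmetic only):

    /* HCI_DISCONN_TIMEOUT is 2000 ms; an incoming ACL link lingers
     * twice as long before the disconnect timer fires */
    unsigned long timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
    if (!conn->out)
    	timeo *= 2;		/* 4000 ms for incoming links */
    mod_timer(&conn->disc_timer, jiffies + timeo);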
diff --git a/include/net/sock.h b/include/net/sock.h
index 7b3d6b856946..324b3ea233d6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -44,6 +44,7 @@
 #include <linux/timer.h>
 #include <linux/cache.h>
 #include <linux/module.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>	/* struct sk_buff */
 #include <linux/security.h>
@@ -78,14 +79,17 @@ typedef struct {
 	spinlock_t		slock;
 	struct sock_iocb	*owner;
 	wait_queue_head_t	wq;
+	/*
+	 * We express the mutex-alike socket_lock semantics
+	 * to the lock validator by explicitly managing
+	 * the slock as a lock variant (in addition to
+	 * the slock itself):
+	 */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } socket_lock_t;
 
-#define sock_lock_init(__sk) \
-do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
-	(__sk)->sk_lock.owner = NULL; \
-	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
-} while(0)
-
 struct sock;
 struct proto;
 
@@ -747,6 +751,9 @@ extern void FASTCALL(release_sock(struct sock *sk));
 
 /* BH context may only use the following locking interface. */
 #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
+#define bh_lock_sock_nested(__sk) \
+				spin_lock_nested(&((__sk)->sk_lock.slock), \
+				SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
 extern struct sock *sk_alloc(int family,
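A sketch of where bh_lock_sock_nested() is needed: a softirq path that already
holds one socket lock and must take a second lock of the same class (the
function and the parent/child relationship here are hypothetical):

    static void handle_pair(struct sock *parent, struct sock *child)
    {
    	bh_lock_sock(parent);
    	/* second lock of the same lockdep class: annotate as nested */
    	bh_lock_sock_nested(child);

    	/* ... e.g. move queued skbs from parent to child ... */

    	bh_unlock_sock(child);
    	bh_unlock_sock(parent);
    }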
diff --git a/include/rdma/Kbuild b/include/rdma/Kbuild
new file mode 100644
index 000000000000..eb710ba9b1a0
--- /dev/null
+++ b/include/rdma/Kbuild
@@ -0,0 +1 @@
+header-y := ib_user_mad.h
diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild
new file mode 100644
index 000000000000..14a033d73314
--- /dev/null
+++ b/include/scsi/Kbuild
@@ -0,0 +1,2 @@
+header-y += scsi.h
+unifdef-y := scsi_ioctl.h sg.h
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 253797c60095..55ebf035e620 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -47,10 +47,19 @@ enum iscsi_uevent_e {
 	ISCSI_UEVENT_TRANSPORT_EP_POLL	= UEVENT_BASE + 13,
 	ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT	= UEVENT_BASE + 14,
 
+	ISCSI_UEVENT_TGT_DSCVR		= UEVENT_BASE + 15,
+
 	/* up events */
 	ISCSI_KEVENT_RECV_PDU		= KEVENT_BASE + 1,
 	ISCSI_KEVENT_CONN_ERROR		= KEVENT_BASE + 2,
 	ISCSI_KEVENT_IF_ERROR		= KEVENT_BASE + 3,
+	ISCSI_KEVENT_DESTROY_SESSION	= KEVENT_BASE + 4,
+};
+
+enum iscsi_tgt_dscvr {
+	ISCSI_TGT_DSCVR_SEND_TARGETS	= 1,
+	ISCSI_TGT_DSCVR_ISNS		= 2,
+	ISCSI_TGT_DSCVR_SLP		= 3,
 };
 
 struct iscsi_uevent {
@@ -116,6 +125,17 @@ struct iscsi_uevent {
 		struct msg_transport_disconnect {
 			uint64_t	ep_handle;
 		} ep_disconnect;
+		struct msg_tgt_dscvr {
+			enum iscsi_tgt_dscvr	type;
+			uint32_t	host_no;
+			/*
+			 * enable = 1 to establish a new connection
+			 * with the server. enable = 0 to disconnect
+			 * from the server. Used primarily to switch
+			 * from one iSNS server to another.
+			 */
+			uint32_t	enable;
+		} tgt_dscvr;
 	} u;
 	union {
 		/* messages k -> u */
@@ -138,6 +158,10 @@ struct iscsi_uevent {
 			uint32_t	cid;
 			uint32_t	error; /* enum iscsi_err */
 		} connerror;
+		struct msg_session_destroyed {
+			uint32_t	host_no;
+			uint32_t	sid;
+		} d_session;
 		struct msg_transport_connect_ret {
 			uint64_t	handle;
 		} ep_connect_ret;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index cbf7e58bd6f9..ba2760802ded 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -157,6 +157,11 @@ struct iscsi_conn {
 	int			max_xmit_dlength; /* target_max_recv_dsl */
 	int			hdrdgst_en;
 	int			datadgst_en;
+	int			ifmarker_en;
+	int			ofmarker_en;
+	/* values userspace uses to id a conn */
+	int			persistent_port;
+	char			*persistent_address;
 
 	/* MIB-statistics */
 	uint64_t		txdata_octets;
@@ -196,8 +201,8 @@ struct iscsi_session {
 	int			pdu_inorder_en;
 	int			dataseq_inorder_en;
 	int			erl;
-	int			ifmarker_en;
-	int			ofmarker_en;
+	int			tpgt;
+	char			*targetname;
 
 	/* control data */
 	struct iscsi_transport	*tt;
@@ -240,6 +245,10 @@ iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
 extern void iscsi_session_teardown(struct iscsi_cls_session *);
 extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
 extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+			   enum iscsi_param param, char *buf, int buflen);
+extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+				   enum iscsi_param param, char *buf);
 
 #define session_to_cls(_sess) \
 	hostdata_session(_sess->host->hostdata)
@@ -255,6 +264,8 @@ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
 extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
 			   int);
 extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+				enum iscsi_param param, char *buf);
 
 /*
  * pdu and task processing
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index e46cd404bd7d..371f70d9aa92 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -143,7 +143,7 @@ struct scsi_cmnd {
 
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
 extern void scsi_put_command(struct scsi_cmnd *);
-extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
+extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
 extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
 
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a42efd6e4be8..b3dd90f3e858 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -542,6 +542,9 @@ struct Scsi_Host {
 	 */
 	unsigned ordered_tag:1;
 
+	/* task mgmt function in progress */
+	unsigned tmf_in_progress:1;
+
 	/*
 	 * Optional work queue to be utilized by the transport
 	 */
@@ -619,7 +622,8 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
 {
 	return shost->shost_state == SHOST_RECOVERY ||
 		shost->shost_state == SHOST_CANCEL_RECOVERY ||
-		shost->shost_state == SHOST_DEL_RECOVERY;
+		shost->shost_state == SHOST_DEL_RECOVERY ||
+		shost->tmf_in_progress;
 }
 
 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
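With the new bit, an error-handling path can mark task-management activity so
scsi_host_in_recovery() holds off normal I/O for its duration. A sketch of the
intended usage (locking via the host lock, as surrounding eh code typically
does; the TMF issue step itself is elided):

    unsigned long flags;

    spin_lock_irqsave(shost->host_lock, flags);
    shost->tmf_in_progress = 1;
    spin_unlock_irqrestore(shost->host_lock, flags);

    /* ... issue the task management function and wait for it ... */

    spin_lock_irqsave(shost->host_lock, flags);
    shost->tmf_in_progress = 0;
    spin_unlock_irqrestore(shost->host_lock, flags);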
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index b684426a5900..5a3df1d7085f 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -34,6 +34,7 @@ struct iscsi_cls_conn;
 struct iscsi_conn;
 struct iscsi_cmd_task;
 struct iscsi_mgmt_task;
+struct sockaddr;
 
 /**
  * struct iscsi_transport - iSCSI Transport template
@@ -46,7 +47,12 @@ struct iscsi_mgmt_task;
  * @bind_conn:		associate this connection with existing iSCSI session
  *			and specified transport descriptor
  * @destroy_conn:	destroy inactive iSCSI connection
- * @set_param:		set iSCSI Data-Path operational parameter
+ * @set_param:		set iSCSI parameter. Return 0 on success, -ENODATA
+ *			when param is not supported, and a -Exx value on other
+ *			error.
+ * @get_param		get iSCSI parameter. Must return number of bytes
+ *			copied to buffer on success, -ENODATA when param
+ *			is not supported, and a -Exx value on other error
  * @start_conn:		set connection to be operational
  * @stop_conn:		suspend/recover/terminate connection
  * @send_pdu:		send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
@@ -97,15 +103,11 @@ struct iscsi_transport {
 	void (*stop_conn) (struct iscsi_cls_conn *conn, int flag);
 	void (*destroy_conn) (struct iscsi_cls_conn *conn);
 	int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param,
-			  uint32_t value);
+			  char *buf, int buflen);
 	int (*get_conn_param) (struct iscsi_cls_conn *conn,
-			       enum iscsi_param param, uint32_t *value);
+			       enum iscsi_param param, char *buf);
 	int (*get_session_param) (struct iscsi_cls_session *session,
-				  enum iscsi_param param, uint32_t *value);
-	int (*get_conn_str_param) (struct iscsi_cls_conn *conn,
-				   enum iscsi_param param, char *buf);
-	int (*get_session_str_param) (struct iscsi_cls_session *session,
-				      enum iscsi_param param, char *buf);
+				  enum iscsi_param param, char *buf);
 	int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 			 char *data, uint32_t data_size);
 	void (*get_stats) (struct iscsi_cls_conn *conn,
@@ -127,6 +129,8 @@ struct iscsi_transport {
 			  uint64_t *ep_handle);
 	int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
 	void (*ep_disconnect) (uint64_t ep_handle);
+	int (*tgt_dscvr) (enum iscsi_tgt_dscvr type, uint32_t host_no,
+			  uint32_t enable, struct sockaddr *dst_addr);
 };
 
 /*
@@ -155,13 +159,6 @@ struct iscsi_cls_conn {
 	struct iscsi_transport *transport;
 	uint32_t cid;			/* connection id */
 
-	/* portal/group values we got during discovery */
-	char *persistent_address;
-	int persistent_port;
-	/* portal/group values we are currently using */
-	char *address;
-	int port;
-
 	int active;			/* must be accessed with the connlock */
 	struct device dev;		/* sysfs transport/container device */
 	struct mempool_zone *z_error;
@@ -185,16 +182,11 @@ struct iscsi_cls_session {
 	struct list_head host_list;
 	struct iscsi_transport *transport;
 
-	/* iSCSI values used as unique id by userspace. */
-	char *targetname;
-	int tpgt;
-
 	/* recovery fields */
 	int recovery_tmo;
 	struct work_struct recovery_work;
 
 	int target_id;
-	int channel;
 
 	int sid;				/* session id */
 	void *dd_data;			/* LLD private data */
@@ -207,8 +199,10 @@ struct iscsi_cls_session {
 #define iscsi_session_to_shost(_session) \
 	dev_to_shost(_session->dev.parent)
 
+#define starget_to_session(_stgt) \
+	iscsi_dev_to_session(_stgt->dev.parent)
+
 struct iscsi_host {
-	int next_target_id;
 	struct list_head sessions;
 	struct mutex mutex;
 };
@@ -216,8 +210,17 @@ struct iscsi_host {
 /*
  * session and connection functions that can be used by HW iSCSI LLDs
  */
+extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+				struct iscsi_transport *transport);
+extern int iscsi_add_session(struct iscsi_cls_session *session,
+			     unsigned int target_id);
+extern int iscsi_if_create_session_done(struct iscsi_cls_conn *conn);
+extern int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn);
 extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
-					struct iscsi_transport *t, int channel);
+						struct iscsi_transport *t,
+						unsigned int target_id);
+extern void iscsi_remove_session(struct iscsi_cls_session *session);
+extern void iscsi_free_session(struct iscsi_cls_session *session);
 extern int iscsi_destroy_session(struct iscsi_cls_session *session);
 extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
 						uint32_t cid);
@@ -225,4 +228,5 @@ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
 extern void iscsi_unblock_session(struct iscsi_cls_session *session);
 extern void iscsi_block_session(struct iscsi_cls_session *session);
 
+
 #endif
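A hedged sketch of an LLD set_param hook under the new char-buffer convention
(the hook and its parsing are hypothetical; only the return-value contract is
taken from the kerneldoc comment above):

    static int my_set_param(struct iscsi_cls_conn *cls_conn,
    			enum iscsi_param param, char *buf, int buflen)
    {
    	struct iscsi_conn *conn = cls_conn->dd_data;
    	int value;

    	switch (param) {
    	case ISCSI_PARAM_MAX_RECV_DLENGTH:
    		sscanf(buf, "%d", &value);
    		conn->max_recv_dlength = value;
    		return 0;	/* success */
    	default:
    		return -ENODATA;	/* parameter not supported */
    	}
    }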
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 93cfb4bf4211..e3c503cd175e 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -3,6 +3,7 @@
 
 #include <linux/transport_class.h>
 #include <linux/types.h>
+#include <linux/mutex.h>
 
 struct scsi_transport_template;
 struct sas_rphy;
@@ -55,7 +56,6 @@ struct sas_phy {
 	enum sas_linkrate	minimum_linkrate;
 	enum sas_linkrate	maximum_linkrate_hw;
 	enum sas_linkrate	maximum_linkrate;
-	u8			port_identifier;
 
 	/* internal state */
 	unsigned int		local_attached : 1;
@@ -66,8 +66,8 @@ struct sas_phy {
 	u32		loss_of_dword_sync_count;
 	u32		phy_reset_problem_count;
 
-	/* the other end of the link */
-	struct sas_rphy		*rphy;
+	/* for the list of phys belonging to a port */
+	struct list_head	port_siblings;
 };
 
 #define dev_to_phy(d) \
@@ -124,6 +124,24 @@ struct sas_expander_device {
 #define rphy_to_expander_device(r) \
 	container_of((r), struct sas_expander_device, rphy)
 
+struct sas_port {
+	struct device		dev;
+
+	u8			port_identifier;
+	int			num_phys;
+
+	/* the other end of the link */
+	struct sas_rphy		*rphy;
+
+	struct mutex		phy_list_mutex;
+	struct list_head	phy_list;
+};
+
+#define dev_to_sas_port(d) \
+	container_of((d), struct sas_port, dev)
+#define transport_class_to_sas_port(cdev) \
+	dev_to_sas_port((cdev)->dev)
+
 /* The functions by which the transport class and the driver communicate */
 struct sas_function_template {
 	int (*get_linkerrors)(struct sas_phy *);
@@ -133,6 +151,7 @@ struct sas_function_template {
 };
 
 
+void sas_remove_children(struct device *);
 extern void sas_remove_host(struct Scsi_Host *);
 
 extern struct sas_phy *sas_phy_alloc(struct device *, int);
@@ -141,13 +160,21 @@ extern int sas_phy_add(struct sas_phy *);
 extern void sas_phy_delete(struct sas_phy *);
 extern int scsi_is_sas_phy(const struct device *);
 
-extern struct sas_rphy *sas_end_device_alloc(struct sas_phy *);
-extern struct sas_rphy *sas_expander_alloc(struct sas_phy *, enum sas_device_type);
+extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
+extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
 void sas_rphy_free(struct sas_rphy *);
 extern int sas_rphy_add(struct sas_rphy *);
 extern void sas_rphy_delete(struct sas_rphy *);
 extern int scsi_is_sas_rphy(const struct device *);
 
+struct sas_port *sas_port_alloc(struct device *, int);
+int sas_port_add(struct sas_port *);
+void sas_port_free(struct sas_port *);
+void sas_port_delete(struct sas_port *);
+void sas_port_add_phy(struct sas_port *, struct sas_phy *);
+void sas_port_delete_phy(struct sas_port *, struct sas_phy *);
+int scsi_is_sas_port(const struct device *);
+
 extern struct scsi_transport_template *
 sas_attach_transport(struct sas_function_template *);
 extern void sas_release_transport(struct scsi_transport_template *);
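A sketch of the new wide-port object's lifecycle as an LLD might drive it
(error handling trimmed; the surrounding shost/phy/port_id variables are
hypothetical):

    struct sas_port *port;
    struct sas_rphy *rphy;

    port = sas_port_alloc(&shost->shost_gendev, port_id);
    if (!port)
    	return -ENOMEM;
    if (sas_port_add(port)) {
    	sas_port_free(port);
    	return -ENODEV;
    }

    /* phys join and leave the port via the new port_siblings list */
    sas_port_add_phy(port, phy);

    /* remote phys now hang off the port, not an individual phy */
    rphy = sas_end_device_alloc(port);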
diff --git a/include/sound/Kbuild b/include/sound/Kbuild
new file mode 100644
index 000000000000..3a5a3df61496
--- /dev/null
+++ b/include/sound/Kbuild
@@ -0,0 +1,2 @@
+header-y := asound_fm.h hdsp.h hdspm.h sfnt_info.h sscape_ioctl.h
+unifdef-y := asequencer.h asound.h emu10k1.h sb16_csp.h
diff --git a/include/video/Kbuild b/include/video/Kbuild
new file mode 100644
index 000000000000..76a60737cc15
--- /dev/null
+++ b/include/video/Kbuild
@@ -0,0 +1 @@
+unifdef-y := sisfb.h
diff --git a/init/main.c b/init/main.c
index b2f3b566790e..628b8e9e841a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -47,6 +47,8 @@
 #include <linux/key.h>
 #include <linux/unwind.h>
 #include <linux/buffer_head.h>
+#include <linux/debug_locks.h>
+#include <linux/lockdep.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -456,6 +458,16 @@ asmlinkage void __init start_kernel(void)
 
 	smp_setup_processor_id();
 
+	/*
+	 * Need to run as early as possible, to initialize the
+	 * lockdep hash:
+	 */
+	lockdep_init();
+
+	local_irq_disable();
+	early_boot_irqs_off();
+	early_init_irq_lock_class();
+
 /*
  * Interrupts are still disabled. Do necessary setups, then
  * enable them
@@ -496,8 +508,13 @@ asmlinkage void __init start_kernel(void)
 	init_timers();
 	hrtimers_init();
 	softirq_init();
-	time_init();
 	timekeeping_init();
+	time_init();
+	profile_init();
+	if (!irqs_disabled())
+		printk("start_kernel(): bug: interrupts were enabled early\n");
+	early_boot_irqs_on();
+	local_irq_enable();
 
 	/*
 	 * HACK ALERT! This is early. We're enabling the console before
@@ -507,8 +524,16 @@ asmlinkage void __init start_kernel(void)
 	console_init();
 	if (panic_later)
 		panic(panic_later, panic_param);
-	profile_init();
-	local_irq_enable();
+
+	lockdep_info();
+
+	/*
+	 * Need to run this when irqs are enabled, because it wants
+	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
+	 * too:
+	 */
+	locking_selftest();
+
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start && !initrd_below_start_ok &&
 			initrd_start < min_low_pfn << PAGE_SHIFT) {
diff --git a/init/version.c b/init/version.c
index 3ddc3ceec2fe..e290802c6bd2 100644
--- a/init/version.c
+++ b/init/version.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/uts.h>
 #include <linux/utsname.h>
+#include <linux/utsrelease.h>
 #include <linux/version.h>
 
 #define version(a) Version_ ## a
diff --git a/kernel/Makefile b/kernel/Makefile
index 82fb182f6f61..47dbcd570cd8 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,10 +8,15 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
-	    hrtimer.o
+	    hrtimer.o rwsem.o
 
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+obj-$(CONFIG_LOCKDEP) += lockdep.o
+ifeq ($(CONFIG_PROC_FS),y)
+obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+endif
 obj-$(CONFIG_FUTEX) += futex.o
 ifeq ($(CONFIG_COMPAT),y)
 obj-$(CONFIG_FUTEX) += futex_compat.o
@@ -22,6 +27,7 @@ obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
diff --git a/kernel/capability.c b/kernel/capability.c
index 1a4d8a40d3f9..c7685ad00a97 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -46,7 +46,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
 	int ret = 0;
 	pid_t pid;
 	__u32 version;
-	task_t *target;
+	struct task_struct *target;
 	struct __user_cap_data_struct data;
 
 	if (get_user(version, &header->version))
@@ -96,7 +96,7 @@ static inline int cap_set_pg(int pgrp, kernel_cap_t *effective,
 			      kernel_cap_t *inheritable,
 			      kernel_cap_t *permitted)
 {
-	task_t *g, *target;
+	struct task_struct *g, *target;
 	int ret = -EPERM;
 	int found = 0;
 
@@ -128,7 +128,7 @@ static inline int cap_set_all(kernel_cap_t *effective,
 			       kernel_cap_t *inheritable,
 			       kernel_cap_t *permitted)
 {
-	task_t *g, *target;
+	struct task_struct *g, *target;
 	int ret = -EPERM;
 	int found = 0;
 
@@ -172,7 +172,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 {
 	kernel_cap_t inheritable, permitted, effective;
 	__u32 version;
-	task_t *target;
+	struct task_struct *target;
 	int ret;
 	pid_t pid;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 7f7ef2258553..6664c084783d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -134,8 +134,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
134 134
135void release_task(struct task_struct * p) 135void release_task(struct task_struct * p)
136{ 136{
137 struct task_struct *leader;
137 int zap_leader; 138 int zap_leader;
138 task_t *leader;
139repeat: 139repeat:
140 atomic_dec(&p->user->processes); 140 atomic_dec(&p->user->processes);
141 write_lock_irq(&tasklist_lock); 141 write_lock_irq(&tasklist_lock);
@@ -209,7 +209,7 @@ out:
209 * 209 *
210 * "I ask you, have you ever known what it is to be an orphan?" 210 * "I ask you, have you ever known what it is to be an orphan?"
211 */ 211 */
212static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task) 212static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
213{ 213{
214 struct task_struct *p; 214 struct task_struct *p;
215 int ret = 1; 215 int ret = 1;
@@ -582,7 +582,8 @@ static void exit_mm(struct task_struct * tsk)
582 mmput(mm); 582 mmput(mm);
583} 583}
584 584
585static inline void choose_new_parent(task_t *p, task_t *reaper) 585static inline void
586choose_new_parent(struct task_struct *p, struct task_struct *reaper)
586{ 587{
587 /* 588 /*
588 * Make sure we're not reparenting to ourselves and that 589 * Make sure we're not reparenting to ourselves and that
@@ -592,7 +593,8 @@ static inline void choose_new_parent(task_t *p, task_t *reaper)
592 p->real_parent = reaper; 593 p->real_parent = reaper;
593} 594}
594 595
595static void reparent_thread(task_t *p, task_t *father, int traced) 596static void
597reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
596{ 598{
597 /* We don't want people slaying init. */ 599 /* We don't want people slaying init. */
598 if (p->exit_signal != -1) 600 if (p->exit_signal != -1)
@@ -656,8 +658,8 @@ static void reparent_thread(task_t *p, task_t *father, int traced)
  * group, and if no such member exists, give it to
  * the global child reaper process (ie "init")
  */
-static void forget_original_parent(struct task_struct * father,
-				   struct list_head *to_release)
+static void
+forget_original_parent(struct task_struct *father, struct list_head *to_release)
 {
 	struct task_struct *p, *reaper = father;
 	struct list_head *_p, *_n;
@@ -680,7 +682,7 @@ static void forget_original_parent(struct task_struct * father,
 	 */
 	list_for_each_safe(_p, _n, &father->children) {
 		int ptrace;
-		p = list_entry(_p,struct task_struct,sibling);
+		p = list_entry(_p, struct task_struct, sibling);
 
 		ptrace = p->ptrace;
 
@@ -709,7 +711,7 @@ static void forget_original_parent(struct task_struct * father,
 			list_add(&p->ptrace_list, to_release);
 	}
 	list_for_each_safe(_p, _n, &father->ptrace_children) {
-		p = list_entry(_p,struct task_struct,ptrace_list);
+		p = list_entry(_p, struct task_struct, ptrace_list);
 		choose_new_parent(p, reaper);
 		reparent_thread(p, father, 1);
 	}
@@ -829,7 +831,7 @@ static void exit_notify(struct task_struct *tsk)
 
 	list_for_each_safe(_p, _n, &ptrace_dead) {
 		list_del_init(_p);
-		t = list_entry(_p,struct task_struct,ptrace_list);
+		t = list_entry(_p, struct task_struct, ptrace_list);
 		release_task(t);
 	}
 
@@ -933,10 +935,9 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(current->pi_state_cache))
 		kfree(current->pi_state_cache);
 	/*
-	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
+	 * Make sure we are holding no locks:
 	 */
-	mutex_debug_check_no_locks_held(tsk);
-	rt_mutex_debug_check_no_locks_held(tsk);
+	debug_check_no_locks_held(tsk);
 
 	if (tsk->io_context)
 		exit_io_context();
@@ -1011,7 +1012,7 @@ asmlinkage void sys_exit_group(int error_code)
 	do_group_exit((error_code & 0xff) << 8);
 }
 
-static int eligible_child(pid_t pid, int options, task_t *p)
+static int eligible_child(pid_t pid, int options, struct task_struct *p)
 {
 	if (pid > 0) {
 		if (p->pid != pid)
@@ -1052,12 +1053,13 @@ static int eligible_child(pid_t pid, int options, task_t *p)
 	return 1;
 }
 
-static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
+static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
 			       int why, int status,
 			       struct siginfo __user *infop,
 			       struct rusage __user *rusagep)
 {
 	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
+
 	put_task_struct(p);
 	if (!retval)
 		retval = put_user(SIGCHLD, &infop->si_signo);
@@ -1082,7 +1084,7 @@ static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
  * the lock and this task is uninteresting.  If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_zombie(task_t *p, int noreap,
+static int wait_task_zombie(struct task_struct *p, int noreap,
 			    struct siginfo __user *infop,
 			    int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1244,8 +1246,8 @@ static int wait_task_zombie(task_t *p, int noreap,
  * the lock and this task is uninteresting.  If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
-			     struct siginfo __user *infop,
+static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+			     int noreap, struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code;
@@ -1359,7 +1361,7 @@ bail_ref:
  * the lock and this task is uninteresting.  If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_continued(task_t *p, int noreap,
+static int wait_task_continued(struct task_struct *p, int noreap,
 			       struct siginfo __user *infop,
 			       int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1445,7 +1447,7 @@ repeat:
 		int ret;
 
 		list_for_each(_p,&tsk->children) {
-			p = list_entry(_p,struct task_struct,sibling);
+			p = list_entry(_p, struct task_struct, sibling);
 
 			ret = eligible_child(pid, options, p);
 			if (!ret)
diff --git a/kernel/fork.c b/kernel/fork.c
index 9064bf9e131b..56e4e07e45f7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -193,7 +193,10 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 
 	down_write(&oldmm->mmap_sem);
 	flush_cache_mm(oldmm);
-	down_write(&mm->mmap_sem);
+	/*
+	 * Not linked in yet - no deadlock potential:
+	 */
+	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
 
 	mm->locked_vm = 0;
 	mm->mmap = NULL;
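
down_write_nested() tells the validator that taking the new mm's mmap_sem while the old one is held is intentional: both locks belong to the same class, but the new mm is not yet reachable by any other task. A hedged sketch of the same pattern with mutexes (names are illustrative; mutex_lock_nested() is the analogous annotation):

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct mm_like {
	struct mutex sem;
};

/* copy state from a live object into a freshly allocated, unpublished one */
static void copy_pair(struct mm_like *old, struct mm_like *new)
{
	mutex_lock(&old->sem);
	/* 'new' is unreachable by others - same class, but no deadlock */
	mutex_lock_nested(&new->sem, SINGLE_DEPTH_NESTING);
	/* ... copy fields across ... */
	mutex_unlock(&new->sem);
	mutex_unlock(&old->sem);
}
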
@@ -919,10 +922,6 @@ static inline void rt_mutex_init_task(struct task_struct *p)
 	spin_lock_init(&p->pi_lock);
 	plist_head_init(&p->pi_waiters, &p->pi_lock);
 	p->pi_blocked_on = NULL;
-# ifdef CONFIG_DEBUG_RT_MUTEXES
-	spin_lock_init(&p->held_list_lock);
-	INIT_LIST_HEAD(&p->held_list_head);
-# endif
 #endif
 }
 
@@ -934,13 +933,13 @@ static inline void rt_mutex_init_task(struct task_struct *p)
  * parts of the process environment (as per the clone
  * flags). The actual kick-off is left to the caller.
  */
-static task_t *copy_process(unsigned long clone_flags,
+static struct task_struct *copy_process(unsigned long clone_flags,
 					unsigned long stack_start,
 					struct pt_regs *regs,
 					unsigned long stack_size,
 					int __user *parent_tidptr,
 					int __user *child_tidptr,
 					int pid)
 {
 	int retval;
 	struct task_struct *p = NULL;
@@ -972,6 +971,10 @@ static task_t *copy_process(unsigned long clone_flags,
 	if (!p)
 		goto fork_out;
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
+	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+#endif
 	retval = -EAGAIN;
 	if (atomic_read(&p->user->processes) >=
 			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
@@ -1046,6 +1049,26 @@ static task_t *copy_process(unsigned long clone_flags,
 	}
 	mpol_fix_fork_child_flag(p);
 #endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	p->irq_events = 0;
+	p->hardirqs_enabled = 0;
+	p->hardirq_enable_ip = 0;
+	p->hardirq_enable_event = 0;
+	p->hardirq_disable_ip = _THIS_IP_;
+	p->hardirq_disable_event = 0;
+	p->softirqs_enabled = 1;
+	p->softirq_enable_ip = _THIS_IP_;
+	p->softirq_enable_event = 0;
+	p->softirq_disable_ip = 0;
+	p->softirq_disable_event = 0;
+	p->hardirq_context = 0;
+	p->softirq_context = 0;
+#endif
+#ifdef CONFIG_LOCKDEP
+	p->lockdep_depth = 0; /* no locks held yet */
+	p->curr_chain_key = 0;
+	p->lockdep_recursion = 0;
+#endif
 
 	rt_mutex_init_task(p);
 
@@ -1271,9 +1294,9 @@ struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
 	return regs;
 }
 
-task_t * __devinit fork_idle(int cpu)
+struct task_struct * __devinit fork_idle(int cpu)
 {
-	task_t *task;
+	struct task_struct *task;
 	struct pt_regs regs;
 
 	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
diff --git a/kernel/futex.c b/kernel/futex.c
index 15caf93e4a43..1dc98e4dd287 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -607,6 +607,22 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
 }
 
 /*
+ * Express the locking dependencies for lockdep:
+ */
+static inline void
+double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
+{
+	if (hb1 <= hb2) {
+		spin_lock(&hb1->lock);
+		if (hb1 < hb2)
+			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
+	} else { /* hb1 > hb2 */
+		spin_lock(&hb2->lock);
+		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+/*
  * Wake up all waiters hashed on the physical page that is mapped
  * to this virtual address:
  */
@@ -674,11 +690,7 @@ retryfull:
 	hb2 = hash_futex(&key2);
 
 retry:
-	if (hb1 < hb2)
-		spin_lock(&hb1->lock);
-	spin_lock(&hb2->lock);
-	if (hb1 > hb2)
-		spin_lock(&hb1->lock);
+	double_lock_hb(hb1, hb2);
 
 	op_ret = futex_atomic_op_inuser(op, uaddr2);
 	if (unlikely(op_ret < 0)) {
@@ -787,11 +799,7 @@ static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
 	hb1 = hash_futex(&key1);
 	hb2 = hash_futex(&key2);
 
-	if (hb1 < hb2)
-		spin_lock(&hb1->lock);
-	spin_lock(&hb2->lock);
-	if (hb1 > hb2)
-		spin_lock(&hb1->lock);
+	double_lock_hb(hb1, hb2);
 
 	if (likely(cmpval != NULL)) {
 		u32 curval;
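
Ordering the two hash-bucket locks by address gives every caller the same global acquisition order, which is what keeps two concurrent futex paths from deadlocking ABBA-style on a pair of buckets. The release side needs no ordering at all; a hypothetical helper mirroring double_lock_hb() (not part of this patch) would only have to handle the hb1 == hb2 case, where just one lock was actually taken:

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}
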
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 8d3dc29ef41a..d17766d40dab 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -669,7 +669,7 @@ static int hrtimer_wakeup(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task)
+void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
 	sl->timer.function = hrtimer_wakeup;
 	sl->task = task;
@@ -782,8 +782,10 @@ static void __devinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	for (i = 0; i < MAX_HRTIMER_BASES; i++, base++)
+	for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
 		spin_lock_init(&base->lock);
+		lockdep_set_class(&base->lock, &base->lock_key);
+	}
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
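
lockdep_set_class() re-keys each base lock into its own class, so the per-CPU bases do not falsely share lock-dependency state. A hedged sketch of the general pattern with illustrative names; the key just has to live in storage with the same lifetime as the lock:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct base_like {
	spinlock_t		lock;
	struct lock_class_key	lock_key;	/* one lockdep class per instance */
};

static void base_init(struct base_like *base)
{
	spin_lock_init(&base->lock);
	/* re-key the lock so each instance forms its own class */
	lockdep_set_class(&base->lock, &base->lock_key);
}
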
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 54105bdfe20d..9336f2e89e40 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -261,10 +261,13 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
 	 * keep it masked and get out of here
 	 */
 	action = desc->action;
-	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
+		desc->status |= IRQ_PENDING;
 		goto out;
+	}
 
 	desc->status |= IRQ_INPROGRESS;
+	desc->status &= ~IRQ_PENDING;
 	spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, regs, action);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index aeb6e391276c..fc4e906aedbd 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -132,7 +132,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
 		handle_dynamic_tick(action);
 
 	if (!(action->flags & IRQF_DISABLED))
-		local_irq_enable();
+		local_irq_enable_in_hardirq();
 
 	do {
 		ret = action->handler(irq, action->dev_id, regs);
@@ -249,3 +249,19 @@ out:
 	return 1;
 }
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+static struct lock_class_key irq_desc_lock_class;
+
+void early_init_irq_lock_class(void)
+{
+	int i;
+
+	for (i = 0; i < NR_IRQS; i++)
+		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
+}
+
+#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c911c6ec4dd6..4e461438e48b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -410,6 +410,12 @@ int request_irq(unsigned int irq,
 	struct irqaction *action;
 	int retval;
 
+#ifdef CONFIG_LOCKDEP
+	/*
+	 * Lockdep wants atomic interrupt handlers:
+	 */
+	irqflags |= SA_INTERRUPT;
+#endif
 	/*
 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
 	 * otherwise we'll have trouble later trying to figure out
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 1b7157af051c..1d32defa38ab 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -233,7 +233,7 @@ static void __call_usermodehelper(void *data)
 int call_usermodehelper_keys(char *path, char **argv, char **envp,
 			     struct key *session_keyring, int wait)
 {
-	DECLARE_COMPLETION(done);
+	DECLARE_COMPLETION_ONSTACK(done);
 	struct subprocess_info sub_info = {
 		.complete	= &done,
 		.path		= path,
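
DECLARE_COMPLETION_ONSTACK() matters only with lockdep enabled: an object on the stack cannot carry a static lock-class key, so the on-stack variant sets the key up at runtime instead. A minimal usage sketch (illustrative caller, not kmod.c code):

#include <linux/completion.h>

static void wait_for_helper_done(void)
{
	DECLARE_COMPLETION_ONSTACK(done);	/* on-stack: runtime-keyed */

	/* ... hand &done to another context that will complete() it ... */
	wait_for_completion(&done);
}
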
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
new file mode 100644
index 000000000000..f32ca78c198d
--- /dev/null
+++ b/kernel/lockdep.c
@@ -0,0 +1,2702 @@
1/*
2 * kernel/lockdep.c
3 *
4 * Runtime locking correctness validator
5 *
6 * Started by Ingo Molnar:
7 *
8 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 *
10 * this code maps all the lock dependencies as they occur in a live kernel
11 * and will warn about the following classes of locking bugs:
12 *
13 * - lock inversion scenarios
14 * - circular lock dependencies
15 * - hardirq/softirq safe/unsafe locking bugs
16 *
17 * Bugs are reported even if the current locking scenario does not cause
18 * any deadlock at this point.
19 *
20 * I.e. if anytime in the past two locks were taken in a different order,
21 * even if it happened for another task, even if those were different
22 * locks (but of the same class as this lock), this code will detect it.
23 *
24 * Thanks to Arjan van de Ven for coming up with the initial idea of
25 * mapping lock dependencies at runtime.
26 */
27#include <linux/mutex.h>
28#include <linux/sched.h>
29#include <linux/delay.h>
30#include <linux/module.h>
31#include <linux/proc_fs.h>
32#include <linux/seq_file.h>
33#include <linux/spinlock.h>
34#include <linux/kallsyms.h>
35#include <linux/interrupt.h>
36#include <linux/stacktrace.h>
37#include <linux/debug_locks.h>
38#include <linux/irqflags.h>
39
40#include <asm/sections.h>
41
42#include "lockdep_internals.h"
43
44/*
45 * hash_lock: protects the lockdep hashes and class/list/hash allocators.
46 *
47 * This is one of the rare exceptions where it's justified
48 * to use a raw spinlock - we really don't want the spinlock
49 * code to recurse back into the lockdep code.
50 */
51static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
52
53static int lockdep_initialized;
54
55unsigned long nr_list_entries;
56static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
57
58/*
59 * Allocate a lockdep entry. (assumes hash_lock held; returns
60 * NULL on failure)
61 */
62static struct lock_list *alloc_list_entry(void)
63{
64 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
65 __raw_spin_unlock(&hash_lock);
66 debug_locks_off();
67 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
68 printk("turning off the locking correctness validator.\n");
69 return NULL;
70 }
71 return list_entries + nr_list_entries++;
72}
73
74/*
75 * All data structures here are protected by the global debug_lock.
76 *
77 * Mutex key structs only get allocated once, during bootup, and never
78 * get freed - this significantly simplifies the debugging code.
79 */
80unsigned long nr_lock_classes;
81static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
82
83/*
84 * We keep a global list of all lock classes. The list only grows,
85 * never shrinks. The list is only accessed with the lockdep
86 * spinlock lock held.
87 */
88LIST_HEAD(all_lock_classes);
89
90/*
91 * The lockdep classes are in a hash-table as well, for fast lookup:
92 */
93#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
94#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
95#define CLASSHASH_MASK (CLASSHASH_SIZE - 1)
96#define __classhashfn(key) ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
97#define classhashentry(key) (classhash_table + __classhashfn((key)))
98
99static struct list_head classhash_table[CLASSHASH_SIZE];
100
101unsigned long nr_lock_chains;
102static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
103
104/*
105 * We put the lock dependency chains into a hash-table as well, to cache
106 * their existence:
107 */
108#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
109#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
110#define CHAINHASH_MASK (CHAINHASH_SIZE - 1)
111#define __chainhashfn(chain) \
112 (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
113#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
114
115static struct list_head chainhash_table[CHAINHASH_SIZE];
116
117/*
118 * The hash key of the lock dependency chains is a hash itself too:
119 * it's a hash of all locks taken up to that lock, including that lock.
120 * It's a 64-bit hash, because it's important for the keys to be
121 * unique.
122 */
123#define iterate_chain_key(key1, key2) \
124 (((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \
125 ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \
126 (key2))
127
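
The shift-and-xor above folds each newly taken lock's class id into a running 64-bit key, so a whole lock sequence hashes to one value. A standalone model of the computation, with an illustrative 13-bit key width standing in for MAX_LOCKDEP_KEYS_BITS:

#include <stdio.h>
#include <stdint.h>

#define KEY_BITS 13	/* illustrative stand-in for MAX_LOCKDEP_KEYS_BITS */

static uint64_t chain_key_step(uint64_t key, uint64_t class_id)
{
	return (key << (KEY_BITS / 2)) ^
	       (key >> (64 - KEY_BITS / 2)) ^
	       class_id;
}

int main(void)
{
	uint64_t ids[] = { 3, 17, 42 };	/* class ids of locks taken in order */
	uint64_t key = 0;
	size_t i;

	/* same sequence of classes => same chain key => chain-cache hit */
	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		key = chain_key_step(key, ids[i]);
	printf("chain key: %016llx\n", (unsigned long long)key);
	return 0;
}
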
128void lockdep_off(void)
129{
130 current->lockdep_recursion++;
131}
132
133EXPORT_SYMBOL(lockdep_off);
134
135void lockdep_on(void)
136{
137 current->lockdep_recursion--;
138}
139
140EXPORT_SYMBOL(lockdep_on);
141
142int lockdep_internal(void)
143{
144 return current->lockdep_recursion != 0;
145}
146
147EXPORT_SYMBOL(lockdep_internal);
148
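
The recursion counter is also how other kernel code can opt out of tracking for a critical region; a hedged usage sketch (illustrative function, not part of this file):

static void untracked_section(void)
{
	lockdep_off();
	/* lock operations here are invisible to the validator */
	lockdep_on();
}
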
149/*
150 * Debugging switches:
151 */
152
153#define VERBOSE 0
154#ifdef VERBOSE
155# define VERY_VERBOSE 0
156#endif
157
158#if VERBOSE
159# define HARDIRQ_VERBOSE 1
160# define SOFTIRQ_VERBOSE 1
161#else
162# define HARDIRQ_VERBOSE 0
163# define SOFTIRQ_VERBOSE 0
164#endif
165
166#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
167/*
168 * Quick filtering for interesting events:
169 */
170static int class_filter(struct lock_class *class)
171{
172 if (class->name_version == 1 &&
173 !strcmp(class->name, "&rl->lock"))
174 return 1;
175 if (class->name_version == 1 &&
176 !strcmp(class->name, "&ni->mrec_lock"))
177 return 1;
178 if (class->name_version == 1 &&
179 !strcmp(class->name, "mft_ni_runlist_lock"))
180 return 1;
181 if (class->name_version == 1 &&
182 !strcmp(class->name, "mft_ni_mrec_lock"))
183 return 1;
184 if (class->name_version == 1 &&
185 !strcmp(class->name, "&vol->lcnbmp_lock"))
186 return 1;
187 return 0;
188}
189#endif
190
191static int verbose(struct lock_class *class)
192{
193#if VERBOSE
194 return class_filter(class);
195#endif
196 return 0;
197}
198
199#ifdef CONFIG_TRACE_IRQFLAGS
200
201static int hardirq_verbose(struct lock_class *class)
202{
203#if HARDIRQ_VERBOSE
204 return class_filter(class);
205#endif
206 return 0;
207}
208
209static int softirq_verbose(struct lock_class *class)
210{
211#if SOFTIRQ_VERBOSE
212 return class_filter(class);
213#endif
214 return 0;
215}
216
217#endif
218
219/*
220 * Stack-trace: tightly packed array of stack backtrace
221 * addresses. Protected by the hash_lock.
222 */
223unsigned long nr_stack_trace_entries;
224static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
225
226static int save_trace(struct stack_trace *trace)
227{
228 trace->nr_entries = 0;
229 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
230 trace->entries = stack_trace + nr_stack_trace_entries;
231
232 save_stack_trace(trace, NULL, 0, 3);
233
234 trace->max_entries = trace->nr_entries;
235
236 nr_stack_trace_entries += trace->nr_entries;
237 if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
238 return 0;
239
240 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
241 __raw_spin_unlock(&hash_lock);
242 if (debug_locks_off()) {
243 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
244 printk("turning off the locking correctness validator.\n");
245 dump_stack();
246 }
247 return 0;
248 }
249
250 return 1;
251}
252
253unsigned int nr_hardirq_chains;
254unsigned int nr_softirq_chains;
255unsigned int nr_process_chains;
256unsigned int max_lockdep_depth;
257unsigned int max_recursion_depth;
258
259#ifdef CONFIG_DEBUG_LOCKDEP
260/*
261 * We cannot printk in early bootup code. Not even early_printk()
262 * might work. So we mark any initialization errors and printk
263 * about it later on, in lockdep_info().
264 */
265static int lockdep_init_error;
266
267/*
268 * Various lockdep statistics:
269 */
270atomic_t chain_lookup_hits;
271atomic_t chain_lookup_misses;
272atomic_t hardirqs_on_events;
273atomic_t hardirqs_off_events;
274atomic_t redundant_hardirqs_on;
275atomic_t redundant_hardirqs_off;
276atomic_t softirqs_on_events;
277atomic_t softirqs_off_events;
278atomic_t redundant_softirqs_on;
279atomic_t redundant_softirqs_off;
280atomic_t nr_unused_locks;
281atomic_t nr_cyclic_checks;
282atomic_t nr_cyclic_check_recursions;
283atomic_t nr_find_usage_forwards_checks;
284atomic_t nr_find_usage_forwards_recursions;
285atomic_t nr_find_usage_backwards_checks;
286atomic_t nr_find_usage_backwards_recursions;
287# define debug_atomic_inc(ptr) atomic_inc(ptr)
288# define debug_atomic_dec(ptr) atomic_dec(ptr)
289# define debug_atomic_read(ptr) atomic_read(ptr)
290#else
291# define debug_atomic_inc(ptr) do { } while (0)
292# define debug_atomic_dec(ptr) do { } while (0)
293# define debug_atomic_read(ptr) 0
294#endif
295
296/*
297 * Locking printouts:
298 */
299
300static const char *usage_str[] =
301{
302 [LOCK_USED] = "initial-use ",
303 [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W",
304 [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W",
305 [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W",
306 [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
307 [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
308 [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
309 [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
310 [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
311};
312
313const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
314{
315 unsigned long offs, size;
316 char *modname;
317
318 return kallsyms_lookup((unsigned long)key, &size, &offs, &modname, str);
319}
320
321void
322get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
323{
324 *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
325
326 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
327 *c1 = '+';
328 else
329 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
330 *c1 = '-';
331
332 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
333 *c2 = '+';
334 else
335 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
336 *c2 = '-';
337
338 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
339 *c3 = '-';
340 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
341 *c3 = '+';
342 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
343 *c3 = '?';
344 }
345
346 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
347 *c4 = '-';
348 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
349 *c4 = '+';
350 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
351 *c4 = '?';
352 }
353}
354
355static void print_lock_name(struct lock_class *class)
356{
357 char str[128], c1, c2, c3, c4;
358 const char *name;
359
360 get_usage_chars(class, &c1, &c2, &c3, &c4);
361
362 name = class->name;
363 if (!name) {
364 name = __get_key_name(class->key, str);
365 printk(" (%s", name);
366 } else {
367 printk(" (%s", name);
368 if (class->name_version > 1)
369 printk("#%d", class->name_version);
370 if (class->subclass)
371 printk("/%d", class->subclass);
372 }
373 printk("){%c%c%c%c}", c1, c2, c3, c4);
374}
375
376static void print_lockdep_cache(struct lockdep_map *lock)
377{
378 const char *name;
379 char str[128];
380
381 name = lock->name;
382 if (!name)
383 name = __get_key_name(lock->key->subkeys, str);
384
385 printk("%s", name);
386}
387
388static void print_lock(struct held_lock *hlock)
389{
390 print_lock_name(hlock->class);
391 printk(", at: ");
392 print_ip_sym(hlock->acquire_ip);
393}
394
395static void lockdep_print_held_locks(struct task_struct *curr)
396{
397 int i, depth = curr->lockdep_depth;
398
399 if (!depth) {
400 printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
401 return;
402 }
403 printk("%d lock%s held by %s/%d:\n",
404 depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
405
406 for (i = 0; i < depth; i++) {
407 printk(" #%d: ", i);
408 print_lock(curr->held_locks + i);
409 }
410}
411/*
412 * Helper to print a nice hierarchy of lock dependencies:
413 */
414static void print_spaces(int nr)
415{
416 int i;
417
418 for (i = 0; i < nr; i++)
419 printk(" ");
420}
421
422static void print_lock_class_header(struct lock_class *class, int depth)
423{
424 int bit;
425
426 print_spaces(depth);
427 printk("->");
428 print_lock_name(class);
429 printk(" ops: %lu", class->ops);
430 printk(" {\n");
431
432 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
433 if (class->usage_mask & (1 << bit)) {
434 int len = depth;
435
436 print_spaces(depth);
437 len += printk(" %s", usage_str[bit]);
438 len += printk(" at:\n");
439 print_stack_trace(class->usage_traces + bit, len);
440 }
441 }
442 print_spaces(depth);
443 printk(" }\n");
444
445 print_spaces(depth);
446 printk(" ... key at: ");
447 print_ip_sym((unsigned long)class->key);
448}
449
450/*
451 * printk all lock dependencies starting at <entry>:
452 */
453static void print_lock_dependencies(struct lock_class *class, int depth)
454{
455 struct lock_list *entry;
456
457 if (DEBUG_LOCKS_WARN_ON(depth >= 20))
458 return;
459
460 print_lock_class_header(class, depth);
461
462 list_for_each_entry(entry, &class->locks_after, entry) {
463 DEBUG_LOCKS_WARN_ON(!entry->class);
464 print_lock_dependencies(entry->class, depth + 1);
465
466 print_spaces(depth);
467 printk(" ... acquired at:\n");
468 print_stack_trace(&entry->trace, 2);
469 printk("\n");
470 }
471}
472
473/*
474 * Add a new dependency to the head of the list:
475 */
476static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
477 struct list_head *head, unsigned long ip)
478{
479 struct lock_list *entry;
480 /*
481 * Lock not present yet - get a new dependency struct and
482 * add it to the list:
483 */
484 entry = alloc_list_entry();
485 if (!entry)
486 return 0;
487
488 entry->class = this;
489 save_trace(&entry->trace);
490
491 /*
492 * Since we never remove from the dependency list, the list can
493 * be walked lockless by other CPUs, it's only allocation
494 * that must be protected by the spinlock. But this also means
495 * we must make new entries visible only once writes to the
496 * entry become visible - hence the RCU op:
497 */
498 list_add_tail_rcu(&entry->entry, head);
499
500 return 1;
501}
502
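
Because entries are only ever appended with list_add_tail_rcu() and never freed, a reader can walk a dependency list with no lock held at all. A hedged sketch of such a lockless walker (illustrative, not a function in this file):

static int class_depends_on(struct lock_class *class, struct lock_class *target)
{
	struct lock_list *entry;

	/* safe without hash_lock: the list only grows, RCU-publish ordered */
	list_for_each_entry(entry, &class->locks_after, entry)
		if (entry->class == target)
			return 1;
	return 0;
}
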
503/*
504 * Recursive, forwards-direction lock-dependency checking, used
505 * both for noncyclic checking and for hardirq-unsafe/softirq-unsafe
506 * checking.
507 *
508 * (to keep the stackframe of the recursive functions small we
509 * use these global variables, and we also mark various helper
510 * functions as noinline.)
511 */
512static struct held_lock *check_source, *check_target;
513
514/*
515 * Print a dependency chain entry (this is only done when a deadlock
516 * has been detected):
517 */
518static noinline int
519print_circular_bug_entry(struct lock_list *target, unsigned int depth)
520{
521 if (debug_locks_silent)
522 return 0;
523 printk("\n-> #%u", depth);
524 print_lock_name(target->class);
525 printk(":\n");
526 print_stack_trace(&target->trace, 6);
527
528 return 0;
529}
530
531/*
532 * When a circular dependency is detected, print the
533 * header first:
534 */
535static noinline int
536print_circular_bug_header(struct lock_list *entry, unsigned int depth)
537{
538 struct task_struct *curr = current;
539
540 __raw_spin_unlock(&hash_lock);
541 debug_locks_off();
542 if (debug_locks_silent)
543 return 0;
544
545 printk("\n=======================================================\n");
546 printk( "[ INFO: possible circular locking dependency detected ]\n");
547 printk( "-------------------------------------------------------\n");
548 printk("%s/%d is trying to acquire lock:\n",
549 curr->comm, curr->pid);
550 print_lock(check_source);
551 printk("\nbut task is already holding lock:\n");
552 print_lock(check_target);
553 printk("\nwhich lock already depends on the new lock.\n\n");
554 printk("\nthe existing dependency chain (in reverse order) is:\n");
555
556 print_circular_bug_entry(entry, depth);
557
558 return 0;
559}
560
561static noinline int print_circular_bug_tail(void)
562{
563 struct task_struct *curr = current;
564 struct lock_list this;
565
566 if (debug_locks_silent)
567 return 0;
568
569 this.class = check_source->class;
570 save_trace(&this.trace);
571 print_circular_bug_entry(&this, 0);
572
573 printk("\nother info that might help us debug this:\n\n");
574 lockdep_print_held_locks(curr);
575
576 printk("\nstack backtrace:\n");
577 dump_stack();
578
579 return 0;
580}
581
582static int noinline print_infinite_recursion_bug(void)
583{
584 __raw_spin_unlock(&hash_lock);
585 DEBUG_LOCKS_WARN_ON(1);
586
587 return 0;
588}
589
590/*
591 * Prove that the dependency graph starting at <entry> cannot
592 * lead to <target>. Print an error and return 0 if it does.
593 */
594static noinline int
595check_noncircular(struct lock_class *source, unsigned int depth)
596{
597 struct lock_list *entry;
598
599 debug_atomic_inc(&nr_cyclic_check_recursions);
600 if (depth > max_recursion_depth)
601 max_recursion_depth = depth;
602 if (depth >= 20)
603 return print_infinite_recursion_bug();
604 /*
605 * Check this lock's dependency list:
606 */
607 list_for_each_entry(entry, &source->locks_after, entry) {
608 if (entry->class == check_target->class)
609 return print_circular_bug_header(entry, depth+1);
610 debug_atomic_inc(&nr_cyclic_checks);
611 if (!check_noncircular(entry->class, depth+1))
612 return print_circular_bug_entry(entry, depth+1);
613 }
614 return 1;
615}
616
617static int very_verbose(struct lock_class *class)
618{
619#if VERY_VERBOSE
620 return class_filter(class);
621#endif
622 return 0;
623}
624#ifdef CONFIG_TRACE_IRQFLAGS
625
626/*
627 * Forwards and backwards subgraph searching, for the purposes of
628 * proving that two subgraphs can be connected by a new dependency
629 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
630 */
631static enum lock_usage_bit find_usage_bit;
632static struct lock_class *forwards_match, *backwards_match;
633
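
The kind of bug these subgraph searches catch, sketched as illustrative driver-style code (not part of lockdep): a lock taken from hardirq context must never sit below a lock that is held with interrupts enabled somewhere else:

static DEFINE_SPINLOCK(irq_safe_lock);	/* also taken from a hardirq handler */
static DEFINE_SPINLOCK(unsafe_lock);	/* only ever taken with irqs enabled */

static void path_one(void)
{
	spin_lock(&unsafe_lock);
	/* an interrupt arriving here would spin on irq_safe_lock... */
	spin_unlock(&unsafe_lock);
}

static void path_two(void)
{
	spin_lock_irq(&irq_safe_lock);
	spin_lock(&unsafe_lock);	/* irq-safe -> irq-unsafe dependency: */
	spin_unlock(&unsafe_lock);	/* reported even if no irq ever hits */
	spin_unlock_irq(&irq_safe_lock);
}
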
634/*
635 * Find a node in the forwards-direction dependency sub-graph starting
636 * at <source> that matches <find_usage_bit>.
637 *
638 * Return 2 if such a node exists in the subgraph, and put that node
639 * into <forwards_match>.
640 *
641 * Return 1 otherwise and keep <forwards_match> unchanged.
642 * Return 0 on error.
643 */
644static noinline int
645find_usage_forwards(struct lock_class *source, unsigned int depth)
646{
647 struct lock_list *entry;
648 int ret;
649
650 if (depth > max_recursion_depth)
651 max_recursion_depth = depth;
652 if (depth >= 20)
653 return print_infinite_recursion_bug();
654
655 debug_atomic_inc(&nr_find_usage_forwards_checks);
656 if (source->usage_mask & (1 << find_usage_bit)) {
657 forwards_match = source;
658 return 2;
659 }
660
661 /*
662 * Check this lock's dependency list:
663 */
664 list_for_each_entry(entry, &source->locks_after, entry) {
665 debug_atomic_inc(&nr_find_usage_forwards_recursions);
666 ret = find_usage_forwards(entry->class, depth+1);
667 if (ret == 2 || ret == 0)
668 return ret;
669 }
670 return 1;
671}
672
673/*
674 * Find a node in the backwards-direction dependency sub-graph starting
675 * at <source> that matches <find_usage_bit>.
676 *
677 * Return 2 if such a node exists in the subgraph, and put that node
678 * into <backwards_match>.
679 *
680 * Return 1 otherwise and keep <backwards_match> unchanged.
681 * Return 0 on error.
682 */
683static noinline int
684find_usage_backwards(struct lock_class *source, unsigned int depth)
685{
686 struct lock_list *entry;
687 int ret;
688
689 if (depth > max_recursion_depth)
690 max_recursion_depth = depth;
691 if (depth >= 20)
692 return print_infinite_recursion_bug();
693
694 debug_atomic_inc(&nr_find_usage_backwards_checks);
695 if (source->usage_mask & (1 << find_usage_bit)) {
696 backwards_match = source;
697 return 2;
698 }
699
700 /*
701 * Check this lock's dependency list:
702 */
703 list_for_each_entry(entry, &source->locks_before, entry) {
704 debug_atomic_inc(&nr_find_usage_backwards_recursions);
705 ret = find_usage_backwards(entry->class, depth+1);
706 if (ret == 2 || ret == 0)
707 return ret;
708 }
709 return 1;
710}
711
712static int
713print_bad_irq_dependency(struct task_struct *curr,
714 struct held_lock *prev,
715 struct held_lock *next,
716 enum lock_usage_bit bit1,
717 enum lock_usage_bit bit2,
718 const char *irqclass)
719{
720 __raw_spin_unlock(&hash_lock);
721 debug_locks_off();
722 if (debug_locks_silent)
723 return 0;
724
725 printk("\n======================================================\n");
726 printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
727 irqclass, irqclass);
728 printk( "------------------------------------------------------\n");
729 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
730 curr->comm, curr->pid,
731 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
732 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
733 curr->hardirqs_enabled,
734 curr->softirqs_enabled);
735 print_lock(next);
736
737 printk("\nand this task is already holding:\n");
738 print_lock(prev);
739 printk("which would create a new lock dependency:\n");
740 print_lock_name(prev->class);
741 printk(" ->");
742 print_lock_name(next->class);
743 printk("\n");
744
745 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
746 irqclass);
747 print_lock_name(backwards_match);
748 printk("\n... which became %s-irq-safe at:\n", irqclass);
749
750 print_stack_trace(backwards_match->usage_traces + bit1, 1);
751
752 printk("\nto a %s-irq-unsafe lock:\n", irqclass);
753 print_lock_name(forwards_match);
754 printk("\n... which became %s-irq-unsafe at:\n", irqclass);
755 printk("...");
756
757 print_stack_trace(forwards_match->usage_traces + bit2, 1);
758
759 printk("\nother info that might help us debug this:\n\n");
760 lockdep_print_held_locks(curr);
761
762 printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
763 print_lock_dependencies(backwards_match, 0);
764
765 printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
766 print_lock_dependencies(forwards_match, 0);
767
768 printk("\nstack backtrace:\n");
769 dump_stack();
770
771 return 0;
772}
773
774static int
775check_usage(struct task_struct *curr, struct held_lock *prev,
776 struct held_lock *next, enum lock_usage_bit bit_backwards,
777 enum lock_usage_bit bit_forwards, const char *irqclass)
778{
779 int ret;
780
781 find_usage_bit = bit_backwards;
782 /* fills in <backwards_match> */
783 ret = find_usage_backwards(prev->class, 0);
784 if (!ret || ret == 1)
785 return ret;
786
787 find_usage_bit = bit_forwards;
788 ret = find_usage_forwards(next->class, 0);
789 if (!ret || ret == 1)
790 return ret;
791 /* ret == 2 */
792 return print_bad_irq_dependency(curr, prev, next,
793 bit_backwards, bit_forwards, irqclass);
794}
795
796#endif
797
798static int
799print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
800 struct held_lock *next)
801{
802 debug_locks_off();
803 __raw_spin_unlock(&hash_lock);
804 if (debug_locks_silent)
805 return 0;
806
807 printk("\n=============================================\n");
808 printk( "[ INFO: possible recursive locking detected ]\n");
809 printk( "---------------------------------------------\n");
810 printk("%s/%d is trying to acquire lock:\n",
811 curr->comm, curr->pid);
812 print_lock(next);
813 printk("\nbut task is already holding lock:\n");
814 print_lock(prev);
815
816 printk("\nother info that might help us debug this:\n");
817 lockdep_print_held_locks(curr);
818
819 printk("\nstack backtrace:\n");
820 dump_stack();
821
822 return 0;
823}
824
825/*
826 * Check whether we are holding such a class already.
827 *
828 * (Note that this has to be done separately, because the graph cannot
829 * detect such classes of deadlocks.)
830 *
831 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
832 */
833static int
834check_deadlock(struct task_struct *curr, struct held_lock *next,
835 struct lockdep_map *next_instance, int read)
836{
837 struct held_lock *prev;
838 int i;
839
840 for (i = 0; i < curr->lockdep_depth; i++) {
841 prev = curr->held_locks + i;
842 if (prev->class != next->class)
843 continue;
844 /*
845 * Allow read-after-read recursion of the same
846 * lock class (i.e. read_lock(lock)+read_lock(lock)):
847 */
848 if ((read == 2) && prev->read)
849 return 2;
850 return print_deadlock_bug(curr, prev, next);
851 }
852 return 1;
853}
854
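
Concretely, the read == 2 escape above is what keeps the following legitimate pattern quiet while still flagging its write-side equivalent (illustrative code, not part of lockdep):

static void reader_recursion(rwlock_t *lock)
{
	read_lock(lock);
	read_lock(lock);	/* recursive read of the same class: allowed */
	read_unlock(lock);
	read_unlock(lock);

	/* write_lock(lock); write_lock(lock); would be reported (and hang) */
}
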
855/*
856 * There was a chain-cache miss, and we are about to add a new dependency
857 * to a previous lock. We recursively validate the following rules:
858 *
859 * - would the adding of the <prev> -> <next> dependency create a
860 * circular dependency in the graph? [== circular deadlock]
861 *
862 * - does the new prev->next dependency connect any hardirq-safe lock
863 * (in the full backwards-subgraph starting at <prev>) with any
864 * hardirq-unsafe lock (in the full forwards-subgraph starting at
865 * <next>)? [== illegal lock inversion with hardirq contexts]
866 *
867 * - does the new prev->next dependency connect any softirq-safe lock
868 * (in the full backwards-subgraph starting at <prev>) with any
869 * softirq-unsafe lock (in the full forwards-subgraph starting at
870 * <next>)? [== illegal lock inversion with softirq contexts]
871 *
872 * any of these scenarios could lead to a deadlock.
873 *
874 * Then if all the validations pass, we add the forwards and backwards
875 * dependency.
876 */
877static int
878check_prev_add(struct task_struct *curr, struct held_lock *prev,
879 struct held_lock *next)
880{
881 struct lock_list *entry;
882 int ret;
883
884 /*
885 * Prove that the new <prev> -> <next> dependency would not
886 * create a circular dependency in the graph. (We do this by
887 * forward-recursing into the graph starting at <next>, and
888 * checking whether we can reach <prev>.)
889 *
890 * We are using global variables to control the recursion, to
891 * keep the stackframe size of the recursive functions low:
892 */
893 check_source = next;
894 check_target = prev;
895 if (!(check_noncircular(next->class, 0)))
896 return print_circular_bug_tail();
897
898#ifdef CONFIG_TRACE_IRQFLAGS
899 /*
900 * Prove that the new dependency does not connect a hardirq-safe
901 * lock with a hardirq-unsafe lock - to achieve this we search
902 * the backwards-subgraph starting at <prev>, and the
903 * forwards-subgraph starting at <next>:
904 */
905 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
906 LOCK_ENABLED_HARDIRQS, "hard"))
907 return 0;
908
909 /*
910 * Prove that the new dependency does not connect a hardirq-safe-read
911 * lock with a hardirq-unsafe lock - to achieve this we search
912 * the backwards-subgraph starting at <prev>, and the
913 * forwards-subgraph starting at <next>:
914 */
915 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
916 LOCK_ENABLED_HARDIRQS, "hard-read"))
917 return 0;
918
919 /*
920 * Prove that the new dependency does not connect a softirq-safe
921 * lock with a softirq-unsafe lock - to achieve this we search
922 * the backwards-subgraph starting at <prev>, and the
923 * forwards-subgraph starting at <next>:
924 */
925 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
926 LOCK_ENABLED_SOFTIRQS, "soft"))
927 return 0;
928 /*
929 * Prove that the new dependency does not connect a softirq-safe-read
930 * lock with a softirq-unsafe lock - to achieve this we search
931 * the backwards-subgraph starting at <prev>, and the
932 * forwards-subgraph starting at <next>:
933 */
934 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
935 LOCK_ENABLED_SOFTIRQS, "soft"))
936 return 0;
937#endif
938 /*
939 * For recursive read-locks we do all the dependency checks,
940 * but we don't store read-triggered dependencies (only
941 * write-triggered dependencies). This ensures that only the
942 * write-side dependencies matter, and that if for example a
943 * write-lock never takes any other locks, then the reads are
944 * equivalent to a NOP.
945 */
946 if (next->read == 2 || prev->read == 2)
947 return 1;
948 /*
949 * Is the <prev> -> <next> dependency already present?
950 *
951 * (this may occur even though this is a new chain: consider
952 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
953 * chains - the second one will be new, but L1 already has
954 * L2 added to its dependency list, due to the first chain.)
955 */
956 list_for_each_entry(entry, &prev->class->locks_after, entry) {
957 if (entry->class == next->class)
958 return 2;
959 }
960
961 /*
962 * Ok, all validations passed, add the new lock
963 * to the previous lock's dependency list:
964 */
965 ret = add_lock_to_list(prev->class, next->class,
966 &prev->class->locks_after, next->acquire_ip);
967 if (!ret)
968 return 0;
969 /*
970 * Return value of 2 signals 'dependency already added',
971 * in that case we don't have to add the backlink either.
972 */
973 if (ret == 2)
974 return 2;
975 ret = add_lock_to_list(next->class, prev->class,
976 &next->class->locks_before, next->acquire_ip);
977
978 /*
979 * Debugging printouts:
980 */
981 if (verbose(prev->class) || verbose(next->class)) {
982 __raw_spin_unlock(&hash_lock);
983 printk("\n new dependency: ");
984 print_lock_name(prev->class);
985 printk(" => ");
986 print_lock_name(next->class);
987 printk("\n");
988 dump_stack();
989 __raw_spin_lock(&hash_lock);
990 }
991 return 1;
992}
993
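
The circular-dependency half of this validation is easiest to see on the classic two-lock case; illustrative driver-style code, not part of lockdep:

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void first_path(void)
{
	spin_lock(&lock_a);
	spin_lock(&lock_b);	/* records the dependency A -> B */
	spin_unlock(&lock_b);
	spin_unlock(&lock_a);
}

static void second_path(void)
{
	spin_lock(&lock_b);
	spin_lock(&lock_a);	/* B -> A closes the cycle: reported even if
				   the two paths never actually race */
	spin_unlock(&lock_a);
	spin_unlock(&lock_b);
}
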
994/*
995 * Add the dependency to all directly-previous locks that are 'relevant'.
996 * The ones that are relevant are (in increasing distance from curr):
997 * all consecutive trylock entries and the final non-trylock entry - or
998 * the end of this context's lock-chain - whichever comes first.
999 */
1000static int
1001check_prevs_add(struct task_struct *curr, struct held_lock *next)
1002{
1003 int depth = curr->lockdep_depth;
1004 struct held_lock *hlock;
1005
1006 /*
1007 * Debugging checks.
1008 *
1009 * Depth must not be zero for a non-head lock:
1010 */
1011 if (!depth)
1012 goto out_bug;
1013 /*
1014 * At least two relevant locks must exist for this
1015 * to be a head:
1016 */
1017 if (curr->held_locks[depth].irq_context !=
1018 curr->held_locks[depth-1].irq_context)
1019 goto out_bug;
1020
1021 for (;;) {
1022 hlock = curr->held_locks + depth-1;
1023 /*
1024 * Only non-recursive-read entries get new dependencies
1025 * added:
1026 */
1027 if (hlock->read != 2) {
1028 check_prev_add(curr, hlock, next);
1029 /*
1030 * Stop after the first non-trylock entry,
1031 * as non-trylock entries have added their
1032 * own direct dependencies already, so this
1033 * lock is connected to them indirectly:
1034 */
1035 if (!hlock->trylock)
1036 break;
1037 }
1038 depth--;
1039 /*
1040 * End of lock-stack?
1041 */
1042 if (!depth)
1043 break;
1044 /*
1045 * Stop the search if we cross into another context:
1046 */
1047 if (curr->held_locks[depth].irq_context !=
1048 curr->held_locks[depth-1].irq_context)
1049 break;
1050 }
1051 return 1;
1052out_bug:
1053 __raw_spin_unlock(&hash_lock);
1054 DEBUG_LOCKS_WARN_ON(1);
1055
1056 return 0;
1057}
1058
1059
1060/*
1061 * Is this the address of a static object:
1062 */
1063static int static_obj(void *obj)
1064{
1065 unsigned long start = (unsigned long) &_stext,
1066 end = (unsigned long) &_end,
1067 addr = (unsigned long) obj;
1068#ifdef CONFIG_SMP
1069 int i;
1070#endif
1071
1072 /*
1073 * static variable?
1074 */
1075 if ((addr >= start) && (addr < end))
1076 return 1;
1077
1078#ifdef CONFIG_SMP
1079 /*
1080 * percpu var?
1081 */
1082 for_each_possible_cpu(i) {
1083 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
1084 end = (unsigned long) &__per_cpu_end + per_cpu_offset(i);
1085
1086 if ((addr >= start) && (addr < end))
1087 return 1;
1088 }
1089#endif
1090
1091 /*
1092 * module var?
1093 */
1094 return is_module_address(addr);
1095}
1096
1097/*
1098 * To make lock name printouts unique, we calculate a unique
1099 * class->name_version generation counter:
1100 */
1101static int count_matching_names(struct lock_class *new_class)
1102{
1103 struct lock_class *class;
1104 int count = 0;
1105
1106 if (!new_class->name)
1107 return 0;
1108
1109 list_for_each_entry(class, &all_lock_classes, lock_entry) {
1110 if (new_class->key - new_class->subclass == class->key)
1111 return class->name_version;
1112 if (class->name && !strcmp(class->name, new_class->name))
1113 count = max(count, class->name_version);
1114 }
1115
1116 return count + 1;
1117}
1118
1119extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
1120
1121/*
1122 * Register a lock's class in the hash-table, if the class is not present
1123 * yet. Otherwise we look it up. We cache the result in the lock object
1124 * itself, so actual lookup of the hash should be once per lock object.
1125 */
1126static inline struct lock_class *
1127register_lock_class(struct lockdep_map *lock, unsigned int subclass)
1128{
1129 struct lockdep_subclass_key *key;
1130 struct list_head *hash_head;
1131 struct lock_class *class;
1132
1133#ifdef CONFIG_DEBUG_LOCKDEP
1134 /*
1135 * If the architecture calls into lockdep before initializing
1136 * the hashes then we'll warn about it later. (we cannot printk
1137 * right now)
1138 */
1139 if (unlikely(!lockdep_initialized)) {
1140 lockdep_init();
1141 lockdep_init_error = 1;
1142 }
1143#endif
1144
1145 /*
1146 * Static locks do not have their class-keys yet - for them the key
1147 * is the lock object itself:
1148 */
1149 if (unlikely(!lock->key))
1150 lock->key = (void *)lock;
1151
1152 /*
1153 * NOTE: the class-key must be unique. For dynamic locks, a static
1154 * lock_class_key variable is passed in through the mutex_init()
1155 * (or spin_lock_init()) call - which acts as the key. For static
1156 * locks we use the lock object itself as the key.
1157 */
1158 if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
1159 __error_too_big_MAX_LOCKDEP_SUBCLASSES();
1160
1161 key = lock->key->subkeys + subclass;
1162
1163 hash_head = classhashentry(key);
1164
1165 /*
1166 * We can walk the hash lockfree, because the hash only
1167 * grows, and we are careful when adding entries to the end:
1168 */
1169 list_for_each_entry(class, hash_head, hash_entry)
1170 if (class->key == key)
1171 goto out_set;
1172
1173 /*
1174 * Debug-check: all keys must be persistent!
1175 */
1176 if (!static_obj(lock->key)) {
1177 debug_locks_off();
1178 printk("INFO: trying to register non-static key.\n");
1179 printk("the code is fine but needs lockdep annotation.\n");
1180 printk("turning off the locking correctness validator.\n");
1181 dump_stack();
1182
1183 return NULL;
1184 }
1185
1186 __raw_spin_lock(&hash_lock);
1187 /*
1188 * We have to do the hash-walk again, to avoid races
1189 * with another CPU:
1190 */
1191 list_for_each_entry(class, hash_head, hash_entry)
1192 if (class->key == key)
1193 goto out_unlock_set;
1194 /*
1195 * Allocate a new key from the static array, and add it to
1196 * the hash:
1197 */
1198 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
1199 __raw_spin_unlock(&hash_lock);
1200 debug_locks_off();
1201 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
1202 printk("turning off the locking correctness validator.\n");
1203 return NULL;
1204 }
1205 class = lock_classes + nr_lock_classes++;
1206 debug_atomic_inc(&nr_unused_locks);
1207 class->key = key;
1208 class->name = lock->name;
1209 class->subclass = subclass;
1210 INIT_LIST_HEAD(&class->lock_entry);
1211 INIT_LIST_HEAD(&class->locks_before);
1212 INIT_LIST_HEAD(&class->locks_after);
1213 class->name_version = count_matching_names(class);
1214 /*
1215 * We use RCU's safe list-add method to make
1216 * parallel walking of the hash-list safe:
1217 */
1218 list_add_tail_rcu(&class->hash_entry, hash_head);
1219
1220 if (verbose(class)) {
1221 __raw_spin_unlock(&hash_lock);
1222 printk("\nnew class %p: %s", class->key, class->name);
1223 if (class->name_version > 1)
1224 printk("#%d", class->name_version);
1225 printk("\n");
1226 dump_stack();
1227 __raw_spin_lock(&hash_lock);
1228 }
1229out_unlock_set:
1230 __raw_spin_unlock(&hash_lock);
1231
1232out_set:
1233 lock->class[subclass] = class;
1234
1235 DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
1236
1237 return class;
1238}
1239
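
The persistent-key requirement is met transparently for dynamically allocated locks because the init macros allocate a static key at each call site; a hedged sketch of the idea (illustrative expansion with made-up names, not the real spinlock.h text):

#define my_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;	/* static storage */	\
	__my_lock_init((lock), #lock, &__key);			\
} while (0)

Every lock initialized through one such call site shares one class, while the key itself lives in static storage and so passes the static_obj() check above.
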
1240/*
1241 * Look up a dependency chain. If the key is not present yet then
1242 * add it and return 0 - in this case the new dependency chain is
1243 * validated. If the key is already hashed, return 1.
1244 */
1245static inline int lookup_chain_cache(u64 chain_key)
1246{
1247 struct list_head *hash_head = chainhashentry(chain_key);
1248 struct lock_chain *chain;
1249
1250 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1251 /*
1252 * We can walk it lock-free, because entries only get added
1253 * to the hash:
1254 */
1255 list_for_each_entry(chain, hash_head, entry) {
1256 if (chain->chain_key == chain_key) {
1257cache_hit:
1258 debug_atomic_inc(&chain_lookup_hits);
1259 /*
1260 * In the debugging case, force redundant checking
1261 * by returning 1:
1262 */
1263#ifdef CONFIG_DEBUG_LOCKDEP
1264 __raw_spin_lock(&hash_lock);
1265 return 1;
1266#endif
1267 return 0;
1268 }
1269 }
1270 /*
1271 * Allocate a new chain entry from the static array, and add
1272 * it to the hash:
1273 */
1274 __raw_spin_lock(&hash_lock);
1275 /*
1276 * We have to walk the chain again locked - to avoid duplicates:
1277 */
1278 list_for_each_entry(chain, hash_head, entry) {
1279 if (chain->chain_key == chain_key) {
1280 __raw_spin_unlock(&hash_lock);
1281 goto cache_hit;
1282 }
1283 }
1284 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1285 __raw_spin_unlock(&hash_lock);
1286 debug_locks_off();
1287 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1288 printk("turning off the locking correctness validator.\n");
1289 return 0;
1290 }
1291 chain = lock_chains + nr_lock_chains++;
1292 chain->chain_key = chain_key;
1293 list_add_tail_rcu(&chain->entry, hash_head);
1294 debug_atomic_inc(&chain_lookup_misses);
1295#ifdef CONFIG_TRACE_IRQFLAGS
1296 if (current->hardirq_context)
1297 nr_hardirq_chains++;
1298 else {
1299 if (current->softirq_context)
1300 nr_softirq_chains++;
1301 else
1302 nr_process_chains++;
1303 }
1304#else
1305 nr_process_chains++;
1306#endif
1307
1308 return 1;
1309}
1310
1311/*
1312 * We are building curr_chain_key incrementally, so double-check
1313 * it from scratch, to make sure that it's done correctly:
1314 */
1315static void check_chain_key(struct task_struct *curr)
1316{
1317#ifdef CONFIG_DEBUG_LOCKDEP
1318 struct held_lock *hlock, *prev_hlock = NULL;
1319 unsigned int i, id;
1320 u64 chain_key = 0;
1321
1322 for (i = 0; i < curr->lockdep_depth; i++) {
1323 hlock = curr->held_locks + i;
1324 if (chain_key != hlock->prev_chain_key) {
1325 debug_locks_off();
1326 printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1327 curr->lockdep_depth, i,
1328 (unsigned long long)chain_key,
1329 (unsigned long long)hlock->prev_chain_key);
1330 WARN_ON(1);
1331 return;
1332 }
1333 id = hlock->class - lock_classes;
1334 DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
1335 if (prev_hlock && (prev_hlock->irq_context !=
1336 hlock->irq_context))
1337 chain_key = 0;
1338 chain_key = iterate_chain_key(chain_key, id);
1339 prev_hlock = hlock;
1340 }
1341 if (chain_key != curr->curr_chain_key) {
1342 debug_locks_off();
1343 printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1344 curr->lockdep_depth, i,
1345 (unsigned long long)chain_key,
1346 (unsigned long long)curr->curr_chain_key);
1347 WARN_ON(1);
1348 }
1349#endif
1350}
1351
1352#ifdef CONFIG_TRACE_IRQFLAGS
1353
1354/*
1355 * print irq inversion bug:
1356 */
1357static int
1358print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1359 struct held_lock *this, int forwards,
1360 const char *irqclass)
1361{
1362 __raw_spin_unlock(&hash_lock);
1363 debug_locks_off();
1364 if (debug_locks_silent)
1365 return 0;
1366
1367 printk("\n=========================================================\n");
1368 printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
1369 printk( "---------------------------------------------------------\n");
1370 printk("%s/%d just changed the state of lock:\n",
1371 curr->comm, curr->pid);
1372 print_lock(this);
1373 if (forwards)
1374 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1375 else
1376 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1377 print_lock_name(other);
1378 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1379
1380 printk("\nother info that might help us debug this:\n");
1381 lockdep_print_held_locks(curr);
1382
1383 printk("\nthe first lock's dependencies:\n");
1384 print_lock_dependencies(this->class, 0);
1385
1386 printk("\nthe second lock's dependencies:\n");
1387 print_lock_dependencies(other, 0);
1388
1389 printk("\nstack backtrace:\n");
1390 dump_stack();
1391
1392 return 0;
1393}
1394
1395/*
1396 * Prove that in the forwards-direction subgraph starting at <this>
1397 * there is no lock matching <mask>:
1398 */
1399static int
1400check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1401 enum lock_usage_bit bit, const char *irqclass)
1402{
1403 int ret;
1404
1405 find_usage_bit = bit;
1406 /* fills in <forwards_match> */
1407 ret = find_usage_forwards(this->class, 0);
1408 if (!ret || ret == 1)
1409 return ret;
1410
1411 return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1412}
1413
1414/*
1415 * Prove that in the backwards-direction subgraph starting at <this>
1416 * there is no lock matching <mask>:
1417 */
1418static int
1419check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1420 enum lock_usage_bit bit, const char *irqclass)
1421{
1422 int ret;
1423
1424 find_usage_bit = bit;
1425 /* fills in <backwards_match> */
1426 ret = find_usage_backwards(this->class, 0);
1427 if (!ret || ret == 1)
1428 return ret;
1429
1430 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1431}
1432
1433static inline void print_irqtrace_events(struct task_struct *curr)
1434{
1435 printk("irq event stamp: %u\n", curr->irq_events);
1436 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
1437 print_ip_sym(curr->hardirq_enable_ip);
1438 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1439 print_ip_sym(curr->hardirq_disable_ip);
1440 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
1441 print_ip_sym(curr->softirq_enable_ip);
1442 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1443 print_ip_sym(curr->softirq_disable_ip);
1444}
1445
1446#else
1447static inline void print_irqtrace_events(struct task_struct *curr)
1448{
1449}
1450#endif
1451
1452static int
1453print_usage_bug(struct task_struct *curr, struct held_lock *this,
1454 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1455{
1456 __raw_spin_unlock(&hash_lock);
1457 debug_locks_off();
1458 if (debug_locks_silent)
1459 return 0;
1460
1461 printk("\n=================================\n");
1462 printk( "[ INFO: inconsistent lock state ]\n");
1463 printk( "---------------------------------\n");
1464
1465 printk("inconsistent {%s} -> {%s} usage.\n",
1466 usage_str[prev_bit], usage_str[new_bit]);
1467
1468 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1469 curr->comm, curr->pid,
1470 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1471 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1472 trace_hardirqs_enabled(curr),
1473 trace_softirqs_enabled(curr));
1474 print_lock(this);
1475
1476 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1477 print_stack_trace(this->class->usage_traces + prev_bit, 1);
1478
1479 print_irqtrace_events(curr);
1480 printk("\nother info that might help us debug this:\n");
1481 lockdep_print_held_locks(curr);
1482
1483 printk("\nstack backtrace:\n");
1484 dump_stack();
1485
1486 return 0;
1487}
1488
1489/*
1490 * Print out an error if an invalid bit is set:
1491 */
1492static inline int
1493valid_state(struct task_struct *curr, struct held_lock *this,
1494 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1495{
1496 if (unlikely(this->class->usage_mask & (1 << bad_bit)))
1497 return print_usage_bug(curr, this, bad_bit, new_bit);
1498 return 1;
1499}
1500
1501#define STRICT_READ_CHECKS 1
1502
1503/*
1504 * Mark a lock with a usage bit, and validate the state transition:
1505 */
1506static int mark_lock(struct task_struct *curr, struct held_lock *this,
1507 enum lock_usage_bit new_bit, unsigned long ip)
1508{
1509 unsigned int new_mask = 1 << new_bit, ret = 1;
1510
1511 /*
1512 * If already set then do not dirty the cacheline,
1513 * nor do any checks:
1514 */
1515 if (likely(this->class->usage_mask & new_mask))
1516 return 1;
1517
1518 __raw_spin_lock(&hash_lock);
1519 /*
1520 * Make sure we didn't race:
1521 */
1522 if (unlikely(this->class->usage_mask & new_mask)) {
1523 __raw_spin_unlock(&hash_lock);
1524 return 1;
1525 }
1526
1527 this->class->usage_mask |= new_mask;
1528
1529#ifdef CONFIG_TRACE_IRQFLAGS
1530 if (new_bit == LOCK_ENABLED_HARDIRQS ||
1531 new_bit == LOCK_ENABLED_HARDIRQS_READ)
1532 ip = curr->hardirq_enable_ip;
1533 else if (new_bit == LOCK_ENABLED_SOFTIRQS ||
1534 new_bit == LOCK_ENABLED_SOFTIRQS_READ)
1535 ip = curr->softirq_enable_ip;
1536#endif
1537 if (!save_trace(this->class->usage_traces + new_bit))
1538 return 0;
1539
1540 switch (new_bit) {
1541#ifdef CONFIG_TRACE_IRQFLAGS
1542 case LOCK_USED_IN_HARDIRQ:
1543 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1544 return 0;
1545 if (!valid_state(curr, this, new_bit,
1546 LOCK_ENABLED_HARDIRQS_READ))
1547 return 0;
1548 /*
1549 * just marked it hardirq-safe, check that this lock
1550 * took no hardirq-unsafe lock in the past:
1551 */
1552 if (!check_usage_forwards(curr, this,
1553 LOCK_ENABLED_HARDIRQS, "hard"))
1554 return 0;
1555#if STRICT_READ_CHECKS
1556 /*
1557 * just marked it hardirq-safe, check that this lock
1558 * took no hardirq-unsafe-read lock in the past:
1559 */
1560 if (!check_usage_forwards(curr, this,
1561 LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1562 return 0;
1563#endif
1564 if (hardirq_verbose(this->class))
1565 ret = 2;
1566 break;
1567 case LOCK_USED_IN_SOFTIRQ:
1568 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1569 return 0;
1570 if (!valid_state(curr, this, new_bit,
1571 LOCK_ENABLED_SOFTIRQS_READ))
1572 return 0;
1573 /*
1574 * just marked it softirq-safe, check that this lock
1575 * took no softirq-unsafe lock in the past:
1576 */
1577 if (!check_usage_forwards(curr, this,
1578 LOCK_ENABLED_SOFTIRQS, "soft"))
1579 return 0;
1580#if STRICT_READ_CHECKS
1581 /*
1582 * just marked it softirq-safe, check that this lock
1583 * took no softirq-unsafe-read lock in the past:
1584 */
1585 if (!check_usage_forwards(curr, this,
1586 LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
1587 return 0;
1588#endif
1589 if (softirq_verbose(this->class))
1590 ret = 2;
1591 break;
1592 case LOCK_USED_IN_HARDIRQ_READ:
1593 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1594 return 0;
1595 /*
1596 * just marked it hardirq-read-safe, check that this lock
1597 * took no hardirq-unsafe lock in the past:
1598 */
1599 if (!check_usage_forwards(curr, this,
1600 LOCK_ENABLED_HARDIRQS, "hard"))
1601 return 0;
1602 if (hardirq_verbose(this->class))
1603 ret = 2;
1604 break;
1605 case LOCK_USED_IN_SOFTIRQ_READ:
1606 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1607 return 0;
1608 /*
1609 * just marked it softirq-read-safe, check that this lock
1610 * took no softirq-unsafe lock in the past:
1611 */
1612 if (!check_usage_forwards(curr, this,
1613 LOCK_ENABLED_SOFTIRQS, "soft"))
1614 return 0;
1615 if (softirq_verbose(this->class))
1616 ret = 2;
1617 break;
1618 case LOCK_ENABLED_HARDIRQS:
1619 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1620 return 0;
1621 if (!valid_state(curr, this, new_bit,
1622 LOCK_USED_IN_HARDIRQ_READ))
1623 return 0;
1624 /*
1625 * just marked it hardirq-unsafe, check that no hardirq-safe
1626 * lock in the system ever took it in the past:
1627 */
1628 if (!check_usage_backwards(curr, this,
1629 LOCK_USED_IN_HARDIRQ, "hard"))
1630 return 0;
1631#if STRICT_READ_CHECKS
1632 /*
1633 * just marked it hardirq-unsafe, check that no
1634 * hardirq-safe-read lock in the system ever took
1635 * it in the past:
1636 */
1637 if (!check_usage_backwards(curr, this,
1638 LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
1639 return 0;
1640#endif
1641 if (hardirq_verbose(this->class))
1642 ret = 2;
1643 break;
1644 case LOCK_ENABLED_SOFTIRQS:
1645 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1646 return 0;
1647 if (!valid_state(curr, this, new_bit,
1648 LOCK_USED_IN_SOFTIRQ_READ))
1649 return 0;
1650 /*
1651 * just marked it softirq-unsafe, check that no softirq-safe
1652 * lock in the system ever took it in the past:
1653 */
1654 if (!check_usage_backwards(curr, this,
1655 LOCK_USED_IN_SOFTIRQ, "soft"))
1656 return 0;
1657#if STRICT_READ_CHECKS
1658 /*
1659 * just marked it softirq-unsafe, check that no
1660 * softirq-safe-read lock in the system ever took
1661 * it in the past:
1662 */
1663 if (!check_usage_backwards(curr, this,
1664 LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
1665 return 0;
1666#endif
1667 if (softirq_verbose(this->class))
1668 ret = 2;
1669 break;
1670 case LOCK_ENABLED_HARDIRQS_READ:
1671 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1672 return 0;
1673#if STRICT_READ_CHECKS
1674 /*
1675 * just marked it hardirq-read-unsafe, check that no
1676 * hardirq-safe lock in the system ever took it in the past:
1677 */
1678 if (!check_usage_backwards(curr, this,
1679 LOCK_USED_IN_HARDIRQ, "hard"))
1680 return 0;
1681#endif
1682 if (hardirq_verbose(this->class))
1683 ret = 2;
1684 break;
1685 case LOCK_ENABLED_SOFTIRQS_READ:
1686 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1687 return 0;
1688#if STRICT_READ_CHECKS
1689 /*
1690 * just marked it softirq-read-unsafe, check that no
1691 * softirq-safe lock in the system ever took it in the past:
1692 */
1693 if (!check_usage_backwards(curr, this,
1694 LOCK_USED_IN_SOFTIRQ, "soft"))
1695 return 0;
1696#endif
1697 if (softirq_verbose(this->class))
1698 ret = 2;
1699 break;
1700#endif
1701 case LOCK_USED:
1702 /*
1703 * Add it to the global list of classes:
1704 */
1705 list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
1706 debug_atomic_dec(&nr_unused_locks);
1707 break;
1708 default:
1709 debug_locks_off();
1710 WARN_ON(1);
1711 return 0;
1712 }
1713
1714 __raw_spin_unlock(&hash_lock);
1715
1716 /*
1717 * We must printk outside of the hash_lock:
1718 */
1719 if (ret == 2) {
1720 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
1721 print_lock(this);
1722 print_irqtrace_events(curr);
1723 dump_stack();
1724 }
1725
1726 return ret;
1727}
1728
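The switch in mark_lock() boils down to a small exclusion matrix between the usage bits; condensed from the valid_state() calls above (the *_READ variants follow the same pattern):

	/*
	 * new usage bit              bit that must not already be set
	 *
	 * LOCK_USED_IN_HARDIRQ   ->  LOCK_ENABLED_HARDIRQS (+_READ)
	 * LOCK_USED_IN_SOFTIRQ   ->  LOCK_ENABLED_SOFTIRQS (+_READ)
	 * LOCK_ENABLED_HARDIRQS  ->  LOCK_USED_IN_HARDIRQ  (+_READ)
	 * LOCK_ENABLED_SOFTIRQS  ->  LOCK_USED_IN_SOFTIRQ  (+_READ)
	 */

In other words: a lock class observed inside an irq context must never also be seen held with that irq type enabled, and vice versa.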
1729#ifdef CONFIG_TRACE_IRQFLAGS
1730/*
1731 * Mark all held locks with a usage bit:
1732 */
1733static int
1734mark_held_locks(struct task_struct *curr, int hardirq, unsigned long ip)
1735{
1736 enum lock_usage_bit usage_bit;
1737 struct held_lock *hlock;
1738 int i;
1739
1740 for (i = 0; i < curr->lockdep_depth; i++) {
1741 hlock = curr->held_locks + i;
1742
1743 if (hardirq) {
1744 if (hlock->read)
1745 usage_bit = LOCK_ENABLED_HARDIRQS_READ;
1746 else
1747 usage_bit = LOCK_ENABLED_HARDIRQS;
1748 } else {
1749 if (hlock->read)
1750 usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
1751 else
1752 usage_bit = LOCK_ENABLED_SOFTIRQS;
1753 }
1754 if (!mark_lock(curr, hlock, usage_bit, ip))
1755 return 0;
1756 }
1757
1758 return 1;
1759}
1760
1761/*
1762 * Debugging helper: via this flag we know that we are in
1763 * 'early bootup code', and will warn about any invalid irqs-on event:
1764 */
1765static int early_boot_irqs_enabled;
1766
1767void early_boot_irqs_off(void)
1768{
1769 early_boot_irqs_enabled = 0;
1770}
1771
1772void early_boot_irqs_on(void)
1773{
1774 early_boot_irqs_enabled = 1;
1775}
1776
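The intended call sequence is that the very start of kernel boot turns the flag off and turns it back on just before interrupts are enabled for the first time; a sketch of that sequence (the exact placement inside start_kernel() is an assumption):

	local_irq_disable();
	early_boot_irqs_off();
	/* ... early initialization that must not enable interrupts ... */
	early_boot_irqs_on();
	local_irq_enable();

Any irqs-on event between the two markers trips the DEBUG_LOCKS_WARN_ON() in trace_hardirqs_on() below.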
1777/*
1778 * Hardirqs will be enabled:
1779 */
1780void trace_hardirqs_on(void)
1781{
1782 struct task_struct *curr = current;
1783 unsigned long ip;
1784
1785 if (unlikely(!debug_locks || current->lockdep_recursion))
1786 return;
1787
1788 if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
1789 return;
1790
1791 if (unlikely(curr->hardirqs_enabled)) {
1792 debug_atomic_inc(&redundant_hardirqs_on);
1793 return;
1794 }
1795 /* we'll do an OFF -> ON transition: */
1796 curr->hardirqs_enabled = 1;
1797 ip = (unsigned long) __builtin_return_address(0);
1798
1799 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1800 return;
1801 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
1802 return;
1803 /*
1804 * We are going to turn hardirqs on, so set the
1805 * usage bit for all held locks:
1806 */
1807 if (!mark_held_locks(curr, 1, ip))
1808 return;
1809 /*
1810 * If we have softirqs enabled, then set the usage
1811 * bit for all held locks. (disabled hardirqs prevented
1812 * this bit from being set before)
1813 */
1814 if (curr->softirqs_enabled)
1815 if (!mark_held_locks(curr, 0, ip))
1816 return;
1817
1818 curr->hardirq_enable_ip = ip;
1819 curr->hardirq_enable_event = ++curr->irq_events;
1820 debug_atomic_inc(&hardirqs_on_events);
1821}
1822
1823EXPORT_SYMBOL(trace_hardirqs_on);
1824
1825/*
1826 * Hardirqs were disabled:
1827 */
1828void trace_hardirqs_off(void)
1829{
1830 struct task_struct *curr = current;
1831
1832 if (unlikely(!debug_locks || current->lockdep_recursion))
1833 return;
1834
1835 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1836 return;
1837
1838 if (curr->hardirqs_enabled) {
1839 /*
1840 * We have done an ON -> OFF transition:
1841 */
1842 curr->hardirqs_enabled = 0;
1843 curr->hardirq_disable_ip = _RET_IP_;
1844 curr->hardirq_disable_event = ++curr->irq_events;
1845 debug_atomic_inc(&hardirqs_off_events);
1846 } else
1847 debug_atomic_inc(&redundant_hardirqs_off);
1848}
1849
1850EXPORT_SYMBOL(trace_hardirqs_off);
1851
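Both hooks are meant to bracket the architecture's real irq-flags operations: trace_hardirqs_on() runs while interrupts are still disabled, trace_hardirqs_off() right after they have been disabled. A minimal sketch of the wrapper pattern, with the raw_* names standing in for the arch-level primitives (the macro spellings are assumptions):

#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

#define local_irq_disable() \
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)

This ordering keeps the DEBUG_LOCKS_WARN_ON(!irqs_disabled()) checks in both functions happy.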
1852/*
1853 * Softirqs will be enabled:
1854 */
1855void trace_softirqs_on(unsigned long ip)
1856{
1857 struct task_struct *curr = current;
1858
1859 if (unlikely(!debug_locks))
1860 return;
1861
1862 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1863 return;
1864
1865 if (curr->softirqs_enabled) {
1866 debug_atomic_inc(&redundant_softirqs_on);
1867 return;
1868 }
1869
1870 /*
1871 * We'll do an OFF -> ON transition:
1872 */
1873 curr->softirqs_enabled = 1;
1874 curr->softirq_enable_ip = ip;
1875 curr->softirq_enable_event = ++curr->irq_events;
1876 debug_atomic_inc(&softirqs_on_events);
1877 /*
1878 * We are going to turn softirqs on, so set the
1879 * usage bit for all held locks, if hardirqs are
1880 * enabled too:
1881 */
1882 if (curr->hardirqs_enabled)
1883 mark_held_locks(curr, 0, ip);
1884}
1885
1886/*
1887 * Softirqs were disabled:
1888 */
1889void trace_softirqs_off(unsigned long ip)
1890{
1891 struct task_struct *curr = current;
1892
1893 if (unlikely(!debug_locks))
1894 return;
1895
1896 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1897 return;
1898
1899 if (curr->softirqs_enabled) {
1900 /*
1901 * We have done an ON -> OFF transition:
1902 */
1903 curr->softirqs_enabled = 0;
1904 curr->softirq_disable_ip = ip;
1905 curr->softirq_disable_event = ++curr->irq_events;
1906 debug_atomic_inc(&softirqs_off_events);
1907 DEBUG_LOCKS_WARN_ON(!softirq_count());
1908 } else
1909 debug_atomic_inc(&redundant_softirqs_off);
1910}
1911
1912#endif
1913
1914/*
1915 * Initialize a lock instance's lock-class mapping info:
1916 */
1917void lockdep_init_map(struct lockdep_map *lock, const char *name,
1918 struct lock_class_key *key)
1919{
1920 if (unlikely(!debug_locks))
1921 return;
1922
1923 if (DEBUG_LOCKS_WARN_ON(!key))
1924 return;
1925 if (DEBUG_LOCKS_WARN_ON(!name))
1926 return;
1927 /*
1928 * Sanity check, the lock-class key must be persistent:
1929 */
1930 if (!static_obj(key)) {
1931 printk("BUG: key %p not in .data!\n", key);
1932 DEBUG_LOCKS_WARN_ON(1);
1933 return;
1934 }
1935 lock->name = name;
1936 lock->key = key;
1937 memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES);
1938}
1939
1940EXPORT_SYMBOL_GPL(lockdep_init_map);
1941
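Since the key has to be a static object, lock initializers typically materialize one lock_class_key per initialization site; a minimal sketch of that pattern (my_lock_init() and the dep_map member name are illustrative assumptions):

#define my_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	lockdep_init_map(&(lock)->dep_map, #lock, &__key);	\
} while (0)

Every expansion site thereby gets its own static key, and hence its own lock class.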
1942/*
1943 * This gets called for every mutex_lock*()/spin_lock*() operation.
1944 * We maintain the dependency maps and validate the locking attempt:
1945 */
1946static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
1947 int trylock, int read, int check, int hardirqs_off,
1948 unsigned long ip)
1949{
1950 struct task_struct *curr = current;
1951 struct held_lock *hlock;
1952 struct lock_class *class;
1953 unsigned int depth, id;
1954 int chain_head = 0;
1955 u64 chain_key;
1956
1957 if (unlikely(!debug_locks))
1958 return 0;
1959
1960 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1961 return 0;
1962
1963 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
1964 debug_locks_off();
1965 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
1966 printk("turning off the locking correctness validator.\n");
1967 return 0;
1968 }
1969
1970 class = lock->class[subclass];
1971 /* not cached yet? */
1972 if (unlikely(!class)) {
1973 class = register_lock_class(lock, subclass);
1974 if (!class)
1975 return 0;
1976 }
1977 debug_atomic_inc((atomic_t *)&class->ops);
1978 if (very_verbose(class)) {
1979 printk("\nacquire class [%p] %s", class->key, class->name);
1980 if (class->name_version > 1)
1981 printk("#%d", class->name_version);
1982 printk("\n");
1983 dump_stack();
1984 }
1985
1986 /*
1987 * Add the lock to the list of currently held locks.
1988 * (we don't increase the depth just yet, up until the
1989 * dependency checks are done)
1990 */
1991 depth = curr->lockdep_depth;
1992 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
1993 return 0;
1994
1995 hlock = curr->held_locks + depth;
1996
1997 hlock->class = class;
1998 hlock->acquire_ip = ip;
1999 hlock->instance = lock;
2000 hlock->trylock = trylock;
2001 hlock->read = read;
2002 hlock->check = check;
2003 hlock->hardirqs_off = hardirqs_off;
2004
2005 if (check != 2)
2006 goto out_calc_hash;
2007#ifdef CONFIG_TRACE_IRQFLAGS
2008 /*
2009 * For non-trylock use in a hardirq or softirq context,
2010 * mark the lock as used in these contexts:
2011 */
2012 if (!trylock) {
2013 if (read) {
2014 if (curr->hardirq_context)
2015 if (!mark_lock(curr, hlock,
2016 LOCK_USED_IN_HARDIRQ_READ, ip))
2017 return 0;
2018 if (curr->softirq_context)
2019 if (!mark_lock(curr, hlock,
2020 LOCK_USED_IN_SOFTIRQ_READ, ip))
2021 return 0;
2022 } else {
2023 if (curr->hardirq_context)
2024 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ, ip))
2025 return 0;
2026 if (curr->softirq_context)
2027 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ, ip))
2028 return 0;
2029 }
2030 }
2031 if (!hardirqs_off) {
2032 if (read) {
2033 if (!mark_lock(curr, hlock,
2034 LOCK_ENABLED_HARDIRQS_READ, ip))
2035 return 0;
2036 if (curr->softirqs_enabled)
2037 if (!mark_lock(curr, hlock,
2038 LOCK_ENABLED_SOFTIRQS_READ, ip))
2039 return 0;
2040 } else {
2041 if (!mark_lock(curr, hlock,
2042 LOCK_ENABLED_HARDIRQS, ip))
2043 return 0;
2044 if (curr->softirqs_enabled)
2045 if (!mark_lock(curr, hlock,
2046 LOCK_ENABLED_SOFTIRQS, ip))
2047 return 0;
2048 }
2049 }
2050#endif
2051 /* mark it as used: */
2052 if (!mark_lock(curr, hlock, LOCK_USED, ip))
2053 return 0;
2054out_calc_hash:
2055 /*
2056 * Calculate the chain hash: it's the combined hash of all the
2057 * lock keys along the dependency chain. We save the hash value
2058 * at every step so that we can get the current hash easily
2059 * after unlock. The chain hash is then used to cache dependency
2060 * results.
2061 *
2062 * The 'key ID' (the class's index) is the most compact key value
2063 * we can use to drive the hash, so we use it instead of class->key.
2064 */
2065 id = class - lock_classes;
2066 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2067 return 0;
2068
2069 chain_key = curr->curr_chain_key;
2070 if (!depth) {
2071 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2072 return 0;
2073 chain_head = 1;
2074 }
2075
2076 hlock->prev_chain_key = chain_key;
2077
2078#ifdef CONFIG_TRACE_IRQFLAGS
2079 /*
2080 * Keep track of points where we cross into an interrupt context:
2081 */
2082 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2083 curr->softirq_context;
2084 if (depth) {
2085 struct held_lock *prev_hlock;
2086
2087 prev_hlock = curr->held_locks + depth-1;
2088 /*
2089 * If we cross into another context, reset the
2090 * hash key (this also prevents the checking and the
2091 * adding of the dependency to 'prev'):
2092 */
2093 if (prev_hlock->irq_context != hlock->irq_context) {
2094 chain_key = 0;
2095 chain_head = 1;
2096 }
2097 }
2098#endif
2099 chain_key = iterate_chain_key(chain_key, id);
2100 curr->curr_chain_key = chain_key;
2101
2102 /*
2103 * Trylock needs to maintain the stack of held locks, but it
2104 * does not add new dependencies, because trylock can be done
2105 * in any order.
2106 *
2107 * We look up the chain_key and do the O(N^2) check and update of
2108 * the dependencies only if this is a new dependency chain.
2109 * (If lookup_chain_cache() returns with 1 it acquires
2110 * hash_lock for us)
2111 */
2112 if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) {
2113 /*
2114 * Check whether last held lock:
2115 *
2116 * - is irq-safe, if this lock is irq-unsafe
2117 * - is softirq-safe, if this lock is hardirq-unsafe
2118 *
2119 * And check whether the new lock's dependency graph
2120 * could lead back to the previous lock.
2121 *
2122 * any of these scenarios could lead to a deadlock. If all
2123 * validations pass, the dependency gets added below.
2124 */
2125 int ret = check_deadlock(curr, hlock, lock, read);
2126
2127 if (!ret)
2128 return 0;
2129 /*
2130 * Mark recursive read, as we jump over it when
2131 * building dependencies (just like we jump over
2132 * trylock entries):
2133 */
2134 if (ret == 2)
2135 hlock->read = 2;
2136 /*
2137 * Add dependency only if this lock is not the head
2138 * of the chain, and if it's not a secondary read-lock:
2139 */
2140 if (!chain_head && ret != 2)
2141 if (!check_prevs_add(curr, hlock))
2142 return 0;
2143 __raw_spin_unlock(&hash_lock);
2144 }
2145 curr->lockdep_depth++;
2146 check_chain_key(curr);
2147 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2148 debug_locks_off();
2149 printk("BUG: MAX_LOCK_DEPTH too low!\n");
2150 printk("turning off the locking correctness validator.\n");
2151 return 0;
2152 }
2153 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2154 max_lockdep_depth = curr->lockdep_depth;
2155
2156 return 1;
2157}
2158
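For reference, the argument conventions the function body above establishes:

	/*
	 * trylock: 1 - kept on the held-lock stack but adds no new
	 *          dependencies (trylocks can be attempted in any order)
	 * read:    0 exclusive, 1 read; 2 (recursive read) is set
	 *          internally when check_deadlock() returns 2
	 * check:   2 runs the full usage-bit and dependency validation;
	 *          lower values jump straight to out_calc_hash
	 */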
2159static int
2160print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2161 unsigned long ip)
2162{
2163 if (!debug_locks_off())
2164 return 0;
2165 if (debug_locks_silent)
2166 return 0;
2167
2168 printk("\n=====================================\n");
2169 printk( "[ BUG: bad unlock balance detected! ]\n");
2170 printk( "-------------------------------------\n");
2171 printk("%s/%d is trying to release lock (",
2172 curr->comm, curr->pid);
2173 print_lockdep_cache(lock);
2174 printk(") at:\n");
2175 print_ip_sym(ip);
2176 printk("but there are no more locks to release!\n");
2177 printk("\nother info that might help us debug this:\n");
2178 lockdep_print_held_locks(curr);
2179
2180 printk("\nstack backtrace:\n");
2181 dump_stack();
2182
2183 return 0;
2184}
2185
2186/*
2187 * Common debugging checks for both nested and non-nested unlock:
2188 */
2189static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2190 unsigned long ip)
2191{
2192 if (unlikely(!debug_locks))
2193 return 0;
2194 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2195 return 0;
2196
2197 if (curr->lockdep_depth <= 0)
2198 return print_unlock_inbalance_bug(curr, lock, ip);
2199
2200 return 1;
2201}
2202
2203/*
2204 * Remove the lock from the list of currently held locks in a
2205 * potentially non-nested (out of order) manner. This is a
2206 * relatively rare operation, as all the unlock APIs default
2207 * to nested mode (which uses lock_release()):
2208 */
2209static int
2210lock_release_non_nested(struct task_struct *curr,
2211 struct lockdep_map *lock, unsigned long ip)
2212{
2213 struct held_lock *hlock, *prev_hlock;
2214 unsigned int depth;
2215 int i;
2216
2217 /*
2218 * Check whether the lock exists in the current stack
2219 * of held locks:
2220 */
2221 depth = curr->lockdep_depth;
2222 if (DEBUG_LOCKS_WARN_ON(!depth))
2223 return 0;
2224
2225 prev_hlock = NULL;
2226 for (i = depth-1; i >= 0; i--) {
2227 hlock = curr->held_locks + i;
2228 /*
2229 * We must not cross into another context:
2230 */
2231 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2232 break;
2233 if (hlock->instance == lock)
2234 goto found_it;
2235 prev_hlock = hlock;
2236 }
2237 return print_unlock_inbalance_bug(curr, lock, ip);
2238
2239found_it:
2240 /*
2241 * We have the right lock to unlock, 'hlock' points to it.
2242 * Now we remove it from the stack, and add back the other
2243 * entries (if any), recalculating the hash along the way:
2244 */
2245 curr->lockdep_depth = i;
2246 curr->curr_chain_key = hlock->prev_chain_key;
2247
2248 for (i++; i < depth; i++) {
2249 hlock = curr->held_locks + i;
2250 if (!__lock_acquire(hlock->instance,
2251 hlock->class->subclass, hlock->trylock,
2252 hlock->read, hlock->check, hlock->hardirqs_off,
2253 hlock->acquire_ip))
2254 return 0;
2255 }
2256
2257 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2258 return 0;
2259 return 1;
2260}
2261
2262/*
2263 * Remove the lock from the list of currently held locks - this gets
2264 * called on mutex_unlock()/spin_unlock*() (or on a failed
2265 * mutex_lock_interruptible()). This is done for unlocks that nest
2266 * perfectly. (i.e. the current top of the lock-stack is unlocked)
2267 */
2268static int lock_release_nested(struct task_struct *curr,
2269 struct lockdep_map *lock, unsigned long ip)
2270{
2271 struct held_lock *hlock;
2272 unsigned int depth;
2273
2274 /*
2275 * Pop off the top of the lock stack:
2276 */
2277 depth = curr->lockdep_depth - 1;
2278 hlock = curr->held_locks + depth;
2279
2280 /*
2281 * Is the unlock non-nested:
2282 */
2283 if (hlock->instance != lock)
2284 return lock_release_non_nested(curr, lock, ip);
2285 curr->lockdep_depth--;
2286
2287 if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2288 return 0;
2289
2290 curr->curr_chain_key = hlock->prev_chain_key;
2291
2292#ifdef CONFIG_DEBUG_LOCKDEP
2293 hlock->prev_chain_key = 0;
2294 hlock->class = NULL;
2295 hlock->acquire_ip = 0;
2296 hlock->irq_context = 0;
2297#endif
2298 return 1;
2299}
2300
2301/*
2302 * Remove the lock from the list of currently held locks - this is
2303 * the common entry point for mutex_unlock()/spin_unlock*() (or a
2304 * failed mutex_lock_interruptible()), handling both perfectly
2305 * nested and non-nested (out of order) unlocks.
2306 */
2307static void
2308__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2309{
2310 struct task_struct *curr = current;
2311
2312 if (!check_unlock(curr, lock, ip))
2313 return;
2314
2315 if (nested) {
2316 if (!lock_release_nested(curr, lock, ip))
2317 return;
2318 } else {
2319 if (!lock_release_non_nested(curr, lock, ip))
2320 return;
2321 }
2322
2323 check_chain_key(curr);
2324}
2325
2326/*
2327 * Check whether we follow the irq-flags state precisely:
2328 */
2329static void check_flags(unsigned long flags)
2330{
2331#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
2332 if (!debug_locks)
2333 return;
2334
2335 if (irqs_disabled_flags(flags))
2336 DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
2337 else
2338 DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);
2339
2340 /*
2341 * We don't accurately track softirq state in e.g.
2342 * hardirq contexts (such as on 4KSTACKS), so only
2343 * check if not in hardirq contexts:
2344 */
2345 if (!hardirq_count()) {
2346 if (softirq_count())
2347 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2348 else
2349 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2350 }
2351
2352 if (!debug_locks)
2353 print_irqtrace_events(current);
2354#endif
2355}
2356
2357/*
2358 * We are not always called with irqs disabled - do that here,
2359 * and also avoid lockdep recursion:
2360 */
2361void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2362 int trylock, int read, int check, unsigned long ip)
2363{
2364 unsigned long flags;
2365
2366 if (unlikely(current->lockdep_recursion))
2367 return;
2368
2369 raw_local_irq_save(flags);
2370 check_flags(flags);
2371
2372 current->lockdep_recursion = 1;
2373 __lock_acquire(lock, subclass, trylock, read, check,
2374 irqs_disabled_flags(flags), ip);
2375 current->lockdep_recursion = 0;
2376 raw_local_irq_restore(flags);
2377}
2378
2379EXPORT_SYMBOL_GPL(lock_acquire);
2380
2381void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2382{
2383 unsigned long flags;
2384
2385 if (unlikely(current->lockdep_recursion))
2386 return;
2387
2388 raw_local_irq_save(flags);
2389 check_flags(flags);
2390 current->lockdep_recursion = 1;
2391 __lock_release(lock, nested, ip);
2392 current->lockdep_recursion = 0;
2393 raw_local_irq_restore(flags);
2394}
2395
2396EXPORT_SYMBOL_GPL(lock_release);
2397
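Lock-type wrappers funnel into these two entry points with fixed read/check arguments; a sketch of the spinlock mapping (the spin_acquire()/spin_release() wrapper names are assumptions):

# define spin_acquire(l, s, t, i)	lock_acquire(l, s, t, 0, 2, i)
# define spin_release(l, n, i)		lock_release(l, n, i)

i.e. spinlocks are acquired exclusively (read=0) with full validation (check=2).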
2398/*
2399 * Used by the testsuite, sanitize the validator state
2400 * after a simulated failure:
2401 */
2402
2403void lockdep_reset(void)
2404{
2405 unsigned long flags;
2406
2407 raw_local_irq_save(flags);
2408 current->curr_chain_key = 0;
2409 current->lockdep_depth = 0;
2410 current->lockdep_recursion = 0;
2411 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
2412 nr_hardirq_chains = 0;
2413 nr_softirq_chains = 0;
2414 nr_process_chains = 0;
2415 debug_locks = 1;
2416 raw_local_irq_restore(flags);
2417}
2418
2419static void zap_class(struct lock_class *class)
2420{
2421 int i;
2422
2423 /*
2424 * Remove all dependencies this lock is
2425 * involved in:
2426 */
2427 for (i = 0; i < nr_list_entries; i++) {
2428 if (list_entries[i].class == class)
2429 list_del_rcu(&list_entries[i].entry);
2430 }
2431 /*
2432 * Unhash the class and remove it from the all_lock_classes list:
2433 */
2434 list_del_rcu(&class->hash_entry);
2435 list_del_rcu(&class->lock_entry);
2436
2437}
2438
2439static inline int within(void *addr, void *start, unsigned long size)
2440{
2441 return addr >= start && addr < start + size;
2442}
2443
2444void lockdep_free_key_range(void *start, unsigned long size)
2445{
2446 struct lock_class *class, *next;
2447 struct list_head *head;
2448 unsigned long flags;
2449 int i;
2450
2451 raw_local_irq_save(flags);
2452 __raw_spin_lock(&hash_lock);
2453
2454 /*
2455 * Unhash all classes that were created by this module:
2456 */
2457 for (i = 0; i < CLASSHASH_SIZE; i++) {
2458 head = classhash_table + i;
2459 if (list_empty(head))
2460 continue;
2461 list_for_each_entry_safe(class, next, head, hash_entry)
2462 if (within(class->key, start, size))
2463 zap_class(class);
2464 }
2465
2466 __raw_spin_unlock(&hash_lock);
2467 raw_local_irq_restore(flags);
2468}
2469
2470void lockdep_reset_lock(struct lockdep_map *lock)
2471{
2472 struct lock_class *class, *next, *entry;
2473 struct list_head *head;
2474 unsigned long flags;
2475 int i, j;
2476
2477 raw_local_irq_save(flags);
2478 __raw_spin_lock(&hash_lock);
2479
2480 /*
2481 * Remove all classes this lock has:
2482 */
2483 for (i = 0; i < CLASSHASH_SIZE; i++) {
2484 head = classhash_table + i;
2485 if (list_empty(head))
2486 continue;
2487 list_for_each_entry_safe(class, next, head, hash_entry) {
2488 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
2489 entry = lock->class[j];
2490 if (class == entry) {
2491 zap_class(class);
2492 lock->class[j] = NULL;
2493 break;
2494 }
2495 }
2496 }
2497 }
2498
2499 /*
2500 * Debug check: in the end all mapped classes should
2501 * be gone.
2502 */
2503 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
2504 entry = lock->class[j];
2505 if (!entry)
2506 continue;
2507 __raw_spin_unlock(&hash_lock);
2508 DEBUG_LOCKS_WARN_ON(1);
2509 raw_local_irq_restore(flags);
2510 return;
2511 }
2512
2513 __raw_spin_unlock(&hash_lock);
2514 raw_local_irq_restore(flags);
2515}
2516
2517void __init lockdep_init(void)
2518{
2519 int i;
2520
2521 /*
2522 * Some architectures have their own start_kernel()
2523 * code which calls lockdep_init(), while we also
2524 * call lockdep_init() from the start_kernel() itself,
2525 * and we want to initialize the hashes only once:
2526 */
2527 if (lockdep_initialized)
2528 return;
2529
2530 for (i = 0; i < CLASSHASH_SIZE; i++)
2531 INIT_LIST_HEAD(classhash_table + i);
2532
2533 for (i = 0; i < CHAINHASH_SIZE; i++)
2534 INIT_LIST_HEAD(chainhash_table + i);
2535
2536 lockdep_initialized = 1;
2537}
2538
2539void __init lockdep_info(void)
2540{
2541 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
2542
2543 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
2544 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
2545 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
2546 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
2547 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
2548 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
2549 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
2550
2551 printk(" memory used by lock dependency info: %lu kB\n",
2552 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
2553 sizeof(struct list_head) * CLASSHASH_SIZE +
2554 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
2555 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
2556 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
2557
2558 printk(" per task-struct memory footprint: %lu bytes\n",
2559 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
2560
2561#ifdef CONFIG_DEBUG_LOCKDEP
2562 if (lockdep_init_error)
2563 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
2564#endif
2565}
2566
2567static inline int in_range(const void *start, const void *addr, const void *end)
2568{
2569 return addr >= start && addr <= end;
2570}
2571
2572static void
2573print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
2574 const void *mem_to)
2575{
2576 if (!debug_locks_off())
2577 return;
2578 if (debug_locks_silent)
2579 return;
2580
2581 printk("\n=========================\n");
2582 printk( "[ BUG: held lock freed! ]\n");
2583 printk( "-------------------------\n");
2584 printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
2585 curr->comm, curr->pid, mem_from, mem_to-1);
2586 lockdep_print_held_locks(curr);
2587
2588 printk("\nstack backtrace:\n");
2589 dump_stack();
2590}
2591
2592/*
2593 * Called when kernel memory is freed (or unmapped), or if a lock
2594 * is destroyed or reinitialized - this code checks whether there is
2595 * any held lock in the memory range of <from> to <to>:
2596 */
2597void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
2598{
2599 const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
2600 struct task_struct *curr = current;
2601 struct held_lock *hlock;
2602 unsigned long flags;
2603 int i;
2604
2605 if (unlikely(!debug_locks))
2606 return;
2607
2608 local_irq_save(flags);
2609 for (i = 0; i < curr->lockdep_depth; i++) {
2610 hlock = curr->held_locks + i;
2611
2612 lock_from = (void *)hlock->instance;
2613 lock_to = (void *)(hlock->instance + 1);
2614
2615 if (!in_range(mem_from, lock_from, mem_to) &&
2616 !in_range(mem_from, lock_to, mem_to))
2617 continue;
2618
2619 print_freed_lock_bug(curr, mem_from, mem_to);
2620 break;
2621 }
2622 local_irq_restore(flags);
2623}
2624
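Allocator free paths are expected to call this hook before the memory is actually recycled; a sketch of the call-site shape (example_free() is hypothetical):

void example_free(void *obj, unsigned long size)
{
	/* complain if a held lock lives inside the object: */
	debug_check_no_locks_freed(obj, size);
	/* ... hand the memory back to the allocator ... */
}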
2625static void print_held_locks_bug(struct task_struct *curr)
2626{
2627 if (!debug_locks_off())
2628 return;
2629 if (debug_locks_silent)
2630 return;
2631
2632 printk("\n=====================================\n");
2633 printk( "[ BUG: lock held at task exit time! ]\n");
2634 printk( "-------------------------------------\n");
2635 printk("%s/%d is exiting with locks still held!\n",
2636 curr->comm, curr->pid);
2637 lockdep_print_held_locks(curr);
2638
2639 printk("\nstack backtrace:\n");
2640 dump_stack();
2641}
2642
2643void debug_check_no_locks_held(struct task_struct *task)
2644{
2645 if (unlikely(task->lockdep_depth > 0))
2646 print_held_locks_bug(task);
2647}
2648
2649void debug_show_all_locks(void)
2650{
2651 struct task_struct *g, *p;
2652 int count = 10;
2653 int unlock = 1;
2654
2655 printk("\nShowing all locks held in the system:\n");
2656
2657 /*
2658 * Here we try to take the tasklist_lock as hard as possible;
2659 * if we do not get it within 2 seconds we print without it
2660 * (but keep trying). This is to enable a debug printout even
2661 * if a tasklist_lock-holding task deadlocks or crashes.
2662 */
2663retry:
2664 if (!read_trylock(&tasklist_lock)) {
2665 if (count == 10)
2666 printk("hm, tasklist_lock locked, retrying... ");
2667 if (count) {
2668 count--;
2669 printk(" #%d", 10-count);
2670 mdelay(200);
2671 goto retry;
2672 }
2673 printk(" ignoring it.\n");
2674 unlock = 0;
2675 }
2676 if (count != 10)
2677 printk(" locked it.\n");
2678
2679 do_each_thread(g, p) {
2680 if (p->lockdep_depth)
2681 lockdep_print_held_locks(p);
2682 if (!unlock)
2683 if (read_trylock(&tasklist_lock))
2684 unlock = 1;
2685 } while_each_thread(g, p);
2686
2687 printk("\n");
2688 printk("=============================================\n\n");
2689
2690 if (unlock)
2691 read_unlock(&tasklist_lock);
2692}
2693
2694EXPORT_SYMBOL_GPL(debug_show_all_locks);
2695
2696void debug_show_held_locks(struct task_struct *task)
2697{
2698 lockdep_print_held_locks(task);
2699}
2700
2701EXPORT_SYMBOL_GPL(debug_show_held_locks);
2702
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
new file mode 100644
index 000000000000..0d355f24fe04
--- /dev/null
+++ b/kernel/lockdep_internals.h
@@ -0,0 +1,78 @@
1/*
2 * kernel/lockdep_internals.h
3 *
4 * Runtime locking correctness validator
5 *
6 * lockdep subsystem internal functions and variables.
7 */
8
9/*
10 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
11 * we track.
12 *
13 * We use the per-lock dependency maps in two ways: we grow them by adding
14 * every to-be-taken lock to each currently held lock's own dependency
15 * table (if it's not there yet), and we check them for lock order
16 * conflicts and deadlocks.
17 */
18#define MAX_LOCKDEP_ENTRIES 8192UL
19
20#define MAX_LOCKDEP_KEYS_BITS 11
21#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
22
23#define MAX_LOCKDEP_CHAINS_BITS 13
24#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
25
26/*
27 * Stack-trace: tightly packed array of stack backtrace
28 * addresses. Protected by the hash_lock.
29 */
30#define MAX_STACK_TRACE_ENTRIES 131072UL
31
32extern struct list_head all_lock_classes;
33
34extern void
35get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
36
37extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
38
39extern unsigned long nr_lock_classes;
40extern unsigned long nr_list_entries;
41extern unsigned long nr_lock_chains;
42extern unsigned long nr_stack_trace_entries;
43
44extern unsigned int nr_hardirq_chains;
45extern unsigned int nr_softirq_chains;
46extern unsigned int nr_process_chains;
47extern unsigned int max_lockdep_depth;
48extern unsigned int max_recursion_depth;
49
50#ifdef CONFIG_DEBUG_LOCKDEP
51/*
52 * Various lockdep statistics:
53 */
54extern atomic_t chain_lookup_hits;
55extern atomic_t chain_lookup_misses;
56extern atomic_t hardirqs_on_events;
57extern atomic_t hardirqs_off_events;
58extern atomic_t redundant_hardirqs_on;
59extern atomic_t redundant_hardirqs_off;
60extern atomic_t softirqs_on_events;
61extern atomic_t softirqs_off_events;
62extern atomic_t redundant_softirqs_on;
63extern atomic_t redundant_softirqs_off;
64extern atomic_t nr_unused_locks;
65extern atomic_t nr_cyclic_checks;
66extern atomic_t nr_cyclic_check_recursions;
67extern atomic_t nr_find_usage_forwards_checks;
68extern atomic_t nr_find_usage_forwards_recursions;
69extern atomic_t nr_find_usage_backwards_checks;
70extern atomic_t nr_find_usage_backwards_recursions;
71# define debug_atomic_inc(ptr) atomic_inc(ptr)
72# define debug_atomic_dec(ptr) atomic_dec(ptr)
73# define debug_atomic_read(ptr) atomic_read(ptr)
74#else
75# define debug_atomic_inc(ptr) do { } while (0)
76# define debug_atomic_dec(ptr) do { } while (0)
77# define debug_atomic_read(ptr) 0
78#endif
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
new file mode 100644
index 000000000000..f6e72eaab3fa
--- /dev/null
+++ b/kernel/lockdep_proc.c
@@ -0,0 +1,345 @@
1/*
2 * kernel/lockdep_proc.c
3 *
4 * Runtime locking correctness validator
5 *
6 * Started by Ingo Molnar:
7 *
8 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 *
10 * Code for /proc/lockdep and /proc/lockdep_stats:
11 *
12 */
13#include <linux/sched.h>
14#include <linux/module.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/kallsyms.h>
18#include <linux/debug_locks.h>
19
20#include "lockdep_internals.h"
21
22static void *l_next(struct seq_file *m, void *v, loff_t *pos)
23{
24 struct lock_class *class = v;
25
26 (*pos)++;
27
28 if (class->lock_entry.next != &all_lock_classes)
29 class = list_entry(class->lock_entry.next, struct lock_class,
30 lock_entry);
31 else
32 class = NULL;
33 m->private = class;
34
35 return class;
36}
37
38static void *l_start(struct seq_file *m, loff_t *pos)
39{
40 struct lock_class *class = m->private;
41
42 if (&class->lock_entry == all_lock_classes.next)
43 seq_printf(m, "all lock classes:\n");
44
45 return class;
46}
47
48static void l_stop(struct seq_file *m, void *v)
49{
50}
51
52static unsigned long count_forward_deps(struct lock_class *class)
53{
54 struct lock_list *entry;
55 unsigned long ret = 1;
56
57 /*
58 * Recurse this class's dependency list:
59 */
60 list_for_each_entry(entry, &class->locks_after, entry)
61 ret += count_forward_deps(entry->class);
62
63 return ret;
64}
65
66static unsigned long count_backward_deps(struct lock_class *class)
67{
68 struct lock_list *entry;
69 unsigned long ret = 1;
70
71 /*
72 * Recurse this class's dependency list:
73 */
74 list_for_each_entry(entry, &class->locks_before, entry)
75 ret += count_backward_deps(entry->class);
76
77 return ret;
78}
79
80static int l_show(struct seq_file *m, void *v)
81{
82 unsigned long nr_forward_deps, nr_backward_deps;
83 struct lock_class *class = m->private;
84 char str[128], c1, c2, c3, c4;
85 const char *name;
86
87 seq_printf(m, "%p", class->key);
88#ifdef CONFIG_DEBUG_LOCKDEP
89 seq_printf(m, " OPS:%8ld", class->ops);
90#endif
91 nr_forward_deps = count_forward_deps(class);
92 seq_printf(m, " FD:%5ld", nr_forward_deps);
93
94 nr_backward_deps = count_backward_deps(class);
95 seq_printf(m, " BD:%5ld", nr_backward_deps);
96
97 get_usage_chars(class, &c1, &c2, &c3, &c4);
98 seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
99
100 name = class->name;
101 if (!name) {
102 name = __get_key_name(class->key, str);
103 seq_printf(m, ": %s", name);
104 } else {
105 seq_printf(m, ": %s", name);
106 if (class->name_version > 1)
107 seq_printf(m, "#%d", class->name_version);
108 if (class->subclass)
109 seq_printf(m, "/%d", class->subclass);
110 }
111 seq_puts(m, "\n");
112
113 return 0;
114}
115
116static struct seq_operations lockdep_ops = {
117 .start = l_start,
118 .next = l_next,
119 .stop = l_stop,
120 .show = l_show,
121};
122
123static int lockdep_open(struct inode *inode, struct file *file)
124{
125 int res = seq_open(file, &lockdep_ops);
126 if (!res) {
127 struct seq_file *m = file->private_data;
128
129 if (!list_empty(&all_lock_classes))
130 m->private = list_entry(all_lock_classes.next,
131 struct lock_class, lock_entry);
132 else
133 m->private = NULL;
134 }
135 return res;
136}
137
138static struct file_operations proc_lockdep_operations = {
139 .open = lockdep_open,
140 .read = seq_read,
141 .llseek = seq_lseek,
142 .release = seq_release,
143};
144
145static void lockdep_stats_debug_show(struct seq_file *m)
146{
147#ifdef CONFIG_DEBUG_LOCKDEP
148 unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
149 hi2 = debug_atomic_read(&hardirqs_off_events),
150 hr1 = debug_atomic_read(&redundant_hardirqs_on),
151 hr2 = debug_atomic_read(&redundant_hardirqs_off),
152 si1 = debug_atomic_read(&softirqs_on_events),
153 si2 = debug_atomic_read(&softirqs_off_events),
154 sr1 = debug_atomic_read(&redundant_softirqs_on),
155 sr2 = debug_atomic_read(&redundant_softirqs_off);
156
157 seq_printf(m, " chain lookup misses: %11u\n",
158 debug_atomic_read(&chain_lookup_misses));
159 seq_printf(m, " chain lookup hits: %11u\n",
160 debug_atomic_read(&chain_lookup_hits));
161 seq_printf(m, " cyclic checks: %11u\n",
162 debug_atomic_read(&nr_cyclic_checks));
163 seq_printf(m, " cyclic-check recursions: %11u\n",
164 debug_atomic_read(&nr_cyclic_check_recursions));
165 seq_printf(m, " find-mask forwards checks: %11u\n",
166 debug_atomic_read(&nr_find_usage_forwards_checks));
167 seq_printf(m, " find-mask forwards recursions: %11u\n",
168 debug_atomic_read(&nr_find_usage_forwards_recursions));
169 seq_printf(m, " find-mask backwards checks: %11u\n",
170 debug_atomic_read(&nr_find_usage_backwards_checks));
171 seq_printf(m, " find-mask backwards recursions:%11u\n",
172 debug_atomic_read(&nr_find_usage_backwards_recursions));
173
174 seq_printf(m, " hardirq on events: %11u\n", hi1);
175 seq_printf(m, " hardirq off events: %11u\n", hi2);
176 seq_printf(m, " redundant hardirq ons: %11u\n", hr1);
177 seq_printf(m, " redundant hardirq offs: %11u\n", hr2);
178 seq_printf(m, " softirq on events: %11u\n", si1);
179 seq_printf(m, " softirq off events: %11u\n", si2);
180 seq_printf(m, " redundant softirq ons: %11u\n", sr1);
181 seq_printf(m, " redundant softirq offs: %11u\n", sr2);
182#endif
183}
184
185static int lockdep_stats_show(struct seq_file *m, void *v)
186{
187 struct lock_class *class;
188 unsigned long nr_unused = 0, nr_uncategorized = 0,
189 nr_irq_safe = 0, nr_irq_unsafe = 0,
190 nr_softirq_safe = 0, nr_softirq_unsafe = 0,
191 nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
192 nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
193 nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
194 nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
195 sum_forward_deps = 0, factor = 0;
196
197 list_for_each_entry(class, &all_lock_classes, lock_entry) {
198
199 if (class->usage_mask == 0)
200 nr_unused++;
201 if (class->usage_mask == LOCKF_USED)
202 nr_uncategorized++;
203 if (class->usage_mask & LOCKF_USED_IN_IRQ)
204 nr_irq_safe++;
205 if (class->usage_mask & LOCKF_ENABLED_IRQS)
206 nr_irq_unsafe++;
207 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
208 nr_softirq_safe++;
209 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
210 nr_softirq_unsafe++;
211 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
212 nr_hardirq_safe++;
213 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
214 nr_hardirq_unsafe++;
215 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
216 nr_irq_read_safe++;
217 if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
218 nr_irq_read_unsafe++;
219 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
220 nr_softirq_read_safe++;
221 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
222 nr_softirq_read_unsafe++;
223 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
224 nr_hardirq_read_safe++;
225 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
226 nr_hardirq_read_unsafe++;
227
228 sum_forward_deps += count_forward_deps(class);
229 }
230#ifdef CONFIG_DEBUG_LOCKDEP
231 DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
232#endif
233 seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
234 nr_lock_classes, MAX_LOCKDEP_KEYS);
235 seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
236 nr_list_entries, MAX_LOCKDEP_ENTRIES);
237 seq_printf(m, " indirect dependencies: %11lu\n",
238 sum_forward_deps);
239
240 /*
241 * Total number of dependencies:
242 *
243 * All irq-safe locks may nest inside irq-unsafe locks,
244 * plus all the other known dependencies:
245 */
246 seq_printf(m, " all direct dependencies: %11lu\n",
247 nr_irq_unsafe * nr_irq_safe +
248 nr_hardirq_unsafe * nr_hardirq_safe +
249 nr_list_entries);
250
251 /*
252 * Estimated factor between direct and indirect
253 * dependencies:
254 */
255 if (nr_list_entries)
256 factor = sum_forward_deps / nr_list_entries;
257
258 seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
259 nr_lock_chains, MAX_LOCKDEP_CHAINS);
260
261#ifdef CONFIG_TRACE_IRQFLAGS
262 seq_printf(m, " in-hardirq chains: %11u\n",
263 nr_hardirq_chains);
264 seq_printf(m, " in-softirq chains: %11u\n",
265 nr_softirq_chains);
266#endif
267 seq_printf(m, " in-process chains: %11u\n",
268 nr_process_chains);
269 seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
270 nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
271 seq_printf(m, " combined max dependencies: %11u\n",
272 (nr_hardirq_chains + 1) *
273 (nr_softirq_chains + 1) *
274 (nr_process_chains + 1)
275 );
276 seq_printf(m, " hardirq-safe locks: %11lu\n",
277 nr_hardirq_safe);
278 seq_printf(m, " hardirq-unsafe locks: %11lu\n",
279 nr_hardirq_unsafe);
280 seq_printf(m, " softirq-safe locks: %11lu\n",
281 nr_softirq_safe);
282 seq_printf(m, " softirq-unsafe locks: %11lu\n",
283 nr_softirq_unsafe);
284 seq_printf(m, " irq-safe locks: %11lu\n",
285 nr_irq_safe);
286 seq_printf(m, " irq-unsafe locks: %11lu\n",
287 nr_irq_unsafe);
288
289 seq_printf(m, " hardirq-read-safe locks: %11lu\n",
290 nr_hardirq_read_safe);
291 seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
292 nr_hardirq_read_unsafe);
293 seq_printf(m, " softirq-read-safe locks: %11lu\n",
294 nr_softirq_read_safe);
295 seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
296 nr_softirq_read_unsafe);
297 seq_printf(m, " irq-read-safe locks: %11lu\n",
298 nr_irq_read_safe);
299 seq_printf(m, " irq-read-unsafe locks: %11lu\n",
300 nr_irq_read_unsafe);
301
302 seq_printf(m, " uncategorized locks: %11lu\n",
303 nr_uncategorized);
304 seq_printf(m, " unused locks: %11lu\n",
305 nr_unused);
306 seq_printf(m, " max locking depth: %11u\n",
307 max_lockdep_depth);
308 seq_printf(m, " max recursion depth: %11u\n",
309 max_recursion_depth);
310 lockdep_stats_debug_show(m);
311 seq_printf(m, " debug_locks: %11u\n",
312 debug_locks);
313
314 return 0;
315}
316
317static int lockdep_stats_open(struct inode *inode, struct file *file)
318{
319 return single_open(file, lockdep_stats_show, NULL);
320}
321
322static struct file_operations proc_lockdep_stats_operations = {
323 .open = lockdep_stats_open,
324 .read = seq_read,
325 .llseek = seq_lseek,
326 .release = seq_release,
327};
328
329static int __init lockdep_proc_init(void)
330{
331 struct proc_dir_entry *entry;
332
333 entry = create_proc_entry("lockdep", S_IRUSR, NULL);
334 if (entry)
335 entry->proc_fops = &proc_lockdep_operations;
336
337 entry = create_proc_entry("lockdep_stats", S_IRUSR, NULL);
338 if (entry)
339 entry->proc_fops = &proc_lockdep_stats_operations;
340
341 return 0;
342}
343
344__initcall(lockdep_proc_init);
345
diff --git a/kernel/module.c b/kernel/module.c
index 281172f01e9a..35e1b1f859d7 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1121,6 +1121,9 @@ static void free_module(struct module *mod)
1121 if (mod->percpu)
1122 percpu_modfree(mod->percpu);
1123
1124 /* Free lock-classes: */
1125 lockdep_free_key_range(mod->module_core, mod->core_size);
1126
1127 /* Finally, free the core (containing the module structure) */
1128 module_free(mod, mod->module_core);
1129}
@@ -2159,6 +2162,29 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
2162 return e;
2163}
2164
2165/*
2166 * Is this a valid module address?
2167 */
2168int is_module_address(unsigned long addr)
2169{
2170 unsigned long flags;
2171 struct module *mod;
2172
2173 spin_lock_irqsave(&modlist_lock, flags);
2174
2175 list_for_each_entry(mod, &modules, list) {
2176 if (within(addr, mod->module_core, mod->core_size)) {
2177 spin_unlock_irqrestore(&modlist_lock, flags);
2178 return 1;
2179 }
2180 }
2181
2182 spin_unlock_irqrestore(&modlist_lock, flags);
2183
2184 return 0;
2185}
2186
2187
2188/* Is this a valid kernel address? We don't grab the lock: we are oopsing. */
2189struct module *__module_text_address(unsigned long addr)
2190{
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index e38e4bac97ca..e3203c654dda 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -20,367 +20,19 @@
20#include <linux/spinlock.h>
21#include <linux/kallsyms.h>
22#include <linux/interrupt.h>
23#include <linux/debug_locks.h>
24
25#include "mutex-debug.h"
26
27/*
27 * We need a global lock when we walk through the multi-process
28 * lock tree. Only used in the deadlock-debugging case.
29 */
30DEFINE_SPINLOCK(debug_mutex_lock);
31
32/*
33 * All locks held by all tasks, in a single global list:
34 */
35LIST_HEAD(debug_mutex_held_locks);
36
37/*
38 * In the debug case we carry the caller's instruction pointer into
39 * other functions, but we dont want the function argument overhead
40 * in the nondebug case - hence these macros:
41 */
42#define __IP_DECL__ , unsigned long ip
43#define __IP__ , ip
44#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
45
46/*
47 * "mutex debugging enabled" flag. We turn it off when we detect
48 * the first problem because we dont want to recurse back
49 * into the tracing code when doing error printk or
50 * executing a BUG():
51 */
52int debug_mutex_on = 1;
53
54static void printk_task(struct task_struct *p)
55{
56 if (p)
57 printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
58 else
59 printk("<none>");
60}
61
62static void printk_ti(struct thread_info *ti)
63{
64 if (ti)
65 printk_task(ti->task);
66 else
67 printk("<none>");
68}
69
70static void printk_task_short(struct task_struct *p)
71{
72 if (p)
73 printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
74 else
75 printk("<none>");
76}
77
78static void printk_lock(struct mutex *lock, int print_owner)
79{
80 printk(" [%p] {%s}\n", lock, lock->name);
81
82 if (print_owner && lock->owner) {
83 printk(".. held by: ");
84 printk_ti(lock->owner);
85 printk("\n");
86 }
87 if (lock->owner) {
88 printk("... acquired at: ");
89 print_symbol("%s\n", lock->acquire_ip);
90 }
91}
92
93/*
94 * printk locks held by a task:
95 */
96static void show_task_locks(struct task_struct *p)
97{
98 switch (p->state) {
99 case TASK_RUNNING: printk("R"); break;
100 case TASK_INTERRUPTIBLE: printk("S"); break;
101 case TASK_UNINTERRUPTIBLE: printk("D"); break;
102 case TASK_STOPPED: printk("T"); break;
103 case EXIT_ZOMBIE: printk("Z"); break;
104 case EXIT_DEAD: printk("X"); break;
105 default: printk("?"); break;
106 }
107 printk_task(p);
108 if (p->blocked_on) {
109 struct mutex *lock = p->blocked_on->lock;
110
111 printk(" blocked on mutex:");
112 printk_lock(lock, 1);
113 } else
114 printk(" (not blocked on mutex)\n");
115}
116
117/*
118 * printk all locks held in the system (if filter == NULL),
119 * or all locks belonging to a single task (if filter != NULL):
120 */
121void show_held_locks(struct task_struct *filter)
122{
123 struct list_head *curr, *cursor = NULL;
124 struct mutex *lock;
125 struct thread_info *t;
126 unsigned long flags;
127 int count = 0;
128
129 if (filter) {
130 printk("------------------------------\n");
131 printk("| showing all locks held by: | (");
132 printk_task_short(filter);
133 printk("):\n");
134 printk("------------------------------\n");
135 } else {
136 printk("---------------------------\n");
137 printk("| showing all locks held: |\n");
138 printk("---------------------------\n");
139 }
140
141 /*
142 * Play safe and acquire the global trace lock. We
143 * cannot printk with that lock held so we iterate
144 * very carefully:
145 */
146next:
147 debug_spin_lock_save(&debug_mutex_lock, flags);
148 list_for_each(curr, &debug_mutex_held_locks) {
149 if (cursor && curr != cursor)
150 continue;
151 lock = list_entry(curr, struct mutex, held_list);
152 t = lock->owner;
153 if (filter && (t != filter->thread_info))
154 continue;
155 count++;
156 cursor = curr->next;
157 debug_spin_unlock_restore(&debug_mutex_lock, flags);
158
159 printk("\n#%03d: ", count);
160 printk_lock(lock, filter ? 0 : 1);
161 goto next;
162 }
163 debug_spin_unlock_restore(&debug_mutex_lock, flags);
164 printk("\n");
165}
166
167void mutex_debug_show_all_locks(void)
168{
169 struct task_struct *g, *p;
170 int count = 10;
171 int unlock = 1;
172
173 printk("\nShowing all blocking locks in the system:\n");
174
175 /*
176 * Here we try to get the tasklist_lock as hard as possible,
177 * if not successful after 2 seconds we ignore it (but keep
178 * trying). This is to enable a debug printout even if a
179 * tasklist_lock-holding task deadlocks or crashes.
180 */
181retry:
182 if (!read_trylock(&tasklist_lock)) {
183 if (count == 10)
184 printk("hm, tasklist_lock locked, retrying... ");
185 if (count) {
186 count--;
187 printk(" #%d", 10-count);
188 mdelay(200);
189 goto retry;
190 }
191 printk(" ignoring it.\n");
192 unlock = 0;
193 }
194 if (count != 10)
195 printk(" locked it.\n");
196
197 do_each_thread(g, p) {
198 show_task_locks(p);
199 if (!unlock)
200 if (read_trylock(&tasklist_lock))
201 unlock = 1;
202 } while_each_thread(g, p);
203
204 printk("\n");
205 show_held_locks(NULL);
206 printk("=============================================\n\n");
207
208 if (unlock)
209 read_unlock(&tasklist_lock);
210}
211
212static void report_deadlock(struct task_struct *task, struct mutex *lock,
213 struct mutex *lockblk, unsigned long ip)
214{
215 printk("\n%s/%d is trying to acquire this lock:\n",
216 current->comm, current->pid);
217 printk_lock(lock, 1);
218 printk("... trying at: ");
219 print_symbol("%s\n", ip);
220 show_held_locks(current);
221
222 if (lockblk) {
223 printk("but %s/%d is deadlocking current task %s/%d!\n\n",
224 task->comm, task->pid, current->comm, current->pid);
225 printk("\n%s/%d is blocked on this lock:\n",
226 task->comm, task->pid);
227 printk_lock(lockblk, 1);
228
229 show_held_locks(task);
230
231 printk("\n%s/%d's [blocked] stackdump:\n\n",
232 task->comm, task->pid);
233 show_stack(task, NULL);
234 }
235
236 printk("\n%s/%d's [current] stackdump:\n\n",
237 current->comm, current->pid);
238 dump_stack();
239 mutex_debug_show_all_locks();
240 printk("[ turning off deadlock detection. Please report this. ]\n\n");
241 local_irq_disable();
242}
243
244/*
245 * Recursively check for mutex deadlocks:
246 */
247static int check_deadlock(struct mutex *lock, int depth,
248 struct thread_info *ti, unsigned long ip)
249{
250 struct mutex *lockblk;
251 struct task_struct *task;
252
253 if (!debug_mutex_on)
254 return 0;
255
256 ti = lock->owner;
257 if (!ti)
258 return 0;
259
260 task = ti->task;
261 lockblk = NULL;
262 if (task->blocked_on)
263 lockblk = task->blocked_on->lock;
264
265 /* Self-deadlock: */
266 if (current == task) {
267 DEBUG_OFF();
268 if (depth)
269 return 1;
270 printk("\n==========================================\n");
271 printk( "[ BUG: lock recursion deadlock detected! |\n");
272 printk( "------------------------------------------\n");
273 report_deadlock(task, lock, NULL, ip);
274 return 0;
275 }
276
277 /* Ugh, something corrupted the lock data structure? */
278 if (depth > 20) {
279 DEBUG_OFF();
280 printk("\n===========================================\n");
281 printk( "[ BUG: infinite lock dependency detected!? |\n");
282 printk( "-------------------------------------------\n");
283 report_deadlock(task, lock, lockblk, ip);
284 return 0;
285 }
286
287 /* Recursively check for dependencies: */
288 if (lockblk && check_deadlock(lockblk, depth+1, ti, ip)) {
289 printk("\n============================================\n");
290 printk( "[ BUG: circular locking deadlock detected! ]\n");
291 printk( "--------------------------------------------\n");
292 report_deadlock(task, lock, lockblk, ip);
293 return 0;
294 }
295 return 0;
296}
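/*
 * What check_deadlock() above computes, flattened into an iterative
 * walk over hypothetical minimal types: follow the wait-for edges
 * lock -> owner -> lock-the-owner-blocks-on -> ... Meeting the
 * acquiring task again means a cycle; a depth cap guards against a
 * corrupted chain, just like the "depth > 20" test above.
 */
struct lock;
struct task { struct lock *blocked_on; };
struct lock { struct task *owner; };

static int wait_chain_has_cycle(struct lock *lock, struct task *self)
{
	int depth;

	for (depth = 0; lock && lock->owner; depth++) {
		if (lock->owner == self)
			return 1;	/* cycle closes back on the acquirer */
		if (depth > 20)
			return -1;	/* absurd depth: corrupted links? */
		lock = lock->owner->blocked_on;
	}
	return 0;			/* chain ends at an owner that can run */
}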
297
298/*
299 * Called when a task exits, this function checks whether the
300 * task is holding any locks, and reports the first one if so:
301 */
302void mutex_debug_check_no_locks_held(struct task_struct *task)
303{
304 struct list_head *curr, *next;
305 struct thread_info *t;
306 unsigned long flags;
307 struct mutex *lock;
308
309 if (!debug_mutex_on)
310 return;
311
312 debug_spin_lock_save(&debug_mutex_lock, flags);
313 list_for_each_safe(curr, next, &debug_mutex_held_locks) {
314 lock = list_entry(curr, struct mutex, held_list);
315 t = lock->owner;
316 if (t != task->thread_info)
317 continue;
318 list_del_init(curr);
319 DEBUG_OFF();
320 debug_spin_unlock_restore(&debug_mutex_lock, flags);
321
322 printk("BUG: %s/%d, lock held at task exit time!\n",
323 task->comm, task->pid);
324 printk_lock(lock, 1);
325 if (lock->owner != task->thread_info)
326 printk("exiting task is not even the owner??\n");
327 return;
328 }
329 debug_spin_unlock_restore(&debug_mutex_lock, flags);
330}
331
332/*
333 * Called when kernel memory is freed (or unmapped), or if a mutex
334 * is destroyed or reinitialized - this code checks whether there is
335 * any held lock in the memory range of <from> to <to>:
336 */
337void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
338{
339 struct list_head *curr, *next;
340 const void *to = from + len;
341 unsigned long flags;
342 struct mutex *lock;
343 void *lock_addr;
344
345 if (!debug_mutex_on)
346 return;
347
348 debug_spin_lock_save(&debug_mutex_lock, flags);
349 list_for_each_safe(curr, next, &debug_mutex_held_locks) {
350 lock = list_entry(curr, struct mutex, held_list);
351 lock_addr = lock;
352 if (lock_addr < from || lock_addr >= to)
353 continue;
354 list_del_init(curr);
355 DEBUG_OFF();
356 debug_spin_unlock_restore(&debug_mutex_lock, flags);
357
358 printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
359 current->comm, current->pid, lock, from, to);
360 dump_stack();
361 printk_lock(lock, 1);
362 if (lock->owner != current_thread_info())
363 printk("freeing task is not even the owner??\n");
364 return;
365 }
366 debug_spin_unlock_restore(&debug_mutex_lock, flags);
367}
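/*
 * The freed-range test above in isolation: an object counts as freed
 * iff its start address lies in [from, from + len). Note that, like
 * the code above, only the object's start is checked, not whether it
 * straddles the boundary. Hypothetical standalone helper:
 */
#include <stdint.h>
#include <stddef.h>

static int object_in_range(const void *obj, const void *from, size_t len)
{
	uintptr_t p  = (uintptr_t)obj;
	uintptr_t lo = (uintptr_t)from;

	return p >= lo && p < lo + len;
}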
368
369/*
370 * Must be called with lock->wait_lock held. 28 * Must be called with lock->wait_lock held.
371 */ 29 */
372void debug_mutex_set_owner(struct mutex *lock, 30void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
373 struct thread_info *new_owner __IP_DECL__)
374{ 31{
375 lock->owner = new_owner; 32 lock->owner = new_owner;
376 DEBUG_WARN_ON(!list_empty(&lock->held_list));
377 if (debug_mutex_on) {
378 list_add_tail(&lock->held_list, &debug_mutex_held_locks);
379 lock->acquire_ip = ip;
380 }
381} 33}
382 34
383void debug_mutex_init_waiter(struct mutex_waiter *waiter) 35void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
384{ 36{
385 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); 37 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
386 waiter->magic = waiter; 38 waiter->magic = waiter;
@@ -389,23 +41,23 @@ void debug_mutex_init_waiter(struct mutex_waiter *waiter)
389 41
390void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) 42void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
391{ 43{
392 SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock)); 44 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
393 DEBUG_WARN_ON(list_empty(&lock->wait_list)); 45 DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
394 DEBUG_WARN_ON(waiter->magic != waiter); 46 DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
395 DEBUG_WARN_ON(list_empty(&waiter->list)); 47 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
396} 48}
397 49
398void debug_mutex_free_waiter(struct mutex_waiter *waiter) 50void debug_mutex_free_waiter(struct mutex_waiter *waiter)
399{ 51{
400 DEBUG_WARN_ON(!list_empty(&waiter->list)); 52 DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
401 memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter)); 53 memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
402} 54}
403 55
404void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, 56void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
405 struct thread_info *ti __IP_DECL__) 57 struct thread_info *ti)
406{ 58{
407 SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock)); 59 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
408 check_deadlock(lock, 0, ti, ip); 60
409 /* Mark the current thread as blocked on the lock: */ 61 /* Mark the current thread as blocked on the lock: */
410 ti->task->blocked_on = waiter; 62 ti->task->blocked_on = waiter;
411 waiter->lock = lock; 63 waiter->lock = lock;
@@ -414,9 +66,9 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
414void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, 66void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
415 struct thread_info *ti) 67 struct thread_info *ti)
416{ 68{
417 DEBUG_WARN_ON(list_empty(&waiter->list)); 69 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
418 DEBUG_WARN_ON(waiter->task != ti->task); 70 DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
419 DEBUG_WARN_ON(ti->task->blocked_on != waiter); 71 DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
420 ti->task->blocked_on = NULL; 72 ti->task->blocked_on = NULL;
421 73
422 list_del_init(&waiter->list); 74 list_del_init(&waiter->list);
@@ -425,24 +77,23 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
425 77
426void debug_mutex_unlock(struct mutex *lock) 78void debug_mutex_unlock(struct mutex *lock)
427{ 79{
428 DEBUG_WARN_ON(lock->magic != lock); 80 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
429 DEBUG_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); 81 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
430 DEBUG_WARN_ON(lock->owner != current_thread_info()); 82 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
431 if (debug_mutex_on) { 83 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
432 DEBUG_WARN_ON(list_empty(&lock->held_list));
433 list_del_init(&lock->held_list);
434 }
435} 84}
436 85
437void debug_mutex_init(struct mutex *lock, const char *name) 86void debug_mutex_init(struct mutex *lock, const char *name,
87 struct lock_class_key *key)
438{ 88{
89#ifdef CONFIG_DEBUG_LOCK_ALLOC
439 /* 90 /*
440 * Make sure we are not reinitializing a held lock: 91 * Make sure we are not reinitializing a held lock:
441 */ 92 */
442 mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 93 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
94 lockdep_init_map(&lock->dep_map, name, key);
95#endif
443 lock->owner = NULL; 96 lock->owner = NULL;
444 INIT_LIST_HEAD(&lock->held_list);
445 lock->name = name;
446 lock->magic = lock; 97 lock->magic = lock;
447} 98}
448 99
@@ -456,7 +107,7 @@ void debug_mutex_init(struct mutex *lock, const char *name)
456 */ 107 */
457void fastcall mutex_destroy(struct mutex *lock) 108void fastcall mutex_destroy(struct mutex *lock)
458{ 109{
459 DEBUG_WARN_ON(mutex_is_locked(lock)); 110 DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
460 lock->magic = NULL; 111 lock->magic = NULL;
461} 112}
462 113
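/*
 * With the change above, debug_mutex_init() consumes a lock_class_key.
 * The matching include/linux/mutex.h side (not shown in this hunk)
 * plants one static key per initialization site, so every mutex
 * initialized at the same line of code shares a single validator
 * class. Roughly:
 */
#define mutex_init(mutex)				\
do {							\
	static struct lock_class_key __key;		\
							\
	__mutex_init((mutex), #mutex, &__key);		\
} while (0)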
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index a5196c36a5fd..babfbdfc534b 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -10,110 +10,44 @@
10 * More details are in kernel/mutex-debug.c. 10 * More details are in kernel/mutex-debug.c.
11 */ 11 */
12 12
13extern spinlock_t debug_mutex_lock;
14extern struct list_head debug_mutex_held_locks;
15extern int debug_mutex_on;
16
17/*
18 * In the debug case we carry the caller's instruction pointer into
19 * other functions, but we dont want the function argument overhead
20 * in the nondebug case - hence these macros:
21 */
22#define __IP_DECL__ , unsigned long ip
23#define __IP__ , ip
24#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
25
26/* 13/*
27 * This must be called with lock->wait_lock held. 14 * This must be called with lock->wait_lock held.
28 */ 15 */
29extern void debug_mutex_set_owner(struct mutex *lock, 16extern void
30 struct thread_info *new_owner __IP_DECL__); 17debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
31 18
32static inline void debug_mutex_clear_owner(struct mutex *lock) 19static inline void debug_mutex_clear_owner(struct mutex *lock)
33{ 20{
34 lock->owner = NULL; 21 lock->owner = NULL;
35} 22}
36 23
37extern void debug_mutex_init_waiter(struct mutex_waiter *waiter); 24extern void debug_mutex_lock_common(struct mutex *lock,
25 struct mutex_waiter *waiter);
38extern void debug_mutex_wake_waiter(struct mutex *lock, 26extern void debug_mutex_wake_waiter(struct mutex *lock,
39 struct mutex_waiter *waiter); 27 struct mutex_waiter *waiter);
40extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); 28extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
41extern void debug_mutex_add_waiter(struct mutex *lock, 29extern void debug_mutex_add_waiter(struct mutex *lock,
42 struct mutex_waiter *waiter, 30 struct mutex_waiter *waiter,
43 struct thread_info *ti __IP_DECL__); 31 struct thread_info *ti);
44extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, 32extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
45 struct thread_info *ti); 33 struct thread_info *ti);
46extern void debug_mutex_unlock(struct mutex *lock); 34extern void debug_mutex_unlock(struct mutex *lock);
47extern void debug_mutex_init(struct mutex *lock, const char *name); 35extern void debug_mutex_init(struct mutex *lock, const char *name,
48 36 struct lock_class_key *key);
49#define debug_spin_lock_save(lock, flags) \
50 do { \
51 local_irq_save(flags); \
52 if (debug_mutex_on) \
53 spin_lock(lock); \
54 } while (0)
55
56#define debug_spin_unlock_restore(lock, flags) \
57 do { \
58 if (debug_mutex_on) \
59 spin_unlock(lock); \
60 local_irq_restore(flags); \
61 preempt_check_resched(); \
62 } while (0)
63 37
64#define spin_lock_mutex(lock, flags) \ 38#define spin_lock_mutex(lock, flags) \
65 do { \ 39 do { \
66 struct mutex *l = container_of(lock, struct mutex, wait_lock); \ 40 struct mutex *l = container_of(lock, struct mutex, wait_lock); \
67 \ 41 \
68 DEBUG_WARN_ON(in_interrupt()); \ 42 DEBUG_LOCKS_WARN_ON(in_interrupt()); \
69 debug_spin_lock_save(&debug_mutex_lock, flags); \ 43 local_irq_save(flags); \
70 spin_lock(lock); \ 44 __raw_spin_lock(&(lock)->raw_lock); \
71 DEBUG_WARN_ON(l->magic != l); \ 45 DEBUG_LOCKS_WARN_ON(l->magic != l); \
72 } while (0) 46 } while (0)
73 47
74#define spin_unlock_mutex(lock, flags) \ 48#define spin_unlock_mutex(lock, flags) \
75 do { \ 49 do { \
76 spin_unlock(lock); \ 50 __raw_spin_unlock(&(lock)->raw_lock); \
77 debug_spin_unlock_restore(&debug_mutex_lock, flags); \ 51 local_irq_restore(flags); \
52 preempt_check_resched(); \
78 } while (0) 53 } while (0)
79
80#define DEBUG_OFF() \
81do { \
82 if (debug_mutex_on) { \
83 debug_mutex_on = 0; \
84 console_verbose(); \
85 if (spin_is_locked(&debug_mutex_lock)) \
86 spin_unlock(&debug_mutex_lock); \
87 } \
88} while (0)
89
90#define DEBUG_BUG() \
91do { \
92 if (debug_mutex_on) { \
93 DEBUG_OFF(); \
94 BUG(); \
95 } \
96} while (0)
97
98#define DEBUG_WARN_ON(c) \
99do { \
100 if (unlikely(c && debug_mutex_on)) { \
101 DEBUG_OFF(); \
102 WARN_ON(1); \
103 } \
104} while (0)
105
106# define DEBUG_BUG_ON(c) \
107do { \
108 if (unlikely(c)) \
109 DEBUG_BUG(); \
110} while (0)
111
112#ifdef CONFIG_SMP
113# define SMP_DEBUG_WARN_ON(c) DEBUG_WARN_ON(c)
114# define SMP_DEBUG_BUG_ON(c) DEBUG_BUG_ON(c)
115#else
116# define SMP_DEBUG_WARN_ON(c) do { } while (0)
117# define SMP_DEBUG_BUG_ON(c) do { } while (0)
118#endif
119
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7043db21bbce..8c71cf72a497 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/debug_locks.h>
20 21
21/* 22/*
22 * In the DEBUG case we are using the "NULL fastpath" for mutexes, 23 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -38,13 +39,14 @@
38 * 39 *
39 * It is not allowed to initialize an already locked mutex. 40 * It is not allowed to initialize an already locked mutex.
40 */ 41 */
41void fastcall __mutex_init(struct mutex *lock, const char *name) 42void
43__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
42{ 44{
43 atomic_set(&lock->count, 1); 45 atomic_set(&lock->count, 1);
44 spin_lock_init(&lock->wait_lock); 46 spin_lock_init(&lock->wait_lock);
45 INIT_LIST_HEAD(&lock->wait_list); 47 INIT_LIST_HEAD(&lock->wait_list);
46 48
47 debug_mutex_init(lock, name); 49 debug_mutex_init(lock, name, key);
48} 50}
49 51
50EXPORT_SYMBOL(__mutex_init); 52EXPORT_SYMBOL(__mutex_init);
@@ -56,7 +58,7 @@ EXPORT_SYMBOL(__mutex_init);
56 * branch is predicted by the CPU as default-untaken. 58 * branch is predicted by the CPU as default-untaken.
57 */ 59 */
58static void fastcall noinline __sched 60static void fastcall noinline __sched
59__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__); 61__mutex_lock_slowpath(atomic_t *lock_count);
60 62
61/*** 63/***
62 * mutex_lock - acquire the mutex 64 * mutex_lock - acquire the mutex
@@ -79,7 +81,7 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
79 * 81 *
80 * This function is similar to (but not equivalent to) down(). 82 * This function is similar to (but not equivalent to) down().
81 */ 83 */
82void fastcall __sched mutex_lock(struct mutex *lock) 84void inline fastcall __sched mutex_lock(struct mutex *lock)
83{ 85{
84 might_sleep(); 86 might_sleep();
85 /* 87 /*
@@ -92,7 +94,7 @@ void fastcall __sched mutex_lock(struct mutex *lock)
92EXPORT_SYMBOL(mutex_lock); 94EXPORT_SYMBOL(mutex_lock);
93 95
94static void fastcall noinline __sched 96static void fastcall noinline __sched
95__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__); 97__mutex_unlock_slowpath(atomic_t *lock_count);
96 98
97/*** 99/***
98 * mutex_unlock - release the mutex 100 * mutex_unlock - release the mutex
@@ -120,18 +122,18 @@ EXPORT_SYMBOL(mutex_unlock);
120 * Lock a mutex (possibly interruptible), slowpath: 122 * Lock a mutex (possibly interruptible), slowpath:
121 */ 123 */
122static inline int __sched 124static inline int __sched
123__mutex_lock_common(struct mutex *lock, long state __IP_DECL__) 125__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
124{ 126{
125 struct task_struct *task = current; 127 struct task_struct *task = current;
126 struct mutex_waiter waiter; 128 struct mutex_waiter waiter;
127 unsigned int old_val; 129 unsigned int old_val;
128 unsigned long flags; 130 unsigned long flags;
129 131
130 debug_mutex_init_waiter(&waiter);
131
132 spin_lock_mutex(&lock->wait_lock, flags); 132 spin_lock_mutex(&lock->wait_lock, flags);
133 133
134 debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip); 134 debug_mutex_lock_common(lock, &waiter);
135 mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
136 debug_mutex_add_waiter(lock, &waiter, task->thread_info);
135 137
136 /* add waiting tasks to the end of the waitqueue (FIFO): */ 138 /* add waiting tasks to the end of the waitqueue (FIFO): */
137 list_add_tail(&waiter.list, &lock->wait_list); 139 list_add_tail(&waiter.list, &lock->wait_list);
@@ -158,6 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
158 if (unlikely(state == TASK_INTERRUPTIBLE && 160 if (unlikely(state == TASK_INTERRUPTIBLE &&
159 signal_pending(task))) { 161 signal_pending(task))) {
160 mutex_remove_waiter(lock, &waiter, task->thread_info); 162 mutex_remove_waiter(lock, &waiter, task->thread_info);
163 mutex_release(&lock->dep_map, 1, _RET_IP_);
161 spin_unlock_mutex(&lock->wait_lock, flags); 164 spin_unlock_mutex(&lock->wait_lock, flags);
162 165
163 debug_mutex_free_waiter(&waiter); 166 debug_mutex_free_waiter(&waiter);
@@ -173,7 +176,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
173 176
174 /* got the lock - rejoice! */ 177 /* got the lock - rejoice! */
175 mutex_remove_waiter(lock, &waiter, task->thread_info); 178 mutex_remove_waiter(lock, &waiter, task->thread_info);
176 debug_mutex_set_owner(lock, task->thread_info __IP__); 179 debug_mutex_set_owner(lock, task->thread_info);
177 180
178 /* set it to 0 if there are no waiters left: */ 181 /* set it to 0 if there are no waiters left: */
179 if (likely(list_empty(&lock->wait_list))) 182 if (likely(list_empty(&lock->wait_list)))
@@ -183,32 +186,40 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
183 186
184 debug_mutex_free_waiter(&waiter); 187 debug_mutex_free_waiter(&waiter);
185 188
186 DEBUG_WARN_ON(list_empty(&lock->held_list));
187 DEBUG_WARN_ON(lock->owner != task->thread_info);
188
189 return 0; 189 return 0;
190} 190}
191 191
192static void fastcall noinline __sched 192static void fastcall noinline __sched
193__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__) 193__mutex_lock_slowpath(atomic_t *lock_count)
194{ 194{
195 struct mutex *lock = container_of(lock_count, struct mutex, count); 195 struct mutex *lock = container_of(lock_count, struct mutex, count);
196 196
197 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__); 197 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
198}
199
200#ifdef CONFIG_DEBUG_LOCK_ALLOC
201void __sched
202mutex_lock_nested(struct mutex *lock, unsigned int subclass)
203{
204 might_sleep();
205 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
198} 206}
199 207
208EXPORT_SYMBOL_GPL(mutex_lock_nested);
209#endif
210
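/*
 * Usage sketch for mutex_lock_nested() above: when two locks of the
 * same class nest by design (say, parent and child of one object
 * type), the inner lock is taken with a nonzero subclass so the
 * validator treats it as a distinct level rather than self-deadlock.
 * Hypothetical structure:
 */
#include <linux/mutex.h>

struct object {
	struct mutex lock;
	struct object *parent;
};

static void lock_with_parent(struct object *obj)
{
	mutex_lock(&obj->parent->lock);		/* subclass 0 (default) */
	mutex_lock_nested(&obj->lock, 1);	/* subclass 1: sanctioned nesting */
}

static void unlock_with_parent(struct object *obj)
{
	mutex_unlock(&obj->lock);
	mutex_unlock(&obj->parent->lock);
}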
200/* 211/*
201 * Release the lock, slowpath: 212 * Release the lock, slowpath:
202 */ 213 */
203static fastcall noinline void 214static fastcall inline void
204__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__) 215__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
205{ 216{
206 struct mutex *lock = container_of(lock_count, struct mutex, count); 217 struct mutex *lock = container_of(lock_count, struct mutex, count);
207 unsigned long flags; 218 unsigned long flags;
208 219
209 DEBUG_WARN_ON(lock->owner != current_thread_info());
210
211 spin_lock_mutex(&lock->wait_lock, flags); 220 spin_lock_mutex(&lock->wait_lock, flags);
221 mutex_release(&lock->dep_map, nested, _RET_IP_);
222 debug_mutex_unlock(lock);
212 223
213 /* 224 /*
214 * some architectures leave the lock unlocked in the fastpath failure 225 * some architectures leave the lock unlocked in the fastpath failure
@@ -218,8 +229,6 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
218 if (__mutex_slowpath_needs_to_unlock()) 229 if (__mutex_slowpath_needs_to_unlock())
219 atomic_set(&lock->count, 1); 230 atomic_set(&lock->count, 1);
220 231
221 debug_mutex_unlock(lock);
222
223 if (!list_empty(&lock->wait_list)) { 232 if (!list_empty(&lock->wait_list)) {
224 /* get the first entry from the wait-list: */ 233 /* get the first entry from the wait-list: */
225 struct mutex_waiter *waiter = 234 struct mutex_waiter *waiter =
@@ -237,11 +246,20 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
237} 246}
238 247
239/* 248/*
249 * Release the lock, slowpath:
250 */
251static fastcall noinline void
252__mutex_unlock_slowpath(atomic_t *lock_count)
253{
254 __mutex_unlock_common_slowpath(lock_count, 1);
255}
256
257/*
240 * Here come the less common (and hence less performance-critical) APIs: 258 * Here come the less common (and hence less performance-critical) APIs:
241 * mutex_lock_interruptible() and mutex_trylock(). 259 * mutex_lock_interruptible() and mutex_trylock().
242 */ 260 */
243static int fastcall noinline __sched 261static int fastcall noinline __sched
244__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__); 262__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
245 263
246/*** 264/***
247 * mutex_lock_interruptible - acquire the mutex, interruptible 265 * mutex_lock_interruptible - acquire the mutex, interruptible
@@ -264,11 +282,11 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
264EXPORT_SYMBOL(mutex_lock_interruptible); 282EXPORT_SYMBOL(mutex_lock_interruptible);
265 283
266static int fastcall noinline __sched 284static int fastcall noinline __sched
267__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__) 285__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
268{ 286{
269 struct mutex *lock = container_of(lock_count, struct mutex, count); 287 struct mutex *lock = container_of(lock_count, struct mutex, count);
270 288
271 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__); 289 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
272} 290}
273 291
274/* 292/*
@@ -284,8 +302,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
284 spin_lock_mutex(&lock->wait_lock, flags); 302 spin_lock_mutex(&lock->wait_lock, flags);
285 303
286 prev = atomic_xchg(&lock->count, -1); 304 prev = atomic_xchg(&lock->count, -1);
287 if (likely(prev == 1)) 305 if (likely(prev == 1)) {
288 debug_mutex_set_owner(lock, current_thread_info() __RET_IP__); 306 debug_mutex_set_owner(lock, current_thread_info());
307 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
308 }
289 /* Set it back to 0 if there are no waiters: */ 309 /* Set it back to 0 if there are no waiters: */
290 if (likely(list_empty(&lock->wait_list))) 310 if (likely(list_empty(&lock->wait_list)))
291 atomic_set(&lock->count, 0); 311 atomic_set(&lock->count, 0);
@@ -309,7 +329,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
309 * This function must not be used in interrupt context. The 329 * This function must not be used in interrupt context. The
310 * mutex must be released by the same task that acquired it. 330 * mutex must be released by the same task that acquired it.
311 */ 331 */
312int fastcall mutex_trylock(struct mutex *lock) 332int fastcall __sched mutex_trylock(struct mutex *lock)
313{ 333{
314 return __mutex_fastpath_trylock(&lock->count, 334 return __mutex_fastpath_trylock(&lock->count,
315 __mutex_trylock_slowpath); 335 __mutex_trylock_slowpath);
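/*
 * The heart of the trylock slowpath above is one atomic exchange on
 * the count (1 = unlocked, 0 = locked, <0 = locked with waiters): swap
 * in -1, and you own the mutex iff the old value was 1. A userspace
 * sketch with C11 atomics (not the kernel's fastpath API):
 */
#include <stdatomic.h>

static int try_lock(atomic_int *count)
{
	int prev = atomic_exchange(count, -1);	/* pessimistically claim */

	/*
	 * prev == 1: it was unlocked and is now ours. Any other value
	 * means someone holds it; our -1 merely overstates "has
	 * waiters", which is harmless and gets corrected the same way
	 * the code above does with its "set it back to 0 if there are
	 * no waiters" step.
	 */
	return prev == 1;
}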
diff --git a/kernel/mutex.h b/kernel/mutex.h
index 069189947257..a075dafbb290 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -16,22 +16,15 @@
16#define mutex_remove_waiter(lock, waiter, ti) \ 16#define mutex_remove_waiter(lock, waiter, ti) \
17 __list_del((waiter)->list.prev, (waiter)->list.next) 17 __list_del((waiter)->list.prev, (waiter)->list.next)
18 18
19#define DEBUG_WARN_ON(c) do { } while (0)
20#define debug_mutex_set_owner(lock, new_owner) do { } while (0) 19#define debug_mutex_set_owner(lock, new_owner) do { } while (0)
21#define debug_mutex_clear_owner(lock) do { } while (0) 20#define debug_mutex_clear_owner(lock) do { } while (0)
22#define debug_mutex_init_waiter(waiter) do { } while (0)
23#define debug_mutex_wake_waiter(lock, waiter) do { } while (0) 21#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
24#define debug_mutex_free_waiter(waiter) do { } while (0) 22#define debug_mutex_free_waiter(waiter) do { } while (0)
25#define debug_mutex_add_waiter(lock, waiter, ti, ip) do { } while (0) 23#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
26#define debug_mutex_unlock(lock) do { } while (0) 24#define debug_mutex_unlock(lock) do { } while (0)
27#define debug_mutex_init(lock, name) do { } while (0) 25#define debug_mutex_init(lock, name, key) do { } while (0)
28
29/*
30 * Return-address parameters/declarations. They are very useful for
31 * debugging, but add overhead in the !DEBUG case - so we go the
32 * trouble of using this not too elegant but zero-cost solution:
33 */
34#define __IP_DECL__
35#define __IP__
36#define __RET_IP__
37 26
27static inline void
28debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
29{
30}
diff --git a/kernel/pid.c b/kernel/pid.c
index eeb836b65ca4..93e212f20671 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -218,7 +218,7 @@ struct pid * fastcall find_pid(int nr)
218 return NULL; 218 return NULL;
219} 219}
220 220
221int fastcall attach_pid(task_t *task, enum pid_type type, int nr) 221int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
222{ 222{
223 struct pid_link *link; 223 struct pid_link *link;
224 struct pid *pid; 224 struct pid *pid;
@@ -233,7 +233,7 @@ int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
233 return 0; 233 return 0;
234} 234}
235 235
236void fastcall detach_pid(task_t *task, enum pid_type type) 236void fastcall detach_pid(struct task_struct *task, enum pid_type type)
237{ 237{
238 struct pid_link *link; 238 struct pid_link *link;
239 struct pid *pid; 239 struct pid *pid;
@@ -267,7 +267,7 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
267/* 267/*
268 * Must be called under rcu_read_lock() or with tasklist_lock read-held. 268 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
269 */ 269 */
270task_t *find_task_by_pid_type(int type, int nr) 270struct task_struct *find_task_by_pid_type(int type, int nr)
271{ 271{
272 return pid_task(find_pid(nr), type); 272 return pid_task(find_pid(nr), type);
273} 273}
diff --git a/kernel/printk.c b/kernel/printk.c
index 39ae24d2a415..bdba5d80496c 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -518,7 +518,9 @@ asmlinkage int vprintk(const char *fmt, va_list args)
518 zap_locks(); 518 zap_locks();
519 519
520 /* This stops the holder of console_sem just where we want him */ 520 /* This stops the holder of console_sem just where we want him */
521 spin_lock_irqsave(&logbuf_lock, flags); 521 local_irq_save(flags);
522 lockdep_off();
523 spin_lock(&logbuf_lock);
522 printk_cpu = smp_processor_id(); 524 printk_cpu = smp_processor_id();
523 525
524 /* Emit the output into the temporary buffer */ 526 /* Emit the output into the temporary buffer */
@@ -588,7 +590,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
588 */ 590 */
589 console_locked = 1; 591 console_locked = 1;
590 printk_cpu = UINT_MAX; 592 printk_cpu = UINT_MAX;
591 spin_unlock_irqrestore(&logbuf_lock, flags); 593 spin_unlock(&logbuf_lock);
592 594
593 /* 595 /*
594 * Console drivers may assume that per-cpu resources have 596 * Console drivers may assume that per-cpu resources have
@@ -604,6 +606,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
604 console_locked = 0; 606 console_locked = 0;
605 up(&console_sem); 607 up(&console_sem);
606 } 608 }
609 lockdep_on();
610 local_irq_restore(flags);
607 } else { 611 } else {
608 /* 612 /*
609 * Someone else owns the drivers. We drop the spinlock, which 613 * Someone else owns the drivers. We drop the spinlock, which
@@ -611,7 +615,9 @@ asmlinkage int vprintk(const char *fmt, va_list args)
611 * console drivers with the output which we just produced. 615 * console drivers with the output which we just produced.
612 */ 616 */
613 printk_cpu = UINT_MAX; 617 printk_cpu = UINT_MAX;
614 spin_unlock_irqrestore(&logbuf_lock, flags); 618 spin_unlock(&logbuf_lock);
619 lockdep_on();
620 local_irq_restore(flags);
615 } 621 }
616 622
617 preempt_enable(); 623 preempt_enable();
@@ -809,8 +815,15 @@ void release_console_sem(void)
809 console_may_schedule = 0; 815 console_may_schedule = 0;
810 up(&console_sem); 816 up(&console_sem);
811 spin_unlock_irqrestore(&logbuf_lock, flags); 817 spin_unlock_irqrestore(&logbuf_lock, flags);
812 if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) 818 if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) {
813 wake_up_interruptible(&log_wait); 819 /*
820 * If we printk from within the lock dependency code,
821 * from within the scheduler code, then do not lock
822 * up due to self-recursion:
823 */
824 if (!lockdep_internal())
825 wake_up_interruptible(&log_wait);
826 }
814} 827}
815EXPORT_SYMBOL(release_console_sem); 828EXPORT_SYMBOL(release_console_sem);
816 829
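/*
 * The lockdep_off()/lockdep_on() bracket added above keeps the
 * validator from analysing logbuf_lock while printk() may already be
 * running on the validator's behalf. Conceptually it is just a
 * per-task recursion counter consulted on every tracked operation;
 * userspace sketch (hypothetical names):
 */
static __thread int lockdep_recursion;

static void my_lockdep_off(void) { lockdep_recursion++; }
static void my_lockdep_on(void)  { lockdep_recursion--; }

static void track_acquire(void *lock)
{
	if (lockdep_recursion)
		return;		/* tracking disabled: no self-recursion */
	/* ... record this acquisition in the dependency graph ... */
	(void)lock;
}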
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 335c5b932e14..9a111f70145c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -28,7 +28,7 @@
28 * 28 *
29 * Must be called with the tasklist lock write-held. 29 * Must be called with the tasklist lock write-held.
30 */ 30 */
31void __ptrace_link(task_t *child, task_t *new_parent) 31void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
32{ 32{
33 BUG_ON(!list_empty(&child->ptrace_list)); 33 BUG_ON(!list_empty(&child->ptrace_list));
34 if (child->parent == new_parent) 34 if (child->parent == new_parent)
@@ -46,7 +46,7 @@ void __ptrace_link(task_t *child, task_t *new_parent)
46 * TASK_TRACED, resume it now. 46 * TASK_TRACED, resume it now.
47 * Requires that irqs be disabled. 47 * Requires that irqs be disabled.
48 */ 48 */
49void ptrace_untrace(task_t *child) 49void ptrace_untrace(struct task_struct *child)
50{ 50{
51 spin_lock(&child->sighand->siglock); 51 spin_lock(&child->sighand->siglock);
52 if (child->state == TASK_TRACED) { 52 if (child->state == TASK_TRACED) {
@@ -65,7 +65,7 @@ void ptrace_untrace(task_t *child)
65 * 65 *
66 * Must be called with the tasklist lock write-held. 66 * Must be called with the tasklist lock write-held.
67 */ 67 */
68void __ptrace_unlink(task_t *child) 68void __ptrace_unlink(struct task_struct *child)
69{ 69{
70 BUG_ON(!child->ptrace); 70 BUG_ON(!child->ptrace);
71 71
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f464f5ae3f11..759805c9859a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -53,13 +53,13 @@
53static struct rcu_ctrlblk rcu_ctrlblk = { 53static struct rcu_ctrlblk rcu_ctrlblk = {
54 .cur = -300, 54 .cur = -300,
55 .completed = -300, 55 .completed = -300,
56 .lock = SPIN_LOCK_UNLOCKED, 56 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
57 .cpumask = CPU_MASK_NONE, 57 .cpumask = CPU_MASK_NONE,
58}; 58};
59static struct rcu_ctrlblk rcu_bh_ctrlblk = { 59static struct rcu_ctrlblk rcu_bh_ctrlblk = {
60 .cur = -300, 60 .cur = -300,
61 .completed = -300, 61 .completed = -300,
62 .lock = SPIN_LOCK_UNLOCKED, 62 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
63 .cpumask = CPU_MASK_NONE, 63 .cpumask = CPU_MASK_NONE,
64}; 64};
65 65
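/*
 * The initializer change above matters because the validator keys
 * lock classes off the initialization site: the anonymous
 * SPIN_LOCK_UNLOCKED would lump every such lock into one class, while
 * the new form names the specific instance. Sketch of the two static
 * styles of this era's API:
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* preferred shorthand */

static struct my_ctrl {
	spinlock_t lock;
} my_ctrl = {
	.lock = __SPIN_LOCK_UNLOCKED(&my_ctrl.lock),	/* explicit, as above */
};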
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 4aa8a2c9f453..0c1faa950af7 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -26,6 +26,7 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/plist.h> 27#include <linux/plist.h>
28#include <linux/fs.h> 28#include <linux/fs.h>
29#include <linux/debug_locks.h>
29 30
30#include "rtmutex_common.h" 31#include "rtmutex_common.h"
31 32
@@ -45,8 +46,6 @@ do { \
45 console_verbose(); \ 46 console_verbose(); \
46 if (spin_is_locked(&current->pi_lock)) \ 47 if (spin_is_locked(&current->pi_lock)) \
47 spin_unlock(&current->pi_lock); \ 48 spin_unlock(&current->pi_lock); \
48 if (spin_is_locked(&current->held_list_lock)) \
49 spin_unlock(&current->held_list_lock); \
50 } \ 49 } \
51} while (0) 50} while (0)
52 51
@@ -97,7 +96,7 @@ void deadlock_trace_off(void)
97 rt_trace_on = 0; 96 rt_trace_on = 0;
98} 97}
99 98
100static void printk_task(task_t *p) 99static void printk_task(struct task_struct *p)
101{ 100{
102 if (p) 101 if (p)
103 printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); 102 printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
@@ -105,14 +104,6 @@ static void printk_task(task_t *p)
105 printk("<none>"); 104 printk("<none>");
106} 105}
107 106
108static void printk_task_short(task_t *p)
109{
110 if (p)
111 printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
112 else
113 printk("<none>");
114}
115
116static void printk_lock(struct rt_mutex *lock, int print_owner) 107static void printk_lock(struct rt_mutex *lock, int print_owner)
117{ 108{
118 if (lock->name) 109 if (lock->name)
@@ -128,222 +119,6 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
128 printk_task(rt_mutex_owner(lock)); 119 printk_task(rt_mutex_owner(lock));
129 printk("\n"); 120 printk("\n");
130 } 121 }
131 if (rt_mutex_owner(lock)) {
132 printk("... acquired at: ");
133 print_symbol("%s\n", lock->acquire_ip);
134 }
135}
136
137static void printk_waiter(struct rt_mutex_waiter *w)
138{
139 printk("-------------------------\n");
140 printk("| waiter struct %p:\n", w);
141 printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
142 w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
143 w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
144 w->list_entry.prio);
145 printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
146 w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
147 w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
148 w->pi_list_entry.prio);
149 printk("\n| lock:\n");
150 printk_lock(w->lock, 1);
151 printk("| w->ti->task:\n");
152 printk_task(w->task);
153 printk("| blocked at: ");
154 print_symbol("%s\n", w->ip);
155 printk("-------------------------\n");
156}
157
158static void show_task_locks(task_t *p)
159{
160 switch (p->state) {
161 case TASK_RUNNING: printk("R"); break;
162 case TASK_INTERRUPTIBLE: printk("S"); break;
163 case TASK_UNINTERRUPTIBLE: printk("D"); break;
164 case TASK_STOPPED: printk("T"); break;
165 case EXIT_ZOMBIE: printk("Z"); break;
166 case EXIT_DEAD: printk("X"); break;
167 default: printk("?"); break;
168 }
169 printk_task(p);
170 if (p->pi_blocked_on) {
171 struct rt_mutex *lock = p->pi_blocked_on->lock;
172
173 printk(" blocked on:");
174 printk_lock(lock, 1);
175 } else
176 printk(" (not blocked)\n");
177}
178
179void rt_mutex_show_held_locks(task_t *task, int verbose)
180{
181 struct list_head *curr, *cursor = NULL;
182 struct rt_mutex *lock;
183 task_t *t;
184 unsigned long flags;
185 int count = 0;
186
187 if (!rt_trace_on)
188 return;
189
190 if (verbose) {
191 printk("------------------------------\n");
192 printk("| showing all locks held by: | (");
193 printk_task_short(task);
194 printk("):\n");
195 printk("------------------------------\n");
196 }
197
198next:
199 spin_lock_irqsave(&task->held_list_lock, flags);
200 list_for_each(curr, &task->held_list_head) {
201 if (cursor && curr != cursor)
202 continue;
203 lock = list_entry(curr, struct rt_mutex, held_list_entry);
204 t = rt_mutex_owner(lock);
205 WARN_ON(t != task);
206 count++;
207 cursor = curr->next;
208 spin_unlock_irqrestore(&task->held_list_lock, flags);
209
210 printk("\n#%03d: ", count);
211 printk_lock(lock, 0);
212 goto next;
213 }
214 spin_unlock_irqrestore(&task->held_list_lock, flags);
215
216 printk("\n");
217}
218
219void rt_mutex_show_all_locks(void)
220{
221 task_t *g, *p;
222 int count = 10;
223 int unlock = 1;
224
225 printk("\n");
226 printk("----------------------\n");
227 printk("| showing all tasks: |\n");
228 printk("----------------------\n");
229
230 /*
231 * Here we try to get the tasklist_lock as hard as possible;
232 * if not successful after 2 seconds we ignore it (but keep
233 * trying). This is to enable a debug printout even if a
234 * tasklist_lock-holding task deadlocks or crashes.
235 */
236retry:
237 if (!read_trylock(&tasklist_lock)) {
238 if (count == 10)
239 printk("hm, tasklist_lock locked, retrying... ");
240 if (count) {
241 count--;
242 printk(" #%d", 10-count);
243 mdelay(200);
244 goto retry;
245 }
246 printk(" ignoring it.\n");
247 unlock = 0;
248 }
249 if (count != 10)
250 printk(" locked it.\n");
251
252 do_each_thread(g, p) {
253 show_task_locks(p);
254 if (!unlock)
255 if (read_trylock(&tasklist_lock))
256 unlock = 1;
257 } while_each_thread(g, p);
258
259 printk("\n");
260
261 printk("-----------------------------------------\n");
262 printk("| showing all locks held in the system: |\n");
263 printk("-----------------------------------------\n");
264
265 do_each_thread(g, p) {
266 rt_mutex_show_held_locks(p, 0);
267 if (!unlock)
268 if (read_trylock(&tasklist_lock))
269 unlock = 1;
270 } while_each_thread(g, p);
271
272
273 printk("=============================================\n\n");
274
275 if (unlock)
276 read_unlock(&tasklist_lock);
277}
278
279void rt_mutex_debug_check_no_locks_held(task_t *task)
280{
281 struct rt_mutex_waiter *w;
282 struct list_head *curr;
283 struct rt_mutex *lock;
284
285 if (!rt_trace_on)
286 return;
287 if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
288 printk("BUG: PI priority boost leaked!\n");
289 printk_task(task);
290 printk("\n");
291 }
292 if (list_empty(&task->held_list_head))
293 return;
294
295 spin_lock(&task->pi_lock);
296 plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
297 TRACE_OFF();
298
299 printk("hm, PI interest held at exit time? Task:\n");
300 printk_task(task);
301 printk_waiter(w);
302 return;
303 }
304 spin_unlock(&task->pi_lock);
305
306 list_for_each(curr, &task->held_list_head) {
307 lock = list_entry(curr, struct rt_mutex, held_list_entry);
308
309 printk("BUG: %s/%d, lock held at task exit time!\n",
310 task->comm, task->pid);
311 printk_lock(lock, 1);
312 if (rt_mutex_owner(lock) != task)
313 printk("exiting task is not even the owner??\n");
314 }
315}
316
317int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
318{
319 const void *to = from + len;
320 struct list_head *curr;
321 struct rt_mutex *lock;
322 unsigned long flags;
323 void *lock_addr;
324
325 if (!rt_trace_on)
326 return 0;
327
328 spin_lock_irqsave(&current->held_list_lock, flags);
329 list_for_each(curr, &current->held_list_head) {
330 lock = list_entry(curr, struct rt_mutex, held_list_entry);
331 lock_addr = lock;
332 if (lock_addr < from || lock_addr >= to)
333 continue;
334 TRACE_OFF();
335
336 printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
337 current->comm, current->pid, lock, from, to);
338 dump_stack();
339 printk_lock(lock, 1);
340 if (rt_mutex_owner(lock) != current)
341 printk("freeing task is not even the owner??\n");
342 return 1;
343 }
344 spin_unlock_irqrestore(&current->held_list_lock, flags);
345
346 return 0;
347} 122}
348 123
349void rt_mutex_debug_task_free(struct task_struct *task) 124void rt_mutex_debug_task_free(struct task_struct *task)
@@ -395,85 +170,41 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
395 current->comm, current->pid); 170 current->comm, current->pid);
396 printk_lock(waiter->lock, 1); 171 printk_lock(waiter->lock, 1);
397 172
398 printk("... trying at: ");
399 print_symbol("%s\n", waiter->ip);
400
401 printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid); 173 printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
402 printk_lock(waiter->deadlock_lock, 1); 174 printk_lock(waiter->deadlock_lock, 1);
403 175
404 rt_mutex_show_held_locks(current, 1); 176 debug_show_held_locks(current);
405 rt_mutex_show_held_locks(task, 1); 177 debug_show_held_locks(task);
406 178
407 printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid); 179 printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
408 show_stack(task, NULL); 180 show_stack(task, NULL);
409 printk("\n%s/%d's [current] stackdump:\n\n", 181 printk("\n%s/%d's [current] stackdump:\n\n",
410 current->comm, current->pid); 182 current->comm, current->pid);
411 dump_stack(); 183 dump_stack();
412 rt_mutex_show_all_locks(); 184 debug_show_all_locks();
185
413 printk("[ turning off deadlock detection." 186 printk("[ turning off deadlock detection."
414 "Please report this trace. ]\n\n"); 187 "Please report this trace. ]\n\n");
415 local_irq_disable(); 188 local_irq_disable();
416} 189}
417 190
418void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__) 191void debug_rt_mutex_lock(struct rt_mutex *lock)
419{ 192{
420 unsigned long flags;
421
422 if (rt_trace_on) {
423 TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
424
425 spin_lock_irqsave(&current->held_list_lock, flags);
426 list_add_tail(&lock->held_list_entry, &current->held_list_head);
427 spin_unlock_irqrestore(&current->held_list_lock, flags);
428
429 lock->acquire_ip = ip;
430 }
431} 193}
432 194
433void debug_rt_mutex_unlock(struct rt_mutex *lock) 195void debug_rt_mutex_unlock(struct rt_mutex *lock)
434{ 196{
435 unsigned long flags; 197 TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
436
437 if (rt_trace_on) {
438 TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
439 TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
440
441 spin_lock_irqsave(&current->held_list_lock, flags);
442 list_del_init(&lock->held_list_entry);
443 spin_unlock_irqrestore(&current->held_list_lock, flags);
444 }
445} 198}
446 199
447void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, 200void
448 struct task_struct *powner __IP_DECL__) 201debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
449{ 202{
450 unsigned long flags;
451
452 if (rt_trace_on) {
453 TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
454
455 spin_lock_irqsave(&powner->held_list_lock, flags);
456 list_add_tail(&lock->held_list_entry, &powner->held_list_head);
457 spin_unlock_irqrestore(&powner->held_list_lock, flags);
458
459 lock->acquire_ip = ip;
460 }
461} 203}
462 204
463void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) 205void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
464{ 206{
465 unsigned long flags; 207 TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
466
467 if (rt_trace_on) {
468 struct task_struct *owner = rt_mutex_owner(lock);
469
470 TRACE_WARN_ON_LOCKED(!owner);
471 TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
472
473 spin_lock_irqsave(&owner->held_list_lock, flags);
474 list_del_init(&lock->held_list_entry);
475 spin_unlock_irqrestore(&owner->held_list_lock, flags);
476 }
477} 208}
478 209
479void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) 210void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
@@ -493,17 +224,15 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
493 224
494void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) 225void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
495{ 226{
496 void *addr = lock; 227 /*
497 228 * Make sure we are not reinitializing a held lock:
498 if (rt_trace_on) { 229 */
499 rt_mutex_debug_check_no_locks_freed(addr, 230 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
500 sizeof(struct rt_mutex)); 231 lock->name = name;
501 INIT_LIST_HEAD(&lock->held_list_entry);
502 lock->name = name;
503 }
504} 232}
505 233
506void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task) 234void
235rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
507{ 236{
508} 237}
509 238
diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
index 7612fbc62d70..14193d596d78 100644
--- a/kernel/rtmutex-debug.h
+++ b/kernel/rtmutex-debug.h
@@ -9,20 +9,16 @@
9 * This file contains macros used solely by rtmutex.c. Debug version. 9 * This file contains macros used solely by rtmutex.c. Debug version.
10 */ 10 */
11 11
12#define __IP_DECL__ , unsigned long ip
13#define __IP__ , ip
14#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
15
16extern void 12extern void
17rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task); 13rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
18extern void rt_mutex_deadlock_account_unlock(struct task_struct *task); 14extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
19extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); 15extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
20extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); 16extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
21extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); 17extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
22extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__); 18extern void debug_rt_mutex_lock(struct rt_mutex *lock);
23extern void debug_rt_mutex_unlock(struct rt_mutex *lock); 19extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
24extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, 20extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
25 struct task_struct *powner __IP_DECL__); 21 struct task_struct *powner);
26extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock); 22extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
27extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter, 23extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
28 struct rt_mutex *lock); 24 struct rt_mutex *lock);
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index e82c2f848249..494dac872a13 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -33,7 +33,7 @@ struct test_thread_data {
33}; 33};
34 34
35static struct test_thread_data thread_data[MAX_RT_TEST_THREADS]; 35static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
36static task_t *threads[MAX_RT_TEST_THREADS]; 36static struct task_struct *threads[MAX_RT_TEST_THREADS];
37static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES]; 37static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
38 38
39enum test_opcodes { 39enum test_opcodes {
@@ -361,8 +361,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
361static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) 361static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
362{ 362{
363 struct test_thread_data *td; 363 struct test_thread_data *td;
364 struct task_struct *tsk;
364 char *curr = buf; 365 char *curr = buf;
365 task_t *tsk;
366 int i; 366 int i;
367 367
368 td = container_of(dev, struct test_thread_data, sysdev); 368 td = container_of(dev, struct test_thread_data, sysdev);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 45d61016da57..d2ef13b485e7 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -157,12 +157,11 @@ int max_lock_depth = 1024;
157 * Decreases task's usage by one - may thus free the task. 157 * Decreases task's usage by one - may thus free the task.
158 * Returns 0 or -EDEADLK. 158 * Returns 0 or -EDEADLK.
159 */ 159 */
160static int rt_mutex_adjust_prio_chain(task_t *task, 160static int rt_mutex_adjust_prio_chain(struct task_struct *task,
161 int deadlock_detect, 161 int deadlock_detect,
162 struct rt_mutex *orig_lock, 162 struct rt_mutex *orig_lock,
163 struct rt_mutex_waiter *orig_waiter, 163 struct rt_mutex_waiter *orig_waiter,
164 struct task_struct *top_task 164 struct task_struct *top_task)
165 __IP_DECL__)
166{ 165{
167 struct rt_mutex *lock; 166 struct rt_mutex *lock;
168 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; 167 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
@@ -283,6 +282,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
283 spin_unlock_irqrestore(&task->pi_lock, flags); 282 spin_unlock_irqrestore(&task->pi_lock, flags);
284 out_put_task: 283 out_put_task:
285 put_task_struct(task); 284 put_task_struct(task);
285
286 return ret; 286 return ret;
287} 287}
288 288
@@ -357,7 +357,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock)
357 * 357 *
358 * Must be called with lock->wait_lock held. 358 * Must be called with lock->wait_lock held.
359 */ 359 */
360static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__) 360static int try_to_take_rt_mutex(struct rt_mutex *lock)
361{ 361{
362 /* 362 /*
363 * We have to be careful here if the atomic speedups are 363 * We have to be careful here if the atomic speedups are
@@ -384,7 +384,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
384 return 0; 384 return 0;
385 385
386 /* We got the lock. */ 386 /* We got the lock. */
387 debug_rt_mutex_lock(lock __IP__); 387 debug_rt_mutex_lock(lock);
388 388
389 rt_mutex_set_owner(lock, current, 0); 389 rt_mutex_set_owner(lock, current, 0);
390 390
@@ -402,13 +402,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
402 */ 402 */
403static int task_blocks_on_rt_mutex(struct rt_mutex *lock, 403static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
404 struct rt_mutex_waiter *waiter, 404 struct rt_mutex_waiter *waiter,
405 int detect_deadlock 405 int detect_deadlock)
406 __IP_DECL__)
407{ 406{
407 struct task_struct *owner = rt_mutex_owner(lock);
408 struct rt_mutex_waiter *top_waiter = waiter; 408 struct rt_mutex_waiter *top_waiter = waiter;
409 task_t *owner = rt_mutex_owner(lock);
410 int boost = 0, res;
411 unsigned long flags; 409 unsigned long flags;
410 int boost = 0, res;
412 411
413 spin_lock_irqsave(&current->pi_lock, flags); 412 spin_lock_irqsave(&current->pi_lock, flags);
414 __rt_mutex_adjust_prio(current); 413 __rt_mutex_adjust_prio(current);
@@ -454,7 +453,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
454 spin_unlock(&lock->wait_lock); 453 spin_unlock(&lock->wait_lock);
455 454
456 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, 455 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
457 current __IP__); 456 current);
458 457
459 spin_lock(&lock->wait_lock); 458 spin_lock(&lock->wait_lock);
460 459
@@ -526,12 +525,12 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
526 * Must be called with lock->wait_lock held 525 * Must be called with lock->wait_lock held
527 */ 526 */
528static void remove_waiter(struct rt_mutex *lock, 527static void remove_waiter(struct rt_mutex *lock,
529 struct rt_mutex_waiter *waiter __IP_DECL__) 528 struct rt_mutex_waiter *waiter)
530{ 529{
531 int first = (waiter == rt_mutex_top_waiter(lock)); 530 int first = (waiter == rt_mutex_top_waiter(lock));
532 int boost = 0; 531 struct task_struct *owner = rt_mutex_owner(lock);
533 task_t *owner = rt_mutex_owner(lock);
534 unsigned long flags; 532 unsigned long flags;
533 int boost = 0;
535 534
536 spin_lock_irqsave(&current->pi_lock, flags); 535 spin_lock_irqsave(&current->pi_lock, flags);
537 plist_del(&waiter->list_entry, &lock->wait_list); 536 plist_del(&waiter->list_entry, &lock->wait_list);
@@ -568,7 +567,7 @@ static void remove_waiter(struct rt_mutex *lock,
568 567
569 spin_unlock(&lock->wait_lock); 568 spin_unlock(&lock->wait_lock);
570 569
571 rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__); 570 rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
572 571
573 spin_lock(&lock->wait_lock); 572 spin_lock(&lock->wait_lock);
574} 573}
@@ -595,7 +594,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
595 get_task_struct(task); 594 get_task_struct(task);
596 spin_unlock_irqrestore(&task->pi_lock, flags); 595 spin_unlock_irqrestore(&task->pi_lock, flags);
597 596
598 rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__); 597 rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
599} 598}
600 599
601/* 600/*
@@ -604,7 +603,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
604static int __sched 603static int __sched
605rt_mutex_slowlock(struct rt_mutex *lock, int state, 604rt_mutex_slowlock(struct rt_mutex *lock, int state,
606 struct hrtimer_sleeper *timeout, 605 struct hrtimer_sleeper *timeout,
607 int detect_deadlock __IP_DECL__) 606 int detect_deadlock)
608{ 607{
609 struct rt_mutex_waiter waiter; 608 struct rt_mutex_waiter waiter;
610 int ret = 0; 609 int ret = 0;
@@ -615,7 +614,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
615 spin_lock(&lock->wait_lock); 614 spin_lock(&lock->wait_lock);
616 615
617 /* Try to acquire the lock again: */ 616 /* Try to acquire the lock again: */
618 if (try_to_take_rt_mutex(lock __IP__)) { 617 if (try_to_take_rt_mutex(lock)) {
619 spin_unlock(&lock->wait_lock); 618 spin_unlock(&lock->wait_lock);
620 return 0; 619 return 0;
621 } 620 }
@@ -629,7 +628,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
629 628
630 for (;;) { 629 for (;;) {
631 /* Try to acquire the lock: */ 630 /* Try to acquire the lock: */
632 if (try_to_take_rt_mutex(lock __IP__)) 631 if (try_to_take_rt_mutex(lock))
633 break; 632 break;
634 633
635 /* 634 /*
@@ -653,7 +652,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
653 */ 652 */
654 if (!waiter.task) { 653 if (!waiter.task) {
655 ret = task_blocks_on_rt_mutex(lock, &waiter, 654 ret = task_blocks_on_rt_mutex(lock, &waiter,
656 detect_deadlock __IP__); 655 detect_deadlock);
657 /* 656 /*
658 * If we got woken up by the owner then start loop 657 * If we got woken up by the owner then start loop
659 * all over without going into schedule to try 658 * all over without going into schedule to try
@@ -680,7 +679,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
680 set_current_state(TASK_RUNNING); 679 set_current_state(TASK_RUNNING);
681 680
682 if (unlikely(waiter.task)) 681 if (unlikely(waiter.task))
683 remove_waiter(lock, &waiter __IP__); 682 remove_waiter(lock, &waiter);
684 683
685 /* 684 /*
686 * try_to_take_rt_mutex() sets the waiter bit 685 * try_to_take_rt_mutex() sets the waiter bit
@@ -711,7 +710,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
711 * Slow path try-lock function: 710 * Slow path try-lock function:
712 */ 711 */
713static inline int 712static inline int
714rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__) 713rt_mutex_slowtrylock(struct rt_mutex *lock)
715{ 714{
716 int ret = 0; 715 int ret = 0;
717 716
@@ -719,7 +718,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
719 718
720 if (likely(rt_mutex_owner(lock) != current)) { 719 if (likely(rt_mutex_owner(lock) != current)) {
721 720
722 ret = try_to_take_rt_mutex(lock __IP__); 721 ret = try_to_take_rt_mutex(lock);
723 /* 722 /*
724 * try_to_take_rt_mutex() sets the lock waiters 723 * try_to_take_rt_mutex() sets the lock waiters
725 * bit unconditionally. Clean this up. 724 * bit unconditionally. Clean this up.
@@ -769,13 +768,13 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
769 int detect_deadlock, 768 int detect_deadlock,
770 int (*slowfn)(struct rt_mutex *lock, int state, 769 int (*slowfn)(struct rt_mutex *lock, int state,
771 struct hrtimer_sleeper *timeout, 770 struct hrtimer_sleeper *timeout,
772 int detect_deadlock __IP_DECL__)) 771 int detect_deadlock))
773{ 772{
774 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { 773 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
775 rt_mutex_deadlock_account_lock(lock, current); 774 rt_mutex_deadlock_account_lock(lock, current);
776 return 0; 775 return 0;
777 } else 776 } else
778 return slowfn(lock, state, NULL, detect_deadlock __RET_IP__); 777 return slowfn(lock, state, NULL, detect_deadlock);
779} 778}
780 779
781static inline int 780static inline int
@@ -783,24 +782,24 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
783 struct hrtimer_sleeper *timeout, int detect_deadlock, 782 struct hrtimer_sleeper *timeout, int detect_deadlock,
784 int (*slowfn)(struct rt_mutex *lock, int state, 783 int (*slowfn)(struct rt_mutex *lock, int state,
785 struct hrtimer_sleeper *timeout, 784 struct hrtimer_sleeper *timeout,
786 int detect_deadlock __IP_DECL__)) 785 int detect_deadlock))
787{ 786{
788 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { 787 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
789 rt_mutex_deadlock_account_lock(lock, current); 788 rt_mutex_deadlock_account_lock(lock, current);
790 return 0; 789 return 0;
791 } else 790 } else
792 return slowfn(lock, state, timeout, detect_deadlock __RET_IP__); 791 return slowfn(lock, state, timeout, detect_deadlock);
793} 792}
794 793
795static inline int 794static inline int
796rt_mutex_fasttrylock(struct rt_mutex *lock, 795rt_mutex_fasttrylock(struct rt_mutex *lock,
797 int (*slowfn)(struct rt_mutex *lock __IP_DECL__)) 796 int (*slowfn)(struct rt_mutex *lock))
798{ 797{
799 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { 798 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
800 rt_mutex_deadlock_account_lock(lock, current); 799 rt_mutex_deadlock_account_lock(lock, current);
801 return 1; 800 return 1;
802 } 801 }
803 return slowfn(lock __RET_IP__); 802 return slowfn(lock);
804} 803}
805 804
806static inline void 805static inline void
@@ -948,7 +947,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
948 struct task_struct *proxy_owner) 947 struct task_struct *proxy_owner)
949{ 948{
950 __rt_mutex_init(lock, NULL); 949 __rt_mutex_init(lock, NULL);
951 debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__); 950 debug_rt_mutex_proxy_lock(lock, proxy_owner);
952 rt_mutex_set_owner(lock, proxy_owner, 0); 951 rt_mutex_set_owner(lock, proxy_owner, 0);
953 rt_mutex_deadlock_account_lock(lock, proxy_owner); 952 rt_mutex_deadlock_account_lock(lock, proxy_owner);
954} 953}
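
The three fast-path helpers above share one idiom: a single cmpxchg of the owner word from NULL to current claims an uncontended rt-mutex without ever touching wait_lock, and only a failed exchange (or an explicit detect_deadlock request) falls through to the slow path. A stripped-down user-space sketch of that idiom, using GCC atomic builtins and an invented lock type rather than the kernel's rt_mutex:

struct fastlock { void *owner; };		/* NULL == unlocked */

static int try_fast(struct fastlock *l, void *me)
{
	void *expected = NULL;

	/* claim the lock iff nobody owns it -- one atomic operation */
	return __atomic_compare_exchange_n(&l->owner, &expected, me, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int lock_it(struct fastlock *l, void *me)
{
	if (try_fast(l, me))
		return 0;	/* fast path: no contention, no wait_lock  */
	return -1;		/* a real slow path would enqueue and block */
}
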
diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
index 1e0fca13ff72..a1a1dd06421d 100644
--- a/kernel/rtmutex.h
+++ b/kernel/rtmutex.h
@@ -10,9 +10,6 @@
10 * Non-debug version. 10 * Non-debug version.
11 */ 11 */
12 12
13#define __IP_DECL__
14#define __IP__
15#define __RET_IP__
16#define rt_mutex_deadlock_check(l) (0) 13#define rt_mutex_deadlock_check(l) (0)
17#define rt_mutex_deadlock_account_lock(m, t) do { } while (0) 14#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
18#define rt_mutex_deadlock_account_unlock(l) do { } while (0) 15#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
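
With lockdep's _RET_IP_ available, the three stub macros deleted above are no longer needed even as empty placeholders: their whole job was to thread the caller's instruction pointer through every rt-mutex function in debug builds while compiling away here. A minimal sketch of that retired pattern, with hypothetical names standing in for the real debug variants:

struct mylock { void *owner; };

#ifdef MYLOCK_DEBUG
# define IP_DECL_	, unsigned long ip	/* extra formal parameter */
# define IP_		, ip			/* forward it unchanged   */
# define RET_IP_	, (unsigned long)__builtin_return_address(0)
#else
# define IP_DECL_				/* compiles away entirely */
# define IP_
# define RET_IP_
#endif

static int lock_slowpath(struct mylock *l IP_DECL_)
{
	/* a debug build would record 'ip' as the acquisition site */
	(void)l;
	return 0;
}

int lock_entry(struct mylock *l)
{
	/* capture the caller's return address only when debugging */
	return lock_slowpath(l RET_IP_);
}
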
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
new file mode 100644
index 000000000000..291ded556aa0
--- /dev/null
+++ b/kernel/rwsem.c
@@ -0,0 +1,147 @@
1/* kernel/rwsem.c: R/W semaphores, public implementation
2 *
3 * Written by David Howells (dhowells@redhat.com).
4 * Derived from asm-i386/semaphore.h
5 */
6
7#include <linux/types.h>
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/rwsem.h>
11
12#include <asm/system.h>
13#include <asm/atomic.h>
14
15/*
16 * lock for reading
17 */
18void down_read(struct rw_semaphore *sem)
19{
20 might_sleep();
21 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
22
23 __down_read(sem);
24}
25
26EXPORT_SYMBOL(down_read);
27
28/*
29 * trylock for reading -- returns 1 if successful, 0 if contention
30 */
31int down_read_trylock(struct rw_semaphore *sem)
32{
33 int ret = __down_read_trylock(sem);
34
35 if (ret == 1)
36 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
37 return ret;
38}
39
40EXPORT_SYMBOL(down_read_trylock);
41
42/*
43 * lock for writing
44 */
45void down_write(struct rw_semaphore *sem)
46{
47 might_sleep();
48 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
49
50 __down_write(sem);
51}
52
53EXPORT_SYMBOL(down_write);
54
55/*
56 * trylock for writing -- returns 1 if successful, 0 if contention
57 */
58int down_write_trylock(struct rw_semaphore *sem)
59{
60 int ret = __down_write_trylock(sem);
61
62 if (ret == 1)
63 rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
64 return ret;
65}
66
67EXPORT_SYMBOL(down_write_trylock);
68
69/*
70 * release a read lock
71 */
72void up_read(struct rw_semaphore *sem)
73{
74 rwsem_release(&sem->dep_map, 1, _RET_IP_);
75
76 __up_read(sem);
77}
78
79EXPORT_SYMBOL(up_read);
80
81/*
82 * release a write lock
83 */
84void up_write(struct rw_semaphore *sem)
85{
86 rwsem_release(&sem->dep_map, 1, _RET_IP_);
87
88 __up_write(sem);
89}
90
91EXPORT_SYMBOL(up_write);
92
93/*
94 * downgrade write lock to read lock
95 */
96void downgrade_write(struct rw_semaphore *sem)
97{
98 /*
99 * lockdep: a downgraded write will live on as a write
100 * dependency.
101 */
102 __downgrade_write(sem);
103}
104
105EXPORT_SYMBOL(downgrade_write);
106
107#ifdef CONFIG_DEBUG_LOCK_ALLOC
108
109void down_read_nested(struct rw_semaphore *sem, int subclass)
110{
111 might_sleep();
112 rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
113
114 __down_read(sem);
115}
116
117EXPORT_SYMBOL(down_read_nested);
118
119void down_read_non_owner(struct rw_semaphore *sem)
120{
121 might_sleep();
122
123 __down_read(sem);
124}
125
126EXPORT_SYMBOL(down_read_non_owner);
127
128void down_write_nested(struct rw_semaphore *sem, int subclass)
129{
130 might_sleep();
131 rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
132
133 __down_write_nested(sem, subclass);
134}
135
136EXPORT_SYMBOL(down_write_nested);
137
138void up_read_non_owner(struct rw_semaphore *sem)
139{
140 __up_read(sem);
141}
142
143EXPORT_SYMBOL(up_read_non_owner);
144
145#endif
146
147
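
kernel/rwsem.c is new with this merge: the public down_*/up_* entry points move out of per-architecture headers so each can pair the real semaphore operation with a lockdep annotation. rwsem_acquire_read()/rwsem_acquire() record the acquisition -- tagged with _RET_IP_ as the call site -- before blocking, rwsem_release() records the release, and the _nested/_non_owner variants exist only to keep lockdep's bookkeeping accurate in unusual locking schemes. A small usage sketch (hypothetical call sites, assuming the standard DECLARE_RWSEM and SINGLE_DEPTH_NESTING helpers; not code from this patch):

static DECLARE_RWSEM(map_sem);

void read_side(void)
{
	down_read(&map_sem);	/* lockdep records a read acquire */
	/* ... read shared state ... */
	up_read(&map_sem);	/* ... and the matching release   */
}

void take_two(struct rw_semaphore *outer, struct rw_semaphore *inner)
{
	down_read(outer);
	/*
	 * Same lock class taken twice: declare the nesting so lockdep
	 * does not report a false self-deadlock.
	 */
	down_read_nested(inner, SINGLE_DEPTH_NESTING);
	up_read(inner);
	up_read(outer);
}
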
diff --git a/kernel/sched.c b/kernel/sched.c
index d5e37072ea54..4ee400f9d56b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -30,6 +30,7 @@
30#include <linux/capability.h> 30#include <linux/capability.h>
31#include <linux/completion.h> 31#include <linux/completion.h>
32#include <linux/kernel_stat.h> 32#include <linux/kernel_stat.h>
33#include <linux/debug_locks.h>
33#include <linux/security.h> 34#include <linux/security.h>
34#include <linux/notifier.h> 35#include <linux/notifier.h>
35#include <linux/profile.h> 36#include <linux/profile.h>
@@ -178,20 +179,15 @@ static unsigned int static_prio_timeslice(int static_prio)
178 return SCALE_PRIO(DEF_TIMESLICE, static_prio); 179 return SCALE_PRIO(DEF_TIMESLICE, static_prio);
179} 180}
180 181
181static inline unsigned int task_timeslice(task_t *p) 182static inline unsigned int task_timeslice(struct task_struct *p)
182{ 183{
183 return static_prio_timeslice(p->static_prio); 184 return static_prio_timeslice(p->static_prio);
184} 185}
185 186
186#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
187 < (long long) (sd)->cache_hot_time)
188
189/* 187/*
190 * These are the runqueue data structures: 188 * These are the runqueue data structures:
191 */ 189 */
192 190
193typedef struct runqueue runqueue_t;
194
195struct prio_array { 191struct prio_array {
196 unsigned int nr_active; 192 unsigned int nr_active;
197 DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */ 193 DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
@@ -205,7 +201,7 @@ struct prio_array {
205 * (such as the load balancing or the thread migration code), lock 201 * (such as the load balancing or the thread migration code), lock
206 * acquire operations must be ordered by ascending &runqueue. 202 * acquire operations must be ordered by ascending &runqueue.
207 */ 203 */
208struct runqueue { 204struct rq {
209 spinlock_t lock; 205 spinlock_t lock;
210 206
211 /* 207 /*
@@ -229,9 +225,9 @@ struct runqueue {
229 225
230 unsigned long expired_timestamp; 226 unsigned long expired_timestamp;
231 unsigned long long timestamp_last_tick; 227 unsigned long long timestamp_last_tick;
232 task_t *curr, *idle; 228 struct task_struct *curr, *idle;
233 struct mm_struct *prev_mm; 229 struct mm_struct *prev_mm;
234 prio_array_t *active, *expired, arrays[2]; 230 struct prio_array *active, *expired, arrays[2];
235 int best_expired_prio; 231 int best_expired_prio;
236 atomic_t nr_iowait; 232 atomic_t nr_iowait;
237 233
@@ -242,7 +238,7 @@ struct runqueue {
242 int active_balance; 238 int active_balance;
243 int push_cpu; 239 int push_cpu;
244 240
245 task_t *migration_thread; 241 struct task_struct *migration_thread;
246 struct list_head migration_queue; 242 struct list_head migration_queue;
247#endif 243#endif
248 244
@@ -265,9 +261,10 @@ struct runqueue {
265 unsigned long ttwu_cnt; 261 unsigned long ttwu_cnt;
266 unsigned long ttwu_local; 262 unsigned long ttwu_local;
267#endif 263#endif
264 struct lock_class_key rq_lock_key;
268}; 265};
269 266
270static DEFINE_PER_CPU(struct runqueue, runqueues); 267static DEFINE_PER_CPU(struct rq, runqueues);
271 268
272/* 269/*
273 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 270 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
@@ -276,8 +273,8 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);
276 * The domain tree of any CPU may only be accessed from within 273 * The domain tree of any CPU may only be accessed from within
277 * preempt-disabled sections. 274 * preempt-disabled sections.
278 */ 275 */
279#define for_each_domain(cpu, domain) \ 276#define for_each_domain(cpu, __sd) \
280for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent) 277 for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
281 278
282#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 279#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
283#define this_rq() (&__get_cpu_var(runqueues)) 280#define this_rq() (&__get_cpu_var(runqueues))
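
The rewritten for_each_domain() keeps the rcu_dereference() and renames the iterator to __sd with a conventional loop indent. The constraint stated in the comment above -- preempt-disabled sections only -- is what makes the walk a legal RCU read side in this era's kernel, where disabling preemption is the classic-RCU read lock. The same walk-an-RCU-protected-chain pattern in a generic rendering, with made-up types:

struct node { struct node *parent; int weight; };
struct node *chain_head;	/* writers publish via rcu_assign_pointer() */

int chain_weight(void)
{
	struct node *n;
	int sum = 0;

	preempt_disable();	/* classic-RCU read-side critical section */
	for (n = rcu_dereference(chain_head); n; n = n->parent)
		sum += n->weight;
	preempt_enable();

	return sum;
}
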
@@ -292,26 +289,33 @@ for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent)
292#endif 289#endif
293 290
294#ifndef __ARCH_WANT_UNLOCKED_CTXSW 291#ifndef __ARCH_WANT_UNLOCKED_CTXSW
295static inline int task_running(runqueue_t *rq, task_t *p) 292static inline int task_running(struct rq *rq, struct task_struct *p)
296{ 293{
297 return rq->curr == p; 294 return rq->curr == p;
298} 295}
299 296
300static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) 297static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
301{ 298{
302} 299}
303 300
304static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) 301static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
305{ 302{
306#ifdef CONFIG_DEBUG_SPINLOCK 303#ifdef CONFIG_DEBUG_SPINLOCK
307 /* this is a valid case when another task releases the spinlock */ 304 /* this is a valid case when another task releases the spinlock */
308 rq->lock.owner = current; 305 rq->lock.owner = current;
309#endif 306#endif
307 /*
308 * If we are tracking spinlock dependencies then we have to
309 * fix up the runqueue lock - which gets 'carried over' from
310 * prev into current:
311 */
312 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
313
310 spin_unlock_irq(&rq->lock); 314 spin_unlock_irq(&rq->lock);
311} 315}
312 316
313#else /* __ARCH_WANT_UNLOCKED_CTXSW */ 317#else /* __ARCH_WANT_UNLOCKED_CTXSW */
314static inline int task_running(runqueue_t *rq, task_t *p) 318static inline int task_running(struct rq *rq, struct task_struct *p)
315{ 319{
316#ifdef CONFIG_SMP 320#ifdef CONFIG_SMP
317 return p->oncpu; 321 return p->oncpu;
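
The spin_acquire() added to finish_lock_switch() above is half of a lockdep ownership handover: rq->lock is taken by the outgoing task in schedule() but released here by the incoming one, so without the annotation lockdep would see prev acquire a lock it never releases and next release a lock it never acquired. The matching spin_release() is added in context_switch() further down. Schematically, using the same annotation calls (kernel context assumed; not a standalone program):

	/* prev's context, rq->lock held: */
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);	  /* prev disowns it */
	switch_to(prev, next, prev);

	/* next's context, rq->lock still physically held: */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); /* next adopts it  */
	spin_unlock_irq(&rq->lock);			  /* books balanced  */
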
@@ -320,7 +324,7 @@ static inline int task_running(runqueue_t *rq, task_t *p)
320#endif 324#endif
321} 325}
322 326
323static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) 327static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
324{ 328{
325#ifdef CONFIG_SMP 329#ifdef CONFIG_SMP
326 /* 330 /*
@@ -337,7 +341,7 @@ static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
337#endif 341#endif
338} 342}
339 343
340static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) 344static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
341{ 345{
342#ifdef CONFIG_SMP 346#ifdef CONFIG_SMP
343 /* 347 /*
@@ -358,10 +362,10 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
358 * __task_rq_lock - lock the runqueue a given task resides on. 362 * __task_rq_lock - lock the runqueue a given task resides on.
359 * Must be called with interrupts disabled. 363 * Must be called with interrupts disabled.
360 */ 364 */
361static inline runqueue_t *__task_rq_lock(task_t *p) 365static inline struct rq *__task_rq_lock(struct task_struct *p)
362 __acquires(rq->lock) 366 __acquires(rq->lock)
363{ 367{
364 struct runqueue *rq; 368 struct rq *rq;
365 369
366repeat_lock_task: 370repeat_lock_task:
367 rq = task_rq(p); 371 rq = task_rq(p);
@@ -378,10 +382,10 @@ repeat_lock_task:
378 * interrupts. Note the ordering: we can safely lookup the task_rq without 382 * interrupts. Note the ordering: we can safely lookup the task_rq without
379 * explicitly disabling preemption. 383 * explicitly disabling preemption.
380 */ 384 */
381static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) 385static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
382 __acquires(rq->lock) 386 __acquires(rq->lock)
383{ 387{
384 struct runqueue *rq; 388 struct rq *rq;
385 389
386repeat_lock_task: 390repeat_lock_task:
387 local_irq_save(*flags); 391 local_irq_save(*flags);
@@ -394,13 +398,13 @@ repeat_lock_task:
394 return rq; 398 return rq;
395} 399}
396 400
397static inline void __task_rq_unlock(runqueue_t *rq) 401static inline void __task_rq_unlock(struct rq *rq)
398 __releases(rq->lock) 402 __releases(rq->lock)
399{ 403{
400 spin_unlock(&rq->lock); 404 spin_unlock(&rq->lock);
401} 405}
402 406
403static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) 407static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
404 __releases(rq->lock) 408 __releases(rq->lock)
405{ 409{
406 spin_unlock_irqrestore(&rq->lock, *flags); 410 spin_unlock_irqrestore(&rq->lock, *flags);
@@ -420,7 +424,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
420 seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); 424 seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
421 seq_printf(seq, "timestamp %lu\n", jiffies); 425 seq_printf(seq, "timestamp %lu\n", jiffies);
422 for_each_online_cpu(cpu) { 426 for_each_online_cpu(cpu) {
423 runqueue_t *rq = cpu_rq(cpu); 427 struct rq *rq = cpu_rq(cpu);
424#ifdef CONFIG_SMP 428#ifdef CONFIG_SMP
425 struct sched_domain *sd; 429 struct sched_domain *sd;
426 int dcnt = 0; 430 int dcnt = 0;
@@ -507,10 +511,10 @@ struct file_operations proc_schedstat_operations = {
507/* 511/*
508 * rq_lock - lock a given runqueue and disable interrupts. 512 * rq_lock - lock a given runqueue and disable interrupts.
509 */ 513 */
510static inline runqueue_t *this_rq_lock(void) 514static inline struct rq *this_rq_lock(void)
511 __acquires(rq->lock) 515 __acquires(rq->lock)
512{ 516{
513 runqueue_t *rq; 517 struct rq *rq;
514 518
515 local_irq_disable(); 519 local_irq_disable();
516 rq = this_rq(); 520 rq = this_rq();
@@ -535,7 +539,7 @@ static inline runqueue_t *this_rq_lock(void)
535 * long it was from the *first* time it was queued to the time that it 539 * long it was from the *first* time it was queued to the time that it
536 * finally hit a cpu. 540 * finally hit a cpu.
537 */ 541 */
538static inline void sched_info_dequeued(task_t *t) 542static inline void sched_info_dequeued(struct task_struct *t)
539{ 543{
540 t->sched_info.last_queued = 0; 544 t->sched_info.last_queued = 0;
541} 545}
@@ -545,10 +549,10 @@ static inline void sched_info_dequeued(task_t *t)
545 * long it was waiting to run. We also note when it began so that we 549 * long it was waiting to run. We also note when it began so that we
546 * can keep stats on how long its timeslice is. 550 * can keep stats on how long its timeslice is.
547 */ 551 */
548static void sched_info_arrive(task_t *t) 552static void sched_info_arrive(struct task_struct *t)
549{ 553{
550 unsigned long now = jiffies, diff = 0; 554 unsigned long now = jiffies, diff = 0;
551 struct runqueue *rq = task_rq(t); 555 struct rq *rq = task_rq(t);
552 556
553 if (t->sched_info.last_queued) 557 if (t->sched_info.last_queued)
554 diff = now - t->sched_info.last_queued; 558 diff = now - t->sched_info.last_queued;
@@ -579,7 +583,7 @@ static void sched_info_arrive(task_t *t)
579 * the timestamp if it is not already set. It's assumed that 583 * the timestamp if it is not already set. It's assumed that
580 * sched_info_dequeued() will clear that stamp when appropriate. 584 * sched_info_dequeued() will clear that stamp when appropriate.
581 */ 585 */
582static inline void sched_info_queued(task_t *t) 586static inline void sched_info_queued(struct task_struct *t)
583{ 587{
584 if (!t->sched_info.last_queued) 588 if (!t->sched_info.last_queued)
585 t->sched_info.last_queued = jiffies; 589 t->sched_info.last_queued = jiffies;
@@ -589,9 +593,9 @@ static inline void sched_info_queued(task_t *t)
589 * Called when a process ceases being the active-running process, either 593 * Called when a process ceases being the active-running process, either
590 * voluntarily or involuntarily. Now we can calculate how long we ran. 594 * voluntarily or involuntarily. Now we can calculate how long we ran.
591 */ 595 */
592static inline void sched_info_depart(task_t *t) 596static inline void sched_info_depart(struct task_struct *t)
593{ 597{
594 struct runqueue *rq = task_rq(t); 598 struct rq *rq = task_rq(t);
595 unsigned long diff = jiffies - t->sched_info.last_arrival; 599 unsigned long diff = jiffies - t->sched_info.last_arrival;
596 600
597 t->sched_info.cpu_time += diff; 601 t->sched_info.cpu_time += diff;
@@ -605,9 +609,10 @@ static inline void sched_info_depart(task_t *t)
605 * their time slice. (This may also be called when switching to or from 609 * their time slice. (This may also be called when switching to or from
606 * the idle task.) We are only called when prev != next. 610 * the idle task.) We are only called when prev != next.
607 */ 611 */
608static inline void sched_info_switch(task_t *prev, task_t *next) 612static inline void
613sched_info_switch(struct task_struct *prev, struct task_struct *next)
609{ 614{
610 struct runqueue *rq = task_rq(prev); 615 struct rq *rq = task_rq(prev);
611 616
612 /* 617 /*
613 * prev now departs the cpu. It's not interesting to record 618 * prev now departs the cpu. It's not interesting to record
@@ -628,7 +633,7 @@ static inline void sched_info_switch(task_t *prev, task_t *next)
628/* 633/*
629 * Adding/removing a task to/from a priority array: 634 * Adding/removing a task to/from a priority array:
630 */ 635 */
631static void dequeue_task(struct task_struct *p, prio_array_t *array) 636static void dequeue_task(struct task_struct *p, struct prio_array *array)
632{ 637{
633 array->nr_active--; 638 array->nr_active--;
634 list_del(&p->run_list); 639 list_del(&p->run_list);
@@ -636,7 +641,7 @@ static void dequeue_task(struct task_struct *p, prio_array_t *array)
636 __clear_bit(p->prio, array->bitmap); 641 __clear_bit(p->prio, array->bitmap);
637} 642}
638 643
639static void enqueue_task(struct task_struct *p, prio_array_t *array) 644static void enqueue_task(struct task_struct *p, struct prio_array *array)
640{ 645{
641 sched_info_queued(p); 646 sched_info_queued(p);
642 list_add_tail(&p->run_list, array->queue + p->prio); 647 list_add_tail(&p->run_list, array->queue + p->prio);
@@ -649,12 +654,13 @@ static void enqueue_task(struct task_struct *p, prio_array_t *array)
649 * Put task to the end of the run list without the overhead of dequeue 654 * Put task to the end of the run list without the overhead of dequeue
650 * followed by enqueue. 655 * followed by enqueue.
651 */ 656 */
652static void requeue_task(struct task_struct *p, prio_array_t *array) 657static void requeue_task(struct task_struct *p, struct prio_array *array)
653{ 658{
654 list_move_tail(&p->run_list, array->queue + p->prio); 659 list_move_tail(&p->run_list, array->queue + p->prio);
655} 660}
656 661
657static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) 662static inline void
663enqueue_task_head(struct task_struct *p, struct prio_array *array)
658{ 664{
659 list_add(&p->run_list, array->queue + p->prio); 665 list_add(&p->run_list, array->queue + p->prio);
660 __set_bit(p->prio, array->bitmap); 666 __set_bit(p->prio, array->bitmap);
@@ -677,7 +683,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
677 * Both properties are important to certain workloads. 683 * Both properties are important to certain workloads.
678 */ 684 */
679 685
680static inline int __normal_prio(task_t *p) 686static inline int __normal_prio(struct task_struct *p)
681{ 687{
682 int bonus, prio; 688 int bonus, prio;
683 689
@@ -713,7 +719,7 @@ static inline int __normal_prio(task_t *p)
713#define RTPRIO_TO_LOAD_WEIGHT(rp) \ 719#define RTPRIO_TO_LOAD_WEIGHT(rp) \
714 (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) 720 (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
715 721
716static void set_load_weight(task_t *p) 722static void set_load_weight(struct task_struct *p)
717{ 723{
718 if (has_rt_policy(p)) { 724 if (has_rt_policy(p)) {
719#ifdef CONFIG_SMP 725#ifdef CONFIG_SMP
@@ -731,23 +737,25 @@ static void set_load_weight(task_t *p)
731 p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio); 737 p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
732} 738}
733 739
734static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p) 740static inline void
741inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
735{ 742{
736 rq->raw_weighted_load += p->load_weight; 743 rq->raw_weighted_load += p->load_weight;
737} 744}
738 745
739static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p) 746static inline void
747dec_raw_weighted_load(struct rq *rq, const struct task_struct *p)
740{ 748{
741 rq->raw_weighted_load -= p->load_weight; 749 rq->raw_weighted_load -= p->load_weight;
742} 750}
743 751
744static inline void inc_nr_running(task_t *p, runqueue_t *rq) 752static inline void inc_nr_running(struct task_struct *p, struct rq *rq)
745{ 753{
746 rq->nr_running++; 754 rq->nr_running++;
747 inc_raw_weighted_load(rq, p); 755 inc_raw_weighted_load(rq, p);
748} 756}
749 757
750static inline void dec_nr_running(task_t *p, runqueue_t *rq) 758static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
751{ 759{
752 rq->nr_running--; 760 rq->nr_running--;
753 dec_raw_weighted_load(rq, p); 761 dec_raw_weighted_load(rq, p);
@@ -760,7 +768,7 @@ static inline void dec_nr_running(task_t *p, runqueue_t *rq)
760 * setprio syscalls, and whenever the interactivity 768 * setprio syscalls, and whenever the interactivity
761 * estimator recalculates. 769 * estimator recalculates.
762 */ 770 */
763static inline int normal_prio(task_t *p) 771static inline int normal_prio(struct task_struct *p)
764{ 772{
765 int prio; 773 int prio;
766 774
@@ -778,7 +786,7 @@ static inline int normal_prio(task_t *p)
778 * interactivity modifiers. Will be RT if the task got 786 * interactivity modifiers. Will be RT if the task got
779 * RT-boosted. If not then it returns p->normal_prio. 787 * RT-boosted. If not then it returns p->normal_prio.
780 */ 788 */
781static int effective_prio(task_t *p) 789static int effective_prio(struct task_struct *p)
782{ 790{
783 p->normal_prio = normal_prio(p); 791 p->normal_prio = normal_prio(p);
784 /* 792 /*
@@ -794,9 +802,9 @@ static int effective_prio(task_t *p)
794/* 802/*
795 * __activate_task - move a task to the runqueue. 803 * __activate_task - move a task to the runqueue.
796 */ 804 */
797static void __activate_task(task_t *p, runqueue_t *rq) 805static void __activate_task(struct task_struct *p, struct rq *rq)
798{ 806{
799 prio_array_t *target = rq->active; 807 struct prio_array *target = rq->active;
800 808
801 if (batch_task(p)) 809 if (batch_task(p))
802 target = rq->expired; 810 target = rq->expired;
@@ -807,7 +815,7 @@ static void __activate_task(task_t *p, runqueue_t *rq)
807/* 815/*
808 * __activate_idle_task - move idle task to the _front_ of runqueue. 816 * __activate_idle_task - move idle task to the _front_ of runqueue.
809 */ 817 */
810static inline void __activate_idle_task(task_t *p, runqueue_t *rq) 818static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
811{ 819{
812 enqueue_task_head(p, rq->active); 820 enqueue_task_head(p, rq->active);
813 inc_nr_running(p, rq); 821 inc_nr_running(p, rq);
@@ -817,7 +825,7 @@ static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
817 * Recalculate p->normal_prio and p->prio after having slept, 825 * Recalculate p->normal_prio and p->prio after having slept,
818 * updating the sleep-average too: 826 * updating the sleep-average too:
819 */ 827 */
820static int recalc_task_prio(task_t *p, unsigned long long now) 828static int recalc_task_prio(struct task_struct *p, unsigned long long now)
821{ 829{
822 /* Caller must always ensure 'now >= p->timestamp' */ 830 /* Caller must always ensure 'now >= p->timestamp' */
823 unsigned long sleep_time = now - p->timestamp; 831 unsigned long sleep_time = now - p->timestamp;
@@ -889,7 +897,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
889 * Update all the scheduling statistics stuff. (sleep average 897 * Update all the scheduling statistics stuff. (sleep average
890 * calculation, priority modifiers, etc.) 898 * calculation, priority modifiers, etc.)
891 */ 899 */
892static void activate_task(task_t *p, runqueue_t *rq, int local) 900static void activate_task(struct task_struct *p, struct rq *rq, int local)
893{ 901{
894 unsigned long long now; 902 unsigned long long now;
895 903
@@ -897,7 +905,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
897#ifdef CONFIG_SMP 905#ifdef CONFIG_SMP
898 if (!local) { 906 if (!local) {
899 /* Compensate for drifting sched_clock */ 907 /* Compensate for drifting sched_clock */
900 runqueue_t *this_rq = this_rq(); 908 struct rq *this_rq = this_rq();
901 now = (now - this_rq->timestamp_last_tick) 909 now = (now - this_rq->timestamp_last_tick)
902 + rq->timestamp_last_tick; 910 + rq->timestamp_last_tick;
903 } 911 }
@@ -936,7 +944,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
936/* 944/*
937 * deactivate_task - remove a task from the runqueue. 945 * deactivate_task - remove a task from the runqueue.
938 */ 946 */
939static void deactivate_task(struct task_struct *p, runqueue_t *rq) 947static void deactivate_task(struct task_struct *p, struct rq *rq)
940{ 948{
941 dec_nr_running(p, rq); 949 dec_nr_running(p, rq);
942 dequeue_task(p, p->array); 950 dequeue_task(p, p->array);
@@ -956,7 +964,7 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
956#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 964#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
957#endif 965#endif
958 966
959static void resched_task(task_t *p) 967static void resched_task(struct task_struct *p)
960{ 968{
961 int cpu; 969 int cpu;
962 970
@@ -977,7 +985,7 @@ static void resched_task(task_t *p)
977 smp_send_reschedule(cpu); 985 smp_send_reschedule(cpu);
978} 986}
979#else 987#else
980static inline void resched_task(task_t *p) 988static inline void resched_task(struct task_struct *p)
981{ 989{
982 assert_spin_locked(&task_rq(p)->lock); 990 assert_spin_locked(&task_rq(p)->lock);
983 set_tsk_need_resched(p); 991 set_tsk_need_resched(p);
@@ -988,7 +996,7 @@ static inline void resched_task(task_t *p)
988 * task_curr - is this task currently executing on a CPU? 996 * task_curr - is this task currently executing on a CPU?
989 * @p: the task in question. 997 * @p: the task in question.
990 */ 998 */
991inline int task_curr(const task_t *p) 999inline int task_curr(const struct task_struct *p)
992{ 1000{
993 return cpu_curr(task_cpu(p)) == p; 1001 return cpu_curr(task_cpu(p)) == p;
994} 1002}
@@ -1000,22 +1008,23 @@ unsigned long weighted_cpuload(const int cpu)
1000} 1008}
1001 1009
1002#ifdef CONFIG_SMP 1010#ifdef CONFIG_SMP
1003typedef struct { 1011struct migration_req {
1004 struct list_head list; 1012 struct list_head list;
1005 1013
1006 task_t *task; 1014 struct task_struct *task;
1007 int dest_cpu; 1015 int dest_cpu;
1008 1016
1009 struct completion done; 1017 struct completion done;
1010} migration_req_t; 1018};
1011 1019
1012/* 1020/*
1013 * The task's runqueue lock must be held. 1021 * The task's runqueue lock must be held.
1014 * Returns true if you have to wait for migration thread. 1022 * Returns true if you have to wait for migration thread.
1015 */ 1023 */
1016static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) 1024static int
1025migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
1017{ 1026{
1018 runqueue_t *rq = task_rq(p); 1027 struct rq *rq = task_rq(p);
1019 1028
1020 /* 1029 /*
1021 * If the task is not on a runqueue (and not running), then 1030 * If the task is not on a runqueue (and not running), then
@@ -1030,6 +1039,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
1030 req->task = p; 1039 req->task = p;
1031 req->dest_cpu = dest_cpu; 1040 req->dest_cpu = dest_cpu;
1032 list_add(&req->list, &rq->migration_queue); 1041 list_add(&req->list, &rq->migration_queue);
1042
1033 return 1; 1043 return 1;
1034} 1044}
1035 1045
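
Dropping migration_req_t for plain struct migration_req follows the kernel style rule that typedefs should not hide struct types. The struct completion member is what makes the request safe to allocate on the caller's stack: migrate_task() queues it on rq->migration_queue and the caller then sleeps on req.done until the per-CPU migration thread has moved the task. The same submit-and-wait idiom in miniature, with generic names:

struct request {
	struct list_head list;
	struct completion done;
};

static void submit_and_wait(struct list_head *queue, spinlock_t *lock)
{
	struct request req;		/* lives on this stack frame    */

	init_completion(&req.done);
	spin_lock(lock);
	list_add(&req.list, queue);	/* hand the request to a worker */
	spin_unlock(lock);
	wait_for_completion(&req.done);	/* worker calls complete()      */
}					/* only now may req go away     */
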
@@ -1042,10 +1052,10 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
1042 * smp_call_function() if an IPI is sent by the same process we are 1052 * smp_call_function() if an IPI is sent by the same process we are
1043 * waiting to become inactive. 1053 * waiting to become inactive.
1044 */ 1054 */
1045void wait_task_inactive(task_t *p) 1055void wait_task_inactive(struct task_struct *p)
1046{ 1056{
1047 unsigned long flags; 1057 unsigned long flags;
1048 runqueue_t *rq; 1058 struct rq *rq;
1049 int preempted; 1059 int preempted;
1050 1060
1051repeat: 1061repeat:
@@ -1076,7 +1086,7 @@ repeat:
1076 * to another CPU then no harm is done and the purpose has been 1086 * to another CPU then no harm is done and the purpose has been
1077 * achieved as well. 1087 * achieved as well.
1078 */ 1088 */
1079void kick_process(task_t *p) 1089void kick_process(struct task_struct *p)
1080{ 1090{
1081 int cpu; 1091 int cpu;
1082 1092
@@ -1096,7 +1106,7 @@ void kick_process(task_t *p)
1096 */ 1106 */
1097static inline unsigned long source_load(int cpu, int type) 1107static inline unsigned long source_load(int cpu, int type)
1098{ 1108{
1099 runqueue_t *rq = cpu_rq(cpu); 1109 struct rq *rq = cpu_rq(cpu);
1100 1110
1101 if (type == 0) 1111 if (type == 0)
1102 return rq->raw_weighted_load; 1112 return rq->raw_weighted_load;
@@ -1110,7 +1120,7 @@ static inline unsigned long source_load(int cpu, int type)
1110 */ 1120 */
1111static inline unsigned long target_load(int cpu, int type) 1121static inline unsigned long target_load(int cpu, int type)
1112{ 1122{
1113 runqueue_t *rq = cpu_rq(cpu); 1123 struct rq *rq = cpu_rq(cpu);
1114 1124
1115 if (type == 0) 1125 if (type == 0)
1116 return rq->raw_weighted_load; 1126 return rq->raw_weighted_load;
@@ -1123,10 +1133,10 @@ static inline unsigned long target_load(int cpu, int type)
1123 */ 1133 */
1124static inline unsigned long cpu_avg_load_per_task(int cpu) 1134static inline unsigned long cpu_avg_load_per_task(int cpu)
1125{ 1135{
1126 runqueue_t *rq = cpu_rq(cpu); 1136 struct rq *rq = cpu_rq(cpu);
1127 unsigned long n = rq->nr_running; 1137 unsigned long n = rq->nr_running;
1128 1138
1129 return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE; 1139 return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
1130} 1140}
1131 1141
1132/* 1142/*
@@ -1279,7 +1289,7 @@ nextlevel:
1279 * Returns the CPU we should wake onto. 1289 * Returns the CPU we should wake onto.
1280 */ 1290 */
1281#if defined(ARCH_HAS_SCHED_WAKE_IDLE) 1291#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
1282static int wake_idle(int cpu, task_t *p) 1292static int wake_idle(int cpu, struct task_struct *p)
1283{ 1293{
1284 cpumask_t tmp; 1294 cpumask_t tmp;
1285 struct sched_domain *sd; 1295 struct sched_domain *sd;
@@ -1302,7 +1312,7 @@ static int wake_idle(int cpu, task_t *p)
1302 return cpu; 1312 return cpu;
1303} 1313}
1304#else 1314#else
1305static inline int wake_idle(int cpu, task_t *p) 1315static inline int wake_idle(int cpu, struct task_struct *p)
1306{ 1316{
1307 return cpu; 1317 return cpu;
1308} 1318}
@@ -1322,15 +1332,15 @@ static inline int wake_idle(int cpu, task_t *p)
1322 * 1332 *
1323 * returns failure only if the task is already active. 1333 * returns failure only if the task is already active.
1324 */ 1334 */
1325static int try_to_wake_up(task_t *p, unsigned int state, int sync) 1335static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
1326{ 1336{
1327 int cpu, this_cpu, success = 0; 1337 int cpu, this_cpu, success = 0;
1328 unsigned long flags; 1338 unsigned long flags;
1329 long old_state; 1339 long old_state;
1330 runqueue_t *rq; 1340 struct rq *rq;
1331#ifdef CONFIG_SMP 1341#ifdef CONFIG_SMP
1332 unsigned long load, this_load;
1333 struct sched_domain *sd, *this_sd = NULL; 1342 struct sched_domain *sd, *this_sd = NULL;
1343 unsigned long load, this_load;
1334 int new_cpu; 1344 int new_cpu;
1335#endif 1345#endif
1336 1346
@@ -1480,15 +1490,14 @@ out:
1480 return success; 1490 return success;
1481} 1491}
1482 1492
1483int fastcall wake_up_process(task_t *p) 1493int fastcall wake_up_process(struct task_struct *p)
1484{ 1494{
1485 return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | 1495 return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
1486 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); 1496 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
1487} 1497}
1488
1489EXPORT_SYMBOL(wake_up_process); 1498EXPORT_SYMBOL(wake_up_process);
1490 1499
1491int fastcall wake_up_state(task_t *p, unsigned int state) 1500int fastcall wake_up_state(struct task_struct *p, unsigned int state)
1492{ 1501{
1493 return try_to_wake_up(p, state, 0); 1502 return try_to_wake_up(p, state, 0);
1494} 1503}
@@ -1497,7 +1506,7 @@ int fastcall wake_up_state(task_t *p, unsigned int state)
1497 * Perform scheduler related setup for a newly forked process p. 1506 * Perform scheduler related setup for a newly forked process p.
1498 * p is forked by current. 1507 * p is forked by current.
1499 */ 1508 */
1500void fastcall sched_fork(task_t *p, int clone_flags) 1509void fastcall sched_fork(struct task_struct *p, int clone_flags)
1501{ 1510{
1502 int cpu = get_cpu(); 1511 int cpu = get_cpu();
1503 1512
@@ -1565,11 +1574,11 @@ void fastcall sched_fork(task_t *p, int clone_flags)
1565 * that must be done for every newly created context, then puts the task 1574 * that must be done for every newly created context, then puts the task
1566 * on the runqueue and wakes it. 1575 * on the runqueue and wakes it.
1567 */ 1576 */
1568void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) 1577void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1569{ 1578{
1579 struct rq *rq, *this_rq;
1570 unsigned long flags; 1580 unsigned long flags;
1571 int this_cpu, cpu; 1581 int this_cpu, cpu;
1572 runqueue_t *rq, *this_rq;
1573 1582
1574 rq = task_rq_lock(p, &flags); 1583 rq = task_rq_lock(p, &flags);
1575 BUG_ON(p->state != TASK_RUNNING); 1584 BUG_ON(p->state != TASK_RUNNING);
@@ -1649,10 +1658,10 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
1649 * artificially, because any timeslice recovered here 1658 * artificially, because any timeslice recovered here
1650 * was given away by the parent in the first place.) 1659 * was given away by the parent in the first place.)
1651 */ 1660 */
1652void fastcall sched_exit(task_t *p) 1661void fastcall sched_exit(struct task_struct *p)
1653{ 1662{
1654 unsigned long flags; 1663 unsigned long flags;
1655 runqueue_t *rq; 1664 struct rq *rq;
1656 1665
1657 /* 1666 /*
1658 * If the child was a (relative-) CPU hog then decrease 1667 * If the child was a (relative-) CPU hog then decrease
@@ -1683,7 +1692,7 @@ void fastcall sched_exit(task_t *p)
1683 * prepare_task_switch sets up locking and calls architecture specific 1692 * prepare_task_switch sets up locking and calls architecture specific
1684 * hooks. 1693 * hooks.
1685 */ 1694 */
1686static inline void prepare_task_switch(runqueue_t *rq, task_t *next) 1695static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
1687{ 1696{
1688 prepare_lock_switch(rq, next); 1697 prepare_lock_switch(rq, next);
1689 prepare_arch_switch(next); 1698 prepare_arch_switch(next);
@@ -1704,7 +1713,7 @@ static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
1704 * with the lock held can cause deadlocks; see schedule() for 1713 * with the lock held can cause deadlocks; see schedule() for
1705 * details.) 1714 * details.)
1706 */ 1715 */
1707static inline void finish_task_switch(runqueue_t *rq, task_t *prev) 1716static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
1708 __releases(rq->lock) 1717 __releases(rq->lock)
1709{ 1718{
1710 struct mm_struct *mm = rq->prev_mm; 1719 struct mm_struct *mm = rq->prev_mm;
@@ -1742,10 +1751,11 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
1742 * schedule_tail - first thing a freshly forked thread must call. 1751 * schedule_tail - first thing a freshly forked thread must call.
1743 * @prev: the thread we just switched away from. 1752 * @prev: the thread we just switched away from.
1744 */ 1753 */
1745asmlinkage void schedule_tail(task_t *prev) 1754asmlinkage void schedule_tail(struct task_struct *prev)
1746 __releases(rq->lock) 1755 __releases(rq->lock)
1747{ 1756{
1748 runqueue_t *rq = this_rq(); 1757 struct rq *rq = this_rq();
1758
1749 finish_task_switch(rq, prev); 1759 finish_task_switch(rq, prev);
1750#ifdef __ARCH_WANT_UNLOCKED_CTXSW 1760#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1751 /* In this case, finish_task_switch does not reenable preemption */ 1761 /* In this case, finish_task_switch does not reenable preemption */
@@ -1759,8 +1769,9 @@ asmlinkage void schedule_tail(task_t *prev)
1759 * context_switch - switch to the new MM and the new 1769 * context_switch - switch to the new MM and the new
1760 * thread's register state. 1770 * thread's register state.
1761 */ 1771 */
1762static inline 1772static inline struct task_struct *
1763task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next) 1773context_switch(struct rq *rq, struct task_struct *prev,
1774 struct task_struct *next)
1764{ 1775{
1765 struct mm_struct *mm = next->mm; 1776 struct mm_struct *mm = next->mm;
1766 struct mm_struct *oldmm = prev->active_mm; 1777 struct mm_struct *oldmm = prev->active_mm;
@@ -1777,6 +1788,7 @@ task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
1777 WARN_ON(rq->prev_mm); 1788 WARN_ON(rq->prev_mm);
1778 rq->prev_mm = oldmm; 1789 rq->prev_mm = oldmm;
1779 } 1790 }
1791 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1780 1792
1781 /* Here we just switch the register state and the stack. */ 1793 /* Here we just switch the register state and the stack. */
1782 switch_to(prev, next, prev); 1794 switch_to(prev, next, prev);
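
The spin_release() inserted just above is the counterpart of the spin_acquire() in finish_lock_switch(): prev drops its lockdep ownership of rq->lock immediately before switch_to(), and the task that resumes on this CPU re-registers the still-held lock there before the real unlock, keeping lockdep's per-task accounting balanced across the context switch.
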
@@ -1857,12 +1869,21 @@ unsigned long nr_active(void)
1857#ifdef CONFIG_SMP 1869#ifdef CONFIG_SMP
1858 1870
1859/* 1871/*
1872 * Is this task likely cache-hot:
1873 */
1874static inline int
1875task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
1876{
1877 return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
1878}
1879
1880/*
1860 * double_rq_lock - safely lock two runqueues 1881 * double_rq_lock - safely lock two runqueues
1861 * 1882 *
1862 * Note this does not disable interrupts like task_rq_lock; 1883 * Note this does not disable interrupts like task_rq_lock;
1863 * you need to do so manually before calling. 1884 * you need to do so manually before calling.
1864 */ 1885 */
1865static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) 1886static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1866 __acquires(rq1->lock) 1887 __acquires(rq1->lock)
1867 __acquires(rq2->lock) 1888 __acquires(rq2->lock)
1868{ 1889{
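
task_hot() returns here as a typed static inline, placed in the SMP section where its only callers live. The predicate itself is unchanged: a task counts as cache-hot if less time has passed since it last ran than the domain's cache_hot_time, a threshold tuned per domain level (SMT siblings share caches; NUMA nodes do not). A stand-alone rendering of the arithmetic with invented numbers:

#include <stdio.h>

static int task_hot(unsigned long long now, unsigned long long last_ran,
		    unsigned long long cache_hot_time)
{
	return (long long)(now - last_ran) < (long long)cache_hot_time;
}

int main(void)
{
	/* last ran 600us ago, domain calls 500us "hot": cold, cheap to move */
	printf("%d\n", task_hot(1000000ULL, 400000ULL, 500000ULL));
	/* last ran 100us ago: still hot, migrating would refill cold caches */
	printf("%d\n", task_hot(1000000ULL, 900000ULL, 500000ULL));
	return 0;
}
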
@@ -1886,7 +1907,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
1886 * Note this does not restore interrupts like task_rq_unlock; 1907 * Note this does not restore interrupts like task_rq_unlock;
1887 * you need to do so manually after calling. 1908 * you need to do so manually after calling.
1888 */ 1909 */
1889static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) 1910static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1890 __releases(rq1->lock) 1911 __releases(rq1->lock)
1891 __releases(rq2->lock) 1912 __releases(rq2->lock)
1892{ 1913{
@@ -1900,7 +1921,7 @@ static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
1900/* 1921/*
1901 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1922 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1902 */ 1923 */
1903static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) 1924static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
1904 __releases(this_rq->lock) 1925 __releases(this_rq->lock)
1905 __acquires(busiest->lock) 1926 __acquires(busiest->lock)
1906 __acquires(this_rq->lock) 1927 __acquires(this_rq->lock)
@@ -1921,11 +1942,11 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
1921 * allow dest_cpu, which will force the cpu onto dest_cpu. Then 1942 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
1922 * the cpu_allowed mask is restored. 1943 * the cpu_allowed mask is restored.
1923 */ 1944 */
1924static void sched_migrate_task(task_t *p, int dest_cpu) 1945static void sched_migrate_task(struct task_struct *p, int dest_cpu)
1925{ 1946{
1926 migration_req_t req; 1947 struct migration_req req;
1927 runqueue_t *rq;
1928 unsigned long flags; 1948 unsigned long flags;
1949 struct rq *rq;
1929 1950
1930 rq = task_rq_lock(p, &flags); 1951 rq = task_rq_lock(p, &flags);
1931 if (!cpu_isset(dest_cpu, p->cpus_allowed) 1952 if (!cpu_isset(dest_cpu, p->cpus_allowed)
@@ -1936,11 +1957,13 @@ static void sched_migrate_task(task_t *p, int dest_cpu)
1936 if (migrate_task(p, dest_cpu, &req)) { 1957 if (migrate_task(p, dest_cpu, &req)) {
1937 /* Need to wait for migration thread (might exit: take ref). */ 1958 /* Need to wait for migration thread (might exit: take ref). */
1938 struct task_struct *mt = rq->migration_thread; 1959 struct task_struct *mt = rq->migration_thread;
1960
1939 get_task_struct(mt); 1961 get_task_struct(mt);
1940 task_rq_unlock(rq, &flags); 1962 task_rq_unlock(rq, &flags);
1941 wake_up_process(mt); 1963 wake_up_process(mt);
1942 put_task_struct(mt); 1964 put_task_struct(mt);
1943 wait_for_completion(&req.done); 1965 wait_for_completion(&req.done);
1966
1944 return; 1967 return;
1945 } 1968 }
1946out: 1969out:
@@ -1964,9 +1987,9 @@ void sched_exec(void)
1964 * pull_task - move a task from a remote runqueue to the local runqueue. 1987 * pull_task - move a task from a remote runqueue to the local runqueue.
1965 * Both runqueues must be locked. 1988 * Both runqueues must be locked.
1966 */ 1989 */
1967static 1990static void pull_task(struct rq *src_rq, struct prio_array *src_array,
1968void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, 1991 struct task_struct *p, struct rq *this_rq,
1969 runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) 1992 struct prio_array *this_array, int this_cpu)
1970{ 1993{
1971 dequeue_task(p, src_array); 1994 dequeue_task(p, src_array);
1972 dec_nr_running(p, src_rq); 1995 dec_nr_running(p, src_rq);
@@ -1987,7 +2010,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
1987 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 2010 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
1988 */ 2011 */
1989static 2012static
1990int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, 2013int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
1991 struct sched_domain *sd, enum idle_type idle, 2014 struct sched_domain *sd, enum idle_type idle,
1992 int *all_pinned) 2015 int *all_pinned)
1993{ 2016{
@@ -2019,6 +2042,7 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
2019} 2042}
2020 2043
2021#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio) 2044#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
2045
2022/* 2046/*
2023 * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted 2047 * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
2024 * load from busiest to this_rq, as part of a balancing operation within 2048 * load from busiest to this_rq, as part of a balancing operation within
@@ -2026,18 +2050,17 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
2026 * 2050 *
2027 * Called with both runqueues locked. 2051 * Called with both runqueues locked.
2028 */ 2052 */
2029static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, 2053static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2030 unsigned long max_nr_move, unsigned long max_load_move, 2054 unsigned long max_nr_move, unsigned long max_load_move,
2031 struct sched_domain *sd, enum idle_type idle, 2055 struct sched_domain *sd, enum idle_type idle,
2032 int *all_pinned) 2056 int *all_pinned)
2033{ 2057{
2034 prio_array_t *array, *dst_array; 2058 int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
2059 best_prio_seen, skip_for_load;
2060 struct prio_array *array, *dst_array;
2035 struct list_head *head, *curr; 2061 struct list_head *head, *curr;
2036 int idx, pulled = 0, pinned = 0, this_best_prio, busiest_best_prio; 2062 struct task_struct *tmp;
2037 int busiest_best_prio_seen;
2038 int skip_for_load; /* skip the task based on weighted load issues */
2039 long rem_load_move; 2063 long rem_load_move;
2040 task_t *tmp;
2041 2064
2042 if (max_nr_move == 0 || max_load_move == 0) 2065 if (max_nr_move == 0 || max_load_move == 0)
2043 goto out; 2066 goto out;
@@ -2045,15 +2068,15 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
2045 rem_load_move = max_load_move; 2068 rem_load_move = max_load_move;
2046 pinned = 1; 2069 pinned = 1;
2047 this_best_prio = rq_best_prio(this_rq); 2070 this_best_prio = rq_best_prio(this_rq);
2048 busiest_best_prio = rq_best_prio(busiest); 2071 best_prio = rq_best_prio(busiest);
2049 /* 2072 /*
2050 * Enable handling of the case where there is more than one task 2073 * Enable handling of the case where there is more than one task
2051 * with the best priority. If the current running task is one 2074 * with the best priority. If the current running task is one
2052 * of those with prio==busiest_best_prio we know it won't be moved 2075 * of those with prio==best_prio we know it won't be moved
2053 * and therefore it's safe to override the skip (based on load) of 2076 * and therefore it's safe to override the skip (based on load) of
2054 * any task we find with that prio. 2077 * any task we find with that prio.
2055 */ 2078 */
2056 busiest_best_prio_seen = busiest_best_prio == busiest->curr->prio; 2079 best_prio_seen = best_prio == busiest->curr->prio;
2057 2080
2058 /* 2081 /*
2059 * We first consider expired tasks. Those will likely not be 2082 * We first consider expired tasks. Those will likely not be
@@ -2089,7 +2112,7 @@ skip_bitmap:
2089 head = array->queue + idx; 2112 head = array->queue + idx;
2090 curr = head->prev; 2113 curr = head->prev;
2091skip_queue: 2114skip_queue:
2092 tmp = list_entry(curr, task_t, run_list); 2115 tmp = list_entry(curr, struct task_struct, run_list);
2093 2116
2094 curr = curr->prev; 2117 curr = curr->prev;
2095 2118
@@ -2100,10 +2123,11 @@ skip_queue:
2100 */ 2123 */
2101 skip_for_load = tmp->load_weight > rem_load_move; 2124 skip_for_load = tmp->load_weight > rem_load_move;
2102 if (skip_for_load && idx < this_best_prio) 2125 if (skip_for_load && idx < this_best_prio)
2103 skip_for_load = !busiest_best_prio_seen && idx == busiest_best_prio; 2126 skip_for_load = !best_prio_seen && idx == best_prio;
2104 if (skip_for_load || 2127 if (skip_for_load ||
2105 !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) { 2128 !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
2106 busiest_best_prio_seen |= idx == busiest_best_prio; 2129
2130 best_prio_seen |= idx == best_prio;
2107 if (curr != head) 2131 if (curr != head)
2108 goto skip_queue; 2132 goto skip_queue;
2109 idx++; 2133 idx++;
@@ -2146,8 +2170,8 @@ out:
2146 2170
2147/* 2171/*
2148 * find_busiest_group finds and returns the busiest CPU group within the 2172 * find_busiest_group finds and returns the busiest CPU group within the
2149 * domain. It calculates and returns the amount of weighted load which should be 2173 * domain. It calculates and returns the amount of weighted load which
2150 * moved to restore balance via the imbalance parameter. 2174 * should be moved to restore balance via the imbalance parameter.
2151 */ 2175 */
2152static struct sched_group * 2176static struct sched_group *
2153find_busiest_group(struct sched_domain *sd, int this_cpu, 2177find_busiest_group(struct sched_domain *sd, int this_cpu,
@@ -2188,7 +2212,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2188 sum_weighted_load = sum_nr_running = avg_load = 0; 2212 sum_weighted_load = sum_nr_running = avg_load = 0;
2189 2213
2190 for_each_cpu_mask(i, group->cpumask) { 2214 for_each_cpu_mask(i, group->cpumask) {
2191 runqueue_t *rq = cpu_rq(i); 2215 struct rq *rq = cpu_rq(i);
2192 2216
2193 if (*sd_idle && !idle_cpu(i)) 2217 if (*sd_idle && !idle_cpu(i))
2194 *sd_idle = 0; 2218 *sd_idle = 0;
@@ -2269,7 +2293,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2269 * capacity but still has some space to pick up some load 2293 * capacity but still has some space to pick up some load
2270 * from other group and save more power 2294 * from other group and save more power
2271 */ 2295 */
2272 if (sum_nr_running <= group_capacity - 1) 2296 if (sum_nr_running <= group_capacity - 1) {
2273 if (sum_nr_running > leader_nr_running || 2297 if (sum_nr_running > leader_nr_running ||
2274 (sum_nr_running == leader_nr_running && 2298 (sum_nr_running == leader_nr_running &&
2275 first_cpu(group->cpumask) > 2299 first_cpu(group->cpumask) >
@@ -2277,7 +2301,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
2277 group_leader = group; 2301 group_leader = group;
2278 leader_nr_running = sum_nr_running; 2302 leader_nr_running = sum_nr_running;
2279 } 2303 }
2280 2304 }
2281group_next: 2305group_next:
2282#endif 2306#endif
2283 group = group->next; 2307 group = group->next;
@@ -2332,8 +2356,7 @@ group_next:
2332 * moved 2356 * moved
2333 */ 2357 */
2334 if (*imbalance < busiest_load_per_task) { 2358 if (*imbalance < busiest_load_per_task) {
2335 unsigned long pwr_now, pwr_move; 2359 unsigned long tmp, pwr_now, pwr_move;
2336 unsigned long tmp;
2337 unsigned int imbn; 2360 unsigned int imbn;
2338 2361
2339small_imbalance: 2362small_imbalance:
@@ -2405,22 +2428,23 @@ ret:
2405/* 2428/*
2406 * find_busiest_queue - find the busiest runqueue among the cpus in group. 2429 * find_busiest_queue - find the busiest runqueue among the cpus in group.
2407 */ 2430 */
2408static runqueue_t *find_busiest_queue(struct sched_group *group, 2431static struct rq *
2409 enum idle_type idle, unsigned long imbalance) 2432find_busiest_queue(struct sched_group *group, enum idle_type idle,
2433 unsigned long imbalance)
2410{ 2434{
2435 struct rq *busiest = NULL, *rq;
2411 unsigned long max_load = 0; 2436 unsigned long max_load = 0;
2412 runqueue_t *busiest = NULL, *rqi;
2413 int i; 2437 int i;
2414 2438
2415 for_each_cpu_mask(i, group->cpumask) { 2439 for_each_cpu_mask(i, group->cpumask) {
2416 rqi = cpu_rq(i); 2440 rq = cpu_rq(i);
2417 2441
2418 if (rqi->nr_running == 1 && rqi->raw_weighted_load > imbalance) 2442 if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
2419 continue; 2443 continue;
2420 2444
2421 if (rqi->raw_weighted_load > max_load) { 2445 if (rq->raw_weighted_load > max_load) {
2422 max_load = rqi->raw_weighted_load; 2446 max_load = rq->raw_weighted_load;
2423 busiest = rqi; 2447 busiest = rq;
2424 } 2448 }
2425 } 2449 }
2426 2450
@@ -2433,22 +2457,24 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
2433 */ 2457 */
2434#define MAX_PINNED_INTERVAL 512 2458#define MAX_PINNED_INTERVAL 512
2435 2459
2436#define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0) 2460static inline unsigned long minus_1_or_zero(unsigned long n)
2461{
2462 return n > 0 ? n - 1 : 0;
2463}
2464
2437/* 2465/*
2438 * Check this_cpu to ensure it is balanced within domain. Attempt to move 2466 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2439 * tasks if there is an imbalance. 2467 * tasks if there is an imbalance.
2440 * 2468 *
2441 * Called with this_rq unlocked. 2469 * Called with this_rq unlocked.
2442 */ 2470 */
2443static int load_balance(int this_cpu, runqueue_t *this_rq, 2471static int load_balance(int this_cpu, struct rq *this_rq,
2444 struct sched_domain *sd, enum idle_type idle) 2472 struct sched_domain *sd, enum idle_type idle)
2445{ 2473{
2474 int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
2446 struct sched_group *group; 2475 struct sched_group *group;
2447 runqueue_t *busiest;
2448 unsigned long imbalance; 2476 unsigned long imbalance;
2449 int nr_moved, all_pinned = 0; 2477 struct rq *busiest;
2450 int active_balance = 0;
2451 int sd_idle = 0;
2452 2478
2453 if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && 2479 if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
2454 !sched_smt_power_savings) 2480 !sched_smt_power_savings)
@@ -2482,8 +2508,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
2482 */ 2508 */
2483 double_rq_lock(this_rq, busiest); 2509 double_rq_lock(this_rq, busiest);
2484 nr_moved = move_tasks(this_rq, this_cpu, busiest, 2510 nr_moved = move_tasks(this_rq, this_cpu, busiest,
2485 minus_1_or_zero(busiest->nr_running), 2511 minus_1_or_zero(busiest->nr_running),
2486 imbalance, sd, idle, &all_pinned); 2512 imbalance, sd, idle, &all_pinned);
2487 double_rq_unlock(this_rq, busiest); 2513 double_rq_unlock(this_rq, busiest);
2488 2514
2489 /* All tasks on this runqueue were pinned by CPU affinity */ 2515 /* All tasks on this runqueue were pinned by CPU affinity */
@@ -2556,7 +2582,8 @@ out_one_pinned:
2556 (sd->balance_interval < sd->max_interval)) 2582 (sd->balance_interval < sd->max_interval))
2557 sd->balance_interval *= 2; 2583 sd->balance_interval *= 2;
2558 2584
2559 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) 2585 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2586 !sched_smt_power_savings)
2560 return -1; 2587 return -1;
2561 return 0; 2588 return 0;
2562} 2589}
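
The minus_1_or_zero() conversion a few hunks up is the same cleanup in miniature: turning the macro into a static inline gives n a real type and, just as importantly, evaluates the argument exactly once where the macro expanded it twice. A compilable demonstration, with a counting helper invented for the purpose:

#include <stdio.h>

#define MINUS_1_OR_ZERO(n) ((n) > 0 ? (n) - 1 : 0)	/* expands n twice  */

static inline unsigned long minus_1_or_zero(unsigned long n)
{
	return n > 0 ? n - 1 : 0;			/* evaluates n once */
}

static unsigned long calls;

static unsigned long next_count(void)
{
	calls++;
	return 3;
}

int main(void)
{
	calls = 0;
	(void)MINUS_1_OR_ZERO(next_count());
	printf("macro:  argument evaluated %lu times\n", calls);	/* 2 */

	calls = 0;
	(void)minus_1_or_zero(next_count());
	printf("inline: argument evaluated %lu times\n", calls);	/* 1 */
	return 0;
}
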
@@ -2568,11 +2595,11 @@ out_one_pinned:
2568 * Called from schedule when this_rq is about to become idle (NEWLY_IDLE). 2595 * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
2569 * this_rq is locked. 2596 * this_rq is locked.
2570 */ 2597 */
2571static int load_balance_newidle(int this_cpu, runqueue_t *this_rq, 2598static int
2572 struct sched_domain *sd) 2599load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
2573{ 2600{
2574 struct sched_group *group; 2601 struct sched_group *group;
2575 runqueue_t *busiest = NULL; 2602 struct rq *busiest = NULL;
2576 unsigned long imbalance; 2603 unsigned long imbalance;
2577 int nr_moved = 0; 2604 int nr_moved = 0;
2578 int sd_idle = 0; 2605 int sd_idle = 0;
@@ -2618,9 +2645,11 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
2618 2645
2619out_balanced: 2646out_balanced:
2620 schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); 2647 schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
2621 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) 2648 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2649 !sched_smt_power_savings)
2622 return -1; 2650 return -1;
2623 sd->nr_balance_failed = 0; 2651 sd->nr_balance_failed = 0;
2652
2624 return 0; 2653 return 0;
2625} 2654}
2626 2655
@@ -2628,16 +2657,15 @@ out_balanced:
2628 * idle_balance is called by schedule() if this_cpu is about to become 2657 * idle_balance is called by schedule() if this_cpu is about to become
2629 * idle. Attempts to pull tasks from other CPUs. 2658 * idle. Attempts to pull tasks from other CPUs.
2630 */ 2659 */
2631static void idle_balance(int this_cpu, runqueue_t *this_rq) 2660static void idle_balance(int this_cpu, struct rq *this_rq)
2632{ 2661{
2633 struct sched_domain *sd; 2662 struct sched_domain *sd;
2634 2663
2635 for_each_domain(this_cpu, sd) { 2664 for_each_domain(this_cpu, sd) {
2636 if (sd->flags & SD_BALANCE_NEWIDLE) { 2665 if (sd->flags & SD_BALANCE_NEWIDLE) {
2637 if (load_balance_newidle(this_cpu, this_rq, sd)) { 2666 /* If we've pulled tasks over stop searching: */
2638 /* We've pulled tasks over so stop searching */ 2667 if (load_balance_newidle(this_cpu, this_rq, sd))
2639 break; 2668 break;
2640 }
2641 } 2669 }
2642 } 2670 }
2643} 2671}
@@ -2650,14 +2678,14 @@ static void idle_balance(int this_cpu, runqueue_t *this_rq)
2650 * 2678 *
2651 * Called with busiest_rq locked. 2679 * Called with busiest_rq locked.
2652 */ 2680 */
2653static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu) 2681static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
2654{ 2682{
2655 struct sched_domain *sd;
2656 runqueue_t *target_rq;
2657 int target_cpu = busiest_rq->push_cpu; 2683 int target_cpu = busiest_rq->push_cpu;
2684 struct sched_domain *sd;
2685 struct rq *target_rq;
2658 2686
2687 /* Is there any task to move? */
2659 if (busiest_rq->nr_running <= 1) 2688 if (busiest_rq->nr_running <= 1)
2660 /* no task to move */
2661 return; 2689 return;
2662 2690
2663 target_rq = cpu_rq(target_cpu); 2691 target_rq = cpu_rq(target_cpu);
@@ -2675,21 +2703,20 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
2675 /* Search for an sd spanning us and the target CPU. */ 2703 /* Search for an sd spanning us and the target CPU. */
2676 for_each_domain(target_cpu, sd) { 2704 for_each_domain(target_cpu, sd) {
2677 if ((sd->flags & SD_LOAD_BALANCE) && 2705 if ((sd->flags & SD_LOAD_BALANCE) &&
2678 cpu_isset(busiest_cpu, sd->span)) 2706 cpu_isset(busiest_cpu, sd->span))
2679 break; 2707 break;
2680 } 2708 }
2681 2709
2682 if (unlikely(sd == NULL)) 2710 if (likely(sd)) {
2683 goto out; 2711 schedstat_inc(sd, alb_cnt);
2684
2685 schedstat_inc(sd, alb_cnt);
2686 2712
2687 if (move_tasks(target_rq, target_cpu, busiest_rq, 1, 2713 if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
2688 RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL)) 2714 RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
2689 schedstat_inc(sd, alb_pushed); 2715 NULL))
2690 else 2716 schedstat_inc(sd, alb_pushed);
2691 schedstat_inc(sd, alb_failed); 2717 else
2692out: 2718 schedstat_inc(sd, alb_failed);
2719 }
2693 spin_unlock(&target_rq->lock); 2720 spin_unlock(&target_rq->lock);
2694} 2721}
2695 2722
@@ -2702,23 +2729,27 @@ out:
2702 * Balancing parameters are set up in arch_init_sched_domains. 2729 * Balancing parameters are set up in arch_init_sched_domains.
2703 */ 2730 */
2704 2731
2705/* Don't have all balancing operations going off at once */ 2732/* Don't have all balancing operations going off at once: */
2706#define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS) 2733static inline unsigned long cpu_offset(int cpu)
2734{
2735 return jiffies + cpu * HZ / NR_CPUS;
2736}
2707 2737
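The macro-to-inline conversion above also changes the contract slightly: the old code computed jiffies + CPU_OFFSET(this_cpu) at the call site, while cpu_offset() now folds the jiffies base in and returns the staggered deadline directly. A standalone sketch of the staggering arithmetic, with illustrative HZ/NR_CPUS values rather than ones from this config:

#include <stdio.h>

#define HZ      1000	/* illustrative, not from this config */
#define NR_CPUS 4	/* illustrative, not from this config */

static unsigned long cpu_offset(int cpu, unsigned long jiffies)
{
	/* each CPU rebalances 1/NR_CPUS of a second after the previous one;
	 * the kernel version reads the global jiffies instead of a parameter */
	return jiffies + cpu * HZ / NR_CPUS;
}

int main(void)
{
	unsigned long jiffies = 100000;	/* pretend tick counter */
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d balances at jiffy %lu\n",
		       cpu, cpu_offset(cpu, jiffies));	/* 100000, 100250, 100500, 100750 */
	return 0;
}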
2708static void rebalance_tick(int this_cpu, runqueue_t *this_rq, 2738static void
2709 enum idle_type idle) 2739rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
2710{ 2740{
2711 unsigned long old_load, this_load; 2741 unsigned long this_load, interval, j = cpu_offset(this_cpu);
2712 unsigned long j = jiffies + CPU_OFFSET(this_cpu);
2713 struct sched_domain *sd; 2742 struct sched_domain *sd;
2714 int i; 2743 int i, scale;
2715 2744
2716 this_load = this_rq->raw_weighted_load; 2745 this_load = this_rq->raw_weighted_load;
2717 /* Update our load */ 2746
2718 for (i = 0; i < 3; i++) { 2747 /* Update our load: */
2719 unsigned long new_load = this_load; 2748 for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
2720 int scale = 1 << i; 2749 unsigned long old_load, new_load;
2750
2721 old_load = this_rq->cpu_load[i]; 2751 old_load = this_rq->cpu_load[i];
2752 new_load = this_load;
2722 /* 2753 /*
2723 * Round up the averaging division if load is increasing. This 2754 * Round up the averaging division if load is increasing. This
2724 * prevents us from getting stuck on 9 if the load is 10, for 2755 * prevents us from getting stuck on 9 if the load is 10, for
@@ -2730,8 +2761,6 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
2730 } 2761 }
2731 2762
2732 for_each_domain(this_cpu, sd) { 2763 for_each_domain(this_cpu, sd) {
2733 unsigned long interval;
2734
2735 if (!(sd->flags & SD_LOAD_BALANCE)) 2764 if (!(sd->flags & SD_LOAD_BALANCE))
2736 continue; 2765 continue;
2737 2766
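The loop body that the hunk window cuts off applies a decaying average of the form (old_load * (scale - 1) + new_load) / scale, plus the round-up the comment describes when load is rising. A runnable toy of that update, reconstructed from the surrounding context (so treat the exact formula as a sketch), showing how plain integer division gets stuck below a constant load of 10 while the rounded version reaches it:

#include <stdio.h>

static unsigned long update(unsigned long old_load, unsigned long new_load,
			    unsigned long scale, int round_up)
{
	if (round_up && new_load > old_load)
		new_load += scale - 1;	/* round the division up when rising */
	return (old_load * (scale - 1) + new_load) / scale;
}

int main(void)
{
	unsigned long plain = 0, rounded = 0;
	int tick;

	/* constant load of 10 fed into a history slot with scale 4 */
	for (tick = 0; tick < 20; tick++) {
		plain = update(plain, 10, 4, 0);
		rounded = update(rounded, 10, 4, 1);
	}
	printf("plain=%lu rounded=%lu\n", plain, rounded);	/* plain=7 rounded=10 */
	return 0;
}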
@@ -2761,17 +2790,18 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
2761/* 2790/*
2762 * on UP we do not need to balance between CPUs: 2791 * on UP we do not need to balance between CPUs:
2763 */ 2792 */
2764static inline void rebalance_tick(int cpu, runqueue_t *rq, enum idle_type idle) 2793static inline void rebalance_tick(int cpu, struct rq *rq, enum idle_type idle)
2765{ 2794{
2766} 2795}
2767static inline void idle_balance(int cpu, runqueue_t *rq) 2796static inline void idle_balance(int cpu, struct rq *rq)
2768{ 2797{
2769} 2798}
2770#endif 2799#endif
2771 2800
2772static inline int wake_priority_sleeper(runqueue_t *rq) 2801static inline int wake_priority_sleeper(struct rq *rq)
2773{ 2802{
2774 int ret = 0; 2803 int ret = 0;
2804
2775#ifdef CONFIG_SCHED_SMT 2805#ifdef CONFIG_SCHED_SMT
2776 spin_lock(&rq->lock); 2806 spin_lock(&rq->lock);
2777 /* 2807 /*
@@ -2795,25 +2825,26 @@ EXPORT_PER_CPU_SYMBOL(kstat);
2795 * This is called on clock ticks and on context switches. 2825 * This is called on clock ticks and on context switches.
2796 * Bank in p->sched_time the ns elapsed since the last tick or switch. 2826 * Bank in p->sched_time the ns elapsed since the last tick or switch.
2797 */ 2827 */
2798static inline void update_cpu_clock(task_t *p, runqueue_t *rq, 2828static inline void
2799 unsigned long long now) 2829update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
2800{ 2830{
2801 unsigned long long last = max(p->timestamp, rq->timestamp_last_tick); 2831 p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick);
2802 p->sched_time += now - last;
2803} 2832}
2804 2833
2805/* 2834/*
2806 * Return current->sched_time plus any more ns on the sched_clock 2835 * Return current->sched_time plus any more ns on the sched_clock
2807 * that have not yet been banked. 2836 * that have not yet been banked.
2808 */ 2837 */
2809unsigned long long current_sched_time(const task_t *tsk) 2838unsigned long long current_sched_time(const struct task_struct *p)
2810{ 2839{
2811 unsigned long long ns; 2840 unsigned long long ns;
2812 unsigned long flags; 2841 unsigned long flags;
2842
2813 local_irq_save(flags); 2843 local_irq_save(flags);
2814 ns = max(tsk->timestamp, task_rq(tsk)->timestamp_last_tick); 2844 ns = max(p->timestamp, task_rq(p)->timestamp_last_tick);
2815 ns = tsk->sched_time + (sched_clock() - ns); 2845 ns = p->sched_time + sched_clock() - ns;
2816 local_irq_restore(flags); 2846 local_irq_restore(flags);
2847
2817 return ns; 2848 return ns;
2818} 2849}
2819 2850
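Taken together, update_cpu_clock() banks whole intervals into p->sched_time at every tick and context switch, and current_sched_time() adds the not-yet-banked tail on demand. A simplified standalone model of that pairing (it drops the max() against rq->timestamp_last_tick, which the real code needs to guard against stale per-CPU timestamps):

#include <stdio.h>

struct toy_task {
	unsigned long long sched_time;	/* ns banked so far */
	unsigned long long timestamp;	/* when we last banked */
};

static void bank(struct toy_task *p, unsigned long long now)
{
	p->sched_time += now - p->timestamp;	/* the update_cpu_clock() step */
	p->timestamp = now;
}

static unsigned long long read_time(const struct toy_task *p,
				    unsigned long long now)
{
	return p->sched_time + (now - p->timestamp);	/* banked + unbanked tail */
}

int main(void)
{
	struct toy_task t = { 0, 0 };

	bank(&t, 1000);				/* tick at t=1000ns */
	printf("%llu\n", read_time(&t, 1600));	/* 1600: 1000 banked + 600 tail */
	return 0;
}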
@@ -2827,11 +2858,16 @@ unsigned long long current_sched_time(const task_t *tsk)
2827 * increasing number of running tasks. We also ignore the interactivity 2858 * increasing number of running tasks. We also ignore the interactivity
2828 * if a better static_prio task has expired: 2859 * if a better static_prio task has expired:
2829 */ 2860 */
2830#define EXPIRED_STARVING(rq) \ 2861static inline int expired_starving(struct rq *rq)
2831 ((STARVATION_LIMIT && ((rq)->expired_timestamp && \ 2862{
2832 (jiffies - (rq)->expired_timestamp >= \ 2863 if (rq->curr->static_prio > rq->best_expired_prio)
2833 STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \ 2864 return 1;
2834 ((rq)->curr->static_prio > (rq)->best_expired_prio)) 2865 if (!STARVATION_LIMIT || !rq->expired_timestamp)
2866 return 0;
2867 if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running)
2868 return 1;
2869 return 0;
2870}
2835 2871
2836/* 2872/*
2837 * Account user cpu time to a process. 2873 * Account user cpu time to a process.
@@ -2864,7 +2900,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
2864 cputime_t cputime) 2900 cputime_t cputime)
2865{ 2901{
2866 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 2902 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2867 runqueue_t *rq = this_rq(); 2903 struct rq *rq = this_rq();
2868 cputime64_t tmp; 2904 cputime64_t tmp;
2869 2905
2870 p->stime = cputime_add(p->stime, cputime); 2906 p->stime = cputime_add(p->stime, cputime);
@@ -2894,7 +2930,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
2894{ 2930{
2895 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 2931 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2896 cputime64_t tmp = cputime_to_cputime64(steal); 2932 cputime64_t tmp = cputime_to_cputime64(steal);
2897 runqueue_t *rq = this_rq(); 2933 struct rq *rq = this_rq();
2898 2934
2899 if (p == rq->idle) { 2935 if (p == rq->idle) {
2900 p->stime = cputime_add(p->stime, steal); 2936 p->stime = cputime_add(p->stime, steal);
@@ -2915,10 +2951,10 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
2915 */ 2951 */
2916void scheduler_tick(void) 2952void scheduler_tick(void)
2917{ 2953{
2918 int cpu = smp_processor_id();
2919 runqueue_t *rq = this_rq();
2920 task_t *p = current;
2921 unsigned long long now = sched_clock(); 2954 unsigned long long now = sched_clock();
2955 struct task_struct *p = current;
2956 int cpu = smp_processor_id();
2957 struct rq *rq = cpu_rq(cpu);
2922 2958
2923 update_cpu_clock(p, rq, now); 2959 update_cpu_clock(p, rq, now);
2924 2960
@@ -2968,7 +3004,7 @@ void scheduler_tick(void)
2968 3004
2969 if (!rq->expired_timestamp) 3005 if (!rq->expired_timestamp)
2970 rq->expired_timestamp = jiffies; 3006 rq->expired_timestamp = jiffies;
2971 if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) { 3007 if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
2972 enqueue_task(p, rq->expired); 3008 enqueue_task(p, rq->expired);
2973 if (p->static_prio < rq->best_expired_prio) 3009 if (p->static_prio < rq->best_expired_prio)
2974 rq->best_expired_prio = p->static_prio; 3010 rq->best_expired_prio = p->static_prio;
@@ -3007,7 +3043,7 @@ out:
3007} 3043}
3008 3044
3009#ifdef CONFIG_SCHED_SMT 3045#ifdef CONFIG_SCHED_SMT
3010static inline void wakeup_busy_runqueue(runqueue_t *rq) 3046static inline void wakeup_busy_runqueue(struct rq *rq)
3011{ 3047{
3012 /* If an SMT runqueue is sleeping due to priority reasons wake it up */ 3048 /* If an SMT runqueue is sleeping due to priority reasons wake it up */
3013 if (rq->curr == rq->idle && rq->nr_running) 3049 if (rq->curr == rq->idle && rq->nr_running)
@@ -3033,7 +3069,7 @@ static void wake_sleeping_dependent(int this_cpu)
3033 return; 3069 return;
3034 3070
3035 for_each_cpu_mask(i, sd->span) { 3071 for_each_cpu_mask(i, sd->span) {
3036 runqueue_t *smt_rq = cpu_rq(i); 3072 struct rq *smt_rq = cpu_rq(i);
3037 3073
3038 if (i == this_cpu) 3074 if (i == this_cpu)
3039 continue; 3075 continue;
@@ -3050,7 +3086,8 @@ static void wake_sleeping_dependent(int this_cpu)
3050 * utilize, if another task runs on a sibling. This models the 3086 * utilize, if another task runs on a sibling. This models the
3051 * slowdown effect of other tasks running on siblings: 3087 * slowdown effect of other tasks running on siblings:
3052 */ 3088 */
3053static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) 3089static inline unsigned long
3090smt_slice(struct task_struct *p, struct sched_domain *sd)
3054{ 3091{
3055 return p->time_slice * (100 - sd->per_cpu_gain) / 100; 3092 return p->time_slice * (100 - sd->per_cpu_gain) / 100;
3056} 3093}
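smt_slice() itself is pure arithmetic: a sibling may only use the fraction of the slice that SMT contention leaves over. A tiny standalone check; the per_cpu_gain of 25 is merely a plausible SMT figure, not a value read from this tree:

#include <stdio.h>

static unsigned long smt_slice(unsigned long time_slice, int per_cpu_gain)
{
	return time_slice * (100 - per_cpu_gain) / 100;
}

int main(void)
{
	/* with a 25% per-CPU gain, a sibling keeps 75% of a 100ms slice */
	printf("%lu\n", smt_slice(100, 25));	/* prints 75 */
	return 0;
}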
@@ -3061,7 +3098,8 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
3061 * acquire their lock. As we only trylock the normal locking order does not 3098 * acquire their lock. As we only trylock the normal locking order does not
3062 * need to be obeyed. 3099 * need to be obeyed.
3063 */ 3100 */
3064static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) 3101static int
3102dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
3065{ 3103{
3066 struct sched_domain *tmp, *sd = NULL; 3104 struct sched_domain *tmp, *sd = NULL;
3067 int ret = 0, i; 3105 int ret = 0, i;
@@ -3081,8 +3119,8 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
3081 return 0; 3119 return 0;
3082 3120
3083 for_each_cpu_mask(i, sd->span) { 3121 for_each_cpu_mask(i, sd->span) {
3084 runqueue_t *smt_rq; 3122 struct task_struct *smt_curr;
3085 task_t *smt_curr; 3123 struct rq *smt_rq;
3086 3124
3087 if (i == this_cpu) 3125 if (i == this_cpu)
3088 continue; 3126 continue;
@@ -3127,9 +3165,8 @@ unlock:
3127static inline void wake_sleeping_dependent(int this_cpu) 3165static inline void wake_sleeping_dependent(int this_cpu)
3128{ 3166{
3129} 3167}
3130 3168static inline int
3131static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq, 3169dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
3132 task_t *p)
3133{ 3170{
3134 return 0; 3171 return 0;
3135} 3172}
@@ -3142,12 +3179,13 @@ void fastcall add_preempt_count(int val)
3142 /* 3179 /*
3143 * Underflow? 3180 * Underflow?
3144 */ 3181 */
3145 BUG_ON((preempt_count() < 0)); 3182 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3183 return;
3146 preempt_count() += val; 3184 preempt_count() += val;
3147 /* 3185 /*
3148 * Spinlock count overflowing soon? 3186 * Spinlock count overflowing soon?
3149 */ 3187 */
3150 BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10); 3188 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
3151} 3189}
3152EXPORT_SYMBOL(add_preempt_count); 3190EXPORT_SYMBOL(add_preempt_count);
3153 3191
@@ -3156,11 +3194,15 @@ void fastcall sub_preempt_count(int val)
3156 /* 3194 /*
3157 * Underflow? 3195 * Underflow?
3158 */ 3196 */
3159 BUG_ON(val > preempt_count()); 3197 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3198 return;
3160 /* 3199 /*
3161 * Is the spinlock portion underflowing? 3200 * Is the spinlock portion underflowing?
3162 */ 3201 */
3163 BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK)); 3202 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3203 !(preempt_count() & PREEMPT_MASK)))
3204 return;
3205
3164 preempt_count() -= val; 3206 preempt_count() -= val;
3165} 3207}
3166EXPORT_SYMBOL(sub_preempt_count); 3208EXPORT_SYMBOL(sub_preempt_count);
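The BUG_ON() to DEBUG_LOCKS_WARN_ON() switch turns these fatal assertions into warn-and-continue checks: the macro evaluates to the truth of its condition, which is what lets the two functions above bail out early instead of taking the machine down. Paraphrased from this era's <linux/debug_locks.h>, so treat the body as a sketch rather than the verbatim definition:

#define DEBUG_LOCKS_WARN_ON(c)						\
({									\
	int __ret = 0;							\
									\
	if (unlikely(c)) {						\
		if (debug_locks_off())	/* stop further lock checking */\
			WARN_ON(1);	/* one backtrace, keep running */\
		__ret = 1;						\
	}								\
	__ret;								\
})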
@@ -3178,14 +3220,14 @@ static inline int interactive_sleep(enum sleep_type sleep_type)
3178 */ 3220 */
3179asmlinkage void __sched schedule(void) 3221asmlinkage void __sched schedule(void)
3180{ 3222{
3181 long *switch_count; 3223 struct task_struct *prev, *next;
3182 task_t *prev, *next; 3224 struct prio_array *array;
3183 runqueue_t *rq;
3184 prio_array_t *array;
3185 struct list_head *queue; 3225 struct list_head *queue;
3186 unsigned long long now; 3226 unsigned long long now;
3187 unsigned long run_time; 3227 unsigned long run_time;
3188 int cpu, idx, new_prio; 3228 int cpu, idx, new_prio;
3229 long *switch_count;
3230 struct rq *rq;
3189 3231
3190 /* 3232 /*
3191 * Test if we are atomic. Since do_exit() needs to call into 3233 * Test if we are atomic. Since do_exit() needs to call into
@@ -3275,7 +3317,7 @@ need_resched_nonpreemptible:
3275 3317
3276 idx = sched_find_first_bit(array->bitmap); 3318 idx = sched_find_first_bit(array->bitmap);
3277 queue = array->queue + idx; 3319 queue = array->queue + idx;
3278 next = list_entry(queue->next, task_t, run_list); 3320 next = list_entry(queue->next, struct task_struct, run_list);
3279 3321
3280 if (!rt_task(next) && interactive_sleep(next->sleep_type)) { 3322 if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
3281 unsigned long long delta = now - next->timestamp; 3323 unsigned long long delta = now - next->timestamp;
@@ -3338,7 +3380,6 @@ switch_tasks:
3338 if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) 3380 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3339 goto need_resched; 3381 goto need_resched;
3340} 3382}
3341
3342EXPORT_SYMBOL(schedule); 3383EXPORT_SYMBOL(schedule);
3343 3384
3344#ifdef CONFIG_PREEMPT 3385#ifdef CONFIG_PREEMPT
@@ -3383,7 +3424,6 @@ need_resched:
3383 if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) 3424 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3384 goto need_resched; 3425 goto need_resched;
3385} 3426}
3386
3387EXPORT_SYMBOL(preempt_schedule); 3427EXPORT_SYMBOL(preempt_schedule);
3388 3428
3389/* 3429/*
@@ -3432,10 +3472,8 @@ need_resched:
3432int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, 3472int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
3433 void *key) 3473 void *key)
3434{ 3474{
3435 task_t *p = curr->private; 3475 return try_to_wake_up(curr->private, mode, sync);
3436 return try_to_wake_up(p, mode, sync);
3437} 3476}
3438
3439EXPORT_SYMBOL(default_wake_function); 3477EXPORT_SYMBOL(default_wake_function);
3440 3478
3441/* 3479/*
@@ -3453,13 +3491,11 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
3453 struct list_head *tmp, *next; 3491 struct list_head *tmp, *next;
3454 3492
3455 list_for_each_safe(tmp, next, &q->task_list) { 3493 list_for_each_safe(tmp, next, &q->task_list) {
3456 wait_queue_t *curr; 3494 wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
3457 unsigned flags; 3495 unsigned flags = curr->flags;
3458 curr = list_entry(tmp, wait_queue_t, task_list); 3496
3459 flags = curr->flags;
3460 if (curr->func(curr, mode, sync, key) && 3497 if (curr->func(curr, mode, sync, key) &&
3461 (flags & WQ_FLAG_EXCLUSIVE) && 3498 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
3462 !--nr_exclusive)
3463 break; 3499 break;
3464 } 3500 }
3465} 3501}
@@ -3480,7 +3516,6 @@ void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
3480 __wake_up_common(q, mode, nr_exclusive, 0, key); 3516 __wake_up_common(q, mode, nr_exclusive, 0, key);
3481 spin_unlock_irqrestore(&q->lock, flags); 3517 spin_unlock_irqrestore(&q->lock, flags);
3482} 3518}
3483
3484EXPORT_SYMBOL(__wake_up); 3519EXPORT_SYMBOL(__wake_up);
3485 3520
3486/* 3521/*
@@ -3549,6 +3584,7 @@ EXPORT_SYMBOL(complete_all);
3549void fastcall __sched wait_for_completion(struct completion *x) 3584void fastcall __sched wait_for_completion(struct completion *x)
3550{ 3585{
3551 might_sleep(); 3586 might_sleep();
3587
3552 spin_lock_irq(&x->wait.lock); 3588 spin_lock_irq(&x->wait.lock);
3553 if (!x->done) { 3589 if (!x->done) {
3554 DECLARE_WAITQUEUE(wait, current); 3590 DECLARE_WAITQUEUE(wait, current);
@@ -3693,7 +3729,6 @@ void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
3693 schedule(); 3729 schedule();
3694 SLEEP_ON_TAIL 3730 SLEEP_ON_TAIL
3695} 3731}
3696
3697EXPORT_SYMBOL(interruptible_sleep_on); 3732EXPORT_SYMBOL(interruptible_sleep_on);
3698 3733
3699long fastcall __sched 3734long fastcall __sched
@@ -3709,7 +3744,6 @@ interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
3709 3744
3710 return timeout; 3745 return timeout;
3711} 3746}
3712
3713EXPORT_SYMBOL(interruptible_sleep_on_timeout); 3747EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3714 3748
3715void fastcall __sched sleep_on(wait_queue_head_t *q) 3749void fastcall __sched sleep_on(wait_queue_head_t *q)
@@ -3722,7 +3756,6 @@ void fastcall __sched sleep_on(wait_queue_head_t *q)
3722 schedule(); 3756 schedule();
3723 SLEEP_ON_TAIL 3757 SLEEP_ON_TAIL
3724} 3758}
3725
3726EXPORT_SYMBOL(sleep_on); 3759EXPORT_SYMBOL(sleep_on);
3727 3760
3728long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) 3761long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
@@ -3752,11 +3785,11 @@ EXPORT_SYMBOL(sleep_on_timeout);
3752 * 3785 *
3753 * Used by the rt_mutex code to implement priority inheritance logic. 3786 * Used by the rt_mutex code to implement priority inheritance logic.
3754 */ 3787 */
3755void rt_mutex_setprio(task_t *p, int prio) 3788void rt_mutex_setprio(struct task_struct *p, int prio)
3756{ 3789{
3790 struct prio_array *array;
3757 unsigned long flags; 3791 unsigned long flags;
3758 prio_array_t *array; 3792 struct rq *rq;
3759 runqueue_t *rq;
3760 int oldprio; 3793 int oldprio;
3761 3794
3762 BUG_ON(prio < 0 || prio > MAX_PRIO); 3795 BUG_ON(prio < 0 || prio > MAX_PRIO);
@@ -3793,12 +3826,12 @@ void rt_mutex_setprio(task_t *p, int prio)
3793 3826
3794#endif 3827#endif
3795 3828
3796void set_user_nice(task_t *p, long nice) 3829void set_user_nice(struct task_struct *p, long nice)
3797{ 3830{
3798 unsigned long flags; 3831 struct prio_array *array;
3799 prio_array_t *array;
3800 runqueue_t *rq;
3801 int old_prio, delta; 3832 int old_prio, delta;
3833 unsigned long flags;
3834 struct rq *rq;
3802 3835
3803 if (TASK_NICE(p) == nice || nice < -20 || nice > 19) 3836 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3804 return; 3837 return;
@@ -3849,10 +3882,11 @@ EXPORT_SYMBOL(set_user_nice);
3849 * @p: task 3882 * @p: task
3850 * @nice: nice value 3883 * @nice: nice value
3851 */ 3884 */
3852int can_nice(const task_t *p, const int nice) 3885int can_nice(const struct task_struct *p, const int nice)
3853{ 3886{
3854 /* convert nice value [19,-20] to rlimit style value [1,40] */ 3887 /* convert nice value [19,-20] to rlimit style value [1,40] */
3855 int nice_rlim = 20 - nice; 3888 int nice_rlim = 20 - nice;
3889
3856 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur || 3890 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
3857 capable(CAP_SYS_NICE)); 3891 capable(CAP_SYS_NICE));
3858} 3892}
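The comment's [19,-20] to [1,40] mapping is just 20 - nice: the friendliest task maps to rlimit value 1 and the greediest to 40, which is the number compared against RLIMIT_NICE. Spelled out:

#include <stdio.h>

static int nice_to_rlim(int nice)
{
	return 20 - nice;	/* nice 19 -> 1, nice 0 -> 20, nice -20 -> 40 */
}

int main(void)
{
	printf("%d %d %d\n",
	       nice_to_rlim(19), nice_to_rlim(0), nice_to_rlim(-20)); /* 1 20 40 */
	return 0;
}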
@@ -3868,8 +3902,7 @@ int can_nice(const task_t *p, const int nice)
3868 */ 3902 */
3869asmlinkage long sys_nice(int increment) 3903asmlinkage long sys_nice(int increment)
3870{ 3904{
3871 int retval; 3905 long nice, retval;
3872 long nice;
3873 3906
3874 /* 3907 /*
3875 * Setpriority might change our priority at the same moment. 3908 * Setpriority might change our priority at the same moment.
@@ -3908,7 +3941,7 @@ asmlinkage long sys_nice(int increment)
3908 * RT tasks are offset by -200. Normal tasks are centered 3941 * RT tasks are offset by -200. Normal tasks are centered
3909 * around 0, value goes from -16 to +15. 3942 * around 0, value goes from -16 to +15.
3910 */ 3943 */
3911int task_prio(const task_t *p) 3944int task_prio(const struct task_struct *p)
3912{ 3945{
3913 return p->prio - MAX_RT_PRIO; 3946 return p->prio - MAX_RT_PRIO;
3914} 3947}
@@ -3917,7 +3950,7 @@ int task_prio(const task_t *p)
3917 * task_nice - return the nice value of a given task. 3950 * task_nice - return the nice value of a given task.
3918 * @p: the task in question. 3951 * @p: the task in question.
3919 */ 3952 */
3920int task_nice(const task_t *p) 3953int task_nice(const struct task_struct *p)
3921{ 3954{
3922 return TASK_NICE(p); 3955 return TASK_NICE(p);
3923} 3956}
@@ -3936,7 +3969,7 @@ int idle_cpu(int cpu)
3936 * idle_task - return the idle task for a given cpu. 3969 * idle_task - return the idle task for a given cpu.
3937 * @cpu: the processor in question. 3970 * @cpu: the processor in question.
3938 */ 3971 */
3939task_t *idle_task(int cpu) 3972struct task_struct *idle_task(int cpu)
3940{ 3973{
3941 return cpu_rq(cpu)->idle; 3974 return cpu_rq(cpu)->idle;
3942} 3975}
@@ -3945,7 +3978,7 @@ task_t *idle_task(int cpu)
3945 * find_process_by_pid - find a process with a matching PID value. 3978 * find_process_by_pid - find a process with a matching PID value.
3946 * @pid: the pid in question. 3979 * @pid: the pid in question.
3947 */ 3980 */
3948static inline task_t *find_process_by_pid(pid_t pid) 3981static inline struct task_struct *find_process_by_pid(pid_t pid)
3949{ 3982{
3950 return pid ? find_task_by_pid(pid) : current; 3983 return pid ? find_task_by_pid(pid) : current;
3951} 3984}
@@ -3954,6 +3987,7 @@ static inline task_t *find_process_by_pid(pid_t pid)
3954static void __setscheduler(struct task_struct *p, int policy, int prio) 3987static void __setscheduler(struct task_struct *p, int policy, int prio)
3955{ 3988{
3956 BUG_ON(p->array); 3989 BUG_ON(p->array);
3990
3957 p->policy = policy; 3991 p->policy = policy;
3958 p->rt_priority = prio; 3992 p->rt_priority = prio;
3959 p->normal_prio = normal_prio(p); 3993 p->normal_prio = normal_prio(p);
@@ -3977,11 +4011,10 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
3977int sched_setscheduler(struct task_struct *p, int policy, 4011int sched_setscheduler(struct task_struct *p, int policy,
3978 struct sched_param *param) 4012 struct sched_param *param)
3979{ 4013{
3980 int retval; 4014 int retval, oldprio, oldpolicy = -1;
3981 int oldprio, oldpolicy = -1; 4015 struct prio_array *array;
3982 prio_array_t *array;
3983 unsigned long flags; 4016 unsigned long flags;
3984 runqueue_t *rq; 4017 struct rq *rq;
3985 4018
3986 /* may grab non-irq protected spin_locks */ 4019 /* may grab non-irq protected spin_locks */
3987 BUG_ON(in_interrupt()); 4020 BUG_ON(in_interrupt());
@@ -4079,9 +4112,9 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
4079static int 4112static int
4080do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 4113do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4081{ 4114{
4082 int retval;
4083 struct sched_param lparam; 4115 struct sched_param lparam;
4084 struct task_struct *p; 4116 struct task_struct *p;
4117 int retval;
4085 4118
4086 if (!param || pid < 0) 4119 if (!param || pid < 0)
4087 return -EINVAL; 4120 return -EINVAL;
@@ -4097,6 +4130,7 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4097 read_unlock_irq(&tasklist_lock); 4130 read_unlock_irq(&tasklist_lock);
4098 retval = sched_setscheduler(p, policy, &lparam); 4131 retval = sched_setscheduler(p, policy, &lparam);
4099 put_task_struct(p); 4132 put_task_struct(p);
4133
4100 return retval; 4134 return retval;
4101} 4135}
4102 4136
@@ -4132,8 +4166,8 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
4132 */ 4166 */
4133asmlinkage long sys_sched_getscheduler(pid_t pid) 4167asmlinkage long sys_sched_getscheduler(pid_t pid)
4134{ 4168{
4169 struct task_struct *p;
4135 int retval = -EINVAL; 4170 int retval = -EINVAL;
4136 task_t *p;
4137 4171
4138 if (pid < 0) 4172 if (pid < 0)
4139 goto out_nounlock; 4173 goto out_nounlock;
@@ -4160,8 +4194,8 @@ out_nounlock:
4160asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) 4194asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
4161{ 4195{
4162 struct sched_param lp; 4196 struct sched_param lp;
4197 struct task_struct *p;
4163 int retval = -EINVAL; 4198 int retval = -EINVAL;
4164 task_t *p;
4165 4199
4166 if (!param || pid < 0) 4200 if (!param || pid < 0)
4167 goto out_nounlock; 4201 goto out_nounlock;
@@ -4194,9 +4228,9 @@ out_unlock:
4194 4228
4195long sched_setaffinity(pid_t pid, cpumask_t new_mask) 4229long sched_setaffinity(pid_t pid, cpumask_t new_mask)
4196{ 4230{
4197 task_t *p;
4198 int retval;
4199 cpumask_t cpus_allowed; 4231 cpumask_t cpus_allowed;
4232 struct task_struct *p;
4233 int retval;
4200 4234
4201 lock_cpu_hotplug(); 4235 lock_cpu_hotplug();
4202 read_lock(&tasklist_lock); 4236 read_lock(&tasklist_lock);
@@ -4282,8 +4316,8 @@ cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
4282 4316
4283long sched_getaffinity(pid_t pid, cpumask_t *mask) 4317long sched_getaffinity(pid_t pid, cpumask_t *mask)
4284{ 4318{
4319 struct task_struct *p;
4285 int retval; 4320 int retval;
4286 task_t *p;
4287 4321
4288 lock_cpu_hotplug(); 4322 lock_cpu_hotplug();
4289 read_lock(&tasklist_lock); 4323 read_lock(&tasklist_lock);
@@ -4342,9 +4376,8 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
4342 */ 4376 */
4343asmlinkage long sys_sched_yield(void) 4377asmlinkage long sys_sched_yield(void)
4344{ 4378{
4345 runqueue_t *rq = this_rq_lock(); 4379 struct rq *rq = this_rq_lock();
4346 prio_array_t *array = current->array; 4380 struct prio_array *array = current->array, *target = rq->expired;
4347 prio_array_t *target = rq->expired;
4348 4381
4349 schedstat_inc(rq, yld_cnt); 4382 schedstat_inc(rq, yld_cnt);
4350 /* 4383 /*
@@ -4378,6 +4411,7 @@ asmlinkage long sys_sched_yield(void)
4378 * no need to preempt or enable interrupts: 4411 * no need to preempt or enable interrupts:
4379 */ 4412 */
4380 __release(rq->lock); 4413 __release(rq->lock);
4414 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4381 _raw_spin_unlock(&rq->lock); 4415 _raw_spin_unlock(&rq->lock);
4382 preempt_enable_no_resched(); 4416 preempt_enable_no_resched();
4383 4417
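The added spin_release() exists because this fast path unlocks with _raw_spin_unlock(), bypassing the spin_unlock() wrapper that would normally do the lockdep bookkeeping; without it, lockdep would still consider rq->lock held across the reschedule. The resulting idiom, repeated in cond_resched_lock() in the next hunk, is:

__release(rq->lock);				/* sparse annotation   */
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);	/* lockdep bookkeeping */
_raw_spin_unlock(&rq->lock);			/* the actual unlock   */
preempt_enable_no_resched();

The 1 and _THIS_IP_ arguments are assumed here to mirror what the spin_unlock() wrapper itself would pass to the lockdep core.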
@@ -4441,6 +4475,7 @@ int cond_resched_lock(spinlock_t *lock)
4441 spin_lock(lock); 4475 spin_lock(lock);
4442 } 4476 }
4443 if (need_resched() && __resched_legal()) { 4477 if (need_resched() && __resched_legal()) {
4478 spin_release(&lock->dep_map, 1, _THIS_IP_);
4444 _raw_spin_unlock(lock); 4479 _raw_spin_unlock(lock);
4445 preempt_enable_no_resched(); 4480 preempt_enable_no_resched();
4446 __cond_resched(); 4481 __cond_resched();
@@ -4456,7 +4491,9 @@ int __sched cond_resched_softirq(void)
4456 BUG_ON(!in_softirq()); 4491 BUG_ON(!in_softirq());
4457 4492
4458 if (need_resched() && __resched_legal()) { 4493 if (need_resched() && __resched_legal()) {
4459 __local_bh_enable(); 4494 raw_local_irq_disable();
4495 _local_bh_enable();
4496 raw_local_irq_enable();
4460 __cond_resched(); 4497 __cond_resched();
4461 local_bh_disable(); 4498 local_bh_disable();
4462 return 1; 4499 return 1;
@@ -4476,7 +4513,6 @@ void __sched yield(void)
4476 set_current_state(TASK_RUNNING); 4513 set_current_state(TASK_RUNNING);
4477 sys_sched_yield(); 4514 sys_sched_yield();
4478} 4515}
4479
4480EXPORT_SYMBOL(yield); 4516EXPORT_SYMBOL(yield);
4481 4517
4482/* 4518/*
@@ -4488,18 +4524,17 @@ EXPORT_SYMBOL(yield);
4488 */ 4524 */
4489void __sched io_schedule(void) 4525void __sched io_schedule(void)
4490{ 4526{
4491 struct runqueue *rq = &__raw_get_cpu_var(runqueues); 4527 struct rq *rq = &__raw_get_cpu_var(runqueues);
4492 4528
4493 atomic_inc(&rq->nr_iowait); 4529 atomic_inc(&rq->nr_iowait);
4494 schedule(); 4530 schedule();
4495 atomic_dec(&rq->nr_iowait); 4531 atomic_dec(&rq->nr_iowait);
4496} 4532}
4497
4498EXPORT_SYMBOL(io_schedule); 4533EXPORT_SYMBOL(io_schedule);
4499 4534
4500long __sched io_schedule_timeout(long timeout) 4535long __sched io_schedule_timeout(long timeout)
4501{ 4536{
4502 struct runqueue *rq = &__raw_get_cpu_var(runqueues); 4537 struct rq *rq = &__raw_get_cpu_var(runqueues);
4503 long ret; 4538 long ret;
4504 4539
4505 atomic_inc(&rq->nr_iowait); 4540 atomic_inc(&rq->nr_iowait);
@@ -4566,9 +4601,9 @@ asmlinkage long sys_sched_get_priority_min(int policy)
4566asmlinkage 4601asmlinkage
4567long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) 4602long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
4568{ 4603{
4604 struct task_struct *p;
4569 int retval = -EINVAL; 4605 int retval = -EINVAL;
4570 struct timespec t; 4606 struct timespec t;
4571 task_t *p;
4572 4607
4573 if (pid < 0) 4608 if (pid < 0)
4574 goto out_nounlock; 4609 goto out_nounlock;
@@ -4596,28 +4631,32 @@ out_unlock:
4596 4631
4597static inline struct task_struct *eldest_child(struct task_struct *p) 4632static inline struct task_struct *eldest_child(struct task_struct *p)
4598{ 4633{
4599 if (list_empty(&p->children)) return NULL; 4634 if (list_empty(&p->children))
4635 return NULL;
4600 return list_entry(p->children.next,struct task_struct,sibling); 4636 return list_entry(p->children.next,struct task_struct,sibling);
4601} 4637}
4602 4638
4603static inline struct task_struct *older_sibling(struct task_struct *p) 4639static inline struct task_struct *older_sibling(struct task_struct *p)
4604{ 4640{
4605 if (p->sibling.prev==&p->parent->children) return NULL; 4641 if (p->sibling.prev==&p->parent->children)
4642 return NULL;
4606 return list_entry(p->sibling.prev,struct task_struct,sibling); 4643 return list_entry(p->sibling.prev,struct task_struct,sibling);
4607} 4644}
4608 4645
4609static inline struct task_struct *younger_sibling(struct task_struct *p) 4646static inline struct task_struct *younger_sibling(struct task_struct *p)
4610{ 4647{
4611 if (p->sibling.next==&p->parent->children) return NULL; 4648 if (p->sibling.next==&p->parent->children)
4649 return NULL;
4612 return list_entry(p->sibling.next,struct task_struct,sibling); 4650 return list_entry(p->sibling.next,struct task_struct,sibling);
4613} 4651}
4614 4652
4615static void show_task(task_t *p) 4653static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
4654
4655static void show_task(struct task_struct *p)
4616{ 4656{
4617 task_t *relative; 4657 struct task_struct *relative;
4618 unsigned state;
4619 unsigned long free = 0; 4658 unsigned long free = 0;
4620 static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; 4659 unsigned state;
4621 4660
4622 printk("%-13.13s ", p->comm); 4661 printk("%-13.13s ", p->comm);
4623 state = p->state ? __ffs(p->state) + 1 : 0; 4662 state = p->state ? __ffs(p->state) + 1 : 0;
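The one-letter state column comes from treating p->state as a bitmask and using the position of its lowest set bit, plus one, as an index into stat_nam[], with zero meaning runnable. The same trick stands alone below, using the 1-based __builtin_ffsl() where the kernel uses the 0-based __ffs() plus one:

#include <stdio.h>

static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };

static unsigned decode(unsigned long state)
{
	/* __builtin_ffsl() is 1-based, so the kernel's "+ 1" is built in */
	return state ? (unsigned)__builtin_ffsl(state) : 0;
}

int main(void)
{
	printf("%s\n", stat_nam[decode(0)]);	/* R: running              */
	printf("%s\n", stat_nam[decode(1)]);	/* S: TASK_INTERRUPTIBLE   */
	printf("%s\n", stat_nam[decode(2)]);	/* D: TASK_UNINTERRUPTIBLE */
	return 0;
}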
@@ -4668,7 +4707,7 @@ static void show_task(task_t *p)
4668 4707
4669void show_state(void) 4708void show_state(void)
4670{ 4709{
4671 task_t *g, *p; 4710 struct task_struct *g, *p;
4672 4711
4673#if (BITS_PER_LONG == 32) 4712#if (BITS_PER_LONG == 32)
4674 printk("\n" 4713 printk("\n"
@@ -4690,7 +4729,7 @@ void show_state(void)
4690 } while_each_thread(g, p); 4729 } while_each_thread(g, p);
4691 4730
4692 read_unlock(&tasklist_lock); 4731 read_unlock(&tasklist_lock);
4693 mutex_debug_show_all_locks(); 4732 debug_show_all_locks();
4694} 4733}
4695 4734
4696/** 4735/**
@@ -4701,9 +4740,9 @@ void show_state(void)
4701 * NOTE: this function does not set the idle thread's NEED_RESCHED 4740 * NOTE: this function does not set the idle thread's NEED_RESCHED
4702 * flag, to make booting more robust. 4741 * flag, to make booting more robust.
4703 */ 4742 */
4704void __devinit init_idle(task_t *idle, int cpu) 4743void __devinit init_idle(struct task_struct *idle, int cpu)
4705{ 4744{
4706 runqueue_t *rq = cpu_rq(cpu); 4745 struct rq *rq = cpu_rq(cpu);
4707 unsigned long flags; 4746 unsigned long flags;
4708 4747
4709 idle->timestamp = sched_clock(); 4748 idle->timestamp = sched_clock();
@@ -4742,7 +4781,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
4742/* 4781/*
4743 * This is how migration works: 4782 * This is how migration works:
4744 * 4783 *
4745 * 1) we queue a migration_req_t structure in the source CPU's 4784 * 1) we queue a struct migration_req structure in the source CPU's
4746 * runqueue and wake up that CPU's migration thread. 4785 * runqueue and wake up that CPU's migration thread.
4747 * 2) we down() the locked semaphore => thread blocks. 4786 * 2) we down() the locked semaphore => thread blocks.
4748 * 3) migration thread wakes up (implicitly it forces the migrated 4787 * 3) migration thread wakes up (implicitly it forces the migrated
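For orientation, the request structure these hunks rename is roughly the following; the field names are inferred from the list_entry(), wake-up and complete() calls visible in this diff, so treat it as a sketch rather than a verbatim quote:

struct migration_req {
	struct list_head list;		/* chained onto rq->migration_queue */
	struct task_struct *task;	/* who to move                      */
	int dest_cpu;			/* where to move it                 */
	struct completion done;		/* signalled once the move is done  */
};

A requester fills one in on its own stack, links it onto the source runqueue's migration_queue, wakes rq->migration_thread, and sleeps in wait_for_completion(&req->done); the thread dequeues it, performs __migrate_task(), and signals complete(&req->done).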
@@ -4764,12 +4803,12 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
4764 * task must not exit() & deallocate itself prematurely. The 4803 * task must not exit() & deallocate itself prematurely. The
4765 * call is not atomic; no spinlocks may be held. 4804 * call is not atomic; no spinlocks may be held.
4766 */ 4805 */
4767int set_cpus_allowed(task_t *p, cpumask_t new_mask) 4806int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
4768{ 4807{
4808 struct migration_req req;
4769 unsigned long flags; 4809 unsigned long flags;
4810 struct rq *rq;
4770 int ret = 0; 4811 int ret = 0;
4771 migration_req_t req;
4772 runqueue_t *rq;
4773 4812
4774 rq = task_rq_lock(p, &flags); 4813 rq = task_rq_lock(p, &flags);
4775 if (!cpus_intersects(new_mask, cpu_online_map)) { 4814 if (!cpus_intersects(new_mask, cpu_online_map)) {
@@ -4792,9 +4831,9 @@ int set_cpus_allowed(task_t *p, cpumask_t new_mask)
4792 } 4831 }
4793out: 4832out:
4794 task_rq_unlock(rq, &flags); 4833 task_rq_unlock(rq, &flags);
4834
4795 return ret; 4835 return ret;
4796} 4836}
4797
4798EXPORT_SYMBOL_GPL(set_cpus_allowed); 4837EXPORT_SYMBOL_GPL(set_cpus_allowed);
4799 4838
4800/* 4839/*
@@ -4810,7 +4849,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
4810 */ 4849 */
4811static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 4850static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4812{ 4851{
4813 runqueue_t *rq_dest, *rq_src; 4852 struct rq *rq_dest, *rq_src;
4814 int ret = 0; 4853 int ret = 0;
4815 4854
4816 if (unlikely(cpu_is_offline(dest_cpu))) 4855 if (unlikely(cpu_is_offline(dest_cpu)))
@@ -4855,16 +4894,16 @@ out:
4855 */ 4894 */
4856static int migration_thread(void *data) 4895static int migration_thread(void *data)
4857{ 4896{
4858 runqueue_t *rq;
4859 int cpu = (long)data; 4897 int cpu = (long)data;
4898 struct rq *rq;
4860 4899
4861 rq = cpu_rq(cpu); 4900 rq = cpu_rq(cpu);
4862 BUG_ON(rq->migration_thread != current); 4901 BUG_ON(rq->migration_thread != current);
4863 4902
4864 set_current_state(TASK_INTERRUPTIBLE); 4903 set_current_state(TASK_INTERRUPTIBLE);
4865 while (!kthread_should_stop()) { 4904 while (!kthread_should_stop()) {
4905 struct migration_req *req;
4866 struct list_head *head; 4906 struct list_head *head;
4867 migration_req_t *req;
4868 4907
4869 try_to_freeze(); 4908 try_to_freeze();
4870 4909
@@ -4888,7 +4927,7 @@ static int migration_thread(void *data)
4888 set_current_state(TASK_INTERRUPTIBLE); 4927 set_current_state(TASK_INTERRUPTIBLE);
4889 continue; 4928 continue;
4890 } 4929 }
4891 req = list_entry(head->next, migration_req_t, list); 4930 req = list_entry(head->next, struct migration_req, list);
4892 list_del_init(head->next); 4931 list_del_init(head->next);
4893 4932
4894 spin_unlock(&rq->lock); 4933 spin_unlock(&rq->lock);
@@ -4913,28 +4952,28 @@ wait_to_die:
4913 4952
4914#ifdef CONFIG_HOTPLUG_CPU 4953#ifdef CONFIG_HOTPLUG_CPU
4915/* Figure out where task on dead CPU should go, use force if necessary. */ 4954/* Figure out where task on dead CPU should go, use force if necessary. */
4916static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk) 4955static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
4917{ 4956{
4918 runqueue_t *rq;
4919 unsigned long flags; 4957 unsigned long flags;
4920 int dest_cpu;
4921 cpumask_t mask; 4958 cpumask_t mask;
4959 struct rq *rq;
4960 int dest_cpu;
4922 4961
4923restart: 4962restart:
4924 /* On same node? */ 4963 /* On same node? */
4925 mask = node_to_cpumask(cpu_to_node(dead_cpu)); 4964 mask = node_to_cpumask(cpu_to_node(dead_cpu));
4926 cpus_and(mask, mask, tsk->cpus_allowed); 4965 cpus_and(mask, mask, p->cpus_allowed);
4927 dest_cpu = any_online_cpu(mask); 4966 dest_cpu = any_online_cpu(mask);
4928 4967
4929 /* On any allowed CPU? */ 4968 /* On any allowed CPU? */
4930 if (dest_cpu == NR_CPUS) 4969 if (dest_cpu == NR_CPUS)
4931 dest_cpu = any_online_cpu(tsk->cpus_allowed); 4970 dest_cpu = any_online_cpu(p->cpus_allowed);
4932 4971
4933 /* No more Mr. Nice Guy. */ 4972 /* No more Mr. Nice Guy. */
4934 if (dest_cpu == NR_CPUS) { 4973 if (dest_cpu == NR_CPUS) {
4935 rq = task_rq_lock(tsk, &flags); 4974 rq = task_rq_lock(p, &flags);
4936 cpus_setall(tsk->cpus_allowed); 4975 cpus_setall(p->cpus_allowed);
4937 dest_cpu = any_online_cpu(tsk->cpus_allowed); 4976 dest_cpu = any_online_cpu(p->cpus_allowed);
4938 task_rq_unlock(rq, &flags); 4977 task_rq_unlock(rq, &flags);
4939 4978
4940 /* 4979 /*
@@ -4942,12 +4981,12 @@ restart:
4942 * kernel threads (both mm NULL), since they never 4981 * kernel threads (both mm NULL), since they never
4943 * leave kernel. 4982 * leave kernel.
4944 */ 4983 */
4945 if (tsk->mm && printk_ratelimit()) 4984 if (p->mm && printk_ratelimit())
4946 printk(KERN_INFO "process %d (%s) no " 4985 printk(KERN_INFO "process %d (%s) no "
4947 "longer affine to cpu%d\n", 4986 "longer affine to cpu%d\n",
4948 tsk->pid, tsk->comm, dead_cpu); 4987 p->pid, p->comm, dead_cpu);
4949 } 4988 }
4950 if (!__migrate_task(tsk, dead_cpu, dest_cpu)) 4989 if (!__migrate_task(p, dead_cpu, dest_cpu))
4951 goto restart; 4990 goto restart;
4952} 4991}
4953 4992
@@ -4958,9 +4997,9 @@ restart:
4958 * their home CPUs. So we just add the counter to another CPU's counter, 4997 * their home CPUs. So we just add the counter to another CPU's counter,
4959 * to keep the global sum constant after CPU-down: 4998 * to keep the global sum constant after CPU-down:
4960 */ 4999 */
4961static void migrate_nr_uninterruptible(runqueue_t *rq_src) 5000static void migrate_nr_uninterruptible(struct rq *rq_src)
4962{ 5001{
4963 runqueue_t *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); 5002 struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
4964 unsigned long flags; 5003 unsigned long flags;
4965 5004
4966 local_irq_save(flags); 5005 local_irq_save(flags);
@@ -4974,48 +5013,51 @@ static void migrate_nr_uninterruptible(runqueue_t *rq_src)
4974/* Run through task list and migrate tasks from the dead cpu. */ 5013/* Run through task list and migrate tasks from the dead cpu. */
4975static void migrate_live_tasks(int src_cpu) 5014static void migrate_live_tasks(int src_cpu)
4976{ 5015{
4977 struct task_struct *tsk, *t; 5016 struct task_struct *p, *t;
4978 5017
4979 write_lock_irq(&tasklist_lock); 5018 write_lock_irq(&tasklist_lock);
4980 5019
4981 do_each_thread(t, tsk) { 5020 do_each_thread(t, p) {
4982 if (tsk == current) 5021 if (p == current)
4983 continue; 5022 continue;
4984 5023
4985 if (task_cpu(tsk) == src_cpu) 5024 if (task_cpu(p) == src_cpu)
4986 move_task_off_dead_cpu(src_cpu, tsk); 5025 move_task_off_dead_cpu(src_cpu, p);
4987 } while_each_thread(t, tsk); 5026 } while_each_thread(t, p);
4988 5027
4989 write_unlock_irq(&tasklist_lock); 5028 write_unlock_irq(&tasklist_lock);
4990} 5029}
4991 5030
4992/* Schedules idle task to be the next runnable task on current CPU. 5031/* Schedules idle task to be the next runnable task on current CPU.
4993 * It does so by boosting its priority to highest possible and adding it to 5032 * It does so by boosting its priority to highest possible and adding it to
4994 * the _front_ of runqueue. Used by CPU offline code. 5033 * the _front_ of the runqueue. Used by CPU offline code.
4995 */ 5034 */
4996void sched_idle_next(void) 5035void sched_idle_next(void)
4997{ 5036{
4998 int cpu = smp_processor_id(); 5037 int this_cpu = smp_processor_id();
4999 runqueue_t *rq = this_rq(); 5038 struct rq *rq = cpu_rq(this_cpu);
5000 struct task_struct *p = rq->idle; 5039 struct task_struct *p = rq->idle;
5001 unsigned long flags; 5040 unsigned long flags;
5002 5041
5003 /* cpu has to be offline */ 5042 /* cpu has to be offline */
5004 BUG_ON(cpu_online(cpu)); 5043 BUG_ON(cpu_online(this_cpu));
5005 5044
5006 /* Strictly not necessary since rest of the CPUs are stopped by now 5045 /*
5007 * and interrupts disabled on current cpu. 5046 * Strictly not necessary since rest of the CPUs are stopped by now
5047 * and interrupts disabled on the current cpu.
5008 */ 5048 */
5009 spin_lock_irqsave(&rq->lock, flags); 5049 spin_lock_irqsave(&rq->lock, flags);
5010 5050
5011 __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1); 5051 __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
5012 /* Add idle task to _front_ of it's priority queue */ 5052
5053 /* Add idle task to the _front_ of its priority queue: */
5013 __activate_idle_task(p, rq); 5054 __activate_idle_task(p, rq);
5014 5055
5015 spin_unlock_irqrestore(&rq->lock, flags); 5056 spin_unlock_irqrestore(&rq->lock, flags);
5016} 5057}
5017 5058
5018/* Ensures that the idle task is using init_mm right before its cpu goes 5059/*
5060 * Ensures that the idle task is using init_mm right before its cpu goes
5019 * offline. 5061 * offline.
5020 */ 5062 */
5021void idle_task_exit(void) 5063void idle_task_exit(void)
@@ -5029,17 +5071,17 @@ void idle_task_exit(void)
5029 mmdrop(mm); 5071 mmdrop(mm);
5030} 5072}
5031 5073
5032static void migrate_dead(unsigned int dead_cpu, task_t *tsk) 5074static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
5033{ 5075{
5034 struct runqueue *rq = cpu_rq(dead_cpu); 5076 struct rq *rq = cpu_rq(dead_cpu);
5035 5077
5036 /* Must be exiting, otherwise would be on tasklist. */ 5078 /* Must be exiting, otherwise would be on tasklist. */
5037 BUG_ON(tsk->exit_state != EXIT_ZOMBIE && tsk->exit_state != EXIT_DEAD); 5079 BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
5038 5080
5039 /* Cannot have done final schedule yet: would have vanished. */ 5081 /* Cannot have done final schedule yet: would have vanished. */
5040 BUG_ON(tsk->flags & PF_DEAD); 5082 BUG_ON(p->flags & PF_DEAD);
5041 5083
5042 get_task_struct(tsk); 5084 get_task_struct(p);
5043 5085
5044 /* 5086 /*
5045 * Drop lock around migration; if someone else moves it, 5087 * Drop lock around migration; if someone else moves it,
@@ -5047,25 +5089,25 @@ static void migrate_dead(unsigned int dead_cpu, task_t *tsk)
5047 * fine. 5089 * fine.
5048 */ 5090 */
5049 spin_unlock_irq(&rq->lock); 5091 spin_unlock_irq(&rq->lock);
5050 move_task_off_dead_cpu(dead_cpu, tsk); 5092 move_task_off_dead_cpu(dead_cpu, p);
5051 spin_lock_irq(&rq->lock); 5093 spin_lock_irq(&rq->lock);
5052 5094
5053 put_task_struct(tsk); 5095 put_task_struct(p);
5054} 5096}
5055 5097
5056/* release_task() removes task from tasklist, so we won't find dead tasks. */ 5098/* release_task() removes task from tasklist, so we won't find dead tasks. */
5057static void migrate_dead_tasks(unsigned int dead_cpu) 5099static void migrate_dead_tasks(unsigned int dead_cpu)
5058{ 5100{
5059 unsigned arr, i; 5101 struct rq *rq = cpu_rq(dead_cpu);
5060 struct runqueue *rq = cpu_rq(dead_cpu); 5102 unsigned int arr, i;
5061 5103
5062 for (arr = 0; arr < 2; arr++) { 5104 for (arr = 0; arr < 2; arr++) {
5063 for (i = 0; i < MAX_PRIO; i++) { 5105 for (i = 0; i < MAX_PRIO; i++) {
5064 struct list_head *list = &rq->arrays[arr].queue[i]; 5106 struct list_head *list = &rq->arrays[arr].queue[i];
5107
5065 while (!list_empty(list)) 5108 while (!list_empty(list))
5066 migrate_dead(dead_cpu, 5109 migrate_dead(dead_cpu, list_entry(list->next,
5067 list_entry(list->next, task_t, 5110 struct task_struct, run_list));
5068 run_list));
5069 } 5111 }
5070 } 5112 }
5071} 5113}
@@ -5075,14 +5117,13 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
5075 * migration_call - callback that gets triggered when a CPU is added. 5117 * migration_call - callback that gets triggered when a CPU is added.
5076 * Here we can start up the necessary migration thread for the new CPU. 5118 * Here we can start up the necessary migration thread for the new CPU.
5077 */ 5119 */
5078static int __cpuinit migration_call(struct notifier_block *nfb, 5120static int __cpuinit
5079 unsigned long action, 5121migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5080 void *hcpu)
5081{ 5122{
5082 int cpu = (long)hcpu;
5083 struct task_struct *p; 5123 struct task_struct *p;
5084 struct runqueue *rq; 5124 int cpu = (long)hcpu;
5085 unsigned long flags; 5125 unsigned long flags;
5126 struct rq *rq;
5086 5127
5087 switch (action) { 5128 switch (action) {
5088 case CPU_UP_PREPARE: 5129 case CPU_UP_PREPARE:
@@ -5097,10 +5138,12 @@ static int __cpuinit migration_call(struct notifier_block *nfb,
5097 task_rq_unlock(rq, &flags); 5138 task_rq_unlock(rq, &flags);
5098 cpu_rq(cpu)->migration_thread = p; 5139 cpu_rq(cpu)->migration_thread = p;
5099 break; 5140 break;
5141
5100 case CPU_ONLINE: 5142 case CPU_ONLINE:
5101 /* Strictly unnecessary, as first user will wake it. */ 5143 /* Strictly unnecessary, as first user will wake it. */
5102 wake_up_process(cpu_rq(cpu)->migration_thread); 5144 wake_up_process(cpu_rq(cpu)->migration_thread);
5103 break; 5145 break;
5146
5104#ifdef CONFIG_HOTPLUG_CPU 5147#ifdef CONFIG_HOTPLUG_CPU
5105 case CPU_UP_CANCELED: 5148 case CPU_UP_CANCELED:
5106 if (!cpu_rq(cpu)->migration_thread) 5149 if (!cpu_rq(cpu)->migration_thread)
@@ -5111,6 +5154,7 @@ static int __cpuinit migration_call(struct notifier_block *nfb,
5111 kthread_stop(cpu_rq(cpu)->migration_thread); 5154 kthread_stop(cpu_rq(cpu)->migration_thread);
5112 cpu_rq(cpu)->migration_thread = NULL; 5155 cpu_rq(cpu)->migration_thread = NULL;
5113 break; 5156 break;
5157
5114 case CPU_DEAD: 5158 case CPU_DEAD:
5115 migrate_live_tasks(cpu); 5159 migrate_live_tasks(cpu);
5116 rq = cpu_rq(cpu); 5160 rq = cpu_rq(cpu);
@@ -5131,9 +5175,10 @@ static int __cpuinit migration_call(struct notifier_block *nfb,
5131 * the requestors. */ 5175 * the requestors. */
5132 spin_lock_irq(&rq->lock); 5176 spin_lock_irq(&rq->lock);
5133 while (!list_empty(&rq->migration_queue)) { 5177 while (!list_empty(&rq->migration_queue)) {
5134 migration_req_t *req; 5178 struct migration_req *req;
5179
5135 req = list_entry(rq->migration_queue.next, 5180 req = list_entry(rq->migration_queue.next,
5136 migration_req_t, list); 5181 struct migration_req, list);
5137 list_del_init(&req->list); 5182 list_del_init(&req->list);
5138 complete(&req->done); 5183 complete(&req->done);
5139 } 5184 }
@@ -5155,10 +5200,12 @@ static struct notifier_block __cpuinitdata migration_notifier = {
5155int __init migration_init(void) 5200int __init migration_init(void)
5156{ 5201{
5157 void *cpu = (void *)(long)smp_processor_id(); 5202 void *cpu = (void *)(long)smp_processor_id();
5158 /* Start one for boot CPU. */ 5203
5204 /* Start one for the boot CPU: */
5159 migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); 5205 migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5160 migration_call(&migration_notifier, CPU_ONLINE, cpu); 5206 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5161 register_cpu_notifier(&migration_notifier); 5207 register_cpu_notifier(&migration_notifier);
5208
5162 return 0; 5209 return 0;
5163} 5210}
5164#endif 5211#endif
@@ -5254,7 +5301,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
5254 } while (sd); 5301 } while (sd);
5255} 5302}
5256#else 5303#else
5257#define sched_domain_debug(sd, cpu) {} 5304# define sched_domain_debug(sd, cpu) do { } while (0)
5258#endif 5305#endif
5259 5306
5260static int sd_degenerate(struct sched_domain *sd) 5307static int sd_degenerate(struct sched_domain *sd)
@@ -5280,8 +5327,8 @@ static int sd_degenerate(struct sched_domain *sd)
5280 return 1; 5327 return 1;
5281} 5328}
5282 5329
5283static int sd_parent_degenerate(struct sched_domain *sd, 5330static int
5284 struct sched_domain *parent) 5331sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5285{ 5332{
5286 unsigned long cflags = sd->flags, pflags = parent->flags; 5333 unsigned long cflags = sd->flags, pflags = parent->flags;
5287 5334
@@ -5314,7 +5361,7 @@ static int sd_parent_degenerate(struct sched_domain *sd,
5314 */ 5361 */
5315static void cpu_attach_domain(struct sched_domain *sd, int cpu) 5362static void cpu_attach_domain(struct sched_domain *sd, int cpu)
5316{ 5363{
5317 runqueue_t *rq = cpu_rq(cpu); 5364 struct rq *rq = cpu_rq(cpu);
5318 struct sched_domain *tmp; 5365 struct sched_domain *tmp;
5319 5366
5320 /* Remove the sched domains which do not contribute to scheduling. */ 5367 /* Remove the sched domains which do not contribute to scheduling. */
@@ -5576,8 +5623,8 @@ static void touch_cache(void *__cache, unsigned long __size)
5576/* 5623/*
5577 * Measure the cache-cost of one task migration. Returns in units of nsec. 5624 * Measure the cache-cost of one task migration. Returns in units of nsec.
5578 */ 5625 */
5579static unsigned long long measure_one(void *cache, unsigned long size, 5626static unsigned long long
5580 int source, int target) 5627measure_one(void *cache, unsigned long size, int source, int target)
5581{ 5628{
5582 cpumask_t mask, saved_mask; 5629 cpumask_t mask, saved_mask;
5583 unsigned long long t0, t1, t2, t3, cost; 5630 unsigned long long t0, t1, t2, t3, cost;
@@ -5927,9 +5974,9 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
5927 */ 5974 */
5928static cpumask_t sched_domain_node_span(int node) 5975static cpumask_t sched_domain_node_span(int node)
5929{ 5976{
5930 int i;
5931 cpumask_t span, nodemask;
5932 DECLARE_BITMAP(used_nodes, MAX_NUMNODES); 5977 DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
5978 cpumask_t span, nodemask;
5979 int i;
5933 5980
5934 cpus_clear(span); 5981 cpus_clear(span);
5935 bitmap_zero(used_nodes, MAX_NUMNODES); 5982 bitmap_zero(used_nodes, MAX_NUMNODES);
@@ -5940,6 +5987,7 @@ static cpumask_t sched_domain_node_span(int node)
5940 5987
5941 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { 5988 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
5942 int next_node = find_next_best_node(node, used_nodes); 5989 int next_node = find_next_best_node(node, used_nodes);
5990
5943 nodemask = node_to_cpumask(next_node); 5991 nodemask = node_to_cpumask(next_node);
5944 cpus_or(span, span, nodemask); 5992 cpus_or(span, span, nodemask);
5945 } 5993 }
@@ -5949,19 +5997,23 @@ static cpumask_t sched_domain_node_span(int node)
5949#endif 5997#endif
5950 5998
5951int sched_smt_power_savings = 0, sched_mc_power_savings = 0; 5999int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
6000
5952/* 6001/*
5953 * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we 6002 * SMT sched-domains:
5954 * can switch it on easily if needed.
5955 */ 6003 */
5956#ifdef CONFIG_SCHED_SMT 6004#ifdef CONFIG_SCHED_SMT
5957static DEFINE_PER_CPU(struct sched_domain, cpu_domains); 6005static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
5958static struct sched_group sched_group_cpus[NR_CPUS]; 6006static struct sched_group sched_group_cpus[NR_CPUS];
6007
5959static int cpu_to_cpu_group(int cpu) 6008static int cpu_to_cpu_group(int cpu)
5960{ 6009{
5961 return cpu; 6010 return cpu;
5962} 6011}
5963#endif 6012#endif
5964 6013
6014/*
6015 * multi-core sched-domains:
6016 */
5965#ifdef CONFIG_SCHED_MC 6017#ifdef CONFIG_SCHED_MC
5966static DEFINE_PER_CPU(struct sched_domain, core_domains); 6018static DEFINE_PER_CPU(struct sched_domain, core_domains);
5967static struct sched_group *sched_group_core_bycpu[NR_CPUS]; 6019static struct sched_group *sched_group_core_bycpu[NR_CPUS];
@@ -5981,9 +6033,10 @@ static int cpu_to_core_group(int cpu)
5981 6033
5982static DEFINE_PER_CPU(struct sched_domain, phys_domains); 6034static DEFINE_PER_CPU(struct sched_domain, phys_domains);
5983static struct sched_group *sched_group_phys_bycpu[NR_CPUS]; 6035static struct sched_group *sched_group_phys_bycpu[NR_CPUS];
6036
5984static int cpu_to_phys_group(int cpu) 6037static int cpu_to_phys_group(int cpu)
5985{ 6038{
5986#if defined(CONFIG_SCHED_MC) 6039#ifdef CONFIG_SCHED_MC
5987 cpumask_t mask = cpu_coregroup_map(cpu); 6040 cpumask_t mask = cpu_coregroup_map(cpu);
5988 return first_cpu(mask); 6041 return first_cpu(mask);
5989#elif defined(CONFIG_SCHED_SMT) 6042#elif defined(CONFIG_SCHED_SMT)
@@ -6529,6 +6582,7 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
6529int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) 6582int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6530{ 6583{
6531 int err = 0; 6584 int err = 0;
6585
6532#ifdef CONFIG_SCHED_SMT 6586#ifdef CONFIG_SCHED_SMT
6533 if (smt_capable()) 6587 if (smt_capable())
6534 err = sysfs_create_file(&cls->kset.kobj, 6588 err = sysfs_create_file(&cls->kset.kobj,
@@ -6548,7 +6602,8 @@ static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
6548{ 6602{
6549 return sprintf(page, "%u\n", sched_mc_power_savings); 6603 return sprintf(page, "%u\n", sched_mc_power_savings);
6550} 6604}
6551static ssize_t sched_mc_power_savings_store(struct sys_device *dev, const char *buf, size_t count) 6605static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
6606 const char *buf, size_t count)
6552{ 6607{
6553 return sched_power_savings_store(buf, count, 0); 6608 return sched_power_savings_store(buf, count, 0);
6554} 6609}
@@ -6561,7 +6616,8 @@ static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
6561{ 6616{
6562 return sprintf(page, "%u\n", sched_smt_power_savings); 6617 return sprintf(page, "%u\n", sched_smt_power_savings);
6563} 6618}
6564static ssize_t sched_smt_power_savings_store(struct sys_device *dev, const char *buf, size_t count) 6619static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
6620 const char *buf, size_t count)
6565{ 6621{
6566 return sched_power_savings_store(buf, count, 1); 6622 return sched_power_savings_store(buf, count, 1);
6567} 6623}
@@ -6623,6 +6679,7 @@ int in_sched_functions(unsigned long addr)
6623{ 6679{
6624 /* Linker adds these: start and end of __sched functions */ 6680 /* Linker adds these: start and end of __sched functions */
6625 extern char __sched_text_start[], __sched_text_end[]; 6681 extern char __sched_text_start[], __sched_text_end[];
6682
6626 return in_lock_functions(addr) || 6683 return in_lock_functions(addr) ||
6627 (addr >= (unsigned long)__sched_text_start 6684 (addr >= (unsigned long)__sched_text_start
6628 && addr < (unsigned long)__sched_text_end); 6685 && addr < (unsigned long)__sched_text_end);
@@ -6630,14 +6687,15 @@ int in_sched_functions(unsigned long addr)
6630 6687
6631void __init sched_init(void) 6688void __init sched_init(void)
6632{ 6689{
6633 runqueue_t *rq;
6634 int i, j, k; 6690 int i, j, k;
6635 6691
6636 for_each_possible_cpu(i) { 6692 for_each_possible_cpu(i) {
6637 prio_array_t *array; 6693 struct prio_array *array;
6694 struct rq *rq;
6638 6695
6639 rq = cpu_rq(i); 6696 rq = cpu_rq(i);
6640 spin_lock_init(&rq->lock); 6697 spin_lock_init(&rq->lock);
6698 lockdep_set_class(&rq->lock, &rq->rq_lock_key);
6641 rq->nr_running = 0; 6699 rq->nr_running = 0;
6642 rq->active = rq->arrays; 6700 rq->active = rq->arrays;
6643 rq->expired = rq->arrays + 1; 6701 rq->expired = rq->arrays + 1;
@@ -6684,7 +6742,7 @@ void __init sched_init(void)
6684#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 6742#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
6685void __might_sleep(char *file, int line) 6743void __might_sleep(char *file, int line)
6686{ 6744{
6687#if defined(in_atomic) 6745#ifdef in_atomic
6688 static unsigned long prev_jiffy; /* ratelimiting */ 6746 static unsigned long prev_jiffy; /* ratelimiting */
6689 6747
6690 if ((in_atomic() || irqs_disabled()) && 6748 if ((in_atomic() || irqs_disabled()) &&
@@ -6706,10 +6764,10 @@ EXPORT_SYMBOL(__might_sleep);
6706#ifdef CONFIG_MAGIC_SYSRQ 6764#ifdef CONFIG_MAGIC_SYSRQ
6707void normalize_rt_tasks(void) 6765void normalize_rt_tasks(void)
6708{ 6766{
6767 struct prio_array *array;
6709 struct task_struct *p; 6768 struct task_struct *p;
6710 prio_array_t *array;
6711 unsigned long flags; 6769 unsigned long flags;
6712 runqueue_t *rq; 6770 struct rq *rq;
6713 6771
6714 read_lock_irq(&tasklist_lock); 6772 read_lock_irq(&tasklist_lock);
6715 for_each_process(p) { 6773 for_each_process(p) {
@@ -6753,7 +6811,7 @@ void normalize_rt_tasks(void)
6753 * 6811 *
6754 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6812 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6755 */ 6813 */
6756task_t *curr_task(int cpu) 6814struct task_struct *curr_task(int cpu)
6757{ 6815{
6758 return cpu_curr(cpu); 6816 return cpu_curr(cpu);
6759} 6817}
@@ -6773,7 +6831,7 @@ task_t *curr_task(int cpu)
6773 * 6831 *
6774 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6832 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6775 */ 6833 */
6776void set_curr_task(int cpu, task_t *p) 6834void set_curr_task(int cpu, struct task_struct *p)
6777{ 6835{
6778 cpu_curr(cpu) = p; 6836 cpu_curr(cpu) = p;
6779} 6837}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8f03e3b89b55..215541e26c1a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -62,6 +62,119 @@ static inline void wakeup_softirqd(void)
62} 62}
63 63
64/* 64/*
65 * This one is for softirq.c-internal use,
66 * where hardirqs are disabled legitimately:
67 */
68static void __local_bh_disable(unsigned long ip)
69{
70 unsigned long flags;
71
72 WARN_ON_ONCE(in_irq());
73
74 raw_local_irq_save(flags);
75 add_preempt_count(SOFTIRQ_OFFSET);
76 /*
77 * Were softirqs turned off above:
78 */
79 if (softirq_count() == SOFTIRQ_OFFSET)
80 trace_softirqs_off(ip);
81 raw_local_irq_restore(flags);
82}
83
84void local_bh_disable(void)
85{
86 __local_bh_disable((unsigned long)__builtin_return_address(0));
87}
88
89EXPORT_SYMBOL(local_bh_disable);
90
91void __local_bh_enable(void)
92{
93 WARN_ON_ONCE(in_irq());
94
95 /*
 96 * softirqs should never be enabled by __local_bh_enable();
 97 * it always nests inside local_bh_enable() sections:
98 */
99 WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
100
101 sub_preempt_count(SOFTIRQ_OFFSET);
102}
103EXPORT_SYMBOL_GPL(__local_bh_enable);
104
105/*
106 * Special-case - softirqs can safely be enabled in
107 * cond_resched_softirq(), or by __do_softirq(),
108 * without processing still-pending softirqs:
109 */
110void _local_bh_enable(void)
111{
112 WARN_ON_ONCE(in_irq());
113 WARN_ON_ONCE(!irqs_disabled());
114
115 if (softirq_count() == SOFTIRQ_OFFSET)
116 trace_softirqs_on((unsigned long)__builtin_return_address(0));
117 sub_preempt_count(SOFTIRQ_OFFSET);
118}
119
120EXPORT_SYMBOL(_local_bh_enable);
121
122void local_bh_enable(void)
123{
124 unsigned long flags;
125
126 WARN_ON_ONCE(in_irq());
127 WARN_ON_ONCE(irqs_disabled());
128
129 local_irq_save(flags);
130 /*
131 * Are softirqs going to be turned on now:
132 */
133 if (softirq_count() == SOFTIRQ_OFFSET)
134 trace_softirqs_on((unsigned long)__builtin_return_address(0));
135 /*
136 * Keep preemption disabled until we are done with
137 * softirq processing:
138 */
139 sub_preempt_count(SOFTIRQ_OFFSET - 1);
140
141 if (unlikely(!in_interrupt() && local_softirq_pending()))
142 do_softirq();
143
144 dec_preempt_count();
145 local_irq_restore(flags);
146 preempt_check_resched();
147}
148EXPORT_SYMBOL(local_bh_enable);
149
150void local_bh_enable_ip(unsigned long ip)
151{
152 unsigned long flags;
153
154 WARN_ON_ONCE(in_irq());
155
156 local_irq_save(flags);
157 /*
158 * Are softirqs going to be turned on now:
159 */
160 if (softirq_count() == SOFTIRQ_OFFSET)
161 trace_softirqs_on(ip);
162 /*
163 * Keep preemption disabled until we are done with
164 * softirq processing:
165 */
166 sub_preempt_count(SOFTIRQ_OFFSET - 1);
167
168 if (unlikely(!in_interrupt() && local_softirq_pending()))
169 do_softirq();
170
171 dec_preempt_count();
172 local_irq_restore(flags);
173 preempt_check_resched();
174}
175EXPORT_SYMBOL(local_bh_enable_ip);
176
177/*
65 * We restart softirq processing MAX_SOFTIRQ_RESTART times, 178 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
66 * and we fall back to softirqd after that. 179 * and we fall back to softirqd after that.
67 * 180 *
@@ -80,8 +193,11 @@ asmlinkage void __do_softirq(void)
80 int cpu; 193 int cpu;
81 194
82 pending = local_softirq_pending(); 195 pending = local_softirq_pending();
196 account_system_vtime(current);
197
198 __local_bh_disable((unsigned long)__builtin_return_address(0));
199 trace_softirq_enter();
83 200
84 local_bh_disable();
85 cpu = smp_processor_id(); 201 cpu = smp_processor_id();
86restart: 202restart:
87 /* Reset the pending bitmask before enabling irqs */ 203 /* Reset the pending bitmask before enabling irqs */
@@ -109,7 +225,10 @@ restart:
109 if (pending) 225 if (pending)
110 wakeup_softirqd(); 226 wakeup_softirqd();
111 227
112 __local_bh_enable(); 228 trace_softirq_exit();
229
230 account_system_vtime(current);
231 _local_bh_enable();
113} 232}
114 233
115#ifndef __ARCH_HAS_DO_SOFTIRQ 234#ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -136,23 +255,6 @@ EXPORT_SYMBOL(do_softirq);
136 255
137#endif 256#endif
138 257
139void local_bh_enable(void)
140{
141 WARN_ON(irqs_disabled());
142 /*
143 * Keep preemption disabled until we are done with
144 * softirq processing:
145 */
146 sub_preempt_count(SOFTIRQ_OFFSET - 1);
147
148 if (unlikely(!in_interrupt() && local_softirq_pending()))
149 do_softirq();
150
151 dec_preempt_count();
152 preempt_check_resched();
153}
154EXPORT_SYMBOL(local_bh_enable);
155
156#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED 258#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
157# define invoke_softirq() __do_softirq() 259# define invoke_softirq() __do_softirq()
158#else 260#else
@@ -165,6 +267,7 @@ EXPORT_SYMBOL(local_bh_enable);
165void irq_exit(void) 267void irq_exit(void)
166{ 268{
167 account_system_vtime(current); 269 account_system_vtime(current);
270 trace_hardirq_exit();
168 sub_preempt_count(IRQ_EXIT_OFFSET); 271 sub_preempt_count(IRQ_EXIT_OFFSET);
169 if (!in_interrupt() && local_softirq_pending()) 272 if (!in_interrupt() && local_softirq_pending())
170 invoke_softirq(); 273 invoke_softirq();
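
The new local_bh_enable() above drops SOFTIRQ_OFFSET - 1 first, so pending softirqs still run with preemption disabled by exactly one unit, and only then drops the last unit. A minimal user-space model of that bookkeeping (a sketch assuming the usual 2.6.18 preempt-count layout with one softirq unit at bit 8; not the kernel code itself):

#include <assert.h>

#define SOFTIRQ_OFFSET 0x00000100u	/* one softirq-disable unit */
#define SOFTIRQ_MASK   0x0000ff00u

static unsigned int preempt_count;

static unsigned int softirq_count(void)
{
	return preempt_count & SOFTIRQ_MASK;
}

static void model_bh_disable(void)
{
	preempt_count += SOFTIRQ_OFFSET;
}

static void model_bh_enable(void)
{
	/* drop all but one unit: preemption stays off while ... */
	preempt_count -= SOFTIRQ_OFFSET - 1;
	/* ... do_softirq() would process pending softirqs here ... */
	preempt_count -= 1;		/* now fully enabled again */
}

int main(void)
{
	model_bh_disable();
	assert(softirq_count() == SOFTIRQ_OFFSET);
	model_bh_enable();
	assert(preempt_count == 0);
	return 0;
}
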
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index b31e54eadf56..bfd6ad9c0330 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -13,6 +13,7 @@
13#include <linux/preempt.h> 13#include <linux/preempt.h>
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/debug_locks.h>
16#include <linux/module.h> 17#include <linux/module.h>
17 18
18/* 19/*
@@ -29,8 +30,10 @@ EXPORT_SYMBOL(generic__raw_read_trylock);
29int __lockfunc _spin_trylock(spinlock_t *lock) 30int __lockfunc _spin_trylock(spinlock_t *lock)
30{ 31{
31 preempt_disable(); 32 preempt_disable();
32 if (_raw_spin_trylock(lock)) 33 if (_raw_spin_trylock(lock)) {
34 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
33 return 1; 35 return 1;
36 }
34 37
35 preempt_enable(); 38 preempt_enable();
36 return 0; 39 return 0;
@@ -40,8 +43,10 @@ EXPORT_SYMBOL(_spin_trylock);
40int __lockfunc _read_trylock(rwlock_t *lock) 43int __lockfunc _read_trylock(rwlock_t *lock)
41{ 44{
42 preempt_disable(); 45 preempt_disable();
43 if (_raw_read_trylock(lock)) 46 if (_raw_read_trylock(lock)) {
47 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
44 return 1; 48 return 1;
49 }
45 50
46 preempt_enable(); 51 preempt_enable();
47 return 0; 52 return 0;
@@ -51,19 +56,28 @@ EXPORT_SYMBOL(_read_trylock);
51int __lockfunc _write_trylock(rwlock_t *lock) 56int __lockfunc _write_trylock(rwlock_t *lock)
52{ 57{
53 preempt_disable(); 58 preempt_disable();
54 if (_raw_write_trylock(lock)) 59 if (_raw_write_trylock(lock)) {
60 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
55 return 1; 61 return 1;
62 }
56 63
57 preempt_enable(); 64 preempt_enable();
58 return 0; 65 return 0;
59} 66}
60EXPORT_SYMBOL(_write_trylock); 67EXPORT_SYMBOL(_write_trylock);
61 68
62#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) 69/*
70 * If lockdep is enabled then we use the non-preemption spin-ops
71 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
72 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
73 */
74#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
75 defined(CONFIG_PROVE_LOCKING)
63 76
64void __lockfunc _read_lock(rwlock_t *lock) 77void __lockfunc _read_lock(rwlock_t *lock)
65{ 78{
66 preempt_disable(); 79 preempt_disable();
80 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
67 _raw_read_lock(lock); 81 _raw_read_lock(lock);
68} 82}
69EXPORT_SYMBOL(_read_lock); 83EXPORT_SYMBOL(_read_lock);
@@ -74,7 +88,17 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
74 88
75 local_irq_save(flags); 89 local_irq_save(flags);
76 preempt_disable(); 90 preempt_disable();
91 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
92 /*
 93 * On lockdep we don't want the hand-coded irq-enable of
94 * _raw_spin_lock_flags() code, because lockdep assumes
95 * that interrupts are not re-enabled during lock-acquire:
96 */
97#ifdef CONFIG_PROVE_LOCKING
98 _raw_spin_lock(lock);
99#else
77 _raw_spin_lock_flags(lock, &flags); 100 _raw_spin_lock_flags(lock, &flags);
101#endif
78 return flags; 102 return flags;
79} 103}
80EXPORT_SYMBOL(_spin_lock_irqsave); 104EXPORT_SYMBOL(_spin_lock_irqsave);
@@ -83,6 +107,7 @@ void __lockfunc _spin_lock_irq(spinlock_t *lock)
83{ 107{
84 local_irq_disable(); 108 local_irq_disable();
85 preempt_disable(); 109 preempt_disable();
110 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
86 _raw_spin_lock(lock); 111 _raw_spin_lock(lock);
87} 112}
88EXPORT_SYMBOL(_spin_lock_irq); 113EXPORT_SYMBOL(_spin_lock_irq);
@@ -91,6 +116,7 @@ void __lockfunc _spin_lock_bh(spinlock_t *lock)
91{ 116{
92 local_bh_disable(); 117 local_bh_disable();
93 preempt_disable(); 118 preempt_disable();
119 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
94 _raw_spin_lock(lock); 120 _raw_spin_lock(lock);
95} 121}
96EXPORT_SYMBOL(_spin_lock_bh); 122EXPORT_SYMBOL(_spin_lock_bh);
@@ -101,6 +127,7 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
101 127
102 local_irq_save(flags); 128 local_irq_save(flags);
103 preempt_disable(); 129 preempt_disable();
130 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
104 _raw_read_lock(lock); 131 _raw_read_lock(lock);
105 return flags; 132 return flags;
106} 133}
@@ -110,6 +137,7 @@ void __lockfunc _read_lock_irq(rwlock_t *lock)
110{ 137{
111 local_irq_disable(); 138 local_irq_disable();
112 preempt_disable(); 139 preempt_disable();
140 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
113 _raw_read_lock(lock); 141 _raw_read_lock(lock);
114} 142}
115EXPORT_SYMBOL(_read_lock_irq); 143EXPORT_SYMBOL(_read_lock_irq);
@@ -118,6 +146,7 @@ void __lockfunc _read_lock_bh(rwlock_t *lock)
118{ 146{
119 local_bh_disable(); 147 local_bh_disable();
120 preempt_disable(); 148 preempt_disable();
149 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
121 _raw_read_lock(lock); 150 _raw_read_lock(lock);
122} 151}
123EXPORT_SYMBOL(_read_lock_bh); 152EXPORT_SYMBOL(_read_lock_bh);
@@ -128,6 +157,7 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
128 157
129 local_irq_save(flags); 158 local_irq_save(flags);
130 preempt_disable(); 159 preempt_disable();
160 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
131 _raw_write_lock(lock); 161 _raw_write_lock(lock);
132 return flags; 162 return flags;
133} 163}
@@ -137,6 +167,7 @@ void __lockfunc _write_lock_irq(rwlock_t *lock)
137{ 167{
138 local_irq_disable(); 168 local_irq_disable();
139 preempt_disable(); 169 preempt_disable();
170 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
140 _raw_write_lock(lock); 171 _raw_write_lock(lock);
141} 172}
142EXPORT_SYMBOL(_write_lock_irq); 173EXPORT_SYMBOL(_write_lock_irq);
@@ -145,6 +176,7 @@ void __lockfunc _write_lock_bh(rwlock_t *lock)
145{ 176{
146 local_bh_disable(); 177 local_bh_disable();
147 preempt_disable(); 178 preempt_disable();
179 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
148 _raw_write_lock(lock); 180 _raw_write_lock(lock);
149} 181}
150EXPORT_SYMBOL(_write_lock_bh); 182EXPORT_SYMBOL(_write_lock_bh);
@@ -152,6 +184,7 @@ EXPORT_SYMBOL(_write_lock_bh);
152void __lockfunc _spin_lock(spinlock_t *lock) 184void __lockfunc _spin_lock(spinlock_t *lock)
153{ 185{
154 preempt_disable(); 186 preempt_disable();
187 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
155 _raw_spin_lock(lock); 188 _raw_spin_lock(lock);
156} 189}
157 190
@@ -160,6 +193,7 @@ EXPORT_SYMBOL(_spin_lock);
160void __lockfunc _write_lock(rwlock_t *lock) 193void __lockfunc _write_lock(rwlock_t *lock)
161{ 194{
162 preempt_disable(); 195 preempt_disable();
196 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
163 _raw_write_lock(lock); 197 _raw_write_lock(lock);
164} 198}
165 199
@@ -255,8 +289,22 @@ BUILD_LOCK_OPS(write, rwlock);
255 289
256#endif /* CONFIG_PREEMPT */ 290#endif /* CONFIG_PREEMPT */
257 291
292#ifdef CONFIG_DEBUG_LOCK_ALLOC
293
294void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
295{
296 preempt_disable();
297 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
298 _raw_spin_lock(lock);
299}
300
301EXPORT_SYMBOL(_spin_lock_nested);
302
303#endif
304
258void __lockfunc _spin_unlock(spinlock_t *lock) 305void __lockfunc _spin_unlock(spinlock_t *lock)
259{ 306{
307 spin_release(&lock->dep_map, 1, _RET_IP_);
260 _raw_spin_unlock(lock); 308 _raw_spin_unlock(lock);
261 preempt_enable(); 309 preempt_enable();
262} 310}
@@ -264,6 +312,7 @@ EXPORT_SYMBOL(_spin_unlock);
264 312
265void __lockfunc _write_unlock(rwlock_t *lock) 313void __lockfunc _write_unlock(rwlock_t *lock)
266{ 314{
315 rwlock_release(&lock->dep_map, 1, _RET_IP_);
267 _raw_write_unlock(lock); 316 _raw_write_unlock(lock);
268 preempt_enable(); 317 preempt_enable();
269} 318}
@@ -271,6 +320,7 @@ EXPORT_SYMBOL(_write_unlock);
271 320
272void __lockfunc _read_unlock(rwlock_t *lock) 321void __lockfunc _read_unlock(rwlock_t *lock)
273{ 322{
323 rwlock_release(&lock->dep_map, 1, _RET_IP_);
274 _raw_read_unlock(lock); 324 _raw_read_unlock(lock);
275 preempt_enable(); 325 preempt_enable();
276} 326}
@@ -278,6 +328,7 @@ EXPORT_SYMBOL(_read_unlock);
278 328
279void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 329void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
280{ 330{
331 spin_release(&lock->dep_map, 1, _RET_IP_);
281 _raw_spin_unlock(lock); 332 _raw_spin_unlock(lock);
282 local_irq_restore(flags); 333 local_irq_restore(flags);
283 preempt_enable(); 334 preempt_enable();
@@ -286,6 +337,7 @@ EXPORT_SYMBOL(_spin_unlock_irqrestore);
286 337
287void __lockfunc _spin_unlock_irq(spinlock_t *lock) 338void __lockfunc _spin_unlock_irq(spinlock_t *lock)
288{ 339{
340 spin_release(&lock->dep_map, 1, _RET_IP_);
289 _raw_spin_unlock(lock); 341 _raw_spin_unlock(lock);
290 local_irq_enable(); 342 local_irq_enable();
291 preempt_enable(); 343 preempt_enable();
@@ -294,14 +346,16 @@ EXPORT_SYMBOL(_spin_unlock_irq);
294 346
295void __lockfunc _spin_unlock_bh(spinlock_t *lock) 347void __lockfunc _spin_unlock_bh(spinlock_t *lock)
296{ 348{
349 spin_release(&lock->dep_map, 1, _RET_IP_);
297 _raw_spin_unlock(lock); 350 _raw_spin_unlock(lock);
298 preempt_enable_no_resched(); 351 preempt_enable_no_resched();
299 local_bh_enable(); 352 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
300} 353}
301EXPORT_SYMBOL(_spin_unlock_bh); 354EXPORT_SYMBOL(_spin_unlock_bh);
302 355
303void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 356void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
304{ 357{
358 rwlock_release(&lock->dep_map, 1, _RET_IP_);
305 _raw_read_unlock(lock); 359 _raw_read_unlock(lock);
306 local_irq_restore(flags); 360 local_irq_restore(flags);
307 preempt_enable(); 361 preempt_enable();
@@ -310,6 +364,7 @@ EXPORT_SYMBOL(_read_unlock_irqrestore);
310 364
311void __lockfunc _read_unlock_irq(rwlock_t *lock) 365void __lockfunc _read_unlock_irq(rwlock_t *lock)
312{ 366{
367 rwlock_release(&lock->dep_map, 1, _RET_IP_);
313 _raw_read_unlock(lock); 368 _raw_read_unlock(lock);
314 local_irq_enable(); 369 local_irq_enable();
315 preempt_enable(); 370 preempt_enable();
@@ -318,14 +373,16 @@ EXPORT_SYMBOL(_read_unlock_irq);
318 373
319void __lockfunc _read_unlock_bh(rwlock_t *lock) 374void __lockfunc _read_unlock_bh(rwlock_t *lock)
320{ 375{
376 rwlock_release(&lock->dep_map, 1, _RET_IP_);
321 _raw_read_unlock(lock); 377 _raw_read_unlock(lock);
322 preempt_enable_no_resched(); 378 preempt_enable_no_resched();
323 local_bh_enable(); 379 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
324} 380}
325EXPORT_SYMBOL(_read_unlock_bh); 381EXPORT_SYMBOL(_read_unlock_bh);
326 382
327void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 383void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
328{ 384{
385 rwlock_release(&lock->dep_map, 1, _RET_IP_);
329 _raw_write_unlock(lock); 386 _raw_write_unlock(lock);
330 local_irq_restore(flags); 387 local_irq_restore(flags);
331 preempt_enable(); 388 preempt_enable();
@@ -334,6 +391,7 @@ EXPORT_SYMBOL(_write_unlock_irqrestore);
334 391
335void __lockfunc _write_unlock_irq(rwlock_t *lock) 392void __lockfunc _write_unlock_irq(rwlock_t *lock)
336{ 393{
394 rwlock_release(&lock->dep_map, 1, _RET_IP_);
337 _raw_write_unlock(lock); 395 _raw_write_unlock(lock);
338 local_irq_enable(); 396 local_irq_enable();
339 preempt_enable(); 397 preempt_enable();
@@ -342,9 +400,10 @@ EXPORT_SYMBOL(_write_unlock_irq);
342 400
343void __lockfunc _write_unlock_bh(rwlock_t *lock) 401void __lockfunc _write_unlock_bh(rwlock_t *lock)
344{ 402{
403 rwlock_release(&lock->dep_map, 1, _RET_IP_);
345 _raw_write_unlock(lock); 404 _raw_write_unlock(lock);
346 preempt_enable_no_resched(); 405 preempt_enable_no_resched();
347 local_bh_enable(); 406 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
348} 407}
349EXPORT_SYMBOL(_write_unlock_bh); 408EXPORT_SYMBOL(_write_unlock_bh);
350 409
@@ -352,11 +411,13 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
352{ 411{
353 local_bh_disable(); 412 local_bh_disable();
354 preempt_disable(); 413 preempt_disable();
355 if (_raw_spin_trylock(lock)) 414 if (_raw_spin_trylock(lock)) {
415 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
356 return 1; 416 return 1;
417 }
357 418
358 preempt_enable_no_resched(); 419 preempt_enable_no_resched();
359 local_bh_enable(); 420 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
360 return 0; 421 return 0;
361} 422}
362EXPORT_SYMBOL(_spin_trylock_bh); 423EXPORT_SYMBOL(_spin_trylock_bh);
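
Each wrapper above now brackets the raw lock operation with a lockdep annotation, and _spin_lock_nested() gives callers a way to take two locks of the same class without tripping the recursion check. A caller-side sketch (hypothetical double_lock() helper; assumes the SINGLE_DEPTH_NESTING constant from the new lockdep headers):

#include <linux/spinlock.h>

/*
 * Take two locks that share one lock-class, e.g. two per-CPU locks
 * of the same kind: address order prevents the real ABBA deadlock,
 * and the _nested() annotation tells lockdep the second acquire is
 * a separate subclass rather than recursion on the same class.
 */
static void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l2 < l1) {
		spinlock_t *tmp = l1;

		l1 = l2;
		l2 = tmp;
	}
	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
	/* release with spin_unlock(l2); spin_unlock(l1); */
}
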
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
new file mode 100644
index 000000000000..b71816e47a30
--- /dev/null
+++ b/kernel/stacktrace.c
@@ -0,0 +1,24 @@
1/*
2 * kernel/stacktrace.c
3 *
4 * Stack trace management functions
5 *
6 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 */
8#include <linux/sched.h>
9#include <linux/kallsyms.h>
10#include <linux/stacktrace.h>
11
12void print_stack_trace(struct stack_trace *trace, int spaces)
13{
14 int i, j;
15
16 for (i = 0; i < trace->nr_entries; i++) {
17 unsigned long ip = trace->entries[i];
18
19 for (j = 0; j < spaces + 1; j++)
20 printk(" ");
21 print_ip_sym(ip);
22 }
23}
24
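
print_stack_trace() only consumes a caller-filled struct stack_trace; in this series the entries normally come from the arch's save_stack_trace(). A minimal consumer sketch (hypothetical show_one() helper, assuming the nr_entries/entries fields from include/linux/stacktrace.h):

#include <linux/stacktrace.h>

static void show_one(unsigned long ip)
{
	unsigned long entry = ip;
	struct stack_trace trace = {
		.nr_entries = 1,
		.entries    = &entry,
	};

	print_stack_trace(&trace, 0);	/* one leading space per line */
}

/* e.g.: show_one((unsigned long)__builtin_return_address(0)); */
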
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2c0aacc37c55..dcfb5d731466 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -4,7 +4,6 @@
4#include <linux/cpu.h> 4#include <linux/cpu.h>
5#include <linux/err.h> 5#include <linux/err.h>
6#include <linux/syscalls.h> 6#include <linux/syscalls.h>
7#include <linux/kthread.h>
8#include <asm/atomic.h> 7#include <asm/atomic.h>
9#include <asm/semaphore.h> 8#include <asm/semaphore.h>
10#include <asm/uaccess.h> 9#include <asm/uaccess.h>
@@ -26,11 +25,13 @@ static unsigned int stopmachine_num_threads;
26static atomic_t stopmachine_thread_ack; 25static atomic_t stopmachine_thread_ack;
27static DECLARE_MUTEX(stopmachine_mutex); 26static DECLARE_MUTEX(stopmachine_mutex);
28 27
29static int stopmachine(void *unused) 28static int stopmachine(void *cpu)
30{ 29{
31 int irqs_disabled = 0; 30 int irqs_disabled = 0;
32 int prepared = 0; 31 int prepared = 0;
33 32
33 set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
34
34 /* Ack: we are alive */ 35 /* Ack: we are alive */
35 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */ 36 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
36 atomic_inc(&stopmachine_thread_ack); 37 atomic_inc(&stopmachine_thread_ack);
@@ -84,8 +85,7 @@ static void stopmachine_set_state(enum stopmachine_state state)
84 85
85static int stop_machine(void) 86static int stop_machine(void)
86{ 87{
87 int ret = 0; 88 int i, ret = 0;
88 unsigned int i;
89 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 89 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
90 90
91 /* One high-prio thread per cpu. We'll do this one. */ 91 /* One high-prio thread per cpu. We'll do this one. */
@@ -96,16 +96,11 @@ static int stop_machine(void)
96 stopmachine_state = STOPMACHINE_WAIT; 96 stopmachine_state = STOPMACHINE_WAIT;
97 97
98 for_each_online_cpu(i) { 98 for_each_online_cpu(i) {
99 struct task_struct *tsk;
100 if (i == raw_smp_processor_id()) 99 if (i == raw_smp_processor_id())
101 continue; 100 continue;
 102 tsk = kthread_create(stopmachine, NULL, "stopmachine"); 101 ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
103 if (IS_ERR(tsk)) { 102 if (ret < 0)
104 ret = PTR_ERR(tsk);
105 break; 103 break;
106 }
107 kthread_bind(tsk, i);
108 wake_up_process(tsk);
109 stopmachine_num_threads++; 104 stopmachine_num_threads++;
110 } 105 }
111 106
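
The replacement threads no longer go through kthread_create()/kthread_bind(); each is spawned with kernel_thread() and pins itself to its target CPU as its first action. The pattern, sketched with a hypothetical thread function:

#include <linux/sched.h>

static int pinned_thread(void *cpu)
{
	/* the CPU number rides in the void * argument: */
	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
	/* ... per-CPU work runs here, now bound to that CPU ... */
	return 0;
}

/* caller: kernel_thread(pinned_thread, (void *)(long)i, CLONE_KERNEL); */
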
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 99a58f279077..362a0cc37138 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -932,6 +932,17 @@ static ctl_table vm_table[] = {
932 .strategy = &sysctl_intvec, 932 .strategy = &sysctl_intvec,
933 .extra1 = &zero, 933 .extra1 = &zero,
934 }, 934 },
935 {
936 .ctl_name = VM_MIN_UNMAPPED,
937 .procname = "min_unmapped_ratio",
938 .data = &sysctl_min_unmapped_ratio,
939 .maxlen = sizeof(sysctl_min_unmapped_ratio),
940 .mode = 0644,
941 .proc_handler = &sysctl_min_unmapped_ratio_sysctl_handler,
942 .strategy = &sysctl_intvec,
943 .extra1 = &zero,
944 .extra2 = &one_hundred,
945 },
935#endif 946#endif
936#ifdef CONFIG_X86_32 947#ifdef CONFIG_X86_32
937 { 948 {
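
The new entry surfaces as /proc/sys/vm/min_unmapped_ratio (on NUMA builds, where zone reclaim consults it). A user-space sketch for reading it back:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/min_unmapped_ratio", "r");
	int ratio;

	if (f && fscanf(f, "%d", &ratio) == 1)
		printf("min_unmapped_ratio = %d%%\n", ratio);
	if (f)
		fclose(f);
	return 0;
}
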
diff --git a/kernel/timer.c b/kernel/timer.c
index 5a8960253063..396a3c024c2c 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1208,7 +1208,7 @@ unsigned long wall_jiffies = INITIAL_JIFFIES;
1208 * playing with xtime and avenrun. 1208 * playing with xtime and avenrun.
1209 */ 1209 */
1210#ifndef ARCH_HAVE_XTIME_LOCK 1210#ifndef ARCH_HAVE_XTIME_LOCK
1211seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; 1211__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
1212 1212
1213EXPORT_SYMBOL(xtime_lock); 1213EXPORT_SYMBOL(xtime_lock);
1214#endif 1214#endif
@@ -1368,7 +1368,7 @@ asmlinkage long sys_getegid(void)
1368 1368
1369static void process_timeout(unsigned long __data) 1369static void process_timeout(unsigned long __data)
1370{ 1370{
1371 wake_up_process((task_t *)__data); 1371 wake_up_process((struct task_struct *)__data);
1372} 1372}
1373 1373
1374/** 1374/**
@@ -1559,6 +1559,13 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
1559 return 0; 1559 return 0;
1560} 1560}
1561 1561
1562/*
1563 * lockdep: we want to track each per-CPU base as a separate lock-class,
1564 * but timer-bases are kmalloc()-ed, so we need to attach separate
1565 * keys to them:
1566 */
1567static struct lock_class_key base_lock_keys[NR_CPUS];
1568
1562static int __devinit init_timers_cpu(int cpu) 1569static int __devinit init_timers_cpu(int cpu)
1563{ 1570{
1564 int j; 1571 int j;
@@ -1594,6 +1601,8 @@ static int __devinit init_timers_cpu(int cpu)
1594 } 1601 }
1595 1602
1596 spin_lock_init(&base->lock); 1603 spin_lock_init(&base->lock);
1604 lockdep_set_class(&base->lock, base_lock_keys + cpu);
1605
1597 for (j = 0; j < TVN_SIZE; j++) { 1606 for (j = 0; j < TVN_SIZE; j++) {
1598 INIT_LIST_HEAD(base->tv5.vec + j); 1607 INIT_LIST_HEAD(base->tv5.vec + j);
1599 INIT_LIST_HEAD(base->tv4.vec + j); 1608 INIT_LIST_HEAD(base->tv4.vec + j);
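
The static key array is the standard lockdep recipe for dynamically allocated locks, which would otherwise all land in one class keyed by the spin_lock_init() call site. The general form, with a hypothetical structure:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_base {
	spinlock_t lock;
	/* ... */
};

static struct lock_class_key my_base_keys[NR_CPUS];

static struct my_base *alloc_base(int cpu)
{
	struct my_base *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (b) {
		spin_lock_init(&b->lock);
		/* one class per CPU, so cross-CPU nesting stays legal: */
		lockdep_set_class(&b->lock, my_base_keys + cpu);
	}
	return b;
}
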
diff --git a/kernel/wait.c b/kernel/wait.c
index 5985d866531f..a1d57aeb7f75 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,6 +10,10 @@
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/hash.h> 11#include <linux/hash.h>
12 12
13struct lock_class_key waitqueue_lock_key;
14
15EXPORT_SYMBOL(waitqueue_lock_key);
16
13void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) 17void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
14{ 18{
15 unsigned long flags; 19 unsigned long flags;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 59f0b42bd89e..eebb1d839235 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -51,7 +51,7 @@ struct cpu_workqueue_struct {
51 wait_queue_head_t work_done; 51 wait_queue_head_t work_done;
52 52
53 struct workqueue_struct *wq; 53 struct workqueue_struct *wq;
54 task_t *thread; 54 struct task_struct *thread;
55 55
56 int run_depth; /* Detect run_workqueue() recursion depth */ 56 int run_depth; /* Detect run_workqueue() recursion depth */
57} ____cacheline_aligned; 57} ____cacheline_aligned;
@@ -114,6 +114,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
114 put_cpu(); 114 put_cpu();
115 return ret; 115 return ret;
116} 116}
117EXPORT_SYMBOL_GPL(queue_work);
117 118
118static void delayed_work_timer_fn(unsigned long __data) 119static void delayed_work_timer_fn(unsigned long __data)
119{ 120{
@@ -147,6 +148,29 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
147 } 148 }
148 return ret; 149 return ret;
149} 150}
151EXPORT_SYMBOL_GPL(queue_delayed_work);
152
153int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
154 struct work_struct *work, unsigned long delay)
155{
156 int ret = 0;
157 struct timer_list *timer = &work->timer;
158
159 if (!test_and_set_bit(0, &work->pending)) {
160 BUG_ON(timer_pending(timer));
161 BUG_ON(!list_empty(&work->entry));
162
163 /* This stores wq for the moment, for the timer_fn */
164 work->wq_data = wq;
165 timer->expires = jiffies + delay;
166 timer->data = (unsigned long)work;
167 timer->function = delayed_work_timer_fn;
168 add_timer_on(timer, cpu);
169 ret = 1;
170 }
171 return ret;
172}
173EXPORT_SYMBOL_GPL(queue_delayed_work_on);
150 174
151static void run_workqueue(struct cpu_workqueue_struct *cwq) 175static void run_workqueue(struct cpu_workqueue_struct *cwq)
152{ 176{
@@ -281,6 +305,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
281 unlock_cpu_hotplug(); 305 unlock_cpu_hotplug();
282 } 306 }
283} 307}
308EXPORT_SYMBOL_GPL(flush_workqueue);
284 309
285static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq, 310static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
286 int cpu) 311 int cpu)
@@ -358,6 +383,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
358 } 383 }
359 return wq; 384 return wq;
360} 385}
386EXPORT_SYMBOL_GPL(__create_workqueue);
361 387
362static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu) 388static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
363{ 389{
@@ -395,6 +421,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
395 free_percpu(wq->cpu_wq); 421 free_percpu(wq->cpu_wq);
396 kfree(wq); 422 kfree(wq);
397} 423}
424EXPORT_SYMBOL_GPL(destroy_workqueue);
398 425
399static struct workqueue_struct *keventd_wq; 426static struct workqueue_struct *keventd_wq;
400 427
@@ -402,31 +429,20 @@ int fastcall schedule_work(struct work_struct *work)
402{ 429{
403 return queue_work(keventd_wq, work); 430 return queue_work(keventd_wq, work);
404} 431}
432EXPORT_SYMBOL(schedule_work);
405 433
406int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) 434int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
407{ 435{
408 return queue_delayed_work(keventd_wq, work, delay); 436 return queue_delayed_work(keventd_wq, work, delay);
409} 437}
438EXPORT_SYMBOL(schedule_delayed_work);
410 439
411int schedule_delayed_work_on(int cpu, 440int schedule_delayed_work_on(int cpu,
412 struct work_struct *work, unsigned long delay) 441 struct work_struct *work, unsigned long delay)
413{ 442{
414 int ret = 0; 443 return queue_delayed_work_on(cpu, keventd_wq, work, delay);
415 struct timer_list *timer = &work->timer;
416
417 if (!test_and_set_bit(0, &work->pending)) {
418 BUG_ON(timer_pending(timer));
419 BUG_ON(!list_empty(&work->entry));
420 /* This stores keventd_wq for the moment, for the timer_fn */
421 work->wq_data = keventd_wq;
422 timer->expires = jiffies + delay;
423 timer->data = (unsigned long)work;
424 timer->function = delayed_work_timer_fn;
425 add_timer_on(timer, cpu);
426 ret = 1;
427 }
428 return ret;
429} 444}
445EXPORT_SYMBOL(schedule_delayed_work_on);
430 446
431/** 447/**
432 * schedule_on_each_cpu - call a function on each online CPU from keventd 448 * schedule_on_each_cpu - call a function on each online CPU from keventd
@@ -463,6 +479,7 @@ void flush_scheduled_work(void)
463{ 479{
464 flush_workqueue(keventd_wq); 480 flush_workqueue(keventd_wq);
465} 481}
482EXPORT_SYMBOL(flush_scheduled_work);
466 483
467/** 484/**
468 * cancel_rearming_delayed_workqueue - reliably kill off a delayed 485 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
@@ -619,13 +636,3 @@ void init_workqueues(void)
619 BUG_ON(!keventd_wq); 636 BUG_ON(!keventd_wq);
620} 637}
621 638
622EXPORT_SYMBOL_GPL(__create_workqueue);
623EXPORT_SYMBOL_GPL(queue_work);
624EXPORT_SYMBOL_GPL(queue_delayed_work);
625EXPORT_SYMBOL_GPL(flush_workqueue);
626EXPORT_SYMBOL_GPL(destroy_workqueue);
627
628EXPORT_SYMBOL(schedule_work);
629EXPORT_SYMBOL(schedule_delayed_work);
630EXPORT_SYMBOL(schedule_delayed_work_on);
631EXPORT_SYMBOL(flush_scheduled_work);
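
queue_delayed_work_on() generalizes the old keventd-only schedule_delayed_work_on() to arbitrary workqueues, which is why the timer setup moved here. A usage sketch with hypothetical names, using the 2.6.17-era three-argument DECLARE_WORK():

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void my_work_fn(void *data)
{
	/* runs roughly HZ ticks after queueing, on CPU 0's thread */
}

static DECLARE_WORK(my_work, my_work_fn, NULL);
static struct workqueue_struct *my_wq;

static int queue_it(void)
{
	my_wq = create_workqueue("mywq");
	if (!my_wq)
		return -ENOMEM;
	/* returns 1 if queued, 0 if the work was already pending: */
	return queue_delayed_work_on(0, my_wq, &my_work, HZ);
}
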
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4fcbd12cf6e..e5889b1a33ff 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -48,7 +48,7 @@ config DEBUG_KERNEL
48config LOG_BUF_SHIFT 48config LOG_BUF_SHIFT
49 int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL 49 int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
50 range 12 21 50 range 12 21
51 default 17 if S390 51 default 17 if S390 || LOCKDEP
52 default 16 if X86_NUMAQ || IA64 52 default 16 if X86_NUMAQ || IA64
53 default 15 if SMP 53 default 15 if SMP
54 default 14 54 default 14
@@ -107,7 +107,7 @@ config DEBUG_SLAB_LEAK
107 107
108config DEBUG_PREEMPT 108config DEBUG_PREEMPT
109 bool "Debug preemptible kernel" 109 bool "Debug preemptible kernel"
110 depends on DEBUG_KERNEL && PREEMPT 110 depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
111 default y 111 default y
112 help 112 help
113 If you say Y here then the kernel will use a debug variant of the 113 If you say Y here then the kernel will use a debug variant of the
@@ -115,14 +115,6 @@ config DEBUG_PREEMPT
115 if kernel code uses it in a preemption-unsafe way. Also, the kernel 115 if kernel code uses it in a preemption-unsafe way. Also, the kernel
116 will detect preemption count underflows. 116 will detect preemption count underflows.
117 117
118config DEBUG_MUTEXES
119 bool "Mutex debugging, deadlock detection"
120 default n
121 depends on DEBUG_KERNEL
122 help
123 This allows mutex semantics violations and mutex related deadlocks
124 (lockups) to be detected and reported automatically.
125
126config DEBUG_RT_MUTEXES 118config DEBUG_RT_MUTEXES
127 bool "RT Mutex debugging, deadlock detection" 119 bool "RT Mutex debugging, deadlock detection"
128 depends on DEBUG_KERNEL && RT_MUTEXES 120 depends on DEBUG_KERNEL && RT_MUTEXES
@@ -142,7 +134,7 @@ config RT_MUTEX_TESTER
142 This option enables a rt-mutex tester. 134 This option enables a rt-mutex tester.
143 135
144config DEBUG_SPINLOCK 136config DEBUG_SPINLOCK
145 bool "Spinlock debugging" 137 bool "Spinlock and rw-lock debugging: basic checks"
146 depends on DEBUG_KERNEL 138 depends on DEBUG_KERNEL
147 help 139 help
148 Say Y here and build SMP to catch missing spinlock initialization 140 Say Y here and build SMP to catch missing spinlock initialization
@@ -150,13 +142,122 @@ config DEBUG_SPINLOCK
150 best used in conjunction with the NMI watchdog so that spinlock 142 best used in conjunction with the NMI watchdog so that spinlock
151 deadlocks are also debuggable. 143 deadlocks are also debuggable.
152 144
145config DEBUG_MUTEXES
146 bool "Mutex debugging: basic checks"
147 depends on DEBUG_KERNEL
148 help
149 This feature allows mutex semantics violations to be detected and
150 reported.
151
152config DEBUG_RWSEMS
153 bool "RW-sem debugging: basic checks"
154 depends on DEBUG_KERNEL
155 help
156 This feature allows read-write semaphore semantics violations to
157 be detected and reported.
158
159config DEBUG_LOCK_ALLOC
160 bool "Lock debugging: detect incorrect freeing of live locks"
161 depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
162 select DEBUG_SPINLOCK
163 select DEBUG_MUTEXES
164 select DEBUG_RWSEMS
165 select LOCKDEP
166 help
167 This feature will check whether any held lock (spinlock, rwlock,
168 mutex or rwsem) is incorrectly freed by the kernel, via any of the
169 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
170 vfree(), etc.), whether a live lock is incorrectly reinitialized via
171 spin_lock_init()/mutex_init()/etc., or whether there is any lock
172 held during task exit.
173
174config PROVE_LOCKING
175 bool "Lock debugging: prove locking correctness"
176 depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
177 select LOCKDEP
178 select DEBUG_SPINLOCK
179 select DEBUG_MUTEXES
180 select DEBUG_RWSEMS
181 select DEBUG_LOCK_ALLOC
182 default n
183 help
184 This feature enables the kernel to prove that all locking
185 that occurs in the kernel runtime is mathematically
186 correct: that under no circumstance could an arbitrary (and
187 not yet triggered) combination of observed locking
188 sequences (on an arbitrary number of CPUs, running an
189 arbitrary number of tasks and interrupt contexts) cause a
190 deadlock.
191
192 In short, this feature enables the kernel to report locking
193 related deadlocks before they actually occur.
194
195 The proof does not depend on how hard and complex a
196 deadlock scenario would be to trigger: how many
197 participant CPUs, tasks and irq-contexts would be needed
 198 for it to occur. The proof also does not depend on
 199 timing: if a race and a resulting deadlock are possible
 200 theoretically (no matter how unlikely the race scenario
 201 is), they will be proven so and will immediately be
202 reported by the kernel (once the event is observed that
203 makes the deadlock theoretically possible).
204
205 If a deadlock is impossible (i.e. the locking rules, as
206 observed by the kernel, are mathematically correct), the
207 kernel reports nothing.
208
209 NOTE: this feature can also be enabled for rwlocks, mutexes
210 and rwsems - in which case all dependencies between these
211 different locking variants are observed and mapped too, and
212 the proof of observed correctness is also maintained for an
213 arbitrary combination of these separate locking variants.
214
215 For more details, see Documentation/lockdep-design.txt.
216
217config LOCKDEP
218 bool
219 depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
220 select STACKTRACE
221 select FRAME_POINTER
222 select KALLSYMS
223 select KALLSYMS_ALL
224
225config DEBUG_LOCKDEP
226 bool "Lock dependency engine debugging"
227 depends on LOCKDEP
228 help
229 If you say Y here, the lock dependency engine will do
230 additional runtime checks to debug itself, at the price
231 of more runtime overhead.
232
233config TRACE_IRQFLAGS
234 bool
235 default y
236 depends on TRACE_IRQFLAGS_SUPPORT
237 depends on PROVE_LOCKING
238
153config DEBUG_SPINLOCK_SLEEP 239config DEBUG_SPINLOCK_SLEEP
154 bool "Sleep-inside-spinlock checking" 240 bool "Spinlock debugging: sleep-inside-spinlock checking"
155 depends on DEBUG_KERNEL 241 depends on DEBUG_KERNEL
156 help 242 help
157 If you say Y here, various routines which may sleep will become very 243 If you say Y here, various routines which may sleep will become very
158 noisy if they are called with a spinlock held. 244 noisy if they are called with a spinlock held.
159 245
246config DEBUG_LOCKING_API_SELFTESTS
247 bool "Locking API boot-time self-tests"
248 depends on DEBUG_KERNEL
249 help
250 Say Y here if you want the kernel to run a short self-test during
251 bootup. The self-test checks whether common types of locking bugs
 252 are detected by debugging mechanisms or not. (If you disable
 253 lock debugging then those bugs won't be detected, of course.)
254 The following locking APIs are covered: spinlocks, rwlocks,
255 mutexes and rwsems.
256
257config STACKTRACE
258 bool
259 depends on STACKTRACE_SUPPORT
260
160config DEBUG_KOBJECT 261config DEBUG_KOBJECT
161 bool "kobject debugging" 262 bool "kobject debugging"
162 depends on DEBUG_KERNEL 263 depends on DEBUG_KERNEL
@@ -212,7 +313,7 @@ config DEBUG_VM
212 313
213config FRAME_POINTER 314config FRAME_POINTER
214 bool "Compile the kernel with frame pointers" 315 bool "Compile the kernel with frame pointers"
215 depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML) 316 depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390)
216 default y if DEBUG_INFO && UML 317 default y if DEBUG_INFO && UML
217 help 318 help
218 If you say Y here the resulting kernel image will be slightly larger 319 If you say Y here the resulting kernel image will be slightly larger
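
What PROVE_LOCKING reports in practice is the classic ABBA pattern below, flagged the first time the reversed ordering is observed rather than only when the two paths actually race (sketch with hypothetical locks A and B):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(A);
static DEFINE_SPINLOCK(B);

static void path_one(void)
{
	spin_lock(&A);
	spin_lock(&B);		/* lockdep records the A -> B dependency */
	spin_unlock(&B);
	spin_unlock(&A);
}

static void path_two(void)
{
	spin_lock(&B);
	spin_lock(&A);		/* B -> A closes the cycle: reported here */
	spin_unlock(&A);
	spin_unlock(&B);
}
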
diff --git a/lib/Makefile b/lib/Makefile
index 10c13c9d7824..be9719ae82d0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,13 +11,14 @@ lib-$(CONFIG_SMP) += cpumask.o
11 11
12lib-y += kobject.o kref.o kobject_uevent.o klist.o 12lib-y += kobject.o kref.o kobject_uevent.o klist.o
13 13
14obj-y += sort.o parser.o halfmd4.o iomap_copy.o 14obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o
15 15
16ifeq ($(CONFIG_DEBUG_KOBJECT),y) 16ifeq ($(CONFIG_DEBUG_KOBJECT),y)
17CFLAGS_kobject.o += -DDEBUG 17CFLAGS_kobject.o += -DDEBUG
18CFLAGS_kobject_uevent.o += -DDEBUG 18CFLAGS_kobject_uevent.o += -DDEBUG
19endif 19endif
20 20
21obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
21obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o 22obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
22lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o 23lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
23lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 24lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
new file mode 100644
index 000000000000..0ef01d14727c
--- /dev/null
+++ b/lib/debug_locks.c
@@ -0,0 +1,45 @@
1/*
2 * lib/debug_locks.c
3 *
4 * Generic place for common debugging facilities for various locks:
5 * spinlocks, rwlocks, mutexes and rwsems.
6 *
7 * Started by Ingo Molnar:
8 *
9 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 */
11#include <linux/rwsem.h>
12#include <linux/mutex.h>
13#include <linux/module.h>
14#include <linux/spinlock.h>
15#include <linux/debug_locks.h>
16
17/*
18 * We want to turn all lock-debugging facilities on/off at once,
19 * via a global flag. The reason is that once a single bug has been
 20 * detected and reported, there might be a cascade of followup bugs
21 * that would just muddy the log. So we report the first one and
22 * shut up after that.
23 */
24int debug_locks = 1;
25
26/*
27 * The locking-testsuite uses <debug_locks_silent> to get a
28 * 'silent failure': nothing is printed to the console when
29 * a locking bug is detected.
30 */
31int debug_locks_silent;
32
33/*
34 * Generic 'turn off all lock debugging' function:
35 */
36int debug_locks_off(void)
37{
38 if (xchg(&debug_locks, 0)) {
39 if (!debug_locks_silent) {
40 console_verbose();
41 return 1;
42 }
43 }
44 return 0;
45}
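
debug_locks_off() returns 1 only for the first, non-silent failure, so every debugging facility can gate its report on it and at most one splat reaches the log. The idiom, with a hypothetical corruption check:

#include <linux/kernel.h>
#include <linux/debug_locks.h>

static void check_magic(unsigned int magic, unsigned int expected)
{
	if (unlikely(magic != expected) && debug_locks_off())
		printk(KERN_ERR "lock corruption: bad magic %08x\n", magic);
}
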
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index e713e86811ae..e0fdfddb406e 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -177,7 +177,12 @@ static inline void __lock_kernel(void)
177 177
178static inline void __unlock_kernel(void) 178static inline void __unlock_kernel(void)
179{ 179{
180 spin_unlock(&kernel_flag); 180 /*
181 * the BKL is not covered by lockdep, so we open-code the
182 * unlocking sequence (and thus avoid the dep-chain ops):
183 */
184 _raw_spin_unlock(&kernel_flag);
185 preempt_enable();
181} 186}
182 187
183/* 188/*
diff --git a/lib/locking-selftest-hardirq.h b/lib/locking-selftest-hardirq.h
new file mode 100644
index 000000000000..10d4a150b259
--- /dev/null
+++ b/lib/locking-selftest-hardirq.h
@@ -0,0 +1,9 @@
1#undef IRQ_DISABLE
2#undef IRQ_ENABLE
3#undef IRQ_ENTER
4#undef IRQ_EXIT
5
6#define IRQ_ENABLE HARDIRQ_ENABLE
7#define IRQ_DISABLE HARDIRQ_DISABLE
8#define IRQ_ENTER HARDIRQ_ENTER
9#define IRQ_EXIT HARDIRQ_EXIT
diff --git a/lib/locking-selftest-mutex.h b/lib/locking-selftest-mutex.h
new file mode 100644
index 000000000000..68601b6f584b
--- /dev/null
+++ b/lib/locking-selftest-mutex.h
@@ -0,0 +1,11 @@
1#undef LOCK
2#define LOCK ML
3
4#undef UNLOCK
5#define UNLOCK MU
6
7#undef RLOCK
8#undef WLOCK
9
10#undef INIT
11#define INIT MI
diff --git a/lib/locking-selftest-rlock-hardirq.h b/lib/locking-selftest-rlock-hardirq.h
new file mode 100644
index 000000000000..9f517ebcb786
--- /dev/null
+++ b/lib/locking-selftest-rlock-hardirq.h
@@ -0,0 +1,2 @@
1#include "locking-selftest-rlock.h"
2#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-rlock-softirq.h b/lib/locking-selftest-rlock-softirq.h
new file mode 100644
index 000000000000..981455db7ff0
--- /dev/null
+++ b/lib/locking-selftest-rlock-softirq.h
@@ -0,0 +1,2 @@
1#include "locking-selftest-rlock.h"
2#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-rlock.h b/lib/locking-selftest-rlock.h
new file mode 100644
index 000000000000..6789044f4d0e
--- /dev/null
+++ b/lib/locking-selftest-rlock.h
@@ -0,0 +1,14 @@
1#undef LOCK
2#define LOCK RL
3
4#undef UNLOCK
5#define UNLOCK RU
6
7#undef RLOCK
8#define RLOCK RL
9
10#undef WLOCK
11#define WLOCK WL
12
13#undef INIT
14#define INIT RWI
diff --git a/lib/locking-selftest-rsem.h b/lib/locking-selftest-rsem.h
new file mode 100644
index 000000000000..62da886680c7
--- /dev/null
+++ b/lib/locking-selftest-rsem.h
@@ -0,0 +1,14 @@
1#undef LOCK
2#define LOCK RSL
3
4#undef UNLOCK
5#define UNLOCK RSU
6
7#undef RLOCK
8#define RLOCK RSL
9
10#undef WLOCK
11#define WLOCK WSL
12
13#undef INIT
14#define INIT RWSI
diff --git a/lib/locking-selftest-softirq.h b/lib/locking-selftest-softirq.h
new file mode 100644
index 000000000000..a83de2a04ace
--- /dev/null
+++ b/lib/locking-selftest-softirq.h
@@ -0,0 +1,9 @@
1#undef IRQ_DISABLE
2#undef IRQ_ENABLE
3#undef IRQ_ENTER
4#undef IRQ_EXIT
5
6#define IRQ_DISABLE SOFTIRQ_DISABLE
7#define IRQ_ENABLE SOFTIRQ_ENABLE
8#define IRQ_ENTER SOFTIRQ_ENTER
9#define IRQ_EXIT SOFTIRQ_EXIT
diff --git a/lib/locking-selftest-spin-hardirq.h b/lib/locking-selftest-spin-hardirq.h
new file mode 100644
index 000000000000..693198dce30a
--- /dev/null
+++ b/lib/locking-selftest-spin-hardirq.h
@@ -0,0 +1,2 @@
1#include "locking-selftest-spin.h"
2#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-spin-softirq.h b/lib/locking-selftest-spin-softirq.h
new file mode 100644
index 000000000000..c472e2a87ffc
--- /dev/null
+++ b/lib/locking-selftest-spin-softirq.h
@@ -0,0 +1,2 @@
1#include "locking-selftest-spin.h"
2#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-spin.h b/lib/locking-selftest-spin.h
new file mode 100644
index 000000000000..ccd1b4b09757
--- /dev/null
+++ b/lib/locking-selftest-spin.h
@@ -0,0 +1,11 @@
1#undef LOCK
2#define LOCK L
3
4#undef UNLOCK
5#define UNLOCK U
6
7#undef RLOCK
8#undef WLOCK
9
10#undef INIT
11#define INIT SI
diff --git a/lib/locking-selftest-wlock-hardirq.h b/lib/locking-selftest-wlock-hardirq.h
new file mode 100644
index 000000000000..2dd2e5122caa
--- /dev/null
+++ b/lib/locking-selftest-wlock-hardirq.h
@@ -0,0 +1,2 @@
1#include "locking-selftest-wlock.h"
2#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-wlock-softirq.h b/lib/locking-selftest-wlock-softirq.h
new file mode 100644
index 000000000000..cb80d1cb944e
--- /dev/null
+++ b/lib/locking-selftest-wlock-softirq.h
@@ -0,0 +1,2 @@
1#include "locking-selftest-wlock.h"
2#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-wlock.h b/lib/locking-selftest-wlock.h
new file mode 100644
index 000000000000..0815322d99ed
--- /dev/null
+++ b/lib/locking-selftest-wlock.h
@@ -0,0 +1,14 @@
1#undef LOCK
2#define LOCK WL
3
4#undef UNLOCK
5#define UNLOCK WU
6
7#undef RLOCK
8#define RLOCK RL
9
10#undef WLOCK
11#define WLOCK WL
12
13#undef INIT
14#define INIT RWI
diff --git a/lib/locking-selftest-wsem.h b/lib/locking-selftest-wsem.h
new file mode 100644
index 000000000000..b88c5f2dc5f0
--- /dev/null
+++ b/lib/locking-selftest-wsem.h
@@ -0,0 +1,14 @@
1#undef LOCK
2#define LOCK WSL
3
4#undef UNLOCK
5#define UNLOCK WSU
6
7#undef RLOCK
8#define RLOCK RSL
9
10#undef WLOCK
11#define WLOCK WSL
12
13#undef INIT
14#define INIT RWSI
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
new file mode 100644
index 000000000000..7945787f439a
--- /dev/null
+++ b/lib/locking-selftest.c
@@ -0,0 +1,1216 @@
1/*
2 * lib/locking-selftest.c
3 *
4 * Testsuite for various locking APIs: spinlocks, rwlocks,
5 * mutexes and rw-semaphores.
6 *
 7 * It checks both false positives and false negatives.
8 *
9 * Started by Ingo Molnar:
10 *
11 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
12 */
13#include <linux/rwsem.h>
14#include <linux/mutex.h>
15#include <linux/sched.h>
16#include <linux/delay.h>
17#include <linux/module.h>
18#include <linux/lockdep.h>
19#include <linux/spinlock.h>
20#include <linux/kallsyms.h>
21#include <linux/interrupt.h>
22#include <linux/debug_locks.h>
23#include <linux/irqflags.h>
24
25/*
26 * Change this to 1 if you want to see the failure printouts:
27 */
28static unsigned int debug_locks_verbose;
29
30static int __init setup_debug_locks_verbose(char *str)
31{
32 get_option(&str, &debug_locks_verbose);
33
34 return 1;
35}
36
37__setup("debug_locks_verbose=", setup_debug_locks_verbose);
38
39#define FAILURE 0
40#define SUCCESS 1
41
42#define LOCKTYPE_SPIN 0x1
43#define LOCKTYPE_RWLOCK 0x2
44#define LOCKTYPE_MUTEX 0x4
45#define LOCKTYPE_RWSEM 0x8
46
47/*
48 * Normal standalone locks, for the circular and irq-context
49 * dependency tests:
50 */
51static DEFINE_SPINLOCK(lock_A);
52static DEFINE_SPINLOCK(lock_B);
53static DEFINE_SPINLOCK(lock_C);
54static DEFINE_SPINLOCK(lock_D);
55
56static DEFINE_RWLOCK(rwlock_A);
57static DEFINE_RWLOCK(rwlock_B);
58static DEFINE_RWLOCK(rwlock_C);
59static DEFINE_RWLOCK(rwlock_D);
60
61static DEFINE_MUTEX(mutex_A);
62static DEFINE_MUTEX(mutex_B);
63static DEFINE_MUTEX(mutex_C);
64static DEFINE_MUTEX(mutex_D);
65
66static DECLARE_RWSEM(rwsem_A);
67static DECLARE_RWSEM(rwsem_B);
68static DECLARE_RWSEM(rwsem_C);
69static DECLARE_RWSEM(rwsem_D);
70
71/*
 72 * Locks that we also initialize dynamically, so that
 73 * e.g. X1 and X2 become two instances of the same class,
74 * but X* and Y* are different classes. We do this so that
75 * we do not trigger a real lockup:
76 */
77static DEFINE_SPINLOCK(lock_X1);
78static DEFINE_SPINLOCK(lock_X2);
79static DEFINE_SPINLOCK(lock_Y1);
80static DEFINE_SPINLOCK(lock_Y2);
81static DEFINE_SPINLOCK(lock_Z1);
82static DEFINE_SPINLOCK(lock_Z2);
83
84static DEFINE_RWLOCK(rwlock_X1);
85static DEFINE_RWLOCK(rwlock_X2);
86static DEFINE_RWLOCK(rwlock_Y1);
87static DEFINE_RWLOCK(rwlock_Y2);
88static DEFINE_RWLOCK(rwlock_Z1);
89static DEFINE_RWLOCK(rwlock_Z2);
90
91static DEFINE_MUTEX(mutex_X1);
92static DEFINE_MUTEX(mutex_X2);
93static DEFINE_MUTEX(mutex_Y1);
94static DEFINE_MUTEX(mutex_Y2);
95static DEFINE_MUTEX(mutex_Z1);
96static DEFINE_MUTEX(mutex_Z2);
97
98static DECLARE_RWSEM(rwsem_X1);
99static DECLARE_RWSEM(rwsem_X2);
100static DECLARE_RWSEM(rwsem_Y1);
101static DECLARE_RWSEM(rwsem_Y2);
102static DECLARE_RWSEM(rwsem_Z1);
103static DECLARE_RWSEM(rwsem_Z2);
104
105/*
106 * non-inlined runtime initializers, to let separate locks share
107 * the same lock-class:
108 */
109#define INIT_CLASS_FUNC(class) \
110static noinline void \
111init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
112 struct rw_semaphore *rwsem) \
113{ \
114 spin_lock_init(lock); \
115 rwlock_init(rwlock); \
116 mutex_init(mutex); \
117 init_rwsem(rwsem); \
118}
119
120INIT_CLASS_FUNC(X)
121INIT_CLASS_FUNC(Y)
122INIT_CLASS_FUNC(Z)
123
124static void init_shared_classes(void)
125{
126 init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
127 init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
128
129 init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1);
130 init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2);
131
132 init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1);
133 init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2);
134}
135
136/*
137 * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests.
138 * The following functions use a lock from a simulated hardirq/softirq
139 * context, causing the locks to be marked as hardirq-safe/softirq-safe:
140 */
141
142#define HARDIRQ_DISABLE local_irq_disable
143#define HARDIRQ_ENABLE local_irq_enable
144
145#define HARDIRQ_ENTER() \
146 local_irq_disable(); \
147 irq_enter(); \
148 WARN_ON(!in_irq());
149
150#define HARDIRQ_EXIT() \
151 __irq_exit(); \
152 local_irq_enable();
153
154#define SOFTIRQ_DISABLE local_bh_disable
155#define SOFTIRQ_ENABLE local_bh_enable
156
157#define SOFTIRQ_ENTER() \
158 local_bh_disable(); \
159 local_irq_disable(); \
160 trace_softirq_enter(); \
161 WARN_ON(!in_softirq());
162
163#define SOFTIRQ_EXIT() \
164 trace_softirq_exit(); \
165 local_irq_enable(); \
166 local_bh_enable();
167
168/*
169 * Shortcuts for lock/unlock API variants, to keep
170 * the testcases compact:
171 */
172#define L(x) spin_lock(&lock_##x)
173#define U(x) spin_unlock(&lock_##x)
174#define LU(x) L(x); U(x)
175#define SI(x) spin_lock_init(&lock_##x)
176
177#define WL(x) write_lock(&rwlock_##x)
178#define WU(x) write_unlock(&rwlock_##x)
179#define WLU(x) WL(x); WU(x)
180
181#define RL(x) read_lock(&rwlock_##x)
182#define RU(x) read_unlock(&rwlock_##x)
183#define RLU(x) RL(x); RU(x)
184#define RWI(x) rwlock_init(&rwlock_##x)
185
186#define ML(x) mutex_lock(&mutex_##x)
187#define MU(x) mutex_unlock(&mutex_##x)
188#define MI(x) mutex_init(&mutex_##x)
189
190#define WSL(x) down_write(&rwsem_##x)
191#define WSU(x) up_write(&rwsem_##x)
192
193#define RSL(x) down_read(&rwsem_##x)
194#define RSU(x) up_read(&rwsem_##x)
195#define RWSI(x) init_rwsem(&rwsem_##x)
196
197#define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
198
199/*
200 * Generate different permutations of the same testcase, using
201 * the same basic lock-dependency/state events:
202 */
203
204#define GENERATE_TESTCASE(name) \
205 \
206static void name(void) { E(); }
207
208#define GENERATE_PERMUTATIONS_2_EVENTS(name) \
209 \
210static void name##_12(void) { E1(); E2(); } \
211static void name##_21(void) { E2(); E1(); }
212
213#define GENERATE_PERMUTATIONS_3_EVENTS(name) \
214 \
215static void name##_123(void) { E1(); E2(); E3(); } \
216static void name##_132(void) { E1(); E3(); E2(); } \
217static void name##_213(void) { E2(); E1(); E3(); } \
218static void name##_231(void) { E2(); E3(); E1(); } \
219static void name##_312(void) { E3(); E1(); E2(); } \
220static void name##_321(void) { E3(); E2(); E1(); }
221
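
/*
 * Sketch of one expansion: with the spinlock shortcut header
 * included, GENERATE_TESTCASE(AA_spin) from the AA template
 * below becomes roughly:
 *
 *	static void AA_spin(void)
 *	{
 *		spin_lock(&lock_X1);
 *		spin_lock(&lock_X2);	// same class: must be reported
 *	}
 */
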
222/*
223 * AA deadlock:
224 */
225
226#define E() \
227 \
228 LOCK(X1); \
229 LOCK(X2); /* this one should fail */
230
231/*
232 * 6 testcases:
233 */
234#include "locking-selftest-spin.h"
235GENERATE_TESTCASE(AA_spin)
236#include "locking-selftest-wlock.h"
237GENERATE_TESTCASE(AA_wlock)
238#include "locking-selftest-rlock.h"
239GENERATE_TESTCASE(AA_rlock)
240#include "locking-selftest-mutex.h"
241GENERATE_TESTCASE(AA_mutex)
242#include "locking-selftest-wsem.h"
243GENERATE_TESTCASE(AA_wsem)
244#include "locking-selftest-rsem.h"
245GENERATE_TESTCASE(AA_rsem)
246
247#undef E
248
249/*
250 * Special-case for read-locking, they are
251 * allowed to recurse on the same lock class:
252 */
253static void rlock_AA1(void)
254{
255 RL(X1);
256 RL(X1); // this one should NOT fail
257}
258
259static void rlock_AA1B(void)
260{
261 RL(X1);
262 RL(X2); // this one should NOT fail
263}
264
265static void rsem_AA1(void)
266{
267 RSL(X1);
268 RSL(X1); // this one should fail
269}
270
271static void rsem_AA1B(void)
272{
273 RSL(X1);
274 RSL(X2); // this one should fail
275}
276/*
 277 * Mixing read and write locks is not allowed:
278 */
279static void rlock_AA2(void)
280{
281 RL(X1);
282 WL(X2); // this one should fail
283}
284
285static void rsem_AA2(void)
286{
287 RSL(X1);
288 WSL(X2); // this one should fail
289}
290
291static void rlock_AA3(void)
292{
293 WL(X1);
294 RL(X2); // this one should fail
295}
296
297static void rsem_AA3(void)
298{
299 WSL(X1);
300 RSL(X2); // this one should fail
301}
302
303/*
304 * ABBA deadlock:
305 */
306
307#define E() \
308 \
309 LOCK_UNLOCK_2(A, B); \
310 LOCK_UNLOCK_2(B, A); /* fail */
311
312/*
313 * 6 testcases:
314 */
315#include "locking-selftest-spin.h"
316GENERATE_TESTCASE(ABBA_spin)
317#include "locking-selftest-wlock.h"
318GENERATE_TESTCASE(ABBA_wlock)
319#include "locking-selftest-rlock.h"
320GENERATE_TESTCASE(ABBA_rlock)
321#include "locking-selftest-mutex.h"
322GENERATE_TESTCASE(ABBA_mutex)
323#include "locking-selftest-wsem.h"
324GENERATE_TESTCASE(ABBA_wsem)
325#include "locking-selftest-rsem.h"
326GENERATE_TESTCASE(ABBA_rsem)
327
328#undef E

/*
 * AB BC CA deadlock:
 */

#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(B, C);			\
	LOCK_UNLOCK_2(C, A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABBCCA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABBCCA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABBCCA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABBCCA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABBCCA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCA_rsem)

#undef E

/*
 * AB CA BC deadlock:
 */

#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(C, A);			\
	LOCK_UNLOCK_2(B, C); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABCABC_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABCABC_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABCABC_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABCABC_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABCABC_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCABC_rsem)

#undef E

/*
 * AB BC CD DA deadlock:
 */

#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(B, C);			\
	LOCK_UNLOCK_2(C, D);			\
	LOCK_UNLOCK_2(D, A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABBCCDDA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABBCCDDA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABBCCDDA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABBCCDDA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABBCCDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCDDA_rsem)

#undef E

/*
 * AB CD BD DA deadlock:
 */
#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(C, D);			\
	LOCK_UNLOCK_2(B, D);			\
	LOCK_UNLOCK_2(D, A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABCDBDDA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABCDBDDA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABCDBDDA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABCDBDDA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABCDBDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBDDA_rsem)

#undef E

/*
 * AB CD BC DA deadlock:
 */
#define E()					\
						\
	LOCK_UNLOCK_2(A, B);			\
	LOCK_UNLOCK_2(C, D);			\
	LOCK_UNLOCK_2(B, C);			\
	LOCK_UNLOCK_2(D, A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(ABCDBCDA_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(ABCDBCDA_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(ABCDBCDA_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(ABCDBCDA_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(ABCDBCDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBCDA_rsem)

#undef E

/*
 * Double unlock:
 */
#define E()					\
						\
	LOCK(A);				\
	UNLOCK(A);				\
	UNLOCK(A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(double_unlock_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(double_unlock_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(double_unlock_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(double_unlock_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(double_unlock_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(double_unlock_rsem)

#undef E

/*
 * Bad unlock ordering:
 */
#define E()					\
						\
	LOCK(A);				\
	LOCK(B);				\
	UNLOCK(A); /* fail */			\
	UNLOCK(B);

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(bad_unlock_order_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(bad_unlock_order_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(bad_unlock_order_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(bad_unlock_order_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(bad_unlock_order_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(bad_unlock_order_rsem)

#undef E

/*
 * initializing a held lock:
 */
#define E()					\
						\
	LOCK(A);				\
	INIT(A); /* fail */

/*
 * 6 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_TESTCASE(init_held_spin)
#include "locking-selftest-wlock.h"
GENERATE_TESTCASE(init_held_wlock)
#include "locking-selftest-rlock.h"
GENERATE_TESTCASE(init_held_rlock)
#include "locking-selftest-mutex.h"
GENERATE_TESTCASE(init_held_mutex)
#include "locking-selftest-wsem.h"
GENERATE_TESTCASE(init_held_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(init_held_rsem)

#undef E

/*
 * locking an irq-safe lock with irqs enabled:
 */
#define E1()					\
						\
	IRQ_ENTER();				\
	LOCK(A);				\
	UNLOCK(A);				\
	IRQ_EXIT();

#define E2()					\
						\
	LOCK(A);				\
	UNLOCK(A);

/*
 * Generate 24 testcases:
 */
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)

#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)

#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)

#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)

#include "locking-selftest-rlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)

#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)

#undef E1
#undef E2
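/*
 * Why irqsafe1 is a bug (illustrative timeline, not part of the
 * testcase itself): E2() takes A with irqs still enabled, so the irq
 * context of E1() can interrupt the holder on the same CPU:
 *
 *	LOCK(A);		<- E2(), process context, irqs on
 *	  <hardirq arrives>
 *	    LOCK(A);		<- E1() in irq context spins on A, but
 *				   the interrupted holder cannot run
 *				   until the irq returns: deadlock.
 */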

/*
 * Enabling hardirqs with a softirq-safe lock held:
 */
#define E1()					\
						\
	SOFTIRQ_ENTER();			\
	LOCK(A);				\
	UNLOCK(A);				\
	SOFTIRQ_EXIT();

#define E2()					\
						\
	HARDIRQ_DISABLE();			\
	LOCK(A);				\
	HARDIRQ_ENABLE();			\
	UNLOCK(A);

/*
 * Generate 12 testcases:
 */
#include "locking-selftest-spin.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin)

#include "locking-selftest-wlock.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock)

#include "locking-selftest-rlock.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)

#undef E1
#undef E2

/*
 * Enabling irqs with an irq-safe lock held:
 */
#define E1()					\
						\
	IRQ_ENTER();				\
	LOCK(A);				\
	UNLOCK(A);				\
	IRQ_EXIT();

#define E2()					\
						\
	IRQ_DISABLE();				\
	LOCK(A);				\
	IRQ_ENABLE();				\
	UNLOCK(A);

/*
 * Generate 24 testcases:
 */
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)

#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)

#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)

#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)

#include "locking-selftest-rlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)

#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)

#undef E1
#undef E2

/*
 * Acquiring an irq-unsafe lock while holding an irq-safe lock:
 */
#define E1()					\
						\
	LOCK(A);				\
	LOCK(B);				\
	UNLOCK(B);				\
	UNLOCK(A);

#define E2()					\
						\
	LOCK(B);				\
	UNLOCK(B);

#define E3()					\
						\
	IRQ_ENTER();				\
	LOCK(A);				\
	UNLOCK(A);				\
	IRQ_EXIT();

/*
 * Generate 36 testcases:
 */
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)

#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)

#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)

#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)

#include "locking-selftest-rlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)

#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)

#undef E1
#undef E2
#undef E3

/*
 * If a lock becomes softirq-safe, but earlier it took
 * a softirq-unsafe lock:
 */

#define E1()					\
	IRQ_DISABLE();				\
	LOCK(A);				\
	LOCK(B);				\
	UNLOCK(B);				\
	UNLOCK(A);				\
	IRQ_ENABLE();

#define E2()					\
	LOCK(B);				\
	UNLOCK(B);

#define E3()					\
	IRQ_ENTER();				\
	LOCK(A);				\
	UNLOCK(A);				\
	IRQ_EXIT();

/*
 * Generate 36 testcases:
 */
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)

#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)

#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)

#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)

#include "locking-selftest-rlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)

#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)

#undef E1
#undef E2
#undef E3

/*
 * read-lock / write-lock irq inversion.
 *
 * Deadlock scenario:
 *
 * CPU#1 is running E1(): it has write-locked A, but has not
 * taken B yet.
 *
 * CPU#2 is running E2(): it has locked B.
 *
 * A hardirq hits CPU#2 at that point and tries to read-lock A.
 *
 * The deadlock occurs because CPU#1 will spin on B, and CPU#2
 * will spin on A.
 */

#define E1()					\
						\
	IRQ_DISABLE();				\
	WL(A);					\
	LOCK(B);				\
	UNLOCK(B);				\
	WU(A);					\
	IRQ_ENABLE();

#define E2()					\
						\
	LOCK(B);				\
	UNLOCK(B);

#define E3()					\
						\
	IRQ_ENTER();				\
	RL(A);					\
	RU(A);					\
	IRQ_EXIT();

/*
 * Generate 36 testcases:
 */
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin)

#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)

#include "locking-selftest-wlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)

#include "locking-selftest-spin-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)

#include "locking-selftest-rlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)

#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)

#undef E1
#undef E2
#undef E3

/*
 * read-lock / write-lock recursion that is actually safe.
 */

#define E1()					\
						\
	IRQ_DISABLE();				\
	WL(A);					\
	WU(A);					\
	IRQ_ENABLE();

#define E2()					\
						\
	RL(A);					\
	RU(A);

#define E3()					\
						\
	IRQ_ENTER();				\
	RL(A);					\
	L(B);					\
	U(B);					\
	RU(A);					\
	IRQ_EXIT();

/*
 * Generate 12 testcases:
 */
#include "locking-selftest-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard)

#include "locking-selftest-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)

#undef E1
#undef E2
#undef E3

/*
 * read-lock / write-lock recursion that is unsafe.
 */

#define E1()					\
						\
	IRQ_DISABLE();				\
	L(B);					\
	WL(A);					\
	WU(A);					\
	U(B);					\
	IRQ_ENABLE();

#define E2()					\
						\
	RL(A);					\
	RU(A);

#define E3()					\
						\
	IRQ_ENTER();				\
	L(B);					\
	U(B);					\
	IRQ_EXIT();

/*
 * Generate 12 testcases:
 */
#include "locking-selftest-hardirq.h"
// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard)

#include "locking-selftest-softirq.h"
// GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define I_SPINLOCK(x)	lockdep_reset_lock(&lock_##x.dep_map)
# define I_RWLOCK(x)	lockdep_reset_lock(&rwlock_##x.dep_map)
# define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
#else
# define I_SPINLOCK(x)
# define I_RWLOCK(x)
# define I_MUTEX(x)
# define I_RWSEM(x)
#endif

#define I1(x)					\
	do {					\
		I_SPINLOCK(x);			\
		I_RWLOCK(x);			\
		I_MUTEX(x);			\
		I_RWSEM(x);			\
	} while (0)

#define I2(x)					\
	do {					\
		spin_lock_init(&lock_##x);	\
		rwlock_init(&rwlock_##x);	\
		mutex_init(&mutex_##x);		\
		init_rwsem(&rwsem_##x);		\
	} while (0)

static void reset_locks(void)
{
	local_irq_disable();
	I1(A); I1(B); I1(C); I1(D);
	I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
	lockdep_reset();
	I2(A); I2(B); I2(C); I2(D);
	init_shared_classes();
	local_irq_enable();
}

#undef I1
#undef I2

static int testcase_total;
static int testcase_successes;
static int expected_testcase_failures;
static int unexpected_testcase_failures;

static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
{
	unsigned long saved_preempt_count = preempt_count();
	int expected_failure = 0;

	WARN_ON(irqs_disabled());

	testcase_fn();
	/*
	 * Filter out expected failures:
	 */
#ifndef CONFIG_PROVE_LOCKING
	if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected)
		expected_failure = 1;
	if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected)
		expected_failure = 1;
	if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected)
		expected_failure = 1;
	if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected)
		expected_failure = 1;
#endif
	if (debug_locks != expected) {
		if (expected_failure) {
			expected_testcase_failures++;
			printk("failed|");
		} else {
			unexpected_testcase_failures++;
			printk("FAILED|");
		}
	} else {
		testcase_successes++;
		printk("  ok  |");
	}
	testcase_total++;

	if (debug_locks_verbose)
		printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
			lockclass_mask, debug_locks, expected);
	/*
	 * Some tests (e.g. double-unlock) might corrupt the preemption
	 * count, so restore it:
	 */
	preempt_count() = saved_preempt_count;
#ifdef CONFIG_TRACE_IRQFLAGS
	if (softirq_count())
		current->softirqs_enabled = 0;
	else
		current->softirqs_enabled = 1;
#endif

	reset_locks();
}
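/*
 * Usage sketch (assuming the SUCCESS/FAILURE expectation constants
 * defined earlier in this file): a testcase expected to trip lockdep
 * is passed FAILURE -- lockdep should detect the bug and clear
 * debug_locks, so debug_locks == expected afterwards:
 *
 *	print_testname("A-A deadlock, spinlock only");
 *	dotest(AA_spin, FAILURE, LOCKTYPE_SPIN);
 *	printk("\n");
 *
 * The DO_TESTCASE_*() macros below are bulk generators of exactly
 * this pattern.
 */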

static inline void print_testname(const char *testname)
{
	printk("%33s:", testname);
}

#define DO_TESTCASE_1(desc, name, nr)				\
	print_testname(desc"/"#nr);				\
	dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK);		\
	printk("\n");

#define DO_TESTCASE_1B(desc, name, nr)				\
	print_testname(desc"/"#nr);				\
	dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK);		\
	printk("\n");

#define DO_TESTCASE_3(desc, name, nr)				\
	print_testname(desc"/"#nr);				\
	dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN);	\
	dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK);	\
	dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK);	\
	printk("\n");

#define DO_TESTCASE_3RW(desc, name, nr)				\
	print_testname(desc"/"#nr);				\
	dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
	dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK);	\
	dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK);	\
	printk("\n");

#define DO_TESTCASE_6(desc, name)				\
	print_testname(desc);					\
	dotest(name##_spin, FAILURE, LOCKTYPE_SPIN);		\
	dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK);		\
	dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK);		\
	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
	printk("\n");

#define DO_TESTCASE_6_SUCCESS(desc, name)			\
	print_testname(desc);					\
	dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN);		\
	dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK);		\
	dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK);		\
	dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX);		\
	dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM);		\
	dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM);		\
	printk("\n");

/*
 * 'read' variant: rlocks must not trigger.
 */
#define DO_TESTCASE_6R(desc, name)				\
	print_testname(desc);					\
	dotest(name##_spin, FAILURE, LOCKTYPE_SPIN);		\
	dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK);		\
	dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK);		\
	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
	printk("\n");

#define DO_TESTCASE_2I(desc, name, nr)				\
	DO_TESTCASE_1("hard-"desc, name##_hard, nr);		\
	DO_TESTCASE_1("soft-"desc, name##_soft, nr);

#define DO_TESTCASE_2IB(desc, name, nr)				\
	DO_TESTCASE_1B("hard-"desc, name##_hard, nr);		\
	DO_TESTCASE_1B("soft-"desc, name##_soft, nr);

#define DO_TESTCASE_6I(desc, name, nr)				\
	DO_TESTCASE_3("hard-"desc, name##_hard, nr);		\
	DO_TESTCASE_3("soft-"desc, name##_soft, nr);

#define DO_TESTCASE_6IRW(desc, name, nr)			\
	DO_TESTCASE_3RW("hard-"desc, name##_hard, nr);		\
	DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);

#define DO_TESTCASE_2x3(desc, name)				\
	DO_TESTCASE_3(desc, name, 12);				\
	DO_TESTCASE_3(desc, name, 21);

#define DO_TESTCASE_2x6(desc, name)				\
	DO_TESTCASE_6I(desc, name, 12);				\
	DO_TESTCASE_6I(desc, name, 21);

#define DO_TESTCASE_6x2(desc, name)				\
	DO_TESTCASE_2I(desc, name, 123);			\
	DO_TESTCASE_2I(desc, name, 132);			\
	DO_TESTCASE_2I(desc, name, 213);			\
	DO_TESTCASE_2I(desc, name, 231);			\
	DO_TESTCASE_2I(desc, name, 312);			\
	DO_TESTCASE_2I(desc, name, 321);

#define DO_TESTCASE_6x2B(desc, name)				\
	DO_TESTCASE_2IB(desc, name, 123);			\
	DO_TESTCASE_2IB(desc, name, 132);			\
	DO_TESTCASE_2IB(desc, name, 213);			\
	DO_TESTCASE_2IB(desc, name, 231);			\
	DO_TESTCASE_2IB(desc, name, 312);			\
	DO_TESTCASE_2IB(desc, name, 321);

#define DO_TESTCASE_6x6(desc, name)				\
	DO_TESTCASE_6I(desc, name, 123);			\
	DO_TESTCASE_6I(desc, name, 132);			\
	DO_TESTCASE_6I(desc, name, 213);			\
	DO_TESTCASE_6I(desc, name, 231);			\
	DO_TESTCASE_6I(desc, name, 312);			\
	DO_TESTCASE_6I(desc, name, 321);

#define DO_TESTCASE_6x6RW(desc, name)				\
	DO_TESTCASE_6IRW(desc, name, 123);			\
	DO_TESTCASE_6IRW(desc, name, 132);			\
	DO_TESTCASE_6IRW(desc, name, 213);			\
	DO_TESTCASE_6IRW(desc, name, 231);			\
	DO_TESTCASE_6IRW(desc, name, 312);			\
	DO_TESTCASE_6IRW(desc, name, 321);
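/*
 * The names encode the fan-out; e.g. DO_TESTCASE_6x6 accounts for the
 * "Generate 36 testcases" comments above:
 *
 *	6 permutations (123..321)
 *	  x 2 irq flavors (hard, soft)		<- DO_TESTCASE_6I
 *	  x 3 lock types (spin, wlock, rlock)	<- DO_TESTCASE_3
 *	= 36 dotest() invocations
 */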


void locking_selftest(void)
{
	/*
	 * Got a locking failure before the selftest ran?
	 */
	if (!debug_locks) {
		printk("----------------------------------\n");
		printk("| Locking API testsuite disabled |\n");
		printk("----------------------------------\n");
		return;
	}

	/*
	 * Run the testsuite:
	 */
	printk("------------------------\n");
	printk("| Locking API testsuite:\n");
	printk("----------------------------------------------------------------------------\n");
	printk("                                 | spin |wlock |rlock |mutex | wsem | rsem |\n");
	printk("  --------------------------------------------------------------------------\n");

	init_shared_classes();
	debug_locks_silent = !debug_locks_verbose;

	DO_TESTCASE_6R("A-A deadlock", AA);
	DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
	DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA);
	DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC);
	DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA);
	DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA);
	DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
	DO_TESTCASE_6("double unlock", double_unlock);
	DO_TESTCASE_6("initialize held", init_held);
	DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order);

	printk("  --------------------------------------------------------------------------\n");
	print_testname("recursive read-lock");
	printk("             |");
	dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
	printk("             |");
	dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
	printk("\n");

	print_testname("recursive read-lock #2");
	printk("             |");
	dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
	printk("             |");
	dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
	printk("\n");

	print_testname("mixed read-write-lock");
	printk("             |");
	dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
	printk("             |");
	dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
	printk("\n");

	print_testname("mixed write-read-lock");
	printk("             |");
	dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
	printk("             |");
	dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
	printk("\n");

	printk("  --------------------------------------------------------------------------\n");

	/*
	 * irq-context testcases:
	 */
	DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
	DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
	DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
	DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
	DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
	DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);

	DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
//	DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);

	if (unexpected_testcase_failures) {
		printk("-----------------------------------------------------------------\n");
		debug_locks = 0;
		printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
			unexpected_testcase_failures, testcase_total);
		printk("-----------------------------------------------------------------\n");
	} else if (expected_testcase_failures && testcase_successes) {
		printk("--------------------------------------------------------\n");
		printk("%3d out of %3d testcases failed, as expected. |\n",
			expected_testcase_failures, testcase_total);
		printk("----------------------------------------------------\n");
		debug_locks = 1;
	} else if (expected_testcase_failures && !testcase_successes) {
		printk("--------------------------------------------------------\n");
		printk("All %3d testcases failed, as expected. |\n",
			expected_testcase_failures);
		printk("----------------------------------------\n");
		debug_locks = 1;
	} else {
		printk("-------------------------------------------------------\n");
		printk("Good, all %3d testcases passed! |\n",
			testcase_successes);
		printk("---------------------------------\n");
		debug_locks = 1;
	}
	debug_locks_silent = 0;
}
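For context, the selftest is intended to run exactly once during early boot; a sketch of the call site, assumed from the companion init/main.c change in this series (not shown in this excerpt):

	/* in start_kernel(), once the allocators and lockdep are up: */
	locking_selftest();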
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 40ffde940a86..db4fed74b940 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,27 +17,22 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid, str, sem->activity,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#endif
-
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }
 
@@ -56,8 +51,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	struct task_struct *tsk;
 	int woken;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
 	if (!wakewrite) {
@@ -104,7 +97,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 	sem->activity += woken;
 
  out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 }
 
@@ -138,8 +130,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_read");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -171,9 +161,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_read");
+	;
 }
 
 /*
@@ -184,7 +173,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_read_trylock");
 
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -196,7 +184,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
 }
 
@@ -204,13 +191,11 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_write");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -242,9 +227,13 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_write");
+	;
+}
+
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
 }
 
 /*
@@ -255,8 +244,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_write_trylock");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -267,7 +254,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
 }
 
@@ -278,16 +264,12 @@ void fastcall __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_read");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -297,8 +279,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
@@ -306,8 +286,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 1);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -318,8 +296,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __downgrade_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
@@ -327,18 +303,14 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 		sem = __rwsem_do_wake(sem, 0);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
 EXPORT_SYMBOL(__down_write);
 EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
 EXPORT_SYMBOL(__downgrade_write);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
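The new __init_rwsem() takes a name and a lock_class_key so that lockdep can key each semaphore's class to its initialization site. The caller-facing half lives in the rwsem headers (not shown in this excerpt); it looks roughly like this sketch, with the static key giving every init_rwsem() call site its own lock class:

	/* assumed header-side wrapper, one static key per call site: */
	#define init_rwsem(sem)					\
	do {							\
		static struct lock_class_key __key;		\
								\
		__init_rwsem((sem), #sem, &__key);		\
	} while (0)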
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 62fa4eba9ffe..b322421c2969 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -8,6 +8,26 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
+/*
+ * Initialize an rwsem:
+ */
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+}
+
+EXPORT_SYMBOL(__init_rwsem);
+
 struct rwsem_waiter {
 	struct list_head list;
 	struct task_struct *task;
@@ -16,17 +36,6 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-#undef rwsemtrace
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	printk("sem=%p\n", sem);
-	printk("(sem)=%08lx\n", sem->count);
-	if (sem->debug)
-		printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
-}
-#endif
-
 /*
  * handle the lock release when processes blocked on it that can now run
  * - if we come here from up_xxxx(), then:
@@ -45,8 +54,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	struct list_head *next;
 	signed long oldcount, woken, loop;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	if (downgrading)
 		goto dont_wake_writers;
 
@@ -127,7 +134,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 	next->prev = &sem->wait_list;
 
  out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 
 	/* undo the change to count, but check for a transition 1->0 */
@@ -186,13 +192,9 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem, "Entering rwsem_down_read_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_READ;
 	rwsem_down_failed_common(sem, &waiter,
 				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
-
-	rwsemtrace(sem, "Leaving rwsem_down_read_failed");
 	return sem;
 }
 
@@ -204,12 +206,9 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem, "Entering rwsem_down_write_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
 	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
 
-	rwsemtrace(sem, "Leaving rwsem_down_write_failed");
 	return sem;
 }
 
@@ -221,8 +220,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering rwsem_wake");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
@@ -231,8 +228,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving rwsem_wake");
-
 	return sem;
 }
 
@@ -245,8 +240,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering rwsem_downgrade_wake");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
@@ -255,7 +248,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
 	return sem;
 }
 
@@ -263,6 +255,3 @@ EXPORT_SYMBOL(rwsem_down_read_failed);
 EXPORT_SYMBOL(rwsem_down_write_failed);
 EXPORT_SYMBOL(rwsem_wake);
 EXPORT_SYMBOL(rwsem_downgrade_wake);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 93c15ee3f8ea..3d9c4dc965ed 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -8,38 +8,71 @@
 
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 #include <linux/delay.h>
+#include <linux/module.h>
+
+void __spin_lock_init(spinlock_t *lock, const char *name,
+		      struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key);
+#endif
+	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->magic = SPINLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__spin_lock_init);
+
+void __rwlock_init(rwlock_t *lock, const char *name,
+		   struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key);
+#endif
+	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+	lock->magic = RWLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__rwlock_init);
 
 static void spin_bug(spinlock_t *lock, const char *msg)
 {
-	static long print_once = 1;
 	struct task_struct *owner = NULL;
 
-	if (xchg(&print_once, 0)) {
-		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
-			owner = lock->owner;
-		printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
-			msg, raw_smp_processor_id(),
-			current->comm, current->pid);
-		printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
-			".owner_cpu: %d\n",
-			lock, lock->magic,
-			owner ? owner->comm : "<none>",
-			owner ? owner->pid : -1,
-			lock->owner_cpu);
-		dump_stack();
-#ifdef CONFIG_SMP
-		/*
-		 * We cannot continue on SMP:
-		 */
-//		panic("bad locking");
-#endif
-	}
+	if (!debug_locks_off())
+		return;
+
+	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
+		owner = lock->owner;
+	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
+		msg, raw_smp_processor_id(),
+		current->comm, current->pid);
+	printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+		".owner_cpu: %d\n",
+		lock, lock->magic,
+		owner ? owner->comm : "<none>",
+		owner ? owner->pid : -1,
+		lock->owner_cpu);
+	dump_stack();
 }
 
 #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
 
-static inline void debug_spin_lock_before(spinlock_t *lock)
+static inline void
+debug_spin_lock_before(spinlock_t *lock)
 {
 	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
 	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -118,20 +151,13 @@ void _raw_spin_unlock(spinlock_t *lock)
 
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
-	static long print_once = 1;
-
-	if (xchg(&print_once, 0)) {
-		printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
-			msg, raw_smp_processor_id(), current->comm,
-			current->pid, lock);
-		dump_stack();
-#ifdef CONFIG_SMP
-	/*
-	 * We cannot continue on SMP:
-	 */
-	panic("bad locking");
-#endif
-	}
+	if (!debug_locks_off())
+		return;
+
+	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
+		msg, raw_smp_processor_id(), current->comm,
+		current->pid, lock);
+	dump_stack();
 }
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
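__spin_lock_init() and __rwlock_init() follow the same pattern as __init_rwsem() above: the header-side spin_lock_init()/rwlock_init() macros (not in this excerpt) are assumed to plant a static struct lock_class_key at each call site and pass it down, so locks initialized at different places land in different lockdep classes while all instances from one site share a class.

	/* assumed header-side counterpart, mirroring the rwsem change: */
	#define spin_lock_init(lock)				\
	do {							\
		static struct lock_class_key __key;		\
								\
		__spin_lock_init((lock), #lock, &__key);	\
	} while (0)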
diff --git a/mm/memory.c b/mm/memory.c
index 7e2a4b1580e3..c1e14c9e67e4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -503,7 +503,7 @@ again:
 		return -ENOMEM;
 	src_pte = pte_offset_map_nested(src_pmd, addr);
 	src_ptl = pte_lockptr(src_mm, src_pmd);
-	spin_lock(src_ptl);
+	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 
 	do {
 		/*
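Both page-table locks here belong to the same lockdep class, and copy_pte_range() legitimately holds the source and destination locks at once, in a fixed order. spin_lock_nested() tells lockdep to treat the second acquisition as a distinct subclass instead of reporting same-class recursion. A generic sketch of the annotation (illustrative, with hypothetical dst/src structures, not from the patch):

	/* two locks of one class, always taken in the same order: */
	spin_lock(&dst->lock);					/* subclass 0 */
	spin_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);	/* subclass 1 */
	/* ... copy ... */
	spin_unlock(&src->lock);
	spin_unlock(&dst->lock);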
diff --git a/mm/mremap.c b/mm/mremap.c
index 1903bdf65e42..7c15cf3373ad 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -97,7 +97,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	new_pte = pte_offset_map_nested(new_pmd, new_addr);
 	new_ptl = pte_lockptr(mm, new_pmd);
 	if (new_ptl != old_ptl)
-		spin_lock(new_ptl);
+		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
 
 	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
 				   new_pte++, new_addr += PAGE_SIZE) {
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d46ed0f1dc06..b9af136e5cfa 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -225,7 +225,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
  * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that
  * we select a process with CAP_SYS_RAW_IO set).
  */
-static void __oom_kill_task(task_t *p, const char *message)
+static void __oom_kill_task(struct task_struct *p, const char *message)
 {
 	if (p->pid == 1) {
 		WARN_ON(1);
@@ -255,10 +255,10 @@ static void __oom_kill_task(task_t *p, const char *message)
 	force_sig(SIGKILL, p);
 }
 
-static int oom_kill_task(task_t *p, const char *message)
+static int oom_kill_task(struct task_struct *p, const char *message)
 {
 	struct mm_struct *mm;
-	task_t * g, * q;
+	struct task_struct *g, *q;
 
 	mm = p->mm;
 
@@ -316,7 +316,7 @@ static int oom_kill_process(struct task_struct *p, unsigned long points,
  */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 {
-	task_t *p;
+	struct task_struct *p;
 	unsigned long points = 0;
 
 	if (printk_ratelimit()) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3e792a583f3b..54a4f5375bba 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2005,6 +2005,10 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 
 		zone->spanned_pages = size;
 		zone->present_pages = realsize;
+#ifdef CONFIG_NUMA
+		zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio)
+						/ 100;
+#endif
 		zone->name = zone_names[j];
 		spin_lock_init(&zone->lock);
 		spin_lock_init(&zone->lru_lock);
@@ -2298,6 +2302,24 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 	return 0;
 }
 
+#ifdef CONFIG_NUMA
+int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
+	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+	struct zone *zone;
+	int rc;
+
+	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	for_each_zone(zone)
+		zone->min_unmapped_ratio = (zone->present_pages *
+				sysctl_min_unmapped_ratio) / 100;
+	return 0;
+}
+#endif
+
 /*
  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
diff --git a/mm/slab.c b/mm/slab.c
index 3936af344542..85c2e03098a7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1021,7 +1021,8 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
+				   int nesting)
 {
 	struct slab *slabp = virt_to_slab(objp);
 	int nodeid = slabp->nodeid;
@@ -1039,7 +1040,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
-		spin_lock(&alien->lock);
+		spin_lock_nested(&alien->lock, nesting);
 		if (unlikely(alien->avail == alien->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
 			__drain_alien_cache(cachep, alien, nodeid);
@@ -1068,7 +1069,8 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
+				   int nesting)
 {
 	return 0;
 }
@@ -1272,6 +1274,11 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 
 	local_irq_disable();
 	memcpy(ptr, list, sizeof(struct kmem_list3));
+	/*
+	 * Do not assume that spinlocks can be initialized via memcpy:
+	 */
+	spin_lock_init(&ptr->list_lock);
+
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->nodelists[nodeid] = ptr;
 	local_irq_enable();
@@ -1398,7 +1405,7 @@ void __init kmem_cache_init(void)
 	}
 	/* 4) Replace the bootstrap head arrays */
 	{
-		void *ptr;
+		struct array_cache *ptr;
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
@@ -1406,6 +1413,11 @@ void __init kmem_cache_init(void)
 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
+		/*
+		 * Do not assume that spinlocks can be initialized via memcpy:
+		 */
+		spin_lock_init(&ptr->lock);
+
 		cache_cache.array[smp_processor_id()] = ptr;
 		local_irq_enable();
 
@@ -1416,6 +1428,11 @@ void __init kmem_cache_init(void)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
 		       sizeof(struct arraycache_init));
+		/*
+		 * Do not assume that spinlocks can be initialized via memcpy:
+		 */
+		spin_lock_init(&ptr->lock);
+
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 		    ptr;
 		local_irq_enable();
@@ -1743,6 +1760,8 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 }
 #endif
 
+static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting);
+
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
@@ -1766,8 +1785,17 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
 		kmem_freepages(cachep, addr);
-		if (OFF_SLAB(cachep))
-			kmem_cache_free(cachep->slabp_cache, slabp);
+		if (OFF_SLAB(cachep)) {
+			unsigned long flags;
+
+			/*
+			 * lockdep: we may nest inside an already held
+			 * ac->lock, so pass in a nesting flag:
+			 */
+			local_irq_save(flags);
+			__cache_free(cachep->slabp_cache, slabp, 1);
+			local_irq_restore(flags);
+		}
 	}
 }
 
@@ -3072,7 +3100,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		if (slabp->inuse == 0) {
 			if (l3->free_objects > l3->free_limit) {
 				l3->free_objects -= cachep->num;
+				/*
+				 * It is safe to drop the lock. The slab is
+				 * no longer linked to the cache. cachep
+				 * cannot disappear - we are using it and
+				 * all destruction of caches must be
+				 * serialized properly by the user.
+				 */
+				spin_unlock(&l3->list_lock);
 				slab_destroy(cachep, slabp);
+				spin_lock(&l3->list_lock);
 			} else {
 				list_add(&slabp->list, &l3->slabs_free);
 			}
@@ -3098,7 +3135,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 #endif
 	check_irq_off();
 	l3 = cachep->nodelists[node];
-	spin_lock(&l3->list_lock);
+	spin_lock_nested(&l3->list_lock, SINGLE_DEPTH_NESTING);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3141,14 +3178,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released.  Called with disabled ints.
  */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	if (cache_free_alien(cachep, objp))
+	if (cache_free_alien(cachep, objp, nesting))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
@@ -3387,7 +3424,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
-	__cache_free(cachep, objp);
+	__cache_free(cachep, objp, 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3412,7 +3449,7 @@ void kfree(const void *objp)
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp);
+	__cache_free(c, (void *)objp, 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
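The slab changes close a real same-class nesting: freeing an off-slab slab descriptor re-enters the allocator while another cache's l3 list_lock may already be held. The nesting flag threads that one bit of context down to cache_free_alien(), and cache_flusharray() marks its list_lock acquisition as SINGLE_DEPTH_NESTING. The call chain being annotated, as reconstructed from the hunks above:

	kmem_cache_free(cachep, objp)
	  __cache_free(cachep, objp, 0)
	    cache_flusharray()		/* takes l3->list_lock, nested */
	      free_block()
	        slab_destroy()		/* may free an off-slab descriptor: */
	          __cache_free(cachep->slabp_cache, slabp, 1)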
diff --git a/mm/swap_state.c b/mm/swap_state.c
index fccbd9bba77b..5f7cf2a4cb55 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -38,7 +38,7 @@ static struct backing_dev_info swap_backing_dev_info = {
 
 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-	.tree_lock	= RW_LOCK_UNLOCKED,
+	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
 	.a_ops		= &swap_aops,
 	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 35f8553f893a..7b450798b458 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -330,6 +330,8 @@ void __vunmap(void *addr, int deallocate_pages)
 		return;
 	}
 
+	debug_check_no_locks_freed(addr, area->size);
+
 	if (deallocate_pages) {
 		int i;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ff2ebe9458a3..5d4c4d02254d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1503,10 +1503,6 @@ module_init(kswapd_init)
  *
  * If non-zero call zone_reclaim when the number of free pages falls below
  * the watermarks.
- *
- * In the future we may add flags to the mode. However, the page allocator
- * should only have to check that zone_reclaim_mode != 0 before calling
- * zone_reclaim().
  */
 int zone_reclaim_mode __read_mostly;
 
@@ -1524,6 +1520,12 @@ int zone_reclaim_mode __read_mostly;
 #define ZONE_RECLAIM_PRIORITY 4
 
 /*
+ * Percentage of pages in a zone that must be unmapped for zone_reclaim to
+ * occur.
+ */
+int sysctl_min_unmapped_ratio = 1;
+
+/*
  * Try to free up some pages from this zone through reclaim.
  */
 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
@@ -1590,18 +1592,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	int node_id;
 
 	/*
-	 * Do not reclaim if there are not enough reclaimable pages in this
-	 * zone that would satify this allocations.
+	 * Zone reclaim reclaims unmapped file backed pages.
 	 *
-	 * All unmapped pagecache pages are reclaimable.
-	 *
-	 * Both counters may be temporarily off a bit so we use
-	 * SWAP_CLUSTER_MAX as the boundary. It may also be good to
-	 * leave a few frequently used unmapped pagecache pages around.
+	 * A small portion of unmapped file backed pages is needed for
+	 * file I/O otherwise pages read by file I/O will be immediately
+	 * thrown out if the zone is overallocated. So we do not reclaim
+	 * if less than a specified percentage of the zone is used by
+	 * unmapped file backed pages.
 	 */
 	if (zone_page_state(zone, NR_FILE_PAGES) -
-	    zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
+	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_ratio)
 		return 0;
 
 	/*
 	 * Avoid concurrent zone reclaims, do not reclaim in a zone that does
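A worked example of the new threshold (illustrative numbers): with the default sysctl_min_unmapped_ratio of 1, a zone of 1,048,576 present pages gets min_unmapped_ratio = 1048576 * 1 / 100 = 10485 pages, so zone_reclaim() now bails out unless more than roughly 10k pages of unmapped file-backed pagecache remain in the zone, replacing the old fixed SWAP_CLUSTER_MAX cutoff.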
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 3948949a609a..458031bfff55 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -364,6 +364,14 @@ static void vlan_transfer_operstate(const struct net_device *dev, struct net_dev
364 } 364 }
365} 365}
366 366
367/*
368 * vlan network devices have devices nesting below it, and are a special
369 * "super class" of normal network devices; split their locks off into a
370 * separate class since they always nest.
371 */
372static struct lock_class_key vlan_netdev_xmit_lock_key;
373
374
367/* Attach a VLAN device to a mac address (ie Ethernet Card). 375/* Attach a VLAN device to a mac address (ie Ethernet Card).
368 * Returns the device that was created, or NULL if there was 376 * Returns the device that was created, or NULL if there was
369 * an error of some kind. 377 * an error of some kind.
@@ -460,6 +468,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
460 468
461 new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, 469 new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name,
462 vlan_setup); 470 vlan_setup);
471
463 if (new_dev == NULL) 472 if (new_dev == NULL)
464 goto out_unlock; 473 goto out_unlock;
465 474
@@ -518,6 +527,8 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
518 if (register_netdevice(new_dev)) 527 if (register_netdevice(new_dev))
519 goto out_free_newdev; 528 goto out_free_newdev;
520 529
530 lockdep_set_class(&new_dev->_xmit_lock, &vlan_netdev_xmit_lock_key);
531
521 new_dev->iflink = real_dev->ifindex; 532 new_dev->iflink = real_dev->ifindex;
522 vlan_transfer_operstate(real_dev, new_dev); 533 vlan_transfer_operstate(real_dev, new_dev);
523 linkwatch_fire_event(new_dev); /* _MUST_ call rfc2863_policy() */ 534 linkwatch_fire_event(new_dev); /* _MUST_ call rfc2863_policy() */
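
The vlan change above is another lockdep accommodation: a vlan device's xmit lock is legitimately taken while the underlying real device's xmit lock is held, which looks like same-class recursion unless the nested lock gets its own class key. The pattern, sketched for a hypothetical stacked device:

#include <linux/lockdep.h>
#include <linux/netdevice.h>

static struct lock_class_key my_stacked_xmit_lock_key;

static void my_stacked_dev_setup(struct net_device *dev)
{
        /* Reclassify the nested lock so lockdep does not report the
         * upper-over-lower acquisition as recursive locking. */
        lockdep_set_class(&dev->_xmit_lock, &my_stacked_xmit_lock_key);
}
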
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 87a454f5c89c..121bf6f49148 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -23,6 +23,7 @@
23#include <linux/if.h> /* for IFF_UP */ 23#include <linux/if.h> /* for IFF_UP */
24#include <linux/inetdevice.h> 24#include <linux/inetdevice.h>
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/poison.h>
26#include <linux/proc_fs.h> 27#include <linux/proc_fs.h>
27#include <linux/seq_file.h> 28#include <linux/seq_file.h>
28#include <linux/rcupdate.h> 29#include <linux/rcupdate.h>
@@ -266,7 +267,7 @@ static void clip_neigh_destroy(struct neighbour *neigh)
266 DPRINTK("clip_neigh_destroy (neigh %p)\n", neigh); 267 DPRINTK("clip_neigh_destroy (neigh %p)\n", neigh);
267 if (NEIGH2ENTRY(neigh)->vccs) 268 if (NEIGH2ENTRY(neigh)->vccs)
268 printk(KERN_CRIT "clip_neigh_destroy: vccs != NULL !!!\n"); 269 printk(KERN_CRIT "clip_neigh_destroy: vccs != NULL !!!\n");
269 NEIGH2ENTRY(neigh)->vccs = (void *) 0xdeadbeef; 270 NEIGH2ENTRY(neigh)->vccs = (void *) NEIGHBOR_DEAD;
270} 271}
271 272
272static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) 273static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
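
The clip.c change swaps the bare 0xdeadbeef magic for NEIGHBOR_DEAD from the new <linux/poison.h>, so use-after-free tripwires are named, centralized, and greppable. Hypothetical use of the same idiom:

#include <linux/poison.h>

struct entry {
        void *vccs;
};

static void entry_retire(struct entry *e)
{
        /* Any later dereference faults on a recognizable pattern. */
        e->vccs = (void *) NEIGHBOR_DEAD;
}
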
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 9be5c15e63d3..136c3aefa9de 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -103,11 +103,13 @@ int ax25_rebuild_header(struct sk_buff *skb)
103{ 103{
104 struct sk_buff *ourskb; 104 struct sk_buff *ourskb;
105 unsigned char *bp = skb->data; 105 unsigned char *bp = skb->data;
106 struct net_device *dev; 106 ax25_route *route;
107 struct net_device *dev = NULL;
107 ax25_address *src, *dst; 108 ax25_address *src, *dst;
109 ax25_digi *digipeat = NULL;
108 ax25_dev *ax25_dev; 110 ax25_dev *ax25_dev;
109 ax25_route _route, *route = &_route;
110 ax25_cb *ax25; 111 ax25_cb *ax25;
112 char ip_mode = ' ';
111 113
112 dst = (ax25_address *)(bp + 1); 114 dst = (ax25_address *)(bp + 1);
113 src = (ax25_address *)(bp + 8); 115 src = (ax25_address *)(bp + 8);
@@ -115,8 +117,12 @@ int ax25_rebuild_header(struct sk_buff *skb)
115 if (arp_find(bp + 1, skb)) 117 if (arp_find(bp + 1, skb))
116 return 1; 118 return 1;
117 119
118 route = ax25_rt_find_route(route, dst, NULL); 120 route = ax25_get_route(dst, NULL);
119 dev = route->dev; 121 if (route) {
122 digipeat = route->digipeat;
123 dev = route->dev;
124 ip_mode = route->ip_mode;
125 };
120 126
121 if (dev == NULL) 127 if (dev == NULL)
122 dev = skb->dev; 128 dev = skb->dev;
@@ -126,7 +132,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
126 } 132 }
127 133
128 if (bp[16] == AX25_P_IP) { 134 if (bp[16] == AX25_P_IP) {
129 if (route->ip_mode == 'V' || (route->ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) { 135 if (ip_mode == 'V' || (ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) {
130 /* 136 /*
131 * We copy the buffer and release the original thereby 137 * We copy the buffer and release the original thereby
132 * keeping it straight 138 * keeping it straight
@@ -172,7 +178,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
172 ourskb, 178 ourskb,
173 ax25_dev->values[AX25_VALUES_PACLEN], 179 ax25_dev->values[AX25_VALUES_PACLEN],
174 &src_c, 180 &src_c,
175 &dst_c, route->digipeat, dev); 181 &dst_c, digipeat, dev);
176 if (ax25) { 182 if (ax25) {
177 ax25_cb_put(ax25); 183 ax25_cb_put(ax25);
178 } 184 }
@@ -190,7 +196,7 @@ int ax25_rebuild_header(struct sk_buff *skb)
190 196
191 skb_pull(skb, AX25_KISS_HEADER_LEN); 197 skb_pull(skb, AX25_KISS_HEADER_LEN);
192 198
193 if (route->digipeat != NULL) { 199 if (digipeat != NULL) {
194 if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) { 200 if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) {
195 kfree_skb(skb); 201 kfree_skb(skb);
196 goto put; 202 goto put;
@@ -202,7 +208,8 @@ int ax25_rebuild_header(struct sk_buff *skb)
202 ax25_queue_xmit(skb, dev); 208 ax25_queue_xmit(skb, dev);
203 209
204put: 210put:
205 ax25_put_route(route); 211 if (route)
212 ax25_put_route(route);
206 213
207 return 1; 214 return 1;
208} 215}
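
The ax25_ip.c rework drops the old ax25_rt_find_route() call, which faked up a stack-local route when no real one existed, in favour of ax25_get_route(), which returns a properly refcounted route or NULL. The caller therefore snapshots the fields it needs with safe defaults and drops the reference on the way out; a condensed sketch of that flow:

#include <net/ax25.h>

static void ax25_route_snapshot_sketch(ax25_address *dst)
{
        ax25_route *route = ax25_get_route(dst, NULL);
        ax25_digi *digipeat = NULL;
        char ip_mode = ' ';

        if (route) {
                digipeat = route->digipeat;
                ip_mode = route->ip_mode;
        }

        /* ... transmit using digipeat / ip_mode ... */

        if (route)
                ax25_put_route(route);
}
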
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 5ac98250797b..51b7bdaf27eb 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -41,8 +41,6 @@
41static ax25_route *ax25_route_list; 41static ax25_route *ax25_route_list;
42static DEFINE_RWLOCK(ax25_route_lock); 42static DEFINE_RWLOCK(ax25_route_lock);
43 43
44static ax25_route *ax25_get_route(ax25_address *, struct net_device *);
45
46void ax25_rt_device_down(struct net_device *dev) 44void ax25_rt_device_down(struct net_device *dev)
47{ 45{
48 ax25_route *s, *t, *ax25_rt; 46 ax25_route *s, *t, *ax25_rt;
@@ -115,7 +113,7 @@ static int ax25_rt_add(struct ax25_routes_struct *route)
115 return -ENOMEM; 113 return -ENOMEM;
116 } 114 }
117 115
118 atomic_set(&ax25_rt->ref, 0); 116 atomic_set(&ax25_rt->refcount, 1);
119 ax25_rt->callsign = route->dest_addr; 117 ax25_rt->callsign = route->dest_addr;
120 ax25_rt->dev = ax25_dev->dev; 118 ax25_rt->dev = ax25_dev->dev;
121 ax25_rt->digipeat = NULL; 119 ax25_rt->digipeat = NULL;
@@ -140,23 +138,10 @@ static int ax25_rt_add(struct ax25_routes_struct *route)
140 return 0; 138 return 0;
141} 139}
142 140
143static void ax25_rt_destroy(ax25_route *ax25_rt) 141void __ax25_put_route(ax25_route *ax25_rt)
144{ 142{
145 if (atomic_read(&ax25_rt->ref) == 0) { 143 kfree(ax25_rt->digipeat);
146 kfree(ax25_rt->digipeat); 144 kfree(ax25_rt);
147 kfree(ax25_rt);
148 return;
149 }
150
151 /*
152 * Uh... Route is still in use; we can't yet destroy it. Retry later.
153 */
154 init_timer(&ax25_rt->timer);
155 ax25_rt->timer.data = (unsigned long) ax25_rt;
156 ax25_rt->timer.function = (void *) ax25_rt_destroy;
157 ax25_rt->timer.expires = jiffies + 5 * HZ;
158
159 add_timer(&ax25_rt->timer);
160} 145}
161 146
162static int ax25_rt_del(struct ax25_routes_struct *route) 147static int ax25_rt_del(struct ax25_routes_struct *route)
@@ -177,12 +162,12 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
177 ax25cmp(&route->dest_addr, &s->callsign) == 0) { 162 ax25cmp(&route->dest_addr, &s->callsign) == 0) {
178 if (ax25_route_list == s) { 163 if (ax25_route_list == s) {
179 ax25_route_list = s->next; 164 ax25_route_list = s->next;
180 ax25_rt_destroy(s); 165 ax25_put_route(s);
181 } else { 166 } else {
182 for (t = ax25_route_list; t != NULL; t = t->next) { 167 for (t = ax25_route_list; t != NULL; t = t->next) {
183 if (t->next == s) { 168 if (t->next == s) {
184 t->next = s->next; 169 t->next = s->next;
185 ax25_rt_destroy(s); 170 ax25_put_route(s);
186 break; 171 break;
187 } 172 }
188 } 173 }
@@ -362,7 +347,7 @@ struct file_operations ax25_route_fops = {
362 * 347 *
363 * Only routes with a reference count of zero can be destroyed. 348 * Only routes with a reference count of zero can be destroyed.
364 */ 349 */
365static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) 350ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
366{ 351{
367 ax25_route *ax25_spe_rt = NULL; 352 ax25_route *ax25_spe_rt = NULL;
368 ax25_route *ax25_def_rt = NULL; 353 ax25_route *ax25_def_rt = NULL;
@@ -392,7 +377,7 @@ static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
392 ax25_rt = ax25_spe_rt; 377 ax25_rt = ax25_spe_rt;
393 378
394 if (ax25_rt != NULL) 379 if (ax25_rt != NULL)
395 atomic_inc(&ax25_rt->ref); 380 ax25_hold_route(ax25_rt);
396 381
397 read_unlock(&ax25_route_lock); 382 read_unlock(&ax25_route_lock);
398 383
@@ -467,24 +452,6 @@ put:
467 return 0; 452 return 0;
468} 453}
469 454
470ax25_route *ax25_rt_find_route(ax25_route * route, ax25_address *addr,
471 struct net_device *dev)
472{
473 ax25_route *ax25_rt;
474
475 if ((ax25_rt = ax25_get_route(addr, dev)))
476 return ax25_rt;
477
478 route->next = NULL;
479 atomic_set(&route->ref, 1);
480 route->callsign = *addr;
481 route->dev = dev;
482 route->digipeat = NULL;
483 route->ip_mode = ' ';
484
485 return route;
486}
487
488struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src, 455struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
489 ax25_address *dest, ax25_digi *digi) 456 ax25_address *dest, ax25_digi *digi)
490{ 457{
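
In ax25_route.c the timer-based deferred destructor disappears: routes are now born with refcount 1, callers take and drop references, and the last put frees immediately via __ax25_put_route(). The hold/put helpers themselves live in include/net/ax25.h, outside this diff; presumably they are close to:

#include <net/ax25.h>

static inline void ax25_hold_route_sketch(ax25_route *ax25_rt)
{
        atomic_inc(&ax25_rt->refcount);
}

static inline void ax25_put_route_sketch(ax25_route *ax25_rt)
{
        /* Last reference out frees the route at once; no retry timer
         * needed any more. */
        if (atomic_dec_and_test(&ax25_rt->refcount))
                __ax25_put_route(ax25_rt);
}
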
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 51f867062e1d..788ea7a2b744 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -48,7 +48,7 @@
48#define BT_DBG(D...) 48#define BT_DBG(D...)
49#endif 49#endif
50 50
51#define VERSION "2.8" 51#define VERSION "2.10"
52 52
53/* Bluetooth sockets */ 53/* Bluetooth sockets */
54#define BT_MAX_PROTO 8 54#define BT_MAX_PROTO 8
@@ -307,13 +307,21 @@ static struct net_proto_family bt_sock_family_ops = {
307 307
308static int __init bt_init(void) 308static int __init bt_init(void)
309{ 309{
310 int err;
311
310 BT_INFO("Core ver %s", VERSION); 312 BT_INFO("Core ver %s", VERSION);
311 313
312 sock_register(&bt_sock_family_ops); 314 err = bt_sysfs_init();
315 if (err < 0)
316 return err;
313 317
314 BT_INFO("HCI device and connection manager initialized"); 318 err = sock_register(&bt_sock_family_ops);
319 if (err < 0) {
320 bt_sysfs_cleanup();
321 return err;
322 }
315 323
316 bt_sysfs_init(); 324 BT_INFO("HCI device and connection manager initialized");
317 325
318 hci_sock_init(); 326 hci_sock_init();
319 327
@@ -324,9 +332,9 @@ static void __exit bt_exit(void)
324{ 332{
325 hci_sock_cleanup(); 333 hci_sock_cleanup();
326 334
327 bt_sysfs_cleanup();
328
329 sock_unregister(PF_BLUETOOTH); 335 sock_unregister(PF_BLUETOOTH);
336
337 bt_sysfs_cleanup();
330} 338}
331 339
332subsys_initcall(bt_init); 340subsys_initcall(bt_init);
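
bt_init() previously ignored the return values of bt_sysfs_init() and sock_register(); now every step is checked, completed steps are undone in reverse order, and bt_exit() mirrors the init order backwards. The generic shape of that ladder (step_a/step_b are placeholders):

#include <linux/init.h>

static int step_a_init(void);           /* hypothetical */
static void step_a_cleanup(void);       /* hypothetical */
static int step_b_init(void);           /* hypothetical */

static int __init my_init(void)
{
        int err;

        err = step_a_init();
        if (err < 0)
                return err;

        err = step_b_init();
        if (err < 0) {
                /* Undo only what succeeded, in reverse order. */
                step_a_cleanup();
                return err;
        }

        return 0;
}
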
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5c0c2b1ef34a..420ed4d7e57e 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -115,8 +115,8 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
115 115
116static void hci_conn_timeout(unsigned long arg) 116static void hci_conn_timeout(unsigned long arg)
117{ 117{
118 struct hci_conn *conn = (void *)arg; 118 struct hci_conn *conn = (void *) arg;
119 struct hci_dev *hdev = conn->hdev; 119 struct hci_dev *hdev = conn->hdev;
120 120
121 BT_DBG("conn %p state %d", conn, conn->state); 121 BT_DBG("conn %p state %d", conn, conn->state);
122 122
@@ -132,11 +132,13 @@ static void hci_conn_timeout(unsigned long arg)
132 return; 132 return;
133} 133}
134 134
135static void hci_conn_init_timer(struct hci_conn *conn) 135static void hci_conn_idle(unsigned long arg)
136{ 136{
137 init_timer(&conn->timer); 137 struct hci_conn *conn = (void *) arg;
138 conn->timer.function = hci_conn_timeout; 138
139 conn->timer.data = (unsigned long)conn; 139 BT_DBG("conn %p mode %d", conn, conn->mode);
140
141 hci_conn_enter_sniff_mode(conn);
140} 142}
141 143
142struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 144struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -145,17 +147,27 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
145 147
146 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 148 BT_DBG("%s dst %s", hdev->name, batostr(dst));
147 149
148 if (!(conn = kmalloc(sizeof(struct hci_conn), GFP_ATOMIC))) 150 conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
151 if (!conn)
149 return NULL; 152 return NULL;
150 memset(conn, 0, sizeof(struct hci_conn));
151 153
152 bacpy(&conn->dst, dst); 154 bacpy(&conn->dst, dst);
153 conn->type = type;
154 conn->hdev = hdev; 155 conn->hdev = hdev;
156 conn->type = type;
157 conn->mode = HCI_CM_ACTIVE;
155 conn->state = BT_OPEN; 158 conn->state = BT_OPEN;
156 159
160 conn->power_save = 1;
161
157 skb_queue_head_init(&conn->data_q); 162 skb_queue_head_init(&conn->data_q);
158 hci_conn_init_timer(conn); 163
164 init_timer(&conn->disc_timer);
165 conn->disc_timer.function = hci_conn_timeout;
166 conn->disc_timer.data = (unsigned long) conn;
167
168 init_timer(&conn->idle_timer);
169 conn->idle_timer.function = hci_conn_idle;
170 conn->idle_timer.data = (unsigned long) conn;
159 171
160 atomic_set(&conn->refcnt, 0); 172 atomic_set(&conn->refcnt, 0);
161 173
@@ -178,7 +190,9 @@ int hci_conn_del(struct hci_conn *conn)
178 190
179 BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle); 191 BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
180 192
181 hci_conn_del_timer(conn); 193 del_timer(&conn->idle_timer);
194
195 del_timer(&conn->disc_timer);
182 196
183 if (conn->type == SCO_LINK) { 197 if (conn->type == SCO_LINK) {
184 struct hci_conn *acl = conn->link; 198 struct hci_conn *acl = conn->link;
@@ -364,6 +378,70 @@ int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
364} 378}
365EXPORT_SYMBOL(hci_conn_switch_role); 379EXPORT_SYMBOL(hci_conn_switch_role);
366 380
381/* Enter active mode */
382void hci_conn_enter_active_mode(struct hci_conn *conn)
383{
384 struct hci_dev *hdev = conn->hdev;
385
386 BT_DBG("conn %p mode %d", conn, conn->mode);
387
388 if (test_bit(HCI_RAW, &hdev->flags))
389 return;
390
391 if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
392 goto timer;
393
394 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
395 struct hci_cp_exit_sniff_mode cp;
396 cp.handle = __cpu_to_le16(conn->handle);
397 hci_send_cmd(hdev, OGF_LINK_POLICY,
398 OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp);
399 }
400
401timer:
402 if (hdev->idle_timeout > 0)
403 mod_timer(&conn->idle_timer,
404 jiffies + msecs_to_jiffies(hdev->idle_timeout));
405}
406
407/* Enter sniff mode */
408void hci_conn_enter_sniff_mode(struct hci_conn *conn)
409{
410 struct hci_dev *hdev = conn->hdev;
411
412 BT_DBG("conn %p mode %d", conn, conn->mode);
413
414 if (test_bit(HCI_RAW, &hdev->flags))
415 return;
416
417 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
418 return;
419
420 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
421 return;
422
423 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
424 struct hci_cp_sniff_subrate cp;
425 cp.handle = __cpu_to_le16(conn->handle);
426 cp.max_latency = __constant_cpu_to_le16(0);
427 cp.min_remote_timeout = __constant_cpu_to_le16(0);
428 cp.min_local_timeout = __constant_cpu_to_le16(0);
429 hci_send_cmd(hdev, OGF_LINK_POLICY,
430 OCF_SNIFF_SUBRATE, sizeof(cp), &cp);
431 }
432
433 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
434 struct hci_cp_sniff_mode cp;
435 cp.handle = __cpu_to_le16(conn->handle);
436 cp.max_interval = __cpu_to_le16(hdev->sniff_max_interval);
437 cp.min_interval = __cpu_to_le16(hdev->sniff_min_interval);
438 cp.attempt = __constant_cpu_to_le16(4);
439 cp.timeout = __constant_cpu_to_le16(1);
440 hci_send_cmd(hdev, OGF_LINK_POLICY,
441 OCF_SNIFF_MODE, sizeof(cp), &cp);
442 }
443}
444
367/* Drop all connection on the device */ 445/* Drop all connection on the device */
368void hci_conn_hash_flush(struct hci_dev *hdev) 446void hci_conn_hash_flush(struct hci_dev *hdev)
369{ 447{
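
The hci_conn.c changes introduce sniff-mode power management: each connection grows a disc_timer (the old disconnect timeout) plus an idle_timer whose handler requests sniff mode, and hci_conn_enter_active_mode() re-arms the idle timer on every burst of traffic. A sketch of the re-arm idiom with a hypothetical link object:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_link {
        struct timer_list idle_timer;
        unsigned int idle_timeout;      /* milliseconds; 0 = disabled */
};

static void my_link_saw_traffic(struct my_link *link)
{
        /* Each activity pushes the deadline out, so the idle handler
         * (which would enter sniff mode) fires only after a genuinely
         * quiet period. */
        if (link->idle_timeout > 0)
                mod_timer(&link->idle_timer,
                          jiffies + msecs_to_jiffies(link->idle_timeout));
}
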
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index f67240beb0dd..54e8e5ea2154 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -411,7 +411,7 @@ int hci_inquiry(void __user *arg)
411 } 411 }
412 hci_dev_unlock_bh(hdev); 412 hci_dev_unlock_bh(hdev);
413 413
414 timeo = ir.length * 2 * HZ; 414 timeo = ir.length * msecs_to_jiffies(2000);
415 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0) 415 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
416 goto done; 416 goto done;
417 417
@@ -479,7 +479,8 @@ int hci_dev_open(__u16 dev)
479 set_bit(HCI_INIT, &hdev->flags); 479 set_bit(HCI_INIT, &hdev->flags);
480 480
481 //__hci_request(hdev, hci_reset_req, 0, HZ); 481 //__hci_request(hdev, hci_reset_req, 0, HZ);
482 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT); 482 ret = __hci_request(hdev, hci_init_req, 0,
483 msecs_to_jiffies(HCI_INIT_TIMEOUT));
483 484
484 clear_bit(HCI_INIT, &hdev->flags); 485 clear_bit(HCI_INIT, &hdev->flags);
485 } 486 }
@@ -546,7 +547,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
546 atomic_set(&hdev->cmd_cnt, 1); 547 atomic_set(&hdev->cmd_cnt, 1);
547 if (!test_bit(HCI_RAW, &hdev->flags)) { 548 if (!test_bit(HCI_RAW, &hdev->flags)) {
548 set_bit(HCI_INIT, &hdev->flags); 549 set_bit(HCI_INIT, &hdev->flags);
549 __hci_request(hdev, hci_reset_req, 0, HZ/4); 550 __hci_request(hdev, hci_reset_req, 0,
551 msecs_to_jiffies(250));
550 clear_bit(HCI_INIT, &hdev->flags); 552 clear_bit(HCI_INIT, &hdev->flags);
551 } 553 }
552 554
@@ -619,7 +621,8 @@ int hci_dev_reset(__u16 dev)
619 hdev->acl_cnt = 0; hdev->sco_cnt = 0; 621 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
620 622
621 if (!test_bit(HCI_RAW, &hdev->flags)) 623 if (!test_bit(HCI_RAW, &hdev->flags))
622 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT); 624 ret = __hci_request(hdev, hci_reset_req, 0,
625 msecs_to_jiffies(HCI_INIT_TIMEOUT));
623 626
624done: 627done:
625 tasklet_enable(&hdev->tx_task); 628 tasklet_enable(&hdev->tx_task);
@@ -657,7 +660,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
657 660
658 switch (cmd) { 661 switch (cmd) {
659 case HCISETAUTH: 662 case HCISETAUTH:
660 err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT); 663 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
664 msecs_to_jiffies(HCI_INIT_TIMEOUT));
661 break; 665 break;
662 666
663 case HCISETENCRYPT: 667 case HCISETENCRYPT:
@@ -668,18 +672,19 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
668 672
669 if (!test_bit(HCI_AUTH, &hdev->flags)) { 673 if (!test_bit(HCI_AUTH, &hdev->flags)) {
670 /* Auth must be enabled first */ 674 /* Auth must be enabled first */
671 err = hci_request(hdev, hci_auth_req, 675 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
672 dr.dev_opt, HCI_INIT_TIMEOUT); 676 msecs_to_jiffies(HCI_INIT_TIMEOUT));
673 if (err) 677 if (err)
674 break; 678 break;
675 } 679 }
676 680
677 err = hci_request(hdev, hci_encrypt_req, 681 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
678 dr.dev_opt, HCI_INIT_TIMEOUT); 682 msecs_to_jiffies(HCI_INIT_TIMEOUT));
679 break; 683 break;
680 684
681 case HCISETSCAN: 685 case HCISETSCAN:
682 err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT); 686 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
687 msecs_to_jiffies(HCI_INIT_TIMEOUT));
683 break; 688 break;
684 689
685 case HCISETPTYPE: 690 case HCISETPTYPE:
@@ -812,8 +817,8 @@ void hci_free_dev(struct hci_dev *hdev)
812{ 817{
813 skb_queue_purge(&hdev->driver_init); 818 skb_queue_purge(&hdev->driver_init);
814 819
815 /* will free via class release */ 820 /* will free via device release */
816 class_device_put(&hdev->class_dev); 821 put_device(&hdev->dev);
817} 822}
818EXPORT_SYMBOL(hci_free_dev); 823EXPORT_SYMBOL(hci_free_dev);
819 824
@@ -848,6 +853,10 @@ int hci_register_dev(struct hci_dev *hdev)
848 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 853 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
849 hdev->link_mode = (HCI_LM_ACCEPT); 854 hdev->link_mode = (HCI_LM_ACCEPT);
850 855
856 hdev->idle_timeout = 0;
857 hdev->sniff_max_interval = 800;
858 hdev->sniff_min_interval = 80;
859
851 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev); 860 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
852 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev); 861 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
853 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev); 862 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
@@ -1220,6 +1229,9 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
1220 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) { 1229 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1221 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 1230 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1222 BT_DBG("skb %p len %d", skb, skb->len); 1231 BT_DBG("skb %p len %d", skb, skb->len);
1232
1233 hci_conn_enter_active_mode(conn);
1234
1223 hci_send_frame(skb); 1235 hci_send_frame(skb);
1224 hdev->acl_last_tx = jiffies; 1236 hdev->acl_last_tx = jiffies;
1225 1237
@@ -1298,6 +1310,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1298 if (conn) { 1310 if (conn) {
1299 register struct hci_proto *hp; 1311 register struct hci_proto *hp;
1300 1312
1313 hci_conn_enter_active_mode(conn);
1314
1301 /* Send to upper protocol */ 1315 /* Send to upper protocol */
1302 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) { 1316 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1303 hp->recv_acldata(conn, skb, flags); 1317 hp->recv_acldata(conn, skb, flags);
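
Most of the hci_core.c hunk converts timeouts from raw "n * HZ" arithmetic to msecs_to_jiffies(), which keeps the wall-clock behaviour identical whether the kernel is built with HZ=100, 250, or 1000 (a raw "HZ/4" only happens to mean 250 ms). Minimal sketch with a hypothetical constant:

#include <linux/jiffies.h>

#define MY_TIMEOUT_MS   10000   /* hypothetical; value in milliseconds */

static unsigned long my_timeout_jiffies(void)
{
        /* HZ-independent: always MY_TIMEOUT_MS of real time. */
        return msecs_to_jiffies(MY_TIMEOUT_MS);
}
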
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 618bacee1b1c..3896dabab11d 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -83,6 +83,8 @@ static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *
83{ 83{
84 struct hci_conn *conn; 84 struct hci_conn *conn;
85 struct hci_rp_role_discovery *rd; 85 struct hci_rp_role_discovery *rd;
86 struct hci_rp_write_link_policy *lp;
87 void *sent;
86 88
87 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 89 BT_DBG("%s ocf 0x%x", hdev->name, ocf);
88 90
@@ -106,6 +108,27 @@ static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *
106 hci_dev_unlock(hdev); 108 hci_dev_unlock(hdev);
107 break; 109 break;
108 110
111 case OCF_WRITE_LINK_POLICY:
112 sent = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY);
113 if (!sent)
114 break;
115
116 lp = (struct hci_rp_write_link_policy *) skb->data;
117
118 if (lp->status)
119 break;
120
121 hci_dev_lock(hdev);
122
123 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(lp->handle));
124 if (conn) {
125 __le16 policy = get_unaligned((__le16 *) (sent + 2));
126 conn->link_policy = __le16_to_cpu(policy);
127 }
128
129 hci_dev_unlock(hdev);
130 break;
131
109 default: 132 default:
110 BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x", 133 BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x",
111 hdev->name, ocf); 134 hdev->name, ocf);
@@ -274,7 +297,7 @@ static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb
274/* Command Complete OGF INFO_PARAM */ 297/* Command Complete OGF INFO_PARAM */
275static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb) 298static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
276{ 299{
277 struct hci_rp_read_loc_features *lf; 300 struct hci_rp_read_local_features *lf;
278 struct hci_rp_read_buffer_size *bs; 301 struct hci_rp_read_buffer_size *bs;
279 struct hci_rp_read_bd_addr *ba; 302 struct hci_rp_read_bd_addr *ba;
280 303
@@ -282,7 +305,7 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s
282 305
283 switch (ocf) { 306 switch (ocf) {
284 case OCF_READ_LOCAL_FEATURES: 307 case OCF_READ_LOCAL_FEATURES:
285 lf = (struct hci_rp_read_loc_features *) skb->data; 308 lf = (struct hci_rp_read_local_features *) skb->data;
286 309
287 if (lf->status) { 310 if (lf->status) {
288 BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status); 311 BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status);
@@ -319,9 +342,17 @@ static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *s
319 } 342 }
320 343
321 hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu); 344 hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu);
322 hdev->sco_mtu = bs->sco_mtu ? bs->sco_mtu : 64; 345 hdev->sco_mtu = bs->sco_mtu;
323 hdev->acl_pkts = hdev->acl_cnt = __le16_to_cpu(bs->acl_max_pkt); 346 hdev->acl_pkts = __le16_to_cpu(bs->acl_max_pkt);
324 hdev->sco_pkts = hdev->sco_cnt = __le16_to_cpu(bs->sco_max_pkt); 347 hdev->sco_pkts = __le16_to_cpu(bs->sco_max_pkt);
348
349 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
350 hdev->sco_mtu = 64;
351 hdev->sco_pkts = 8;
352 }
353
354 hdev->acl_cnt = hdev->acl_pkts;
355 hdev->sco_cnt = hdev->sco_pkts;
325 356
326 BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name, 357 BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name,
327 hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts); 358 hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts);
@@ -439,8 +470,46 @@ static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status)
439 BT_DBG("%s ocf 0x%x", hdev->name, ocf); 470 BT_DBG("%s ocf 0x%x", hdev->name, ocf);
440 471
441 switch (ocf) { 472 switch (ocf) {
473 case OCF_SNIFF_MODE:
474 if (status) {
475 struct hci_conn *conn;
476 struct hci_cp_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_SNIFF_MODE);
477
478 if (!cp)
479 break;
480
481 hci_dev_lock(hdev);
482
483 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
484 if (conn) {
485 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
486 }
487
488 hci_dev_unlock(hdev);
489 }
490 break;
491
492 case OCF_EXIT_SNIFF_MODE:
493 if (status) {
494 struct hci_conn *conn;
495 struct hci_cp_exit_sniff_mode *cp = hci_sent_cmd_data(hdev, OGF_LINK_POLICY, OCF_EXIT_SNIFF_MODE);
496
497 if (!cp)
498 break;
499
500 hci_dev_lock(hdev);
501
502 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
503 if (conn) {
504 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
505 }
506
507 hci_dev_unlock(hdev);
508 }
509 break;
510
442 default: 511 default:
443 BT_DBG("%s Command status: ogf HOST_POLICY ocf %x", hdev->name, ocf); 512 BT_DBG("%s Command status: ogf LINK_POLICY ocf %x", hdev->name, ocf);
444 break; 513 break;
445 } 514 }
446} 515}
@@ -622,14 +691,16 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
622 else 691 else
623 cp.role = 0x01; /* Remain slave */ 692 cp.role = 0x01; /* Remain slave */
624 693
625 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp); 694 hci_send_cmd(hdev, OGF_LINK_CTL,
695 OCF_ACCEPT_CONN_REQ, sizeof(cp), &cp);
626 } else { 696 } else {
627 /* Connection rejected */ 697 /* Connection rejected */
628 struct hci_cp_reject_conn_req cp; 698 struct hci_cp_reject_conn_req cp;
629 699
630 bacpy(&cp.bdaddr, &ev->bdaddr); 700 bacpy(&cp.bdaddr, &ev->bdaddr);
631 cp.reason = 0x0f; 701 cp.reason = 0x0f;
632 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_REJECT_CONN_REQ, sizeof(cp), &cp); 702 hci_send_cmd(hdev, OGF_LINK_CTL,
703 OCF_REJECT_CONN_REQ, sizeof(cp), &cp);
633 } 704 }
634} 705}
635 706
@@ -637,7 +708,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
637static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 708static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
638{ 709{
639 struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data; 710 struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data;
640 struct hci_conn *conn = NULL; 711 struct hci_conn *conn;
641 712
642 BT_DBG("%s", hdev->name); 713 BT_DBG("%s", hdev->name);
643 714
@@ -659,12 +730,21 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
659 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 730 if (test_bit(HCI_ENCRYPT, &hdev->flags))
660 conn->link_mode |= HCI_LM_ENCRYPT; 731 conn->link_mode |= HCI_LM_ENCRYPT;
661 732
733 /* Get remote features */
734 if (conn->type == ACL_LINK) {
735 struct hci_cp_read_remote_features cp;
736 cp.handle = ev->handle;
737 hci_send_cmd(hdev, OGF_LINK_CTL,
738 OCF_READ_REMOTE_FEATURES, sizeof(cp), &cp);
739 }
740
662 /* Set link policy */ 741 /* Set link policy */
663 if (conn->type == ACL_LINK && hdev->link_policy) { 742 if (conn->type == ACL_LINK && hdev->link_policy) {
664 struct hci_cp_write_link_policy cp; 743 struct hci_cp_write_link_policy cp;
665 cp.handle = ev->handle; 744 cp.handle = ev->handle;
666 cp.policy = __cpu_to_le16(hdev->link_policy); 745 cp.policy = __cpu_to_le16(hdev->link_policy);
667 hci_send_cmd(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY, sizeof(cp), &cp); 746 hci_send_cmd(hdev, OGF_LINK_POLICY,
747 OCF_WRITE_LINK_POLICY, sizeof(cp), &cp);
668 } 748 }
669 749
670 /* Set packet type for incoming connection */ 750 /* Set packet type for incoming connection */
@@ -675,7 +755,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
675 __cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK): 755 __cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK):
676 __cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK); 756 __cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
677 757
678 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp); 758 hci_send_cmd(hdev, OGF_LINK_CTL,
759 OCF_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
679 } 760 }
680 } else 761 } else
681 conn->state = BT_CLOSED; 762 conn->state = BT_CLOSED;
@@ -703,8 +784,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
703static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 784static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
704{ 785{
705 struct hci_ev_disconn_complete *ev = (struct hci_ev_disconn_complete *) skb->data; 786 struct hci_ev_disconn_complete *ev = (struct hci_ev_disconn_complete *) skb->data;
706 struct hci_conn *conn = NULL; 787 struct hci_conn *conn;
707 __u16 handle = __le16_to_cpu(ev->handle);
708 788
709 BT_DBG("%s status %d", hdev->name, ev->status); 789 BT_DBG("%s status %d", hdev->name, ev->status);
710 790
@@ -713,7 +793,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
713 793
714 hci_dev_lock(hdev); 794 hci_dev_lock(hdev);
715 795
716 conn = hci_conn_hash_lookup_handle(hdev, handle); 796 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
717 if (conn) { 797 if (conn) {
718 conn->state = BT_CLOSED; 798 conn->state = BT_CLOSED;
719 hci_proto_disconn_ind(conn, ev->reason); 799 hci_proto_disconn_ind(conn, ev->reason);
@@ -770,7 +850,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
770static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 850static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
771{ 851{
772 struct hci_ev_role_change *ev = (struct hci_ev_role_change *) skb->data; 852 struct hci_ev_role_change *ev = (struct hci_ev_role_change *) skb->data;
773 struct hci_conn *conn = NULL; 853 struct hci_conn *conn;
774 854
775 BT_DBG("%s status %d", hdev->name, ev->status); 855 BT_DBG("%s status %d", hdev->name, ev->status);
776 856
@@ -793,18 +873,43 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
793 hci_dev_unlock(hdev); 873 hci_dev_unlock(hdev);
794} 874}
795 875
876/* Mode Change */
877static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
878{
879 struct hci_ev_mode_change *ev = (struct hci_ev_mode_change *) skb->data;
880 struct hci_conn *conn;
881
882 BT_DBG("%s status %d", hdev->name, ev->status);
883
884 hci_dev_lock(hdev);
885
886 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
887 if (conn) {
888 conn->mode = ev->mode;
889 conn->interval = __le16_to_cpu(ev->interval);
890
891 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
892 if (conn->mode == HCI_CM_ACTIVE)
893 conn->power_save = 1;
894 else
895 conn->power_save = 0;
896 }
897 }
898
899 hci_dev_unlock(hdev);
900}
901
796/* Authentication Complete */ 902/* Authentication Complete */
797static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 903static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
798{ 904{
799 struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data; 905 struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data;
800 struct hci_conn *conn = NULL; 906 struct hci_conn *conn;
801 __u16 handle = __le16_to_cpu(ev->handle);
802 907
803 BT_DBG("%s status %d", hdev->name, ev->status); 908 BT_DBG("%s status %d", hdev->name, ev->status);
804 909
805 hci_dev_lock(hdev); 910 hci_dev_lock(hdev);
806 911
807 conn = hci_conn_hash_lookup_handle(hdev, handle); 912 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
808 if (conn) { 913 if (conn) {
809 if (!ev->status) 914 if (!ev->status)
810 conn->link_mode |= HCI_LM_AUTH; 915 conn->link_mode |= HCI_LM_AUTH;
@@ -819,8 +924,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
819 cp.handle = __cpu_to_le16(conn->handle); 924 cp.handle = __cpu_to_le16(conn->handle);
820 cp.encrypt = 1; 925 cp.encrypt = 1;
821 hci_send_cmd(conn->hdev, OGF_LINK_CTL, 926 hci_send_cmd(conn->hdev, OGF_LINK_CTL,
822 OCF_SET_CONN_ENCRYPT, 927 OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp);
823 sizeof(cp), &cp);
824 } else { 928 } else {
825 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 929 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
826 hci_encrypt_cfm(conn, ev->status, 0x00); 930 hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -835,14 +939,13 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
835static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 939static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
836{ 940{
837 struct hci_ev_encrypt_change *ev = (struct hci_ev_encrypt_change *) skb->data; 941 struct hci_ev_encrypt_change *ev = (struct hci_ev_encrypt_change *) skb->data;
838 struct hci_conn *conn = NULL; 942 struct hci_conn *conn;
839 __u16 handle = __le16_to_cpu(ev->handle);
840 943
841 BT_DBG("%s status %d", hdev->name, ev->status); 944 BT_DBG("%s status %d", hdev->name, ev->status);
842 945
843 hci_dev_lock(hdev); 946 hci_dev_lock(hdev);
844 947
845 conn = hci_conn_hash_lookup_handle(hdev, handle); 948 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
846 if (conn) { 949 if (conn) {
847 if (!ev->status) { 950 if (!ev->status) {
848 if (ev->encrypt) 951 if (ev->encrypt)
@@ -863,14 +966,13 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
863static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 966static inline void hci_change_conn_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
864{ 967{
865 struct hci_ev_change_conn_link_key_complete *ev = (struct hci_ev_change_conn_link_key_complete *) skb->data; 968 struct hci_ev_change_conn_link_key_complete *ev = (struct hci_ev_change_conn_link_key_complete *) skb->data;
866 struct hci_conn *conn = NULL; 969 struct hci_conn *conn;
867 __u16 handle = __le16_to_cpu(ev->handle);
868 970
869 BT_DBG("%s status %d", hdev->name, ev->status); 971 BT_DBG("%s status %d", hdev->name, ev->status);
870 972
871 hci_dev_lock(hdev); 973 hci_dev_lock(hdev);
872 974
873 conn = hci_conn_hash_lookup_handle(hdev, handle); 975 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
874 if (conn) { 976 if (conn) {
875 if (!ev->status) 977 if (!ev->status)
876 conn->link_mode |= HCI_LM_SECURE; 978 conn->link_mode |= HCI_LM_SECURE;
@@ -898,18 +1000,35 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
898{ 1000{
899} 1001}
900 1002
1003/* Remote Features */
1004static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1005{
1006 struct hci_ev_remote_features *ev = (struct hci_ev_remote_features *) skb->data;
1007 struct hci_conn *conn;
1008
1009 BT_DBG("%s status %d", hdev->name, ev->status);
1010
1011 hci_dev_lock(hdev);
1012
1013 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1014 if (conn && !ev->status) {
1015 memcpy(conn->features, ev->features, sizeof(conn->features));
1016 }
1017
1018 hci_dev_unlock(hdev);
1019}
1020
901/* Clock Offset */ 1021/* Clock Offset */
902static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 1022static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
903{ 1023{
904 struct hci_ev_clock_offset *ev = (struct hci_ev_clock_offset *) skb->data; 1024 struct hci_ev_clock_offset *ev = (struct hci_ev_clock_offset *) skb->data;
905 struct hci_conn *conn = NULL; 1025 struct hci_conn *conn;
906 __u16 handle = __le16_to_cpu(ev->handle);
907 1026
908 BT_DBG("%s status %d", hdev->name, ev->status); 1027 BT_DBG("%s status %d", hdev->name, ev->status);
909 1028
910 hci_dev_lock(hdev); 1029 hci_dev_lock(hdev);
911 1030
912 conn = hci_conn_hash_lookup_handle(hdev, handle); 1031 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
913 if (conn && !ev->status) { 1032 if (conn && !ev->status) {
914 struct inquiry_entry *ie; 1033 struct inquiry_entry *ie;
915 1034
@@ -940,6 +1059,23 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
940 hci_dev_unlock(hdev); 1059 hci_dev_unlock(hdev);
941} 1060}
942 1061
1062/* Sniff Subrate */
1063static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
1064{
1065 struct hci_ev_sniff_subrate *ev = (struct hci_ev_sniff_subrate *) skb->data;
1066 struct hci_conn *conn;
1067
1068 BT_DBG("%s status %d", hdev->name, ev->status);
1069
1070 hci_dev_lock(hdev);
1071
1072 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1073 if (conn) {
1074 }
1075
1076 hci_dev_unlock(hdev);
1077}
1078
943void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 1079void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
944{ 1080{
945 struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data; 1081 struct hci_event_hdr *hdr = (struct hci_event_hdr *) skb->data;
@@ -988,6 +1124,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
988 hci_role_change_evt(hdev, skb); 1124 hci_role_change_evt(hdev, skb);
989 break; 1125 break;
990 1126
1127 case HCI_EV_MODE_CHANGE:
1128 hci_mode_change_evt(hdev, skb);
1129 break;
1130
991 case HCI_EV_AUTH_COMPLETE: 1131 case HCI_EV_AUTH_COMPLETE:
992 hci_auth_complete_evt(hdev, skb); 1132 hci_auth_complete_evt(hdev, skb);
993 break; 1133 break;
@@ -1012,6 +1152,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1012 hci_link_key_notify_evt(hdev, skb); 1152 hci_link_key_notify_evt(hdev, skb);
1013 break; 1153 break;
1014 1154
1155 case HCI_EV_REMOTE_FEATURES:
1156 hci_remote_features_evt(hdev, skb);
1157 break;
1158
1015 case HCI_EV_CLOCK_OFFSET: 1159 case HCI_EV_CLOCK_OFFSET:
1016 hci_clock_offset_evt(hdev, skb); 1160 hci_clock_offset_evt(hdev, skb);
1017 break; 1161 break;
@@ -1020,6 +1164,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1020 hci_pscan_rep_mode_evt(hdev, skb); 1164 hci_pscan_rep_mode_evt(hdev, skb);
1021 break; 1165 break;
1022 1166
1167 case HCI_EV_SNIFF_SUBRATE:
1168 hci_sniff_subrate_evt(hdev, skb);
1169 break;
1170
1023 case HCI_EV_CMD_STATUS: 1171 case HCI_EV_CMD_STATUS:
1024 cs = (struct hci_ev_cmd_status *) skb->data; 1172 cs = (struct hci_ev_cmd_status *) skb->data;
1025 skb_pull(skb, sizeof(cs)); 1173 skb_pull(skb, sizeof(cs));
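
The new OCF_WRITE_LINK_POLICY completion handler above recovers the policy value from the originally sent command buffer rather than trusting the event alone, reading the 16-bit field with get_unaligned() because HCI payloads are unaligned byte streams. A sketch of that idiom (offset 2 mirrors the handler above; not a general HCI parser):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

static u16 read_policy_field(void *sent)
{
        /* The __le16 sits at byte offset 2 of the sent parameters and
         * may not be naturally aligned. */
        __le16 raw = get_unaligned((__le16 *) (sent + 2));

        return __le16_to_cpu(raw);
}
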
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 19b234c86f33..3987d167f04e 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -3,6 +3,8 @@
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/init.h> 4#include <linux/init.h>
5 5
6#include <linux/platform_device.h>
7
6#include <net/bluetooth/bluetooth.h> 8#include <net/bluetooth/bluetooth.h>
7#include <net/bluetooth/hci_core.h> 9#include <net/bluetooth/hci_core.h>
8 10
@@ -11,35 +13,35 @@
11#define BT_DBG(D...) 13#define BT_DBG(D...)
12#endif 14#endif
13 15
14static ssize_t show_name(struct class_device *cdev, char *buf) 16static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
15{ 17{
16 struct hci_dev *hdev = class_get_devdata(cdev); 18 struct hci_dev *hdev = dev_get_drvdata(dev);
17 return sprintf(buf, "%s\n", hdev->name); 19 return sprintf(buf, "%s\n", hdev->name);
18} 20}
19 21
20static ssize_t show_type(struct class_device *cdev, char *buf) 22static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
21{ 23{
22 struct hci_dev *hdev = class_get_devdata(cdev); 24 struct hci_dev *hdev = dev_get_drvdata(dev);
23 return sprintf(buf, "%d\n", hdev->type); 25 return sprintf(buf, "%d\n", hdev->type);
24} 26}
25 27
26static ssize_t show_address(struct class_device *cdev, char *buf) 28static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
27{ 29{
28 struct hci_dev *hdev = class_get_devdata(cdev); 30 struct hci_dev *hdev = dev_get_drvdata(dev);
29 bdaddr_t bdaddr; 31 bdaddr_t bdaddr;
30 baswap(&bdaddr, &hdev->bdaddr); 32 baswap(&bdaddr, &hdev->bdaddr);
31 return sprintf(buf, "%s\n", batostr(&bdaddr)); 33 return sprintf(buf, "%s\n", batostr(&bdaddr));
32} 34}
33 35
34static ssize_t show_flags(struct class_device *cdev, char *buf) 36static ssize_t show_flags(struct device *dev, struct device_attribute *attr, char *buf)
35{ 37{
36 struct hci_dev *hdev = class_get_devdata(cdev); 38 struct hci_dev *hdev = dev_get_drvdata(dev);
37 return sprintf(buf, "0x%lx\n", hdev->flags); 39 return sprintf(buf, "0x%lx\n", hdev->flags);
38} 40}
39 41
40static ssize_t show_inquiry_cache(struct class_device *cdev, char *buf) 42static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *attr, char *buf)
41{ 43{
42 struct hci_dev *hdev = class_get_devdata(cdev); 44 struct hci_dev *hdev = dev_get_drvdata(dev);
43 struct inquiry_cache *cache = &hdev->inq_cache; 45 struct inquiry_cache *cache = &hdev->inq_cache;
44 struct inquiry_entry *e; 46 struct inquiry_entry *e;
45 int n = 0; 47 int n = 0;
@@ -61,94 +63,193 @@ static ssize_t show_inquiry_cache(struct class_device *cdev, char *buf)
61 return n; 63 return n;
62} 64}
63 65
64static CLASS_DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 66static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
65static CLASS_DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 67{
66static CLASS_DEVICE_ATTR(address, S_IRUGO, show_address, NULL); 68 struct hci_dev *hdev = dev_get_drvdata(dev);
67static CLASS_DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL); 69 return sprintf(buf, "%d\n", hdev->idle_timeout);
68static CLASS_DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL); 70}
69 71
70static struct class_device_attribute *bt_attrs[] = { 72static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
71 &class_device_attr_name, 73{
72 &class_device_attr_type, 74 struct hci_dev *hdev = dev_get_drvdata(dev);
73 &class_device_attr_address, 75 char *ptr;
74 &class_device_attr_flags, 76 __u32 val;
75 &class_device_attr_inquiry_cache, 77
76 NULL 78 val = simple_strtoul(buf, &ptr, 10);
77}; 79 if (ptr == buf)
80 return -EINVAL;
78 81
79#ifdef CONFIG_HOTPLUG 82 if (val != 0 && (val < 500 || val > 3600000))
80static int bt_uevent(struct class_device *cdev, char **envp, int num_envp, char *buf, int size) 83 return -EINVAL;
84
85 hdev->idle_timeout = val;
86
87 return count;
88}
89
90static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
81{ 91{
82 struct hci_dev *hdev = class_get_devdata(cdev); 92 struct hci_dev *hdev = dev_get_drvdata(dev);
83 int n, i = 0; 93 return sprintf(buf, "%d\n", hdev->sniff_max_interval);
94}
84 95
85 envp[i++] = buf; 96static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
86 n = snprintf(buf, size, "INTERFACE=%s", hdev->name) + 1; 97{
87 buf += n; 98 struct hci_dev *hdev = dev_get_drvdata(dev);
88 size -= n; 99 char *ptr;
100 __u16 val;
89 101
90 if ((size <= 0) || (i >= num_envp)) 102 val = simple_strtoul(buf, &ptr, 10);
91 return -ENOMEM; 103 if (ptr == buf)
104 return -EINVAL;
92 105
93 envp[i] = NULL; 106 if (val < 0x0002 || val > 0xFFFE || val % 2)
94 return 0; 107 return -EINVAL;
108
109 if (val < hdev->sniff_min_interval)
110 return -EINVAL;
111
112 hdev->sniff_max_interval = val;
113
114 return count;
115}
116
117static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
118{
119 struct hci_dev *hdev = dev_get_drvdata(dev);
120 return sprintf(buf, "%d\n", hdev->sniff_min_interval);
95} 121}
96#endif
97 122
98static void bt_release(struct class_device *cdev) 123static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
99{ 124{
100 struct hci_dev *hdev = class_get_devdata(cdev); 125 struct hci_dev *hdev = dev_get_drvdata(dev);
126 char *ptr;
127 __u16 val;
101 128
102 kfree(hdev); 129 val = simple_strtoul(buf, &ptr, 10);
130 if (ptr == buf)
131 return -EINVAL;
132
133 if (val < 0x0002 || val > 0xFFFE || val % 2)
134 return -EINVAL;
135
136 if (val > hdev->sniff_max_interval)
137 return -EINVAL;
138
139 hdev->sniff_min_interval = val;
140
141 return count;
103} 142}
104 143
105struct class bt_class = { 144static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
106 .name = "bluetooth", 145static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
107 .release = bt_release, 146static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
108#ifdef CONFIG_HOTPLUG 147static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
109 .uevent = bt_uevent, 148static DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL);
110#endif 149
150static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
151 show_idle_timeout, store_idle_timeout);
152static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
153 show_sniff_max_interval, store_sniff_max_interval);
154static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
155 show_sniff_min_interval, store_sniff_min_interval);
156
157static struct device_attribute *bt_attrs[] = {
158 &dev_attr_name,
159 &dev_attr_type,
160 &dev_attr_address,
161 &dev_attr_flags,
162 &dev_attr_inquiry_cache,
163 &dev_attr_idle_timeout,
164 &dev_attr_sniff_max_interval,
165 &dev_attr_sniff_min_interval,
166 NULL
111}; 167};
112 168
169struct class *bt_class = NULL;
113EXPORT_SYMBOL_GPL(bt_class); 170EXPORT_SYMBOL_GPL(bt_class);
114 171
172static struct bus_type bt_bus = {
173 .name = "bluetooth",
174};
175
176static struct platform_device *bt_platform;
177
178static void bt_release(struct device *dev)
179{
180 struct hci_dev *hdev = dev_get_drvdata(dev);
181 kfree(hdev);
182}
183
115int hci_register_sysfs(struct hci_dev *hdev) 184int hci_register_sysfs(struct hci_dev *hdev)
116{ 185{
117 struct class_device *cdev = &hdev->class_dev; 186 struct device *dev = &hdev->dev;
118 unsigned int i; 187 unsigned int i;
119 int err; 188 int err;
120 189
121 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 190 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
122 191
123 cdev->class = &bt_class; 192 dev->class = bt_class;
124 class_set_devdata(cdev, hdev); 193
194 if (hdev->parent)
195 dev->parent = hdev->parent;
196 else
197 dev->parent = &bt_platform->dev;
198
199 strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE);
200
201 dev->release = bt_release;
125 202
126 strlcpy(cdev->class_id, hdev->name, BUS_ID_SIZE); 203 dev_set_drvdata(dev, hdev);
127 err = class_device_register(cdev); 204
205 err = device_register(dev);
128 if (err < 0) 206 if (err < 0)
129 return err; 207 return err;
130 208
131 for (i = 0; bt_attrs[i]; i++) 209 for (i = 0; bt_attrs[i]; i++)
132 class_device_create_file(cdev, bt_attrs[i]); 210 device_create_file(dev, bt_attrs[i]);
133 211
134 return 0; 212 return 0;
135} 213}
136 214
137void hci_unregister_sysfs(struct hci_dev *hdev) 215void hci_unregister_sysfs(struct hci_dev *hdev)
138{ 216{
139 struct class_device * cdev = &hdev->class_dev; 217 struct device *dev = &hdev->dev;
140 218
141 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 219 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
142 220
143 class_device_del(cdev); 221 device_del(dev);
144} 222}
145 223
146int __init bt_sysfs_init(void) 224int __init bt_sysfs_init(void)
147{ 225{
148 return class_register(&bt_class); 226 int err;
227
228 bt_platform = platform_device_register_simple("bluetooth", -1, NULL, 0);
229 if (IS_ERR(bt_platform))
230 return PTR_ERR(bt_platform);
231
232 err = bus_register(&bt_bus);
233 if (err < 0) {
234 platform_device_unregister(bt_platform);
235 return err;
236 }
237
238 bt_class = class_create(THIS_MODULE, "bluetooth");
239 if (IS_ERR(bt_class)) {
240 bus_unregister(&bt_bus);
241 platform_device_unregister(bt_platform);
242 return PTR_ERR(bt_class);
243 }
244
245 return 0;
149} 246}
150 247
151void __exit bt_sysfs_cleanup(void) 248void __exit bt_sysfs_cleanup(void)
152{ 249{
153 class_unregister(&bt_class); 250 class_destroy(bt_class);
251
252 bus_unregister(&bt_bus);
253
254 platform_device_unregister(bt_platform);
154} 255}
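
hci_sysfs.c moves from the dying class_device API to plain struct device: DEVICE_ATTR replaces CLASS_DEVICE_ATTR, dev_get_drvdata() replaces class_get_devdata(), and the devices hang off a "bluetooth" bus plus a fallback platform device. The new writable attributes all follow the same validate-then-commit store pattern; a self-contained sketch with a hypothetical attribute:

#include <linux/device.h>
#include <linux/kernel.h>

static unsigned long my_limit;          /* hypothetical tunable */

static ssize_t show_my_limit(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", my_limit);
}

static ssize_t store_my_limit(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
{
        char *end;
        unsigned long val = simple_strtoul(buf, &end, 10);

        if (end == buf)                 /* no digits consumed */
                return -EINVAL;
        if (val > 3600000)              /* illustrative upper bound */
                return -EINVAL;

        my_limit = val;
        return count;                   /* claim the whole write */
}

static DEVICE_ATTR(my_limit, S_IRUGO | S_IWUSR,
                   show_my_limit, store_my_limit);
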
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 770101177da1..eaaad658d11d 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -63,11 +63,6 @@ static struct bt_sock_list l2cap_sk_list = {
63 .lock = RW_LOCK_UNLOCKED 63 .lock = RW_LOCK_UNLOCKED
64}; 64};
65 65
66static int l2cap_conn_del(struct hci_conn *conn, int err);
67
68static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
69static void l2cap_chan_del(struct sock *sk, int err);
70
71static void __l2cap_sock_close(struct sock *sk, int reason); 66static void __l2cap_sock_close(struct sock *sk, int reason);
72static void l2cap_sock_close(struct sock *sk); 67static void l2cap_sock_close(struct sock *sk);
73static void l2cap_sock_kill(struct sock *sk); 68static void l2cap_sock_kill(struct sock *sk);
@@ -109,24 +104,177 @@ static void l2cap_sock_init_timer(struct sock *sk)
109 sk->sk_timer.data = (unsigned long)sk; 104 sk->sk_timer.data = (unsigned long)sk;
110} 105}
111 106
107/* ---- L2CAP channels ---- */
108static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
109{
110 struct sock *s;
111 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 if (l2cap_pi(s)->dcid == cid)
113 break;
114 }
115 return s;
116}
117
118static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
119{
120 struct sock *s;
121 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 if (l2cap_pi(s)->scid == cid)
123 break;
124 }
125 return s;
126}
127
128/* Find channel with given SCID.
129 * Returns locked socket */
130static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
131{
132 struct sock *s;
133 read_lock(&l->lock);
134 s = __l2cap_get_chan_by_scid(l, cid);
135 if (s) bh_lock_sock(s);
136 read_unlock(&l->lock);
137 return s;
138}
139
140static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
141{
142 struct sock *s;
143 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 if (l2cap_pi(s)->ident == ident)
145 break;
146 }
147 return s;
148}
149
150static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
151{
152 struct sock *s;
153 read_lock(&l->lock);
154 s = __l2cap_get_chan_by_ident(l, ident);
155 if (s) bh_lock_sock(s);
156 read_unlock(&l->lock);
157 return s;
158}
159
160static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
161{
162 u16 cid = 0x0040;
163
164 for (; cid < 0xffff; cid++) {
165 if(!__l2cap_get_chan_by_scid(l, cid))
166 return cid;
167 }
168
169 return 0;
170}
171
172static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
173{
174 sock_hold(sk);
175
176 if (l->head)
177 l2cap_pi(l->head)->prev_c = sk;
178
179 l2cap_pi(sk)->next_c = l->head;
180 l2cap_pi(sk)->prev_c = NULL;
181 l->head = sk;
182}
183
184static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
185{
186 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
187
188 write_lock(&l->lock);
189 if (sk == l->head)
190 l->head = next;
191
192 if (next)
193 l2cap_pi(next)->prev_c = prev;
194 if (prev)
195 l2cap_pi(prev)->next_c = next;
196 write_unlock(&l->lock);
197
198 __sock_put(sk);
199}
200
201static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
202{
203 struct l2cap_chan_list *l = &conn->chan_list;
204
205 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
206
207 l2cap_pi(sk)->conn = conn;
208
209 if (sk->sk_type == SOCK_SEQPACKET) {
210 /* Alloc CID for connection-oriented socket */
211 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 } else if (sk->sk_type == SOCK_DGRAM) {
213 /* Connectionless socket */
214 l2cap_pi(sk)->scid = 0x0002;
215 l2cap_pi(sk)->dcid = 0x0002;
216 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
217 } else {
218 /* Raw socket can send/recv signalling messages only */
219 l2cap_pi(sk)->scid = 0x0001;
220 l2cap_pi(sk)->dcid = 0x0001;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
222 }
223
224 __l2cap_chan_link(l, sk);
225
226 if (parent)
227 bt_accept_enqueue(parent, sk);
228}
229
230/* Delete channel.
231 * Must be called on the locked socket. */
232static void l2cap_chan_del(struct sock *sk, int err)
233{
234 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 struct sock *parent = bt_sk(sk)->parent;
236
237 l2cap_sock_clear_timer(sk);
238
239 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
240
241 if (conn) {
242 /* Unlink from channel list */
243 l2cap_chan_unlink(&conn->chan_list, sk);
244 l2cap_pi(sk)->conn = NULL;
245 hci_conn_put(conn->hcon);
246 }
247
248 sk->sk_state = BT_CLOSED;
249 sock_set_flag(sk, SOCK_ZAPPED);
250
251 if (err)
252 sk->sk_err = err;
253
254 if (parent) {
255 bt_accept_unlink(sk);
256 parent->sk_data_ready(parent, 0);
257 } else
258 sk->sk_state_change(sk);
259}
260
112/* ---- L2CAP connections ---- */ 261/* ---- L2CAP connections ---- */
113static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) 262static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
114{ 263{
115 struct l2cap_conn *conn; 264 struct l2cap_conn *conn = hcon->l2cap_data;
116
117 if ((conn = hcon->l2cap_data))
118 return conn;
119 265
120 if (status) 266 if (conn || status)
121 return conn; 267 return conn;
122 268
123 if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC))) 269 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
270 if (!conn)
124 return NULL; 271 return NULL;
125 memset(conn, 0, sizeof(struct l2cap_conn));
126 272
127 hcon->l2cap_data = conn; 273 hcon->l2cap_data = conn;
128 conn->hcon = hcon; 274 conn->hcon = hcon;
129 275
276 BT_DBG("hcon %p conn %p", hcon, conn);
277
130 conn->mtu = hcon->hdev->acl_mtu; 278 conn->mtu = hcon->hdev->acl_mtu;
131 conn->src = &hcon->hdev->bdaddr; 279 conn->src = &hcon->hdev->bdaddr;
132 conn->dst = &hcon->dst; 280 conn->dst = &hcon->dst;
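
This hunk moves the L2CAP channel-list helpers ahead of their first use, letting the forward declarations deleted earlier go. The interesting idiom is l2cap_get_chan_by_scid(): it finds the socket under the list's read lock and locks it before the list lock drops, so the caller always receives a socket that cannot be unlinked underneath it. A generic, self-contained sketch of that pattern (all types hypothetical):

#include <linux/types.h>
#include <linux/spinlock.h>

struct item {
        u16 id;
        spinlock_t lock;
        struct item *next;
};

struct item_list {
        rwlock_t lock;
        struct item *head;
};

static struct item *__item_find(struct item_list *l, u16 id)
{
        struct item *it;

        for (it = l->head; it; it = it->next)
                if (it->id == id)
                        break;
        return it;
}

static struct item *item_find_locked(struct item_list *l, u16 id)
{
        struct item *it;

        read_lock(&l->lock);
        it = __item_find(l, id);
        if (it)
                spin_lock(&it->lock);   /* taken before the list lock
                                         * drops, so the item cannot be
                                         * unlinked under the caller */
        read_unlock(&l->lock);

        return it;
}
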
@@ -134,17 +282,16 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
134 spin_lock_init(&conn->lock); 282 spin_lock_init(&conn->lock);
135 rwlock_init(&conn->chan_list.lock); 283 rwlock_init(&conn->chan_list.lock);
136 284
137 BT_DBG("hcon %p conn %p", hcon, conn);
138 return conn; 285 return conn;
139} 286}
140 287
141static int l2cap_conn_del(struct hci_conn *hcon, int err) 288static void l2cap_conn_del(struct hci_conn *hcon, int err)
142{ 289{
143 struct l2cap_conn *conn; 290 struct l2cap_conn *conn = hcon->l2cap_data;
144 struct sock *sk; 291 struct sock *sk;
145 292
146 if (!(conn = hcon->l2cap_data)) 293 if (!conn)
147 return 0; 294 return;
148 295
149 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); 296 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
150 297
@@ -161,7 +308,6 @@ static int l2cap_conn_del(struct hci_conn *hcon, int err)
161 308
162 hcon->l2cap_data = NULL; 309 hcon->l2cap_data = NULL;
163 kfree(conn); 310 kfree(conn);
164 return 0;
165} 311}
166 312
167static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) 313static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
@@ -925,160 +1071,6 @@ static int l2cap_sock_release(struct socket *sock)
925 return err; 1071 return err;
926} 1072}
927 1073
928/* ---- L2CAP channels ---- */
929static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
930{
931 struct sock *s;
932 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
933 if (l2cap_pi(s)->dcid == cid)
934 break;
935 }
936 return s;
937}
938
939static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
940{
941 struct sock *s;
942 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
943 if (l2cap_pi(s)->scid == cid)
944 break;
945 }
946 return s;
947}
948
949/* Find channel with given SCID.
950 * Returns locked socket */
951static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
952{
953 struct sock *s;
954 read_lock(&l->lock);
955 s = __l2cap_get_chan_by_scid(l, cid);
956 if (s) bh_lock_sock(s);
957 read_unlock(&l->lock);
958 return s;
959}
960
961static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
962{
963 struct sock *s;
964 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
965 if (l2cap_pi(s)->ident == ident)
966 break;
967 }
968 return s;
969}
970
971static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
972{
973 struct sock *s;
974 read_lock(&l->lock);
975 s = __l2cap_get_chan_by_ident(l, ident);
976 if (s) bh_lock_sock(s);
977 read_unlock(&l->lock);
978 return s;
979}
980
981static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
982{
983 u16 cid = 0x0040;
984
985 for (; cid < 0xffff; cid++) {
986 if(!__l2cap_get_chan_by_scid(l, cid))
987 return cid;
988 }
989
990 return 0;
991}
992
993static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
994{
995 sock_hold(sk);
996
997 if (l->head)
998 l2cap_pi(l->head)->prev_c = sk;
999
1000 l2cap_pi(sk)->next_c = l->head;
1001 l2cap_pi(sk)->prev_c = NULL;
1002 l->head = sk;
1003}
1004
1005static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
1006{
1007 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
1008
1009 write_lock(&l->lock);
1010 if (sk == l->head)
1011 l->head = next;
1012
1013 if (next)
1014 l2cap_pi(next)->prev_c = prev;
1015 if (prev)
1016 l2cap_pi(prev)->next_c = next;
1017 write_unlock(&l->lock);
1018
1019 __sock_put(sk);
1020}
1021
1022static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
1023{
1024 struct l2cap_chan_list *l = &conn->chan_list;
1025
1026 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
1027
1028 l2cap_pi(sk)->conn = conn;
1029
1030 if (sk->sk_type == SOCK_SEQPACKET) {
1031 /* Alloc CID for connection-oriented socket */
1032 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
1033 } else if (sk->sk_type == SOCK_DGRAM) {
1034 /* Connectionless socket */
1035 l2cap_pi(sk)->scid = 0x0002;
1036 l2cap_pi(sk)->dcid = 0x0002;
1037 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1038 } else {
1039 /* Raw socket can send/recv signalling messages only */
1040 l2cap_pi(sk)->scid = 0x0001;
1041 l2cap_pi(sk)->dcid = 0x0001;
1042 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
1043 }
1044
1045 __l2cap_chan_link(l, sk);
1046
1047 if (parent)
1048 bt_accept_enqueue(parent, sk);
1049}
1050
1051/* Delete channel.
1052 * Must be called on the locked socket. */
1053static void l2cap_chan_del(struct sock *sk, int err)
1054{
1055 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1056 struct sock *parent = bt_sk(sk)->parent;
1057
1058 l2cap_sock_clear_timer(sk);
1059
1060 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
1061
1062 if (conn) {
1063 /* Unlink from channel list */
1064 l2cap_chan_unlink(&conn->chan_list, sk);
1065 l2cap_pi(sk)->conn = NULL;
1066 hci_conn_put(conn->hcon);
1067 }
1068
1069 sk->sk_state = BT_CLOSED;
1070 sock_set_flag(sk, SOCK_ZAPPED);
1071
1072 if (err)
1073 sk->sk_err = err;
1074
1075 if (parent) {
1076 bt_accept_unlink(sk);
1077 parent->sk_data_ready(parent, 0);
1078 } else
1079 sk->sk_state_change(sk);
1080}
1081
1082static void l2cap_conn_ready(struct l2cap_conn *conn) 1074static void l2cap_conn_ready(struct l2cap_conn *conn)
1083{ 1075{
1084 struct l2cap_chan_list *l = &conn->chan_list; 1076 struct l2cap_chan_list *l = &conn->chan_list;
@@ -1834,7 +1826,9 @@ drop:
1834 kfree_skb(skb); 1826 kfree_skb(skb);
1835 1827
1836done: 1828done:
1837 if (sk) bh_unlock_sock(sk); 1829 if (sk)
1830 bh_unlock_sock(sk);
1831
1838 return 0; 1832 return 0;
1839} 1833}
1840 1834
@@ -1925,18 +1919,18 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1925 1919
1926static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) 1920static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
1927{ 1921{
1922 struct l2cap_conn *conn;
1923
1928 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 1924 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
1929 1925
1930 if (hcon->type != ACL_LINK) 1926 if (hcon->type != ACL_LINK)
1931 return 0; 1927 return 0;
1932 1928
1933 if (!status) { 1929 if (!status) {
1934 struct l2cap_conn *conn;
1935
1936 conn = l2cap_conn_add(hcon, status); 1930 conn = l2cap_conn_add(hcon, status);
1937 if (conn) 1931 if (conn)
1938 l2cap_conn_ready(conn); 1932 l2cap_conn_ready(conn);
1939 } else 1933 } else
1940 l2cap_conn_del(hcon, bt_err(status)); 1934 l2cap_conn_del(hcon, bt_err(status));
1941 1935
1942 return 0; 1936 return 0;
@@ -1950,19 +1944,21 @@ static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
1950 return 0; 1944 return 0;
1951 1945
1952 l2cap_conn_del(hcon, bt_err(reason)); 1946 l2cap_conn_del(hcon, bt_err(reason));
1947
1953 return 0; 1948 return 0;
1954} 1949}
1955 1950
1956static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) 1951static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
1957{ 1952{
1958 struct l2cap_chan_list *l; 1953 struct l2cap_chan_list *l;
1959 struct l2cap_conn *conn; 1954 struct l2cap_conn *conn = hcon->l2cap_data;
1960 struct l2cap_conn_rsp rsp; 1955 struct l2cap_conn_rsp rsp;
1961 struct sock *sk; 1956 struct sock *sk;
1962 int result; 1957 int result;
1963 1958
1964 if (!(conn = hcon->l2cap_data)) 1959 if (!conn)
1965 return 0; 1960 return 0;
1961
1966 l = &conn->chan_list; 1962 l = &conn->chan_list;
1967 1963
1968 BT_DBG("conn %p", conn); 1964 BT_DBG("conn %p", conn);
@@ -2005,13 +2001,14 @@ static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2005static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status) 2001static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2006{ 2002{
2007 struct l2cap_chan_list *l; 2003 struct l2cap_chan_list *l;
2008 struct l2cap_conn *conn; 2004 struct l2cap_conn *conn = hcon->l2cap_data;
2009 struct l2cap_conn_rsp rsp; 2005 struct l2cap_conn_rsp rsp;
2010 struct sock *sk; 2006 struct sock *sk;
2011 int result; 2007 int result;
2012 2008
2013 if (!(conn = hcon->l2cap_data)) 2009 if (!conn)
2014 return 0; 2010 return 0;
2011
2015 l = &conn->chan_list; 2012 l = &conn->chan_list;
2016 2013
2017 BT_DBG("conn %p", conn); 2014 BT_DBG("conn %p", conn);
@@ -2219,7 +2216,7 @@ static int __init l2cap_init(void)
2219 goto error; 2216 goto error;
2220 } 2217 }
2221 2218
2222 class_create_file(&bt_class, &class_attr_l2cap); 2219 class_create_file(bt_class, &class_attr_l2cap);
2223 2220
2224 BT_INFO("L2CAP ver %s", VERSION); 2221 BT_INFO("L2CAP ver %s", VERSION);
2225 BT_INFO("L2CAP socket layer initialized"); 2222 BT_INFO("L2CAP socket layer initialized");
@@ -2233,7 +2230,7 @@ error:
2233 2230
2234static void __exit l2cap_exit(void) 2231static void __exit l2cap_exit(void)
2235{ 2232{
2236 class_remove_file(&bt_class, &class_attr_l2cap); 2233 class_remove_file(bt_class, &class_attr_l2cap);
2237 2234
2238 if (bt_sock_unregister(BTPROTO_L2CAP) < 0) 2235 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2239 BT_ERR("L2CAP socket unregistration failed"); 2236 BT_ERR("L2CAP socket unregistration failed");
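
The l2cap_conn_add() rewrite above folds the kmalloc() plus memset() pair into a single kzalloc() and merges the two early returns into one combined test. A minimal user-space sketch of the same refactor, with calloc() standing in for kzalloc() and hypothetical conn/hcon types:

#include <stdlib.h>

struct conn { int mtu; };
struct hcon { struct conn *l2cap_data; int acl_mtu; };

/* One zeroing allocation, one combined early return. */
static struct conn *conn_add(struct hcon *h, int status)
{
	struct conn *c = h->l2cap_data;

	if (c || status)
		return c;	/* existing conn, or NULL on error status */

	c = calloc(1, sizeof(*c));	/* kzalloc() analogue: zeroed memory */
	if (!c)
		return NULL;

	h->l2cap_data = c;
	c->mtu = h->acl_mtu;
	return c;
}
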
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index bd46e8927f29..155a2b93760e 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -52,8 +52,9 @@
52#define BT_DBG(D...) 52#define BT_DBG(D...)
53#endif 53#endif
54 54
55#define VERSION "1.7" 55#define VERSION "1.8"
56 56
57static int disable_cfc = 0;
57static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; 58static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
58 59
59static struct task_struct *rfcomm_thread; 60static struct task_struct *rfcomm_thread;
@@ -533,7 +534,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
533 s->sock = sock; 534 s->sock = sock;
534 535
535 s->mtu = RFCOMM_DEFAULT_MTU; 536 s->mtu = RFCOMM_DEFAULT_MTU;
536 s->cfc = RFCOMM_CFC_UNKNOWN; 537 s->cfc = disable_cfc ? RFCOMM_CFC_DISABLED : RFCOMM_CFC_UNKNOWN;
537 538
538 /* Do not increment module usage count for listening sessions. 539 /* Do not increment module usage count for listening sessions.
539 * Otherwise we won't be able to unload the module. */ 540 * Otherwise we won't be able to unload the module. */
@@ -1149,6 +1150,8 @@ static inline int rfcomm_check_link_mode(struct rfcomm_dlc *d)
1149 1150
1150static void rfcomm_dlc_accept(struct rfcomm_dlc *d) 1151static void rfcomm_dlc_accept(struct rfcomm_dlc *d)
1151{ 1152{
1153 struct sock *sk = d->session->sock->sk;
1154
1152 BT_DBG("dlc %p", d); 1155 BT_DBG("dlc %p", d);
1153 1156
1154 rfcomm_send_ua(d->session, d->dlci); 1157 rfcomm_send_ua(d->session, d->dlci);
@@ -1158,6 +1161,9 @@ static void rfcomm_dlc_accept(struct rfcomm_dlc *d)
1158 d->state_change(d, 0); 1161 d->state_change(d, 0);
1159 rfcomm_dlc_unlock(d); 1162 rfcomm_dlc_unlock(d);
1160 1163
1164 if (d->link_mode & RFCOMM_LM_MASTER)
1165 hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00);
1166
1161 rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); 1167 rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
1162} 1168}
1163 1169
@@ -1222,14 +1228,18 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn)
1222 BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d", 1228 BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d",
1223 d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits); 1229 d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits);
1224 1230
1225 if (pn->flow_ctrl == 0xf0 || pn->flow_ctrl == 0xe0) { 1231 if ((pn->flow_ctrl == 0xf0 && s->cfc != RFCOMM_CFC_DISABLED) ||
1226 d->cfc = s->cfc = RFCOMM_CFC_ENABLED; 1232 pn->flow_ctrl == 0xe0) {
1233 d->cfc = RFCOMM_CFC_ENABLED;
1227 d->tx_credits = pn->credits; 1234 d->tx_credits = pn->credits;
1228 } else { 1235 } else {
1229 d->cfc = s->cfc = RFCOMM_CFC_DISABLED; 1236 d->cfc = RFCOMM_CFC_DISABLED;
1230 set_bit(RFCOMM_TX_THROTTLED, &d->flags); 1237 set_bit(RFCOMM_TX_THROTTLED, &d->flags);
1231 } 1238 }
1232 1239
1240 if (s->cfc == RFCOMM_CFC_UNKNOWN)
1241 s->cfc = d->cfc;
1242
1233 d->priority = pn->priority; 1243 d->priority = pn->priority;
1234 1244
1235 d->mtu = s->mtu = btohs(pn->mtu); 1245 d->mtu = s->mtu = btohs(pn->mtu);
@@ -2035,7 +2045,7 @@ static int __init rfcomm_init(void)
2035 2045
2036 kernel_thread(rfcomm_run, NULL, CLONE_KERNEL); 2046 kernel_thread(rfcomm_run, NULL, CLONE_KERNEL);
2037 2047
2038 class_create_file(&bt_class, &class_attr_rfcomm_dlc); 2048 class_create_file(bt_class, &class_attr_rfcomm_dlc);
2039 2049
2040 rfcomm_init_sockets(); 2050 rfcomm_init_sockets();
2041 2051
@@ -2050,7 +2060,7 @@ static int __init rfcomm_init(void)
2050 2060
2051static void __exit rfcomm_exit(void) 2061static void __exit rfcomm_exit(void)
2052{ 2062{
2053 class_remove_file(&bt_class, &class_attr_rfcomm_dlc); 2063 class_remove_file(bt_class, &class_attr_rfcomm_dlc);
2054 2064
2055 hci_unregister_cb(&rfcomm_cb); 2065 hci_unregister_cb(&rfcomm_cb);
2056 2066
@@ -2073,6 +2083,9 @@ static void __exit rfcomm_exit(void)
2073module_init(rfcomm_init); 2083module_init(rfcomm_init);
2074module_exit(rfcomm_exit); 2084module_exit(rfcomm_exit);
2075 2085
2086module_param(disable_cfc, bool, 0644);
2087MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control");
2088
2076module_param(l2cap_mtu, uint, 0644); 2089module_param(l2cap_mtu, uint, 0644);
2077MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); 2090MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection");
2078 2091
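
The rfcomm_apply_pn() hunk above makes flow control a per-DLC decision and latches the session-wide cfc setting only once, while it is still RFCOMM_CFC_UNKNOWN; with the new disable_cfc parameter a session starts out DISABLED, so an incoming 0xf0 offer no longer switches it back on. A compact sketch of that decision, with hypothetical enum names:

enum cfc { CFC_UNKNOWN, CFC_DISABLED, CFC_ENABLED };

/* Negotiate credit based flow control for one DLC. */
static enum cfc apply_pn(enum cfc *session, unsigned int flow_ctrl)
{
	enum cfc dlc;

	/* 0xf0: initiator offers CFC, honoured unless the session forbids it.
	 * 0xe0: responder confirms CFC, always accepted. */
	if ((flow_ctrl == 0xf0 && *session != CFC_DISABLED) ||
	    flow_ctrl == 0xe0)
		dlc = CFC_ENABLED;
	else
		dlc = CFC_DISABLED;

	if (*session == CFC_UNKNOWN)
		*session = dlc;	/* the first DLC decides for the session */

	return dlc;
}
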
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 4e9962c8cfa6..220fee04e7f2 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -944,7 +944,7 @@ int __init rfcomm_init_sockets(void)
944 if (err < 0) 944 if (err < 0)
945 goto error; 945 goto error;
946 946
947 class_create_file(&bt_class, &class_attr_rfcomm); 947 class_create_file(bt_class, &class_attr_rfcomm);
948 948
949 BT_INFO("RFCOMM socket layer initialized"); 949 BT_INFO("RFCOMM socket layer initialized");
950 950
@@ -958,7 +958,7 @@ error:
958 958
959void __exit rfcomm_cleanup_sockets(void) 959void __exit rfcomm_cleanup_sockets(void)
960{ 960{
961 class_remove_file(&bt_class, &class_attr_rfcomm); 961 class_remove_file(bt_class, &class_attr_rfcomm);
962 962
963 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) 963 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
964 BT_ERR("RFCOMM socket layer unregistration failed"); 964 BT_ERR("RFCOMM socket layer unregistration failed");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index a5f1e44db5d3..85defccc0287 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -969,7 +969,7 @@ static int __init sco_init(void)
969 goto error; 969 goto error;
970 } 970 }
971 971
972 class_create_file(&bt_class, &class_attr_sco); 972 class_create_file(bt_class, &class_attr_sco);
973 973
974 BT_INFO("SCO (Voice Link) ver %s", VERSION); 974 BT_INFO("SCO (Voice Link) ver %s", VERSION);
975 BT_INFO("SCO socket layer initialized"); 975 BT_INFO("SCO socket layer initialized");
@@ -983,7 +983,7 @@ error:
983 983
984static void __exit sco_exit(void) 984static void __exit sco_exit(void)
985{ 985{
986 class_remove_file(&bt_class, &class_attr_sco); 986 class_remove_file(bt_class, &class_attr_sco);
987 987
988 if (bt_sock_unregister(BTPROTO_SCO) < 0) 988 if (bt_sock_unregister(BTPROTO_SCO) < 0)
989 BT_ERR("SCO socket unregistration failed"); 989 BT_ERR("SCO socket unregistration failed");
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 881d7d1a732a..06abb6634f5b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -117,12 +117,13 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
117 continue; 117 continue;
118 118
119 if (idx < s_idx) 119 if (idx < s_idx)
120 continue; 120 goto cont;
121 121
122 err = br_fill_ifinfo(skb, p, NETLINK_CB(cb->skb).pid, 122 err = br_fill_ifinfo(skb, p, NETLINK_CB(cb->skb).pid,
123 cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); 123 cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI);
124 if (err <= 0) 124 if (err <= 0)
125 break; 125 break;
126cont:
126 ++idx; 127 ++idx;
127 } 128 }
128 read_unlock(&dev_base_lock); 129 read_unlock(&dev_base_lock);
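
The br_dump_ifinfo() fix above replaces continue with goto cont so that idx counts every port, including the ones skipped on resumption; otherwise a partial netlink dump would restart from the wrong offset. The pattern in isolation, as a runnable sketch:

#include <stdio.h>

/* Pretend sink: entry 3 no longer fits in the buffer. */
static int emit(int idx)
{
	if (idx >= 3)
		return 0;
	printf("entry %d\n", idx);
	return 1;
}

/* Count every slot, even skipped ones, so the caller can resume. */
static void dump(int *resume, int n)
{
	int idx;

	for (idx = 0; idx < n; idx++) {
		if (idx < *resume)
			continue;	/* already sent, but idx still advances */
		if (emit(idx) <= 0)
			break;		/* did not fit: retry this idx next time */
	}
	*resume = idx;
}

int main(void)
{
	int resume = 0;

	dump(&resume, 8);	/* emits 0..2, stops with resume == 3 */
	dump(&resume, 8);	/* retries entry 3, not entry 0 */
	return 0;
}
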
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7cfbdb215ba2..44f6a181a754 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -71,6 +71,13 @@ static kmem_cache_t *skbuff_head_cache __read_mostly;
71static kmem_cache_t *skbuff_fclone_cache __read_mostly; 71static kmem_cache_t *skbuff_fclone_cache __read_mostly;
72 72
73/* 73/*
74 * lockdep: lock class key used by skb_queue_head_init():
75 */
76struct lock_class_key skb_queue_lock_key;
77
78EXPORT_SYMBOL(skb_queue_lock_key);
79
80/*
74 * Keep out-of-line to prevent kernel bloat. 81 * Keep out-of-line to prevent kernel bloat.
75 * __builtin_return_address is not used because it is not always 82 * __builtin_return_address is not used because it is not always
76 * reliable. 83 * reliable.
diff --git a/net/core/sock.c b/net/core/sock.c
index 533b9317144b..51fcfbc041a7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -129,6 +129,53 @@
129#include <net/tcp.h> 129#include <net/tcp.h>
130#endif 130#endif
131 131
132/*
133 * Each address family might have different locking rules, so we have
134 * one slock key per address family:
135 */
136static struct lock_class_key af_family_keys[AF_MAX];
137static struct lock_class_key af_family_slock_keys[AF_MAX];
138
139#ifdef CONFIG_DEBUG_LOCK_ALLOC
140/*
141 * Make lock validator output more readable. (we pre-construct these
142 * strings build-time, so that runtime initialization of socket
143 * locks is fast):
144 */
145static const char *af_family_key_strings[AF_MAX+1] = {
146 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
147 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
148 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
149 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
150 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
151 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
152 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
153 "sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
154 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
155 "sk_lock-27" , "sk_lock-28" , "sk_lock-29" ,
156 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX"
157};
158static const char *af_family_slock_key_strings[AF_MAX+1] = {
159 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
160 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
161 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
162 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
163 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
164 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
165 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
166 "slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" ,
167 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
168 "slock-27" , "slock-28" , "slock-29" ,
169 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_MAX"
170};
171#endif
172
173/*
174 * sk_callback_lock locking rules are per-address-family,
175 * so split the lock classes by using a per-AF key:
176 */
177static struct lock_class_key af_callback_keys[AF_MAX];
178
132/* Take into consideration the size of the struct sk_buff overhead in the 179/* Take into consideration the size of the struct sk_buff overhead in the
133 * determination of these values, since that is non-constant across 180 * determination of these values, since that is non-constant across
134 * platforms. This makes socket queueing behavior and performance 181 * platforms. This makes socket queueing behavior and performance
@@ -237,9 +284,16 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
237 skb->dev = NULL; 284 skb->dev = NULL;
238 285
239 bh_lock_sock(sk); 286 bh_lock_sock(sk);
240 if (!sock_owned_by_user(sk)) 287 if (!sock_owned_by_user(sk)) {
288 /*
289 * trylock + unlock semantics:
290 */
291 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
292
241 rc = sk->sk_backlog_rcv(sk, skb); 293 rc = sk->sk_backlog_rcv(sk, skb);
242 else 294
295 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
296 } else
243 sk_add_backlog(sk, skb); 297 sk_add_backlog(sk, skb);
244 bh_unlock_sock(sk); 298 bh_unlock_sock(sk);
245out: 299out:
@@ -749,6 +803,33 @@ lenout:
749 return 0; 803 return 0;
750} 804}
751 805
806/*
807 * Initialize an sk_lock.
808 *
809 * (We also register the sk_lock with the lock validator.)
810 */
811static inline void sock_lock_init(struct sock *sk)
812{
813 spin_lock_init(&sk->sk_lock.slock);
814 sk->sk_lock.owner = NULL;
815 init_waitqueue_head(&sk->sk_lock.wq);
816 /*
817 * Make sure we are not reinitializing a held lock:
818 */
819 debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
820
821 /*
822 * Mark both the sk_lock and the sk_lock.slock as a
823 * per-address-family lock class:
824 */
825 lockdep_set_class_and_name(&sk->sk_lock.slock,
826 af_family_slock_keys + sk->sk_family,
827 af_family_slock_key_strings[sk->sk_family]);
828 lockdep_init_map(&sk->sk_lock.dep_map,
829 af_family_key_strings[sk->sk_family],
830 af_family_keys + sk->sk_family);
831}
832
752/** 833/**
753 * sk_alloc - All socket objects are allocated here 834 * sk_alloc - All socket objects are allocated here
754 * @family: protocol family 835 * @family: protocol family
@@ -848,6 +929,8 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
848 929
849 rwlock_init(&newsk->sk_dst_lock); 930 rwlock_init(&newsk->sk_dst_lock);
850 rwlock_init(&newsk->sk_callback_lock); 931 rwlock_init(&newsk->sk_callback_lock);
932 lockdep_set_class(&newsk->sk_callback_lock,
933 af_callback_keys + newsk->sk_family);
851 934
852 newsk->sk_dst_cache = NULL; 935 newsk->sk_dst_cache = NULL;
853 newsk->sk_wmem_queued = 0; 936 newsk->sk_wmem_queued = 0;
@@ -1422,6 +1505,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1422 1505
1423 rwlock_init(&sk->sk_dst_lock); 1506 rwlock_init(&sk->sk_dst_lock);
1424 rwlock_init(&sk->sk_callback_lock); 1507 rwlock_init(&sk->sk_callback_lock);
1508 lockdep_set_class(&sk->sk_callback_lock,
1509 af_callback_keys + sk->sk_family);
1425 1510
1426 sk->sk_state_change = sock_def_wakeup; 1511 sk->sk_state_change = sock_def_wakeup;
1427 sk->sk_data_ready = sock_def_readable; 1512 sk->sk_data_ready = sock_def_readable;
@@ -1449,24 +1534,34 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1449void fastcall lock_sock(struct sock *sk) 1534void fastcall lock_sock(struct sock *sk)
1450{ 1535{
1451 might_sleep(); 1536 might_sleep();
1452 spin_lock_bh(&(sk->sk_lock.slock)); 1537 spin_lock_bh(&sk->sk_lock.slock);
1453 if (sk->sk_lock.owner) 1538 if (sk->sk_lock.owner)
1454 __lock_sock(sk); 1539 __lock_sock(sk);
1455 sk->sk_lock.owner = (void *)1; 1540 sk->sk_lock.owner = (void *)1;
1456 spin_unlock_bh(&(sk->sk_lock.slock)); 1541 spin_unlock(&sk->sk_lock.slock);
1542 /*
1543 * The sk_lock has mutex_lock() semantics here:
1544 */
1545 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
1546 local_bh_enable();
1457} 1547}
1458 1548
1459EXPORT_SYMBOL(lock_sock); 1549EXPORT_SYMBOL(lock_sock);
1460 1550
1461void fastcall release_sock(struct sock *sk) 1551void fastcall release_sock(struct sock *sk)
1462{ 1552{
1463 spin_lock_bh(&(sk->sk_lock.slock)); 1553 /*
1554 * The sk_lock has mutex_unlock() semantics:
1555 */
1556 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
1557
1558 spin_lock_bh(&sk->sk_lock.slock);
1464 if (sk->sk_backlog.tail) 1559 if (sk->sk_backlog.tail)
1465 __release_sock(sk); 1560 __release_sock(sk);
1466 sk->sk_lock.owner = NULL; 1561 sk->sk_lock.owner = NULL;
1467 if (waitqueue_active(&(sk->sk_lock.wq))) 1562 if (waitqueue_active(&sk->sk_lock.wq))
1468 wake_up(&(sk->sk_lock.wq)); 1563 wake_up(&sk->sk_lock.wq);
1469 spin_unlock_bh(&(sk->sk_lock.slock)); 1564 spin_unlock_bh(&sk->sk_lock.slock);
1470} 1565}
1471EXPORT_SYMBOL(release_sock); 1566EXPORT_SYMBOL(release_sock);
1472 1567
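
The sock.c changes above give every address family its own lockdep class: classes are keyed by the address of a static lock_class_key, so one static array per lock suffices, and the printable names are fixed at build time. lock_sock()/release_sock() additionally teach the validator that sk_lock behaves like a mutex even though it is built from a spinlock plus owner field. A kernel-style sketch of the classification step, assuming a hypothetical MY_AF_MAX family count:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

#define MY_AF_MAX 3

/* lockdep keys are compared by address: one static key per family. */
static struct lock_class_key my_keys[MY_AF_MAX];

static const char *my_key_names[MY_AF_MAX] = {
	"my_lock-AF_0", "my_lock-AF_1", "my_lock-AF_2",
};

static void my_lock_init(spinlock_t *lock, int family)
{
	spin_lock_init(lock);
	/* Same init path, but a distinct validator class per family. */
	lockdep_set_class_and_name(lock, &my_keys[family],
				   my_key_names[family]);
}
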
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8d157157bf8e..318d4674faa1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1106,7 +1106,15 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
1106 int ihl; 1106 int ihl;
1107 int id; 1107 int id;
1108 1108
1109 if (!pskb_may_pull(skb, sizeof(*iph))) 1109 if (unlikely(skb_shinfo(skb)->gso_type &
1110 ~(SKB_GSO_TCPV4 |
1111 SKB_GSO_UDP |
1112 SKB_GSO_DODGY |
1113 SKB_GSO_TCP_ECN |
1114 0)))
1115 goto out;
1116
1117 if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
1110 goto out; 1118 goto out;
1111 1119
1112 iph = skb->nh.iph; 1120 iph = skb->nh.iph;
@@ -1114,7 +1122,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
1114 if (ihl < sizeof(*iph)) 1122 if (ihl < sizeof(*iph))
1115 goto out; 1123 goto out;
1116 1124
1117 if (!pskb_may_pull(skb, ihl)) 1125 if (unlikely(!pskb_may_pull(skb, ihl)))
1118 goto out; 1126 goto out;
1119 1127
1120 skb->h.raw = __skb_pull(skb, ihl); 1128 skb->h.raw = __skb_pull(skb, ihl);
@@ -1125,7 +1133,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
1125 1133
1126 rcu_read_lock(); 1134 rcu_read_lock();
1127 ops = rcu_dereference(inet_protos[proto]); 1135 ops = rcu_dereference(inet_protos[proto]);
1128 if (ops && ops->gso_segment) 1136 if (likely(ops && ops->gso_segment))
1129 segs = ops->gso_segment(skb, features); 1137 segs = ops->gso_segment(skb, features);
1130 rcu_read_unlock(); 1138 rcu_read_unlock();
1131 1139
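
inet_gso_segment() now rejects any skb whose gso_type carries a bit the IPv4 path does not understand; masking with the complement of the allowed set catches unknown flags in one test. A runnable sketch with hypothetical bit values:

#include <stdio.h>

#define GSO_TCPV4   (1 << 0)	/* illustrative values, not the kernel's */
#define GSO_UDP     (1 << 1)
#define GSO_DODGY   (1 << 2)
#define GSO_TCP_ECN (1 << 3)

/* True if type contains only bits this layer knows how to handle. */
static int gso_type_ok(unsigned int type)
{
	return !(type & ~(GSO_TCPV4 | GSO_UDP | GSO_DODGY | GSO_TCP_ECN));
}

int main(void)
{
	printf("%d\n", gso_type_ok(GSO_TCPV4 | GSO_TCP_ECN));	/* 1 */
	printf("%d\n", gso_type_ok(1 << 7));			/* 0: unknown bit */
	return 0;
}
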
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index da44fabf4dc5..2dc6dbb28467 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -205,21 +205,27 @@ __u8 ip_tos2prio[16] = {
205struct rt_hash_bucket { 205struct rt_hash_bucket {
206 struct rtable *chain; 206 struct rtable *chain;
207}; 207};
208#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 208#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
209 defined(CONFIG_PROVE_LOCKING)
209/* 210/*
210 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks 211 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
211 * The size of this table is a power of two and depends on the number of CPUS. 212 * The size of this table is a power of two and depends on the number of CPUS.
213 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
212 */ 214 */
213#if NR_CPUS >= 32 215#ifdef CONFIG_LOCKDEP
214#define RT_HASH_LOCK_SZ 4096 216# define RT_HASH_LOCK_SZ 256
215#elif NR_CPUS >= 16
216#define RT_HASH_LOCK_SZ 2048
217#elif NR_CPUS >= 8
218#define RT_HASH_LOCK_SZ 1024
219#elif NR_CPUS >= 4
220#define RT_HASH_LOCK_SZ 512
221#else 217#else
222#define RT_HASH_LOCK_SZ 256 218# if NR_CPUS >= 32
219# define RT_HASH_LOCK_SZ 4096
220# elif NR_CPUS >= 16
221# define RT_HASH_LOCK_SZ 2048
222# elif NR_CPUS >= 8
223# define RT_HASH_LOCK_SZ 1024
224# elif NR_CPUS >= 4
225# define RT_HASH_LOCK_SZ 512
226# else
227# define RT_HASH_LOCK_SZ 256
228# endif
223#endif 229#endif
224 230
225static spinlock_t *rt_hash_locks; 231static spinlock_t *rt_hash_locks;
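
The rt_hash_bucket locking above uses one small table of spinlocks shared by all hash chains rather than a lock per bucket; under CONFIG_LOCKDEP the table is capped at 256 entries because an instrumented spinlock_t is much larger. A user-space analogue of the mask-indexed lock table:

#include <pthread.h>

#define LOCK_SZ   256			/* power of two, so masking works */
#define LOCK_MASK (LOCK_SZ - 1)

static pthread_spinlock_t locks[LOCK_SZ];

static void locks_init(void)
{
	int i;

	for (i = 0; i < LOCK_SZ; i++)
		pthread_spin_init(&locks[i], PTHREAD_PROCESS_PRIVATE);
}

/* Many buckets share one lock; the bucket hash selects which. */
static pthread_spinlock_t *bucket_lock(unsigned int hash)
{
	return &locks[hash & LOCK_MASK];
}
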
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 804458712d88..f6a2d9223d07 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2170,8 +2170,19 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2170 2170
2171 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { 2171 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2172 /* Packet is from an untrusted source, reset gso_segs. */ 2172 /* Packet is from an untrusted source, reset gso_segs. */
2173 int mss = skb_shinfo(skb)->gso_size; 2173 int type = skb_shinfo(skb)->gso_type;
2174 int mss;
2175
2176 if (unlikely(type &
2177 ~(SKB_GSO_TCPV4 |
2178 SKB_GSO_DODGY |
2179 SKB_GSO_TCP_ECN |
2180 SKB_GSO_TCPV6 |
2181 0) ||
2182 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2183 goto out;
2174 2184
2185 mss = skb_shinfo(skb)->gso_size;
2175 skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss; 2186 skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
2176 2187
2177 segs = NULL; 2188 segs = NULL;
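
tcp_tso_segment() recomputes gso_segs as the ceiling of len/mss after validating the type bits; the classic (len + mss - 1) / mss idiom rounds up without floating point. For example:

#include <stdio.h>

/* Segments needed to carry len bytes at mss bytes each, rounded up. */
static unsigned int segs_for(unsigned int len, unsigned int mss)
{
	return (len + mss - 1) / mss;
}

int main(void)
{
	printf("%u\n", segs_for(4000, 1460));	/* 3 */
	printf("%u\n", segs_for(2920, 1460));	/* 2: exact multiple */
	return 0;
}
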
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8355b729fa95..5a886e6efbbe 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -90,7 +90,7 @@ static struct socket *tcp_socket;
90void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); 90void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
91 91
92struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { 92struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
93 .lhash_lock = RW_LOCK_UNLOCKED, 93 .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
94 .lhash_users = ATOMIC_INIT(0), 94 .lhash_users = ATOMIC_INIT(0),
95 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), 95 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
96}; 96};
@@ -1090,7 +1090,7 @@ process:
1090 1090
1091 skb->dev = NULL; 1091 skb->dev = NULL;
1092 1092
1093 bh_lock_sock(sk); 1093 bh_lock_sock_nested(sk);
1094 ret = 0; 1094 ret = 0;
1095 if (!sock_owned_by_user(sk)) { 1095 if (!sock_owned_by_user(sk)) {
1096#ifdef CONFIG_NET_DMA 1096#ifdef CONFIG_NET_DMA
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e0851697ad5e..0ccb7cb22b15 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -40,7 +40,7 @@ int sysctl_tcp_abort_on_overflow;
40struct inet_timewait_death_row tcp_death_row = { 40struct inet_timewait_death_row tcp_death_row = {
41 .sysctl_max_tw_buckets = NR_FILE * 2, 41 .sysctl_max_tw_buckets = NR_FILE * 2,
42 .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS, 42 .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
43 .death_lock = SPIN_LOCK_UNLOCKED, 43 .death_lock = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
44 .hashinfo = &tcp_hashinfo, 44 .hashinfo = &tcp_hashinfo,
45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, 45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
46 (unsigned long)&tcp_death_row), 46 (unsigned long)&tcp_death_row),
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c28e5c287447..0c17dec11c8d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -64,6 +64,14 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
64 struct inet6_protocol *ops; 64 struct inet6_protocol *ops;
65 int proto; 65 int proto;
66 66
67 if (unlikely(skb_shinfo(skb)->gso_type &
68 ~(SKB_GSO_UDP |
69 SKB_GSO_DODGY |
70 SKB_GSO_TCP_ECN |
71 SKB_GSO_TCPV6 |
72 0)))
73 goto out;
74
67 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) 75 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
68 goto out; 76 goto out;
69 77
@@ -111,7 +119,8 @@ unlock:
111 119
112 for (skb = segs; skb; skb = skb->next) { 120 for (skb = segs; skb; skb = skb->next) {
113 ipv6h = skb->nh.ipv6h; 121 ipv6h = skb->nh.ipv6h;
114 ipv6h->payload_len = htons(skb->len - skb->mac_len); 122 ipv6h->payload_len = htons(skb->len - skb->mac_len -
123 sizeof(*ipv6h));
115 } 124 }
116 125
117out: 126out:
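
The ipv6_gso_segment() fix above subtracts sizeof(*ipv6h) when rewriting payload_len of each segment, since the IPv6 payload length field excludes the fixed 40-byte header (unlike IPv4's tot_len, which includes it). A sketch of the corrected arithmetic, assuming a plain Ethernet mac_len:

#include <stdio.h>

#define ETH_HLEN  14	/* skb->mac_len for plain Ethernet */
#define IPV6_HLEN 40	/* fixed IPv6 header, excluded from payload_len */

/* payload_len for a segment whose on-wire length is len bytes. */
static unsigned short ipv6_payload_len(unsigned int len)
{
	return (unsigned short)(len - ETH_HLEN - IPV6_HLEN);
}

int main(void)
{
	printf("%u\n", ipv6_payload_len(1514));	/* 1460 */
	return 0;
}
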
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 7ef143c0ebf6..f26898b00347 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -25,6 +25,7 @@
25#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/poison.h>
28#include <linux/icmpv6.h> 29#include <linux/icmpv6.h>
29#include <net/ipv6.h> 30#include <net/ipv6.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
@@ -376,7 +377,7 @@ ip6t_do_table(struct sk_buff **pskb,
376 } while (!hotdrop); 377 } while (!hotdrop);
377 378
378#ifdef CONFIG_NETFILTER_DEBUG 379#ifdef CONFIG_NETFILTER_DEBUG
379 ((struct ip6t_entry *)table_base)->comefrom = 0xdead57ac; 380 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
380#endif 381#endif
381 read_unlock_bh(&table->lock); 382 read_unlock_bh(&table->lock);
382 383
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 70cee82a98bf..55c0adc8f115 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -156,7 +156,7 @@ static void netlink_sock_destruct(struct sock *sk)
156 156
157static void netlink_table_grab(void) 157static void netlink_table_grab(void)
158{ 158{
159 write_lock_bh(&nl_table_lock); 159 write_lock_irq(&nl_table_lock);
160 160
161 if (atomic_read(&nl_table_users)) { 161 if (atomic_read(&nl_table_users)) {
162 DECLARE_WAITQUEUE(wait, current); 162 DECLARE_WAITQUEUE(wait, current);
@@ -166,9 +166,9 @@ static void netlink_table_grab(void)
166 set_current_state(TASK_UNINTERRUPTIBLE); 166 set_current_state(TASK_UNINTERRUPTIBLE);
167 if (atomic_read(&nl_table_users) == 0) 167 if (atomic_read(&nl_table_users) == 0)
168 break; 168 break;
169 write_unlock_bh(&nl_table_lock); 169 write_unlock_irq(&nl_table_lock);
170 schedule(); 170 schedule();
171 write_lock_bh(&nl_table_lock); 171 write_lock_irq(&nl_table_lock);
172 } 172 }
173 173
174 __set_current_state(TASK_RUNNING); 174 __set_current_state(TASK_RUNNING);
@@ -178,7 +178,7 @@ static void netlink_table_grab(void)
178 178
179static __inline__ void netlink_table_ungrab(void) 179static __inline__ void netlink_table_ungrab(void)
180{ 180{
181 write_unlock_bh(&nl_table_lock); 181 write_unlock_irq(&nl_table_lock);
182 wake_up(&nl_table_wait); 182 wake_up(&nl_table_wait);
183} 183}
184 184
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index eba6df054b1f..389a4119e1b4 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -800,7 +800,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
800 800
801 /* Now attach up the new socket */ 801 /* Now attach up the new socket */
802 kfree_skb(skb); 802 kfree_skb(skb);
803 sk->sk_ack_backlog--; 803 sk_acceptq_removed(sk);
804 newsock->sk = newsk; 804 newsock->sk = newsk;
805 805
806out: 806out:
@@ -985,7 +985,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
985 nr_make->vr = 0; 985 nr_make->vr = 0;
986 nr_make->vl = 0; 986 nr_make->vl = 0;
987 nr_make->state = NR_STATE_3; 987 nr_make->state = NR_STATE_3;
988 sk->sk_ack_backlog++; 988 sk_acceptq_added(sk);
989 989
990 nr_insert_socket(make); 990 nr_insert_socket(make);
991 991
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 7799fe82aeb6..d0a67bb31363 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -752,7 +752,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
752 752
753 rose_insert_socket(sk); /* Finish the bind */ 753 rose_insert_socket(sk); /* Finish the bind */
754 } 754 }
755 755rose_try_next_neigh:
756 rose->dest_addr = addr->srose_addr; 756 rose->dest_addr = addr->srose_addr;
757 rose->dest_call = addr->srose_call; 757 rose->dest_call = addr->srose_call;
758 rose->rand = ((long)rose & 0xFFFF) + rose->lci; 758 rose->rand = ((long)rose & 0xFFFF) + rose->lci;
@@ -810,6 +810,11 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
810 } 810 }
811 811
812 if (sk->sk_state != TCP_ESTABLISHED) { 812 if (sk->sk_state != TCP_ESTABLISHED) {
813 /* Try next neighbour */
814 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic);
815 if (rose->neighbour)
816 goto rose_try_next_neigh;
817 /* No more neighbour */
813 sock->state = SS_UNCONNECTED; 818 sock->state = SS_UNCONNECTED;
814 return sock_error(sk); /* Always set at this point */ 819 return sock_error(sk); /* Always set at this point */
815 } 820 }
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 9d0bf2a1ea3f..7c279e2659ec 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -59,6 +59,7 @@ static int rose_rebuild_header(struct sk_buff *skb)
59 struct net_device_stats *stats = netdev_priv(dev); 59 struct net_device_stats *stats = netdev_priv(dev);
60 unsigned char *bp = (unsigned char *)skb->data; 60 unsigned char *bp = (unsigned char *)skb->data;
61 struct sk_buff *skbn; 61 struct sk_buff *skbn;
62 unsigned int len;
62 63
63#ifdef CONFIG_INET 64#ifdef CONFIG_INET
64 if (arp_find(bp + 7, skb)) { 65 if (arp_find(bp + 7, skb)) {
@@ -75,6 +76,8 @@ static int rose_rebuild_header(struct sk_buff *skb)
75 76
76 kfree_skb(skb); 77 kfree_skb(skb);
77 78
79 len = skbn->len;
80
78 if (!rose_route_frame(skbn, NULL)) { 81 if (!rose_route_frame(skbn, NULL)) {
79 kfree_skb(skbn); 82 kfree_skb(skbn);
80 stats->tx_errors++; 83 stats->tx_errors++;
@@ -82,7 +85,7 @@ static int rose_rebuild_header(struct sk_buff *skb)
82 } 85 }
83 86
84 stats->tx_packets++; 87 stats->tx_packets++;
85 stats->tx_bytes += skbn->len; 88 stats->tx_bytes += len;
86#endif 89#endif
87 return 1; 90 return 1;
88} 91}
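
rose_rebuild_header() now snapshots skbn->len before calling rose_route_frame(), because on success the routing code consumes the buffer and the old stats update read freed memory. The same save-before-handoff pattern in a runnable sketch:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	unsigned int len;
	char *data;
};

/* Consumes the packet: after this call p must not be touched. */
static int route_frame(struct pkt *p)
{
	free(p->data);
	free(p);
	return 1;
}

static void tx_account(struct pkt *p, unsigned long *tx_bytes)
{
	unsigned int len = p->len;	/* snapshot before p may be freed */

	if (route_frame(p))
		*tx_bytes += len;	/* safe: p is not dereferenced here */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	unsigned long tx_bytes = 0;

	p->len = 128;
	p->data = malloc(p->len);
	tx_account(p, &tx_bytes);
	printf("%lu\n", tx_bytes);	/* 128 */
	return 0;
}
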
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 6db6006616c6..dc6cb93c8830 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -515,7 +515,7 @@ rpc_depopulate(struct dentry *parent)
515 struct dentry *dentry, *dvec[10]; 515 struct dentry *dentry, *dvec[10];
516 int n = 0; 516 int n = 0;
517 517
518 mutex_lock(&dir->i_mutex); 518 mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
519repeat: 519repeat:
520 spin_lock(&dcache_lock); 520 spin_lock(&dcache_lock);
521 list_for_each_safe(pos, next, &parent->d_subdirs) { 521 list_for_each_safe(pos, next, &parent->d_subdirs) {
@@ -631,7 +631,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
631 if ((error = rpc_lookup_parent(path, nd)) != 0) 631 if ((error = rpc_lookup_parent(path, nd)) != 0)
632 return ERR_PTR(error); 632 return ERR_PTR(error);
633 dir = nd->dentry->d_inode; 633 dir = nd->dentry->d_inode;
634 mutex_lock(&dir->i_mutex); 634 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
635 dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len); 635 dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
636 if (IS_ERR(dentry)) 636 if (IS_ERR(dentry))
637 goto out_err; 637 goto out_err;
@@ -693,7 +693,7 @@ rpc_rmdir(char *path)
693 if ((error = rpc_lookup_parent(path, &nd)) != 0) 693 if ((error = rpc_lookup_parent(path, &nd)) != 0)
694 return error; 694 return error;
695 dir = nd.dentry->d_inode; 695 dir = nd.dentry->d_inode;
696 mutex_lock(&dir->i_mutex); 696 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
697 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); 697 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
698 if (IS_ERR(dentry)) { 698 if (IS_ERR(dentry)) {
699 error = PTR_ERR(dentry); 699 error = PTR_ERR(dentry);
@@ -754,7 +754,7 @@ rpc_unlink(char *path)
754 if ((error = rpc_lookup_parent(path, &nd)) != 0) 754 if ((error = rpc_lookup_parent(path, &nd)) != 0)
755 return error; 755 return error;
756 dir = nd.dentry->d_inode; 756 dir = nd.dentry->d_inode;
757 mutex_lock(&dir->i_mutex); 757 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
758 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); 758 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
759 if (IS_ERR(dentry)) { 759 if (IS_ERR(dentry)) {
760 error = PTR_ERR(dentry); 760 error = PTR_ERR(dentry);
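
The rpc_pipe.c hunks switch to mutex_lock_nested() with I_MUTEX_PARENT or I_MUTEX_CHILD so lockdep can tell a deliberate parent/child i_mutex nesting from a genuine deadlock, since both inodes share one lock class. A kernel-style sketch of the same annotation with hypothetical subclass names:

#include <linux/mutex.h>

enum { SUBCLASS_PARENT, SUBCLASS_CHILD };

/* Two mutexes of the same class, always taken parent first; the
 * distinct subclasses mark the nesting as intentional for lockdep. */
static void lock_pair(struct mutex *parent, struct mutex *child)
{
	mutex_lock_nested(parent, SUBCLASS_PARENT);
	mutex_lock_nested(child, SUBCLASS_CHILD);
}

static void unlock_pair(struct mutex *parent, struct mutex *child)
{
	mutex_unlock(child);
	mutex_unlock(parent);
}
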
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 86f54f3512f1..762aac2572be 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -297,7 +297,10 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
297 * buf_acquire - creates a TIPC message buffer 297 * buf_acquire - creates a TIPC message buffer
298 * @size: message size (including TIPC header) 298 * @size: message size (including TIPC header)
299 * 299 *
300 * Returns a new buffer. Space is reserved for a data link header. 300 * Returns a new buffer with data pointers set to the specified size.
301 *
302 * NOTE: Headroom is reserved to allow prepending of a data link header.
303 * There may also be unrequested tailroom present at the buffer's end.
301 */ 304 */
302 305
303static inline struct sk_buff *buf_acquire(u32 size) 306static inline struct sk_buff *buf_acquire(u32 size)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c6831c75cfa4..c10e18a49b96 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -998,6 +998,8 @@ static int link_bundle_buf(struct link *l_ptr,
998 return 0; 998 return 0;
999 if (skb_tailroom(bundler) < (pad + size)) 999 if (skb_tailroom(bundler) < (pad + size))
1000 return 0; 1000 return 0;
1001 if (link_max_pkt(l_ptr) < (to_pos + size))
1002 return 0;
1001 1003
1002 skb_put(bundler, pad + size); 1004 skb_put(bundler, pad + size);
1003 memcpy(bundler->data + to_pos, buf->data, size); 1005 memcpy(bundler->data + to_pos, buf->data, size);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aca650109425..f70475bfb62a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
144 scm->seclen = *UNIXSECLEN(skb); 144 scm->seclen = *UNIXSECLEN(skb);
145} 145}
146#else 146#else
147static void unix_get_peersec_dgram(struct sk_buff *skb) 147static inline void unix_get_peersec_dgram(struct sk_buff *skb)
148{ } 148{ }
149 149
150static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) 150static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
@@ -565,6 +565,14 @@ static struct proto unix_proto = {
565 .obj_size = sizeof(struct unix_sock), 565 .obj_size = sizeof(struct unix_sock),
566}; 566};
567 567
568/*
569 * AF_UNIX sockets do not interact with hardware, hence they
570 * don't trigger interrupts - so it's safe for them to have
571 * bh-unsafe locking for their sk_receive_queue.lock. Split off
572 * this special lock-class by reinitializing the spinlock key:
573 */
574static struct lock_class_key af_unix_sk_receive_queue_lock_key;
575
568static struct sock * unix_create1(struct socket *sock) 576static struct sock * unix_create1(struct socket *sock)
569{ 577{
570 struct sock *sk = NULL; 578 struct sock *sk = NULL;
@@ -580,6 +588,8 @@ static struct sock * unix_create1(struct socket *sock)
580 atomic_inc(&unix_nr_socks); 588 atomic_inc(&unix_nr_socks);
581 589
582 sock_init_data(sock,sk); 590 sock_init_data(sock,sk);
591 lockdep_set_class(&sk->sk_receive_queue.lock,
592 &af_unix_sk_receive_queue_lock_key);
583 593
584 sk->sk_write_space = unix_write_space; 594 sk->sk_write_space = unix_write_space;
585 sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen; 595 sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
@@ -1045,7 +1055,7 @@ restart:
1045 goto out_unlock; 1055 goto out_unlock;
1046 } 1056 }
1047 1057
1048 unix_state_wlock(sk); 1058 unix_state_wlock_nested(sk);
1049 1059
1050 if (sk->sk_state != st) { 1060 if (sk->sk_state != st) {
1051 unix_state_wunlock(sk); 1061 unix_state_wunlock(sk);
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
new file mode 100644
index 000000000000..aa9990a3ccd6
--- /dev/null
+++ b/scripts/Makefile.headersinst
@@ -0,0 +1,158 @@
1# ==========================================================================
2# Installing headers
3#
4# header-y files will be installed verbatim
5# unifdef-y are the files where unifdef will be run before installing files
6# objhdr-y are generated files that will be installed verbatim
7#
8# ==========================================================================
9
10UNIFDEF := unifdef -U__KERNEL__
11
12# Eliminate the contents of (and inclusions of) compiler.h
13HDRSED := sed -e "s/ inline / __inline__ /g" \
14 -e "s/[[:space:]]__user[[:space:]]\+/ /g" \
15 -e "s/(__user[[:space:]]\+/ (/g" \
16 -e "s/[[:space:]]__force[[:space:]]\+/ /g" \
17 -e "s/(__force[[:space:]]\+/ (/g" \
18 -e "s/[[:space:]]__iomem[[:space:]]\+/ /g" \
19 -e "s/(__iomem[[:space:]]\+/ (/g" \
20 -e "s/[[:space:]]__attribute_const__[[:space:]]\+/\ /g" \
21 -e "s/[[:space:]]__attribute_const__$$//" \
22 -e "/^\#include <linux\/compiler.h>/d"
23
24_dst := $(if $(dst),$(dst),$(obj))
25
26.PHONY: __headersinst
27__headersinst:
28
29
30ifeq (,$(patsubst include/asm/%,,$(obj)/))
31# For producing the generated stuff in include/asm for biarch builds, include
32# both sets of Kbuild files; we'll generate anything which is mentioned in
33# _either_ arch, and recurse into subdirectories which are mentioned in either
34# arch. Since some directories may exist in one but not the other, we must
35# use '-include'.
36GENASM := 1
37archasm := $(subst include/asm,asm-$(ARCH),$(obj))
38altarchasm := $(subst include/asm,asm-$(ALTARCH),$(obj))
39-include $(srctree)/include/$(archasm)/Kbuild
40-include $(srctree)/include/$(altarchasm)/Kbuild
41else
42include $(srctree)/$(obj)/Kbuild
43endif
44
45include scripts/Kbuild.include
46
47# If this is include/asm-$(ARCH) and there's no $(ALTARCH), then
48# override $(_dst) so that we install to include/asm directly.
49ifeq ($(obj)$(ALTARCH),include/asm-$(ARCH))
50 _dst := include/asm
51endif
52
53header-y := $(sort $(header-y))
54unifdef-y := $(sort $(unifdef-y))
55subdir-y := $(patsubst %/,%,$(filter %/, $(header-y)))
56header-y := $(filter-out %/, $(header-y))
57header-y := $(filter-out $(unifdef-y),$(header-y))
58
59ifdef ALTARCH
60ifeq ($(obj),include/asm-$(ARCH))
61altarch-y := altarch-dir
62endif
63endif
64
65# Make the definitions visible for recursive make invocations
66export ALTARCH
67export ARCHDEF
68export ALTARCHDEF
69
70quiet_cmd_o_hdr_install = INSTALL $(_dst)/$@
71 cmd_o_hdr_install = cp $(objtree)/$(obj)/$@ $(INSTALL_HDR_PATH)/$(_dst)
72
73quiet_cmd_headers_install = INSTALL $(_dst)/$@
74 cmd_headers_install = $(HDRSED) $(srctree)/$(obj)/$@ \
75 > $(INSTALL_HDR_PATH)/$(_dst)/$@
76
77quiet_cmd_unifdef = UNIFDEF $(_dst)/$@
78 cmd_unifdef = $(UNIFDEF) $(srctree)/$(obj)/$@ | $(HDRSED) \
79 > $(INSTALL_HDR_PATH)/$(_dst)/$@ || :
80
81quiet_cmd_check = CHECK $(_dst)/$@
82 cmd_check = $(srctree)/scripts/hdrcheck.sh \
83 $(INSTALL_HDR_PATH)/include \
84 $(INSTALL_HDR_PATH)/$(_dst)/$@
85
86quiet_cmd_mkdir = MKDIR $@
87 cmd_mkdir = mkdir -p $(INSTALL_HDR_PATH)/$@
88
89quiet_cmd_gen = GEN $(_dst)/$@
90 cmd_gen = \
91STUBDEF=__ASM_STUB_`echo $@ | tr a-z. A-Z_`; \
92(echo "/* File autogenerated by 'make headers_install' */" ; \
93echo "\#ifndef $$STUBDEF" ; \
94echo "\#define $$STUBDEF" ; \
95echo "\# if $(ARCHDEF)" ; \
96if [ -r $(srctree)/include/$(archasm)/$@ ]; then \
97 echo "\# include <$(archasm)/$@>" ; \
98else \
99 echo "\# error $(archasm)/$@ does not exist in" \
100 "the $(ARCH) architecture" ; \
101fi ; \
102echo "\# elif $(ALTARCHDEF)" ; \
103if [ -r $(srctree)/include/$(altarchasm)/$@ ]; then \
104 echo "\# include <$(altarchasm)/$@>" ; \
105else \
106 echo "\# error $(altarchasm)/$@ does not exist in" \
107 "the $(ALTARCH) architecture" ; \
108fi ; \
109echo "\# else" ; \
110echo "\# warning This machine appears to be" \
111 "neither $(ARCH) nor $(ALTARCH)." ; \
112echo "\# endif" ; \
113echo "\#endif /* $$STUBDEF */" ; \
114) > $(INSTALL_HDR_PATH)/$(_dst)/$@
115
116__headersinst: $(subdir-y) $(header-y) $(unifdef-y) $(altarch-y) $(objhdr-y)
117
118.PHONY: $(header-y) $(unifdef-y) $(subdir-y)
119
120ifdef HDRCHECK
121# Rules for checking headers
122$(objhdr-y) $(header-y) $(unifdef-y):
123 $(call cmd,check)
124else
125# Rules for installing headers
126
127$(objhdr-y) $(subdir-y) $(header-y) $(unifdef-y): $(_dst)
128
129.PHONY: $(_dst)
130$(_dst):
131 $(call cmd,mkdir)
132
133ifdef GENASM
134$(objhdr-y) $(header-y) $(unifdef-y):
135 $(call cmd,gen)
136
137else
138$(objhdr-y):
139 $(call cmd,o_hdr_install)
140
141$(header-y):
142 $(call cmd,headers_install)
143
144$(unifdef-y):
145 $(call cmd,unifdef)
146endif
147endif
148
149hdrinst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
150
151.PHONY: altarch-dir
152altarch-dir:
153 $(Q)$(MAKE) $(hdrinst)=include/asm-$(ALTARCH) dst=include/asm-$(ALTARCH)
154 $(Q)$(MAKE) $(hdrinst)=include/asm dst=include/asm
155
156# Recursion
157$(subdir-y):
158 $(Q)$(MAKE) $(hdrinst)=$(obj)/$@ dst=$(_dst)/$@ rel=../$(rel)
diff --git a/scripts/checkversion.pl b/scripts/checkversion.pl
index 9f84e562318d..ec7d21161bdc 100755
--- a/scripts/checkversion.pl
+++ b/scripts/checkversion.pl
@@ -1,7 +1,7 @@
1#! /usr/bin/perl 1#! /usr/bin/perl
2# 2#
3# checkversion finds uses of LINUX_VERSION_CODE, KERNEL_VERSION, or 3# checkversion finds uses of LINUX_VERSION_CODE or KERNEL_VERSION
4# UTS_RELEASE without including <linux/version.h>, or cases of 4# without including <linux/version.h>, or cases of
5# including <linux/version.h> that don't need it. 5# including <linux/version.h> that don't need it.
6# Copyright (C) 2003, Randy Dunlap <rdunlap@xenotime.net> 6# Copyright (C) 2003, Randy Dunlap <rdunlap@xenotime.net>
7 7
@@ -41,8 +41,7 @@ foreach $file (@ARGV)
41 } 41 }
42 42
43 # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE 43 # Look for uses: LINUX_VERSION_CODE, KERNEL_VERSION, UTS_RELEASE
44 if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/) || 44 if (($_ =~ /LINUX_VERSION_CODE/) || ($_ =~ /\WKERNEL_VERSION/)) {
45 ($_ =~ /UTS_RELEASE/)) {
46 $fUseVersion = 1; 45 $fUseVersion = 1;
47 last LINE if $iLinuxVersion; 46 last LINE if $iLinuxVersion;
48 } 47 }
diff --git a/scripts/hdrcheck.sh b/scripts/hdrcheck.sh
new file mode 100755
index 000000000000..b3bb683b56b6
--- /dev/null
+++ b/scripts/hdrcheck.sh
@@ -0,0 +1,8 @@
1#!/bin/sh
2
3for FILE in `grep '^#include <' $2 | cut -f2 -d\< | cut -f1 -d\> | egrep ^linux\|^asm` ; do
4 if [ ! -r $1/$FILE ]; then
5 echo $2 requires $FILE, which does not exist
6 exit 1
7 fi
8done
diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c
index bab97547a052..7ae0c0bdfad8 100644
--- a/sound/aoa/core/snd-aoa-gpio-feature.c
+++ b/sound/aoa/core/snd-aoa-gpio-feature.c
@@ -112,12 +112,7 @@ static struct device_node *get_gpio(char *name,
112 112
113static void get_irq(struct device_node * np, int *irqptr) 113static void get_irq(struct device_node * np, int *irqptr)
114{ 114{
115 *irqptr = -1; 115 *irqptr = irq_of_parse_and_map(np, 0);
116 if (!np)
117 return;
118 if (np->n_intrs != 1)
119 return;
120 *irqptr = np->intrs[0].line;
121} 116}
122 117
123/* 0x4 is outenable, 0x1 is out, thus 4 or 5 */ 118/* 0x4 is outenable, 0x1 is out, thus 4 or 5 */
diff --git a/sound/aoa/soundbus/i2sbus/i2sbus-core.c b/sound/aoa/soundbus/i2sbus/i2sbus-core.c
index f268dacdaa00..01c0724335a3 100644
--- a/sound/aoa/soundbus/i2sbus/i2sbus-core.c
+++ b/sound/aoa/soundbus/i2sbus/i2sbus-core.c
@@ -129,7 +129,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
129 if (strncmp(np->name, "i2s-", 4)) 129 if (strncmp(np->name, "i2s-", 4))
130 return 0; 130 return 0;
131 131
132 if (np->n_intrs != 3) 132 if (macio_irq_count(macio) != 3)
133 return 0; 133 return 0;
134 134
135 dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL); 135 dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL);
@@ -183,9 +183,10 @@ static int i2sbus_add_dev(struct macio_dev *macio,
183 snprintf(dev->rnames[i], sizeof(dev->rnames[i]), rnames[i], np->name); 183 snprintf(dev->rnames[i], sizeof(dev->rnames[i]), rnames[i], np->name);
184 } 184 }
185 for (i=0;i<3;i++) { 185 for (i=0;i<3;i++) {
186 if (request_irq(np->intrs[i].line, ints[i], 0, dev->rnames[i], dev)) 186 if (request_irq(macio_irq(macio, i), ints[i], 0,
187 dev->rnames[i], dev))
187 goto err; 188 goto err;
188 dev->interrupts[i] = np->intrs[i].line; 189 dev->interrupts[i] = macio_irq(macio, i);
189 } 190 }
190 191
191 for (i=0;i<3;i++) { 192 for (i=0;i<3;i++) {
diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
index d812dc886360..4260de90f36f 100644
--- a/sound/core/seq/seq_device.c
+++ b/sound/core/seq/seq_device.c
@@ -380,6 +380,12 @@ static struct ops_list * create_driver(char *id)
380 /* set up driver entry */ 380 /* set up driver entry */
381 strlcpy(ops->id, id, sizeof(ops->id)); 381 strlcpy(ops->id, id, sizeof(ops->id));
382 mutex_init(&ops->reg_mutex); 382 mutex_init(&ops->reg_mutex);
383 /*
384 * The ->reg_mutex locking rules are per-driver, so we create
385 * separate per-driver lock classes:
386 */
387 lockdep_set_class(&ops->reg_mutex, (struct lock_class_key *)id);
388
383 ops->driver = DRIVER_EMPTY; 389 ops->driver = DRIVER_EMPTY;
384 INIT_LIST_HEAD(&ops->dev_list); 390 INIT_LIST_HEAD(&ops->dev_list);
385 /* lock this instance */ 391 /* lock this instance */
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index d467b4f0ff2b..8c64b58ff77b 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -514,7 +514,7 @@ int snd_seq_port_connect(struct snd_seq_client *connector,
514 atomic_set(&subs->ref_count, 2); 514 atomic_set(&subs->ref_count, 2);
515 515
516 down_write(&src->list_mutex); 516 down_write(&src->list_mutex);
517 down_write(&dest->list_mutex); 517 down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
518 518
519 exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0; 519 exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
520 err = -EBUSY; 520 err = -EBUSY;
@@ -587,7 +587,7 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
587 unsigned long flags; 587 unsigned long flags;
588 588
589 down_write(&src->list_mutex); 589 down_write(&src->list_mutex);
590 down_write(&dest->list_mutex); 590 down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
591 591
592 /* look for the connection */ 592 /* look for the connection */
593 list_for_each(p, &src->list_head) { 593 list_for_each(p, &src->list_head) {
diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c
index de454ca39226..4359903f4376 100644
--- a/sound/oss/dmasound/dmasound_awacs.c
+++ b/sound/oss/dmasound/dmasound_awacs.c
@@ -374,10 +374,7 @@ setup_audio_gpio(const char *name, const char* compatible, int *gpio_addr, int*
374 *gpio_pol = *pp; 374 *gpio_pol = *pp;
375 else 375 else
376 *gpio_pol = 1; 376 *gpio_pol = 1;
377 if (np->n_intrs > 0) 377 return irq_of_parse_and_map(np, 0);
378 return np->intrs[0].line;
379
380 return 0;
381} 378}
382 379
383static inline void 380static inline void
@@ -2864,14 +2861,13 @@ printk("dmasound_pmac: couldn't find a Codec we can handle\n");
2864 * other info if necessary (early AWACS we want to read chip ids) 2861 * other info if necessary (early AWACS we want to read chip ids)
2865 */ 2862 */
2866 2863
2867 if (of_get_address(io, 2, NULL, NULL) == NULL || io->n_intrs < 3) { 2864 if (of_get_address(io, 2, NULL, NULL) == NULL) {
2868 /* OK - maybe we need to use the 'awacs' node (on earlier 2865 /* OK - maybe we need to use the 'awacs' node (on earlier
2869 * machines). 2866 * machines).
2870 */ 2867 */
2871 if (awacs_node) { 2868 if (awacs_node) {
2872 io = awacs_node ; 2869 io = awacs_node ;
2873 if (of_get_address(io, 2, NULL, NULL) == NULL || 2870 if (of_get_address(io, 2, NULL, NULL) == NULL) {
2874 io->n_intrs < 3) {
2875 printk("dmasound_pmac: can't use %s\n", 2871 printk("dmasound_pmac: can't use %s\n",
2876 io->full_name); 2872 io->full_name);
2877 return -ENODEV; 2873 return -ENODEV;
@@ -2940,9 +2936,9 @@ printk("dmasound_pmac: couldn't find a Codec we can handle\n");
2940 if (awacs_revision == AWACS_SCREAMER && awacs) 2936 if (awacs_revision == AWACS_SCREAMER && awacs)
2941 awacs_recalibrate(); 2937 awacs_recalibrate();
2942 2938
2943 awacs_irq = io->intrs[0].line; 2939 awacs_irq = irq_of_parse_and_map(io, 0);
2944 awacs_tx_irq = io->intrs[1].line; 2940 awacs_tx_irq = irq_of_parse_and_map(io, 1);
2945 awacs_rx_irq = io->intrs[2].line; 2941 awacs_rx_irq = irq_of_parse_and_map(io, 2);
2946 2942
2947 /* Hack for legacy crap that will be killed someday */ 2943 /* Hack for legacy crap that will be killed someday */
2948 awacs_node = io; 2944 awacs_node = io;
diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
index 3844d18af19c..232b337852ff 100644
--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
+++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
@@ -180,6 +180,7 @@ static void _dsp_clear_sample_buffer (struct snd_cs46xx *chip, u32 sample_buffer
180void cs46xx_dsp_remove_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor * scb) 180void cs46xx_dsp_remove_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor * scb)
181{ 181{
182 struct dsp_spos_instance * ins = chip->dsp_spos_instance; 182 struct dsp_spos_instance * ins = chip->dsp_spos_instance;
183 unsigned long flags;
183 184
184 /* check integrity */ 185 /* check integrity */
185 snd_assert ( (scb->index >= 0 && 186 snd_assert ( (scb->index >= 0 &&
@@ -194,9 +195,9 @@ void cs46xx_dsp_remove_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor *
194 goto _end); 195 goto _end);
195#endif 196#endif
196 197
197 spin_lock(&scb->lock); 198 spin_lock_irqsave(&scb->lock, flags);
198 _dsp_unlink_scb (chip,scb); 199 _dsp_unlink_scb (chip,scb);
199 spin_unlock(&scb->lock); 200 spin_unlock_irqrestore(&scb->lock, flags);
200 201
201 cs46xx_dsp_proc_free_scb_desc(scb); 202 cs46xx_dsp_proc_free_scb_desc(scb);
202 snd_assert (scb->scb_symbol != NULL, return ); 203 snd_assert (scb->scb_symbol != NULL, return );
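
cs46xx_dsp_remove_scb() now takes scb->lock with spin_lock_irqsave() because the unlink can race with the interrupt handler; the irqsave variant also works when the caller's IRQ state is unknown, restoring whatever was in effect on entry. A kernel-style sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

/* Safe from any context: disables local IRQs while held and restores
 * the previous IRQ state afterwards, instead of assuming it. */
static void touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	/* ... modify state that the interrupt handler also touches ... */
	spin_unlock_irqrestore(&my_lock, flags);
}
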
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 90db9a1d1e0a..641430631505 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -1120,6 +1120,7 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
 	struct snd_pmac *chip;
 	struct device_node *np;
 	int i, err;
+	unsigned int irq;
 	unsigned long ctrl_addr, txdma_addr, rxdma_addr;
 	static struct snd_device_ops ops = {
 		.dev_free =	snd_pmac_dev_free,
@@ -1153,10 +1154,6 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
 	if (chip->is_k2) {
 		static char *rnames[] = {
 			"Sound Control", "Sound DMA" };
-		if (np->n_intrs < 3) {
-			err = -ENODEV;
-			goto __error;
-		}
 		for (i = 0; i < 2; i ++) {
 			if (of_address_to_resource(np->parent, i,
 						   &chip->rsrc[i])) {
@@ -1185,10 +1182,6 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
 	} else {
 		static char *rnames[] = {
 			"Sound Control", "Sound Tx DMA", "Sound Rx DMA" };
-		if (np->n_intrs < 3) {
-			err = -ENODEV;
-			goto __error;
-		}
 		for (i = 0; i < 3; i ++) {
 			if (of_address_to_resource(np, i,
 						   &chip->rsrc[i])) {
@@ -1220,28 +1213,30 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
 	chip->playback.dma = ioremap(txdma_addr, 0x100);
 	chip->capture.dma = ioremap(rxdma_addr, 0x100);
 	if (chip->model <= PMAC_BURGUNDY) {
-		if (request_irq(np->intrs[0].line, snd_pmac_ctrl_intr, 0,
+		irq = irq_of_parse_and_map(np, 0);
+		if (request_irq(irq, snd_pmac_ctrl_intr, 0,
 				"PMac", (void*)chip)) {
-			snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[0].line);
+			snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n",
+				   irq);
 			err = -EBUSY;
 			goto __error;
 		}
-		chip->irq = np->intrs[0].line;
+		chip->irq = irq;
 	}
-	if (request_irq(np->intrs[1].line, snd_pmac_tx_intr, 0,
-			"PMac Output", (void*)chip)) {
-		snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[1].line);
+	irq = irq_of_parse_and_map(np, 1);
+	if (request_irq(irq, snd_pmac_tx_intr, 0, "PMac Output", (void*)chip)){
+		snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq);
 		err = -EBUSY;
 		goto __error;
 	}
-	chip->tx_irq = np->intrs[1].line;
-	if (request_irq(np->intrs[2].line, snd_pmac_rx_intr, 0,
-			"PMac Input", (void*)chip)) {
-		snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[2].line);
+	chip->tx_irq = irq;
+	irq = irq_of_parse_and_map(np, 2);
+	if (request_irq(irq, snd_pmac_rx_intr, 0, "PMac Input", (void*)chip)) {
+		snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq);
 		err = -EBUSY;
 		goto __error;
 	}
-	chip->rx_irq = np->intrs[2].line;
+	chip->rx_irq = irq;
 
 	snd_pmac_sound_feature(chip, 1);
 
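
In snd_pmac_new() above, each interrupt is mapped immediately before request_irq() and the resulting virq is cached (chip->irq, chip->tx_irq, chip->rx_irq) for the later free_irq() calls. A plausible teardown counterpart, sketched under the assumption that the driver also wants to drop the mapping it created (the helper name is invented):

#include <linux/interrupt.h>
#include <asm/irq.h>	/* NO_IRQ, irq_dispose_mapping() */

/* Hypothetical release path for one mapped-and-requested interrupt. */
static void example_release_irq(unsigned int virq, void *dev_id)
{
	if (virq != NO_IRQ) {
		free_irq(virq, dev_id);
		irq_dispose_mapping(virq);	/* undo irq_of_parse_and_map() */
	}
}
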
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 70e4ebc70260..692c61177678 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -1121,7 +1121,7 @@ static long tumbler_find_device(const char *device, const char *platform,
 	DBG("(I) GPIO device %s found, offset: %x, active state: %d !\n",
 	    device, gp->addr, gp->active_state);
 
-	return (node->n_intrs > 0) ? node->intrs[0].line : 0;
+	return irq_of_parse_and_map(node, 0);
 }
 
 /* reset audio */
@@ -1264,16 +1264,16 @@ static int __init tumbler_init(struct snd_pmac *chip)
 				    &mix->line_mute, 1);
 	irq = tumbler_find_device("headphone-detect",
 				  NULL, &mix->hp_detect, 0);
-	if (irq < 0)
+	if (irq <= NO_IRQ)
 		irq = tumbler_find_device("headphone-detect",
 					  NULL, &mix->hp_detect, 1);
-	if (irq < 0)
+	if (irq <= NO_IRQ)
 		irq = tumbler_find_device("keywest-gpio15",
 					  NULL, &mix->hp_detect, 1);
 	mix->headphone_irq = irq;
 	irq = tumbler_find_device("line-output-detect",
 				  NULL, &mix->line_detect, 0);
-	if (irq < 0)
+	if (irq <= NO_IRQ)
 		irq = tumbler_find_device("line-output-detect",
 					  NULL, &mix->line_detect, 1);
 	mix->lineout_irq = irq;
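
Because tumbler_find_device() now returns irq_of_parse_and_map() directly, failure is reported as NO_IRQ rather than a negative errno, which is why the callers' `irq < 0` tests above become `irq <= NO_IRQ`. The general shape of the check (a sketch; `node` stands for whatever device node is being probed):

int irq = irq_of_parse_and_map(node, 0);

if (irq <= NO_IRQ) {
	/* no mapping found: try a fallback node or disable the feature */
}
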
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index db3e22efd02e..2bd8e40b8541 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -1033,10 +1033,10 @@ static int __init amd7930_attach_common(struct resource *rp, int irq)
 
 	strcpy(card->driver, "AMD7930");
 	strcpy(card->shortname, "Sun AMD7930");
-	sprintf(card->longname, "%s at 0x%02lx:0x%08lx, irq %d",
+	sprintf(card->longname, "%s at 0x%02lx:0x%08Lx, irq %d",
 		card->shortname,
 		rp->flags & 0xffL,
-		rp->start,
+		(unsigned long long)rp->start,
 		irq);
 
 	if ((err = snd_amd7930_create(card, rp,
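
The longname fix above, and the identical cs4231 and dbri hunks below, all address the same problem: resource start addresses in this tree are now 64-bit wide, so printing rp->start with %lx mismatches the argument on 32-bit builds. The cure is an explicit cast plus a 64-bit conversion. A sketch, assuming a struct resource with a 64-bit start as in this tree:

#include <linux/ioport.h>
#include <linux/kernel.h>

static void example_print_start(struct resource *rp)
{
	/* Cast to unsigned long long and use %Lx so the format always
	 * matches, whatever width the resource type has. */
	printk(KERN_INFO "resource starts at 0x%016Lx\n",
	       (unsigned long long)rp->start);
}
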
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index 5018fcf41df5..9a06c3bd6944 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -2036,7 +2036,7 @@ static int __init cs4231_sbus_attach(struct sbus_dev *sdev)
 	if (err)
 		return err;
 
-	sprintf(card->longname, "%s at 0x%02lx:0x%016lx, irq %d",
+	sprintf(card->longname, "%s at 0x%02lx:0x%016Lx, irq %d",
 		card->shortname,
 		rp->flags & 0xffL,
 		(unsigned long long)rp->start,
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 59a02a0d9afc..f3ae6e23610e 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2645,7 +2645,7 @@ static int __init dbri_attach(int prom_node, struct sbus_dev *sdev)
 	strcpy(card->driver, "DBRI");
 	strcpy(card->shortname, "Sun DBRI");
 	rp = &sdev->resource[0];
-	sprintf(card->longname, "%s at 0x%02lx:0x%016lx, irq %d",
+	sprintf(card->longname, "%s at 0x%02lx:0x%016Lx, irq %d",
 		card->shortname,
 		rp->flags & 0xffL, (unsigned long long)rp->start, irq.pri);
 